From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.0 commit in: /
Date: Sat, 20 Apr 2019 23:25:26
Message-Id: 1555758721.3fc69f4634e06b7a81d27e097aeaf5bd6c79fdf5.mpagano@gentoo
commit: 3fc69f4634e06b7a81d27e097aeaf5bd6c79fdf5
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Apr 20 11:12:01 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Apr 20 11:12:01 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3fc69f4634e06b7a81d27e097aeaf5bd6c79fdf5

Linux patch 5.0.9

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |    4 +
 1008_linux-5.0.9.patch | 3652 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3656 insertions(+)

diff --git a/0000_README b/0000_README
index 2dd07a5..dda69ae 100644
--- a/0000_README
+++ b/0000_README
@@ -75,6 +75,10 @@ Patch: 1007_linux-5.0.8.patch
 From: http://www.kernel.org
 Desc: Linux 5.0.8

+Patch: 1008_linux-5.0.9.patch
+From: http://www.kernel.org
+Desc: Linux 5.0.9
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1008_linux-5.0.9.patch b/1008_linux-5.0.9.patch
new file mode 100644
index 0000000..ca29395
--- /dev/null
+++ b/1008_linux-5.0.9.patch
@@ -0,0 +1,3652 @@
+diff --git a/Makefile b/Makefile
+index f7666051de66..ef192ca04330 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 0
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = Shy Crocodile
+
+diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig
+index 87b23b7fb781..aefcf7a4e17a 100644
+--- a/arch/arc/configs/hsdk_defconfig
++++ b/arch/arc/configs/hsdk_defconfig
+@@ -8,6 +8,7 @@ CONFIG_NAMESPACES=y
+ # CONFIG_UTS_NS is not set
+ # CONFIG_PID_NS is not set
+ CONFIG_BLK_DEV_INITRD=y
++CONFIG_BLK_DEV_RAM=y
+ CONFIG_EMBEDDED=y
+ CONFIG_PERF_EVENTS=y
+ # CONFIG_VM_EVENT_COUNTERS is not set
+diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
+index 30e090625916..a72bbda2f7aa 100644
+--- a/arch/arc/kernel/head.S
++++ b/arch/arc/kernel/head.S
+@@ -106,6 +106,7 @@ ENTRY(stext)
+ ; r2 = pointer to uboot provided cmdline or external DTB in mem
+ ; These are handled later in handle_uboot_args()
+ st r0, [@uboot_tag]
++ st r1, [@uboot_magic]
+ st r2, [@uboot_arg]
+
+ ; setup "current" tsk and optionally cache it in dedicated r25
+diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
+index 7b2340996cf8..7b3a7b3b380c 100644
+--- a/arch/arc/kernel/setup.c
++++ b/arch/arc/kernel/setup.c
+@@ -36,6 +36,7 @@ unsigned int intr_to_DE_cnt;
+
+ /* Part of U-boot ABI: see head.S */
+ int __initdata uboot_tag;
++int __initdata uboot_magic;
+ char __initdata *uboot_arg;
+
+ const struct machine_desc *machine_desc;
+@@ -497,6 +498,8 @@ static inline bool uboot_arg_invalid(unsigned long addr)
+ #define UBOOT_TAG_NONE 0
+ #define UBOOT_TAG_CMDLINE 1
+ #define UBOOT_TAG_DTB 2
++/* We always pass 0 as magic from U-boot */
++#define UBOOT_MAGIC_VALUE 0
+
+ void __init handle_uboot_args(void)
+ {
+@@ -511,6 +514,11 @@ void __init handle_uboot_args(void)
+ goto ignore_uboot_args;
+ }
+
++ if (uboot_magic != UBOOT_MAGIC_VALUE) {
++ pr_warn(IGNORE_ARGS "non zero uboot magic\n");
++ goto ignore_uboot_args;
++ }
++
+ if (uboot_tag != UBOOT_TAG_NONE &&
+ uboot_arg_invalid((unsigned long)uboot_arg)) {
+ pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg);
+diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
+index a50dc00d79a2..d0a05a3bdb96 100644
+--- a/arch/arm/kernel/patch.c
++++ b/arch/arm/kernel/patch.c
+@@ -16,7 +16,7 @@ struct patch {
+ unsigned int insn;
+ };
+
+-static DEFINE_SPINLOCK(patch_lock);
++static DEFINE_RAW_SPINLOCK(patch_lock);
+
+ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
+ __acquires(&patch_lock)
+@@ -33,7 +33,7 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
+ return addr;
+
+ if (flags)
+- spin_lock_irqsave(&patch_lock, *flags);
++ raw_spin_lock_irqsave(&patch_lock, *flags);
+ else
+ __acquire(&patch_lock);
+
+@@ -48,7 +48,7 @@ static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
+ clear_fixmap(fixmap);
+
+ if (flags)
+- spin_unlock_irqrestore(&patch_lock, *flags);
++ raw_spin_unlock_irqrestore(&patch_lock, *flags);
+ else
+ __release(&patch_lock);
+ }
+diff --git a/arch/mips/bcm47xx/workarounds.c b/arch/mips/bcm47xx/workarounds.c
+index 46eddbec8d9f..0ab95dd431b3 100644
+--- a/arch/mips/bcm47xx/workarounds.c
++++ b/arch/mips/bcm47xx/workarounds.c
+@@ -24,6 +24,7 @@ void __init bcm47xx_workarounds(void)
+ case BCM47XX_BOARD_NETGEAR_WNR3500L:
+ bcm47xx_workarounds_enable_usb_power(12);
+ break;
++ case BCM47XX_BOARD_NETGEAR_WNDR3400V2:
+ case BCM47XX_BOARD_NETGEAR_WNDR3400_V3:
+ bcm47xx_workarounds_enable_usb_power(21);
+ break;
+diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
+index d3f42b6bbdac..8a9cff1f129d 100644
+--- a/arch/x86/hyperv/hv_init.c
++++ b/arch/x86/hyperv/hv_init.c
+@@ -102,9 +102,13 @@ static int hv_cpu_init(unsigned int cpu)
+ u64 msr_vp_index;
+ struct hv_vp_assist_page **hvp = &hv_vp_assist_page[smp_processor_id()];
+ void **input_arg;
++ struct page *pg;
+
+ input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
+- *input_arg = page_address(alloc_page(GFP_KERNEL));
++ pg = alloc_page(GFP_KERNEL);
++ if (unlikely(!pg))
++ return -ENOMEM;
++ *input_arg = page_address(pg);
+
+ hv_get_vp_index(msr_vp_index);
+
+diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
+index 58176b56354e..294ed4392a0e 100644
+--- a/arch/x86/kernel/aperture_64.c
++++ b/arch/x86/kernel/aperture_64.c
+@@ -14,6 +14,7 @@
+ #define pr_fmt(fmt) "AGP: " fmt
+
+ #include <linux/kernel.h>
++#include <linux/kcore.h>
+ #include <linux/types.h>
+ #include <linux/init.h>
+ #include <linux/memblock.h>
+@@ -57,7 +58,7 @@ int fallback_aper_force __initdata;
+
+ int fix_aperture __initdata = 1;
+
+-#ifdef CONFIG_PROC_VMCORE
++#if defined(CONFIG_PROC_VMCORE) || defined(CONFIG_PROC_KCORE)
+ /*
+ * If the first kernel maps the aperture over e820 RAM, the kdump kernel will
+ * use the same range because it will remain configured in the northbridge.
+@@ -66,20 +67,25 @@ int fix_aperture __initdata = 1;
+ */
+ static unsigned long aperture_pfn_start, aperture_page_count;
+
+-static int gart_oldmem_pfn_is_ram(unsigned long pfn)
++static int gart_mem_pfn_is_ram(unsigned long pfn)
+ {
+ return likely((pfn < aperture_pfn_start) ||
+ (pfn >= aperture_pfn_start + aperture_page_count));
+ }
+
+-static void exclude_from_vmcore(u64 aper_base, u32 aper_order)
++static void __init exclude_from_core(u64 aper_base, u32 aper_order)
+ {
+ aperture_pfn_start = aper_base >> PAGE_SHIFT;
+ aperture_page_count = (32 * 1024 * 1024) << aper_order >> PAGE_SHIFT;
+- WARN_ON(register_oldmem_pfn_is_ram(&gart_oldmem_pfn_is_ram));
++#ifdef CONFIG_PROC_VMCORE
++ WARN_ON(register_oldmem_pfn_is_ram(&gart_mem_pfn_is_ram));
++#endif
++#ifdef CONFIG_PROC_KCORE
++ WARN_ON(register_mem_pfn_is_ram(&gart_mem_pfn_is_ram));
++#endif
+ }
+ #else
+-static void exclude_from_vmcore(u64 aper_base, u32 aper_order)
++static void exclude_from_core(u64 aper_base, u32 aper_order)
+ {
+ }
+ #endif
+@@ -474,7 +480,7 @@ out:
+ * may have allocated the range over its e820 RAM
+ * and fixed up the northbridge
+ */
+- exclude_from_vmcore(last_aper_base, last_aper_order);
++ exclude_from_core(last_aper_base, last_aper_order);
+
+ return 1;
+ }
+@@ -520,7 +526,7 @@ out:
+ * overlap with the first kernel's memory. We can't access the
+ * range through vmcore even though it should be part of the dump.
+ */
+- exclude_from_vmcore(aper_alloc, aper_order);
++ exclude_from_core(aper_alloc, aper_order);
+
+ /* Fix up the north bridges */
+ for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
+diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
+index d12226f60168..1d9b8aaea06c 100644
+--- a/arch/x86/kernel/cpu/cyrix.c
++++ b/arch/x86/kernel/cpu/cyrix.c
+@@ -124,7 +124,7 @@ static void set_cx86_reorder(void)
+ setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
+
+ /* Load/Store Serialize to mem access disable (=reorder it) */
+- setCx86_old(CX86_PCR0, getCx86_old(CX86_PCR0) & ~0x80);
++ setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80);
+ /* set load/store serialize from 1GB to 4GB */
+ ccr3 |= 0xe0;
+ setCx86(CX86_CCR3, ccr3);
+@@ -135,11 +135,11 @@ static void set_cx86_memwb(void)
+ pr_info("Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
+
+ /* CCR2 bit 2: unlock NW bit */
+- setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) & ~0x04);
++ setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04);
+ /* set 'Not Write-through' */
+ write_cr0(read_cr0() | X86_CR0_NW);
+ /* CCR2 bit 2: lock NW bit and set WT1 */
+- setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x14);
++ setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14);
+ }
+
+ /*
+@@ -153,14 +153,14 @@ static void geode_configure(void)
+ local_irq_save(flags);
+
+ /* Suspend on halt power saving and enable #SUSP pin */
+- setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x88);
++ setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
+
+ ccr3 = getCx86(CX86_CCR3);
+ setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
+
+
+ /* FPU fast, DTE cache, Mem bypass */
+- setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x38);
++ setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38);
+ setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
+
+ set_cx86_memwb();
+@@ -296,7 +296,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
+ /* GXm supports extended cpuid levels 'ala' AMD */
+ if (c->cpuid_level == 2) {
+ /* Enable cxMMX extensions (GX1 Datasheet 54) */
+- setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7) | 1);
++ setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1);
+
+ /*
+ * GXm : 0x30 ... 0x5f GXm datasheet 51
+@@ -319,7 +319,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
+ if (dir1 > 7) {
+ dir0_msn++; /* M II */
+ /* Enable MMX extensions (App note 108) */
+- setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7)|1);
++ setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1);
+ } else {
+ /* A 6x86MX - it has the bug. */
+ set_cpu_bug(c, X86_BUG_COMA);
+diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
+index dfd3aca82c61..fb32925a2e62 100644
+--- a/arch/x86/kernel/hpet.c
++++ b/arch/x86/kernel/hpet.c
+@@ -905,6 +905,8 @@ int __init hpet_enable(void)
+ return 0;
+
+ hpet_set_mapping();
++ if (!hpet_virt_address)
++ return 0;
+
+ /*
+ * Read the period and check for a sane value:
+diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
+index 34a5c1715148..2882fe1d2a78 100644
+--- a/arch/x86/kernel/hw_breakpoint.c
++++ b/arch/x86/kernel/hw_breakpoint.c
+@@ -357,6 +357,7 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
+ #endif
+ default:
+ WARN_ON_ONCE(1);
++ return -EINVAL;
+ }
+
+ /*
+diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
+index 3482460d984d..1bfe5c6e6cfe 100644
+--- a/arch/x86/kernel/mpparse.c
++++ b/arch/x86/kernel/mpparse.c
+@@ -598,8 +598,8 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
+ mpf_base = base;
+ mpf_found = true;
+
+- pr_info("found SMP MP-table at [mem %#010lx-%#010lx] mapped at [%p]\n",
+- base, base + sizeof(*mpf) - 1, mpf);
++ pr_info("found SMP MP-table at [mem %#010lx-%#010lx]\n",
++ base, base + sizeof(*mpf) - 1);
+
+ memblock_reserve(base, sizeof(*mpf));
+ if (mpf->physptr)
+diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
+index 2620baa1f699..507212d75ee2 100644
+--- a/block/blk-iolatency.c
++++ b/block/blk-iolatency.c
+@@ -75,6 +75,7 @@
+ #include <linux/blk-mq.h>
+ #include "blk-rq-qos.h"
+ #include "blk-stat.h"
++#include "blk.h"
+
+ #define DEFAULT_SCALE_COOKIE 1000000U
+
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 9d66a47d32fb..49e16f009095 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -194,6 +194,7 @@ static struct workqueue_struct *ec_query_wq;
+ static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
+ static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */
+ static int EC_FLAGS_IGNORE_DSDT_GPE; /* Needs ECDT GPE as correction setting */
++static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
+
+ /* --------------------------------------------------------------------------
+ * Logging/Debugging
+@@ -499,6 +500,26 @@ static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
+ ec_log_drv("event blocked");
+ }
+
++/*
++ * Process _Q events that might have accumulated in the EC.
++ * Run with locked ec mutex.
++ */
++static void acpi_ec_clear(struct acpi_ec *ec)
++{
++ int i, status;
++ u8 value = 0;
++
++ for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
++ status = acpi_ec_query(ec, &value);
++ if (status || !value)
++ break;
++ }
++ if (unlikely(i == ACPI_EC_CLEAR_MAX))
++ pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
++ else
++ pr_info("%d stale EC events cleared\n", i);
++}
++
+ static void acpi_ec_enable_event(struct acpi_ec *ec)
+ {
+ unsigned long flags;
+@@ -507,6 +528,10 @@ static void acpi_ec_enable_event(struct acpi_ec *ec)
+ if (acpi_ec_started(ec))
+ __acpi_ec_enable_event(ec);
+ spin_unlock_irqrestore(&ec->lock, flags);
++
++ /* Drain additional events if hardware requires that */
++ if (EC_FLAGS_CLEAR_ON_RESUME)
++ acpi_ec_clear(ec);
+ }
+
+ #ifdef CONFIG_PM_SLEEP
+@@ -1820,6 +1845,31 @@ static int ec_flag_query_handshake(const struct dmi_system_id *id)
+ }
+ #endif
+
++/*
++ * On some hardware it is necessary to clear events accumulated by the EC during
++ * sleep. These ECs stop reporting GPEs until they are manually polled, if too
++ * many events are accumulated. (e.g. Samsung Series 5/9 notebooks)
++ *
++ * https://bugzilla.kernel.org/show_bug.cgi?id=44161
++ *
++ * Ideally, the EC should also be instructed NOT to accumulate events during
++ * sleep (which Windows seems to do somehow), but the interface to control this
++ * behaviour is not known at this time.
++ *
++ * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
++ * however it is very likely that other Samsung models are affected.
++ *
++ * On systems which don't accumulate _Q events during sleep, this extra check
++ * should be harmless.
++ */
++static int ec_clear_on_resume(const struct dmi_system_id *id)
++{
++ pr_debug("Detected system needing EC poll on resume.\n");
++ EC_FLAGS_CLEAR_ON_RESUME = 1;
++ ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
++ return 0;
++}
++
+ /*
+ * Some ECDTs contain wrong register addresses.
+ * MSI MS-171F
+@@ -1869,6 +1919,9 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = {
+ ec_honor_ecdt_gpe, "ASUS X580VD", {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X580VD"),}, NULL},
++ {
++ ec_clear_on_resume, "Samsung hardware", {
++ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
+ {},
+ };
+
+diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
+index 78db97687f26..c4b06cc075f9 100644
+--- a/drivers/acpi/utils.c
++++ b/drivers/acpi/utils.c
+@@ -800,6 +800,7 @@ bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
+ match.hrv = hrv;
+
+ dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb);
++ put_device(dev);
+ return !!dev;
+ }
+ EXPORT_SYMBOL(acpi_dev_present);
+diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
+index 9ad93ea42fdc..3cde351fb5c9 100644
+--- a/drivers/auxdisplay/hd44780.c
++++ b/drivers/auxdisplay/hd44780.c
+@@ -280,6 +280,8 @@ static int hd44780_remove(struct platform_device *pdev)
+ struct charlcd *lcd = platform_get_drvdata(pdev);
+
+ charlcd_unregister(lcd);
++
++ kfree(lcd);
+ return 0;
+ }
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 500de1dee967..a00ca6b8117b 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -1467,12 +1467,12 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
+ if (IS_ERR(gpd_data))
+ return PTR_ERR(gpd_data);
+
+- genpd_lock(genpd);
+-
+ ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
+ if (ret)
+ goto out;
+
++ genpd_lock(genpd);
++
+ dev_pm_domain_set(dev, &genpd->domain);
+
+ genpd->device_count++;
+@@ -1480,9 +1480,8 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
+
+ list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
+
+- out:
+ genpd_unlock(genpd);
+-
++ out:
+ if (ret)
+ genpd_free_dev_data(dev, gpd_data);
+ else
+@@ -1531,15 +1530,15 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
+ genpd->device_count--;
+ genpd->max_off_time_changed = true;
+
+- if (genpd->detach_dev)
+- genpd->detach_dev(genpd, dev);
+-
+ dev_pm_domain_set(dev, NULL);
+
+ list_del_init(&pdd->list_node);
+
+ genpd_unlock(genpd);
+
++ if (genpd->detach_dev)
++ genpd->detach_dev(genpd, dev);
++
+ genpd_free_dev_data(dev, gpd_data);
+
+ return 0;
+diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
+index 96670eefaeb2..6d415b20fb70 100644
+--- a/drivers/block/paride/pcd.c
++++ b/drivers/block/paride/pcd.c
+@@ -314,6 +314,7 @@ static void pcd_init_units(void)
+ disk->queue = blk_mq_init_sq_queue(&cd->tag_set, &pcd_mq_ops,
+ 1, BLK_MQ_F_SHOULD_MERGE);
+ if (IS_ERR(disk->queue)) {
++ put_disk(disk);
+ disk->queue = NULL;
+ continue;
+ }
+@@ -749,8 +750,14 @@ static int pcd_detect(void)
+ return 0;
+
+ printk("%s: No CD-ROM drive found\n", name);
+- for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++)
++ for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
++ if (!cd->disk)
++ continue;
++ blk_cleanup_queue(cd->disk->queue);
++ cd->disk->queue = NULL;
++ blk_mq_free_tag_set(&cd->tag_set);
+ put_disk(cd->disk);
++ }
+ pi_unregister_driver(par_drv);
+ return -1;
+ }
+@@ -1006,8 +1013,14 @@ static int __init pcd_init(void)
+ pcd_probe_capabilities();
+
+ if (register_blkdev(major, name)) {
+- for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++)
++ for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
++ if (!cd->disk)
++ continue;
++
++ blk_cleanup_queue(cd->disk->queue);
++ blk_mq_free_tag_set(&cd->tag_set);
+ put_disk(cd->disk);
++ }
+ return -EBUSY;
+ }
+
+@@ -1028,6 +1041,9 @@ static void __exit pcd_exit(void)
+ int unit;
+
+ for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
++ if (!cd->disk)
++ continue;
++
+ if (cd->present) {
+ del_gendisk(cd->disk);
+ pi_release(cd->pi);
+diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
+index e92e7a8eeeb2..35e6e271b219 100644
+--- a/drivers/block/paride/pf.c
++++ b/drivers/block/paride/pf.c
+@@ -761,8 +761,14 @@ static int pf_detect(void)
+ return 0;
+
+ printk("%s: No ATAPI disk detected\n", name);
+- for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
++ for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
++ if (!pf->disk)
++ continue;
++ blk_cleanup_queue(pf->disk->queue);
++ pf->disk->queue = NULL;
++ blk_mq_free_tag_set(&pf->tag_set);
+ put_disk(pf->disk);
++ }
+ pi_unregister_driver(par_drv);
+ return -1;
+ }
+@@ -1025,8 +1031,13 @@ static int __init pf_init(void)
+ pf_busy = 0;
+
+ if (register_blkdev(major, name)) {
+- for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
++ for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
++ if (!pf->disk)
++ continue;
++ blk_cleanup_queue(pf->disk->queue);
++ blk_mq_free_tag_set(&pf->tag_set);
+ put_disk(pf->disk);
++ }
+ return -EBUSY;
+ }
+
+@@ -1047,13 +1058,18 @@ static void __exit pf_exit(void)
+ int unit;
+ unregister_blkdev(major, name);
+ for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
+- if (!pf->present)
++ if (!pf->disk)
+ continue;
+- del_gendisk(pf->disk);
++
++ if (pf->present)
++ del_gendisk(pf->disk);
++
+ blk_cleanup_queue(pf->disk->queue);
+ blk_mq_free_tag_set(&pf->tag_set);
+ put_disk(pf->disk);
+- pi_release(pf->pi);
++
++ if (pf->present)
++ pi_release(pf->pi);
+ }
+ }
+
+diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
+index f3442c2bdbdc..3c70004240d6 100644
+--- a/drivers/crypto/axis/artpec6_crypto.c
++++ b/drivers/crypto/axis/artpec6_crypto.c
+@@ -284,6 +284,7 @@ enum artpec6_crypto_hash_flags {
+
+ struct artpec6_crypto_req_common {
+ struct list_head list;
++ struct list_head complete_in_progress;
+ struct artpec6_crypto_dma_descriptors *dma;
+ struct crypto_async_request *req;
+ void (*complete)(struct crypto_async_request *req);
+@@ -2045,7 +2046,8 @@ static int artpec6_crypto_prepare_aead(struct aead_request *areq)
+ return artpec6_crypto_dma_map_descs(common);
+ }
+
+-static void artpec6_crypto_process_queue(struct artpec6_crypto *ac)
++static void artpec6_crypto_process_queue(struct artpec6_crypto *ac,
++ struct list_head *completions)
+ {
+ struct artpec6_crypto_req_common *req;
+
+@@ -2056,7 +2058,7 @@ static void artpec6_crypto_process_queue(struct artpec6_crypto *ac)
+ list_move_tail(&req->list, &ac->pending);
+ artpec6_crypto_start_dma(req);
+
+- req->req->complete(req->req, -EINPROGRESS);
++ list_add_tail(&req->complete_in_progress, completions);
+ }
+
+ /*
+@@ -2086,6 +2088,11 @@ static void artpec6_crypto_task(unsigned long data)
+ struct artpec6_crypto *ac = (struct artpec6_crypto *)data;
+ struct artpec6_crypto_req_common *req;
+ struct artpec6_crypto_req_common *n;
++ struct list_head complete_done;
++ struct list_head complete_in_progress;
++
++ INIT_LIST_HEAD(&complete_done);
++ INIT_LIST_HEAD(&complete_in_progress);
+
+ if (list_empty(&ac->pending)) {
+ pr_debug("Spurious IRQ\n");
+@@ -2119,19 +2126,30 @@ static void artpec6_crypto_task(unsigned long data)
+
+ pr_debug("Completing request %p\n", req);
+
+- list_del(&req->list);
++ list_move_tail(&req->list, &complete_done);
+
+ artpec6_crypto_dma_unmap_all(req);
+ artpec6_crypto_copy_bounce_buffers(req);
+
+ ac->pending_count--;
+ artpec6_crypto_common_destroy(req);
+- req->complete(req->req);
+ }
+
+- artpec6_crypto_process_queue(ac);
++ artpec6_crypto_process_queue(ac, &complete_in_progress);
+
+ spin_unlock_bh(&ac->queue_lock);
+
++ /* Perform the completion callbacks without holding the queue lock
++ * to allow new request submissions from the callbacks.
++ */
++ list_for_each_entry_safe(req, n, &complete_done, list) {
++ req->complete(req->req);
++ }
++
++ list_for_each_entry_safe(req, n, &complete_in_progress,
++ complete_in_progress) {
++ req->req->complete(req->req, -EINPROGRESS);
++ }
+ }
+
+ static void artpec6_crypto_complete_crypto(struct crypto_async_request *req)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index 3a9b48b227ac..a7208ca0bfe3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -546,7 +546,7 @@ static int psp_load_fw(struct amdgpu_device *adev)
+ struct psp_context *psp = &adev->psp;
+
+ if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset) {
+- psp_ring_destroy(psp, PSP_RING_TYPE__KM);
++ psp_ring_stop(psp, PSP_RING_TYPE__KM); /* should not destroy ring, only stop */
+ goto skip_memalloc;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+index 47243165a082..ae90a99909ef 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+@@ -323,57 +323,7 @@ static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
+ struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
+ struct queue_properties *q)
+ {
+- uint64_t addr;
+- struct cik_mqd *m;
+- int retval;
+-
+- retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
+- mqd_mem_obj);
+-
+- if (retval != 0)
+- return -ENOMEM;
+-
+- m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr;
+- addr = (*mqd_mem_obj)->gpu_addr;
+-
+- memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256));
+-
+- m->header = 0xC0310800;
+- m->compute_pipelinestat_enable = 1;
+- m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
+- m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
+- m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
+- m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
+-
+- m->cp_hqd_persistent_state = DEFAULT_CP_HQD_PERSISTENT_STATE |
+- PRELOAD_REQ;
+- m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS |
+- QUANTUM_DURATION(10);
+-
+- m->cp_mqd_control = MQD_CONTROL_PRIV_STATE_EN;
+- m->cp_mqd_base_addr_lo = lower_32_bits(addr);
+- m->cp_mqd_base_addr_hi = upper_32_bits(addr);
+-
+- m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE;
+-
+- /*
+- * Pipe Priority
+- * Identifies the pipe relative priority when this queue is connected
+- * to the pipeline. The pipe priority is against the GFX pipe and HP3D.
+- * In KFD we are using a fixed pipe priority set to CS_MEDIUM.
+- * 0 = CS_LOW (typically below GFX)
+- * 1 = CS_MEDIUM (typically between HP3D and GFX
+- * 2 = CS_HIGH (typically above HP3D)
+- */
+- m->cp_hqd_pipe_priority = 1;
+- m->cp_hqd_queue_priority = 15;
+-
+- *mqd = m;
+- if (gart_addr)
+- *gart_addr = addr;
+- retval = mm->update_mqd(mm, m, q);
+-
+- return retval;
++ return init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
+ }
+
+ static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
+diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
+index 0573eab0e190..f35e4ab55b27 100644
+--- a/drivers/gpu/drm/exynos/exynos_mixer.c
++++ b/drivers/gpu/drm/exynos/exynos_mixer.c
+@@ -20,6 +20,7 @@
+ #include "regs-vp.h"
+
+ #include <linux/kernel.h>
++#include <linux/ktime.h>
+ #include <linux/spinlock.h>
+ #include <linux/wait.h>
+ #include <linux/i2c.h>
+@@ -352,15 +353,62 @@ static void mixer_cfg_vp_blend(struct mixer_context *ctx, unsigned int alpha)
+ mixer_reg_write(ctx, MXR_VIDEO_CFG, val);
+ }
+
+-static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable)
++static bool mixer_is_synced(struct mixer_context *ctx)
+ {
+- /* block update on vsync */
+- mixer_reg_writemask(ctx, MXR_STATUS, enable ?
+- MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE);
++ u32 base, shadow;
+
++ if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
++ ctx->mxr_ver == MXR_VER_128_0_0_184)
++ return !(mixer_reg_read(ctx, MXR_CFG) &
++ MXR_CFG_LAYER_UPDATE_COUNT_MASK);
++
++ if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) &&
++ vp_reg_read(ctx, VP_SHADOW_UPDATE))
++ return false;
++
++ base = mixer_reg_read(ctx, MXR_CFG);
++ shadow = mixer_reg_read(ctx, MXR_CFG_S);
++ if (base != shadow)
++ return false;
++
++ base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0));
++ shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0));
++ if (base != shadow)
++ return false;
++
++ base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1));
++ shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1));
++ if (base != shadow)
++ return false;
++
++ return true;
++}
++
++static int mixer_wait_for_sync(struct mixer_context *ctx)
++{
++ ktime_t timeout = ktime_add_us(ktime_get(), 100000);
++
++ while (!mixer_is_synced(ctx)) {
++ usleep_range(1000, 2000);
++ if (ktime_compare(ktime_get(), timeout) > 0)
++ return -ETIMEDOUT;
++ }
++ return 0;
++}
++
++static void mixer_disable_sync(struct mixer_context *ctx)
++{
++ mixer_reg_writemask(ctx, MXR_STATUS, 0, MXR_STATUS_SYNC_ENABLE);
++}
++
++static void mixer_enable_sync(struct mixer_context *ctx)
++{
++ if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
++ ctx->mxr_ver == MXR_VER_128_0_0_184)
++ mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
++ mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SYNC_ENABLE);
+ if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags))
+- vp_reg_write(ctx, VP_SHADOW_UPDATE, enable ?
+- VP_SHADOW_UPDATE_ENABLE : 0);
++ vp_reg_write(ctx, VP_SHADOW_UPDATE, VP_SHADOW_UPDATE_ENABLE);
+ }
+
+ static void mixer_cfg_scan(struct mixer_context *ctx, int width, int height)
+@@ -498,7 +546,6 @@ static void vp_video_buffer(struct mixer_context *ctx,
+
+ spin_lock_irqsave(&ctx->reg_slock, flags);
+
+- vp_reg_write(ctx, VP_SHADOW_UPDATE, 1);
+ /* interlace or progressive scan mode */
+ val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? ~0 : 0);
+ vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_LINE_SKIP);
+@@ -553,11 +600,6 @@ static void vp_video_buffer(struct mixer_context *ctx,
+ vp_regs_dump(ctx);
+ }
+
+-static void mixer_layer_update(struct mixer_context *ctx)
+-{
+- mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
+-}
+-
+ static void mixer_graph_buffer(struct mixer_context *ctx,
+ struct exynos_drm_plane *plane)
+ {
+@@ -640,11 +682,6 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
+ mixer_cfg_layer(ctx, win, priority, true);
+ mixer_cfg_gfx_blend(ctx, win, pixel_alpha, state->base.alpha);
+
+- /* layer update mandatory for mixer 16.0.33.0 */
+- if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
+- ctx->mxr_ver == MXR_VER_128_0_0_184)
+- mixer_layer_update(ctx);
+-
+ spin_unlock_irqrestore(&ctx->reg_slock, flags);
+
+ mixer_regs_dump(ctx);
+@@ -709,7 +746,7 @@ static void mixer_win_reset(struct mixer_context *ctx)
+ static irqreturn_t mixer_irq_handler(int irq, void *arg)
+ {
+ struct mixer_context *ctx = arg;
+- u32 val, base, shadow;
++ u32 val;
+
+ spin_lock(&ctx->reg_slock);
+
+@@ -723,26 +760,9 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
+ val &= ~MXR_INT_STATUS_VSYNC;
+
+ /* interlace scan need to check shadow register */
+- if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) {
+- if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) &&
+- vp_reg_read(ctx, VP_SHADOW_UPDATE))
+- goto out;
+-
+- base = mixer_reg_read(ctx, MXR_CFG);
+- shadow = mixer_reg_read(ctx, MXR_CFG_S);
+- if (base != shadow)
+- goto out;
+-
+- base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0));
+- shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0));
+- if (base != shadow)
+- goto out;
+-
+- base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1));
+- shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1));
+- if (base != shadow)
+- goto out;
+- }
++ if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)
++ && !mixer_is_synced(ctx))
++ goto out;
+
+ drm_crtc_handle_vblank(&ctx->crtc->base);
+ }
+@@ -917,12 +937,14 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
+
+ static void mixer_atomic_begin(struct exynos_drm_crtc *crtc)
+ {
+- struct mixer_context *mixer_ctx = crtc->ctx;
++ struct mixer_context *ctx = crtc->ctx;
+
+- if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
++ if (!test_bit(MXR_BIT_POWERED, &ctx->flags))
+ return;
+
+- mixer_vsync_set_update(mixer_ctx, false);
++ if (mixer_wait_for_sync(ctx))
++ dev_err(ctx->dev, "timeout waiting for VSYNC\n");
++ mixer_disable_sync(ctx);
+ }
+
+ static void mixer_update_plane(struct exynos_drm_crtc *crtc,
+@@ -964,7 +986,7 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc)
+ if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
+ return;
+
+- mixer_vsync_set_update(mixer_ctx, true);
++ mixer_enable_sync(mixer_ctx);
+ exynos_crtc_handle_event(crtc);
+ }
+
+@@ -979,7 +1001,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
+
+ exynos_drm_pipe_clk_enable(crtc, true);
+
+- mixer_vsync_set_update(ctx, false);
++ mixer_disable_sync(ctx);
+
+ mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
+
+@@ -992,7 +1014,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
+
+ mixer_commit(ctx);
+
+- mixer_vsync_set_update(ctx, true);
++ mixer_enable_sync(ctx);
+
+ set_bit(MXR_BIT_POWERED, &ctx->flags);
+ }
+diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
+index 8a0f85f5fc1a..6a765682fbfa 100644
+--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
++++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
+@@ -38,6 +38,7 @@ int nvkm_volt_set_id(struct nvkm_volt *, u8 id, u8 min_id, u8 temp,
+
+ int nv40_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
+ int gf100_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
++int gf117_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
+ int gk104_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
+ int gk20a_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
+ int gm20b_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+index 88a52f6b39fe..7dfbbbc1beea 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
++++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+@@ -181,7 +181,7 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf,
+ }
+
+ ret = pm_runtime_get_sync(drm->dev);
+- if (IS_ERR_VALUE(ret) && ret != -EACCES)
++ if (ret < 0 && ret != -EACCES)
+ return ret;
+ ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args));
+ pm_runtime_put_autosuspend(drm->dev);
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+index d9edb5785813..d75fa7678483 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+@@ -1613,7 +1613,7 @@ nvd7_chipset = {
+ .pci = gf106_pci_new,
+ .therm = gf119_therm_new,
+ .timer = nv41_timer_new,
+- .volt = gf100_volt_new,
++ .volt = gf117_volt_new,
+ .ce[0] = gf100_ce_new,
+ .disp = gf119_disp_new,
+ .dma = gf119_dma_new,
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
+index bcd179ba11d0..146adcdd316a 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
+@@ -2,6 +2,7 @@ nvkm-y += nvkm/subdev/volt/base.o
+ nvkm-y += nvkm/subdev/volt/gpio.o
+ nvkm-y += nvkm/subdev/volt/nv40.o
+ nvkm-y += nvkm/subdev/volt/gf100.o
++nvkm-y += nvkm/subdev/volt/gf117.o
+ nvkm-y += nvkm/subdev/volt/gk104.o
+ nvkm-y += nvkm/subdev/volt/gk20a.o
+ nvkm-y += nvkm/subdev/volt/gm20b.o
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c
+new file mode 100644
+index 000000000000..547a58f0aeac
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c
+@@ -0,0 +1,60 @@
++/*
++ * Copyright 2019 Ilia Mirkin
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Ilia Mirkin
++ */
++#include "priv.h"
++
++#include <subdev/fuse.h>
++
++static int
++gf117_volt_speedo_read(struct nvkm_volt *volt)
++{
++ struct nvkm_device *device = volt->subdev.device;
++ struct nvkm_fuse *fuse = device->fuse;
++
++ if (!fuse)
++ return -EINVAL;
++
++ return nvkm_fuse_read(fuse, 0x3a8);
++}
++
++static const struct nvkm_volt_func
++gf117_volt = {
++ .oneinit = gf100_volt_oneinit,
++ .vid_get = nvkm_voltgpio_get,
++ .vid_set = nvkm_voltgpio_set,
++ .speedo_read = gf117_volt_speedo_read,
++};
++
++int
++gf117_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
++{
++ struct nvkm_volt *volt;
++ int ret;
++
++ ret = nvkm_volt_new_(&gf117_volt, device, index, &volt);
++ *pvolt = volt;
++ if (ret)
++ return ret;
++
++ return nvkm_voltgpio_init(volt);
++}
+diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
+index ca4ae45dd307..8e5724b63f1f 100644
+--- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c
++++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
+@@ -70,18 +70,12 @@ static inline struct innolux_panel *to_innolux_panel(struct drm_panel *panel)
+ static int innolux_panel_disable(struct drm_panel *panel)
+ {
+ struct innolux_panel *innolux = to_innolux_panel(panel);
+- int err;
+
+ if (!innolux->enabled)
+ return 0;
+
+ backlight_disable(innolux->backlight);
+
+- err = mipi_dsi_dcs_set_display_off(innolux->link);
+- if (err < 0)
+- DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
+- err);
+-
+ innolux->enabled = false;
+
+ return 0;
+@@ -95,6 +89,11 @@ static int innolux_panel_unprepare(struct drm_panel *panel)
+ if (!innolux->prepared)
+ return 0;
+
++ err = mipi_dsi_dcs_set_display_off(innolux->link);
++ if (err < 0)
++ DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
++ err);
++
+ err = mipi_dsi_dcs_enter_sleep_mode(innolux->link);
+ if (err < 0) {
+ DRM_DEV_ERROR(panel->dev, "failed to enter sleep mode: %d\n",
+diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
+index d5a23295dd80..bb7b58407039 100644
+--- a/drivers/gpu/drm/udl/udl_gem.c
++++ b/drivers/gpu/drm/udl/udl_gem.c
+@@ -224,7 +224,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
+ *offset = drm_vma_node_offset_addr(&gobj->base.vma_node);
+
+ out:
+- drm_gem_object_put(&gobj->base);
++ drm_gem_object_put_unlocked(&gobj->base);
+ unlock:
+ mutex_unlock(&udl->gem_lock);
+ return ret;
+diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
+index 45b2460f3166..e8819d750938 100644
+--- a/drivers/hwtracing/coresight/coresight-cpu-debug.c
++++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
+@@ -668,6 +668,10 @@ static const struct amba_id debug_ids[] = {
+ .id = 0x000bbd08,
+ .mask = 0x000fffff,
+ },
++ { /* Debug for Cortex-A73 */
++ .id = 0x000bbd09,
++ .mask = 0x000fffff,
++ },
+ { 0, 0 },
+ };
+
+diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
+index 5344e8993b28..5866f358ea04 100644
+--- a/drivers/infiniband/hw/hfi1/qp.c
++++ b/drivers/infiniband/hw/hfi1/qp.c
+@@ -833,7 +833,7 @@ void notify_error_qp(struct rvt_qp *qp)
+ write_seqlock(lock);
+ if (!list_empty(&priv->s_iowait.list) &&
+ !(qp->s_flags & RVT_S_BUSY)) {
+- qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
++ qp->s_flags &= ~HFI1_S_ANY_WAIT_IO;
+ list_del_init(&priv->s_iowait.list);
+ priv->s_iowait.lock = NULL;
+ rvt_put_qp(qp);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
+index 509e467843f6..f4cac63194d9 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -216,6 +216,26 @@ enum {
+ HNS_ROCE_DB_PER_PAGE = PAGE_SIZE / 4
+ };
+
++enum hns_roce_reset_stage {
++ HNS_ROCE_STATE_NON_RST,
++ HNS_ROCE_STATE_RST_BEF_DOWN,
++ HNS_ROCE_STATE_RST_DOWN,
++ HNS_ROCE_STATE_RST_UNINIT,
++ HNS_ROCE_STATE_RST_INIT,
++ HNS_ROCE_STATE_RST_INITED,
++};
++
++enum hns_roce_instance_state {
++ HNS_ROCE_STATE_NON_INIT,
++ HNS_ROCE_STATE_INIT,
++ HNS_ROCE_STATE_INITED,
++ HNS_ROCE_STATE_UNINIT,
++};
++
++enum {
++ HNS_ROCE_RST_DIRECT_RETURN = 0,
++};
++
+ #define HNS_ROCE_CMD_SUCCESS 1
+
+ #define HNS_ROCE_PORT_DOWN 0
+@@ -898,6 +918,7 @@ struct hns_roce_dev {
+ spinlock_t bt_cmd_lock;
+ bool active;
+ bool is_reset;
++ unsigned long reset_cnt;
+ struct hns_roce_ib_iboe iboe;
+
+ struct list_head pgdir_list;
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 543fa1504cd3..7ac06576d791 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -5800,6 +5800,7 @@ MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
+ static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
+ struct hnae3_handle *handle)
+ {
++ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ const struct pci_device_id *id;
+ int i;
+
+@@ -5830,10 +5831,13 @@ static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
+ hr_dev->cmd_mod = 1;
+ hr_dev->loop_idc = 0;
+
++ hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle);
++ priv->handle = handle;
++
+ return 0;
+ }
+
+-static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
++static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
+ {
+ struct hns_roce_dev *hr_dev;
+ int ret;
+@@ -5850,7 +5854,6 @@ static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
+
+ hr_dev->pci_dev = handle->pdev;
+ hr_dev->dev = &handle->pdev->dev;
+- handle->priv = hr_dev;
+
+ ret = hns_roce_hw_v2_get_cfg(hr_dev, handle);
+ if (ret) {
+@@ -5864,6 +5867,8 @@ static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
+ goto error_failed_get_cfg;
+ }
+
++ handle->priv = hr_dev;
++
+ return 0;
+
+ error_failed_get_cfg:
+@@ -5875,7 +5880,7 @@ error_failed_kzalloc:
+ return ret;
+ }
+
+-static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
++static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
+ bool reset)
+ {
+ struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
+@@ -5883,24 +5888,78 @@ static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
+ if (!hr_dev)
+ return;
+
++ handle->priv = NULL;
+ hns_roce_exit(hr_dev);
+ kfree(hr_dev->priv);
+ ib_dealloc_device(&hr_dev->ib_dev);
+ }
+
++static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
++{
++ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
++ struct device *dev = &handle->pdev->dev;
++ int ret;
++
++ handle->rinfo.instance_state = HNS_ROCE_STATE_INIT;
++
++ if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle)) {
++ handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
++ goto reset_chk_err;
++ }
++
++ ret = __hns_roce_hw_v2_init_instance(handle);
++ if (ret) {
++ handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
++ dev_err(dev, "RoCE instance init failed! ret = %d\n", ret);
++ if (ops->ae_dev_resetting(handle) ||
++ ops->get_hw_reset_stat(handle))
++ goto reset_chk_err;
++ else
++ return ret;
++ }
++
++ handle->rinfo.instance_state = HNS_ROCE_STATE_INITED;
++
++
++ return 0;
++
++reset_chk_err:
++ dev_err(dev, "Device is busy in resetting state.\n"
++ "please retry later.\n");
++
++ return -EBUSY;
++}
++
++static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
++ bool reset)
++{
++ if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED)
++ return;
++
++ handle->rinfo.instance_state = HNS_ROCE_STATE_UNINIT;
++
++ __hns_roce_hw_v2_uninit_instance(handle, reset);
++
++ handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
++}
+ static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
+ {
+- struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
++ struct hns_roce_dev *hr_dev;
+ struct ib_event event;
+
+- if (!hr_dev) {
+- dev_err(&handle->pdev->dev,
+- "Input parameter handle->priv is NULL!\n");
+- return -EINVAL;
++ if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) {
++ set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
++ return 0;
+ }
+
++ handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN;
++ clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
++
++ hr_dev = (struct hns_roce_dev *)handle->priv;
++ if (!hr_dev)
++ return 0;
++
+ hr_dev->active = false;
+- hr_dev->is_reset = true;
+
+ event.event = IB_EVENT_DEVICE_FATAL;
+ event.device = &hr_dev->ib_dev;
+@@ -5912,17 +5971,29 @@ static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
+
+ static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
+ {
++ struct device *dev = &handle->pdev->dev;
+ int ret;
+
+- ret = hns_roce_hw_v2_init_instance(handle);
++ if (test_and_clear_bit(HNS_ROCE_RST_DIRECT_RETURN,
++ &handle->rinfo.state)) {
++ handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
++ return 0;
++ }
++
++ handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INIT;
++
++ dev_info(&handle->pdev->dev, "In reset process RoCE client reinit.\n");
++ ret = __hns_roce_hw_v2_init_instance(handle);
+ if (ret) {
+ /* when reset notify type is HNAE3_INIT_CLIENT In reset notify
+ * callback function, RoCE Engine reinitialize. If RoCE reinit
+ * failed, we should inform NIC driver.
+ */
+ handle->priv = NULL;
+- dev_err(&handle->pdev->dev,
+- "In reset process RoCE reinit failed %d.\n", ret);
++ dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret);
++ } else {
++ handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
++ dev_info(dev, "Reset done, RoCE client reinit finished.\n");
+ }
+
+ return ret;
+@@ -5930,8 +6001,14 @@ static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
+
+ static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
+ {
++ if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state))
++ return 0;
++
++ handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT;
++ dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n");
+ msleep(100);
+- hns_roce_hw_v2_uninit_instance(handle, false);
++ __hns_roce_hw_v2_uninit_instance(handle, false);
++
+ return 0;
+ }
+
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+index b72d0443c835..5398aa718cfc 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -1546,6 +1546,7 @@ struct hns_roce_link_table_entry {
+ #define HNS_ROCE_LINK_TABLE_NXT_PTR_M GENMASK(31, 20)
+
+ struct hns_roce_v2_priv {
++ struct hnae3_handle *handle;
+ struct hns_roce_v2_cmq cmq;
+ struct hns_roce_link_table tsq;
+ struct hns_roce_link_table tpq;
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
+index 59e978141ad4..e99177533930 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
+@@ -173,7 +173,12 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
+
+ rcu_read_lock();
+ in = __in_dev_get_rcu(upper_dev);
+- local_ipaddr = ntohl(in->ifa_list->ifa_address);
++
++ if (!in->ifa_list)
++ local_ipaddr = 0;
++ else
++ local_ipaddr = ntohl(in->ifa_list->ifa_address);
++
+ rcu_read_unlock();
+ } else {
+ local_ipaddr = ntohl(ifa->ifa_address);
+@@ -185,6 +190,11 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
+ case NETDEV_UP:
+ /* Fall through */
+ case NETDEV_CHANGEADDR:
++
++ /* Just skip if no need to handle ARP cache */
++ if (!local_ipaddr)
++ break;
++
+ i40iw_manage_arp_cache(iwdev,
+ netdev->dev_addr,
+ &local_ipaddr,
+diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
+index 782499abcd98..2a0b59a4b6eb 100644
+--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
++++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
+@@ -804,8 +804,8 @@ void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
+ unsigned long flags;
+
+ for (i = 0 ; i < dev->num_ports; i++) {
+- cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work);
+ det = &sriov->alias_guid.ports_guid[i];
++ cancel_delayed_work_sync(&det->alias_guid_work);
+ spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
+ while (!list_empty(&det->cb_list)) {
+ cb_ctx = list_entry(det->cb_list.next,
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index dbd6824dfffa..53b1fbadc496 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -1534,6 +1534,9 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
+ u32 pmen;
+ unsigned long flags;
+
++ if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
++ return;
++
+ raw_spin_lock_irqsave(&iommu->register_lock, flags);
+ pmen = readl(iommu->reg + DMAR_PMEN_REG);
+ pmen &= ~DMA_PMEN_EPM;
+@@ -5328,7 +5331,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
+
+ ctx_lo = context[0].lo;
+
+- sdev->did = domain->iommu_did[iommu->seq_id];
++ sdev->did = FLPT_DEFAULT_DID;
+ sdev->sid = PCI_DEVID(info->bus, info->devfn);
+
+ if (!(ctx_lo & CONTEXT_PASIDE)) {
+diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
+index 567b29c47608..98b6e1d4b1a6 100644
+--- a/drivers/irqchip/irq-mbigen.c
++++ b/drivers/irqchip/irq-mbigen.c
+@@ -161,6 +161,9 @@ static void mbigen_write_msg(struct msi_desc *desc, struct msi_msg *msg)
+ void __iomem *base = d->chip_data;
+ u32 val;
+
++ if (!msg->address_lo && !msg->address_hi)
++ return;
++
+ base += get_mbigen_vec_reg(d->hwirq);
+ val = readl_relaxed(base);
+
+diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
+index a93296b9b45d..7bd1d4cb2e19 100644
+--- a/drivers/irqchip/irq-stm32-exti.c
++++ b/drivers/irqchip/irq-stm32-exti.c
+@@ -716,7 +716,6 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data,
+ const struct stm32_exti_bank *stm32_bank;
+ struct stm32_exti_chip_data *chip_data;
+ void __iomem *base = h_data->base;
+- u32 irqs_mask;
+
+ stm32_bank = h_data->drv_data->exti_banks[bank_idx];
+ chip_data = &h_data->chips_data[bank_idx];
+@@ -725,21 +724,12 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data,
+
+ raw_spin_lock_init(&chip_data->rlock);
+
+- /* Determine number of irqs supported */
+- writel_relaxed(~0UL, base + stm32_bank->rtsr_ofst);
+- irqs_mask = readl_relaxed(base + stm32_bank->rtsr_ofst);
+-
+ /*
+ * This IP has no reset, so after hot reboot we should
+ * clear registers to avoid residue
+ */
+ writel_relaxed(0, base + stm32_bank->imr_ofst);
+ writel_relaxed(0, base + stm32_bank->emr_ofst);
+- writel_relaxed(0, base + stm32_bank->rtsr_ofst);
+- writel_relaxed(0, base + stm32_bank->ftsr_ofst);
+- writel_relaxed(~0UL, base + stm32_bank->rpr_ofst);
+- if (stm32_bank->fpr_ofst != UNDEF_REG)
+- writel_relaxed(~0UL, base + stm32_bank->fpr_ofst);
+
+ pr_info("%pOF: bank%d\n", h_data->node, bank_idx);
+
+diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
+index 2837dc77478e..f0f9eb30bd2b 100644
+--- a/drivers/misc/lkdtm/core.c
++++ b/drivers/misc/lkdtm/core.c
+@@ -152,7 +152,9 @@ static const struct crashtype crashtypes[] = {
+ CRASHTYPE(EXEC_VMALLOC),
+ CRASHTYPE(EXEC_RODATA),
+ CRASHTYPE(EXEC_USERSPACE),
++ CRASHTYPE(EXEC_NULL),
+ CRASHTYPE(ACCESS_USERSPACE),
++ CRASHTYPE(ACCESS_NULL),
+ CRASHTYPE(WRITE_RO),
+ CRASHTYPE(WRITE_RO_AFTER_INIT),
+ CRASHTYPE(WRITE_KERN),
+diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
+index 3c6fd327e166..b69ee004a3f7 100644
+--- a/drivers/misc/lkdtm/lkdtm.h
++++ b/drivers/misc/lkdtm/lkdtm.h
+@@ -45,7 +45,9 @@ void lkdtm_EXEC_KMALLOC(void);
+ void lkdtm_EXEC_VMALLOC(void);
+ void lkdtm_EXEC_RODATA(void);
+ void lkdtm_EXEC_USERSPACE(void);
++void lkdtm_EXEC_NULL(void);
+ void lkdtm_ACCESS_USERSPACE(void);
++void lkdtm_ACCESS_NULL(void);
+
+ /* lkdtm_refcount.c */
+ void lkdtm_REFCOUNT_INC_OVERFLOW(void);
+diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c
+index 53b85c9d16b8..62f76d506f04 100644
+--- a/drivers/misc/lkdtm/perms.c
++++ b/drivers/misc/lkdtm/perms.c
+@@ -47,7 +47,7 @@ static noinline void execute_location(void *dst, bool write)
+ {
+ void (*func)(void) = dst;
+
+- pr_info("attempting ok execution at %p\n", do_nothing);
++ pr_info("attempting ok execution at %px\n", do_nothing);
+ do_nothing();
+
+ if (write == CODE_WRITE) {
+@@ -55,7 +55,7 @@ static noinline void execute_location(void *dst, bool write)
+ flush_icache_range((unsigned long)dst,
+ (unsigned long)dst + EXEC_SIZE);
+ }
+- pr_info("attempting bad execution at %p\n", func);
++ pr_info("attempting bad execution at %px\n", func);
+ func();
+ }
+
+@@ -66,14 +66,14 @@ static void execute_user_location(void *dst)
+ /* Intentionally crossing kernel/user memory boundary. */
+ void (*func)(void) = dst;
+
+- pr_info("attempting ok execution at %p\n", do_nothing);
++ pr_info("attempting ok execution at %px\n", do_nothing);
+ do_nothing();
+
+ copied = access_process_vm(current, (unsigned long)dst, do_nothing,
+ EXEC_SIZE, FOLL_WRITE);
+ if (copied < EXEC_SIZE)
+ return;
+- pr_info("attempting bad execution at %p\n", func);
++ pr_info("attempting bad execution at %px\n", func);
+ func();
+ }
+
+@@ -82,7 +82,7 @@ void lkdtm_WRITE_RO(void)
+ /* Explicitly cast away "const" for the test. */
+ unsigned long *ptr = (unsigned long *)&rodata;
+
+- pr_info("attempting bad rodata write at %p\n", ptr);
++ pr_info("attempting bad rodata write at %px\n", ptr);
+ *ptr ^= 0xabcd1234;
+ }
+
+@@ -100,7 +100,7 @@ void lkdtm_WRITE_RO_AFTER_INIT(void)
+ return;
+ }
+
+- pr_info("attempting bad ro_after_init write at %p\n", ptr);
++ pr_info("attempting bad ro_after_init write at %px\n", ptr);
+ *ptr ^= 0xabcd1234;
+ }
+
+@@ -112,7 +112,7 @@ void lkdtm_WRITE_KERN(void)
+ size = (unsigned long)do_overwritten - (unsigned long)do_nothing;
+ ptr = (unsigned char *)do_overwritten;
+
+- pr_info("attempting bad %zu byte write at %p\n", size, ptr);
++ pr_info("attempting bad %zu byte write at %px\n", size, ptr);
+ memcpy(ptr, (unsigned char *)do_nothing, size);
+ flush_icache_range((unsigned long)ptr, (unsigned long)(ptr + size));
+
+@@ -164,6 +164,11 @@ void lkdtm_EXEC_USERSPACE(void)
1620 + vm_munmap(user_addr, PAGE_SIZE);
1621 + }
1622 +
1623 ++void lkdtm_EXEC_NULL(void)
1624 ++{
1625 ++ execute_location(NULL, CODE_AS_IS);
1626 ++}
1627 ++
1628 + void lkdtm_ACCESS_USERSPACE(void)
1629 + {
1630 + unsigned long user_addr, tmp = 0;
1631 +@@ -185,16 +190,29 @@ void lkdtm_ACCESS_USERSPACE(void)
1632 +
1633 + ptr = (unsigned long *)user_addr;
1634 +
1635 +- pr_info("attempting bad read at %p\n", ptr);
1636 ++ pr_info("attempting bad read at %px\n", ptr);
1637 + tmp = *ptr;
1638 + tmp += 0xc0dec0de;
1639 +
1640 +- pr_info("attempting bad write at %p\n", ptr);
1641 ++ pr_info("attempting bad write at %px\n", ptr);
1642 + *ptr = tmp;
1643 +
1644 + vm_munmap(user_addr, PAGE_SIZE);
1645 + }
1646 +
1647 ++void lkdtm_ACCESS_NULL(void)
1648 ++{
1649 ++ unsigned long tmp;
1650 ++ unsigned long *ptr = (unsigned long *)NULL;
1651 ++
1652 ++ pr_info("attempting bad read at %px\n", ptr);
1653 ++ tmp = *ptr;
1654 ++ tmp += 0xc0dec0de;
1655 ++
1656 ++ pr_info("attempting bad write at %px\n", ptr);
1657 ++ *ptr = tmp;
1658 ++}
1659 ++
1660 + void __init lkdtm_perms_init(void)
1661 + {
1662 + /* Make sure we can write to __ro_after_init values during __init */
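
For context on the %p -> %px conversions above: since v4.15 plain %p prints
a hashed token so logs do not leak kernel addresses, which defeats lkdtm's
point of reporting the exact address it is about to abuse; %px prints the
raw pointer on purpose. A minimal sketch:

        static int some_object;

        static void show_pointer_formats(void)
        {
                pr_info("hashed, safe for logs: %p\n", &some_object);
                pr_info("raw, deliberate leak:  %px\n", &some_object);
        }
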
1663 +diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
1664 +index 9e68c3645e22..e6f14257a7d0 100644
1665 +--- a/drivers/mmc/host/davinci_mmc.c
1666 ++++ b/drivers/mmc/host/davinci_mmc.c
1667 +@@ -1117,7 +1117,7 @@ static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
1668 + {
1669 + }
1670 + #endif
1671 +-static void __init init_mmcsd_host(struct mmc_davinci_host *host)
1672 ++static void init_mmcsd_host(struct mmc_davinci_host *host)
1673 + {
1674 +
1675 + mmc_davinci_reset_ctrl(host, 1);
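
The __init removal above is a section-mismatch fix: __init functions live
in .init.text, which the kernel frees once boot completes, so a function
also reachable from the resume path (as init_mmcsd_host is) must not carry
the annotation. A minimal sketch of the distinction (function names
hypothetical):

        #include <linux/init.h>

        /* discarded after boot: may only run from probe/boot-time paths */
        static int __init one_shot_setup(void)
        {
                return 0;
        }

        /* reached again at resume time, so it stays in regular .text */
        static void reinit_controller(void)
        {
        }
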
1676 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
1677 +index 09c774fe8853..854a55d4332a 100644
1678 +--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
1679 ++++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
1680 +@@ -463,6 +463,8 @@ struct hnae3_ae_ops {
1681 + int (*set_gro_en)(struct hnae3_handle *handle, int enable);
1682 + u16 (*get_global_queue_id)(struct hnae3_handle *handle, u16 queue_id);
1683 + void (*set_timer_task)(struct hnae3_handle *handle, bool enable);
1684 ++ int (*mac_connect_phy)(struct hnae3_handle *handle);
1685 ++ void (*mac_disconnect_phy)(struct hnae3_handle *handle);
1686 + };
1687 +
1688 + struct hnae3_dcb_ops {
1689 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
1690 +index d84c50068f66..40b69eaf2cb3 100644
1691 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
1692 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
1693 +@@ -3519,6 +3519,25 @@ static int hns3_init_mac_addr(struct net_device *netdev, bool init)
1694 + return ret;
1695 + }
1696 +
1697 ++static int hns3_init_phy(struct net_device *netdev)
1698 ++{
1699 ++ struct hnae3_handle *h = hns3_get_handle(netdev);
1700 ++ int ret = 0;
1701 ++
1702 ++ if (h->ae_algo->ops->mac_connect_phy)
1703 ++ ret = h->ae_algo->ops->mac_connect_phy(h);
1704 ++
1705 ++ return ret;
1706 ++}
1707 ++
1708 ++static void hns3_uninit_phy(struct net_device *netdev)
1709 ++{
1710 ++ struct hnae3_handle *h = hns3_get_handle(netdev);
1711 ++
1712 ++ if (h->ae_algo->ops->mac_disconnect_phy)
1713 ++ h->ae_algo->ops->mac_disconnect_phy(h);
1714 ++}
1715 ++
1716 + static int hns3_restore_fd_rules(struct net_device *netdev)
1717 + {
1718 + struct hnae3_handle *h = hns3_get_handle(netdev);
1719 +@@ -3627,6 +3646,10 @@ static int hns3_client_init(struct hnae3_handle *handle)
1720 + goto out_init_ring_data;
1721 + }
1722 +
1723 ++ ret = hns3_init_phy(netdev);
1724 ++ if (ret)
1725 ++ goto out_init_phy;
1726 ++
1727 + ret = register_netdev(netdev);
1728 + if (ret) {
1729 + dev_err(priv->dev, "probe register netdev fail!\n");
1730 +@@ -3651,6 +3674,9 @@ static int hns3_client_init(struct hnae3_handle *handle)
1731 + return ret;
1732 +
1733 + out_reg_netdev_fail:
1734 ++ hns3_uninit_phy(netdev);
1735 ++out_init_phy:
1736 ++ hns3_uninit_all_ring(priv);
1737 + out_init_ring_data:
1738 + (void)hns3_nic_uninit_vector_data(priv);
1739 + out_init_vector_data:
1740 +@@ -3685,6 +3711,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
1741 +
1742 + hns3_force_clear_all_rx_ring(handle);
1743 +
1744 ++ hns3_uninit_phy(netdev);
1745 ++
1746 + ret = hns3_nic_uninit_vector_data(priv);
1747 + if (ret)
1748 + netdev_err(netdev, "uninit vector error\n");
1749 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1750 +index f7637c08bb3a..cb7571747af7 100644
1751 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1752 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1753 +@@ -6959,16 +6959,6 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle,
1754 + *tp_mdix = ETH_TP_MDI;
1755 + }
1756 +
1757 +-static int hclge_init_instance_hw(struct hclge_dev *hdev)
1758 +-{
1759 +- return hclge_mac_connect_phy(hdev);
1760 +-}
1761 +-
1762 +-static void hclge_uninit_instance_hw(struct hclge_dev *hdev)
1763 +-{
1764 +- hclge_mac_disconnect_phy(hdev);
1765 +-}
1766 +-
1767 + static int hclge_init_client_instance(struct hnae3_client *client,
1768 + struct hnae3_ae_dev *ae_dev)
1769 + {
1770 +@@ -6988,13 +6978,6 @@ static int hclge_init_client_instance(struct hnae3_client *client,
1771 + if (ret)
1772 + goto clear_nic;
1773 +
1774 +- ret = hclge_init_instance_hw(hdev);
1775 +- if (ret) {
1776 +- client->ops->uninit_instance(&vport->nic,
1777 +- 0);
1778 +- goto clear_nic;
1779 +- }
1780 +-
1781 + hnae3_set_client_init_flag(client, ae_dev, 1);
1782 +
1783 + if (hdev->roce_client &&
1784 +@@ -7079,7 +7062,6 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
1785 + if (client->type == HNAE3_CLIENT_ROCE)
1786 + return;
1787 + if (hdev->nic_client && client->ops->uninit_instance) {
1788 +- hclge_uninit_instance_hw(hdev);
1789 + client->ops->uninit_instance(&vport->nic, 0);
1790 + hdev->nic_client = NULL;
1791 + vport->nic.client = NULL;
1792 +@@ -8012,6 +7994,8 @@ static const struct hnae3_ae_ops hclge_ops = {
1793 + .set_gro_en = hclge_gro_en,
1794 + .get_global_queue_id = hclge_covert_handle_qid_global,
1795 + .set_timer_task = hclge_set_timer_task,
1796 ++ .mac_connect_phy = hclge_mac_connect_phy,
1797 ++ .mac_disconnect_phy = hclge_mac_disconnect_phy,
1798 + };
1799 +
1800 + static struct hnae3_ae_algo ae_algo = {
1801 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
1802 +index dabb8437f8dc..84f28785ba28 100644
1803 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
1804 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
1805 +@@ -195,8 +195,10 @@ static void hclge_mac_adjust_link(struct net_device *netdev)
1806 + netdev_err(netdev, "failed to configure flow control.\n");
1807 + }
1808 +
1809 +-int hclge_mac_connect_phy(struct hclge_dev *hdev)
1810 ++int hclge_mac_connect_phy(struct hnae3_handle *handle)
1811 + {
1812 ++ struct hclge_vport *vport = hclge_get_vport(handle);
1813 ++ struct hclge_dev *hdev = vport->back;
1814 + struct net_device *netdev = hdev->vport[0].nic.netdev;
1815 + struct phy_device *phydev = hdev->hw.mac.phydev;
1816 + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
1817 +@@ -229,8 +231,10 @@ int hclge_mac_connect_phy(struct hclge_dev *hdev)
1818 + return 0;
1819 + }
1820 +
1821 +-void hclge_mac_disconnect_phy(struct hclge_dev *hdev)
1822 ++void hclge_mac_disconnect_phy(struct hnae3_handle *handle)
1823 + {
1824 ++ struct hclge_vport *vport = hclge_get_vport(handle);
1825 ++ struct hclge_dev *hdev = vport->back;
1826 + struct phy_device *phydev = hdev->hw.mac.phydev;
1827 +
1828 + if (!phydev)
1829 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
1830 +index 5fbf7dddb5d9..ef095d9c566f 100644
1831 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
1832 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
1833 +@@ -5,8 +5,8 @@
1834 + #define __HCLGE_MDIO_H
1835 +
1836 + int hclge_mac_mdio_config(struct hclge_dev *hdev);
1837 +-int hclge_mac_connect_phy(struct hclge_dev *hdev);
1838 +-void hclge_mac_disconnect_phy(struct hclge_dev *hdev);
1839 ++int hclge_mac_connect_phy(struct hnae3_handle *handle);
1840 ++void hclge_mac_disconnect_phy(struct hnae3_handle *handle);
1841 + void hclge_mac_start_phy(struct hclge_dev *hdev);
1842 + void hclge_mac_stop_phy(struct hclge_dev *hdev);
1843 +
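
The refactor above moves PHY attach/detach behind the hnae3_ae_ops table so
the generic client code no longer calls a PF-only helper directly. The
guard-if-present pattern it relies on, sketched with hypothetical names:

        struct hw_ops {
                /* optional: only the PF backend provides a PHY */
                int (*connect_phy)(void *ctx);
                void (*disconnect_phy)(void *ctx);
        };

        static int client_connect_phy(struct hw_ops *ops, void *ctx)
        {
                if (ops->connect_phy)
                        return ops->connect_phy(ctx);
                return 0;       /* no PHY on this backend: nothing to do */
        }
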
1844 +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
1845 +index c25acace7d91..e91005d0f20c 100644
1846 +--- a/drivers/pci/pci.c
1847 ++++ b/drivers/pci/pci.c
1848 +@@ -1233,7 +1233,6 @@ static void pci_restore_pcie_state(struct pci_dev *dev)
1849 + pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
1850 + }
1851 +
1852 +-
1853 + static int pci_save_pcix_state(struct pci_dev *dev)
1854 + {
1855 + int pos;
1856 +@@ -1270,6 +1269,45 @@ static void pci_restore_pcix_state(struct pci_dev *dev)
1857 + pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
1858 + }
1859 +
1860 ++static void pci_save_ltr_state(struct pci_dev *dev)
1861 ++{
1862 ++ int ltr;
1863 ++ struct pci_cap_saved_state *save_state;
1864 ++ u16 *cap;
1865 ++
1866 ++ if (!pci_is_pcie(dev))
1867 ++ return;
1868 ++
1869 ++ ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
1870 ++ if (!ltr)
1871 ++ return;
1872 ++
1873 ++ save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
1874 ++ if (!save_state) {
1875 ++ pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
1876 ++ return;
1877 ++ }
1878 ++
1879 ++ cap = (u16 *)&save_state->cap.data[0];
1880 ++ pci_read_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap++);
1881 ++ pci_read_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, cap++);
1882 ++}
1883 ++
1884 ++static void pci_restore_ltr_state(struct pci_dev *dev)
1885 ++{
1886 ++ struct pci_cap_saved_state *save_state;
1887 ++ int ltr;
1888 ++ u16 *cap;
1889 ++
1890 ++ save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
1891 ++ ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
1892 ++ if (!save_state || !ltr)
1893 ++ return;
1894 ++
1895 ++ cap = (u16 *)&save_state->cap.data[0];
1896 ++ pci_write_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap++);
1897 ++ pci_write_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, *cap++);
1898 ++}
1899 +
1900 + /**
1901 + * pci_save_state - save the PCI configuration space of a device before suspending
1902 +@@ -1291,6 +1329,7 @@ int pci_save_state(struct pci_dev *dev)
1903 + if (i != 0)
1904 + return i;
1905 +
1906 ++ pci_save_ltr_state(dev);
1907 + pci_save_dpc_state(dev);
1908 + return pci_save_vc_state(dev);
1909 + }
1910 +@@ -1390,7 +1429,12 @@ void pci_restore_state(struct pci_dev *dev)
1911 + if (!dev->state_saved)
1912 + return;
1913 +
1914 +- /* PCI Express register must be restored first */
1915 ++ /*
1916 ++ * Restore max latencies (in the LTR capability) before enabling
1917 ++ * LTR itself (in the PCIe capability).
1918 ++ */
1919 ++ pci_restore_ltr_state(dev);
1920 ++
1921 + pci_restore_pcie_state(dev);
1922 + pci_restore_pasid_state(dev);
1923 + pci_restore_pri_state(dev);
1924 +@@ -2501,6 +2545,25 @@ void pci_config_pm_runtime_put(struct pci_dev *pdev)
1925 + pm_runtime_put_sync(parent);
1926 + }
1927 +
1928 ++static const struct dmi_system_id bridge_d3_blacklist[] = {
1929 ++#ifdef CONFIG_X86
1930 ++ {
1931 ++ /*
1936 ++	 * Gigabyte X299 root port is not marked as hotplug capable,
1937 ++	 * which allows Linux to power manage it. However, this
1938 ++	 * confuses the BIOS SMI handler, so don't power manage root
1935 ++ * ports on that system.
1936 ++ */
1937 ++ .ident = "X299 DESIGNARE EX-CF",
1938 ++ .matches = {
1939 ++ DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
1940 ++ DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
1941 ++ },
1942 ++ },
1943 ++#endif
1944 ++ { }
1945 ++};
1946 ++
1947 + /**
1948 + * pci_bridge_d3_possible - Is it possible to put the bridge into D3
1949 + * @bridge: Bridge to check
1950 +@@ -2546,6 +2609,9 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge)
1951 + if (bridge->is_hotplug_bridge)
1952 + return false;
1953 +
1954 ++ if (dmi_check_system(bridge_d3_blacklist))
1955 ++ return false;
1956 ++
1957 + /*
1958 + * It should be safe to put PCIe ports from 2015 or newer
1959 + * to D3.
1960 +@@ -2998,6 +3064,11 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
1961 + if (error)
1962 + pci_err(dev, "unable to preallocate PCI-X save buffer\n");
1963 +
1964 ++ error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
1965 ++ 2 * sizeof(u16));
1966 ++ if (error)
1967 ++ pci_err(dev, "unable to allocate suspend buffer for LTR\n");
1968 ++
1969 + pci_allocate_vc_save_buffers(dev);
1970 + }
1971 +
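
Two details in the pci.c hunks above, spelled out as a sketch: the save
buffer is 2 * sizeof(u16) because the LTR extended capability holds exactly
two 16-bit latency registers, and the restore order matters because the LTR
enable bit lives in the PCIe capability's Device Control 2 register
(PCI_EXP_DEVCTL2_LTR_EN), written back by pci_restore_pcie_state():

        /* resume-path ordering: values first, then the enable bit, so
         * the device never runs with LTR enabled but stale latencies */
        pci_restore_ltr_state(dev);     /* LTR cap: max snoop/no-snoop   */
        pci_restore_pcie_state(dev);    /* PCIe cap, incl. PCI_EXP_DEVCTL2 */
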
1972 +diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
1973 +index c37e74ee609d..a9cbe5be277b 100644
1974 +--- a/drivers/platform/x86/intel_pmc_core.c
1975 ++++ b/drivers/platform/x86/intel_pmc_core.c
1976 +@@ -15,6 +15,7 @@
1977 + #include <linux/bitfield.h>
1978 + #include <linux/debugfs.h>
1979 + #include <linux/delay.h>
1980 ++#include <linux/dmi.h>
1981 + #include <linux/io.h>
1982 + #include <linux/module.h>
1983 + #include <linux/pci.h>
1984 +@@ -139,6 +140,7 @@ static const struct pmc_reg_map spt_reg_map = {
1985 + .pm_cfg_offset = SPT_PMC_PM_CFG_OFFSET,
1986 + .pm_read_disable_bit = SPT_PMC_READ_DISABLE_BIT,
1987 + .ltr_ignore_max = SPT_NUM_IP_IGN_ALLOWED,
1988 ++ .pm_vric1_offset = SPT_PMC_VRIC1_OFFSET,
1989 + };
1990 +
1991 + /* Cannonlake: PGD PFET Enable Ack Status Register(s) bitmap */
1992 +@@ -751,6 +753,37 @@ static const struct pci_device_id pmc_pci_ids[] = {
1993 + { 0, },
1994 + };
1995 +
1996 ++/*
1997 ++ * This quirk can be used on those platforms where
1998 ++ * the platform BIOS forces the 24MHz crystal to shut down
1999 ++ * before PMC can assert SLP_S0#.
2000 ++ */
2001 ++int quirk_xtal_ignore(const struct dmi_system_id *id)
2002 ++{
2003 ++ struct pmc_dev *pmcdev = &pmc;
2004 ++ u32 value;
2005 ++
2006 ++ value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_vric1_offset);
2007 ++ /* 24MHz Crystal Shutdown Qualification Disable */
2008 ++ value |= SPT_PMC_VRIC1_XTALSDQDIS;
2009 ++ /* Low Voltage Mode Enable */
2010 ++ value &= ~SPT_PMC_VRIC1_SLPS0LVEN;
2011 ++ pmc_core_reg_write(pmcdev, pmcdev->map->pm_vric1_offset, value);
2012 ++ return 0;
2013 ++}
2014 ++
2015 ++static const struct dmi_system_id pmc_core_dmi_table[] = {
2016 ++ {
2017 ++ .callback = quirk_xtal_ignore,
2018 ++ .ident = "HP Elite x2 1013 G3",
2019 ++ .matches = {
2020 ++ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
2021 ++ DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite x2 1013 G3"),
2022 ++ },
2023 ++ },
2024 ++ {}
2025 ++};
2026 ++
2027 + static int __init pmc_core_probe(void)
2028 + {
2029 + struct pmc_dev *pmcdev = &pmc;
2030 +@@ -792,6 +825,7 @@ static int __init pmc_core_probe(void)
2031 + return err;
2032 + }
2033 +
2034 ++ dmi_check_system(pmc_core_dmi_table);
2035 + pr_info(" initialized\n");
2036 + return 0;
2037 + }
2038 +diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h
2039 +index 1a0104d2cbf0..9bc16d7d2917 100644
2040 +--- a/drivers/platform/x86/intel_pmc_core.h
2041 ++++ b/drivers/platform/x86/intel_pmc_core.h
2042 +@@ -25,6 +25,7 @@
2043 + #define SPT_PMC_MTPMC_OFFSET 0x20
2044 + #define SPT_PMC_MFPMC_OFFSET 0x38
2045 + #define SPT_PMC_LTR_IGNORE_OFFSET 0x30C
2046 ++#define SPT_PMC_VRIC1_OFFSET 0x31c
2047 + #define SPT_PMC_MPHY_CORE_STS_0 0x1143
2048 + #define SPT_PMC_MPHY_CORE_STS_1 0x1142
2049 + #define SPT_PMC_MPHY_COM_STS_0 0x1155
2050 +@@ -135,6 +136,9 @@ enum ppfear_regs {
2051 + #define SPT_PMC_BIT_MPHY_CMN_LANE2 BIT(2)
2052 + #define SPT_PMC_BIT_MPHY_CMN_LANE3 BIT(3)
2053 +
2054 ++#define SPT_PMC_VRIC1_SLPS0LVEN BIT(13)
2055 ++#define SPT_PMC_VRIC1_XTALSDQDIS BIT(22)
2056 ++
2057 + /* Cannonlake Power Management Controller register offsets */
2058 + #define CNP_PMC_SLPS0_DBG_OFFSET 0x10B4
2059 + #define CNP_PMC_PM_CFG_OFFSET 0x1818
2060 +@@ -217,6 +221,7 @@ struct pmc_reg_map {
2061 + const int pm_read_disable_bit;
2062 + const u32 slps0_dbg_offset;
2063 + const u32 ltr_ignore_max;
2064 ++ const u32 pm_vric1_offset;
2065 + };
2066 +
2067 + /**
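
The quirk above uses the kernel's DMI matching machinery: dmi_check_system()
walks the table and invokes .callback for each entry whose .matches all hit.
A self-contained sketch with placeholder strings:

        #include <linux/dmi.h>

        static int apply_board_quirk(const struct dmi_system_id *id)
        {
                pr_info("applying quirk for %s\n", id->ident);
                return 0;
        }

        static const struct dmi_system_id board_quirk_table[] = {
                {
                        .callback = apply_board_quirk,
                        .ident = "Example Board",       /* placeholder */
                        .matches = {
                                DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
                                DMI_MATCH(DMI_PRODUCT_NAME, "Example Product"),
                        },
                },
                {}      /* terminator */
        };

        /* in probe: dmi_check_system(board_quirk_table); */
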
2068 +diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
2069 +index 7e35ce2162d0..503fda4e7e8e 100644
2070 +--- a/drivers/scsi/qla2xxx/qla_os.c
2071 ++++ b/drivers/scsi/qla2xxx/qla_os.c
2072 +@@ -1459,7 +1459,7 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
2073 + goto eh_reset_failed;
2074 + }
2075 + err = 2;
2076 +- if (do_reset(fcport, cmd->device->lun, blk_mq_rq_cpu(cmd->request) + 1)
2077 ++ if (do_reset(fcport, cmd->device->lun, 1)
2078 + != QLA_SUCCESS) {
2079 + ql_log(ql_log_warn, vha, 0x800c,
2080 + "do_reset failed for cmd=%p.\n", cmd);
2081 +diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
2082 +index 5a6e8e12701a..655ad26106e4 100644
2083 +--- a/drivers/scsi/scsi_lib.c
2084 ++++ b/drivers/scsi/scsi_lib.c
2085 +@@ -598,9 +598,16 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
2086 + if (!blk_rq_is_scsi(req)) {
2087 + WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
2088 + cmd->flags &= ~SCMD_INITIALIZED;
2089 +- destroy_rcu_head(&cmd->rcu);
2090 + }
2091 +
2092 ++ /*
2093 ++ * Calling rcu_barrier() is not necessary here because the
2094 ++ * SCSI error handler guarantees that the function called by
2095 ++ * call_rcu() has been called before scsi_end_request() is
2096 ++ * called.
2097 ++ */
2098 ++ destroy_rcu_head(&cmd->rcu);
2099 ++
2100 + /*
2101 + * In the MQ case the command gets freed by __blk_mq_end_request,
2102 + * so we have to do all cleanup that depends on it earlier.
2103 +diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
2104 +index 0508831d6fb9..0a82e93566dc 100644
2105 +--- a/drivers/scsi/scsi_transport_iscsi.c
2106 ++++ b/drivers/scsi/scsi_transport_iscsi.c
2107 +@@ -2200,6 +2200,8 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
2108 + scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
2109 + /* flush running scans then delete devices */
2110 + flush_work(&session->scan_work);
2111 ++ /* flush running unbind operations */
2112 ++ flush_work(&session->unbind_work);
2113 + __iscsi_unbind_session(&session->unbind_work);
2114 +
2115 + /* hw iscsi may not have removed all connections from session */
2116 +diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c
2117 +index 720760cd493f..ba39647a690c 100644
2118 +--- a/drivers/thermal/broadcom/bcm2835_thermal.c
2119 ++++ b/drivers/thermal/broadcom/bcm2835_thermal.c
2120 +@@ -119,8 +119,7 @@ static const struct debugfs_reg32 bcm2835_thermal_regs[] = {
2121 +
2122 + static void bcm2835_thermal_debugfs(struct platform_device *pdev)
2123 + {
2124 +- struct thermal_zone_device *tz = platform_get_drvdata(pdev);
2125 +- struct bcm2835_thermal_data *data = tz->devdata;
2126 ++ struct bcm2835_thermal_data *data = platform_get_drvdata(pdev);
2127 + struct debugfs_regset32 *regset;
2128 +
2129 + data->debugfsdir = debugfs_create_dir("bcm2835_thermal", NULL);
2130 +@@ -266,7 +265,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
2131 +
2132 + data->tz = tz;
2133 +
2134 +- platform_set_drvdata(pdev, tz);
2135 ++ platform_set_drvdata(pdev, data);
2136 +
2137 + 	 * Thermal_zone doesn't enable hwmon by default,
2138 + * Thermal_zone doesn't enable hwmon as default,
2139 +@@ -290,8 +289,8 @@ err_clk:
2140 +
2141 + static int bcm2835_thermal_remove(struct platform_device *pdev)
2142 + {
2143 +- struct thermal_zone_device *tz = platform_get_drvdata(pdev);
2144 +- struct bcm2835_thermal_data *data = tz->devdata;
2145 ++ struct bcm2835_thermal_data *data = platform_get_drvdata(pdev);
2146 ++ struct thermal_zone_device *tz = data->tz;
2147 +
2148 + debugfs_remove_recursive(data->debugfsdir);
2149 + thermal_zone_of_sensor_unregister(&pdev->dev, tz);
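
The fix above makes drvdata hold the driver's own private struct and
derives the thermal zone from it, keeping probe, debugfs, and remove
symmetric: whatever type platform_set_drvdata() stores is exactly what
every platform_get_drvdata() caller gets back. A sketch with a
hypothetical struct:

        static int demo_probe(struct platform_device *pdev)
        {
                struct demo_data *data;

                data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
                if (!data)
                        return -ENOMEM;
                platform_set_drvdata(pdev, data); /* store one agreed type */
                return 0;
        }

        static int demo_remove(struct platform_device *pdev)
        {
                struct demo_data *data = platform_get_drvdata(pdev);

                /* same type back out; derive anything else from it */
                dev_info(&pdev->dev, "removing %p\n", data);
                return 0;
        }
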
2150 +diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
2151 +index 61ca7ce3624e..5f3ed24e26ec 100644
2152 +--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
2153 ++++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
2154 +@@ -22,6 +22,13 @@ enum int3400_thermal_uuid {
2155 + INT3400_THERMAL_PASSIVE_1,
2156 + INT3400_THERMAL_ACTIVE,
2157 + INT3400_THERMAL_CRITICAL,
2158 ++ INT3400_THERMAL_ADAPTIVE_PERFORMANCE,
2159 ++ INT3400_THERMAL_EMERGENCY_CALL_MODE,
2160 ++ INT3400_THERMAL_PASSIVE_2,
2161 ++ INT3400_THERMAL_POWER_BOSS,
2162 ++ INT3400_THERMAL_VIRTUAL_SENSOR,
2163 ++ INT3400_THERMAL_COOLING_MODE,
2164 ++ INT3400_THERMAL_HARDWARE_DUTY_CYCLING,
2165 + INT3400_THERMAL_MAXIMUM_UUID,
2166 + };
2167 +
2168 +@@ -29,6 +36,13 @@ static char *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = {
2169 + "42A441D6-AE6A-462b-A84B-4A8CE79027D3",
2170 + "3A95C389-E4B8-4629-A526-C52C88626BAE",
2171 + "97C68AE7-15FA-499c-B8C9-5DA81D606E0A",
2172 ++ "63BE270F-1C11-48FD-A6F7-3AF253FF3E2D",
2173 ++ "5349962F-71E6-431D-9AE8-0A635B710AEE",
2174 ++ "9E04115A-AE87-4D1C-9500-0F3E340BFE75",
2175 ++ "F5A35014-C209-46A4-993A-EB56DE7530A1",
2176 ++ "6ED722A7-9240-48A5-B479-31EEF723D7CF",
2177 ++ "16CAF1B7-DD38-40ED-B1C1-1B8A1913D531",
2178 ++ "BE84BABF-C4D4-403D-B495-3128FD44dAC1",
2179 + };
2180 +
2181 + struct int3400_thermal_priv {
2182 +@@ -299,10 +313,9 @@ static int int3400_thermal_probe(struct platform_device *pdev)
2183 +
2184 + platform_set_drvdata(pdev, priv);
2185 +
2186 +- if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) {
2187 +- int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
2188 +- int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
2189 +- }
2190 ++ int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
2191 ++ int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
2192 ++
2193 + priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
2194 + priv, &int3400_thermal_ops,
2195 + &int3400_thermal_params, 0, 0);
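
The seven new UUID strings above must stay in the exact order of the
int3400_thermal_uuid enum, since the array is indexed by enumerator. A
defensive alternative (not what the driver does) is designated
initializers, which pin each string to its enumerator and survive
reordering:

        static char *uuids_demo[INT3400_THERMAL_MAXIMUM_UUID] = {
                [INT3400_THERMAL_PASSIVE_1] = "42A441D6-AE6A-462b-A84B-4A8CE79027D3",
                [INT3400_THERMAL_ACTIVE]    = "3A95C389-E4B8-4629-A526-C52C88626BAE",
                [INT3400_THERMAL_CRITICAL]  = "97C68AE7-15FA-499c-B8C9-5DA81D606E0A",
                /* each entry is pinned to its enumerator, so inserting or
                 * reordering enum values cannot silently shift the strings */
        };
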
2196 +diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
2197 +index 7571f7c2e7c9..ac7256b5f020 100644
2198 +--- a/drivers/thermal/intel/intel_powerclamp.c
2199 ++++ b/drivers/thermal/intel/intel_powerclamp.c
2200 +@@ -101,7 +101,7 @@ struct powerclamp_worker_data {
2201 + bool clamping;
2202 + };
2203 +
2204 +-static struct powerclamp_worker_data * __percpu worker_data;
2205 ++static struct powerclamp_worker_data __percpu *worker_data;
2206 + static struct thermal_cooling_device *cooling_dev;
2207 + static unsigned long *cpu_clamping_mask; /* bit map for tracking per cpu
2208 + * clamping kthread worker
2209 +@@ -494,7 +494,7 @@ static void start_power_clamp_worker(unsigned long cpu)
2210 + struct powerclamp_worker_data *w_data = per_cpu_ptr(worker_data, cpu);
2211 + struct kthread_worker *worker;
2212 +
2213 +- worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inject/%ld", cpu);
2214 ++ worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inj/%ld", cpu);
2215 + if (IS_ERR(worker))
2216 + return;
2217 +
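
Two independent fixes in the powerclamp hunks above. The declaration change
puts the sparse __percpu annotation on the pointed-to type, where the
address-space checker expects it. The kthread rename is a length fix: a
task comm is TASK_COMM_LEN (16) bytes including the NUL, i.e. 15 visible
characters, as this worked example shows:

        /* "kidle_inject/128" is 16 visible chars -> truncated to 15:
         *      "kidle_inject/12"  (CPU id mangled in ps/top output)
         * "kidle_inj/128" is 13 chars -> fits, with headroom for
         * larger CPU ids. */
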
2218 +diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
2219 +index 48eef552cba4..fc9399d9c082 100644
2220 +--- a/drivers/thermal/samsung/exynos_tmu.c
2221 ++++ b/drivers/thermal/samsung/exynos_tmu.c
2222 +@@ -666,7 +666,7 @@ static int exynos_get_temp(void *p, int *temp)
2223 + struct exynos_tmu_data *data = p;
2224 + int value, ret = 0;
2225 +
2226 +- if (!data || !data->tmu_read || !data->enabled)
2227 ++ if (!data || !data->tmu_read)
2228 + return -EINVAL;
2229 + else if (!data->enabled)
2230 + /*
2231 +diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
2232 +index 07cad54b84f1..e8e125acd712 100644
2233 +--- a/fs/cifs/cifsfs.c
2234 ++++ b/fs/cifs/cifsfs.c
2235 +@@ -1010,7 +1010,7 @@ static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
2236 + unsigned int xid;
2237 + int rc;
2238 +
2239 +- if (remap_flags & ~REMAP_FILE_ADVISORY)
2240 ++ if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
2241 + return -EINVAL;
2242 +
2243 + cifs_dbg(FYI, "clone range\n");
2244 +diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
2245 +index 924269cec135..e32c264e3adb 100644
2246 +--- a/fs/cifs/smb2maperror.c
2247 ++++ b/fs/cifs/smb2maperror.c
2248 +@@ -1036,7 +1036,8 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
2249 + {STATUS_UNFINISHED_CONTEXT_DELETED, -EIO,
2250 + "STATUS_UNFINISHED_CONTEXT_DELETED"},
2251 + {STATUS_NO_TGT_REPLY, -EIO, "STATUS_NO_TGT_REPLY"},
2252 +- {STATUS_OBJECTID_NOT_FOUND, -EIO, "STATUS_OBJECTID_NOT_FOUND"},
2253 ++	/* Note that ENOATTR and ENODATA are the same errno */
2254 ++ {STATUS_OBJECTID_NOT_FOUND, -ENODATA, "STATUS_OBJECTID_NOT_FOUND"},
2255 + {STATUS_NO_IP_ADDRESSES, -EIO, "STATUS_NO_IP_ADDRESSES"},
2256 + {STATUS_WRONG_CREDENTIAL_HANDLE, -EIO,
2257 + "STATUS_WRONG_CREDENTIAL_HANDLE"},
2258 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
2259 +index b29f711ab965..ea56b1cdbdde 100644
2260 +--- a/fs/cifs/smb2ops.c
2261 ++++ b/fs/cifs/smb2ops.c
2262 +@@ -949,6 +949,16 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
2263 + resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
2264 + memset(rsp_iov, 0, sizeof(rsp_iov));
2265 +
2266 ++ if (ses->server->ops->query_all_EAs) {
2267 ++ if (!ea_value) {
2268 ++ rc = ses->server->ops->query_all_EAs(xid, tcon, path,
2269 ++ ea_name, NULL, 0,
2270 ++ cifs_sb);
2271 ++ if (rc == -ENODATA)
2272 ++ goto sea_exit;
2273 ++ }
2274 ++ }
2275 ++
2276 + /* Open */
2277 + memset(&open_iov, 0, sizeof(open_iov));
2278 + rqst[0].rq_iov = open_iov;
2279 +diff --git a/fs/cifs/trace.h b/fs/cifs/trace.h
2280 +index 59be48206932..b49bc925fb4f 100644
2281 +--- a/fs/cifs/trace.h
2282 ++++ b/fs/cifs/trace.h
2283 +@@ -378,19 +378,19 @@ DECLARE_EVENT_CLASS(smb3_tcon_class,
2284 + __field(unsigned int, xid)
2285 + __field(__u32, tid)
2286 + __field(__u64, sesid)
2287 +- __field(const char *, unc_name)
2288 ++ __string(name, unc_name)
2289 + __field(int, rc)
2290 + ),
2291 + TP_fast_assign(
2292 + __entry->xid = xid;
2293 + __entry->tid = tid;
2294 + __entry->sesid = sesid;
2295 +- __entry->unc_name = unc_name;
2296 ++ __assign_str(name, unc_name);
2297 + __entry->rc = rc;
2298 + ),
2299 + TP_printk("xid=%u sid=0x%llx tid=0x%x unc_name=%s rc=%d",
2300 + __entry->xid, __entry->sesid, __entry->tid,
2301 +- __entry->unc_name, __entry->rc)
2302 ++ __get_str(name), __entry->rc)
2303 + )
2304 +
2305 + #define DEFINE_SMB3_TCON_EVENT(name) \
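
Storing a const char * in a trace entry, as the old code above did, records
only the pointer: by the time the ring buffer is read the tree-connect name
may already be freed, and userspace consumers cannot dereference kernel
pointers anyway. The __string()/__assign_str()/__get_str() trio copies the
string into the per-event record instead. A minimal sketch (event name
hypothetical):

        TRACE_EVENT(demo_event,
                TP_PROTO(const char *label),
                TP_ARGS(label),
                TP_STRUCT__entry(
                        __string(label, label)      /* reserves strlen+1 bytes */
                ),
                TP_fast_assign(
                        __assign_str(label, label); /* copies into the buffer */
                ),
                TP_printk("label=%s", __get_str(label))
        );
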
2306 +diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
2307 +index 2e76fb55d94a..5f24fdc140ad 100644
2308 +--- a/fs/ext4/ioctl.c
2309 ++++ b/fs/ext4/ioctl.c
2310 +@@ -999,6 +999,13 @@ resizefs_out:
2311 + if (!blk_queue_discard(q))
2312 + return -EOPNOTSUPP;
2313 +
2314 ++ /*
2315 ++ * We haven't replayed the journal, so we cannot use our
2316 ++ * block-bitmap-guided storage zapping commands.
2317 ++ */
2318 ++ if (test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb))
2319 ++ return -EROFS;
2320 ++
2321 + if (copy_from_user(&range, (struct fstrim_range __user *)arg,
2322 + sizeof(range)))
2323 + return -EFAULT;
2324 +diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
2325 +index 3d9b18505c0c..e7ae26e36c9c 100644
2326 +--- a/fs/ext4/resize.c
2327 ++++ b/fs/ext4/resize.c
2328 +@@ -932,11 +932,18 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
2329 + memcpy(n_group_desc, o_group_desc,
2330 + EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
2331 + n_group_desc[gdb_num] = gdb_bh;
2332 ++
2333 ++ BUFFER_TRACE(gdb_bh, "get_write_access");
2334 ++ err = ext4_journal_get_write_access(handle, gdb_bh);
2335 ++ if (err) {
2336 ++ kvfree(n_group_desc);
2337 ++ brelse(gdb_bh);
2338 ++ return err;
2339 ++ }
2340 ++
2341 + EXT4_SB(sb)->s_group_desc = n_group_desc;
2342 + EXT4_SB(sb)->s_gdb_count++;
2343 + kvfree(o_group_desc);
2344 +- BUFFER_TRACE(gdb_bh, "get_write_access");
2345 +- err = ext4_journal_get_write_access(handle, gdb_bh);
2346 + return err;
2347 + }
2348 +
2349 +@@ -2073,6 +2080,10 @@ out:
2350 + free_flex_gd(flex_gd);
2351 + if (resize_inode != NULL)
2352 + iput(resize_inode);
2353 +- ext4_msg(sb, KERN_INFO, "resized filesystem to %llu", n_blocks_count);
2354 ++ if (err)
2355 ++ ext4_warning(sb, "error (%d) occurred during "
2356 ++ "file system resize", err);
2357 ++ ext4_msg(sb, KERN_INFO, "resized filesystem to %llu",
2358 ++ ext4_blocks_count(es));
2359 + return err;
2360 + }
2361 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
2362 +index fb12d3c17c1b..b9bca7298f96 100644
2363 +--- a/fs/ext4/super.c
2364 ++++ b/fs/ext4/super.c
2365 +@@ -430,6 +430,12 @@ static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
2366 + spin_unlock(&sbi->s_md_lock);
2367 + }
2368 +
2369 ++static bool system_going_down(void)
2370 ++{
2371 ++ return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
2372 ++ || system_state == SYSTEM_RESTART;
2373 ++}
2374 ++
2375 + /* Deal with the reporting of failure conditions on a filesystem such as
2376 + * inconsistencies detected or read IO failures.
2377 + *
2378 +@@ -460,7 +466,12 @@ static void ext4_handle_error(struct super_block *sb)
2379 + if (journal)
2380 + jbd2_journal_abort(journal, -EIO);
2381 + }
2382 +- if (test_opt(sb, ERRORS_RO)) {
2383 ++ /*
2384 ++ * We force ERRORS_RO behavior when system is rebooting. Otherwise we
2385 ++	 * could panic during 'reboot -f' because the underlying device has
2386 ++	 * already been disabled.
2387 ++ */
2388 ++ if (test_opt(sb, ERRORS_RO) || system_going_down()) {
2389 + ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
2390 + /*
2391 + * Make sure updated value of ->s_mount_flags will be visible
2392 +@@ -468,8 +479,7 @@ static void ext4_handle_error(struct super_block *sb)
2393 + */
2394 + smp_wmb();
2395 + sb->s_flags |= SB_RDONLY;
2396 +- }
2397 +- if (test_opt(sb, ERRORS_PANIC)) {
2398 ++ } else if (test_opt(sb, ERRORS_PANIC)) {
2399 + if (EXT4_SB(sb)->s_journal &&
2400 + !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
2401 + return;
2402 +diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
2403 +index f955cd3e0677..7743fa83b895 100644
2404 +--- a/fs/f2fs/checkpoint.c
2405 ++++ b/fs/f2fs/checkpoint.c
2406 +@@ -306,8 +306,9 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
2407 + goto skip_write;
2408 +
2409 + /* collect a number of dirty meta pages and write together */
2410 +- if (wbc->for_kupdate ||
2411 +- get_pages(sbi, F2FS_DIRTY_META) < nr_pages_to_skip(sbi, META))
2412 ++ if (wbc->sync_mode != WB_SYNC_ALL &&
2413 ++ get_pages(sbi, F2FS_DIRTY_META) <
2414 ++ nr_pages_to_skip(sbi, META))
2415 + goto skip_write;
2416 +
2417 + /* if locked failed, cp will flush dirty pages instead */
2418 +@@ -405,7 +406,7 @@ static int f2fs_set_meta_page_dirty(struct page *page)
2419 + if (!PageDirty(page)) {
2420 + __set_page_dirty_nobuffers(page);
2421 + inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META);
2422 +- SetPagePrivate(page);
2423 ++ f2fs_set_page_private(page, 0);
2424 + f2fs_trace_pid(page);
2425 + return 1;
2426 + }
2427 +@@ -956,7 +957,7 @@ void f2fs_update_dirty_page(struct inode *inode, struct page *page)
2428 + inode_inc_dirty_pages(inode);
2429 + spin_unlock(&sbi->inode_lock[type]);
2430 +
2431 +- SetPagePrivate(page);
2432 ++ f2fs_set_page_private(page, 0);
2433 + f2fs_trace_pid(page);
2434 + }
2435 +
2436 +diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
2437 +index f91d8630c9a2..c99aab23efea 100644
2438 +--- a/fs/f2fs/data.c
2439 ++++ b/fs/f2fs/data.c
2440 +@@ -2711,8 +2711,7 @@ void f2fs_invalidate_page(struct page *page, unsigned int offset,
2441 + if (IS_ATOMIC_WRITTEN_PAGE(page))
2442 + return f2fs_drop_inmem_page(inode, page);
2443 +
2444 +- set_page_private(page, 0);
2445 +- ClearPagePrivate(page);
2446 ++ f2fs_clear_page_private(page);
2447 + }
2448 +
2449 + int f2fs_release_page(struct page *page, gfp_t wait)
2450 +@@ -2726,8 +2725,7 @@ int f2fs_release_page(struct page *page, gfp_t wait)
2451 + return 0;
2452 +
2453 + clear_cold_data(page);
2454 +- set_page_private(page, 0);
2455 +- ClearPagePrivate(page);
2456 ++ f2fs_clear_page_private(page);
2457 + return 1;
2458 + }
2459 +
2460 +@@ -2795,12 +2793,8 @@ int f2fs_migrate_page(struct address_space *mapping,
2461 + return -EAGAIN;
2462 + }
2463 +
2464 +- /*
2465 +- * A reference is expected if PagePrivate set when move mapping,
2466 +- * however F2FS breaks this for maintaining dirty page counts when
2467 +- * truncating pages. So here adjusting the 'extra_count' make it work.
2468 +- */
2469 +- extra_count = (atomic_written ? 1 : 0) - page_has_private(page);
2470 ++ /* one extra reference was held for atomic_write page */
2471 ++ extra_count = atomic_written ? 1 : 0;
2472 + rc = migrate_page_move_mapping(mapping, newpage,
2473 + page, mode, extra_count);
2474 + if (rc != MIGRATEPAGE_SUCCESS) {
2475 +@@ -2821,9 +2815,10 @@ int f2fs_migrate_page(struct address_space *mapping,
2476 + get_page(newpage);
2477 + }
2478 +
2479 +- if (PagePrivate(page))
2480 +- SetPagePrivate(newpage);
2481 +- set_page_private(newpage, page_private(page));
2482 ++ if (PagePrivate(page)) {
2483 ++ f2fs_set_page_private(newpage, page_private(page));
2484 ++ f2fs_clear_page_private(page);
2485 ++ }
2486 +
2487 + if (mode != MIGRATE_SYNC_NO_COPY)
2488 + migrate_page_copy(newpage, page);
2489 +diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
2490 +index 50d0d36280fa..99a6063c2327 100644
2491 +--- a/fs/f2fs/dir.c
2492 ++++ b/fs/f2fs/dir.c
2493 +@@ -728,7 +728,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
2494 + !f2fs_truncate_hole(dir, page->index, page->index + 1)) {
2495 + f2fs_clear_page_cache_dirty_tag(page);
2496 + clear_page_dirty_for_io(page);
2497 +- ClearPagePrivate(page);
2498 ++ f2fs_clear_page_private(page);
2499 + ClearPageUptodate(page);
2500 + clear_cold_data(page);
2501 + inode_dec_dirty_pages(dir);
2502 +diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
2503 +index 279bc00489cc..6d9186a6528c 100644
2504 +--- a/fs/f2fs/f2fs.h
2505 ++++ b/fs/f2fs/f2fs.h
2506 +@@ -2825,6 +2825,27 @@ static inline bool is_valid_data_blkaddr(struct f2fs_sb_info *sbi,
2507 + return true;
2508 + }
2509 +
2510 ++static inline void f2fs_set_page_private(struct page *page,
2511 ++ unsigned long data)
2512 ++{
2513 ++ if (PagePrivate(page))
2514 ++ return;
2515 ++
2516 ++ get_page(page);
2517 ++ SetPagePrivate(page);
2518 ++ set_page_private(page, data);
2519 ++}
2520 ++
2521 ++static inline void f2fs_clear_page_private(struct page *page)
2522 ++{
2523 ++ if (!PagePrivate(page))
2524 ++ return;
2525 ++
2526 ++ set_page_private(page, 0);
2527 ++ ClearPagePrivate(page);
2528 ++ f2fs_put_page(page, 0);
2529 ++}
2530 ++
2531 + /*
2532 + * file.c
2533 + */
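
The two helpers above encode the VM convention that a page with PagePrivate
set owns an extra page reference; that is also why the migration hunk
earlier in this patch stops subtracting page_has_private(page) from
extra_count. Usage is symmetric, as a sketch:

        /* tag the page; the helper takes the extra reference itself */
        f2fs_set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);

        /* ...later: untag, which drops that reference again */
        f2fs_clear_page_private(page);
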
2534 +diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
2535 +index ae2b45e75847..30ed43bce110 100644
2536 +--- a/fs/f2fs/file.c
2537 ++++ b/fs/f2fs/file.c
2538 +@@ -768,7 +768,6 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
2539 + {
2540 + struct inode *inode = d_inode(dentry);
2541 + int err;
2542 +- bool size_changed = false;
2543 +
2544 + if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
2545 + return -EIO;
2546 +@@ -843,8 +842,6 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
2547 + down_write(&F2FS_I(inode)->i_sem);
2548 + F2FS_I(inode)->last_disk_size = i_size_read(inode);
2549 + up_write(&F2FS_I(inode)->i_sem);
2550 +-
2551 +- size_changed = true;
2552 + }
2553 +
2554 + __setattr_copy(inode, attr);
2555 +@@ -858,7 +855,7 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
2556 + }
2557 +
2558 + /* file size may changed here */
2559 +- f2fs_mark_inode_dirty_sync(inode, size_changed);
2560 ++ f2fs_mark_inode_dirty_sync(inode, true);
2561 +
2562 + /* inode change will produce dirty node pages flushed by checkpoint */
2563 + f2fs_balance_fs(F2FS_I_SB(inode), true);
2564 +diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
2565 +index 4f450e573312..3f99ab288695 100644
2566 +--- a/fs/f2fs/node.c
2567 ++++ b/fs/f2fs/node.c
2568 +@@ -1920,7 +1920,9 @@ static int f2fs_write_node_pages(struct address_space *mapping,
2569 + f2fs_balance_fs_bg(sbi);
2570 +
2571 + /* collect a number of dirty node pages and write together */
2572 +- if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
2573 ++ if (wbc->sync_mode != WB_SYNC_ALL &&
2574 ++ get_pages(sbi, F2FS_DIRTY_NODES) <
2575 ++ nr_pages_to_skip(sbi, NODE))
2576 + goto skip_write;
2577 +
2578 + if (wbc->sync_mode == WB_SYNC_ALL)
2579 +@@ -1959,7 +1961,7 @@ static int f2fs_set_node_page_dirty(struct page *page)
2580 + if (!PageDirty(page)) {
2581 + __set_page_dirty_nobuffers(page);
2582 + inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
2583 +- SetPagePrivate(page);
2584 ++ f2fs_set_page_private(page, 0);
2585 + f2fs_trace_pid(page);
2586 + return 1;
2587 + }
2588 +diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
2589 +index e1b1d390b329..b6c8b0696ef6 100644
2590 +--- a/fs/f2fs/segment.c
2591 ++++ b/fs/f2fs/segment.c
2592 +@@ -191,8 +191,7 @@ void f2fs_register_inmem_page(struct inode *inode, struct page *page)
2593 +
2594 + f2fs_trace_pid(page);
2595 +
2596 +- set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
2597 +- SetPagePrivate(page);
2598 ++ f2fs_set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
2599 +
2600 + new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);
2601 +
2602 +@@ -280,8 +279,7 @@ next:
2603 + ClearPageUptodate(page);
2604 + clear_cold_data(page);
2605 + }
2606 +- set_page_private(page, 0);
2607 +- ClearPagePrivate(page);
2608 ++ f2fs_clear_page_private(page);
2609 + f2fs_put_page(page, 1);
2610 +
2611 + list_del(&cur->list);
2612 +@@ -370,8 +368,7 @@ void f2fs_drop_inmem_page(struct inode *inode, struct page *page)
2613 + kmem_cache_free(inmem_entry_slab, cur);
2614 +
2615 + ClearPageUptodate(page);
2616 +- set_page_private(page, 0);
2617 +- ClearPagePrivate(page);
2618 ++ f2fs_clear_page_private(page);
2619 + f2fs_put_page(page, 0);
2620 +
2621 + trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE);
2622 +diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
2623 +index 5892fa3c885f..144ffba3ec5a 100644
2624 +--- a/fs/f2fs/super.c
2625 ++++ b/fs/f2fs/super.c
2626 +@@ -1460,9 +1460,16 @@ static int f2fs_enable_quotas(struct super_block *sb);
2627 +
2628 + static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
2629 + {
2630 ++ unsigned int s_flags = sbi->sb->s_flags;
2631 + struct cp_control cpc;
2632 +- int err;
2633 ++ int err = 0;
2634 ++ int ret;
2635 +
2636 ++ if (s_flags & SB_RDONLY) {
2637 ++ f2fs_msg(sbi->sb, KERN_ERR,
2638 ++ "checkpoint=disable on readonly fs");
2639 ++ return -EINVAL;
2640 ++ }
2641 + sbi->sb->s_flags |= SB_ACTIVE;
2642 +
2643 + f2fs_update_time(sbi, DISABLE_TIME);
2644 +@@ -1470,18 +1477,24 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
2645 + while (!f2fs_time_over(sbi, DISABLE_TIME)) {
2646 + mutex_lock(&sbi->gc_mutex);
2647 + err = f2fs_gc(sbi, true, false, NULL_SEGNO);
2648 +- if (err == -ENODATA)
2649 ++ if (err == -ENODATA) {
2650 ++ err = 0;
2651 + break;
2652 ++ }
2653 + if (err && err != -EAGAIN)
2654 +- return err;
2655 ++ break;
2656 + }
2657 +
2658 +- err = sync_filesystem(sbi->sb);
2659 +- if (err)
2660 +- return err;
2661 ++ ret = sync_filesystem(sbi->sb);
2662 ++ if (ret || err) {
2663 ++		err = ret ? ret : err;
2664 ++ goto restore_flag;
2665 ++ }
2666 +
2667 +- if (f2fs_disable_cp_again(sbi))
2668 +- return -EAGAIN;
2669 ++ if (f2fs_disable_cp_again(sbi)) {
2670 ++ err = -EAGAIN;
2671 ++ goto restore_flag;
2672 ++ }
2673 +
2674 + mutex_lock(&sbi->gc_mutex);
2675 + cpc.reason = CP_PAUSE;
2676 +@@ -1490,7 +1503,9 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
2677 +
2678 + sbi->unusable_block_count = 0;
2679 + mutex_unlock(&sbi->gc_mutex);
2680 +- return 0;
2681 ++restore_flag:
2682 ++	sbi->sb->s_flags = s_flags;	/* Restore SB_RDONLY status */
2683 ++ return err;
2684 + }
2685 +
2686 + static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
2687 +@@ -3359,7 +3374,7 @@ skip_recovery:
2688 + if (test_opt(sbi, DISABLE_CHECKPOINT)) {
2689 + err = f2fs_disable_checkpoint(sbi);
2690 + if (err)
2691 +- goto free_meta;
2692 ++ goto sync_free_meta;
2693 + } else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) {
2694 + f2fs_enable_checkpoint(sbi);
2695 + }
2696 +@@ -3372,7 +3387,7 @@ skip_recovery:
2697 + /* After POR, we can run background GC thread.*/
2698 + err = f2fs_start_gc_thread(sbi);
2699 + if (err)
2700 +- goto free_meta;
2701 ++ goto sync_free_meta;
2702 + }
2703 + kvfree(options);
2704 +
2705 +@@ -3394,6 +3409,11 @@ skip_recovery:
2706 + f2fs_update_time(sbi, REQ_TIME);
2707 + return 0;
2708 +
2709 ++sync_free_meta:
2710 ++ /* safe to flush all the data */
2711 ++ sync_filesystem(sbi->sb);
2712 ++ retry = false;
2713 ++
2714 + free_meta:
2715 + #ifdef CONFIG_QUOTA
2716 + f2fs_truncate_quota_inode_pages(sb);
2717 +@@ -3407,6 +3427,8 @@ free_meta:
2718 + * falls into an infinite loop in f2fs_sync_meta_pages().
2719 + */
2720 + truncate_inode_pages_final(META_MAPPING(sbi));
2721 ++ /* evict some inodes being cached by GC */
2722 ++ evict_inodes(sb);
2723 + f2fs_unregister_sysfs(sbi);
2724 + free_root_inode:
2725 + dput(sb->s_root);
2726 +diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
2727 +index 73b92985198b..6b6fe6431a64 100644
2728 +--- a/fs/f2fs/xattr.c
2729 ++++ b/fs/f2fs/xattr.c
2730 +@@ -347,7 +347,7 @@ check:
2731 + *base_addr = txattr_addr;
2732 + return 0;
2733 + out:
2734 +- kzfree(txattr_addr);
2735 ++ kvfree(txattr_addr);
2736 + return err;
2737 + }
2738 +
2739 +@@ -390,7 +390,7 @@ static int read_all_xattrs(struct inode *inode, struct page *ipage,
2740 + *base_addr = txattr_addr;
2741 + return 0;
2742 + fail:
2743 +- kzfree(txattr_addr);
2744 ++ kvfree(txattr_addr);
2745 + return err;
2746 + }
2747 +
2748 +@@ -517,7 +517,7 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
2749 + }
2750 + error = size;
2751 + out:
2752 +- kzfree(base_addr);
2753 ++ kvfree(base_addr);
2754 + return error;
2755 + }
2756 +
2757 +@@ -563,7 +563,7 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
2758 + }
2759 + error = buffer_size - rest;
2760 + cleanup:
2761 +- kzfree(base_addr);
2762 ++ kvfree(base_addr);
2763 + return error;
2764 + }
2765 +
2766 +@@ -694,7 +694,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
2767 + if (!error && S_ISDIR(inode->i_mode))
2768 + set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_CP);
2769 + exit:
2770 +- kzfree(base_addr);
2771 ++ kvfree(base_addr);
2772 + return error;
2773 + }
2774 +
2775 +diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
2776 +index 798f1253141a..3b7b8e95c98a 100644
2777 +--- a/fs/notify/inotify/inotify_user.c
2778 ++++ b/fs/notify/inotify/inotify_user.c
2779 +@@ -519,8 +519,10 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
2780 + fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
2781 + if (!fsn_mark)
2782 + return -ENOENT;
2783 +- else if (create)
2784 +- return -EEXIST;
2785 ++ else if (create) {
2786 ++ ret = -EEXIST;
2787 ++ goto out;
2788 ++ }
2789 +
2790 + i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
2791 +
2792 +@@ -548,6 +550,7 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
2793 + /* return the wd */
2794 + ret = i_mark->wd;
2795 +
2796 ++out:
2797 + /* match the get from fsnotify_find_mark() */
2798 + fsnotify_put_mark(fsn_mark);
2799 +
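
The bug fixed above is a classic reference leak: fsnotify_find_mark()
returns with a reference held, and the early return -EEXIST skipped the
put. The single-exit pattern, sketched with hypothetical helpers:

        obj = lookup_and_get(key);      /* takes a reference on success */
        if (!obj)
                return -ENOENT;

        if (already_exists) {
                ret = -EEXIST;
                goto out;               /* error paths share the cleanup */
        }

        ret = do_update(obj);
        out:
                put_object(obj);        /* matches the get from the lookup */
                return ret;
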
2800 +diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
2801 +index bbcc185062bb..d29d869abec1 100644
2802 +--- a/fs/proc/kcore.c
2803 ++++ b/fs/proc/kcore.c
2804 +@@ -54,6 +54,28 @@ static LIST_HEAD(kclist_head);
2805 + static DECLARE_RWSEM(kclist_lock);
2806 + static int kcore_need_update = 1;
2807 +
2808 ++/*
2809 ++ * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
2810 ++ * Same as oldmem_pfn_is_ram in vmcore
2811 ++ */
2812 ++static int (*mem_pfn_is_ram)(unsigned long pfn);
2813 ++
2814 ++int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn))
2815 ++{
2816 ++ if (mem_pfn_is_ram)
2817 ++ return -EBUSY;
2818 ++ mem_pfn_is_ram = fn;
2819 ++ return 0;
2820 ++}
2821 ++
2822 ++static int pfn_is_ram(unsigned long pfn)
2823 ++{
2824 ++ if (mem_pfn_is_ram)
2825 ++ return mem_pfn_is_ram(pfn);
2826 ++ else
2827 ++ return 1;
2828 ++}
2829 ++
2830 + /* This doesn't grab kclist_lock, so it should only be used at init time. */
2831 + void __init kclist_add(struct kcore_list *new, void *addr, size_t size,
2832 + int type)
2833 +@@ -465,6 +487,11 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
2834 + goto out;
2835 + }
2836 + m = NULL; /* skip the list anchor */
2837 ++ } else if (!pfn_is_ram(__pa(start) >> PAGE_SHIFT)) {
2838 ++ if (clear_user(buffer, tsz)) {
2839 ++ ret = -EFAULT;
2840 ++ goto out;
2841 ++ }
2842 + } else if (m->type == KCORE_VMALLOC) {
2843 + vread(buf, (char *)start, tsz);
2844 + /* we have to zero-fill user buffer even if no read */
2845 +diff --git a/include/linux/atalk.h b/include/linux/atalk.h
2846 +index 840cf92307ba..d5cfc0b15b76 100644
2847 +--- a/include/linux/atalk.h
2848 ++++ b/include/linux/atalk.h
2849 +@@ -158,7 +158,7 @@ extern int sysctl_aarp_retransmit_limit;
2850 + extern int sysctl_aarp_resolve_time;
2851 +
2852 + #ifdef CONFIG_SYSCTL
2853 +-extern void atalk_register_sysctl(void);
2854 ++extern int atalk_register_sysctl(void);
2855 + extern void atalk_unregister_sysctl(void);
2856 + #else
2857 + static inline int atalk_register_sysctl(void)
2858 +diff --git a/include/linux/kcore.h b/include/linux/kcore.h
2859 +index 8c3f8c14eeaa..c843f4a9c512 100644
2860 +--- a/include/linux/kcore.h
2861 ++++ b/include/linux/kcore.h
2862 +@@ -44,6 +44,8 @@ void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz)
2863 + m->vaddr = (unsigned long)vaddr;
2864 + kclist_add(m, addr, sz, KCORE_REMAP);
2865 + }
2866 ++
2867 ++extern int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn));
2868 + #else
2869 + static inline
2870 + void kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
2871 +diff --git a/include/linux/swap.h b/include/linux/swap.h
2872 +index 622025ac1461..f1146ed21062 100644
2873 +--- a/include/linux/swap.h
2874 ++++ b/include/linux/swap.h
2875 +@@ -157,9 +157,9 @@ struct swap_extent {
2876 + /*
2877 + * Max bad pages in the new format..
2878 + */
2879 +-#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
2880 + #define MAX_SWAP_BADPAGES \
2881 +- ((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))
2882 ++ ((offsetof(union swap_header, magic.magic) - \
2883 ++ offsetof(union swap_header, info.badpages)) / sizeof(int))
2884 +
2885 + enum {
2886 + SWP_USED = (1 << 0), /* is slot in swap_info[] used? */
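
The removed __swapoffset() macro above is the classic null-pointer-cast
imitation of offsetof(), which is undefined behaviour (and trips UBSAN);
offsetof() computes the same constant the standard-blessed way. A userspace
demonstration with a simplified, hypothetical layout:

        #include <stddef.h>
        #include <stdio.h>

        union demo_header {
                struct { char bootbits[1024]; int badpages[1]; } info;
                struct { char reserved[1524]; char magic[10]; } magic;
        };

        int main(void)
        {
                /* compile-time constants; no object is dereferenced */
                printf("%zu\n", (offsetof(union demo_header, magic.magic) -
                                 offsetof(union demo_header, info.badpages))
                                        / sizeof(int));
                return 0;
        }
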
2887 +diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
2888 +index 5b50fe4906d2..7b60fd186cfe 100644
2889 +--- a/include/trace/events/rxrpc.h
2890 ++++ b/include/trace/events/rxrpc.h
2891 +@@ -76,6 +76,7 @@ enum rxrpc_client_trace {
2892 + rxrpc_client_chan_disconnect,
2893 + rxrpc_client_chan_pass,
2894 + rxrpc_client_chan_unstarted,
2895 ++ rxrpc_client_chan_wait_failed,
2896 + rxrpc_client_cleanup,
2897 + rxrpc_client_count,
2898 + rxrpc_client_discard,
2899 +@@ -276,6 +277,7 @@ enum rxrpc_tx_point {
2900 + EM(rxrpc_client_chan_disconnect, "ChDisc") \
2901 + EM(rxrpc_client_chan_pass, "ChPass") \
2902 + EM(rxrpc_client_chan_unstarted, "ChUnst") \
2903 ++ EM(rxrpc_client_chan_wait_failed, "ChWtFl") \
2904 + EM(rxrpc_client_cleanup, "Clean ") \
2905 + EM(rxrpc_client_count, "Count ") \
2906 + EM(rxrpc_client_discard, "Discar") \
2907 +diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
2908 +index 2ada5e21dfa6..4a8f390a2b82 100644
2909 +--- a/kernel/bpf/inode.c
2910 ++++ b/kernel/bpf/inode.c
2911 +@@ -554,19 +554,6 @@ struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type typ
2912 + }
2913 + EXPORT_SYMBOL(bpf_prog_get_type_path);
2914 +
2915 +-static void bpf_evict_inode(struct inode *inode)
2916 +-{
2917 +- enum bpf_type type;
2918 +-
2919 +- truncate_inode_pages_final(&inode->i_data);
2920 +- clear_inode(inode);
2921 +-
2922 +- if (S_ISLNK(inode->i_mode))
2923 +- kfree(inode->i_link);
2924 +- if (!bpf_inode_type(inode, &type))
2925 +- bpf_any_put(inode->i_private, type);
2926 +-}
2927 +-
2928 + /*
2929 + * Display the mount options in /proc/mounts.
2930 + */
2931 +@@ -579,11 +566,28 @@ static int bpf_show_options(struct seq_file *m, struct dentry *root)
2932 + return 0;
2933 + }
2934 +
2935 ++static void bpf_destroy_inode_deferred(struct rcu_head *head)
2936 ++{
2937 ++ struct inode *inode = container_of(head, struct inode, i_rcu);
2938 ++ enum bpf_type type;
2939 ++
2940 ++ if (S_ISLNK(inode->i_mode))
2941 ++ kfree(inode->i_link);
2942 ++ if (!bpf_inode_type(inode, &type))
2943 ++ bpf_any_put(inode->i_private, type);
2944 ++ free_inode_nonrcu(inode);
2945 ++}
2946 ++
2947 ++static void bpf_destroy_inode(struct inode *inode)
2948 ++{
2949 ++ call_rcu(&inode->i_rcu, bpf_destroy_inode_deferred);
2950 ++}
2951 ++
2952 + static const struct super_operations bpf_super_ops = {
2953 + .statfs = simple_statfs,
2954 + .drop_inode = generic_delete_inode,
2955 + .show_options = bpf_show_options,
2956 +- .evict_inode = bpf_evict_inode,
2957 ++ .destroy_inode = bpf_destroy_inode,
2958 + };
2959 +
2960 + enum {
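
The switch from .evict_inode to .destroy_inode above exists because
RCU-walk path lookup may still dereference i_link after the last iput, so
the final free has to wait out a grace period. The general deferred-free
pattern, with a hypothetical struct:

        struct blob {
                struct rcu_head rcu;
                char payload[64];
        };

        static void blob_free_rcu(struct rcu_head *head)
        {
                kfree(container_of(head, struct blob, rcu));
        }

        static void blob_release(struct blob *b)
        {
                /* readers found b under rcu_read_lock(); defer the free
                 * until every such reader has left its critical section */
                call_rcu(&b->rcu, blob_free_rcu);
        }
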
2961 +diff --git a/kernel/events/core.c b/kernel/events/core.c
2962 +index 26d6edab051a..2e2305a81047 100644
2963 +--- a/kernel/events/core.c
2964 ++++ b/kernel/events/core.c
2965 +@@ -7178,6 +7178,7 @@ static void perf_event_mmap_output(struct perf_event *event,
2966 + struct perf_output_handle handle;
2967 + struct perf_sample_data sample;
2968 + int size = mmap_event->event_id.header.size;
2969 ++ u32 type = mmap_event->event_id.header.type;
2970 + int ret;
2971 +
2972 + if (!perf_event_mmap_match(event, data))
2973 +@@ -7221,6 +7222,7 @@ static void perf_event_mmap_output(struct perf_event *event,
2974 + perf_output_end(&handle);
2975 + out:
2976 + mmap_event->event_id.header.size = size;
2977 ++ mmap_event->event_id.header.type = type;
2978 + }
2979 +
2980 + static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
2981 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
2982 +index 01a2489de94e..62cc29364fba 100644
2983 +--- a/kernel/sched/core.c
2984 ++++ b/kernel/sched/core.c
2985 +@@ -6942,7 +6942,7 @@ static int __maybe_unused cpu_period_quota_parse(char *buf,
2986 + {
2987 + char tok[21]; /* U64_MAX */
2988 +
2989 +- if (!sscanf(buf, "%s %llu", tok, periodp))
2990 ++ if (sscanf(buf, "%20s %llu", tok, periodp) < 1)
2991 + return -EINVAL;
2992 +
2993 + *periodp *= NSEC_PER_USEC;
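
Two hardenings in the one-line change above: the field width caps the copy
into the 21-byte buffer, and comparing against 1 also rejects EOF, which
!sscanf(...) lets through since EOF is -1 and therefore truthy. A userspace
analogue:

        #include <stdio.h>

        int main(void)
        {
                char tok[21];           /* 20 chars + NUL, matching %20s */
                unsigned long long period = 0;

                /* unbounded %s could overflow tok[]; %20s cannot.
                 * sscanf() returns the conversion count or EOF (-1),
                 * so "< 1" rejects both empty input and EOF. */
                if (sscanf("max 100000", "%20s %llu", tok, &period) < 1)
                        return 1;
                printf("%s %llu\n", tok, period);
                return 0;
        }
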
2994 +diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
2995 +index 033ec7c45f13..1ccf77f6d346 100644
2996 +--- a/kernel/sched/cpufreq_schedutil.c
2997 ++++ b/kernel/sched/cpufreq_schedutil.c
2998 +@@ -48,10 +48,10 @@ struct sugov_cpu {
2999 +
3000 + bool iowait_boost_pending;
3001 + unsigned int iowait_boost;
3002 +- unsigned int iowait_boost_max;
3003 + u64 last_update;
3004 +
3005 + unsigned long bw_dl;
3006 ++ unsigned long min;
3007 + unsigned long max;
3008 +
3009 + /* The field below is for single-CPU policies only: */
3010 +@@ -303,8 +303,7 @@ static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
3011 + if (delta_ns <= TICK_NSEC)
3012 + return false;
3013 +
3014 +- sg_cpu->iowait_boost = set_iowait_boost
3015 +- ? sg_cpu->sg_policy->policy->min : 0;
3016 ++ sg_cpu->iowait_boost = set_iowait_boost ? sg_cpu->min : 0;
3017 + sg_cpu->iowait_boost_pending = set_iowait_boost;
3018 +
3019 + return true;
3020 +@@ -344,14 +343,13 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
3021 +
3022 + /* Double the boost at each request */
3023 + if (sg_cpu->iowait_boost) {
3024 +- sg_cpu->iowait_boost <<= 1;
3025 +- if (sg_cpu->iowait_boost > sg_cpu->iowait_boost_max)
3026 +- sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
3027 ++ sg_cpu->iowait_boost =
3028 ++ min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
3029 + return;
3030 + }
3031 +
3032 + /* First wakeup after IO: start with minimum boost */
3033 +- sg_cpu->iowait_boost = sg_cpu->sg_policy->policy->min;
3034 ++ sg_cpu->iowait_boost = sg_cpu->min;
3035 + }
3036 +
3037 + /**
3038 +@@ -373,47 +371,38 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
3039 + * This mechanism is designed to boost tasks that frequently wait on IO, while
3040 + * being more conservative on tasks which do sporadic IO operations.
3041 + */
3042 +-static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
3043 +- unsigned long *util, unsigned long *max)
3044 ++static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
3045 ++ unsigned long util, unsigned long max)
3046 + {
3047 +- unsigned int boost_util, boost_max;
3048 ++ unsigned long boost;
3049 +
3050 + /* No boost currently required */
3051 + if (!sg_cpu->iowait_boost)
3052 +- return;
3053 ++ return util;
3054 +
3055 + /* Reset boost if the CPU appears to have been idle enough */
3056 + if (sugov_iowait_reset(sg_cpu, time, false))
3057 +- return;
3058 ++ return util;
3059 +
3060 +- /*
3061 +- * An IO waiting task has just woken up:
3062 +- * allow to further double the boost value
3063 +- */
3064 +- if (sg_cpu->iowait_boost_pending) {
3065 +- sg_cpu->iowait_boost_pending = false;
3066 +- } else {
3067 ++ if (!sg_cpu->iowait_boost_pending) {
3068 + /*
3069 +- * Otherwise: reduce the boost value and disable it when we
3070 +- * reach the minimum.
3071 ++ * No boost pending; reduce the boost value.
3072 + */
3073 + sg_cpu->iowait_boost >>= 1;
3074 +- if (sg_cpu->iowait_boost < sg_cpu->sg_policy->policy->min) {
3075 ++ if (sg_cpu->iowait_boost < sg_cpu->min) {
3076 + sg_cpu->iowait_boost = 0;
3077 +- return;
3078 ++ return util;
3079 + }
3080 + }
3081 +
3082 ++ sg_cpu->iowait_boost_pending = false;
3083 ++
3084 + /*
3085 +- * Apply the current boost value: a CPU is boosted only if its current
3086 +- * utilization is smaller then the current IO boost level.
3087 ++ * @util is already in capacity scale; convert iowait_boost
3088 ++ * into the same scale so we can compare.
3089 + */
3090 +- boost_util = sg_cpu->iowait_boost;
3091 +- boost_max = sg_cpu->iowait_boost_max;
3092 +- if (*util * boost_max < *max * boost_util) {
3093 +- *util = boost_util;
3094 +- *max = boost_max;
3095 +- }
3096 ++ boost = (sg_cpu->iowait_boost * max) >> SCHED_CAPACITY_SHIFT;
3097 ++ return max(boost, util);
3098 + }
3099 +
3100 + #ifdef CONFIG_NO_HZ_COMMON
3101 +@@ -460,7 +449,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
3102 +
3103 + util = sugov_get_util(sg_cpu);
3104 + max = sg_cpu->max;
3105 +- sugov_iowait_apply(sg_cpu, time, &util, &max);
3106 ++ util = sugov_iowait_apply(sg_cpu, time, util, max);
3107 + next_f = get_next_freq(sg_policy, util, max);
3108 + /*
3109 + * Do not reduce the frequency if the CPU has not been idle
3110 +@@ -500,7 +489,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
3111 +
3112 + j_util = sugov_get_util(j_sg_cpu);
3113 + j_max = j_sg_cpu->max;
3114 +- sugov_iowait_apply(j_sg_cpu, time, &j_util, &j_max);
3115 ++ j_util = sugov_iowait_apply(j_sg_cpu, time, j_util, j_max);
3116 +
3117 + if (j_util * max > j_max * util) {
3118 + util = j_util;
3119 +@@ -837,7 +826,9 @@ static int sugov_start(struct cpufreq_policy *policy)
3120 + memset(sg_cpu, 0, sizeof(*sg_cpu));
3121 + sg_cpu->cpu = cpu;
3122 + sg_cpu->sg_policy = sg_policy;
3123 +- sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
3124 ++ sg_cpu->min =
3125 ++ (SCHED_CAPACITY_SCALE * policy->cpuinfo.min_freq) /
3126 ++ policy->cpuinfo.max_freq;
3127 + }
3128 +
3129 + for_each_cpu(cpu, policy->cpus) {
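
After this rework the iowait boost lives on the capacity scale instead of in
frequency units: sg_cpu->min is the minimum frequency as a fraction of
SCHED_CAPACITY_SCALE (1024), each IO wakeup doubles the boost up to a clamp of
1024, and applying it shifts the boost into the same units as util so a plain
max() suffices. A worked standalone example, assuming a 400 MHz floor on a
1600 MHz CPU running at full capacity:

    #include <stdio.h>

    #define SCALE 1024UL
    #define SHIFT 10

    int main(void)
    {
            unsigned long min = (SCALE * 400) / 1600;   /* 256 */
            unsigned long boost = min, util = 300, max = SCALE;

            for (;;) {
                    unsigned long eff = (boost * max) >> SHIFT;

                    printf("boost=%4lu effective=%4lu\n",
                           boost, eff > util ? eff : util);
                    if (boost == SCALE)
                            break;
                    boost = boost << 1 > SCALE ? SCALE : boost << 1;
            }
            return 0;
    }

This prints an effective value of 300 (util still wins at boost 256), then
512, then 1024.
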
3130 +diff --git a/lib/div64.c b/lib/div64.c
3131 +index 01c8602bb6ff..ee146bb4c558 100644
3132 +--- a/lib/div64.c
3133 ++++ b/lib/div64.c
3134 +@@ -109,7 +109,7 @@ u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
3135 + quot = div_u64_rem(dividend, divisor, &rem32);
3136 + *remainder = rem32;
3137 + } else {
3138 +- int n = 1 + fls(high);
3139 ++ int n = fls(high);
3140 + quot = div_u64(dividend >> n, divisor >> n);
3141 +
3142 + if (quot != 0)
3143 +@@ -147,7 +147,7 @@ u64 div64_u64(u64 dividend, u64 divisor)
3144 + if (high == 0) {
3145 + quot = div_u64(dividend, divisor);
3146 + } else {
3147 +- int n = 1 + fls(high);
3148 ++ int n = fls(high);
3149 + quot = div_u64(dividend >> n, divisor >> n);
3150 +
3151 + if (quot != 0)
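
The fls() change drops an off-by-one: with n = fls(high) the shifted divisor
keeps its 32 most significant bits, so the estimated quotient is off by at
most one and the decrement/increment correction just below always lands on the
exact result; the old 1 + fls(high) threw away one extra divisor bit, letting
the estimate drift outside what a single correction step can repair. A
standalone brute force comparing the old estimate against exact division
(rand() is crude but adequate for a sketch):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int fls32(uint32_t x)          /* kernel-style fls(): 1-based */
    {
            int n = 0;

            while (x) {
                    n++;
                    x >>= 1;
            }
            return n;
    }

    static uint64_t div_est(uint64_t dividend, uint64_t divisor, int extra)
    {
            int n = fls32(divisor >> 32) + extra;  /* extra=1: old code */
            uint64_t quot = (dividend >> n) / (divisor >> n);

            if (quot != 0)
                    quot--;
            if (dividend - quot * divisor >= divisor)
                    quot++;
            return quot;
    }

    int main(void)
    {
            for (long i = 0; i < 10000000; i++) {
                    uint64_t a = ((uint64_t)rand() << 32) ^ (uint64_t)rand();
                    uint64_t b = ((uint64_t)rand() << 32) ^ (uint64_t)rand();

                    if (!(b >> 32))
                            continue;   /* 32-bit divisors take the fast path */
                    if (div_est(a, b, 1) != a / b)
                            printf("old estimate wrong: 0x%llx / 0x%llx\n",
                                   (unsigned long long)a,
                                   (unsigned long long)b);
            }
            return 0;
    }
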
3152 +diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
3153 +index 8006295f8bd7..dda73991bb54 100644
3154 +--- a/net/appletalk/atalk_proc.c
3155 ++++ b/net/appletalk/atalk_proc.c
3156 +@@ -255,7 +255,7 @@ out_interface:
3157 + goto out;
3158 + }
3159 +
3160 +-void __exit atalk_proc_exit(void)
3161 ++void atalk_proc_exit(void)
3162 + {
3163 + remove_proc_entry("interface", atalk_proc_dir);
3164 + remove_proc_entry("route", atalk_proc_dir);
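
atalk_proc_exit() loses its __exit marker because the reworked atalk_init() in
the next file calls it on an init-time error path: for built-in code,
everything placed in the exit section is discarded, so an __init caller would
jump into code that is no longer there. A rough userspace stand-in for the
mechanism (section names illustrative):

    /* The kernel's __init/__exit are essentially section attributes;
     * the exit section is discarded for built-in objects. */
    #define my_init __attribute__((section(".init.text")))
    #define my_exit __attribute__((section(".exit.text")))

    static void my_exit teardown(void) { }  /* gone when built in */

    static int my_init setup(void)
    {
            /* ... a registration just failed ... */
            teardown();  /* fine as a module; dangling if built in */
            return -1;
    }
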
3165 +diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
3166 +index 9b6bc5abe946..795fbc6c06aa 100644
3167 +--- a/net/appletalk/ddp.c
3168 ++++ b/net/appletalk/ddp.c
3169 +@@ -1910,12 +1910,16 @@ static const char atalk_err_snap[] __initconst =
3170 + /* Called by proto.c on kernel start up */
3171 + static int __init atalk_init(void)
3172 + {
3173 +- int rc = proto_register(&ddp_proto, 0);
3174 ++ int rc;
3175 +
3176 +- if (rc != 0)
3177 ++ rc = proto_register(&ddp_proto, 0);
3178 ++ if (rc)
3179 + goto out;
3180 +
3181 +- (void)sock_register(&atalk_family_ops);
3182 ++ rc = sock_register(&atalk_family_ops);
3183 ++ if (rc)
3184 ++ goto out_proto;
3185 ++
3186 + ddp_dl = register_snap_client(ddp_snap_id, atalk_rcv);
3187 + if (!ddp_dl)
3188 + printk(atalk_err_snap);
3189 +@@ -1923,12 +1927,33 @@ static int __init atalk_init(void)
3190 + dev_add_pack(&ltalk_packet_type);
3191 + dev_add_pack(&ppptalk_packet_type);
3192 +
3193 +- register_netdevice_notifier(&ddp_notifier);
3194 ++ rc = register_netdevice_notifier(&ddp_notifier);
3195 ++ if (rc)
3196 ++ goto out_sock;
3197 ++
3198 + aarp_proto_init();
3199 +- atalk_proc_init();
3200 +- atalk_register_sysctl();
3201 ++ rc = atalk_proc_init();
3202 ++ if (rc)
3203 ++ goto out_aarp;
3204 ++
3205 ++ rc = atalk_register_sysctl();
3206 ++ if (rc)
3207 ++ goto out_proc;
3208 + out:
3209 + return rc;
3210 ++out_proc:
3211 ++ atalk_proc_exit();
3212 ++out_aarp:
3213 ++ aarp_cleanup_module();
3214 ++ unregister_netdevice_notifier(&ddp_notifier);
3215 ++out_sock:
3216 ++ dev_remove_pack(&ppptalk_packet_type);
3217 ++ dev_remove_pack(&ltalk_packet_type);
3218 ++ unregister_snap_client(ddp_dl);
3219 ++ sock_unregister(PF_APPLETALK);
3220 ++out_proto:
3221 ++ proto_unregister(&ddp_proto);
3222 ++ goto out;
3223 + }
3224 + module_init(atalk_init);
3225 +
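
The rewritten atalk_init() is the standard kernel unwind ladder: every
facility that registers successfully gains a label that undoes it, the labels
sit in reverse order of acquisition, and a failure jumps to the label covering
exactly what has been acquired so far. The generic shape, with hypothetical
names:

    static int register_a(void) { return 0; }
    static int register_b(void) { return 0; }
    static int register_c(void) { return -1; }  /* pretend this one fails */
    static void unregister_a(void) { }
    static void unregister_b(void) { }

    static int example_init(void)
    {
            int rc;

            rc = register_a();
            if (rc)
                    goto out;
            rc = register_b();
            if (rc)
                    goto out_a;
            rc = register_c();
            if (rc)
                    goto out_b;
    out:
            return rc;
    out_b:
            unregister_b();
    out_a:
            unregister_a();
            goto out;
    }

Note the same quirk as the patch: success returns through out:, while the
error labels live below it and jump back up, keeping a single return site.
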
3226 +diff --git a/net/appletalk/sysctl_net_atalk.c b/net/appletalk/sysctl_net_atalk.c
3227 +index c744a853fa5f..d945b7c0176d 100644
3228 +--- a/net/appletalk/sysctl_net_atalk.c
3229 ++++ b/net/appletalk/sysctl_net_atalk.c
3230 +@@ -45,9 +45,12 @@ static struct ctl_table atalk_table[] = {
3231 +
3232 + static struct ctl_table_header *atalk_table_header;
3233 +
3234 +-void atalk_register_sysctl(void)
3235 ++int __init atalk_register_sysctl(void)
3236 + {
3237 + atalk_table_header = register_net_sysctl(&init_net, "net/appletalk", atalk_table);
3238 ++ if (!atalk_table_header)
3239 ++ return -ENOMEM;
3240 ++ return 0;
3241 + }
3242 +
3243 + void atalk_unregister_sysctl(void)
3244 +diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
3245 +index 5cf6d9f4761d..83797b3949e2 100644
3246 +--- a/net/rxrpc/conn_client.c
3247 ++++ b/net/rxrpc/conn_client.c
3248 +@@ -704,6 +704,7 @@ int rxrpc_connect_call(struct rxrpc_sock *rx,
3249 +
3250 + ret = rxrpc_wait_for_channel(call, gfp);
3251 + if (ret < 0) {
3252 ++ trace_rxrpc_client(call->conn, ret, rxrpc_client_chan_wait_failed);
3253 + rxrpc_disconnect_client_call(call);
3254 + goto out;
3255 + }
3256 +@@ -774,16 +775,22 @@ static void rxrpc_set_client_reap_timer(struct rxrpc_net *rxnet)
3257 + */
3258 + void rxrpc_disconnect_client_call(struct rxrpc_call *call)
3259 + {
3260 +- unsigned int channel = call->cid & RXRPC_CHANNELMASK;
3261 + struct rxrpc_connection *conn = call->conn;
3262 +- struct rxrpc_channel *chan = &conn->channels[channel];
3263 ++ struct rxrpc_channel *chan = NULL;
3264 + struct rxrpc_net *rxnet = conn->params.local->rxnet;
3265 ++ unsigned int channel = -1;
3266 ++ u32 cid;
3267 +
3268 ++ spin_lock(&conn->channel_lock);
3269 ++
3270 ++ cid = call->cid;
3271 ++ if (cid) {
3272 ++ channel = cid & RXRPC_CHANNELMASK;
3273 ++ chan = &conn->channels[channel];
3274 ++ }
3275 + trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
3276 + call->conn = NULL;
3277 +
3278 +- spin_lock(&conn->channel_lock);
3279 +-
3280 + /* Calls that have never actually been assigned a channel can simply be
3281 + * discarded. If the conn didn't get used either, it will follow
3282 + * immediately unless someone else grabs it in the meantime.
3283 +@@ -807,7 +814,10 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
3284 + goto out;
3285 + }
3286 +
3287 +- ASSERTCMP(rcu_access_pointer(chan->call), ==, call);
3288 ++ if (rcu_access_pointer(chan->call) != call) {
3289 ++ spin_unlock(&conn->channel_lock);
3290 ++ BUG();
3291 ++ }
3292 +
3293 + /* If a client call was exposed to the world, we save the result for
3294 + * retransmission.
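
The locking fix above moves the channel_lock acquisition ahead of the reads:
the old code computed the channel slot from call->cid before taking the lock,
so a call that was never assigned a channel (cid == 0) indexed a bogus slot,
and a concurrent assignment could race the read. Swapping ASSERTCMP for an
explicit unlock-then-BUG() also avoids crashing with the spinlock still held.
The shape of the fix in a generic sketch (hypothetical types):

    #include <pthread.h>

    struct conn {
            pthread_mutex_t lock;
            unsigned int cid;        /* written by the channel allocator */
            void *channels[4];
    };

    static void *channel_of(struct conn *c)
    {
            void *chan = NULL;
            unsigned int cid;

            pthread_mutex_lock(&c->lock);
            cid = c->cid;                  /* snapshot under the lock */
            if (cid)                       /* 0 means never assigned */
                    chan = c->channels[cid & 3];
            pthread_mutex_unlock(&c->lock);
            return chan;
    }
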
3295 +diff --git a/sound/drivers/opl3/opl3_voice.h b/sound/drivers/opl3/opl3_voice.h
3296 +index 5b02bd49fde4..4e4ecc21760b 100644
3297 +--- a/sound/drivers/opl3/opl3_voice.h
3298 ++++ b/sound/drivers/opl3/opl3_voice.h
3299 +@@ -41,7 +41,7 @@ void snd_opl3_timer_func(struct timer_list *t);
3300 +
3301 + /* Prototypes for opl3_drums.c */
3302 + void snd_opl3_load_drums(struct snd_opl3 *opl3);
3303 +-void snd_opl3_drum_switch(struct snd_opl3 *opl3, int note, int on_off, int vel, struct snd_midi_channel *chan);
3304 ++void snd_opl3_drum_switch(struct snd_opl3 *opl3, int note, int vel, int on_off, struct snd_midi_channel *chan);
3305 +
3306 + /* Prototypes for opl3_oss.c */
3307 + #if IS_ENABLED(CONFIG_SND_SEQUENCER_OSS)
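
The opl3 header fix works because parameter names in a C prototype are pure
documentation: only the types are checked against the definition, so a
declaration that lists two int parameters in the wrong order (here vel and
on_off) compiles silently while misleading everyone who reads the header. A
standalone demonstration:

    #include <stdio.h>

    void set(int vel, int on_off);   /* header claims (vel, on_off)... */

    void set(int on_off, int vel)    /* ...definition disagrees: no warning */
    {
            printf("on_off=%d vel=%d\n", on_off, vel);
    }

    int main(void)
    {
            set(127, 1);  /* a caller trusting the header gets it backwards */
            return 0;
    }
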
3308 +diff --git a/sound/isa/sb/sb8.c b/sound/isa/sb/sb8.c
3309 +index d77dcba276b5..1eb8b61a185b 100644
3310 +--- a/sound/isa/sb/sb8.c
3311 ++++ b/sound/isa/sb/sb8.c
3312 +@@ -111,6 +111,10 @@ static int snd_sb8_probe(struct device *pdev, unsigned int dev)
3313 +
3314 + /* block the 0x388 port to avoid PnP conflicts */
3315 + acard->fm_res = request_region(0x388, 4, "SoundBlaster FM");
3316 ++ if (!acard->fm_res) {
3317 ++ err = -EBUSY;
3318 ++ goto _err;
3319 ++ }
3320 +
3321 + if (port[dev] != SNDRV_AUTO_PORT) {
3322 + if ((err = snd_sbdsp_create(card, port[dev], irq[dev],
3323 +diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
3324 +index 907cf1a46712..3ef2b27ebbe8 100644
3325 +--- a/sound/pci/echoaudio/echoaudio.c
3326 ++++ b/sound/pci/echoaudio/echoaudio.c
3327 +@@ -1954,6 +1954,11 @@ static int snd_echo_create(struct snd_card *card,
3328 + }
3329 + chip->dsp_registers = (volatile u32 __iomem *)
3330 + ioremap_nocache(chip->dsp_registers_phys, sz);
3331 ++ if (!chip->dsp_registers) {
3332 ++ dev_err(chip->card->dev, "ioremap failed\n");
3333 ++ snd_echo_free(chip);
3334 ++ return -ENOMEM;
3335 ++ }
3336 +
3337 + if (request_irq(pci->irq, snd_echo_interrupt, IRQF_SHARED,
3338 + KBUILD_MODNAME, chip)) {
3339 +diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
3340 +index 169e347c76f6..9ba1a2e1ed7a 100644
3341 +--- a/tools/lib/bpf/libbpf.c
3342 ++++ b/tools/lib/bpf/libbpf.c
3343 +@@ -627,7 +627,7 @@ bpf_object__init_maps(struct bpf_object *obj, int flags)
3344 + bool strict = !(flags & MAPS_RELAX_COMPAT);
3345 + int i, map_idx, map_def_sz, nr_maps = 0;
3346 + Elf_Scn *scn;
3347 +- Elf_Data *data;
3348 ++ Elf_Data *data = NULL;
3349 + Elf_Data *symbols = obj->efile.symbols;
3350 +
3351 + if (obj->efile.maps_shndx < 0)
3352 +diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt
3353 +index 4ac7775fbc11..4851285ba00c 100644
3354 +--- a/tools/perf/Documentation/perf-config.txt
3355 ++++ b/tools/perf/Documentation/perf-config.txt
3356 +@@ -114,7 +114,7 @@ Given a $HOME/.perfconfig like this:
3357 +
3358 + [report]
3359 + # Defaults
3360 +- sort-order = comm,dso,symbol
3361 ++ sort_order = comm,dso,symbol
3362 + percent-limit = 0
3363 + queue-size = 0
3364 + children = true
3365 +diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
3366 +index 4bc2085e5197..39c05f89104e 100644
3367 +--- a/tools/perf/Documentation/perf-stat.txt
3368 ++++ b/tools/perf/Documentation/perf-stat.txt
3369 +@@ -72,9 +72,8 @@ report::
3370 + --all-cpus::
3371 + system-wide collection from all CPUs (default if no target is specified)
3372 +
3373 +--c::
3374 +---scale::
3375 +- scale/normalize counter values
3376 ++--no-scale::
3377 ++ Don't scale/normalize counter values
3378 +
3379 + -d::
3380 + --detailed::
3381 +diff --git a/tools/perf/bench/epoll-ctl.c b/tools/perf/bench/epoll-ctl.c
3382 +index 0c0a6e824934..2af067859966 100644
3383 +--- a/tools/perf/bench/epoll-ctl.c
3384 ++++ b/tools/perf/bench/epoll-ctl.c
3385 +@@ -224,7 +224,7 @@ static int do_threads(struct worker *worker, struct cpu_map *cpu)
3386 + pthread_attr_t thread_attr, *attrp = NULL;
3387 + cpu_set_t cpuset;
3388 + unsigned int i, j;
3389 +- int ret;
3390 ++ int ret = 0;
3391 +
3392 + if (!noaffinity)
3393 + pthread_attr_init(&thread_attr);
3394 +diff --git a/tools/perf/bench/epoll-wait.c b/tools/perf/bench/epoll-wait.c
3395 +index 5a11534e96a0..fe85448abd45 100644
3396 +--- a/tools/perf/bench/epoll-wait.c
3397 ++++ b/tools/perf/bench/epoll-wait.c
3398 +@@ -293,7 +293,7 @@ static int do_threads(struct worker *worker, struct cpu_map *cpu)
3399 + pthread_attr_t thread_attr, *attrp = NULL;
3400 + cpu_set_t cpuset;
3401 + unsigned int i, j;
3402 +- int ret, events = EPOLLIN;
3403 ++ int ret = 0, events = EPOLLIN;
3404 +
3405 + if (oneshot)
3406 + events |= EPOLLONESHOT;
3407 +diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
3408 +index 63a3afc7f32b..a52295dbad2b 100644
3409 +--- a/tools/perf/builtin-stat.c
3410 ++++ b/tools/perf/builtin-stat.c
3411 +@@ -728,7 +728,8 @@ static struct option stat_options[] = {
3412 + "system-wide collection from all CPUs"),
3413 + OPT_BOOLEAN('g', "group", &group,
3414 + "put the counters into a counter group"),
3415 +- OPT_BOOLEAN('c', "scale", &stat_config.scale, "scale/normalize counters"),
3416 ++ OPT_BOOLEAN(0, "scale", &stat_config.scale,
3417 ++ "Use --no-scale to disable counter scaling for multiplexing"),
3418 + OPT_INCR('v', "verbose", &verbose,
3419 + "be more verbose (show counter open errors, etc)"),
3420 + OPT_INTEGER('r', "repeat", &stat_config.run_count,
3421 +diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
3422 +index f64e312db787..616408251e25 100644
3423 +--- a/tools/perf/builtin-top.c
3424 ++++ b/tools/perf/builtin-top.c
3425 +@@ -1633,8 +1633,9 @@ int cmd_top(int argc, const char **argv)
3426 + annotation_config__init();
3427 +
3428 + symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
3429 +- if (symbol__init(NULL) < 0)
3430 +- return -1;
3431 ++ status = symbol__init(NULL);
3432 ++ if (status < 0)
3433 ++ goto out_delete_evlist;
3434 +
3435 + sort__setup_elide(stdout);
3436 +
3437 +diff --git a/tools/perf/tests/backward-ring-buffer.c b/tools/perf/tests/backward-ring-buffer.c
3438 +index 6d598cc071ae..1a9c3becf5ff 100644
3439 +--- a/tools/perf/tests/backward-ring-buffer.c
3440 ++++ b/tools/perf/tests/backward-ring-buffer.c
3441 +@@ -18,7 +18,7 @@ static void testcase(void)
3442 + int i;
3443 +
3444 + for (i = 0; i < NR_ITERS; i++) {
3445 +- char proc_name[10];
3446 ++ char proc_name[15];
3447 +
3448 + snprintf(proc_name, sizeof(proc_name), "p:%d\n", i);
3449 + prctl(PR_SET_NAME, proc_name);
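
The buffer grows because "p:%d\n" needs up to 14 bytes in the worst case
(2 chars, up to 10 digits for an int, the newline, and the NUL), so 10 bytes
could truncate, and newer GCC flags exactly this with -Wformat-truncation.
Truncation is detectable from snprintf()'s return value, which reports the
length it wanted to write:

    #include <stdio.h>

    int main(void)
    {
            char small[10];
            int need = snprintf(small, sizeof(small), "p:%d\n", 123456789);

            if (need >= (int)sizeof(small))
                    printf("truncated: needed %d bytes, have %zu\n",
                           need, sizeof(small));
            return 0;
    }
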
3450 +diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c
3451 +index ea7acf403727..71f60c0f9faa 100644
3452 +--- a/tools/perf/tests/evsel-tp-sched.c
3453 ++++ b/tools/perf/tests/evsel-tp-sched.c
3454 +@@ -85,5 +85,6 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
3455 + if (perf_evsel__test_field(evsel, "target_cpu", 4, true))
3456 + ret = -1;
3457 +
3458 ++ perf_evsel__delete(evsel);
3459 + return ret;
3460 + }
3461 +diff --git a/tools/perf/tests/expr.c b/tools/perf/tests/expr.c
3462 +index 01f0706995a9..9acc1e80b936 100644
3463 +--- a/tools/perf/tests/expr.c
3464 ++++ b/tools/perf/tests/expr.c
3465 +@@ -19,7 +19,7 @@ int test__expr(struct test *t __maybe_unused, int subtest __maybe_unused)
3466 + const char *p;
3467 + const char **other;
3468 + double val;
3469 +- int ret;
3470 ++ int i, ret;
3471 + struct parse_ctx ctx;
3472 + int num_other;
3473 +
3474 +@@ -56,6 +56,9 @@ int test__expr(struct test *t __maybe_unused, int subtest __maybe_unused)
3475 + TEST_ASSERT_VAL("find other", !strcmp(other[1], "BAZ"));
3476 + TEST_ASSERT_VAL("find other", !strcmp(other[2], "BOZO"));
3477 + TEST_ASSERT_VAL("find other", other[3] == NULL);
3478 ++
3479 ++ for (i = 0; i < num_other; i++)
3480 ++ free((void *)other[i]);
3481 + free((void *)other);
3482 +
3483 + return 0;
3484 +diff --git a/tools/perf/tests/openat-syscall-all-cpus.c b/tools/perf/tests/openat-syscall-all-cpus.c
3485 +index c531e6deb104..493ecb611540 100644
3486 +--- a/tools/perf/tests/openat-syscall-all-cpus.c
3487 ++++ b/tools/perf/tests/openat-syscall-all-cpus.c
3488 +@@ -45,7 +45,7 @@ int test__openat_syscall_event_on_all_cpus(struct test *test __maybe_unused, int
3489 + if (IS_ERR(evsel)) {
3490 + tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
3491 + pr_debug("%s\n", errbuf);
3492 +- goto out_thread_map_delete;
3493 ++ goto out_cpu_map_delete;
3494 + }
3495 +
3496 + if (perf_evsel__open(evsel, cpus, threads) < 0) {
3497 +@@ -119,6 +119,8 @@ out_close_fd:
3498 + perf_evsel__close_fd(evsel);
3499 + out_evsel_delete:
3500 + perf_evsel__delete(evsel);
3501 ++out_cpu_map_delete:
3502 ++ cpu_map__put(cpus);
3503 + out_thread_map_delete:
3504 + thread_map__put(threads);
3505 + return err;
3506 +diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
3507 +index 04b1d53e4bf9..1d352621bd48 100644
3508 +--- a/tools/perf/util/build-id.c
3509 ++++ b/tools/perf/util/build-id.c
3510 +@@ -183,6 +183,7 @@ char *build_id_cache__linkname(const char *sbuild_id, char *bf, size_t size)
3511 + return bf;
3512 + }
3513 +
3514 ++/* The caller is responsible for freeing the returned buffer. */
3515 + char *build_id_cache__origname(const char *sbuild_id)
3516 + {
3517 + char *linkname;
3518 +diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
3519 +index 1ea8f898f1a1..9ecdbd5986b3 100644
3520 +--- a/tools/perf/util/config.c
3521 ++++ b/tools/perf/util/config.c
3522 +@@ -632,11 +632,10 @@ static int collect_config(const char *var, const char *value,
3523 + }
3524 +
3525 + ret = set_value(item, value);
3526 +- return ret;
3527 +
3528 + out_free:
3529 + free(key);
3530 +- return -1;
3531 ++ return ret;
3532 + }
3533 +
3534 + int perf_config_set__collect(struct perf_config_set *set, const char *file_name,
3535 +diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
3536 +index dbc0466db368..50c933044f88 100644
3537 +--- a/tools/perf/util/evsel.c
3538 ++++ b/tools/perf/util/evsel.c
3539 +@@ -1289,6 +1289,7 @@ void perf_evsel__exit(struct perf_evsel *evsel)
3540 + {
3541 + assert(list_empty(&evsel->node));
3542 + assert(evsel->evlist == NULL);
3543 ++ perf_evsel__free_counts(evsel);
3544 + perf_evsel__free_fd(evsel);
3545 + perf_evsel__free_id(evsel);
3546 + perf_evsel__free_config_terms(evsel);
3547 +@@ -1341,8 +1342,7 @@ void perf_counts_values__scale(struct perf_counts_values *count,
3548 + scaled = 1;
3549 + count->val = (u64)((double) count->val * count->ena / count->run + 0.5);
3550 + }
3551 +- } else
3552 +- count->ena = count->run = 0;
3553 ++ }
3554 +
3555 + if (pscaled)
3556 + *pscaled = scaled;
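
This hunk stops zeroing ena/run for counters that were never scheduled, so
readers can distinguish "not counted" from a genuine zero; it pairs with the
stat.c change further down, which now always requests the enabled/running
times and leaves --no-scale to skip only the multiplication. The scale-up rule
itself is the one visible in the context above, val * ena / run with rounding;
a worked standalone example:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Enabled 10 ms, on the PMU for only 4 ms, 1000 raw counts. */
            uint64_t val = 1000, ena = 10000000, run = 4000000;
            uint64_t scaled = (uint64_t)((double)val * ena / run + 0.5);

            printf("raw=%llu scaled=%llu\n",
                   (unsigned long long)val,
                   (unsigned long long)scaled);   /* scaled = 2500 */
            return 0;
    }
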
3557 +diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
3558 +index 8aad8330e392..e416e76f5600 100644
3559 +--- a/tools/perf/util/hist.c
3560 ++++ b/tools/perf/util/hist.c
3561 +@@ -1048,8 +1048,10 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
3562 +
3563 + err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
3564 + iter->evsel, al, max_stack_depth);
3565 +- if (err)
3566 ++ if (err) {
3567 ++ map__put(alm);
3568 + return err;
3569 ++ }
3570 +
3571 + err = iter->ops->prepare_entry(iter, al);
3572 + if (err)
3573 +diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
3574 +index 6751301a755c..2b37f56f0549 100644
3575 +--- a/tools/perf/util/map.c
3576 ++++ b/tools/perf/util/map.c
3577 +@@ -571,10 +571,25 @@ static void __maps__purge(struct maps *maps)
3578 + }
3579 + }
3580 +
3581 ++static void __maps__purge_names(struct maps *maps)
3582 ++{
3583 ++ struct rb_root *root = &maps->names;
3584 ++ struct rb_node *next = rb_first(root);
3585 ++
3586 ++ while (next) {
3587 ++ struct map *pos = rb_entry(next, struct map, rb_node_name);
3588 ++
3589 ++ next = rb_next(&pos->rb_node_name);
3590 ++ rb_erase_init(&pos->rb_node_name, root);
3591 ++ map__put(pos);
3592 ++ }
3593 ++}
3594 ++
3595 + static void maps__exit(struct maps *maps)
3596 + {
3597 + down_write(&maps->lock);
3598 + __maps__purge(maps);
3599 ++ __maps__purge_names(maps);
3600 + up_write(&maps->lock);
3601 + }
3602 +
3603 +@@ -911,6 +926,9 @@ static void __maps__remove(struct maps *maps, struct map *map)
3604 + {
3605 + rb_erase_init(&map->rb_node, &maps->entries);
3606 + map__put(map);
3607 ++
3608 ++ rb_erase_init(&map->rb_node_name, &maps->names);
3609 ++ map__put(map);
3610 + }
3611 +
3612 + void maps__remove(struct maps *maps, struct map *map)
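
These map.c hunks all follow from one invariant: a struct map is now indexed
twice, by address in maps->entries and by name in maps->names, and each index
owns a reference. Teardown therefore purges both trees, and __maps__remove
drops one reference per tree. The invariant in a standalone refcount sketch:

    #include <stdio.h>

    struct obj { int refcnt; };

    static void get(struct obj *o) { o->refcnt++; }

    static void put(struct obj *o)
    {
            if (--o->refcnt == 0)
                    printf("freed\n");
    }

    int main(void)
    {
            struct obj o = { .refcnt = 1 };  /* creator's reference */

            get(&o);                         /* held by the address tree */
            get(&o);                         /* held by the name tree */
            put(&o);                         /* erased from the address tree */
            put(&o);                         /* erased from the name tree */
            put(&o);                         /* creator drops its ref: freed */
            return 0;
    }
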
3613 +diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c
3614 +index ea523d3b248f..989fed6f43b5 100644
3615 +--- a/tools/perf/util/ordered-events.c
3616 ++++ b/tools/perf/util/ordered-events.c
3617 +@@ -270,6 +270,8 @@ static int __ordered_events__flush(struct ordered_events *oe, enum oe_flush how,
3618 + "FINAL",
3619 + "ROUND",
3620 + "HALF ",
3621 ++ "TOP ",
3622 ++ "TIME ",
3623 + };
3624 + int err;
3625 + bool show_progress = false;
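
The two added strings repair a name table that had fallen behind its enum:
flush modes for TOP and TIME existed without entries, so this debug path
indexed past the array. One hedged way to make such drift a build error (not
what the patch does; the enum members below are hypothetical, mirroring the
table):

    #include <assert.h>

    enum oe_flush {
            OE_FLUSH__NONE, OE_FLUSH__FINAL, OE_FLUSH__ROUND,
            OE_FLUSH__HALF, OE_FLUSH__TOP, OE_FLUSH__TIME,
            OE_FLUSH__MAX,
    };

    static const char * const str[] = {
            "NONE", "FINAL", "ROUND", "HALF ", "TOP ", "TIME ",
    };

    /* Fails to compile if the enum and the table drift apart again. */
    static_assert(sizeof(str) / sizeof(str[0]) == OE_FLUSH__MAX,
                  "oe_flush name table out of sync");

    int main(void) { return 0; }
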
3626 +diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
3627 +index 920e1e6551dd..03860313313c 100644
3628 +--- a/tools/perf/util/parse-events.c
3629 ++++ b/tools/perf/util/parse-events.c
3630 +@@ -2271,6 +2271,7 @@ static bool is_event_supported(u8 type, unsigned config)
3631 + perf_evsel__delete(evsel);
3632 + }
3633 +
3634 ++ thread_map__put(tmap);
3635 + return ret;
3636 + }
3637 +
3638 +@@ -2341,6 +2342,7 @@ void print_sdt_events(const char *subsys_glob, const char *event_glob,
3639 + printf(" %-50s [%s]\n", buf, "SDT event");
3640 + free(buf);
3641 + }
3642 ++ free(path);
3643 + } else
3644 + printf(" %-50s [%s]\n", nd->s, "SDT event");
3645 + if (nd2) {
3646 +diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
3647 +index 4d40515307b8..2856cc9d5a31 100644
3648 +--- a/tools/perf/util/stat.c
3649 ++++ b/tools/perf/util/stat.c
3650 +@@ -291,10 +291,8 @@ process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel
3651 + break;
3652 + case AGGR_GLOBAL:
3653 + aggr->val += count->val;
3654 +- if (config->scale) {
3655 +- aggr->ena += count->ena;
3656 +- aggr->run += count->run;
3657 +- }
3658 ++ aggr->ena += count->ena;
3659 ++ aggr->run += count->run;
3660 + case AGGR_UNSET:
3661 + default:
3662 + break;
3663 +@@ -442,10 +440,8 @@ int create_perf_stat_counter(struct perf_evsel *evsel,
3664 + struct perf_event_attr *attr = &evsel->attr;
3665 + struct perf_evsel *leader = evsel->leader;
3666 +
3667 +- if (config->scale) {
3668 +- attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
3669 +- PERF_FORMAT_TOTAL_TIME_RUNNING;
3670 +- }
3671 ++ attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
3672 ++ PERF_FORMAT_TOTAL_TIME_RUNNING;
3673 +
3674 + /*
3675 + * The event is part of non trivial group, let's enable
3676 +diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
3677 +index 9327c0ddc3a5..c3fad065c89c 100644
3678 +--- a/tools/power/x86/turbostat/turbostat.c
3679 ++++ b/tools/power/x86/turbostat/turbostat.c
3680 +@@ -5077,6 +5077,9 @@ int fork_it(char **argv)
3681 + signal(SIGQUIT, SIG_IGN);
3682 + if (waitpid(child_pid, &status, 0) == -1)
3683 + err(status, "waitpid");
3684 ++
3685 ++ if (WIFEXITED(status))
3686 ++ status = WEXITSTATUS(status);
3687 + }
3688 + /*
3689 + * n.b. fork_it() does not check for errors from for_all_cpus()