From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Wed, 22 Sep 2021 11:40:46
Message-Id: 1632310829.9008f39a3d63e3006ef77c698aa32930cc2aebae.mpagano@gentoo
commit:     9008f39a3d63e3006ef77c698aa32930cc2aebae
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Sep 22 11:40:29 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Sep 22 11:40:29 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9008f39a

Linux patch 4.19.207

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |    4 +
 1206_linux-4.19.207.patch | 9342 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 9346 insertions(+)

diff --git a/0000_README b/0000_README
index 1c6d7ee..6a76482 100644
--- a/0000_README
+++ b/0000_README
@@ -863,6 +863,10 @@ Patch: 1205_linux-4.19.206.patch
From: https://www.kernel.org
Desc: Linux 4.19.206

+Patch: 1206_linux-4.19.207.patch
+From: https://www.kernel.org
+Desc: Linux 4.19.207
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1206_linux-4.19.207.patch b/1206_linux-4.19.207.patch
new file mode 100644
index 0000000..7309df1
--- /dev/null
+++ b/1206_linux-4.19.207.patch
@@ -0,0 +1,9342 @@
+diff --git a/Documentation/admin-guide/devices.txt b/Documentation/admin-guide/devices.txt
+index 1649117e6087d..9cd7926c87814 100644
+--- a/Documentation/admin-guide/devices.txt
++++ b/Documentation/admin-guide/devices.txt
+@@ -2993,10 +2993,10 @@
+ 65 = /dev/infiniband/issm1 Second InfiniBand IsSM device
+ ...
+ 127 = /dev/infiniband/issm63 63rd InfiniBand IsSM device
+- 128 = /dev/infiniband/uverbs0 First InfiniBand verbs device
+- 129 = /dev/infiniband/uverbs1 Second InfiniBand verbs device
++ 192 = /dev/infiniband/uverbs0 First InfiniBand verbs device
++ 193 = /dev/infiniband/uverbs1 Second InfiniBand verbs device
+ ...
+- 159 = /dev/infiniband/uverbs31 31st InfiniBand verbs device
++ 223 = /dev/infiniband/uverbs31 31st InfiniBand verbs device
+
+ 232 char Biometric Devices
+ 0 = /dev/biometric/sensor0/fingerprint first fingerprint sensor on first device
+diff --git a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
+index c059ab74ed886..a4a75fa795249 100644
+--- a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
++++ b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
+@@ -122,7 +122,7 @@ on various other factors also like;
+ so the device should have enough free bytes available its OOB/Spare
+ area to accommodate ECC for entire page. In general following expression
+ helps in determining if given device can accommodate ECC syndrome:
+- "2 + (PAGESIZE / 512) * ECC_BYTES" >= OOBSIZE"
++ "2 + (PAGESIZE / 512) * ECC_BYTES" <= OOBSIZE"
+ where
+ OOBSIZE number of bytes in OOB/spare area
+ PAGESIZE number of bytes in main-area of device page
+diff --git a/Makefile b/Makefile
+index 3a3eea3ab10a5..77dd62aa0bbe5 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 206
++SUBLEVEL = 207
+ EXTRAVERSION =
+ NAME = "People's Front"
+
+diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
+index cf9619d4efb4f..c5254c5967ed6 100644
+--- a/arch/arc/mm/cache.c
++++ b/arch/arc/mm/cache.c
+@@ -1112,7 +1112,7 @@ void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
+ clear_page(to);
+ clear_bit(PG_dc_clean, &page->flags);
+ }
+-
++EXPORT_SYMBOL(clear_user_page);
+
+ /**********************************************************************
+ * Explicit Cache flush request from user space via syscall
+diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
+index ec2327a3796d5..1b3a4144646b0 100644
+--- a/arch/arm/boot/compressed/Makefile
++++ b/arch/arm/boot/compressed/Makefile
+@@ -90,6 +90,8 @@ $(addprefix $(obj)/,$(libfdt_objs) atags_to_fdt.o): \
+ $(addprefix $(obj)/,$(libfdt_hdrs))
+
+ ifeq ($(CONFIG_ARM_ATAG_DTB_COMPAT),y)
++CFLAGS_REMOVE_atags_to_fdt.o += -Wframe-larger-than=${CONFIG_FRAME_WARN}
++CFLAGS_atags_to_fdt.o += -Wframe-larger-than=1280
+ OBJS += $(libfdt_objs) atags_to_fdt.o
+ endif
+
+diff --git a/arch/arm/boot/dts/imx53-ppd.dts b/arch/arm/boot/dts/imx53-ppd.dts
+index f202396e3f2a8..f346673d34ead 100644
+--- a/arch/arm/boot/dts/imx53-ppd.dts
++++ b/arch/arm/boot/dts/imx53-ppd.dts
+@@ -70,6 +70,12 @@
+ clock-frequency = <11289600>;
+ };
+
++ achc_24M: achc-clock {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <24000000>;
++ };
++
+ sgtlsound: sound {
+ compatible = "fsl,imx53-cpuvo-sgtl5000",
+ "fsl,imx-audio-sgtl5000";
+@@ -287,16 +293,13 @@
+ &gpio4 12 GPIO_ACTIVE_LOW>;
+ status = "okay";
+
+- spidev0: spi@0 {
+- compatible = "ge,achc";
+- reg = <0>;
+- spi-max-frequency = <1000000>;
+- };
+-
+- spidev1: spi@1 {
+- compatible = "ge,achc";
+- reg = <1>;
+- spi-max-frequency = <1000000>;
++ spidev0: spi@1 {
++ compatible = "ge,achc", "nxp,kinetis-k20";
++ reg = <1>, <0>;
++ vdd-supply = <&reg_3v3>;
++ vdda-supply = <&reg_3v3>;
++ clocks = <&achc_24M>;
++ reset-gpios = <&gpio3 6 GPIO_ACTIVE_LOW>;
+ };
+
+ gpioxra0: gpio@2 {
+diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
+index 4a99c92551049..d0153bbbdbeb8 100644
+--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
++++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
+@@ -1296,9 +1296,9 @@
+ <&mmcc DSI1_BYTE_CLK>,
+ <&mmcc DSI_PIXEL_CLK>,
+ <&mmcc DSI1_ESC_CLK>;
+- clock-names = "iface_clk", "bus_clk", "core_mmss_clk",
+- "src_clk", "byte_clk", "pixel_clk",
+- "core_clk";
++ clock-names = "iface", "bus", "core_mmss",
++ "src", "byte", "pixel",
++ "core";
+
+ assigned-clocks = <&mmcc DSI1_BYTE_SRC>,
+ <&mmcc DSI1_ESC_SRC>,
+diff --git a/arch/arm/boot/dts/tegra20-tamonten.dtsi b/arch/arm/boot/dts/tegra20-tamonten.dtsi
+index 20137fc578b1b..394a6b4dc69d5 100644
+--- a/arch/arm/boot/dts/tegra20-tamonten.dtsi
++++ b/arch/arm/boot/dts/tegra20-tamonten.dtsi
+@@ -185,8 +185,9 @@
+ nvidia,pins = "ata", "atb", "atc", "atd", "ate",
+ "cdev1", "cdev2", "dap1", "dtb", "gma",
+ "gmb", "gmc", "gmd", "gme", "gpu7",
+- "gpv", "i2cp", "pta", "rm", "slxa",
+- "slxk", "spia", "spib", "uac";
++ "gpv", "i2cp", "irrx", "irtx", "pta",
++ "rm", "slxa", "slxk", "spia", "spib",
++ "uac";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ };
+@@ -211,7 +212,7 @@
+ conf_ddc {
+ nvidia,pins = "ddc", "dta", "dtd", "kbca",
+ "kbcb", "kbcc", "kbcd", "kbce", "kbcf",
+- "sdc";
++ "sdc", "uad", "uca";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ };
+@@ -221,10 +222,9 @@
+ "lvp0", "owc", "sdb";
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ };
+- conf_irrx {
+- nvidia,pins = "irrx", "irtx", "sdd", "spic",
+- "spie", "spih", "uaa", "uab", "uad",
+- "uca", "ucb";
++ conf_sdd {
++ nvidia,pins = "sdd", "spic", "spie", "spih",
++ "uaa", "uab", "ucb";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ };
+diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
+index 8cad59465af39..8b679e2ca3c3d 100644
+--- a/arch/arm/kernel/Makefile
++++ b/arch/arm/kernel/Makefile
+@@ -17,10 +17,14 @@ CFLAGS_REMOVE_return_address.o = -pg
+ # Object file lists.
+
+ obj-y := elf.o entry-common.o irq.o opcodes.o \
+- process.o ptrace.o reboot.o return_address.o \
++ process.o ptrace.o reboot.o \
+ setup.o signal.o sigreturn_codes.o \
+ stacktrace.o sys_arm.o time.o traps.o
+
++ifneq ($(CONFIG_ARM_UNWIND),y)
++obj-$(CONFIG_FRAME_POINTER) += return_address.o
++endif
++
+ obj-$(CONFIG_ATAGS) += atags_parse.o
+ obj-$(CONFIG_ATAGS_PROC) += atags_proc.o
+ obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += atags_compat.o
+diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c
+index 36ed35073289b..f945742dea449 100644
+--- a/arch/arm/kernel/return_address.c
++++ b/arch/arm/kernel/return_address.c
+@@ -10,8 +10,6 @@
+ */
+ #include <linux/export.h>
+ #include <linux/ftrace.h>
+-
+-#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
+ #include <linux/sched.h>
+
+ #include <asm/stacktrace.h>
+@@ -56,6 +54,4 @@ void *return_address(unsigned int level)
+ return NULL;
+ }
+
+-#endif /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) */
+-
+ EXPORT_SYMBOL_GPL(return_address);
+diff --git a/arch/arm/mach-imx/mmdc.c b/arch/arm/mach-imx/mmdc.c
+index ae0a61c61a6e1..14be73ca107a5 100644
+--- a/arch/arm/mach-imx/mmdc.c
++++ b/arch/arm/mach-imx/mmdc.c
+@@ -109,6 +109,7 @@ struct mmdc_pmu {
+ struct perf_event *mmdc_events[MMDC_NUM_COUNTERS];
+ struct hlist_node node;
+ struct fsl_mmdc_devtype_data *devtype_data;
++ struct clk *mmdc_ipg_clk;
+ };
+
+ /*
+@@ -474,11 +475,13 @@ static int imx_mmdc_remove(struct platform_device *pdev)
+ cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
+ perf_pmu_unregister(&pmu_mmdc->pmu);
+ iounmap(pmu_mmdc->mmdc_base);
++ clk_disable_unprepare(pmu_mmdc->mmdc_ipg_clk);
+ kfree(pmu_mmdc);
+ return 0;
+ }
+
+-static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_base)
++static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_base,
++ struct clk *mmdc_ipg_clk)
+ {
+ struct mmdc_pmu *pmu_mmdc;
+ char *name;
+@@ -506,6 +509,7 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
+ }
+
+ mmdc_num = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev);
++ pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk;
+ if (mmdc_num == 0)
+ name = "mmdc";
+ else
+@@ -541,7 +545,7 @@ pmu_free:
+
+ #else
+ #define imx_mmdc_remove NULL
+-#define imx_mmdc_perf_init(pdev, mmdc_base) 0
++#define imx_mmdc_perf_init(pdev, mmdc_base, mmdc_ipg_clk) 0
+ #endif
+
+ static int imx_mmdc_probe(struct platform_device *pdev)
+@@ -579,9 +583,11 @@ static int imx_mmdc_probe(struct platform_device *pdev)
+ val &= ~(1 << BP_MMDC_MAPSR_PSD);
+ writel_relaxed(val, reg);
+
+- err = imx_mmdc_perf_init(pdev, mmdc_base);
+- if (err)
++ err = imx_mmdc_perf_init(pdev, mmdc_base, mmdc_ipg_clk);
++ if (err) {
+ iounmap(mmdc_base);
++ clk_disable_unprepare(mmdc_ipg_clk);
++ }
+
+ return err;
+ }
+diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
+index 328ced7bfaf26..79b12e7445373 100644
+--- a/arch/arm/net/bpf_jit_32.c
++++ b/arch/arm/net/bpf_jit_32.c
+@@ -1578,6 +1578,9 @@ exit:
+ rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
+ emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code));
+ break;
++ /* speculation barrier */
++ case BPF_ST | BPF_NOSPEC:
++ break;
+ /* ST: *(size *)(dst + off) = imm */
+ case BPF_ST | BPF_MEM | BPF_W:
+ case BPF_ST | BPF_MEM | BPF_H:
+diff --git a/arch/arm64/boot/dts/exynos/exynos7.dtsi b/arch/arm64/boot/dts/exynos/exynos7.dtsi
+index 5c5e57026c275..c607297922fdb 100644
+--- a/arch/arm64/boot/dts/exynos/exynos7.dtsi
++++ b/arch/arm64/boot/dts/exynos/exynos7.dtsi
+@@ -91,7 +91,7 @@
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0x11001000 0x1000>,
+- <0x11002000 0x1000>,
++ <0x11002000 0x2000>,
+ <0x11004000 0x2000>,
+ <0x11006000 0x2000>;
+ };
+diff --git a/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts b/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts
+index c13ddee8262b9..58acf21d8d333 100644
+--- a/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts
++++ b/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts
+@@ -28,7 +28,7 @@
+ stdout-path = "serial0";
+ };
+
+- memory {
++ memory@40000000 {
+ device_type = "memory";
+ reg = <0x0 0x40000000 0x0 0x20000000>;
+ };
+diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
+index a780f6714b445..74ab40b76ad53 100644
+--- a/arch/arm64/include/asm/kernel-pgtable.h
++++ b/arch/arm64/include/asm/kernel-pgtable.h
+@@ -76,8 +76,8 @@
+ #define EARLY_KASLR (0)
+ #endif
+
+-#define EARLY_ENTRIES(vstart, vend, shift) (((vend) >> (shift)) \
+- - ((vstart) >> (shift)) + 1 + EARLY_KASLR)
++#define EARLY_ENTRIES(vstart, vend, shift) \
++ ((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1 + EARLY_KASLR)
+
+ #define EARLY_PGDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT))
+
+diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
+index 19c87b52c001f..89ab68cb35bbd 100644
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -434,7 +434,7 @@ size_t sve_state_size(struct task_struct const *task)
+ void sve_alloc(struct task_struct *task)
+ {
+ if (task->thread.sve_state) {
+- memset(task->thread.sve_state, 0, sve_state_size(current));
++ memset(task->thread.sve_state, 0, sve_state_size(task));
+ return;
+ }
+
+diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
+index c85ea70b92936..ea7f059dbcb6c 100644
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -202,7 +202,7 @@ ENDPROC(preserve_boot_args)
+ * to be composed of multiple pages. (This effectively scales the end index).
+ *
+ * vstart: virtual address of start of range
+- * vend: virtual address of end of range
++ * vend: virtual address of end of range - we map [vstart, vend]
+ * shift: shift used to transform virtual address into index
+ * ptrs: number of entries in page table
+ * istart: index in table corresponding to vstart
+@@ -239,17 +239,18 @@ ENDPROC(preserve_boot_args)
+ *
+ * tbl: location of page table
+ * rtbl: address to be used for first level page table entry (typically tbl + PAGE_SIZE)
+- * vstart: start address to map
+- * vend: end address to map - we map [vstart, vend]
++ * vstart: virtual address of start of range
++ * vend: virtual address of end of range - we map [vstart, vend - 1]
+ * flags: flags to use to map last level entries
+ * phys: physical address corresponding to vstart - physical memory is contiguous
+ * pgds: the number of pgd entries
+ *
+ * Temporaries: istart, iend, tmp, count, sv - these need to be different registers
+- * Preserves: vstart, vend, flags
+- * Corrupts: tbl, rtbl, istart, iend, tmp, count, sv
++ * Preserves: vstart, flags
++ * Corrupts: tbl, rtbl, vend, istart, iend, tmp, count, sv
+ */
+ .macro map_memory, tbl, rtbl, vstart, vend, flags, phys, pgds, istart, iend, tmp, count, sv
++ sub \vend, \vend, #1
+ add \rtbl, \tbl, #PAGE_SIZE
+ mov \sv, \rtbl
+ mov \count, #0
+diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
+index 7f0258ed1f5fe..6876e8205042a 100644
+--- a/arch/arm64/net/bpf_jit_comp.c
++++ b/arch/arm64/net/bpf_jit_comp.c
+@@ -685,6 +685,19 @@ emit_cond_jmp:
+ }
+ break;
+
++ /* speculation barrier */
++ case BPF_ST | BPF_NOSPEC:
++ /*
++ * Nothing required here.
++ *
++ * In case of arm64, we rely on the firmware mitigation of
++ * Speculative Store Bypass as controlled via the ssbd kernel
++ * parameter. Whenever the mitigation is enabled, it works
++ * for all of the kernel code with no need to provide any
++ * additional instructions.
++ */
++ break;
++
+ /* ST: *(size *)(dst + off) = imm */
+ case BPF_ST | BPF_MEM | BPF_W:
+ case BPF_ST | BPF_MEM | BPF_H:
+diff --git a/arch/m68k/emu/nfeth.c b/arch/m68k/emu/nfeth.c
+index e45ce4243aaa3..76262dc40e791 100644
+--- a/arch/m68k/emu/nfeth.c
++++ b/arch/m68k/emu/nfeth.c
+@@ -258,8 +258,8 @@ static void __exit nfeth_cleanup(void)
+
+ for (i = 0; i < MAX_UNIT; i++) {
+ if (nfeth_dev[i]) {
+- unregister_netdev(nfeth_dev[0]);
+- free_netdev(nfeth_dev[0]);
++ unregister_netdev(nfeth_dev[i]);
++ free_netdev(nfeth_dev[i]);
+ }
+ }
+ free_irq(nfEtherIRQ, nfeth_interrupt);
+diff --git a/arch/mips/mti-malta/malta-dtshim.c b/arch/mips/mti-malta/malta-dtshim.c
+index 7859b6e498634..5b5d78a7882a4 100644
+--- a/arch/mips/mti-malta/malta-dtshim.c
++++ b/arch/mips/mti-malta/malta-dtshim.c
+@@ -26,7 +26,7 @@
+ #define ROCIT_CONFIG_GEN1_MEMMAP_SHIFT 8
+ #define ROCIT_CONFIG_GEN1_MEMMAP_MASK (0xf << 8)
+
+-static unsigned char fdt_buf[16 << 10] __initdata;
++static unsigned char fdt_buf[16 << 10] __initdata __aligned(8);
+
+ /* determined physical memory size, not overridden by command line args */
+ extern unsigned long physical_memsize;
+diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
+index 3832c46286082..947a7172c814e 100644
+--- a/arch/mips/net/ebpf_jit.c
++++ b/arch/mips/net/ebpf_jit.c
+@@ -1282,6 +1282,9 @@ jeq_common:
+ }
+ break;
+
++ case BPF_ST | BPF_NOSPEC: /* speculation barrier */
++ break;
++
+ case BPF_ST | BPF_B | BPF_MEM:
+ case BPF_ST | BPF_H | BPF_MEM:
+ case BPF_ST | BPF_W | BPF_MEM:
+diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
+index 01b59d2ce1747..c2c3ce8a0f84e 100644
+--- a/arch/openrisc/kernel/entry.S
++++ b/arch/openrisc/kernel/entry.S
+@@ -551,6 +551,7 @@ EXCEPTION_ENTRY(_external_irq_handler)
+ l.bnf 1f // ext irq enabled, all ok.
+ l.nop
+
++#ifdef CONFIG_PRINTK
+ l.addi r1,r1,-0x8
+ l.movhi r3,hi(42f)
+ l.ori r3,r3,lo(42f)
+@@ -564,6 +565,7 @@ EXCEPTION_ENTRY(_external_irq_handler)
+ .string "\n\rESR interrupt bug: in _external_irq_handler (ESR %x)\n\r"
+ .align 4
+ .previous
++#endif
+
+ l.ori r4,r4,SPR_SR_IEE // fix the bug
+ // l.sw PT_SR(r1),r4
+diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
+index 342073f44d3f1..79b05eac2b33c 100644
+--- a/arch/parisc/kernel/signal.c
++++ b/arch/parisc/kernel/signal.c
+@@ -239,6 +239,12 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs,
+ #endif
+
+ usp = (regs->gr[30] & ~(0x01UL));
++#ifdef CONFIG_64BIT
++ if (is_compat_task()) {
++ /* The gcc alloca implementation leaves garbage in the upper 32 bits of sp */
++ usp = (compat_uint_t)usp;
++ }
++#endif
+ /*FIXME: frame_size parameter is unused, remove it. */
+ frame = get_sigframe(&ksig->ka, usp, sizeof(*frame));
+
+diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S
+index 9b9d17437373b..0247b8e6cb1b7 100644
+--- a/arch/powerpc/boot/crt0.S
++++ b/arch/powerpc/boot/crt0.S
+@@ -49,9 +49,6 @@ p_end: .long _end
+ p_pstack: .long _platform_stack_top
+ #endif
+
+- .globl _zimage_start
+- /* Clang appears to require the .weak directive to be after the symbol
+- * is defined. See https://bugs.llvm.org/show_bug.cgi?id=38921 */
+ .weak _zimage_start
+ _zimage_start:
+ .globl _zimage_start_lib
+diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
+index 8661eea78503f..4bf81a111179f 100644
+--- a/arch/powerpc/kernel/module_64.c
++++ b/arch/powerpc/kernel/module_64.c
+@@ -719,7 +719,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
+ /*
+ * If found, replace it with:
+ * addis r2, r12, (.TOC.-func)@ha
+- * addi r2, r12, (.TOC.-func)@l
++ * addi r2, r2, (.TOC.-func)@l
+ */
+ ((uint32_t *)location)[0] = 0x3c4c0000 + PPC_HA(value);
+ ((uint32_t *)location)[1] = 0x38420000 + PPC_LO(value);
+diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
+index 23b5f755e419b..dadcc8bab336a 100644
+--- a/arch/powerpc/kernel/stacktrace.c
++++ b/arch/powerpc/kernel/stacktrace.c
+@@ -8,6 +8,7 @@
+ * Copyright 2018 Nick Piggin, Michael Ellerman, IBM Corp.
+ */
+
++#include <linux/delay.h>
+ #include <linux/export.h>
+ #include <linux/kallsyms.h>
+ #include <linux/module.h>
+diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
+index 7e3ab477f67fe..e7d56ddba43aa 100644
+--- a/arch/powerpc/net/bpf_jit_comp64.c
++++ b/arch/powerpc/net/bpf_jit_comp64.c
+@@ -596,6 +596,12 @@ emit_clear:
+ }
+ break;
+
++ /*
++ * BPF_ST NOSPEC (speculation barrier)
++ */
++ case BPF_ST | BPF_NOSPEC:
++ break;
++
+ /*
+ * BPF_ST(X)
+ */
+diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c
+index 43fabb3cae0fa..160b86d9d8199 100644
+--- a/arch/powerpc/perf/hv-gpci.c
++++ b/arch/powerpc/perf/hv-gpci.c
+@@ -168,7 +168,7 @@ static unsigned long single_gpci_request(u32 req, u32 starting_index,
+ */
+ count = 0;
+ for (i = offset; i < offset + length; i++)
+- count |= arg->bytes[i] << (i - offset);
++ count |= (u64)(arg->bytes[i]) << ((length - 1 - (i - offset)) * 8);
+
+ *value = count;
+ out:
+diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
+index 68f415e334a59..10009a0cdb37c 100644
+--- a/arch/s390/kernel/jump_label.c
++++ b/arch/s390/kernel/jump_label.c
+@@ -41,7 +41,7 @@ static void jump_label_bug(struct jump_entry *entry, struct insn *expected,
+ unsigned char *ipe = (unsigned char *)expected;
+ unsigned char *ipn = (unsigned char *)new;
+
+- pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc);
++ pr_emerg("Jump label code mismatch at %pS [%px]\n", ipc, ipc);
+ pr_emerg("Found: %6ph\n", ipc);
+ pr_emerg("Expected: %6ph\n", ipe);
+ pr_emerg("New: %6ph\n", ipn);
+diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
+index 3515f2b55eb9e..dc5ecaea30d71 100644
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -318,13 +318,13 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
+ static void __set_cpu_idle(struct kvm_vcpu *vcpu)
+ {
+ kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
+- set_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
++ set_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.float_int.idle_mask);
+ }
+
+ static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
+ {
+ kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
+- clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
++ clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.float_int.idle_mask);
+ }
+
+ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
+diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
+index 981e3ba974616..0a2ffd5378be2 100644
+--- a/arch/s390/kvm/kvm-s390.h
++++ b/arch/s390/kvm/kvm-s390.h
+@@ -67,7 +67,7 @@ static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
+
+ static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
+ {
+- return test_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
++ return test_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.float_int.idle_mask);
+ }
+
+ static inline int kvm_is_ucontrol(struct kvm *kvm)
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index e42354b15e0bc..8508c2c0e2a3a 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -595,8 +595,13 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
+ case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */
+ if (!imm)
+ break;
+- /* agfi %dst,-imm */
+- EMIT6_IMM(0xc2080000, dst_reg, -imm);
++ if (imm == -0x80000000) {
++ /* algfi %dst,0x80000000 */
++ EMIT6_IMM(0xc20a0000, dst_reg, 0x80000000);
++ } else {
++ /* agfi %dst,-imm */
++ EMIT6_IMM(0xc2080000, dst_reg, -imm);
++ }
+ break;
+ /*
+ * BPF_MUL
+@@ -883,6 +888,11 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
+ break;
+ }
+ break;
++ /*
++ * BPF_NOSPEC (speculation barrier)
++ */
++ case BPF_ST | BPF_NOSPEC:
++ break;
+ /*
+ * BPF_ST(X)
+ */
+diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c
+index ec4da4dc98f12..1bb1e64d4377d 100644
+--- a/arch/sparc/net/bpf_jit_comp_64.c
++++ b/arch/sparc/net/bpf_jit_comp_64.c
+@@ -1261,6 +1261,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
+ emit(opcode | RS1(src) | rs2 | RD(dst), ctx);
+ break;
+ }
++ /* speculation barrier */
++ case BPF_ST | BPF_NOSPEC:
++ break;
+ /* ST: *(size *)(dst + off) = imm */
+ case BPF_ST | BPF_MEM | BPF_W:
+ case BPF_ST | BPF_MEM | BPF_H:
+diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
+index 2410bd4bb48f3..2d9e1372b070c 100644
+--- a/arch/x86/events/amd/ibs.c
++++ b/arch/x86/events/amd/ibs.c
+@@ -90,6 +90,7 @@ struct perf_ibs {
+ unsigned long offset_mask[1];
+ int offset_max;
+ unsigned int fetch_count_reset_broken : 1;
++ unsigned int fetch_ignore_if_zero_rip : 1;
+ struct cpu_perf_ibs __percpu *pcpu;
+
+ struct attribute **format_attrs;
+@@ -674,6 +675,10 @@ fail:
+ if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) {
+ regs.flags &= ~PERF_EFLAGS_EXACT;
+ } else {
++ /* Workaround for erratum #1197 */
++ if (perf_ibs->fetch_ignore_if_zero_rip && !(ibs_data.regs[1]))
++ goto out;
++
+ set_linear_ip(&regs, ibs_data.regs[1]);
+ regs.flags |= PERF_EFLAGS_EXACT;
+ }
+@@ -767,6 +772,9 @@ static __init void perf_event_ibs_init(void)
+ if (boot_cpu_data.x86 >= 0x16 && boot_cpu_data.x86 <= 0x18)
+ perf_ibs_fetch.fetch_count_reset_broken = 1;
+
++ if (boot_cpu_data.x86 == 0x19 && boot_cpu_data.x86_model < 0x10)
++ perf_ibs_fetch.fetch_ignore_if_zero_rip = 1;
++
+ perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
+
+ if (ibs_caps & IBS_CAPS_OPCNT) {
+diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
+index f03100bc5fd12..849f0ba53a9b9 100644
+--- a/arch/x86/events/intel/pt.c
++++ b/arch/x86/events/intel/pt.c
+@@ -69,7 +69,7 @@ static struct pt_cap_desc {
+ PT_CAP(topa_multiple_entries, 0, CPUID_ECX, BIT(1)),
+ PT_CAP(single_range_output, 0, CPUID_ECX, BIT(2)),
+ PT_CAP(payloads_lip, 0, CPUID_ECX, BIT(31)),
+- PT_CAP(num_address_ranges, 1, CPUID_EAX, 0x3),
++ PT_CAP(num_address_ranges, 1, CPUID_EAX, 0x7),
+ PT_CAP(mtc_periods, 1, CPUID_EAX, 0xffff0000),
+ PT_CAP(cycle_thresholds, 1, CPUID_EBX, 0xffff),
+ PT_CAP(psb_periods, 1, CPUID_EBX, 0xffff0000),
+diff --git a/arch/x86/kernel/cpu/intel_rdt_monitor.c b/arch/x86/kernel/cpu/intel_rdt_monitor.c
+index 6eeb17dfde48e..5fee674fe59d9 100644
+--- a/arch/x86/kernel/cpu/intel_rdt_monitor.c
++++ b/arch/x86/kernel/cpu/intel_rdt_monitor.c
+@@ -252,6 +252,12 @@ static u64 __mon_event_count(u32 rmid, struct rmid_read *rr)
+ case QOS_L3_MBM_LOCAL_EVENT_ID:
+ m = &rr->d->mbm_local[rmid];
+ break;
++ default:
++ /*
++ * Code would never reach here because an invalid
++ * event id would fail the __rmid_read.
++ */
++ return RMID_VAL_ERROR;
+ }
+
+ if (rr->first) {
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index 6489cc19ed069..b0f3a996df15f 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -388,10 +388,11 @@ static const struct dmi_system_id reboot_dmi_table[] __initconst = {
+ },
+ { /* Handle problems with rebooting on the OptiPlex 990. */
+ .callback = set_pci_reboot,
+- .ident = "Dell OptiPlex 990",
++ .ident = "Dell OptiPlex 990 BIOS A0x",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"),
++ DMI_MATCH(DMI_BIOS_VERSION, "A0"),
+ },
+ },
+ { /* Handle problems with rebooting on Dell 300's */
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index f913127e942a1..417abc9ba1ad4 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -2511,6 +2511,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ if (!msr_info->host_initiated) {
+ s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
+ adjust_tsc_offset_guest(vcpu, adj);
++ /* Before back to guest, tsc_timestamp must be adjusted
++ * as well, otherwise guest's percpu pvclock time could jump.
++ */
++ kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
+ }
+ vcpu->arch.ia32_tsc_adjust_msr = data;
+ }
+diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
+index 81e85a8dd3005..4b25a1ad18ffd 100644
+--- a/arch/x86/mm/init_64.c
++++ b/arch/x86/mm/init_64.c
+@@ -1289,18 +1289,18 @@ int kern_addr_valid(unsigned long addr)
+ return 0;
+
+ p4d = p4d_offset(pgd, addr);
+- if (p4d_none(*p4d))
++ if (!p4d_present(*p4d))
+ return 0;
+
+ pud = pud_offset(p4d, addr);
+- if (pud_none(*pud))
++ if (!pud_present(*pud))
+ return 0;
+
+ if (pud_large(*pud))
+ return pfn_valid(pud_pfn(*pud));
+
+ pmd = pmd_offset(pud, addr);
+- if (pmd_none(*pmd))
++ if (!pmd_present(*pmd))
+ return 0;
+
+ if (pmd_large(*pmd))
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index 924ca27a6139b..81c3d4b4c7e2c 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -731,6 +731,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
+ }
+ break;
+
++ /* speculation barrier */
++ case BPF_ST | BPF_NOSPEC:
++ if (boot_cpu_has(X86_FEATURE_XMM2))
++ /* Emit 'lfence' */
++ EMIT3(0x0F, 0xAE, 0xE8);
++ break;
++
+ /* ST: *(u8*)(dst_reg + off) = imm */
+ case BPF_ST | BPF_MEM | BPF_B:
+ if (is_ereg(dst_reg))
+diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c
+index adee990abab14..f48300988bc2a 100644
+--- a/arch/x86/net/bpf_jit_comp32.c
++++ b/arch/x86/net/bpf_jit_comp32.c
+@@ -1683,6 +1683,12 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
+ i++;
+ break;
+ }
++ /* speculation barrier */
++ case BPF_ST | BPF_NOSPEC:
++ if (boot_cpu_has(X86_FEATURE_XMM2))
++ /* Emit 'lfence' */
++ EMIT3(0x0F, 0xAE, 0xE8);
++ break;
+ /* ST: *(u8*)(dst_reg + off) = imm */
+ case BPF_ST | BPF_MEM | BPF_H:
+ case BPF_ST | BPF_MEM | BPF_B:
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index 1c3e9185934c4..7966136352b56 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -1187,6 +1187,11 @@ static void __init xen_dom0_set_legacy_features(void)
+ x86_platform.legacy.rtc = 1;
+ }
+
++static void __init xen_domu_set_legacy_features(void)
++{
++ x86_platform.legacy.rtc = 0;
++}
++
+ /* First C function to be called on Xen boot */
+ asmlinkage __visible void __init xen_start_kernel(void)
+ {
+@@ -1354,6 +1359,8 @@ asmlinkage __visible void __init xen_start_kernel(void)
+ add_preferred_console("xenboot", 0, NULL);
+ if (pci_xen)
+ x86_init.pci.arch_init = pci_xen_init;
++ x86_platform.set_legacy_features =
++ xen_domu_set_legacy_features;
+ } else {
+ const struct dom0_vga_console_info *info =
+ (void *)((char *)xen_start_info +
+diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
+index 82577eec6d0a7..f9b31eb6846c4 100644
+--- a/arch/x86/xen/p2m.c
++++ b/arch/x86/xen/p2m.c
+@@ -613,8 +613,8 @@ int xen_alloc_p2m_entry(unsigned long pfn)
+ }
+
+ /* Expanded the p2m? */
+- if (pfn > xen_p2m_last_pfn) {
+- xen_p2m_last_pfn = pfn;
++ if (pfn >= xen_p2m_last_pfn) {
++ xen_p2m_last_pfn = ALIGN(pfn + 1, P2M_PER_PAGE);
+ HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
+ }
+
+diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
+index b9ad83a0ee5db..d9b0709d6c9ce 100644
+--- a/arch/xtensa/Kconfig
++++ b/arch/xtensa/Kconfig
+@@ -25,7 +25,7 @@ config XTENSA
+ select HAVE_DMA_CONTIGUOUS
+ select HAVE_EXIT_THREAD
+ select HAVE_FUNCTION_TRACER
+- select HAVE_FUTEX_CMPXCHG if !MMU
++ select HAVE_FUTEX_CMPXCHG if !MMU && FUTEX
+ select HAVE_HW_BREAKPOINT if PERF_EVENTS
+ select HAVE_IRQ_TIME_ACCOUNTING
+ select HAVE_MEMBLOCK
+diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
+index af81a62faba64..e7faea3d73d3b 100644
+--- a/arch/xtensa/platforms/iss/console.c
++++ b/arch/xtensa/platforms/iss/console.c
+@@ -168,9 +168,13 @@ static const struct tty_operations serial_ops = {
+
+ int __init rs_init(void)
+ {
+- tty_port_init(&serial_port);
++ int ret;
+
+ serial_driver = alloc_tty_driver(SERIAL_MAX_NUM_LINES);
++ if (!serial_driver)
++ return -ENOMEM;
++
++ tty_port_init(&serial_port);
+
+ pr_info("%s %s\n", serial_name, serial_version);
+
+@@ -190,8 +194,15 @@ int __init rs_init(void)
+ tty_set_operations(serial_driver, &serial_ops);
+ tty_port_link_device(&serial_port, serial_driver, 0);
+
+- if (tty_register_driver(serial_driver))
+- panic("Couldn't register serial driver\n");
++ ret = tty_register_driver(serial_driver);
++ if (ret) {
++ pr_err("Couldn't register serial driver\n");
++ tty_driver_kref_put(serial_driver);
++ tty_port_destroy(&serial_port);
++
++ return ret;
++ }
++
+ return 0;
+ }
+
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index 5b3e5483c657c..c8c94e8e0f721 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -2137,6 +2137,15 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
+ * are likely to increase the throughput.
+ */
+ bfqq->new_bfqq = new_bfqq;
++ /*
++ * The above assignment schedules the following redirections:
++ * each time some I/O for bfqq arrives, the process that
++ * generated that I/O is disassociated from bfqq and
++ * associated with new_bfqq. Here we increases new_bfqq->ref
++ * in advance, adding the number of processes that are
++ * expected to be associated with new_bfqq as they happen to
++ * issue I/O.
++ */
+ new_bfqq->ref += process_refs;
+ return new_bfqq;
+ }
+@@ -2196,6 +2205,10 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ {
+ struct bfq_queue *in_service_bfqq, *new_bfqq;
+
++ /* if a merge has already been setup, then proceed with that first */
++ if (bfqq->new_bfqq)
++ return bfqq->new_bfqq;
++
+ /*
+ * Prevent bfqq from being merged if it has been created too
+ * long ago. The idea is that true cooperating processes, and
+@@ -2210,9 +2223,6 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ if (bfq_too_late_for_merging(bfqq))
+ return NULL;
+
+- if (bfqq->new_bfqq)
+- return bfqq->new_bfqq;
+-
+ if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
+ return NULL;
+
+@@ -4241,7 +4251,7 @@ bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
+ if (bfqq->new_ioprio >= IOPRIO_BE_NR) {
+ pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n",
+ bfqq->new_ioprio);
+- bfqq->new_ioprio = IOPRIO_BE_NR;
++ bfqq->new_ioprio = IOPRIO_BE_NR - 1;
+ }
+
+ bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
+diff --git a/block/blk-zoned.c b/block/blk-zoned.c
+index c461cf63f1f40..d33e89d28dbeb 100644
+--- a/block/blk-zoned.c
++++ b/block/blk-zoned.c
+@@ -319,9 +319,6 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
+ if (!blk_queue_is_zoned(q))
+ return -ENOTTY;
+
+- if (!capable(CAP_SYS_ADMIN))
+- return -EACCES;
+-
+ if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
+ return -EFAULT;
+
+@@ -380,9 +377,6 @@ int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
+ if (!blk_queue_is_zoned(q))
+ return -ENOTTY;
+
+- if (!capable(CAP_SYS_ADMIN))
+- return -EACCES;
+-
+ if (!(mode & FMODE_WRITE))
+ return -EBADF;
+
+diff --git a/certs/Makefile b/certs/Makefile
+index 5d0999b9e21b1..ca3c71e3a3d9f 100644
+--- a/certs/Makefile
++++ b/certs/Makefile
+@@ -46,11 +46,19 @@ endif
+ redirect_openssl = 2>&1
+ quiet_redirect_openssl = 2>&1
+ silent_redirect_openssl = 2>/dev/null
++openssl_available = $(shell openssl help 2>/dev/null && echo yes)
+
+ # We do it this way rather than having a boolean option for enabling an
+ # external private key, because 'make randconfig' might enable such a
+ # boolean option and we unfortunately can't make it depend on !RANDCONFIG.
+ ifeq ($(CONFIG_MODULE_SIG_KEY),"certs/signing_key.pem")
+
++ifeq ($(openssl_available),yes)
++X509TEXT=$(shell openssl x509 -in "certs/signing_key.pem" -text 2>/dev/null)
+
++$(if $(findstring rsaEncryption,$(X509TEXT)),,$(shell rm -f "certs/signing_key.pem"))
++endif
++
+ $(obj)/signing_key.pem: $(obj)/x509.genkey
+ @$(kecho) "###"
+ @$(kecho) "### Now generating an X.509 key pair to be used for signing modules."
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index db1d86af21b4d..b95a4194a68db 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4574,6 +4574,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM, },
++ { "Samsung SSD 860*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
++ ATA_HORKAGE_ZERO_AFTER_TRIM, },
++ { "Samsung SSD 870*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
++ ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM, },
+
+@@ -6412,7 +6416,7 @@ int ata_host_start(struct ata_host *host)
+ have_stop = 1;
+ }
+
+- if (host->ops->host_stop)
++ if (host->ops && host->ops->host_stop)
+ have_stop = 1;
+
+ if (have_stop) {
+diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
+index 6f142aa54f5f9..8487048c5ec9b 100644
+--- a/drivers/ata/sata_dwc_460ex.c
++++ b/drivers/ata/sata_dwc_460ex.c
+@@ -1253,24 +1253,20 @@ static int sata_dwc_probe(struct platform_device *ofdev)
+ irq = irq_of_parse_and_map(np, 0);
+ if (irq == NO_IRQ) {
+ dev_err(&ofdev->dev, "no SATA DMA irq\n");
+- err = -ENODEV;
+- goto error_out;
++ return -ENODEV;
+ }
+
+ #ifdef CONFIG_SATA_DWC_OLD_DMA
+ if (!of_find_property(np, "dmas", NULL)) {
+ err = sata_dwc_dma_init_old(ofdev, hsdev);
+ if (err)
+- goto error_out;
++ return err;
+ }
+ #endif
+
+ hsdev->phy = devm_phy_optional_get(hsdev->dev, "sata-phy");
+- if (IS_ERR(hsdev->phy)) {
+- err = PTR_ERR(hsdev->phy);
+- hsdev->phy = NULL;
+- goto error_out;
+- }
++ if (IS_ERR(hsdev->phy))
++ return PTR_ERR(hsdev->phy);
+
+ err = phy_init(hsdev->phy);
+ if (err)
+diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
+index 1cda505d6a852..9664cce49109b 100644
+--- a/drivers/base/power/trace.c
++++ b/drivers/base/power/trace.c
+@@ -11,6 +11,7 @@
+ #include <linux/export.h>
+ #include <linux/rtc.h>
+ #include <linux/suspend.h>
++#include <linux/init.h>
+
+ #include <linux/mc146818rtc.h>
+
+@@ -165,6 +166,9 @@ void generate_pm_trace(const void *tracedata, unsigned int user)
+ const char *file = *(const char **)(tracedata + 2);
+ unsigned int user_hash_value, file_hash_value;
+
++ if (!x86_platform.legacy.rtc)
++ return;
++
+ user_hash_value = user % USERHASH;
+ file_hash_value = hash_string(lineno, file, FILEHASH);
+ set_magic_time(user_hash_value, file_hash_value, dev_hash_value);
+@@ -267,6 +271,9 @@ static struct notifier_block pm_trace_nb = {
+
+ static int early_resume_init(void)
+ {
++ if (!x86_platform.legacy.rtc)
++ return 0;
++
+ hash_value_early_read = read_magic_time();
+ register_pm_notifier(&pm_trace_nb);
+ return 0;
+@@ -277,6 +284,9 @@ static int late_resume_init(void)
+ unsigned int val = hash_value_early_read;
+ unsigned int user, file, dev;
+
++ if (!x86_platform.legacy.rtc)
++ return 0;
++
+ user = val % USERHASH;
+ val = val / USERHASH;
+ file = val % FILEHASH;
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index e8b3353c18eb8..330ab9c85d1b8 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -1479,7 +1479,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
+ if (ret) {
+ dev_err(map->dev,
+ "Error in caching of register: %x ret: %d\n",
+- reg + i, ret);
++ reg + regmap_get_offset(map, i), ret);
+ return ret;
+ }
+ }
+diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
+index fc1f4acdd189e..c0f203deaf0b5 100644
+--- a/drivers/bcma/main.c
++++ b/drivers/bcma/main.c
+@@ -236,6 +236,7 @@ EXPORT_SYMBOL(bcma_core_irq);
+
+ void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
+ {
++ device_initialize(&core->dev);
+ core->dev.release = bcma_release_core_dev;
+ core->dev.bus = &bcma_bus_type;
+ dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index);
+@@ -299,11 +300,10 @@ static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core)
+ {
+ int err;
+
+- err = device_register(&core->dev);
++ err = device_add(&core->dev);
+ if (err) {
+ bcma_err(bus, "Could not register dev for core 0x%03X\n",
+ core->id.id);
+- put_device(&core->dev);
+ return;
+ }
+ core->dev_registered = true;
+@@ -394,7 +394,7 @@ void bcma_unregister_cores(struct bcma_bus *bus)
+ /* Now noone uses internally-handled cores, we can free them */
+ list_for_each_entry_safe(core, tmp, &bus->cores, list) {
+ list_del(&core->list);
+- kfree(core);
++ put_device(&core->dev);
+ }
+ }
+
+diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
+index e101f286ac353..60662771bd465 100644
+--- a/drivers/block/Kconfig
++++ b/drivers/block/Kconfig
+@@ -242,7 +242,7 @@ config BLK_DEV_LOOP_MIN_COUNT
+ dynamically allocated with the /dev/loop-control interface.
+
+ config BLK_DEV_CRYPTOLOOP
+- tristate "Cryptoloop Support"
++ tristate "Cryptoloop Support (DEPRECATED)"
+ select CRYPTO
+ select CRYPTO_CBC
+ depends on BLK_DEV_LOOP
+@@ -254,7 +254,7 @@ config BLK_DEV_CRYPTOLOOP
+ WARNING: This device is not safe for journaled file systems like
+ ext3 or Reiserfs. Please use the Device Mapper crypto module
+ instead, which can be configured to be on-disk compatible with the
+- cryptoloop device.
++ cryptoloop device. cryptoloop support will be removed in Linux 5.16.
+
+ source "drivers/block/drbd/Kconfig"
+
+diff --git a/drivers/block/cryptoloop.c b/drivers/block/cryptoloop.c
+index 7033a4beda669..1b84105dfe62d 100644
+--- a/drivers/block/cryptoloop.c
++++ b/drivers/block/cryptoloop.c
+@@ -201,6 +201,8 @@ init_cryptoloop(void)
+
+ if (rc)
+ printk(KERN_ERR "cryptoloop: loop_register_transfer failed\n");
++ else
++ pr_warn("the cryptoloop driver has been deprecated and will be removed in in Linux 5.16\n");
+ return rc;
+ }
+
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
+index 3806fd8fef0b1..a9490c8e82a70 100644
+--- a/drivers/clk/clk.c
++++ b/drivers/clk/clk.c
+@@ -46,11 +46,6 @@ static struct hlist_head *all_lists[] = {
+ NULL,
+ };
+
+-static struct hlist_head *orphan_list[] = {
+- &clk_orphan_list,
+- NULL,
+-};
+-
+ /*** private data structures ***/
+
+ struct clk_core {
+@@ -2629,6 +2624,11 @@ static int inited = 0;
+ static DEFINE_MUTEX(clk_debug_lock);
+ static HLIST_HEAD(clk_debug_list);
+
++static struct hlist_head *orphan_list[] = {
++ &clk_orphan_list,
++ NULL,
++};
++
+ static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
+ int level)
+ {
+diff --git a/drivers/clk/mvebu/kirkwood.c b/drivers/clk/mvebu/kirkwood.c
+index 890ebf623261b..38612cd9092eb 100644
+--- a/drivers/clk/mvebu/kirkwood.c
++++ b/drivers/clk/mvebu/kirkwood.c
+@@ -254,6 +254,7 @@ static const char *powersave_parents[] = {
+ static const struct clk_muxing_soc_desc kirkwood_mux_desc[] __initconst = {
+ { "powersave", powersave_parents, ARRAY_SIZE(powersave_parents),
+ 11, 1, 0 },
++ { }
+ };
+
+ static struct clk *clk_muxing_get_src(
+diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
+index cec90a4c79b34..7a6d4c4c0feba 100644
+--- a/drivers/clocksource/sh_cmt.c
++++ b/drivers/clocksource/sh_cmt.c
+@@ -576,7 +576,8 @@ static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag)
+ ch->flags |= flag;
+
+ /* setup timeout if no clockevent */
+- if ((flag == FLAG_CLOCKSOURCE) && (!(ch->flags & FLAG_CLOCKEVENT)))
++ if (ch->cmt->num_channels == 1 &&
++ flag == FLAG_CLOCKSOURCE && (!(ch->flags & FLAG_CLOCKEVENT)))
+ __sh_cmt_set_next(ch, ch->max_match_value);
+ out:
+ raw_spin_unlock_irqrestore(&ch->lock, flags);
+@@ -612,20 +613,25 @@ static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
+ static u64 sh_cmt_clocksource_read(struct clocksource *cs)
+ {
+ struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
+- unsigned long flags;
+ u32 has_wrapped;
+- u64 value;
+- u32 raw;
+
+- raw_spin_lock_irqsave(&ch->lock, flags);
+- value = ch->total_cycles;
+- raw = sh_cmt_get_counter(ch, &has_wrapped);
++ if (ch->cmt->num_channels == 1) {
++ unsigned long flags;
++ u64 value;
++ u32 raw;
+
+- if (unlikely(has_wrapped))
+- raw += ch->match_value + 1;
+- raw_spin_unlock_irqrestore(&ch->lock, flags);
++ raw_spin_lock_irqsave(&ch->lock, flags);
++ value = ch->total_cycles;
++ raw = sh_cmt_get_counter(ch, &has_wrapped);
++
++ if (unlikely(has_wrapped))
++ raw += ch->match_value + 1;
++ raw_spin_unlock_irqrestore(&ch->lock, flags);
++
++ return value + raw;
++ }
+
+- return value + raw;
++ return sh_cmt_get_counter(ch, &has_wrapped);
+ }
+
+ static int sh_cmt_clocksource_enable(struct clocksource *cs)
+@@ -688,7 +694,7 @@ static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
+ cs->disable = sh_cmt_clocksource_disable;
+ cs->suspend = sh_cmt_clocksource_suspend;
+ cs->resume = sh_cmt_clocksource_resume;
+- cs->mask = CLOCKSOURCE_MASK(sizeof(u64) * 8);
++ cs->mask = CLOCKSOURCE_MASK(ch->cmt->info->width);
+ cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+ dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n",
+diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
+index 5da985604692f..3ebadabf54110 100644
+--- a/drivers/cpufreq/powernv-cpufreq.c
++++ b/drivers/cpufreq/powernv-cpufreq.c
+@@ -46,6 +46,7 @@
+ #define MAX_PSTATE_SHIFT 32
+ #define LPSTATE_SHIFT 48
+ #define GPSTATE_SHIFT 56
++#define MAX_NR_CHIPS 32
+
+ #define MAX_RAMP_DOWN_TIME 5120
+ /*
+@@ -1051,12 +1052,20 @@ static int init_chip_info(void)
+ unsigned int *chip;
+ unsigned int cpu, i;
+ unsigned int prev_chip_id = UINT_MAX;
++ cpumask_t *chip_cpu_mask;
+ int ret = 0;
+
+ chip = kcalloc(num_possible_cpus(), sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
++ /* Allocate a chip cpu mask large enough to fit mask for all chips */
++ chip_cpu_mask = kcalloc(MAX_NR_CHIPS, sizeof(cpumask_t), GFP_KERNEL);
++ if (!chip_cpu_mask) {
++ ret = -ENOMEM;
++ goto free_and_return;
++ }
++
+ for_each_possible_cpu(cpu) {
+ unsigned int id = cpu_to_chip_id(cpu);
+
+@@ -1064,22 +1073,25 @@ static int init_chip_info(void)
+ prev_chip_id = id;
+ chip[nr_chips++] = id;
+ }
++ cpumask_set_cpu(cpu, &chip_cpu_mask[nr_chips-1]);
+ }
+
+ chips = kcalloc(nr_chips, sizeof(struct chip), GFP_KERNEL);
+ if (!chips) {
+ ret = -ENOMEM;
+- goto free_and_return;
++ goto out_free_chip_cpu_mask;
+ }
+
+ for (i = 0; i < nr_chips; i++) {
+ chips[i].id = chip[i];
+- cpumask_copy(&chips[i].mask, cpumask_of_node(chip[i]));
++ cpumask_copy(&chips[i].mask, &chip_cpu_mask[i]);
+ INIT_WORK(&chips[i].throttle, powernv_cpufreq_work_fn);
+ for_each_cpu(cpu, &chips[i].mask)
+ per_cpu(chip_info, cpu) = &chips[i];
+ }
+
++out_free_chip_cpu_mask:
++ kfree(chip_cpu_mask);
+ free_and_return:
+ kfree(chip);
+ return ret;
+diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
+index b0c592073a4a3..da834ae3586b6 100644
+--- a/drivers/crypto/mxs-dcp.c
++++ b/drivers/crypto/mxs-dcp.c
+@@ -167,15 +167,19 @@ static struct dcp *global_sdcp;
+
+ static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
+ {
++ int dma_err;
+ struct dcp *sdcp = global_sdcp;
+ const int chan = actx->chan;
+ uint32_t stat;
+ unsigned long ret;
+ struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
+-
+ dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
+ DMA_TO_DEVICE);
+
++ dma_err = dma_mapping_error(sdcp->dev, desc_phys);
++ if (dma_err)
++ return dma_err;
++
+ reinit_completion(&sdcp->completion[chan]);
+
+ /* Clear status register. */
+@@ -213,18 +217,29 @@ static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
+ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
+ struct ablkcipher_request *req, int init)
+ {
++ dma_addr_t key_phys, src_phys, dst_phys;
+ struct dcp *sdcp = global_sdcp;
+ struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
+ struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ int ret;
+
+- dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
+- 2 * AES_KEYSIZE_128,
+- DMA_TO_DEVICE);
+- dma_addr_t src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
+- DCP_BUF_SZ, DMA_TO_DEVICE);
+- dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
+- DCP_BUF_SZ, DMA_FROM_DEVICE);
++ key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
++ 2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
++ ret = dma_mapping_error(sdcp->dev, key_phys);
++ if (ret)
++ return ret;
++
++ src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
++ DCP_BUF_SZ, DMA_TO_DEVICE);
++ ret = dma_mapping_error(sdcp->dev, src_phys);
++ if (ret)
++ goto err_src;
++
++ dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
++ DCP_BUF_SZ, DMA_FROM_DEVICE);
++ ret = dma_mapping_error(sdcp->dev, dst_phys);
++ if (ret)
++ goto err_dst;
+
+ if (actx->fill % AES_BLOCK_SIZE) {
+ dev_err(sdcp->dev, "Invalid block size!\n");
+@@ -262,10 +277,12 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
+ ret = mxs_dcp_start_dma(actx);
+
+ aes_done_run:
++ dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
++err_dst:
++ dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
++err_src:
+ dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
+ DMA_TO_DEVICE);
+- dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
+- dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
+
+ return ret;
+ }
+@@ -280,21 +297,20 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
+
+ struct scatterlist *dst = req->dst;
+ struct scatterlist *src = req->src;
+- const int nents = sg_nents(req->src);
++ int dst_nents = sg_nents(dst);
+
+ const int out_off = DCP_BUF_SZ;
+ uint8_t *in_buf = sdcp->coh->aes_in_buf;
+ uint8_t *out_buf = sdcp->coh->aes_out_buf;
+
+- uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
+ uint32_t dst_off = 0;
++ uint8_t *src_buf = NULL;
+ uint32_t last_out_len = 0;
+
+ uint8_t *key = sdcp->coh->aes_key;
+
+ int ret = 0;
+- int split = 0;
+- unsigned int i, len, clen, rem = 0, tlen = 0;
++ unsigned int i, len, clen, tlen = 0;
+ int init = 0;
+ bool limit_hit = false;
+
+@@ -312,7 +328,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
+ memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
+ }
+
+- for_each_sg(req->src, src, nents, i) {
++ for_each_sg(req->src, src, sg_nents(src), i) {
+ src_buf = sg_virt(src);
+ len = sg_dma_len(src);
+ tlen += len;
+@@ -337,34 +353,17 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
+ * submit the buffer.
+ */
+ if (actx->fill == out_off || sg_is_last(src) ||
+- limit_hit) {
++ limit_hit) {
+ ret = mxs_dcp_run_aes(actx, req, init);
+ if (ret)
+ return ret;
+ init = 0;
+
+- out_tmp = out_buf;
++ sg_pcopy_from_buffer(dst, dst_nents, out_buf,
++ actx->fill, dst_off);
++ dst_off += actx->fill;
+ last_out_len = actx->fill;
+- while (dst && actx->fill) {
+- if (!split) {
+- dst_buf = sg_virt(dst);
+- dst_off = 0;
+- }
+- rem = min(sg_dma_len(dst) - dst_off,
+- actx->fill);
+-
+- memcpy(dst_buf + dst_off, out_tmp, rem);
+- out_tmp += rem;
+- dst_off += rem;
+- actx->fill -= rem;
+-
+- if (dst_off == sg_dma_len(dst)) {
+- dst = sg_next(dst);
+- split = 0;
+- } else {
+- split = 1;
+- }
+- }
++ actx->fill = 0;
+ }
+ } while (len);
+
+@@ -565,6 +564,10 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
+ dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
+ DCP_BUF_SZ, DMA_TO_DEVICE);
+
++ ret = dma_mapping_error(sdcp->dev, buf_phys);
++ if (ret)
++ return ret;
++
+ /* Fill in the DMA descriptor. */
+ desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
+ MXS_DCP_CONTROL0_INTERRUPT |
+@@ -597,6 +600,10 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
+ if (rctx->fini) {
+ digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
+ DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
++ ret = dma_mapping_error(sdcp->dev, digest_phys);
++ if (ret)
++ goto done_run;
++
+ desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
+ desc->payload = digest_phys;
+ }
+diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
+index 4d31ef4724366..180f2f61b8fbc 100644
+--- a/drivers/crypto/omap-sham.c
++++ b/drivers/crypto/omap-sham.c
+@@ -1739,7 +1739,7 @@ static void omap_sham_done_task(unsigned long data)
+ if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
+ goto finish;
+ } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
+- if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
++ if (test_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
+ omap_sham_update_dma_stop(dd);
+ if (dd->err) {
+ err = dd->err;
+diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
+index d2d0ae445fd89..7c7d49a8a4034 100644
+--- a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
++++ b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
+@@ -123,10 +123,10 @@ void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
+ hw_data->enable_error_correction = adf_vf_void_noop;
+ hw_data->init_admin_comms = adf_vf_int_noop;
+ hw_data->exit_admin_comms = adf_vf_void_noop;
+- hw_data->send_admin_init = adf_vf2pf_init;
++ hw_data->send_admin_init = adf_vf2pf_notify_init;
+ hw_data->init_arb = adf_vf_int_noop;
+ hw_data->exit_arb = adf_vf_void_noop;
+- hw_data->disable_iov = adf_vf2pf_shutdown;
++ hw_data->disable_iov = adf_vf2pf_notify_shutdown;
+ hw_data->get_accel_mask = get_accel_mask;
+ hw_data->get_ae_mask = get_ae_mask;
+ hw_data->get_num_accels = get_num_accels;
+diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
+index 38e4bc04f407b..90e8a7564756b 100644
+--- a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
++++ b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
+@@ -123,10 +123,10 @@ void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
+ hw_data->enable_error_correction = adf_vf_void_noop;
+ hw_data->init_admin_comms = adf_vf_int_noop;
+ hw_data->exit_admin_comms = adf_vf_void_noop;
+- hw_data->send_admin_init = adf_vf2pf_init;
++ hw_data->send_admin_init = adf_vf2pf_notify_init;
+ hw_data->init_arb = adf_vf_int_noop;
+ hw_data->exit_arb = adf_vf_void_noop;
+- hw_data->disable_iov = adf_vf2pf_shutdown;
++ hw_data->disable_iov = adf_vf2pf_notify_shutdown;
+ hw_data->get_accel_mask = get_accel_mask;
+ hw_data->get_ae_mask = get_ae_mask;
+ hw_data->get_num_accels = get_num_accels;
+diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
+index d78f8d5c89c3f..289dd7e48d4a4 100644
+--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
++++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
+@@ -239,8 +239,8 @@ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+ void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+ void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+
+-int adf_vf2pf_init(struct adf_accel_dev *accel_dev);
1585 +-void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev);
1586 ++int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev);
1587 ++void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev);
1588 + int adf_init_pf_wq(void);
1589 + void adf_exit_pf_wq(void);
1590 + int adf_init_vf_wq(void);
1591 +@@ -263,12 +263,12 @@ static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
1592 + {
1593 + }
1594 +
1595 +-static inline int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
1596 ++static inline int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
1597 + {
1598 + return 0;
1599 + }
1600 +
1601 +-static inline void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
1602 ++static inline void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
1603 + {
1604 + }
1605 +
1606 +diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
1607 +index 26556c7130497..7a7d43c475342 100644
1608 +--- a/drivers/crypto/qat/qat_common/adf_init.c
1609 ++++ b/drivers/crypto/qat/qat_common/adf_init.c
1610 +@@ -105,6 +105,7 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
1611 + struct service_hndl *service;
1612 + struct list_head *list_itr;
1613 + struct adf_hw_device_data *hw_data = accel_dev->hw_device;
1614 ++ int ret;
1615 +
1616 + if (!hw_data) {
1617 + dev_err(&GET_DEV(accel_dev),
1618 +@@ -171,9 +172,9 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
1619 + }
1620 +
1621 + hw_data->enable_error_correction(accel_dev);
1622 +- hw_data->enable_vf2pf_comms(accel_dev);
1623 ++ ret = hw_data->enable_vf2pf_comms(accel_dev);
1624 +
1625 +- return 0;
1626 ++ return ret;
1627 + }
1628 + EXPORT_SYMBOL_GPL(adf_dev_init);
1629 +
1630 +diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
1631 +index 4898ef41fd9fd..7d319c5c071c4 100644
1632 +--- a/drivers/crypto/qat/qat_common/adf_isr.c
1633 ++++ b/drivers/crypto/qat/qat_common/adf_isr.c
1634 +@@ -59,6 +59,8 @@
1635 + #include "adf_transport_access_macros.h"
1636 + #include "adf_transport_internal.h"
1637 +
1638 ++#define ADF_MAX_NUM_VFS 32
1639 ++
1640 + static int adf_enable_msix(struct adf_accel_dev *accel_dev)
1641 + {
1642 + struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
1643 +@@ -111,7 +113,7 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
1644 + struct adf_bar *pmisc =
1645 + &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
1646 + void __iomem *pmisc_bar_addr = pmisc->virt_addr;
1647 +- u32 vf_mask;
1648 ++ unsigned long vf_mask;
1649 +
1650 + /* Get the interrupt sources triggered by VFs */
1651 + vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU5) &
1652 +@@ -132,8 +134,7 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
1653 + * unless the VF is malicious and is attempting to
1654 + * flood the host OS with VF2PF interrupts.
1655 + */
1656 +- for_each_set_bit(i, (const unsigned long *)&vf_mask,
1657 +- (sizeof(vf_mask) * BITS_PER_BYTE)) {
1658 ++ for_each_set_bit(i, &vf_mask, ADF_MAX_NUM_VFS) {
1659 + vf_info = accel_dev->pf.vf_info + i;
1660 +
1661 + if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
1662 +diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
1663 +index b3875fdf6cd72..c64481160b711 100644
1664 +--- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
1665 ++++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
1666 +@@ -231,7 +231,6 @@ int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
1667 +
1668 + return ret;
1669 + }
1670 +-EXPORT_SYMBOL_GPL(adf_iov_putmsg);
1671 +
1672 + void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
1673 + {
1674 +@@ -361,6 +360,8 @@ static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
1675 + msg |= ADF_PFVF_COMPATIBILITY_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
1676 + BUILD_BUG_ON(ADF_PFVF_COMPATIBILITY_VERSION > 255);
1677 +
1678 ++ reinit_completion(&accel_dev->vf.iov_msg_completion);
1679 ++
1680 + /* Send request from VF to PF */
1681 + ret = adf_iov_putmsg(accel_dev, msg, 0);
1682 + if (ret) {
1683 +diff --git a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
1684 +index cd5f37dffe8a6..1830194567e84 100644
1685 +--- a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
1686 ++++ b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
1687 +@@ -49,14 +49,14 @@
1688 + #include "adf_pf2vf_msg.h"
1689 +
1690 + /**
1691 +- * adf_vf2pf_init() - send init msg to PF
1692 ++ * adf_vf2pf_notify_init() - send init msg to PF
1693 + * @accel_dev: Pointer to acceleration VF device.
1694 + *
1695 + * Function sends an init messge from the VF to a PF
1696 + *
1697 + * Return: 0 on success, error code otherwise.
1698 + */
1699 +-int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
1700 ++int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
1701 + {
1702 + u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
1703 + (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
1704 +@@ -69,17 +69,17 @@ int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
1705 + set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
1706 + return 0;
1707 + }
1708 +-EXPORT_SYMBOL_GPL(adf_vf2pf_init);
1709 ++EXPORT_SYMBOL_GPL(adf_vf2pf_notify_init);
1710 +
1711 + /**
1712 +- * adf_vf2pf_shutdown() - send shutdown msg to PF
1713 ++ * adf_vf2pf_notify_shutdown() - send shutdown msg to PF
1714 + * @accel_dev: Pointer to acceleration VF device.
1715 + *
1716 + * Function sends a shutdown messge from the VF to a PF
1717 + *
1718 + * Return: void
1719 + */
1720 +-void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
1721 ++void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
1722 + {
1723 + u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
1724 + (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
1725 +@@ -89,4 +89,4 @@ void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
1726 + dev_err(&GET_DEV(accel_dev),
1727 + "Failed to send Shutdown event to PF\n");
1728 + }
1729 +-EXPORT_SYMBOL_GPL(adf_vf2pf_shutdown);
1730 ++EXPORT_SYMBOL_GPL(adf_vf2pf_notify_shutdown);
1731 +diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c
1732 +index df9a1f35b8320..ef90902c8200d 100644
1733 +--- a/drivers/crypto/qat/qat_common/adf_vf_isr.c
1734 ++++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c
1735 +@@ -203,6 +203,7 @@ static irqreturn_t adf_isr(int irq, void *privdata)
1736 + struct adf_bar *pmisc =
1737 + &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
1738 + void __iomem *pmisc_bar_addr = pmisc->virt_addr;
1739 ++ bool handled = false;
1740 + u32 v_int;
1741 +
1742 + /* Read VF INT source CSR to determine the source of VF interrupt */
1743 +@@ -215,7 +216,7 @@ static irqreturn_t adf_isr(int irq, void *privdata)
1744 +
1745 + /* Schedule tasklet to handle interrupt BH */
1746 + tasklet_hi_schedule(&accel_dev->vf.pf2vf_bh_tasklet);
1747 +- return IRQ_HANDLED;
1748 ++ handled = true;
1749 + }
1750 +
1751 + /* Check bundle interrupt */
1752 +@@ -227,10 +228,10 @@ static irqreturn_t adf_isr(int irq, void *privdata)
1753 + WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number,
1754 + 0);
1755 + tasklet_hi_schedule(&bank->resp_handler);
1756 +- return IRQ_HANDLED;
1757 ++ handled = true;
1758 + }
1759 +
1760 +- return IRQ_NONE;
1761 ++ return handled ? IRQ_HANDLED : IRQ_NONE;
1762 + }
1763 +
1764 + static int adf_request_msi_irq(struct adf_accel_dev *accel_dev)
1765 +diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
1766 +index a3b4dd8099a7b..3a8361c83f0b1 100644
1767 +--- a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
1768 ++++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
1769 +@@ -123,10 +123,10 @@ void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
1770 + hw_data->enable_error_correction = adf_vf_void_noop;
1771 + hw_data->init_admin_comms = adf_vf_int_noop;
1772 + hw_data->exit_admin_comms = adf_vf_void_noop;
1773 +- hw_data->send_admin_init = adf_vf2pf_init;
1774 ++ hw_data->send_admin_init = adf_vf2pf_notify_init;
1775 + hw_data->init_arb = adf_vf_int_noop;
1776 + hw_data->exit_arb = adf_vf_void_noop;
1777 +- hw_data->disable_iov = adf_vf2pf_shutdown;
1778 ++ hw_data->disable_iov = adf_vf2pf_notify_shutdown;
1779 + hw_data->get_accel_mask = get_accel_mask;
1780 + hw_data->get_ae_mask = get_ae_mask;
1781 + hw_data->get_num_accels = get_num_accels;
1782 +diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
1783 +index 7a55baa861e58..07e1a286ee431 100644
1784 +--- a/drivers/crypto/talitos.c
1785 ++++ b/drivers/crypto/talitos.c
1786 +@@ -853,7 +853,11 @@ static void talitos_unregister_rng(struct device *dev)
1787 + * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
1788 + */
1789 + #define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1)
1790 ++#ifdef CONFIG_CRYPTO_DEV_TALITOS_SEC2
1791 + #define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
1792 ++#else
1793 ++#define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA256_BLOCK_SIZE)
1794 ++#endif
1795 + #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
1796 +
1797 + struct talitos_ctx {
1798 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
1799 +index f2739995c335a..199eccee0b0bb 100644
1800 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
1801 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
1802 +@@ -338,7 +338,7 @@ static void amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus,
1803 + void
1804 + amdgpu_i2c_router_select_ddc_port(const struct amdgpu_connector *amdgpu_connector)
1805 + {
1806 +- u8 val;
1807 ++ u8 val = 0;
1808 +
1809 + if (!amdgpu_connector->router.ddc_valid)
1810 + return;
1811 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
1812 +index b14ce112703f0..e25952d516e22 100644
1813 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
1814 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
1815 +@@ -216,7 +216,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
1816 + c++;
1817 + }
1818 +
1819 +- BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS);
1820 ++ BUG_ON(c > AMDGPU_BO_MAX_PLACEMENTS);
1821 +
1822 + placement->num_placement = c;
1823 + placement->placement = places;
1824 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
1825 +index 0d9e410ca01e2..dbfe5623997d4 100644
1826 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
1827 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
1828 +@@ -92,29 +92,29 @@ static ssize_t dp_link_settings_read(struct file *f, char __user *buf,
1829 +
1830 + rd_buf_ptr = rd_buf;
1831 +
1832 +- str_len = strlen("Current: %d %d %d ");
1833 +- snprintf(rd_buf_ptr, str_len, "Current: %d %d %d ",
1834 ++ str_len = strlen("Current: %d 0x%x %d ");
1835 ++ snprintf(rd_buf_ptr, str_len, "Current: %d 0x%x %d ",
1836 + link->cur_link_settings.lane_count,
1837 + link->cur_link_settings.link_rate,
1838 + link->cur_link_settings.link_spread);
1839 + rd_buf_ptr += str_len;
1840 +
1841 +- str_len = strlen("Verified: %d %d %d ");
1842 +- snprintf(rd_buf_ptr, str_len, "Verified: %d %d %d ",
1843 ++ str_len = strlen("Verified: %d 0x%x %d ");
1844 ++ snprintf(rd_buf_ptr, str_len, "Verified: %d 0x%x %d ",
1845 + link->verified_link_cap.lane_count,
1846 + link->verified_link_cap.link_rate,
1847 + link->verified_link_cap.link_spread);
1848 + rd_buf_ptr += str_len;
1849 +
1850 +- str_len = strlen("Reported: %d %d %d ");
1851 +- snprintf(rd_buf_ptr, str_len, "Reported: %d %d %d ",
1852 ++ str_len = strlen("Reported: %d 0x%x %d ");
1853 ++ snprintf(rd_buf_ptr, str_len, "Reported: %d 0x%x %d ",
1854 + link->reported_link_cap.lane_count,
1855 + link->reported_link_cap.link_rate,
1856 + link->reported_link_cap.link_spread);
1857 + rd_buf_ptr += str_len;
1858 +
1859 +- str_len = strlen("Preferred: %d %d %d ");
1860 +- snprintf(rd_buf_ptr, str_len, "Preferred: %d %d %d\n",
1861 ++ str_len = strlen("Preferred: %d 0x%x %d ");
1862 ++ snprintf(rd_buf_ptr, str_len, "Preferred: %d 0x%x %d\n",
1863 + link->preferred_link_setting.lane_count,
1864 + link->preferred_link_setting.link_rate,
1865 + link->preferred_link_setting.link_spread);
1866 +diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
1867 +index 79bafea663542..a40ea5c685728 100644
1868 +--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
1869 ++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
1870 +@@ -296,10 +296,12 @@ static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
1871 + int i;
1872 +
1873 + for (i = 0; i < ctx->mixer_count; i++) {
1874 +- DPU_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0);
1875 +- DPU_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
1876 +- DPU_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0);
1877 +- DPU_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0);
1878 ++ enum dpu_lm mixer_id = ctx->mixer_hw_caps[i].id;
1879 ++
1880 ++ DPU_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
1881 ++ DPU_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
1882 ++ DPU_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
1883 ++ DPU_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
1884 + }
1885 + }
1886 +
1887 +diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
1888 +index ff8164cc6738d..822cef472a7e9 100644
1889 +--- a/drivers/gpu/drm/msm/dsi/dsi.c
1890 ++++ b/drivers/gpu/drm/msm/dsi/dsi.c
1891 +@@ -34,8 +34,10 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
1892 + }
1893 +
1894 + phy_pdev = of_find_device_by_node(phy_node);
1895 +- if (phy_pdev)
1896 ++ if (phy_pdev) {
1897 + msm_dsi->phy = platform_get_drvdata(phy_pdev);
1898 ++ msm_dsi->phy_dev = &phy_pdev->dev;
1899 ++ }
1900 +
1901 + of_node_put(phy_node);
1902 +
1903 +@@ -44,8 +46,6 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
1904 + return -EPROBE_DEFER;
1905 + }
1906 +
1907 +- msm_dsi->phy_dev = get_device(&phy_pdev->dev);
1908 +-
1909 + return 0;
1910 + }
1911 +
1912 +diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c
1913 +index a9d2501500a19..170371770dd41 100644
1914 +--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
1915 ++++ b/drivers/gpu/ipu-v3/ipu-cpmem.c
1916 +@@ -545,21 +545,21 @@ static const struct ipu_rgb def_bgra_16 = {
1917 + .bits_per_pixel = 16,
1918 + };
1919 +
1920 +-#define Y_OFFSET(pix, x, y) ((x) + pix->width * (y))
1921 +-#define U_OFFSET(pix, x, y) ((pix->width * pix->height) + \
1922 +- (pix->width * ((y) / 2) / 2) + (x) / 2)
1923 +-#define V_OFFSET(pix, x, y) ((pix->width * pix->height) + \
1924 +- (pix->width * pix->height / 4) + \
1925 +- (pix->width * ((y) / 2) / 2) + (x) / 2)
1926 +-#define U2_OFFSET(pix, x, y) ((pix->width * pix->height) + \
1927 +- (pix->width * (y) / 2) + (x) / 2)
1928 +-#define V2_OFFSET(pix, x, y) ((pix->width * pix->height) + \
1929 +- (pix->width * pix->height / 2) + \
1930 +- (pix->width * (y) / 2) + (x) / 2)
1931 +-#define UV_OFFSET(pix, x, y) ((pix->width * pix->height) + \
1932 +- (pix->width * ((y) / 2)) + (x))
1933 +-#define UV2_OFFSET(pix, x, y) ((pix->width * pix->height) + \
1934 +- (pix->width * y) + (x))
1935 ++#define Y_OFFSET(pix, x, y) ((x) + pix->bytesperline * (y))
1936 ++#define U_OFFSET(pix, x, y) ((pix->bytesperline * pix->height) + \
1937 ++ (pix->bytesperline * ((y) / 2) / 2) + (x) / 2)
1938 ++#define V_OFFSET(pix, x, y) ((pix->bytesperline * pix->height) + \
1939 ++ (pix->bytesperline * pix->height / 4) + \
1940 ++ (pix->bytesperline * ((y) / 2) / 2) + (x) / 2)
1941 ++#define U2_OFFSET(pix, x, y) ((pix->bytesperline * pix->height) + \
1942 ++ (pix->bytesperline * (y) / 2) + (x) / 2)
1943 ++#define V2_OFFSET(pix, x, y) ((pix->bytesperline * pix->height) + \
1944 ++ (pix->bytesperline * pix->height / 2) + \
1945 ++ (pix->bytesperline * (y) / 2) + (x) / 2)
1946 ++#define UV_OFFSET(pix, x, y) ((pix->bytesperline * pix->height) + \
1947 ++ (pix->bytesperline * ((y) / 2)) + (x))
1948 ++#define UV2_OFFSET(pix, x, y) ((pix->bytesperline * pix->height) + \
1949 ++ (pix->bytesperline * y) + (x))
1950 +
1951 + #define NUM_ALPHA_CHANNELS 7
1952 +
1953 +diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
1954 +index 4dd151b2924e2..d56ef395eb693 100644
1955 +--- a/drivers/hid/hid-input.c
1956 ++++ b/drivers/hid/hid-input.c
1957 +@@ -427,8 +427,6 @@ static int hidinput_get_battery_property(struct power_supply *psy,
1958 +
1959 + if (dev->battery_status == HID_BATTERY_UNKNOWN)
1960 + val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
1961 +- else if (dev->battery_capacity == 100)
1962 +- val->intval = POWER_SUPPLY_STATUS_FULL;
1963 + else
1964 + val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
1965 + break;
1966 +diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c
1967 +index ff340d7ae2e52..6a880c2623808 100644
1968 +--- a/drivers/i2c/busses/i2c-highlander.c
1969 ++++ b/drivers/i2c/busses/i2c-highlander.c
1970 +@@ -379,7 +379,7 @@ static int highlander_i2c_probe(struct platform_device *pdev)
1971 + platform_set_drvdata(pdev, dev);
1972 +
1973 + dev->irq = platform_get_irq(pdev, 0);
1974 +- if (iic_force_poll)
1975 ++ if (dev->irq < 0 || iic_force_poll)
1976 + dev->irq = 0;
1977 +
1978 + if (dev->irq) {
1979 +diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
1980 +index 85cbe4b555786..d4fe7ccccb226 100644
1981 +--- a/drivers/i2c/busses/i2c-iop3xx.c
1982 ++++ b/drivers/i2c/busses/i2c-iop3xx.c
1983 +@@ -456,16 +456,14 @@ iop3xx_i2c_probe(struct platform_device *pdev)
1984 +
1985 + irq = platform_get_irq(pdev, 0);
1986 + if (irq < 0) {
1987 +- ret = -ENXIO;
1988 ++ ret = irq;
1989 + goto unmap;
1990 + }
1991 + ret = request_irq(irq, iop3xx_i2c_irq_handler, 0,
1992 + pdev->name, adapter_data);
1993 +
1994 +- if (ret) {
1995 +- ret = -EIO;
1996 ++ if (ret)
1997 + goto unmap;
1998 +- }
1999 +
2000 + memcpy(new_adapter->name, pdev->name, strlen(pdev->name));
2001 + new_adapter->owner = THIS_MODULE;
2002 +diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
2003 +index 2bb4d20ead32b..e09b065a6aff0 100644
2004 +--- a/drivers/i2c/busses/i2c-mt65xx.c
2005 ++++ b/drivers/i2c/busses/i2c-mt65xx.c
2006 +@@ -804,7 +804,7 @@ static int mtk_i2c_probe(struct platform_device *pdev)
2007 + return PTR_ERR(i2c->pdmabase);
2008 +
2009 + irq = platform_get_irq(pdev, 0);
2010 +- if (irq <= 0)
2011 ++ if (irq < 0)
2012 + return irq;
2013 +
2014 + init_completion(&i2c->msg_complete);
2015 +diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
2016 +index d3603e261a847..4c60369203882 100644
2017 +--- a/drivers/i2c/busses/i2c-s3c2410.c
2018 ++++ b/drivers/i2c/busses/i2c-s3c2410.c
2019 +@@ -1179,7 +1179,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
2020 + */
2021 + if (!(i2c->quirks & QUIRK_POLL)) {
2022 + i2c->irq = ret = platform_get_irq(pdev, 0);
2023 +- if (ret <= 0) {
2024 ++ if (ret < 0) {
2025 + dev_err(&pdev->dev, "cannot find IRQ\n");
2026 + clk_unprepare(i2c->clk);
2027 + return ret;
2028 +diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c
2029 +index 13fdb4dfe3562..cc3a24c43e574 100644
2030 +--- a/drivers/iio/dac/ad5624r_spi.c
2031 ++++ b/drivers/iio/dac/ad5624r_spi.c
2032 +@@ -230,7 +230,7 @@ static int ad5624r_probe(struct spi_device *spi)
2033 + if (!indio_dev)
2034 + return -ENOMEM;
2035 + st = iio_priv(indio_dev);
2036 +- st->reg = devm_regulator_get(&spi->dev, "vcc");
2037 ++ st->reg = devm_regulator_get_optional(&spi->dev, "vref");
2038 + if (!IS_ERR(st->reg)) {
2039 + ret = regulator_enable(st->reg);
2040 + if (ret)
2041 +@@ -241,6 +241,22 @@ static int ad5624r_probe(struct spi_device *spi)
2042 + goto error_disable_reg;
2043 +
2044 + voltage_uv = ret;
2045 ++ } else {
2046 ++ if (PTR_ERR(st->reg) != -ENODEV)
2047 ++ return PTR_ERR(st->reg);
2048 ++ /* Backwards compatibility. This naming is not correct */
2049 ++ st->reg = devm_regulator_get_optional(&spi->dev, "vcc");
2050 ++ if (!IS_ERR(st->reg)) {
2051 ++ ret = regulator_enable(st->reg);
2052 ++ if (ret)
2053 ++ return ret;
2054 ++
2055 ++ ret = regulator_get_voltage(st->reg);
2056 ++ if (ret < 0)
2057 ++ goto error_disable_reg;
2058 ++
2059 ++ voltage_uv = ret;
2060 ++ }
2061 + }
2062 +
2063 + spi_set_drvdata(spi, indio_dev);
2064 +diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
2065 +index 99dd8452724de..57aec656ab7fb 100644
2066 +--- a/drivers/infiniband/core/iwcm.c
2067 ++++ b/drivers/infiniband/core/iwcm.c
2068 +@@ -1173,29 +1173,34 @@ static int __init iw_cm_init(void)
2069 +
2070 + ret = iwpm_init(RDMA_NL_IWCM);
2071 + if (ret)
2072 +- pr_err("iw_cm: couldn't init iwpm\n");
2073 +- else
2074 +- rdma_nl_register(RDMA_NL_IWCM, iwcm_nl_cb_table);
2075 ++ return ret;
2076 ++
2077 + iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", 0);
2078 + if (!iwcm_wq)
2079 +- return -ENOMEM;
2080 ++ goto err_alloc;
2081 +
2082 + iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm",
2083 + iwcm_ctl_table);
2084 + if (!iwcm_ctl_table_hdr) {
2085 + pr_err("iw_cm: couldn't register sysctl paths\n");
2086 +- destroy_workqueue(iwcm_wq);
2087 +- return -ENOMEM;
2088 ++ goto err_sysctl;
2089 + }
2090 +
2091 ++ rdma_nl_register(RDMA_NL_IWCM, iwcm_nl_cb_table);
2092 + return 0;
2093 ++
2094 ++err_sysctl:
2095 ++ destroy_workqueue(iwcm_wq);
2096 ++err_alloc:
2097 ++ iwpm_exit(RDMA_NL_IWCM);
2098 ++ return -ENOMEM;
2099 + }
2100 +
2101 + static void __exit iw_cm_cleanup(void)
2102 + {
2103 ++ rdma_nl_unregister(RDMA_NL_IWCM);
2104 + unregister_net_sysctl_table(iwcm_ctl_table_hdr);
2105 + destroy_workqueue(iwcm_wq);
2106 +- rdma_nl_unregister(RDMA_NL_IWCM);
2107 + iwpm_exit(RDMA_NL_IWCM);
2108 + }
2109 +
2110 +diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
2111 +index 7787ec42f81e1..2df75db52e917 100644
2112 +--- a/drivers/md/bcache/super.c
2113 ++++ b/drivers/md/bcache/super.c
2114 +@@ -824,20 +824,20 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
2115 + n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
2116 + d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL);
2117 + if (!d->full_dirty_stripes)
2118 +- return -ENOMEM;
2119 ++ goto out_free_stripe_sectors_dirty;
2120 +
2121 + idx = ida_simple_get(&bcache_device_idx, 0,
2122 + BCACHE_DEVICE_IDX_MAX, GFP_KERNEL);
2123 + if (idx < 0)
2124 +- return idx;
2125 ++ goto out_free_full_dirty_stripes;
2126 +
2127 + if (bioset_init(&d->bio_split, 4, offsetof(struct bbio, bio),
2128 + BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
2129 +- goto err;
2130 ++ goto out_ida_remove;
2131 +
2132 + d->disk = alloc_disk(BCACHE_MINORS);
2133 + if (!d->disk)
2134 +- goto err;
2135 ++ goto out_bioset_exit;
2136 +
2137 + set_capacity(d->disk, sectors);
2138 + snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx);
2139 +@@ -872,8 +872,14 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
2140 +
2141 + return 0;
2142 +
2143 +-err:
2144 ++out_bioset_exit:
2145 ++ bioset_exit(&d->bio_split);
2146 ++out_ida_remove:
2147 + ida_simple_remove(&bcache_device_idx, idx);
2148 ++out_free_full_dirty_stripes:
2149 ++ kvfree(d->full_dirty_stripes);
2150 ++out_free_stripe_sectors_dirty:
2151 ++ kvfree(d->stripe_sectors_dirty);
2152 + return -ENOMEM;
2153 +
2154 + }
2155 +diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
2156 +index 85559f772d0d6..a6a26f8e4d8e1 100644
2157 +--- a/drivers/md/dm-crypt.c
2158 ++++ b/drivers/md/dm-crypt.c
2159 +@@ -2181,7 +2181,12 @@ static void *crypt_page_alloc(gfp_t gfp_mask, void *pool_data)
2160 + struct crypt_config *cc = pool_data;
2161 + struct page *page;
2162 +
2163 +- if (unlikely(percpu_counter_compare(&cc->n_allocated_pages, dm_crypt_pages_per_client) >= 0) &&
2164 ++ /*
2165 ++ * Note, percpu_counter_read_positive() may over (and under) estimate
2166 ++ * the current usage by at most (batch - 1) * num_online_cpus() pages,
2167 ++ * but avoids potential spinlock contention of an exact result.
2168 ++ */
2169 ++ if (unlikely(percpu_counter_read_positive(&cc->n_allocated_pages) >= dm_crypt_pages_per_client) &&
2170 + likely(gfp_mask & __GFP_NORETRY))
2171 + return NULL;
2172 +
2173 +diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
2174 +index 85077f4d257a7..a6a5cee6b9430 100644
2175 +--- a/drivers/md/dm-thin-metadata.c
2176 ++++ b/drivers/md/dm-thin-metadata.c
2177 +@@ -901,7 +901,7 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
2178 + return -EBUSY;
2179 + }
2180 +
2181 +- if (!dm_bm_is_read_only(pmd->bm) && !pmd->fail_io) {
2182 ++ if (!pmd->fail_io && !dm_bm_is_read_only(pmd->bm)) {
2183 + r = __commit_transaction(pmd);
2184 + if (r < 0)
2185 + DMWARN("%s: __commit_transaction() failed, error = %d",
2186 +diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
2187 +index 492a3f8ac1199..0401daa0f7fbd 100644
2188 +--- a/drivers/md/persistent-data/dm-block-manager.c
2189 ++++ b/drivers/md/persistent-data/dm-block-manager.c
2190 +@@ -494,7 +494,7 @@ int dm_bm_write_lock(struct dm_block_manager *bm,
2191 + void *p;
2192 + int r;
2193 +
2194 +- if (bm->read_only)
2195 ++ if (dm_bm_is_read_only(bm))
2196 + return -EPERM;
2197 +
2198 + p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
2199 +@@ -563,7 +563,7 @@ int dm_bm_write_lock_zero(struct dm_block_manager *bm,
2200 + struct buffer_aux *aux;
2201 + void *p;
2202 +
2203 +- if (bm->read_only)
2204 ++ if (dm_bm_is_read_only(bm))
2205 + return -EPERM;
2206 +
2207 + p = dm_bufio_new(bm->bufio, b, (struct dm_buffer **) result);
2208 +@@ -603,7 +603,7 @@ EXPORT_SYMBOL_GPL(dm_bm_unlock);
2209 +
2210 + int dm_bm_flush(struct dm_block_manager *bm)
2211 + {
2212 +- if (bm->read_only)
2213 ++ if (dm_bm_is_read_only(bm))
2214 + return -EPERM;
2215 +
2216 + return dm_bufio_write_dirty_buffers(bm->bufio);
2217 +@@ -617,19 +617,21 @@ void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b)
2218 +
2219 + bool dm_bm_is_read_only(struct dm_block_manager *bm)
2220 + {
2221 +- return bm->read_only;
2222 ++ return (bm ? bm->read_only : true);
2223 + }
2224 + EXPORT_SYMBOL_GPL(dm_bm_is_read_only);
2225 +
2226 + void dm_bm_set_read_only(struct dm_block_manager *bm)
2227 + {
2228 +- bm->read_only = true;
2229 ++ if (bm)
2230 ++ bm->read_only = true;
2231 + }
2232 + EXPORT_SYMBOL_GPL(dm_bm_set_read_only);
2233 +
2234 + void dm_bm_set_read_write(struct dm_block_manager *bm)
2235 + {
2236 +- bm->read_only = false;
2237 ++ if (bm)
2238 ++ bm->read_only = false;
2239 + }
2240 + EXPORT_SYMBOL_GPL(dm_bm_set_read_write);
2241 +
2242 +diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
2243 +index 3c3f8cb148451..5fa787e023c7e 100644
2244 +--- a/drivers/media/dvb-frontends/dib8000.c
2245 ++++ b/drivers/media/dvb-frontends/dib8000.c
2246 +@@ -2110,32 +2110,55 @@ static void dib8000_load_ana_fe_coefs(struct dib8000_state *state, const s16 *an
2247 + dib8000_write_word(state, 117 + mode, ana_fe[mode]);
2248 + }
2249 +
2250 +-static const u16 lut_prbs_2k[14] = {
2251 +- 0, 0x423, 0x009, 0x5C7, 0x7A6, 0x3D8, 0x527, 0x7FF, 0x79B, 0x3D6, 0x3A2, 0x53B, 0x2F4, 0x213
2252 ++static const u16 lut_prbs_2k[13] = {
2253 ++ 0x423, 0x009, 0x5C7,
2254 ++ 0x7A6, 0x3D8, 0x527,
2255 ++ 0x7FF, 0x79B, 0x3D6,
2256 ++ 0x3A2, 0x53B, 0x2F4,
2257 ++ 0x213
2258 + };
2259 +-static const u16 lut_prbs_4k[14] = {
2260 +- 0, 0x208, 0x0C3, 0x7B9, 0x423, 0x5C7, 0x3D8, 0x7FF, 0x3D6, 0x53B, 0x213, 0x029, 0x0D0, 0x48E
2261 ++
2262 ++static const u16 lut_prbs_4k[13] = {
2263 ++ 0x208, 0x0C3, 0x7B9,
2264 ++ 0x423, 0x5C7, 0x3D8,
2265 ++ 0x7FF, 0x3D6, 0x53B,
2266 ++ 0x213, 0x029, 0x0D0,
2267 ++ 0x48E
2268 + };
2269 +-static const u16 lut_prbs_8k[14] = {
2270 +- 0, 0x740, 0x069, 0x7DD, 0x208, 0x7B9, 0x5C7, 0x7FF, 0x53B, 0x029, 0x48E, 0x4C4, 0x367, 0x684
2271 ++
2272 ++static const u16 lut_prbs_8k[13] = {
2273 ++ 0x740, 0x069, 0x7DD,
2274 ++ 0x208, 0x7B9, 0x5C7,
2275 ++ 0x7FF, 0x53B, 0x029,
2276 ++ 0x48E, 0x4C4, 0x367,
2277 ++ 0x684
2278 + };
2279 +
2280 + static u16 dib8000_get_init_prbs(struct dib8000_state *state, u16 subchannel)
2281 + {
2282 + int sub_channel_prbs_group = 0;
2283 ++ int prbs_group;
2284 +
2285 +- sub_channel_prbs_group = (subchannel / 3) + 1;
2286 +- dprintk("sub_channel_prbs_group = %d , subchannel =%d prbs = 0x%04x\n", sub_channel_prbs_group, subchannel, lut_prbs_8k[sub_channel_prbs_group]);
2287 ++ sub_channel_prbs_group = subchannel / 3;
2288 ++ if (sub_channel_prbs_group >= ARRAY_SIZE(lut_prbs_2k))
2289 ++ return 0;
2290 +
2291 + switch (state->fe[0]->dtv_property_cache.transmission_mode) {
2292 + case TRANSMISSION_MODE_2K:
2293 +- return lut_prbs_2k[sub_channel_prbs_group];
2294 ++ prbs_group = lut_prbs_2k[sub_channel_prbs_group];
2295 ++ break;
2296 + case TRANSMISSION_MODE_4K:
2297 +- return lut_prbs_4k[sub_channel_prbs_group];
2298 ++ prbs_group = lut_prbs_4k[sub_channel_prbs_group];
2299 ++ break;
2300 + default:
2301 + case TRANSMISSION_MODE_8K:
2302 +- return lut_prbs_8k[sub_channel_prbs_group];
2303 ++ prbs_group = lut_prbs_8k[sub_channel_prbs_group];
2304 + }
2305 ++
2306 ++ dprintk("sub_channel_prbs_group = %d , subchannel =%d prbs = 0x%04x\n",
2307 ++ sub_channel_prbs_group, subchannel, prbs_group);
2308 ++
2309 ++ return prbs_group;
2310 + }
2311 +
2312 + static void dib8000_set_13seg_channel(struct dib8000_state *state)
2313 +@@ -2412,10 +2435,8 @@ static void dib8000_set_isdbt_common_channel(struct dib8000_state *state, u8 seq
2314 + /* TSB or ISDBT ? apply it now */
2315 + if (c->isdbt_sb_mode) {
2316 + dib8000_set_sb_channel(state);
2317 +- if (c->isdbt_sb_subchannel < 14)
2318 +- init_prbs = dib8000_get_init_prbs(state, c->isdbt_sb_subchannel);
2319 +- else
2320 +- init_prbs = 0;
2321 ++ init_prbs = dib8000_get_init_prbs(state,
2322 ++ c->isdbt_sb_subchannel);
2323 + } else {
2324 + dib8000_set_13seg_channel(state);
2325 + init_prbs = 0xfff;
2326 +@@ -3007,6 +3028,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
2327 +
2328 + unsigned long *timeout = &state->timeout;
2329 + unsigned long now = jiffies;
2330 ++ u16 init_prbs;
2331 + #ifdef DIB8000_AGC_FREEZE
2332 + u16 agc1, agc2;
2333 + #endif
2334 +@@ -3305,8 +3327,10 @@ static int dib8000_tune(struct dvb_frontend *fe)
2335 + break;
2336 +
2337 + case CT_DEMOD_STEP_11: /* 41 : init prbs autosearch */
2338 +- if (state->subchannel <= 41) {
2339 +- dib8000_set_subchannel_prbs(state, dib8000_get_init_prbs(state, state->subchannel));
2340 ++ init_prbs = dib8000_get_init_prbs(state, state->subchannel);
2341 ++
2342 ++ if (init_prbs) {
2343 ++ dib8000_set_subchannel_prbs(state, init_prbs);
2344 + *tune_state = CT_DEMOD_STEP_9;
2345 + } else {
2346 + *tune_state = CT_DEMOD_STOP;
2347 +diff --git a/drivers/media/i2c/imx258.c b/drivers/media/i2c/imx258.c
2348 +index 31a1e2294843a..85395813c0f2e 100644
2349 +--- a/drivers/media/i2c/imx258.c
2350 ++++ b/drivers/media/i2c/imx258.c
2351 +@@ -22,7 +22,7 @@
2352 + #define IMX258_CHIP_ID 0x0258
2353 +
2354 + /* V_TIMING internal */
2355 +-#define IMX258_VTS_30FPS 0x0c98
2356 ++#define IMX258_VTS_30FPS 0x0c50
2357 + #define IMX258_VTS_30FPS_2K 0x0638
2358 + #define IMX258_VTS_30FPS_VGA 0x034c
2359 + #define IMX258_VTS_MAX 0xffff
2360 +@@ -46,7 +46,7 @@
2361 + /* Analog gain control */
2362 + #define IMX258_REG_ANALOG_GAIN 0x0204
2363 + #define IMX258_ANA_GAIN_MIN 0
2364 +-#define IMX258_ANA_GAIN_MAX 0x1fff
2365 ++#define IMX258_ANA_GAIN_MAX 480
2366 + #define IMX258_ANA_GAIN_STEP 1
2367 + #define IMX258_ANA_GAIN_DEFAULT 0x0
2368 +
2369 +diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
2370 +index d114ac5243eca..4f8dc3f56785a 100644
2371 +--- a/drivers/media/i2c/tda1997x.c
2372 ++++ b/drivers/media/i2c/tda1997x.c
2373 +@@ -1695,14 +1695,15 @@ static int tda1997x_query_dv_timings(struct v4l2_subdev *sd,
2374 + struct v4l2_dv_timings *timings)
2375 + {
2376 + struct tda1997x_state *state = to_state(sd);
2377 ++ int ret;
2378 +
2379 + v4l_dbg(1, debug, state->client, "%s\n", __func__);
2380 + memset(timings, 0, sizeof(struct v4l2_dv_timings));
2381 + mutex_lock(&state->lock);
2382 +- tda1997x_detect_std(state, timings);
2383 ++ ret = tda1997x_detect_std(state, timings);
2384 + mutex_unlock(&state->lock);
2385 +
2386 +- return 0;
2387 ++ return ret;
2388 + }
2389 +
2390 + static const struct v4l2_subdev_video_ops tda1997x_video_ops = {
2391 +@@ -2229,6 +2230,7 @@ static int tda1997x_core_init(struct v4l2_subdev *sd)
2392 + /* get initial HDMI status */
2393 + state->hdmi_status = io_read(sd, REG_HDMI_FLAGS);
2394 +
2395 ++ io_write(sd, REG_EDID_ENABLE, EDID_ENABLE_A_EN | EDID_ENABLE_B_EN);
2396 + return 0;
2397 + }
2398 +
2399 +diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
2400 +index 420897241248c..4197b311cff46 100644
2401 +--- a/drivers/media/platform/qcom/venus/venc.c
2402 ++++ b/drivers/media/platform/qcom/venus/venc.c
2403 +@@ -316,6 +316,8 @@ venc_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f)
2404 + else
2405 + return NULL;
2406 + fmt = find_format(inst, pixmp->pixelformat, f->type);
2407 ++ if (!fmt)
2408 ++ return NULL;
2409 + }
2410 +
2411 + pixmp->width = clamp(pixmp->width, frame_width_min(inst),
2412 +diff --git a/drivers/media/platform/tegra-cec/tegra_cec.c b/drivers/media/platform/tegra-cec/tegra_cec.c
2413 +index aba488cd0e645..a2c20ca799c44 100644
2414 +--- a/drivers/media/platform/tegra-cec/tegra_cec.c
2415 ++++ b/drivers/media/platform/tegra-cec/tegra_cec.c
2416 +@@ -383,7 +383,11 @@ static int tegra_cec_probe(struct platform_device *pdev)
2417 + return -ENOENT;
2418 + }
2419 +
2420 +- clk_prepare_enable(cec->clk);
2421 ++ ret = clk_prepare_enable(cec->clk);
2422 ++ if (ret) {
2423 ++ dev_err(&pdev->dev, "Unable to prepare clock for CEC\n");
2424 ++ return ret;
2425 ++ }
2426 +
2427 + /* set context info. */
2428 + cec->dev = &pdev->dev;
2429 +@@ -462,9 +466,7 @@ static int tegra_cec_resume(struct platform_device *pdev)
2430 +
2431 + dev_notice(&pdev->dev, "Resuming\n");
2432 +
2433 +- clk_prepare_enable(cec->clk);
2434 +-
2435 +- return 0;
2436 ++ return clk_prepare_enable(cec->clk);
2437 + }
2438 + #endif
2439 +
2440 +diff --git a/drivers/media/rc/rc-loopback.c b/drivers/media/rc/rc-loopback.c
2441 +index 3822d9ebcb46c..5abbde7e5d5b3 100644
2442 +--- a/drivers/media/rc/rc-loopback.c
2443 ++++ b/drivers/media/rc/rc-loopback.c
2444 +@@ -52,7 +52,7 @@ static int loop_set_tx_mask(struct rc_dev *dev, u32 mask)
2445 +
2446 + if ((mask & (RXMASK_REGULAR | RXMASK_LEARNING)) != mask) {
2447 + dprintk("invalid tx mask: %u\n", mask);
2448 +- return -EINVAL;
2449 ++ return 2;
2450 + }
2451 +
2452 + dprintk("setting tx mask: %u\n", mask);
2453 +diff --git a/drivers/media/usb/dvb-usb/nova-t-usb2.c b/drivers/media/usb/dvb-usb/nova-t-usb2.c
2454 +index 43e0e0fd715b9..705c2901a89e8 100644
2455 +--- a/drivers/media/usb/dvb-usb/nova-t-usb2.c
2456 ++++ b/drivers/media/usb/dvb-usb/nova-t-usb2.c
2457 +@@ -133,7 +133,7 @@ ret:
2458 +
2459 + static int nova_t_read_mac_address (struct dvb_usb_device *d, u8 mac[6])
2460 + {
2461 +- int i;
2462 ++ int i, ret;
2463 + u8 b;
2464 +
2465 + mac[0] = 0x00;
2466 +@@ -142,7 +142,9 @@ static int nova_t_read_mac_address (struct dvb_usb_device *d, u8 mac[6])
2467 +
2468 + /* this is a complete guess, but works for my box */
2469 + for (i = 136; i < 139; i++) {
2470 +- dibusb_read_eeprom_byte(d,i, &b);
2471 ++ ret = dibusb_read_eeprom_byte(d, i, &b);
2472 ++ if (ret)
2473 ++ return ret;
2474 +
2475 + mac[5 - (i - 136)] = b;
2476 + }
2477 +diff --git a/drivers/media/usb/dvb-usb/vp702x.c b/drivers/media/usb/dvb-usb/vp702x.c
2478 +index c3529ea59da95..fcd66757b34dc 100644
2479 +--- a/drivers/media/usb/dvb-usb/vp702x.c
2480 ++++ b/drivers/media/usb/dvb-usb/vp702x.c
2481 +@@ -294,16 +294,22 @@ static int vp702x_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
2482 + static int vp702x_read_mac_addr(struct dvb_usb_device *d,u8 mac[6])
2483 + {
2484 + u8 i, *buf;
2485 ++ int ret;
2486 + struct vp702x_device_state *st = d->priv;
2487 +
2488 + mutex_lock(&st->buf_mutex);
2489 + buf = st->buf;
2490 +- for (i = 6; i < 12; i++)
2491 +- vp702x_usb_in_op(d, READ_EEPROM_REQ, i, 1, &buf[i - 6], 1);
2492 ++ for (i = 6; i < 12; i++) {
2493 ++ ret = vp702x_usb_in_op(d, READ_EEPROM_REQ, i, 1,
2494 ++ &buf[i - 6], 1);
2495 ++ if (ret < 0)
2496 ++ goto err;
2497 ++ }
2498 +
2499 + memcpy(mac, buf, 6);
2500 ++err:
2501 + mutex_unlock(&st->buf_mutex);
2502 +- return 0;
2503 ++ return ret;
2504 + }
2505 +
2506 + static int vp702x_frontend_attach(struct dvb_usb_adapter *adap)
2507 +diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c
2508 +index 3612f0d730ddd..92007a225d8ea 100644
2509 +--- a/drivers/media/usb/em28xx/em28xx-input.c
2510 ++++ b/drivers/media/usb/em28xx/em28xx-input.c
2511 +@@ -865,7 +865,6 @@ error:
2512 + kfree(ir);
2513 + ref_put:
2514 + em28xx_shutdown_buttons(dev);
2515 +- kref_put(&dev->ref, em28xx_free_device);
2516 + return err;
2517 + }
2518 +
2519 +diff --git a/drivers/media/usb/go7007/go7007-driver.c b/drivers/media/usb/go7007/go7007-driver.c
2520 +index 62aeebcdd7f71..c7b5a3321cd74 100644
2521 +--- a/drivers/media/usb/go7007/go7007-driver.c
2522 ++++ b/drivers/media/usb/go7007/go7007-driver.c
2523 +@@ -699,49 +699,23 @@ struct go7007 *go7007_alloc(const struct go7007_board_info *board,
2524 + struct device *dev)
2525 + {
2526 + struct go7007 *go;
2527 +- int i;
2528 +
2529 + go = kzalloc(sizeof(struct go7007), GFP_KERNEL);
2530 + if (go == NULL)
2531 + return NULL;
2532 + go->dev = dev;
2533 + go->board_info = board;
2534 +- go->board_id = 0;
2535 + go->tuner_type = -1;
2536 +- go->channel_number = 0;
2537 +- go->name[0] = 0;
2538 + mutex_init(&go->hw_lock);
2539 + init_waitqueue_head(&go->frame_waitq);
2540 + spin_lock_init(&go->spinlock);
2541 + go->status = STATUS_INIT;
2542 +- memset(&go->i2c_adapter, 0, sizeof(go->i2c_adapter));
2543 +- go->i2c_adapter_online = 0;
2544 +- go->interrupt_available = 0;
2545 + init_waitqueue_head(&go->interrupt_waitq);
2546 +- go->input = 0;
2547 + go7007_update_board(go);
2548 +- go->encoder_h_halve = 0;
2549 +- go->encoder_v_halve = 0;
2550 +- go->encoder_subsample = 0;
2551 + go->format = V4L2_PIX_FMT_MJPEG;
2552 + go->bitrate = 1500000;
2553 + go->fps_scale = 1;
2554 +- go->pali = 0;
2555 + go->aspect_ratio = GO7007_RATIO_1_1;
2556 +- go->gop_size = 0;
2557 +- go->ipb = 0;
2558 +- go->closed_gop = 0;
2559 +- go->repeat_seqhead = 0;
2560 +- go->seq_header_enable = 0;
2561 +- go->gop_header_enable = 0;
2562 +- go->dvd_mode = 0;
2563 +- go->interlace_coding = 0;
2564 +- for (i = 0; i < 4; ++i)
2565 +- go->modet[i].enable = 0;
2566 +- for (i = 0; i < 1624; ++i)
2567 +- go->modet_map[i] = 0;
2568 +- go->audio_deliver = NULL;
2569 +- go->audio_enabled = 0;
2570 +
2571 + return go;
2572 + }
2573 +diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
2574 +index e33fa78ef98dd..f46db1b6a27c4 100644
2575 +--- a/drivers/media/usb/stkwebcam/stk-webcam.c
2576 ++++ b/drivers/media/usb/stkwebcam/stk-webcam.c
2577 +@@ -1355,7 +1355,7 @@ static int stk_camera_probe(struct usb_interface *interface,
2578 + if (!dev->isoc_ep) {
2579 + pr_err("Could not find isoc-in endpoint\n");
2580 + err = -ENODEV;
2581 +- goto error;
2582 ++ goto error_put;
2583 + }
2584 + dev->vsettings.palette = V4L2_PIX_FMT_RGB565;
2585 + dev->vsettings.mode = MODE_VGA;
2586 +@@ -1368,10 +1368,12 @@ static int stk_camera_probe(struct usb_interface *interface,
2587 +
2588 + err = stk_register_video_device(dev);
2589 + if (err)
2590 +- goto error;
2591 ++ goto error_put;
2592 +
2593 + return 0;
2594 +
2595 ++error_put:
2596 ++ usb_put_intf(interface);
2597 + error:
2598 + v4l2_ctrl_handler_free(hdl);
2599 + v4l2_device_unregister(&dev->v4l2_dev);
2600 +diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
2601 +index 06167c51af128..2ca1e8ce6159d 100644
2602 +--- a/drivers/media/usb/uvc/uvc_v4l2.c
2603 ++++ b/drivers/media/usb/uvc/uvc_v4l2.c
2604 +@@ -900,8 +900,8 @@ static int uvc_ioctl_g_input(struct file *file, void *fh, unsigned int *input)
2605 + {
2606 + struct uvc_fh *handle = fh;
2607 + struct uvc_video_chain *chain = handle->chain;
2608 ++ u8 *buf;
2609 + int ret;
2610 +- u8 i;
2611 +
2612 + if (chain->selector == NULL ||
2613 + (chain->dev->quirks & UVC_QUIRK_IGNORE_SELECTOR_UNIT)) {
2614 +@@ -909,22 +909,27 @@ static int uvc_ioctl_g_input(struct file *file, void *fh, unsigned int *input)
2615 + return 0;
2616 + }
2617 +
2618 ++ buf = kmalloc(1, GFP_KERNEL);
2619 ++ if (!buf)
2620 ++ return -ENOMEM;
2621 ++
2622 + ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR, chain->selector->id,
2623 + chain->dev->intfnum, UVC_SU_INPUT_SELECT_CONTROL,
2624 +- &i, 1);
2625 +- if (ret < 0)
2626 +- return ret;
2627 ++ buf, 1);
2628 ++ if (!ret)
2629 ++ *input = *buf - 1;
2630 +
2631 +- *input = i - 1;
2632 +- return 0;
2633 ++ kfree(buf);
2634 ++
2635 ++ return ret;
2636 + }
2637 +
2638 + static int uvc_ioctl_s_input(struct file *file, void *fh, unsigned int input)
2639 + {
2640 + struct uvc_fh *handle = fh;
2641 + struct uvc_video_chain *chain = handle->chain;
2642 ++ u8 *buf;
2643 + int ret;
2644 +- u32 i;
2645 +
2646 + ret = uvc_acquire_privileges(handle);
2647 + if (ret < 0)
2648 +@@ -940,10 +945,17 @@ static int uvc_ioctl_s_input(struct file *file, void *fh, unsigned int input)
2649 + if (input >= chain->selector->bNrInPins)
2650 + return -EINVAL;
2651 +
2652 +- i = input + 1;
2653 +- return uvc_query_ctrl(chain->dev, UVC_SET_CUR, chain->selector->id,
2654 +- chain->dev->intfnum, UVC_SU_INPUT_SELECT_CONTROL,
2655 +- &i, 1);
2656 ++ buf = kmalloc(1, GFP_KERNEL);
2657 ++ if (!buf)
2658 ++ return -ENOMEM;
2659 ++
2660 ++ *buf = input + 1;
2661 ++ ret = uvc_query_ctrl(chain->dev, UVC_SET_CUR, chain->selector->id,
2662 ++ chain->dev->intfnum, UVC_SU_INPUT_SELECT_CONTROL,
2663 ++ buf, 1);
2664 ++ kfree(buf);
2665 ++
2666 ++ return ret;
2667 + }
2668 +
2669 + static int uvc_ioctl_queryctrl(struct file *file, void *fh,
2670 +diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
2671 +index a24b40dfec97a..af38c989ff336 100644
2672 +--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
2673 ++++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
2674 +@@ -196,7 +196,7 @@ bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t,
2675 + if (!v4l2_valid_dv_timings(t, cap, fnc, fnc_handle))
2676 + return false;
2677 +
2678 +- for (i = 0; i < v4l2_dv_timings_presets[i].bt.width; i++) {
2679 ++ for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) {
2680 + if (v4l2_valid_dv_timings(v4l2_dv_timings_presets + i, cap,
2681 + fnc, fnc_handle) &&
2682 + v4l2_match_dv_timings(t, v4l2_dv_timings_presets + i,
2683 +@@ -218,7 +218,7 @@ bool v4l2_find_dv_timings_cea861_vic(struct v4l2_dv_timings *t, u8 vic)
2684 + {
2685 + unsigned int i;
2686 +
2687 +- for (i = 0; i < v4l2_dv_timings_presets[i].bt.width; i++) {
2688 ++ for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) {
2689 + const struct v4l2_bt_timings *bt =
2690 + &v4l2_dv_timings_presets[i].bt;
2691 +
2692 +diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
2693 +index 11ab17f64c649..f0527e7698677 100644
2694 +--- a/drivers/mfd/ab8500-core.c
2695 ++++ b/drivers/mfd/ab8500-core.c
2696 +@@ -493,7 +493,7 @@ static int ab8500_handle_hierarchical_line(struct ab8500 *ab8500,
2697 + if (line == AB8540_INT_GPIO43F || line == AB8540_INT_GPIO44F)
2698 + line += 1;
2699 +
2700 +- handle_nested_irq(irq_create_mapping(ab8500->domain, line));
2701 ++ handle_nested_irq(irq_find_mapping(ab8500->domain, line));
2702 + }
2703 +
2704 + return 0;
2705 +diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
2706 +index aa65931142ba7..dcb341d627582 100644
2707 +--- a/drivers/mfd/axp20x.c
2708 ++++ b/drivers/mfd/axp20x.c
2709 +@@ -127,12 +127,13 @@ static const struct regmap_range axp288_writeable_ranges[] = {
2710 +
2711 + static const struct regmap_range axp288_volatile_ranges[] = {
2712 + regmap_reg_range(AXP20X_PWR_INPUT_STATUS, AXP288_POWER_REASON),
2713 ++ regmap_reg_range(AXP22X_PWR_OUT_CTRL1, AXP22X_ALDO3_V_OUT),
2714 + regmap_reg_range(AXP288_BC_GLOBAL, AXP288_BC_GLOBAL),
2715 + regmap_reg_range(AXP288_BC_DET_STAT, AXP20X_VBUS_IPSOUT_MGMT),
2716 + regmap_reg_range(AXP20X_CHRG_BAK_CTRL, AXP20X_CHRG_BAK_CTRL),
2717 + regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IPSOUT_V_HIGH_L),
2718 + regmap_reg_range(AXP20X_TIMER_CTRL, AXP20X_TIMER_CTRL),
2719 +- regmap_reg_range(AXP22X_GPIO_STATE, AXP22X_GPIO_STATE),
2720 ++ regmap_reg_range(AXP20X_GPIO1_CTRL, AXP22X_GPIO_STATE),
2721 + regmap_reg_range(AXP288_RT_BATT_V_H, AXP288_RT_BATT_V_L),
2722 + regmap_reg_range(AXP20X_FG_RES, AXP288_FG_CC_CAP_REG),
2723 + };
2724 +diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
2725 +index 566caca4efd8e..722ad2c368a56 100644
2726 +--- a/drivers/mfd/stmpe.c
2727 ++++ b/drivers/mfd/stmpe.c
2728 +@@ -1035,7 +1035,7 @@ static irqreturn_t stmpe_irq(int irq, void *data)
2729 +
2730 + if (variant->id_val == STMPE801_ID ||
2731 + variant->id_val == STMPE1600_ID) {
2732 +- int base = irq_create_mapping(stmpe->domain, 0);
2733 ++ int base = irq_find_mapping(stmpe->domain, 0);
2734 +
2735 + handle_nested_irq(base);
2736 + return IRQ_HANDLED;
2737 +@@ -1063,7 +1063,7 @@ static irqreturn_t stmpe_irq(int irq, void *data)
2738 + while (status) {
2739 + int bit = __ffs(status);
2740 + int line = bank * 8 + bit;
2741 +- int nestedirq = irq_create_mapping(stmpe->domain, line);
2742 ++ int nestedirq = irq_find_mapping(stmpe->domain, line);
2743 +
2744 + handle_nested_irq(nestedirq);
2745 + status &= ~(1 << bit);
2746 +diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
2747 +index cc9e563f23aa6..7062baf606858 100644
2748 +--- a/drivers/mfd/tc3589x.c
2749 ++++ b/drivers/mfd/tc3589x.c
2750 +@@ -187,7 +187,7 @@ again:
2751 +
2752 + while (status) {
2753 + int bit = __ffs(status);
2754 +- int virq = irq_create_mapping(tc3589x->domain, bit);
2755 ++ int virq = irq_find_mapping(tc3589x->domain, bit);
2756 +
2757 + handle_nested_irq(virq);
2758 + status &= ~(1 << bit);
2759 +diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c
2760 +index 18710f3b5c534..2c58d9b99a394 100644
2761 +--- a/drivers/mfd/wm8994-irq.c
2762 ++++ b/drivers/mfd/wm8994-irq.c
2763 +@@ -159,7 +159,7 @@ static irqreturn_t wm8994_edge_irq(int irq, void *data)
2764 + struct wm8994 *wm8994 = data;
2765 +
2766 + while (gpio_get_value_cansleep(wm8994->pdata.irq_gpio))
2767 +- handle_nested_irq(irq_create_mapping(wm8994->edge_irq, 0));
2768 ++ handle_nested_irq(irq_find_mapping(wm8994->edge_irq, 0));
2769 +
2770 + return IRQ_HANDLED;
2771 + }
2772 +diff --git a/drivers/misc/aspeed-lpc-ctrl.c b/drivers/misc/aspeed-lpc-ctrl.c
2773 +index a024f8042259a..870ab0dfcde06 100644
2774 +--- a/drivers/misc/aspeed-lpc-ctrl.c
2775 ++++ b/drivers/misc/aspeed-lpc-ctrl.c
2776 +@@ -50,7 +50,7 @@ static int aspeed_lpc_ctrl_mmap(struct file *file, struct vm_area_struct *vma)
2777 + unsigned long vsize = vma->vm_end - vma->vm_start;
2778 + pgprot_t prot = vma->vm_page_prot;
2779 +
2780 +- if (vma->vm_pgoff + vsize > lpc_ctrl->mem_base + lpc_ctrl->mem_size)
2781 ++ if (vma->vm_pgoff + vma_pages(vma) > lpc_ctrl->mem_size >> PAGE_SHIFT)
2782 + return -EINVAL;
2783 +
2784 + /* ast2400/2500 AHB accesses are not cache coherent */
2785 +diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
2786 +index 9bc97d6a651db..db433d285effa 100644
2787 +--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
2788 ++++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
2789 +@@ -2249,7 +2249,8 @@ int vmci_qp_broker_map(struct vmci_handle handle,
2790 +
2791 + result = VMCI_SUCCESS;
2792 +
2793 +- if (context_id != VMCI_HOST_CONTEXT_ID) {
2794 ++ if (context_id != VMCI_HOST_CONTEXT_ID &&
2795 ++ !QPBROKERSTATE_HAS_MEM(entry)) {
2796 + struct vmci_qp_page_store page_store;
2797 +
2798 + page_store.pages = guest_mem;
2799 +@@ -2356,7 +2357,8 @@ int vmci_qp_broker_unmap(struct vmci_handle handle,
2800 + goto out;
2801 + }
2802 +
2803 +- if (context_id != VMCI_HOST_CONTEXT_ID) {
2804 ++ if (context_id != VMCI_HOST_CONTEXT_ID &&
2805 ++ QPBROKERSTATE_HAS_MEM(entry)) {
2806 + qp_acquire_queue_mutex(entry->produce_q);
2807 + result = qp_save_headers(entry);
2808 + if (result < VMCI_SUCCESS)
2809 +diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
2810 +index 7b2bb32e35554..d1cc0fdbc51c8 100644
2811 +--- a/drivers/mmc/core/block.c
2812 ++++ b/drivers/mmc/core/block.c
2813 +@@ -592,6 +592,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
2814 + }
2815 +
2816 + mmc_wait_for_req(card->host, &mrq);
2817 ++ memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp));
2818 +
2819 + if (cmd.error) {
2820 + dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
2821 +@@ -641,8 +642,6 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
2822 + if (idata->ic.postsleep_min_us)
2823 + usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
2824 +
2825 +- memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
2826 +-
2827 + if (idata->rpmb || (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
2828 + /*
2829 + * Ensure RPMB/R1B command has completed by polling CMD13
2830 +diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
2831 +index 8e09586f880f1..e3991df078efb 100644
2832 +--- a/drivers/mmc/host/dw_mmc.c
2833 ++++ b/drivers/mmc/host/dw_mmc.c
2834 +@@ -808,6 +808,7 @@ static int dw_mci_edmac_start_dma(struct dw_mci *host,
2835 + int ret = 0;
2836 +
2837 + /* Set external dma config: burst size, burst width */
2838 ++ memset(&cfg, 0, sizeof(cfg));
2839 + cfg.dst_addr = host->phy_regs + fifo_offset;
2840 + cfg.src_addr = cfg.dst_addr;
2841 + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2842 +diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
2843 +index a0670e9cd0127..5553a5643f405 100644
2844 +--- a/drivers/mmc/host/moxart-mmc.c
2845 ++++ b/drivers/mmc/host/moxart-mmc.c
2846 +@@ -631,6 +631,7 @@ static int moxart_probe(struct platform_device *pdev)
2847 + host->dma_chan_tx, host->dma_chan_rx);
2848 + host->have_dma = true;
2849 +
2850 ++ memset(&cfg, 0, sizeof(cfg));
2851 + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2852 + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2853 +
2854 +diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
2855 +index 02de6a5701d6c..c1de8fa50fe8f 100644
2856 +--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
2857 ++++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
2858 +@@ -551,9 +551,22 @@ static int sd_write_long_data(struct realtek_pci_sdmmc *host,
2859 + return 0;
2860 + }
2861 +
2862 ++static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host)
2863 ++{
2864 ++ rtsx_pci_write_register(host->pcr, SD_CFG1,
2865 ++ SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_128);
2866 ++}
2867 ++
2868 ++static inline void sd_disable_initial_mode(struct realtek_pci_sdmmc *host)
2869 ++{
2870 ++ rtsx_pci_write_register(host->pcr, SD_CFG1,
2871 ++ SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_0);
2872 ++}
2873 ++
2874 + static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
2875 + {
2876 + struct mmc_data *data = mrq->data;
2877 ++ int err;
2878 +
2879 + if (host->sg_count < 0) {
2880 + data->error = host->sg_count;
2881 +@@ -562,22 +575,19 @@ static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
2882 + return data->error;
2883 + }
2884 +
2885 +- if (data->flags & MMC_DATA_READ)
2886 +- return sd_read_long_data(host, mrq);
2887 ++ if (data->flags & MMC_DATA_READ) {
2888 ++ if (host->initial_mode)
2889 ++ sd_disable_initial_mode(host);
2890 +
2891 +- return sd_write_long_data(host, mrq);
2892 +-}
2893 ++ err = sd_read_long_data(host, mrq);
2894 +
2895 +-static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host)
2896 +-{
2897 +- rtsx_pci_write_register(host->pcr, SD_CFG1,
2898 +- SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_128);
2899 +-}
2900 ++ if (host->initial_mode)
2901 ++ sd_enable_initial_mode(host);
2902 +
2903 +-static inline void sd_disable_initial_mode(struct realtek_pci_sdmmc *host)
2904 +-{
2905 +- rtsx_pci_write_register(host->pcr, SD_CFG1,
2906 +- SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_0);
2907 ++ return err;
2908 ++ }
2909 ++
2910 ++ return sd_write_long_data(host, mrq);
2911 + }
2912 +
2913 + static void sd_normal_rw(struct realtek_pci_sdmmc *host,
2914 +diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
2915 +index 9c77bfe4334f3..d1a2418b0c5e5 100644
2916 +--- a/drivers/mmc/host/sdhci-of-arasan.c
2917 ++++ b/drivers/mmc/host/sdhci-of-arasan.c
2918 +@@ -185,7 +185,12 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
2919 + * through low speeds without power cycling.
2920 + */
2921 + sdhci_set_clock(host, host->max_clk);
2922 +- phy_power_on(sdhci_arasan->phy);
2923 ++ if (phy_power_on(sdhci_arasan->phy)) {
2924 ++ pr_err("%s: Cannot power on phy.\n",
2925 ++ mmc_hostname(host->mmc));
2926 ++ return;
2927 ++ }
2928 ++
2929 + sdhci_arasan->is_phy_on = true;
2930 +
2931 + /*
2932 +@@ -221,7 +226,12 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
2933 + msleep(20);
2934 +
2935 + if (ctrl_phy) {
2936 +- phy_power_on(sdhci_arasan->phy);
2937 ++ if (phy_power_on(sdhci_arasan->phy)) {
2938 ++ pr_err("%s: Cannot power on phy.\n",
2939 ++ mmc_hostname(host->mmc));
2940 ++ return;
2941 ++ }
2942 ++
2943 + sdhci_arasan->is_phy_on = true;
2944 + }
2945 + }
2946 +@@ -395,7 +405,9 @@ static int sdhci_arasan_suspend(struct device *dev)
2947 + ret = phy_power_off(sdhci_arasan->phy);
2948 + if (ret) {
2949 + dev_err(dev, "Cannot power off phy.\n");
2950 +- sdhci_resume_host(host);
2951 ++ if (sdhci_resume_host(host))
2952 ++ dev_err(dev, "Cannot resume host.\n");
2953 ++
2954 + return ret;
2955 + }
2956 + sdhci_arasan->is_phy_on = false;
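
All three sdhci-of-arasan hunks share one theme: phy_power_on() and sdhci_resume_host() return an errno that the driver previously discarded. The generic-PHY call is the common case; a sketch of the checked form (the message text and the bail-out policy are illustrative, not the driver's):

    #include <linux/device.h>
    #include <linux/phy/phy.h>

    static int example_phy_up(struct device *dev, struct phy *phy)
    {
            /* phy_power_on() can fail; never assume the PHY came up. */
            int ret = phy_power_on(phy);

            if (ret)
                    dev_err(dev, "cannot power on phy: %d\n", ret);
            return ret;
    }
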
2957 +diff --git a/drivers/mtd/nand/raw/cafe_nand.c b/drivers/mtd/nand/raw/cafe_nand.c
2958 +index 3304594177c6d..1fee298d5680c 100644
2959 +--- a/drivers/mtd/nand/raw/cafe_nand.c
2960 ++++ b/drivers/mtd/nand/raw/cafe_nand.c
2961 +@@ -758,7 +758,7 @@ static int cafe_nand_probe(struct pci_dev *pdev,
2962 + "CAFE NAND", mtd);
2963 + if (err) {
2964 + dev_warn(&pdev->dev, "Could not register IRQ %d\n", pdev->irq);
2965 +- goto out_ior;
2966 ++ goto out_free_rs;
2967 + }
2968 +
2969 + /* Disable master reset, enable NAND clock */
2970 +@@ -802,6 +802,8 @@ static int cafe_nand_probe(struct pci_dev *pdev,
2971 + /* Disable NAND IRQ in global IRQ mask register */
2972 + cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
2973 + free_irq(pdev->irq, mtd);
2974 ++ out_free_rs:
2975 ++ free_rs(cafe->rs);
2976 + out_ior:
2977 + pci_iounmap(pdev, cafe->mmio);
2978 + out_free_mtd:
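
The cafe_nand hunk is a standard unwind-ladder repair: the Reed-Solomon decoder allocated earlier in probe was leaked when the IRQ request failed, so a new out_free_rs: label is added just above out_ior: and the failing branch retargeted to it. The idiom, sketched with hypothetical resources a/b/c, is that labels undo allocations in exactly the reverse order they were made:

    /* Hypothetical resources; stubs so the shape stands alone. */
    static int alloc_a(void) { return 0; }
    static int alloc_b(void) { return 0; }
    static int request_c(void) { return 0; }
    static void free_a(void) {}
    static void free_b(void) {}

    static int example_probe(void)
    {
            int err;

            err = alloc_a();
            if (err)
                    return err;             /* nothing to undo yet */

            err = alloc_b();
            if (err)
                    goto out_free_a;        /* undo only what succeeded */

            err = request_c();
            if (err)
                    goto out_free_b;

            return 0;

    out_free_b:
            free_b();
    out_free_a:                             /* fall-through is the point */
            free_a();
            return err;
    }
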
2979 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
2980 +index c814b266af794..d6c5f41b17f7e 100644
2981 +--- a/drivers/net/bonding/bond_main.c
2982 ++++ b/drivers/net/bonding/bond_main.c
2983 +@@ -1912,7 +1912,6 @@ static int __bond_release_one(struct net_device *bond_dev,
2984 + /* recompute stats just before removing the slave */
2985 + bond_get_stats(bond->dev, &bond->bond_stats);
2986 +
2987 +- bond_upper_dev_unlink(bond, slave);
2988 + /* unregister rx_handler early so bond_handle_frame wouldn't be called
2989 + * for this slave anymore.
2990 + */
2991 +@@ -1921,6 +1920,8 @@ static int __bond_release_one(struct net_device *bond_dev,
2992 + if (BOND_MODE(bond) == BOND_MODE_8023AD)
2993 + bond_3ad_unbind_slave(slave);
2994 +
2995 ++ bond_upper_dev_unlink(bond, slave);
2996 ++
2997 + if (bond_mode_can_use_xmit_hash(bond))
2998 + bond_update_slave_arr(bond, slave);
2999 +
3000 +diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
3001 +index 7eaeab65d39f5..451121f47c89a 100644
3002 +--- a/drivers/net/dsa/b53/b53_common.c
3003 ++++ b/drivers/net/dsa/b53/b53_common.c
3004 +@@ -2135,9 +2135,8 @@ static int b53_switch_init(struct b53_device *dev)
3005 + dev->cpu_port = 5;
3006 + }
3007 +
3008 +- /* cpu port is always last */
3009 +- dev->num_ports = dev->cpu_port + 1;
3010 + dev->enabled_ports |= BIT(dev->cpu_port);
3011 ++ dev->num_ports = fls(dev->enabled_ports);
3012 +
3013 + /* Include non standard CPU port built-in PHYs to be probed */
3014 + if (is539x(dev) || is531x5(dev)) {
3015 +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
3016 +index 77005f6366eb1..b3ff8d13c31af 100644
3017 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
3018 ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
3019 +@@ -1245,7 +1245,7 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
3020 +
3021 + /* SR-IOV capability was enabled but there are no VFs*/
3022 + if (iov->total == 0) {
3023 +- err = -EINVAL;
3024 ++ err = 0;
3025 + goto failed;
3026 + }
3027 +
3028 +diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
3029 +index 8f912de44defc..ecb0fb1ddde7c 100644
3030 +--- a/drivers/net/ethernet/cadence/macb_ptp.c
3031 ++++ b/drivers/net/ethernet/cadence/macb_ptp.c
3032 +@@ -286,6 +286,12 @@ void gem_ptp_rxstamp(struct macb *bp, struct sk_buff *skb,
3033 +
3034 + if (GEM_BFEXT(DMA_RXVALID, desc->addr)) {
3035 + desc_ptp = macb_ptp_desc(bp, desc);
3036 ++ /* Unlikely but check */
3037 ++ if (!desc_ptp) {
3038 ++ dev_warn_ratelimited(&bp->pdev->dev,
3039 ++ "Timestamp not supported in BD\n");
3040 ++ return;
3041 ++ }
3042 + gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts);
3043 + memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
3044 + shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
3045 +@@ -318,8 +324,11 @@ int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb,
3046 + if (CIRC_SPACE(head, tail, PTP_TS_BUFFER_SIZE) == 0)
3047 + return -ENOMEM;
3048 +
3049 +- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3050 + desc_ptp = macb_ptp_desc(queue->bp, desc);
3051 ++ /* Unlikely but check */
3052 ++ if (!desc_ptp)
3053 ++ return -EINVAL;
3054 ++ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3055 + tx_timestamp = &queue->tx_timestamps[head];
3056 + tx_timestamp->skb = skb;
3057 + /* ensure ts_1/ts_2 is loaded after ctrl (TX_USED check) */
3058 +diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
3059 +index 0ccdde366ae17..540d99f59226e 100644
3060 +--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
3061 ++++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
3062 +@@ -1153,6 +1153,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3063 + if (!adapter->registered_device_map) {
3064 + pr_err("%s: could not register any net devices\n",
3065 + pci_name(pdev));
3066 ++ err = -EINVAL;
3067 + goto out_release_adapter_res;
3068 + }
3069 +
3070 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3071 +index 19165a3548bfd..4fb80ed897f22 100644
3072 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3073 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3074 +@@ -29,6 +29,8 @@ static const char hns3_driver_string[] =
3075 + static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
3076 + static struct hnae3_client client;
3077 +
3078 ++#define HNS3_MIN_TUN_PKT_LEN 65U
3079 ++
3080 + /* hns3_pci_tbl - PCI Device ID Table
3081 + *
3082 + * Last entry must be all 0s
3083 +@@ -792,8 +794,11 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
3084 + HNS3_L4T_TCP);
3085 + break;
3086 + case IPPROTO_UDP:
3087 +- if (hns3_tunnel_csum_bug(skb))
3088 +- return skb_checksum_help(skb);
3089 ++ if (hns3_tunnel_csum_bug(skb)) {
3090 ++ int ret = skb_put_padto(skb, HNS3_MIN_TUN_PKT_LEN);
3091 ++
3092 ++ return ret ? ret : skb_checksum_help(skb);
3093 ++ }
3094 +
3095 + hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
3096 + hnae3_set_field(*type_cs_vlan_tso,
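
The hns3 hunk changes the checksum-bug workaround from "punt to software checksum" to "pad, then punt": skb_put_padto() grows the frame to a minimum length (65 bytes here, per the new HNS3_MIN_TUN_PKT_LEN) and frees the skb itself on failure, so the software checksum runs only on a successfully padded frame. Sketch of the combined fallback:

    #include <linux/skbuff.h>

    static int example_csum_fallback(struct sk_buff *skb)
    {
            /* On failure skb_put_padto() has already freed the skb,
             * so the caller must not touch it again. */
            int ret = skb_put_padto(skb, 65U);

            return ret ? ret : skb_checksum_help(skb);
    }
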
3097 +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
3098 +index 4008007c2e340..d97641b9928bb 100644
3099 +--- a/drivers/net/ethernet/ibm/ibmvnic.c
3100 ++++ b/drivers/net/ethernet/ibm/ibmvnic.c
3101 +@@ -4038,6 +4038,14 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
3102 + return 0;
3103 + }
3104 +
3105 ++ if (adapter->failover_pending) {
3106 ++ adapter->init_done_rc = -EAGAIN;
3107 ++ netdev_dbg(netdev, "Failover pending, ignoring login response\n");
3108 ++ complete(&adapter->init_done);
3109 ++ /* login response buffer will be released on reset */
3110 ++ return 0;
3111 ++ }
3112 ++
3113 + netdev->mtu = adapter->req_mtu - ETH_HLEN;
3114 +
3115 + netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
3116 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
3117 +index 774f0a619a6da..f0aa7f0e54803 100644
3118 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
3119 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
3120 +@@ -1558,9 +1558,9 @@ static int build_match_list(struct match_list_head *match_head,
3121 +
3122 + curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
3123 + if (!curr_match) {
3124 ++ rcu_read_unlock();
3125 + free_match_list(match_head);
3126 +- err = -ENOMEM;
3127 +- goto out;
3128 ++ return -ENOMEM;
3129 + }
3130 + if (!tree_get_node(&g->node)) {
3131 + kfree(curr_match);
3132 +@@ -1569,7 +1569,6 @@ static int build_match_list(struct match_list_head *match_head,
3133 + curr_match->g = g;
3134 + list_add_tail(&curr_match->list, &match_head->list);
3135 + }
3136 +-out:
3137 + rcu_read_unlock();
3138 + return err;
3139 + }
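
In the mlx5 hunk the interesting detail is ordering on the failure path: the old code reached free_match_list() while still inside the rcu_read_lock() section and only unlocked at the out: label; the rewrite unlocks first, cleans up, and returns directly, keeping the cleanup (which, presumably, may block) out of the read-side critical section, and retires the now-unused label. The pattern in isolation, with a hypothetical cleanup helper:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    static void example_cleanup_may_sleep(void) { /* e.g. frees, locks */ }

    static int example_collect(void)
    {
            void *item;

            rcu_read_lock();
            item = kmalloc(32, GFP_ATOMIC); /* atomic inside the lock */
            if (!item) {
                    /* Drop the read-side lock before any cleanup that
                     * is not safe under rcu_read_lock(). */
                    rcu_read_unlock();
                    example_cleanup_may_sleep();
                    return -ENOMEM;
            }
            /* ... walk RCU-protected data via item ... */
            rcu_read_unlock();
            kfree(item);
            return 0;
    }
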
3140 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
3141 +index 049a83b40e469..9d77f318d11ed 100644
3142 +--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
3143 ++++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
3144 +@@ -439,7 +439,12 @@ static int qed_enable_msix(struct qed_dev *cdev,
3145 + rc = cnt;
3146 + }
3147 +
3148 +- if (rc > 0) {
3149 ++ /* For VFs, we should return with an error in case we didn't get the
3150 ++ * exact number of msix vectors as we requested.
3151 ++ * Not doing that will lead to a crash when starting queues for
3152 ++ * this VF.
3153 ++ */
3154 ++ if ((IS_PF(cdev) && rc > 0) || (IS_VF(cdev) && rc == cnt)) {
3155 + /* MSI-x configuration was achieved */
3156 + int_params->out.int_mode = QED_INT_MODE_MSIX;
3157 + int_params->out.num_vectors = rc;
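
The comment added in the qed hunk carries the whole reasoning: a PF can run with fewer MSI-X vectors than requested, but a VF that receives a partial grant will later crash when starting its queues, so for VFs only an exact grant counts as success. The predicate, lifted out as a sketch:

    #include <linux/types.h>

    /* rc: vectors actually granted; cnt: vectors requested. */
    static bool example_msix_grant_ok(bool is_pf, int rc, int cnt)
    {
            /* PF tolerates a partial grant; VF demands the exact count. */
            return (is_pf && rc > 0) || (!is_pf && rc == cnt);
    }
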
3158 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
3159 +index 0d62db3241bed..d16cadca2f7ef 100644
3160 +--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
3161 ++++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
3162 +@@ -2841,6 +2841,7 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
3163 + struct qed_nvm_image_att *p_image_att)
3164 + {
3165 + enum nvm_image_type type;
3166 ++ int rc;
3167 + u32 i;
3168 +
3169 + /* Translate image_id into MFW definitions */
3170 +@@ -2866,7 +2867,10 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
3171 + return -EINVAL;
3172 + }
3173 +
3174 +- qed_mcp_nvm_info_populate(p_hwfn);
3175 ++ rc = qed_mcp_nvm_info_populate(p_hwfn);
3176 ++ if (rc)
3177 ++ return rc;
3178 ++
3179 + for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
3180 + if (type == p_hwfn->nvm_info.image_att[i].image_type)
3181 + break;
3182 +diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
3183 +index 1aabb2e7a38b5..756c5943f5e01 100644
3184 +--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
3185 ++++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
3186 +@@ -1676,6 +1676,7 @@ static void qede_sync_free_irqs(struct qede_dev *edev)
3187 + }
3188 +
3189 + edev->int_info.used_cnt = 0;
3190 ++ edev->int_info.msix_cnt = 0;
3191 + }
3192 +
3193 + static int qede_req_msix_irqs(struct qede_dev *edev)
3194 +@@ -2193,7 +2194,6 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
3195 + goto out;
3196 + err4:
3197 + qede_sync_free_irqs(edev);
3198 +- memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info));
3199 + err3:
3200 + qede_napi_disable_remove(edev);
3201 + err2:
3202 +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
3203 +index c48a0e2d4d7ef..6a009d51ec510 100644
3204 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
3205 ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
3206 +@@ -440,7 +440,6 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
3207 + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c, 1);
3208 + msleep(20);
3209 +
3210 +- qlcnic_rom_unlock(adapter);
3211 + /* big hammer don't reset CAM block on reset */
3212 + QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff);
3213 +
3214 +diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
3215 +index 9d188931bc09e..afd49c7fd87fe 100644
3216 +--- a/drivers/net/ethernet/qualcomm/qca_spi.c
3217 ++++ b/drivers/net/ethernet/qualcomm/qca_spi.c
3218 +@@ -413,7 +413,7 @@ qcaspi_receive(struct qcaspi *qca)
3219 + skb_put(qca->rx_skb, retcode);
3220 + qca->rx_skb->protocol = eth_type_trans(
3221 + qca->rx_skb, qca->rx_skb->dev);
3222 +- qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
3223 ++ skb_checksum_none_assert(qca->rx_skb);
3224 + netif_rx_ni(qca->rx_skb);
3225 + qca->rx_skb = netdev_alloc_skb_ip_align(net_dev,
3226 + net_dev->mtu + VLAN_ETH_HLEN);
3227 +diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c
3228 +index db6068cd7a1f2..466e9d07697a1 100644
3229 +--- a/drivers/net/ethernet/qualcomm/qca_uart.c
3230 ++++ b/drivers/net/ethernet/qualcomm/qca_uart.c
3231 +@@ -107,7 +107,7 @@ qca_tty_receive(struct serdev_device *serdev, const unsigned char *data,
3232 + skb_put(qca->rx_skb, retcode);
3233 + qca->rx_skb->protocol = eth_type_trans(
3234 + qca->rx_skb, qca->rx_skb->dev);
3235 +- qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
3236 ++ skb_checksum_none_assert(qca->rx_skb);
3237 + netif_rx_ni(qca->rx_skb);
3238 + qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
3239 + netdev->mtu +
3240 +diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
3241 +index aa11b70b9ca48..2199bd08f4d6a 100644
3242 +--- a/drivers/net/ethernet/rdc/r6040.c
3243 ++++ b/drivers/net/ethernet/rdc/r6040.c
3244 +@@ -133,6 +133,8 @@
3245 + #define PHY_ST 0x8A /* PHY status register */
3246 + #define MAC_SM 0xAC /* MAC status machine */
3247 + #define MAC_SM_RST 0x0002 /* MAC status machine reset */
3248 ++#define MD_CSC 0xb6 /* MDC speed control register */
3249 ++#define MD_CSC_DEFAULT 0x0030
3250 + #define MAC_ID 0xBE /* Identifier register */
3251 +
3252 + #define TX_DCNT 0x80 /* TX descriptor count */
3253 +@@ -368,8 +370,9 @@ static void r6040_reset_mac(struct r6040_private *lp)
3254 + {
3255 + void __iomem *ioaddr = lp->base;
3256 + int limit = MAC_DEF_TIMEOUT;
3257 +- u16 cmd;
3258 ++ u16 cmd, md_csc;
3259 +
3260 ++ md_csc = ioread16(ioaddr + MD_CSC);
3261 + iowrite16(MAC_RST, ioaddr + MCR1);
3262 + while (limit--) {
3263 + cmd = ioread16(ioaddr + MCR1);
3264 +@@ -381,6 +384,10 @@ static void r6040_reset_mac(struct r6040_private *lp)
3265 + iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
3266 + iowrite16(0, ioaddr + MAC_SM);
3267 + mdelay(5);
3268 ++
3269 ++ /* Restore MDIO clock frequency */
3270 ++ if (md_csc != MD_CSC_DEFAULT)
3271 ++ iowrite16(md_csc, ioaddr + MD_CSC);
3272 + }
3273 +
3274 + static void r6040_init_mac_regs(struct net_device *dev)
3275 +diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
3276 +index 394ab9cdfe2c7..c44aea47c1208 100644
3277 +--- a/drivers/net/ethernet/renesas/sh_eth.c
3278 ++++ b/drivers/net/ethernet/renesas/sh_eth.c
3279 +@@ -2547,6 +2547,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
3280 + else
3281 + txdesc->status |= cpu_to_le32(TD_TACT);
3282 +
3283 ++ wmb(); /* cur_tx must be incremented after TACT bit was set */
3284 + mdp->cur_tx++;
3285 +
3286 + if (!(sh_eth_read(ndev, EDTRR) & mdp->cd->edtrr_trns))
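
The one-line sh_eth hunk is a memory-ordering fix: per its own comment, the descriptor's TACT (transmit-active) bit must be visible before cur_tx is advanced, otherwise an observer of cur_tx can find a descriptor that does not yet show its ownership bit. The publish idiom, sketched with hypothetical ring types:

    #include <asm/barrier.h>
    #include <linux/types.h>

    #define EXAMPLE_ACTIVE_BIT 0x80000000U

    struct example_ring {
            u32 *status;            /* descriptor status word */
            unsigned int cur_tx;    /* producer index */
    };

    static void example_publish(struct example_ring *ring)
    {
            *ring->status |= EXAMPLE_ACTIVE_BIT; /* hand to hardware */
            wmb();          /* status must be visible before the index */
            ring->cur_tx++; /* publish the new producer position */
    }
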
3287 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
3288 +index 0f56f8e336917..03b11f191c262 100644
3289 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
3290 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
3291 +@@ -288,10 +288,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
3292 + val &= ~NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL;
3293 + break;
3294 + default:
3295 +- dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
3296 +- phy_modes(gmac->phy_mode));
3297 +- err = -EINVAL;
3298 +- goto err_remove_config_dt;
3299 ++ goto err_unsupported_phy;
3300 + }
3301 + regmap_write(gmac->nss_common, NSS_COMMON_GMAC_CTL(gmac->id), val);
3302 +
3303 +@@ -308,10 +305,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
3304 + NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
3305 + break;
3306 + default:
3307 +- dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
3308 +- phy_modes(gmac->phy_mode));
3309 +- err = -EINVAL;
3310 +- goto err_remove_config_dt;
3311 ++ goto err_unsupported_phy;
3312 + }
3313 + regmap_write(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, val);
3314 +
3315 +@@ -328,8 +322,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
3316 + NSS_COMMON_CLK_GATE_GMII_TX_EN(gmac->id);
3317 + break;
3318 + default:
3319 +- /* We don't get here; the switch above will have errored out */
3320 +- unreachable();
3321 ++ goto err_unsupported_phy;
3322 + }
3323 + regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
3324 +
3325 +@@ -360,6 +353,11 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
3326 +
3327 + return 0;
3328 +
3329 ++err_unsupported_phy:
3330 ++ dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
3331 ++ phy_modes(gmac->phy_mode));
3332 ++ err = -EINVAL;
3333 ++
3334 + err_remove_config_dt:
3335 + stmmac_remove_config_dt(pdev, plat_dat);
3336 +
3337 +diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
3338 +index d8ba512f166ad..41040756307a1 100644
3339 +--- a/drivers/net/ethernet/wiznet/w5100.c
3340 ++++ b/drivers/net/ethernet/wiznet/w5100.c
3341 +@@ -1059,6 +1059,8 @@ static int w5100_mmio_probe(struct platform_device *pdev)
3342 + mac_addr = data->mac_addr;
3343 +
3344 + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3345 ++ if (!mem)
3346 ++ return -EINVAL;
3347 + if (resource_size(mem) < W5100_BUS_DIRECT_SIZE)
3348 + ops = &w5100_mmio_indirect_ops;
3349 + else
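
The w5100 hunk guards against a missing memory resource: platform_get_resource() returns NULL (not an ERR_PTR) when the requested entry does not exist, and the old code fed that NULL straight into resource_size(). Sketch of the checked lookup:

    #include <linux/ioport.h>
    #include <linux/platform_device.h>

    static int example_get_mem(struct platform_device *pdev,
                               struct resource **out)
    {
            struct resource *mem;

            mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            if (!mem)       /* absent entry: NULL, not an error pointer */
                    return -EINVAL;

            *out = mem;
            return 0;
    }
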
3350 +diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
3351 +index 939de185bc6b8..178234e94cd16 100644
3352 +--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
3353 ++++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
3354 +@@ -736,10 +736,8 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
3355 + /* Kick off the transfer */
3356 + lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
3357 +
3358 +- if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
3359 +- netdev_info(ndev, "%s -> netif_stop_queue\n", __func__);
3360 ++ if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
3361 + netif_stop_queue(ndev);
3362 +- }
3363 +
3364 + return NETDEV_TX_OK;
3365 + }
3366 +diff --git a/drivers/net/phy/dp83640_reg.h b/drivers/net/phy/dp83640_reg.h
3367 +index 21aa24c741b96..daae7fa58fb82 100644
3368 +--- a/drivers/net/phy/dp83640_reg.h
3369 ++++ b/drivers/net/phy/dp83640_reg.h
3370 +@@ -5,7 +5,7 @@
3371 + #ifndef HAVE_DP83640_REGISTERS
3372 + #define HAVE_DP83640_REGISTERS
3373 +
3374 +-#define PAGE0 0x0000
3375 ++/* #define PAGE0 0x0000 */
3376 + #define PHYCR2 0x001c /* PHY Control Register 2 */
3377 +
3378 + #define PAGE4 0x0004
3379 +diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
3380 +index 0362acd5cdcaa..cdd1b193fd4fe 100644
3381 +--- a/drivers/net/usb/cdc_mbim.c
3382 ++++ b/drivers/net/usb/cdc_mbim.c
3383 +@@ -655,6 +655,11 @@ static const struct usb_device_id mbim_devs[] = {
3384 + .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
3385 + },
3386 +
3387 ++ /* Telit LN920 */
3388 ++ { USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1061, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
3389 ++ .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
3390 ++ },
3391 ++
3392 + /* default entry */
3393 + { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
3394 + .driver_info = (unsigned long)&cdc_mbim_info_zlp,
3395 +diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
3396 +index 987ebae8ea0e1..afa7a82ffd5d3 100644
3397 +--- a/drivers/net/wireless/ath/ath6kl/wmi.c
3398 ++++ b/drivers/net/wireless/ath/ath6kl/wmi.c
3399 +@@ -2513,8 +2513,10 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx)
3400 + goto free_data_skb;
3401 +
3402 + for (index = 0; index < num_pri_streams; index++) {
3403 +- if (WARN_ON(!data_sync_bufs[index].skb))
3404 ++ if (WARN_ON(!data_sync_bufs[index].skb)) {
3405 ++ ret = -ENOMEM;
3406 + goto free_data_skb;
3407 ++ }
3408 +
3409 + ep_id = ath6kl_ac2_endpoint_id(wmi->parent_dev,
3410 + data_sync_bufs[index].
3411 +diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
3412 +index 983e1abbd9e43..4d45d5a8ad2ed 100644
3413 +--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
3414 ++++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
3415 +@@ -3351,7 +3351,8 @@ found:
3416 + "Found block at %x: code=%d ref=%d length=%d major=%d minor=%d\n",
3417 + cptr, code, reference, length, major, minor);
3418 + if ((!AR_SREV_9485(ah) && length >= 1024) ||
3419 +- (AR_SREV_9485(ah) && length > EEPROM_DATA_LEN_9485)) {
3420 ++ (AR_SREV_9485(ah) && length > EEPROM_DATA_LEN_9485) ||
3421 ++ (length > cptr)) {
3422 + ath_dbg(common, EEPROM, "Skipping bad header\n");
3423 + cptr -= COMP_HDR_LEN;
3424 + continue;
3425 +diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
3426 +index 9f438d8e59f2f..daad9e7b17cf5 100644
3427 +--- a/drivers/net/wireless/ath/ath9k/hw.c
3428 ++++ b/drivers/net/wireless/ath/ath9k/hw.c
3429 +@@ -1622,7 +1622,6 @@ static void ath9k_hw_apply_gpio_override(struct ath_hw *ah)
3430 + ath9k_hw_gpio_request_out(ah, i, NULL,
3431 + AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
3432 + ath9k_hw_set_gpio(ah, i, !!(ah->gpio_val & BIT(i)));
3433 +- ath9k_hw_gpio_free(ah, i);
3434 + }
3435 + }
3436 +
3437 +@@ -2729,14 +2728,17 @@ static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah, u32 gpio, u32 type)
3438 + static void ath9k_hw_gpio_cfg_soc(struct ath_hw *ah, u32 gpio, bool out,
3439 + const char *label)
3440 + {
3441 ++ int err;
3442 ++
3443 + if (ah->caps.gpio_requested & BIT(gpio))
3444 + return;
3445 +
3446 +- /* may be requested by BSP, free anyway */
3447 +- gpio_free(gpio);
3448 +-
3449 +- if (gpio_request_one(gpio, out ? GPIOF_OUT_INIT_LOW : GPIOF_IN, label))
3450 ++ err = gpio_request_one(gpio, out ? GPIOF_OUT_INIT_LOW : GPIOF_IN, label);
3451 ++ if (err) {
3452 ++ ath_err(ath9k_hw_common(ah), "request GPIO%d failed:%d\n",
3453 ++ gpio, err);
3454 + return;
3455 ++ }
3456 +
3457 + ah->caps.gpio_requested |= BIT(gpio);
3458 + }
3459 +diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
3460 +index ad5d3919435c9..87a41d0ededc1 100644
3461 +--- a/drivers/ntb/test/ntb_perf.c
3462 ++++ b/drivers/ntb/test/ntb_perf.c
3463 +@@ -600,6 +600,7 @@ static int perf_setup_inbuf(struct perf_peer *peer)
3464 + return -ENOMEM;
3465 + }
3466 + if (!IS_ALIGNED(peer->inbuf_xlat, xlat_align)) {
3467 ++ ret = -EINVAL;
3468 + dev_err(&perf->ntb->dev, "Unaligned inbuf allocated\n");
3469 + goto err_free_inbuf;
3470 + }
3471 +diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
3472 +index 8798274dc3ba7..ffd6a7204509a 100644
3473 +--- a/drivers/nvme/host/rdma.c
3474 ++++ b/drivers/nvme/host/rdma.c
3475 +@@ -643,13 +643,13 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
3476 + if (ret)
3477 + return ret;
3478 +
3479 +- ctrl->ctrl.queue_count = nr_io_queues + 1;
3480 +- if (ctrl->ctrl.queue_count < 2) {
3481 ++ if (nr_io_queues == 0) {
3482 + dev_err(ctrl->ctrl.device,
3483 + "unable to set any I/O queues\n");
3484 + return -ENOMEM;
3485 + }
3486 +
3487 ++ ctrl->ctrl.queue_count = nr_io_queues + 1;
3488 + dev_info(ctrl->ctrl.device,
3489 + "creating %d I/O queues.\n", nr_io_queues);
3490 +
3491 +diff --git a/drivers/of/kobj.c b/drivers/of/kobj.c
3492 +index a32e60b024b8d..6675b5e56960c 100644
3493 +--- a/drivers/of/kobj.c
3494 ++++ b/drivers/of/kobj.c
3495 +@@ -119,7 +119,7 @@ int __of_attach_node_sysfs(struct device_node *np)
3496 + struct property *pp;
3497 + int rc;
3498 +
3499 +- if (!of_kset)
3500 ++ if (!IS_ENABLED(CONFIG_SYSFS) || !of_kset)
3501 + return 0;
3502 +
3503 + np->kobj.kset = of_kset;
3504 +diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c
3505 +index 5d41dda6da4e7..75daa16f38b7f 100644
3506 +--- a/drivers/parport/ieee1284_ops.c
3507 ++++ b/drivers/parport/ieee1284_ops.c
3508 +@@ -535,7 +535,7 @@ size_t parport_ieee1284_ecp_read_data (struct parport *port,
3509 + goto out;
3510 +
3511 + /* Yield the port for a while. */
3512 +- if (count && dev->port->irq != PARPORT_IRQ_NONE) {
3513 ++ if (dev->port->irq != PARPORT_IRQ_NONE) {
3514 + parport_release (dev);
3515 + schedule_timeout_interruptible(msecs_to_jiffies(40));
3516 + parport_claim_or_block (dev);
3517 +diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
3518 +index 74aa9da85aa26..a9669b28c2a6d 100644
3519 +--- a/drivers/pci/controller/pci-aardvark.c
3520 ++++ b/drivers/pci/controller/pci-aardvark.c
3521 +@@ -166,7 +166,7 @@
3522 + (PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn)) | \
3523 + PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where))
3524 +
3525 +-#define PIO_RETRY_CNT 500
3526 ++#define PIO_RETRY_CNT 750000 /* 1.5 s */
3527 + #define PIO_RETRY_DELAY 2 /* 2 us*/
3528 +
3529 + #define LINK_WAIT_MAX_RETRIES 10
3530 +@@ -181,6 +181,7 @@ struct advk_pcie {
3531 + struct list_head resources;
3532 + struct irq_domain *irq_domain;
3533 + struct irq_chip irq_chip;
3534 ++ raw_spinlock_t irq_lock;
3535 + struct irq_domain *msi_domain;
3536 + struct irq_domain *msi_inner_domain;
3537 + struct irq_chip msi_bottom_irq_chip;
3538 +@@ -603,22 +604,28 @@ static void advk_pcie_irq_mask(struct irq_data *d)
3539 + {
3540 + struct advk_pcie *pcie = d->domain->host_data;
3541 + irq_hw_number_t hwirq = irqd_to_hwirq(d);
3542 ++ unsigned long flags;
3543 + u32 mask;
3544 +
3545 ++ raw_spin_lock_irqsave(&pcie->irq_lock, flags);
3546 + mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
3547 + mask |= PCIE_ISR1_INTX_ASSERT(hwirq);
3548 + advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
3549 ++ raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
3550 + }
3551 +
3552 + static void advk_pcie_irq_unmask(struct irq_data *d)
3553 + {
3554 + struct advk_pcie *pcie = d->domain->host_data;
3555 + irq_hw_number_t hwirq = irqd_to_hwirq(d);
3556 ++ unsigned long flags;
3557 + u32 mask;
3558 +
3559 ++ raw_spin_lock_irqsave(&pcie->irq_lock, flags);
3560 + mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
3561 + mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq);
3562 + advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
3563 ++ raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
3564 + }
3565 +
3566 + static int advk_pcie_irq_map(struct irq_domain *h,
3567 +@@ -701,6 +708,8 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
3568 + struct device_node *pcie_intc_node;
3569 + struct irq_chip *irq_chip;
3570 +
3571 ++ raw_spin_lock_init(&pcie->irq_lock);
3572 ++
3573 + pcie_intc_node = of_get_next_child(node, NULL);
3574 + if (!pcie_intc_node) {
3575 + dev_err(dev, "No PCIe Intc node found\n");
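
The pci-aardvark hunks do two things: they stretch PIO_RETRY_CNT so the config-access poll loop covers the full 1.5 s noted in the new comment, and they serialize the read-modify-write of PCIE_ISR1_MASK_REG, which mask and unmask could previously race from different CPUs. The locking half, reduced to a sketch (register and bit are placeholders):

    #include <linux/io.h>
    #include <linux/spinlock.h>

    static void example_mask_bit(void __iomem *reg, raw_spinlock_t *lock,
                                 u32 bit)
    {
            unsigned long flags;
            u32 mask;

            /* readl/modify/writel is not atomic; take the lock for the
             * whole sequence, IRQ-safe because irqchip callbacks can
             * run in interrupt context. */
            raw_spin_lock_irqsave(lock, flags);
            mask = readl(reg);
            writel(mask | bit, reg);
            raw_spin_unlock_irqrestore(lock, flags);
    }
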
3576 +diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
3577 +index 4850a1b8eec12..a86bd9660dae9 100644
3578 +--- a/drivers/pci/controller/pcie-xilinx-nwl.c
3579 ++++ b/drivers/pci/controller/pcie-xilinx-nwl.c
3580 +@@ -6,6 +6,7 @@
3581 + * (C) Copyright 2014 - 2015, Xilinx, Inc.
3582 + */
3583 +
3584 ++#include <linux/clk.h>
3585 + #include <linux/delay.h>
3586 + #include <linux/interrupt.h>
3587 + #include <linux/irq.h>
3588 +@@ -169,6 +170,7 @@ struct nwl_pcie {
3589 + u8 root_busno;
3590 + struct nwl_msi msi;
3591 + struct irq_domain *legacy_irq_domain;
3592 ++ struct clk *clk;
3593 + raw_spinlock_t leg_mask_lock;
3594 + };
3595 +
3596 +@@ -849,6 +851,16 @@ static int nwl_pcie_probe(struct platform_device *pdev)
3597 + return err;
3598 + }
3599 +
3600 ++ pcie->clk = devm_clk_get(dev, NULL);
3601 ++ if (IS_ERR(pcie->clk))
3602 ++ return PTR_ERR(pcie->clk);
3603 ++
3604 ++ err = clk_prepare_enable(pcie->clk);
3605 ++ if (err) {
3606 ++ dev_err(dev, "can't enable PCIe ref clock\n");
3607 ++ return err;
3608 ++ }
3609 ++
3610 + err = nwl_pcie_bridge_init(pcie);
3611 + if (err) {
3612 + dev_err(dev, "HW Initialization failed\n");
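
The pcie-xilinx-nwl hunk makes probe own its reference clock instead of assuming firmware left it running: devm_clk_get(dev, NULL) fetches the first (unnamed) clock from the device node, and clk_prepare_enable() must succeed before the bridge is initialized. The minimal shape:

    #include <linux/clk.h>
    #include <linux/device.h>

    static int example_enable_refclk(struct device *dev, struct clk **out)
    {
            struct clk *clk;
            int err;

            clk = devm_clk_get(dev, NULL);  /* NULL: unnamed clock */
            if (IS_ERR(clk))
                    return PTR_ERR(clk);    /* may be -EPROBE_DEFER */

            err = clk_prepare_enable(clk);
            if (err)
                    return err;

            *out = clk;
            return 0;
    }

Being devm-managed, the clk reference itself is released automatically if probe fails later; only the prepare/enable would need an explicit unwind.
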
3613 +diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
3614 +index bc80b0f0ea1ba..f65800d63856b 100644
3615 +--- a/drivers/pci/msi.c
3616 ++++ b/drivers/pci/msi.c
3617 +@@ -754,6 +754,9 @@ static void msix_mask_all(void __iomem *base, int tsize)
3618 + u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT;
3619 + int i;
3620 +
3621 ++ if (pci_msi_ignore_mask)
3622 ++ return;
3623 ++
3624 + for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE)
3625 + writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
3626 + }
3627 +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
3628 +index 9ebf32de85757..97d69b9be1d49 100644
3629 +--- a/drivers/pci/pci.c
3630 ++++ b/drivers/pci/pci.c
3631 +@@ -224,7 +224,7 @@ static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
3632 +
3633 + *endptr = strchrnul(path, ';');
3634 +
3635 +- wpath = kmemdup_nul(path, *endptr - path, GFP_KERNEL);
3636 ++ wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC);
3637 + if (!wpath)
3638 + return -ENOMEM;
3639 +
3640 +@@ -1591,11 +1591,7 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
3641 + * so that things like MSI message writing will behave as expected
3642 + * (e.g. if the device really is in D0 at enable time).
3643 + */
3644 +- if (dev->pm_cap) {
3645 +- u16 pmcsr;
3646 +- pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
3647 +- dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
3648 +- }
3649 ++ pci_update_current_state(dev, dev->current_state);
3650 +
3651 + if (atomic_inc_return(&dev->enable_cnt) > 1)
3652 + return 0; /* already enabled */
3653 +@@ -2170,7 +2166,14 @@ static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable
3654 + if (enable) {
3655 + int error;
3656 +
3657 +- if (pci_pme_capable(dev, state))
3658 ++ /*
3659 ++ * Enable PME signaling if the device can signal PME from
3660 ++ * D3cold regardless of whether or not it can signal PME from
3661 ++ * the current target state, because that will allow it to
3662 ++ * signal PME when the hierarchy above it goes into D3cold and
3663 ++ * the device itself ends up in D3cold as a result of that.
3664 ++ */
3665 ++ if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold))
3666 + pci_pme_active(dev, true);
3667 + else
3668 + ret = 1;
3669 +@@ -2274,16 +2277,20 @@ static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
3670 + if (dev->current_state == PCI_D3cold)
3671 + target_state = PCI_D3cold;
3672 +
3673 +- if (wakeup) {
3674 ++ if (wakeup && dev->pme_support) {
3675 ++ pci_power_t state = target_state;
3676 ++
3677 + /*
3678 + * Find the deepest state from which the device can generate
3679 + * PME#.
3680 + */
3681 +- if (dev->pme_support) {
3682 +- while (target_state
3683 +- && !(dev->pme_support & (1 << target_state)))
3684 +- target_state--;
3685 +- }
3686 ++ while (state && !(dev->pme_support & (1 << state)))
3687 ++ state--;
3688 ++
3689 ++ if (state)
3690 ++ return state;
3691 ++ else if (dev->pme_support & 1)
3692 ++ return PCI_D0;
3693 + }
3694 +
3695 + return target_state;
3696 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
3697 +index 7e873b6b7d558..4eb8900b9a5cd 100644
3698 +--- a/drivers/pci/quirks.c
3699 ++++ b/drivers/pci/quirks.c
3700 +@@ -3152,12 +3152,13 @@ static void fixup_mpss_256(struct pci_dev *dev)
3701 + {
3702 + dev->pcie_mpss = 1; /* 256 bytes */
3703 + }
3704 +-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
3705 +- PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256);
3706 +-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
3707 +- PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
3708 +-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
3709 +- PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
3710 ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
3711 ++ PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256);
3712 ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
3713 ++ PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
3714 ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
3715 ++ PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
3716 ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ASMEDIA, 0x0612, fixup_mpss_256);
3717 +
3718 + /*
3719 + * Intel 5000 and 5100 Memory controllers have an erratum with read completion
3720 +@@ -4777,6 +4778,10 @@ static const struct pci_dev_acs_enabled {
3721 + { 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */
3722 + /* Cavium ThunderX */
3723 + { PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, pci_quirk_cavium_acs },
3724 ++ /* Cavium multi-function devices */
3725 ++ { PCI_VENDOR_ID_CAVIUM, 0xA026, pci_quirk_mf_endpoint_acs },
3726 ++ { PCI_VENDOR_ID_CAVIUM, 0xA059, pci_quirk_mf_endpoint_acs },
3727 ++ { PCI_VENDOR_ID_CAVIUM, 0xA060, pci_quirk_mf_endpoint_acs },
3728 + /* APM X-Gene */
3729 + { PCI_VENDOR_ID_AMCC, 0xE004, pci_quirk_xgene_acs },
3730 + /* Ampere Computing */
3731 +@@ -5253,7 +5258,7 @@ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
3732 + PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
3733 +
3734 + /*
3735 +- * Create device link for NVIDIA GPU with integrated USB xHCI Host
3736 ++ * Create device link for GPUs with integrated USB xHCI Host
3737 + * controller to VGA.
3738 + */
3739 + static void quirk_gpu_usb(struct pci_dev *usb)
3740 +@@ -5262,9 +5267,11 @@ static void quirk_gpu_usb(struct pci_dev *usb)
3741 + }
3742 + DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
3743 + PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
3744 ++DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
3745 ++ PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
3746 +
3747 + /*
3748 +- * Create device link for NVIDIA GPU with integrated Type-C UCSI controller
3749 ++ * Create device link for GPUs with integrated Type-C UCSI controller
3750 + * to VGA. Currently there is no class code defined for UCSI device over PCI
3751 + * so using UNKNOWN class for now and it will be updated when UCSI
3752 + * over PCI gets a class code.
3753 +@@ -5277,6 +5284,9 @@ static void quirk_gpu_usb_typec_ucsi(struct pci_dev *ucsi)
3754 + DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
3755 + PCI_CLASS_SERIAL_UNKNOWN, 8,
3756 + quirk_gpu_usb_typec_ucsi);
3757 ++DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
3758 ++ PCI_CLASS_SERIAL_UNKNOWN, 8,
3759 ++ quirk_gpu_usb_typec_ucsi);
3760 +
3761 + /*
3762 + * Enable the NVIDIA GPU integrated HDA controller if the BIOS left it
3763 +diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c
3764 +index a7bdd10fccf33..68ac8a0f5c72e 100644
3765 +--- a/drivers/pci/syscall.c
3766 ++++ b/drivers/pci/syscall.c
3767 +@@ -21,8 +21,10 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn,
3768 + long err;
3769 + int cfg_ret;
3770 +
3771 ++ err = -EPERM;
3772 ++ dev = NULL;
3773 + if (!capable(CAP_SYS_ADMIN))
3774 +- return -EPERM;
3775 ++ goto error;
3776 +
3777 + err = -ENODEV;
3778 + dev = pci_get_domain_bus_and_slot(0, bus, dfn);
3779 +diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
3780 +index 04a4e761e9a9c..c2f807bf34899 100644
3781 +--- a/drivers/pinctrl/pinctrl-single.c
3782 ++++ b/drivers/pinctrl/pinctrl-single.c
3783 +@@ -1201,6 +1201,7 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
3784 +
3785 + if (PCS_HAS_PINCONF) {
3786 + dev_err(pcs->dev, "pinconf not supported\n");
3787 ++ res = -ENOTSUPP;
3788 + goto free_pingroups;
3789 + }
3790 +
3791 +diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
3792 +index c05217edcb0e0..82407e4a16427 100644
3793 +--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
3794 ++++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
3795 +@@ -918,7 +918,7 @@ static int samsung_pinctrl_register(struct platform_device *pdev,
3796 + pin_bank->grange.pin_base = drvdata->pin_base
3797 + + pin_bank->pin_base;
3798 + pin_bank->grange.base = pin_bank->grange.pin_base;
3799 +- pin_bank->grange.npins = pin_bank->gpio_chip.ngpio;
3800 ++ pin_bank->grange.npins = pin_bank->nr_pins;
3801 + pin_bank->grange.gc = &pin_bank->gpio_chip;
3802 + pinctrl_add_gpio_range(drvdata->pctl_dev, &pin_bank->grange);
3803 + }
3804 +diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
3805 +index ac784ac66ac34..2b807c8aa869c 100644
3806 +--- a/drivers/platform/chrome/cros_ec_proto.c
3807 ++++ b/drivers/platform/chrome/cros_ec_proto.c
3808 +@@ -219,6 +219,15 @@ static int cros_ec_host_command_proto_query(struct cros_ec_device *ec_dev,
3809 + msg->insize = sizeof(struct ec_response_get_protocol_info);
3810 +
3811 + ret = send_command(ec_dev, msg);
3812 ++ /*
3813 ++ * Send command once again when timeout occurred.
3814 ++ * Fingerprint MCU (FPMCU) is restarted during system boot which
3815 ++ * introduces small window in which FPMCU won't respond for any
3816 ++ * messages sent by kernel. There is no need to wait before next
3817 ++ * attempt because we waited at least EC_MSG_DEADLINE_MS.
3818 ++ */
3819 ++ if (ret == -ETIMEDOUT)
3820 ++ ret = send_command(ec_dev, msg);
3821 +
3822 + if (ret < 0) {
3823 + dev_dbg(ec_dev->dev,
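
The cros_ec hunk retries a timed-out protocol query exactly once, and its comment carries the justification: the fingerprint MCU reboots during system boot, the first message can land in that dead window, and since send_command() already waited at least EC_MSG_DEADLINE_MS there is nothing to gain by sleeping before the second attempt. Generic shape of a single retry on timeout, with hypothetical names:

    #include <linux/errno.h>

    struct example_dev;
    struct example_msg;

    static int example_send(struct example_dev *dev, struct example_msg *msg);

    static int example_query(struct example_dev *dev, struct example_msg *msg)
    {
            int ret = example_send(dev, msg);

            /* One immediate retry: the first attempt already consumed
             * a full deadline, so the peer has had time to recover. */
            if (ret == -ETIMEDOUT)
                    ret = example_send(dev, msg);

            return ret;
    }
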
3824 +diff --git a/drivers/platform/x86/dell-smbios-wmi.c b/drivers/platform/x86/dell-smbios-wmi.c
3825 +index ccccce9b67efe..0ebcf412f6ca0 100644
3826 +--- a/drivers/platform/x86/dell-smbios-wmi.c
3827 ++++ b/drivers/platform/x86/dell-smbios-wmi.c
3828 +@@ -72,6 +72,7 @@ static int run_smbios_call(struct wmi_device *wdev)
3829 + if (obj->type == ACPI_TYPE_INTEGER)
3830 + dev_dbg(&wdev->dev, "SMBIOS call failed: %llu\n",
3831 + obj->integer.value);
3832 ++ kfree(output.pointer);
3833 + return -EIO;
3834 + }
3835 + memcpy(&priv->buf->std, obj->buffer.pointer, obj->buffer.length);
3836 +diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
3837 +index 157cf5ec6b023..1641868c345c2 100644
3838 +--- a/drivers/power/supply/axp288_fuel_gauge.c
3839 ++++ b/drivers/power/supply/axp288_fuel_gauge.c
3840 +@@ -158,7 +158,7 @@ static int fuel_gauge_reg_readb(struct axp288_fg_info *info, int reg)
3841 + }
3842 +
3843 + if (ret < 0) {
3844 +- dev_err(&info->pdev->dev, "axp288 reg read err:%d\n", ret);
3845 ++ dev_err(&info->pdev->dev, "Error reading reg 0x%02x err: %d\n", reg, ret);
3846 + return ret;
3847 + }
3848 +
3849 +@@ -172,7 +172,7 @@ static int fuel_gauge_reg_writeb(struct axp288_fg_info *info, int reg, u8 val)
3850 + ret = regmap_write(info->regmap, reg, (unsigned int)val);
3851 +
3852 + if (ret < 0)
3853 +- dev_err(&info->pdev->dev, "axp288 reg write err:%d\n", ret);
3854 ++ dev_err(&info->pdev->dev, "Error writing reg 0x%02x err: %d\n", reg, ret);
3855 +
3856 + return ret;
3857 + }
3858 +diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c
3859 +index 00a3a581e0795..a1518eb6f6c0d 100644
3860 +--- a/drivers/power/supply/max17042_battery.c
3861 ++++ b/drivers/power/supply/max17042_battery.c
3862 +@@ -740,7 +740,7 @@ static inline void max17042_override_por_values(struct max17042_chip *chip)
3863 + struct max17042_config_data *config = chip->pdata->config_data;
3864 +
3865 + max17042_override_por(map, MAX17042_TGAIN, config->tgain);
3866 +- max17042_override_por(map, MAx17042_TOFF, config->toff);
3867 ++ max17042_override_por(map, MAX17042_TOFF, config->toff);
3868 + max17042_override_por(map, MAX17042_CGAIN, config->cgain);
3869 + max17042_override_por(map, MAX17042_COFF, config->coff);
3870 +
3871 +@@ -856,8 +856,12 @@ static irqreturn_t max17042_thread_handler(int id, void *dev)
3872 + {
3873 + struct max17042_chip *chip = dev;
3874 + u32 val;
3875 ++ int ret;
3876 ++
3877 ++ ret = regmap_read(chip->regmap, MAX17042_STATUS, &val);
3878 ++ if (ret)
3879 ++ return IRQ_HANDLED;
3880 +
3881 +- regmap_read(chip->regmap, MAX17042_STATUS, &val);
3882 + if ((val & STATUS_INTR_SOCMIN_BIT) ||
3883 + (val & STATUS_INTR_SOCMAX_BIT)) {
3884 + dev_info(&chip->client->dev, "SOC threshold INTR\n");
3885 +diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
3886 +index a9bbd022aeefe..9f10af7ac6efd 100644
3887 +--- a/drivers/rtc/rtc-tps65910.c
3888 ++++ b/drivers/rtc/rtc-tps65910.c
3889 +@@ -470,6 +470,6 @@ static struct platform_driver tps65910_rtc_driver = {
3890 + };
3891 +
3892 + module_platform_driver(tps65910_rtc_driver);
3893 +-MODULE_ALIAS("platform:rtc-tps65910");
3894 ++MODULE_ALIAS("platform:tps65910-rtc");
3895 + MODULE_AUTHOR("Venu Byravarasu <vbyravarasu@××××××.com>");
3896 + MODULE_LICENSE("GPL");
3897 +diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
3898 +index 825a8f2703b4f..6efe50d70c4bf 100644
3899 +--- a/drivers/s390/cio/css.c
3900 ++++ b/drivers/s390/cio/css.c
3901 +@@ -364,9 +364,26 @@ static ssize_t pimpampom_show(struct device *dev,
3902 + }
3903 + static DEVICE_ATTR_RO(pimpampom);
3904 +
3905 ++static ssize_t dev_busid_show(struct device *dev,
3906 ++ struct device_attribute *attr,
3907 ++ char *buf)
3908 ++{
3909 ++ struct subchannel *sch = to_subchannel(dev);
3910 ++ struct pmcw *pmcw = &sch->schib.pmcw;
3911 ++
3912 ++ if ((pmcw->st == SUBCHANNEL_TYPE_IO ||
3913 ++ pmcw->st == SUBCHANNEL_TYPE_MSG) && pmcw->dnv)
3914 ++ return sysfs_emit(buf, "0.%x.%04x\n", sch->schid.ssid,
3915 ++ pmcw->dev);
3916 ++ else
3917 ++ return sysfs_emit(buf, "none\n");
3918 ++}
3919 ++static DEVICE_ATTR_RO(dev_busid);
3920 ++
3921 + static struct attribute *io_subchannel_type_attrs[] = {
3922 + &dev_attr_chpids.attr,
3923 + &dev_attr_pimpampom.attr,
3924 ++ &dev_attr_dev_busid.attr,
3925 + NULL,
3926 + };
3927 + ATTRIBUTE_GROUPS(io_subchannel_type);
3928 +diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
3929 +index 79b5c5457cc22..b8dd9986809b4 100644
3930 +--- a/drivers/scsi/BusLogic.c
3931 ++++ b/drivers/scsi/BusLogic.c
3932 +@@ -3605,7 +3605,7 @@ static void blogic_msg(enum blogic_msglevel msglevel, char *fmt,
3933 + if (buf[0] != '\n' || len > 1)
3934 + printk("%sscsi%d: %s", blogic_msglevelmap[msglevel], adapter->host_no, buf);
3935 + } else
3936 +- printk("%s", buf);
3937 ++ pr_cont("%s", buf);
3938 + } else {
3939 + if (begin) {
3940 + if (adapter != NULL && adapter->adapter_initd)
3941 +@@ -3613,7 +3613,7 @@ static void blogic_msg(enum blogic_msglevel msglevel, char *fmt,
3942 + else
3943 + printk("%s%s", blogic_msglevelmap[msglevel], buf);
3944 + } else
3945 +- printk("%s", buf);
3946 ++ pr_cont("%s", buf);
3947 + }
3948 + begin = (buf[len - 1] == '\n');
3949 + }
3950 +diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
3951 +index 7665fd641886e..ab66e1f0fdfa3 100644
3952 +--- a/drivers/scsi/qedi/qedi_main.c
3953 ++++ b/drivers/scsi/qedi/qedi_main.c
3954 +@@ -1507,7 +1507,7 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
3955 + {
3956 + u32 *list;
3957 + int i;
3958 +- int status = 0, rc;
3959 ++ int status;
3960 + u32 *pbl;
3961 + dma_addr_t page;
3962 + int num_pages;
3963 +@@ -1518,14 +1518,14 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
3964 + */
3965 + if (!qedi->num_queues) {
3966 + QEDI_ERR(&qedi->dbg_ctx, "No MSI-X vectors available!\n");
3967 +- return 1;
3968 ++ return -ENOMEM;
3969 + }
3970 +
3971 + /* Make sure we allocated the PBL that will contain the physical
3972 + * addresses of our queues
3973 + */
3974 + if (!qedi->p_cpuq) {
3975 +- status = 1;
3976 ++ status = -EINVAL;
3977 + goto mem_alloc_failure;
3978 + }
3979 +
3980 +@@ -1540,13 +1540,13 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
3981 + "qedi->global_queues=%p.\n", qedi->global_queues);
3982 +
3983 + /* Allocate DMA coherent buffers for BDQ */
3984 +- rc = qedi_alloc_bdq(qedi);
3985 +- if (rc)
3986 ++ status = qedi_alloc_bdq(qedi);
3987 ++ if (status)
3988 + goto mem_alloc_failure;
3989 +
3990 + /* Allocate DMA coherent buffers for NVM_ISCSI_CFG */
3991 +- rc = qedi_alloc_nvm_iscsi_cfg(qedi);
3992 +- if (rc)
3993 ++ status = qedi_alloc_nvm_iscsi_cfg(qedi);
3994 ++ if (status)
3995 + goto mem_alloc_failure;
3996 +
3997 + /* Allocate a CQ and an associated PBL for each MSI-X
3998 +diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
3999 +index 7821c1695e824..dcd0f058f23e0 100644
4000 +--- a/drivers/scsi/qla2xxx/qla_nvme.c
4001 ++++ b/drivers/scsi/qla2xxx/qla_nvme.c
4002 +@@ -88,8 +88,9 @@ static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
4003 + struct qla_hw_data *ha;
4004 + struct qla_qpair *qpair;
4005 +
4006 +- if (!qidx)
4007 +- qidx++;
4008 ++ /* Map admin queue and 1st IO queue to index 0 */
4009 ++ if (qidx)
4010 ++ qidx--;
4011 +
4012 + vha = (struct scsi_qla_host *)lport->private;
4013 + ha = vha->hw;
4014 +diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c
4015 +index 50214b620865e..2b49d2c212dab 100644
4016 +--- a/drivers/soc/qcom/smsm.c
4017 ++++ b/drivers/soc/qcom/smsm.c
4018 +@@ -117,7 +117,7 @@ struct smsm_entry {
4019 + DECLARE_BITMAP(irq_enabled, 32);
4020 + DECLARE_BITMAP(irq_rising, 32);
4021 + DECLARE_BITMAP(irq_falling, 32);
4022 +- u32 last_value;
4023 ++ unsigned long last_value;
4024 +
4025 + u32 *remote_state;
4026 + u32 *subscription;
4027 +@@ -212,8 +212,7 @@ static irqreturn_t smsm_intr(int irq, void *data)
4028 + u32 val;
4029 +
4030 + val = readl(entry->remote_state);
4031 +- changed = val ^ entry->last_value;
4032 +- entry->last_value = val;
4033 ++ changed = val ^ xchg(&entry->last_value, val);
4034 +
4035 + for_each_set_bit(i, entry->irq_enabled, 32) {
4036 + if (!(changed & BIT(i)))
4037 +@@ -274,6 +273,12 @@ static void smsm_unmask_irq(struct irq_data *irqd)
4038 + struct qcom_smsm *smsm = entry->smsm;
4039 + u32 val;
4040 +
4041 ++ /* Make sure our last cached state is up-to-date */
4042 ++ if (readl(entry->remote_state) & BIT(irq))
4043 ++ set_bit(irq, &entry->last_value);
4044 ++ else
4045 ++ clear_bit(irq, &entry->last_value);
4046 ++
4047 + set_bit(irq, entry->irq_enabled);
4048 +
4049 + if (entry->subscription) {
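
The smsm hunks are a pair: last_value becomes an unsigned long so set_bit()/clear_bit() can refresh the cached snapshot when an interrupt is unmasked, and the handler now derives the changed bits through a single atomic xchg() rather than a separate read and store, so (as I read it) an unmask racing with the handler can no longer be lost between the two steps. The xchg idiom in isolation:

    #include <linux/atomic.h>
    #include <linux/types.h>

    static u32 example_changed_bits(unsigned long *last, u32 now)
    {
            /* Swap in the new snapshot and diff against the old one
             * in a single atomic step. */
            return now ^ (u32)xchg(last, (unsigned long)now);
    }
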
4050 +diff --git a/drivers/soc/rockchip/Kconfig b/drivers/soc/rockchip/Kconfig
4051 +index 20da55d9cbb1e..d483b0e29b81f 100644
4052 +--- a/drivers/soc/rockchip/Kconfig
4053 ++++ b/drivers/soc/rockchip/Kconfig
4054 +@@ -5,8 +5,8 @@ if ARCH_ROCKCHIP || COMPILE_TEST
4055 + #
4056 +
4057 + config ROCKCHIP_GRF
4058 +- bool
4059 +- default y
4060 ++ bool "Rockchip General Register Files support" if COMPILE_TEST
4061 ++ default y if ARCH_ROCKCHIP
4062 + help
4063 + The General Register Files are a central component providing
4064 + special additional settings registers for a lot of soc-components.
4065 +diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
4066 +index 25486ee8379b6..cfbf1ffb61bff 100644
4067 +--- a/drivers/spi/spi-fsl-dspi.c
4068 ++++ b/drivers/spi/spi-fsl-dspi.c
4069 +@@ -430,6 +430,7 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
4070 + goto err_rx_dma_buf;
4071 + }
4072 +
4073 ++ memset(&cfg, 0, sizeof(cfg));
4074 + cfg.src_addr = phy_addr + SPI_POPR;
4075 + cfg.dst_addr = phy_addr + SPI_PUSHR;
4076 + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
4077 +diff --git a/drivers/spi/spi-pic32.c b/drivers/spi/spi-pic32.c
4078 +index 661a40c653e90..d8cdb13ce3e4a 100644
4079 +--- a/drivers/spi/spi-pic32.c
4080 ++++ b/drivers/spi/spi-pic32.c
4081 +@@ -369,6 +369,7 @@ static int pic32_spi_dma_config(struct pic32_spi *pic32s, u32 dma_width)
4082 + struct dma_slave_config cfg;
4083 + int ret;
4084 +
4085 ++ memset(&cfg, 0, sizeof(cfg));
4086 + cfg.device_fc = true;
4087 + cfg.src_addr = pic32s->dma_base + buf_offset;
4088 + cfg.dst_addr = pic32s->dma_base + buf_offset;
4089 +diff --git a/drivers/spi/spi-sprd-adi.c b/drivers/spi/spi-sprd-adi.c
4090 +index e41976010dc48..97f44458ee7b7 100644
4091 +--- a/drivers/spi/spi-sprd-adi.c
4092 ++++ b/drivers/spi/spi-sprd-adi.c
4093 +@@ -99,7 +99,7 @@
4094 + #define HWRST_STATUS_SPRDISK 0xc0
4095 +
4096 + /* Use default timeout 50 ms that converts to watchdog values */
4097 +-#define WDG_LOAD_VAL ((50 * 1000) / 32768)
4098 ++#define WDG_LOAD_VAL ((50 * 32768) / 1000)
4099 + #define WDG_LOAD_MASK GENMASK(15, 0)
4100 + #define WDG_UNLOCK_KEY 0xe551
4101 +
4102 +diff --git a/drivers/staging/board/board.c b/drivers/staging/board/board.c
4103 +index cb6feb34dd401..f980af0373452 100644
4104 +--- a/drivers/staging/board/board.c
4105 ++++ b/drivers/staging/board/board.c
4106 +@@ -136,6 +136,7 @@ int __init board_staging_register_clock(const struct board_staging_clk *bsc)
4107 + static int board_staging_add_dev_domain(struct platform_device *pdev,
4108 + const char *domain)
4109 + {
4110 ++ struct device *dev = &pdev->dev;
4111 + struct of_phandle_args pd_args;
4112 + struct device_node *np;
4113 +
4114 +@@ -148,7 +149,11 @@ static int board_staging_add_dev_domain(struct platform_device *pdev,
4115 + pd_args.np = np;
4116 + pd_args.args_count = 0;
4117 +
4118 +- return of_genpd_add_device(&pd_args, &pdev->dev);
4119 ++ /* Initialization similar to device_pm_init_common() */
4120 ++ spin_lock_init(&dev->power.lock);
4121 ++ dev->power.early_init = true;
4122 ++
4123 ++ return of_genpd_add_device(&pd_args, dev);
4124 + }
4125 + #else
4126 + static inline int board_staging_add_dev_domain(struct platform_device *pdev,
4127 +diff --git a/drivers/staging/ks7010/ks7010_sdio.c b/drivers/staging/ks7010/ks7010_sdio.c
4128 +index 74551eb717fc7..79d0513bd2828 100644
4129 +--- a/drivers/staging/ks7010/ks7010_sdio.c
4130 ++++ b/drivers/staging/ks7010/ks7010_sdio.c
4131 +@@ -938,9 +938,9 @@ static void ks7010_private_init(struct ks_wlan_private *priv,
4132 + memset(&priv->wstats, 0, sizeof(priv->wstats));
4133 +
4134 + /* sleep mode */
4135 ++ atomic_set(&priv->sleepstatus.status, 0);
4136 + atomic_set(&priv->sleepstatus.doze_request, 0);
4137 + atomic_set(&priv->sleepstatus.wakeup_request, 0);
4138 +- atomic_set(&priv->sleepstatus.wakeup_request, 0);
4139 +
4140 + trx_device_init(priv);
4141 + hostif_init(priv);
4142 +diff --git a/drivers/staging/rts5208/rtsx_scsi.c b/drivers/staging/rts5208/rtsx_scsi.c
4143 +index c9a6d97938f63..68889d082c3c7 100644
4144 +--- a/drivers/staging/rts5208/rtsx_scsi.c
4145 ++++ b/drivers/staging/rts5208/rtsx_scsi.c
4146 +@@ -2841,10 +2841,10 @@ static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4147 + }
4148 +
4149 + if (dev_info_id == 0x15) {
4150 +- buf_len = 0x3A;
4151 ++ buf_len = 0x3C;
4152 + data_len = 0x3A;
4153 + } else {
4154 +- buf_len = 0x6A;
4155 ++ buf_len = 0x6C;
4156 + data_len = 0x6A;
4157 + }
4158 +
4159 +@@ -2895,11 +2895,7 @@ static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4160 + }
4161 +
4162 + rtsx_stor_set_xfer_buf(buf, buf_len, srb);
4163 +-
4164 +- if (dev_info_id == 0x15)
4165 +- scsi_set_resid(srb, scsi_bufflen(srb) - 0x3C);
4166 +- else
4167 +- scsi_set_resid(srb, scsi_bufflen(srb) - 0x6C);
4168 ++ scsi_set_resid(srb, scsi_bufflen(srb) - buf_len);
4169 +
4170 + kfree(buf);
4171 + return STATUS_SUCCESS;
4172 +diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
4173 +index 66f95f758be05..73226337f5610 100644
4174 +--- a/drivers/tty/hvc/hvsi.c
4175 ++++ b/drivers/tty/hvc/hvsi.c
4176 +@@ -1038,7 +1038,7 @@ static const struct tty_operations hvsi_ops = {
4177 +
4178 + static int __init hvsi_init(void)
4179 + {
4180 +- int i;
4181 ++ int i, ret;
4182 +
4183 + hvsi_driver = alloc_tty_driver(hvsi_count);
4184 + if (!hvsi_driver)
4185 +@@ -1069,12 +1069,25 @@ static int __init hvsi_init(void)
4186 + }
4187 + hvsi_wait = wait_for_state; /* irqs active now */
4188 +
4189 +- if (tty_register_driver(hvsi_driver))
4190 +- panic("Couldn't register hvsi console driver\n");
4191 ++ ret = tty_register_driver(hvsi_driver);
4192 ++ if (ret) {
4193 ++ pr_err("Couldn't register hvsi console driver\n");
4194 ++ goto err_free_irq;
4195 ++ }
4196 +
4197 + printk(KERN_DEBUG "HVSI: registered %i devices\n", hvsi_count);
4198 +
4199 + return 0;
4200 ++err_free_irq:
4201 ++ hvsi_wait = poll_for_state;
4202 ++ for (i = 0; i < hvsi_count; i++) {
4203 ++ struct hvsi_struct *hp = &hvsi_ports[i];
4204 ++
4205 ++ free_irq(hp->virq, hp);
4206 ++ }
4207 ++ tty_driver_kref_put(hvsi_driver);
4208 ++
4209 ++ return ret;
4210 + }
4211 + device_initcall(hvsi_init);
4212 +
4213 +diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
4214 +index 725e5842b8acc..f54c18e4ae909 100644
4215 +--- a/drivers/tty/serial/8250/8250_pci.c
4216 ++++ b/drivers/tty/serial/8250/8250_pci.c
4217 +@@ -70,7 +70,7 @@ static void moan_device(const char *str, struct pci_dev *dev)
4218 +
4219 + static int
4220 + setup_port(struct serial_private *priv, struct uart_8250_port *port,
4221 +- int bar, int offset, int regshift)
4222 ++ u8 bar, unsigned int offset, int regshift)
4223 + {
4224 + struct pci_dev *dev = priv->dev;
4225 +
4226 +diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
4227 +index 68f71298c11b5..39e821d6e5376 100644
4228 +--- a/drivers/tty/serial/8250/8250_port.c
4229 ++++ b/drivers/tty/serial/8250/8250_port.c
4230 +@@ -132,7 +132,8 @@ static const struct serial8250_config uart_config[] = {
4231 + .name = "16C950/954",
4232 + .fifo_size = 128,
4233 + .tx_loadsz = 128,
4234 +- .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
4235 ++ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01,
4236 ++ .rxtrig_bytes = {16, 32, 112, 120},
4237 + /* UART_CAP_EFR breaks billionon CF bluetooth card. */
4238 + .flags = UART_CAP_FIFO | UART_CAP_SLEEP,
4239 + },
4240 +diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
4241 +index deb9d4fa9cb09..b757fd1bdbfa5 100644
4242 +--- a/drivers/tty/serial/fsl_lpuart.c
4243 ++++ b/drivers/tty/serial/fsl_lpuart.c
4244 +@@ -2164,7 +2164,7 @@ static int lpuart_probe(struct platform_device *pdev)
4245 + return PTR_ERR(sport->port.membase);
4246 +
4247 + sport->port.membase += sdata->reg_off;
4248 +- sport->port.mapbase = res->start;
4249 ++ sport->port.mapbase = res->start + sdata->reg_off;
4250 + sport->port.dev = &pdev->dev;
4251 + sport->port.type = PORT_LPUART;
4252 + ret = platform_get_irq(pdev, 0);
4253 +diff --git a/drivers/tty/serial/jsm/jsm_neo.c b/drivers/tty/serial/jsm/jsm_neo.c
4254 +index bf0e2a4cb0cef..c6f927a76c3be 100644
4255 +--- a/drivers/tty/serial/jsm/jsm_neo.c
4256 ++++ b/drivers/tty/serial/jsm/jsm_neo.c
4257 +@@ -815,7 +815,9 @@ static void neo_parse_isr(struct jsm_board *brd, u32 port)
4258 + /* Parse any modem signal changes */
4259 + jsm_dbg(INTR, &ch->ch_bd->pci_dev,
4260 + "MOD_STAT: sending to parse_modem_sigs\n");
4261 ++ spin_lock_irqsave(&ch->uart_port.lock, lock_flags);
4262 + neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr));
4263 ++ spin_unlock_irqrestore(&ch->uart_port.lock, lock_flags);
4264 + }
4265 + }
4266 +
4267 +diff --git a/drivers/tty/serial/jsm/jsm_tty.c b/drivers/tty/serial/jsm/jsm_tty.c
4268 +index 689774c073ca4..8438454ca653f 100644
4269 +--- a/drivers/tty/serial/jsm/jsm_tty.c
4270 ++++ b/drivers/tty/serial/jsm/jsm_tty.c
4271 +@@ -187,6 +187,7 @@ static void jsm_tty_break(struct uart_port *port, int break_state)
4272 +
4273 + static int jsm_tty_open(struct uart_port *port)
4274 + {
4275 ++ unsigned long lock_flags;
4276 + struct jsm_board *brd;
4277 + struct jsm_channel *channel =
4278 + container_of(port, struct jsm_channel, uart_port);
4279 +@@ -240,6 +241,7 @@ static int jsm_tty_open(struct uart_port *port)
4280 + channel->ch_cached_lsr = 0;
4281 + channel->ch_stops_sent = 0;
4282 +
4283 ++ spin_lock_irqsave(&port->lock, lock_flags);
4284 + termios = &port->state->port.tty->termios;
4285 + channel->ch_c_cflag = termios->c_cflag;
4286 + channel->ch_c_iflag = termios->c_iflag;
4287 +@@ -259,6 +261,7 @@ static int jsm_tty_open(struct uart_port *port)
4288 + jsm_carrier(channel);
4289 +
4290 + channel->ch_open_count++;
4291 ++ spin_unlock_irqrestore(&port->lock, lock_flags);
4292 +
4293 + jsm_dbg(OPEN, &channel->ch_bd->pci_dev, "finish\n");
4294 + return 0;
4295 +diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
4296 +index db5b11879910c..6f44c5f0ef3aa 100644
4297 +--- a/drivers/tty/serial/sh-sci.c
4298 ++++ b/drivers/tty/serial/sh-sci.c
4299 +@@ -1750,6 +1750,10 @@ static irqreturn_t sci_br_interrupt(int irq, void *ptr)
4300 +
4301 + /* Handle BREAKs */
4302 + sci_handle_breaks(port);
4303 ++
4304 ++ /* drop invalid character received before break was detected */
4305 ++ serial_port_in(port, SCxRDR);
4306 ++
4307 + sci_clear_SCxSR(port, SCxSR_BREAK_CLEAR(port));
4308 +
4309 + return IRQ_HANDLED;
4310 +@@ -1829,7 +1833,8 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
4311 + ret = sci_er_interrupt(irq, ptr);
4312 +
4313 + /* Break Interrupt */
4314 +- if ((ssr_status & SCxSR_BRK(port)) && err_enabled)
4315 ++ if (s->irqs[SCIx_ERI_IRQ] != s->irqs[SCIx_BRI_IRQ] &&
4316 ++ (ssr_status & SCxSR_BRK(port)) && err_enabled)
4317 + ret = sci_br_interrupt(irq, ptr);
4318 +
4319 + /* Overrun Interrupt */
4320 +diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
4321 +index 9e9343adc2b46..b6f42d0ee6269 100644
4322 +--- a/drivers/tty/tty_io.c
4323 ++++ b/drivers/tty/tty_io.c
4324 +@@ -2173,8 +2173,6 @@ static int tty_fasync(int fd, struct file *filp, int on)
4325 + * Locking:
4326 + * Called functions take tty_ldiscs_lock
4327 + * current->signal->tty check is safe without locks
4328 +- *
4329 +- * FIXME: may race normal receive processing
4330 + */
4331 +
4332 + static int tiocsti(struct tty_struct *tty, char __user *p)
4333 +@@ -2190,8 +2188,10 @@ static int tiocsti(struct tty_struct *tty, char __user *p)
4334 + ld = tty_ldisc_ref_wait(tty);
4335 + if (!ld)
4336 + return -EIO;
4337 ++ tty_buffer_lock_exclusive(tty->port);
4338 + if (ld->ops->receive_buf)
4339 + ld->ops->receive_buf(tty, &ch, &mbz, 1);
4340 ++ tty_buffer_unlock_exclusive(tty->port);
4341 + tty_ldisc_deref(ld);
4342 + return 0;
4343 + }
4344 +diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
4345 +index d85bb3ba8263f..a76ed4acb5700 100644
4346 +--- a/drivers/usb/gadget/composite.c
4347 ++++ b/drivers/usb/gadget/composite.c
4348 +@@ -481,7 +481,7 @@ static u8 encode_bMaxPower(enum usb_device_speed speed,
4349 + {
4350 + unsigned val;
4351 +
4352 +- if (c->MaxPower)
4353 ++ if (c->MaxPower || (c->bmAttributes & USB_CONFIG_ATT_SELFPOWER))
4354 + val = c->MaxPower;
4355 + else
4356 + val = CONFIG_USB_GADGET_VBUS_DRAW;
4357 +@@ -891,7 +891,11 @@ static int set_config(struct usb_composite_dev *cdev,
4358 + }
4359 +
4360 + /* when we return, be sure our power usage is valid */
4361 +- power = c->MaxPower ? c->MaxPower : CONFIG_USB_GADGET_VBUS_DRAW;
4362 ++ if (c->MaxPower || (c->bmAttributes & USB_CONFIG_ATT_SELFPOWER))
4363 ++ power = c->MaxPower;
4364 ++ else
4365 ++ power = CONFIG_USB_GADGET_VBUS_DRAW;
4366 ++
4367 + if (gadget->speed < USB_SPEED_SUPER)
4368 + power = min(power, 500U);
4369 + else
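
For reference, bMaxPower is expressed in units of 2 mA below SuperSpeed and 8 mA at SuperSpeed, and 0 is a legitimate value for a self-powered configuration, which is why the fallback to CONFIG_USB_GADGET_VBUS_DRAW is now skipped when USB_CONFIG_ATT_SELFPOWER is set. A minimal userspace sketch of the unit conversion (the helper name is illustrative, not from the kernel):

#include <stdio.h>

/* bMaxPower is in 2 mA units below SuperSpeed and 8 mA units at
 * SuperSpeed; 0 is valid for a self-powered configuration. */
static unsigned int bmaxpower_to_ma(unsigned char bMaxPower, int superspeed)
{
	return bMaxPower * (superspeed ? 8u : 2u);
}

int main(void)
{
	printf("0x32 @ high speed: %u mA\n", bmaxpower_to_ma(0x32, 0)); /* 100 */
	printf("0x32 @ SuperSpeed: %u mA\n", bmaxpower_to_ma(0x32, 1)); /* 400 */
	return 0;
}
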
4370 +diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
4371 +index 156651df6b4da..d7a12161e5531 100644
4372 +--- a/drivers/usb/gadget/function/u_ether.c
4373 ++++ b/drivers/usb/gadget/function/u_ether.c
4374 +@@ -491,8 +491,9 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
4375 + }
4376 + spin_unlock_irqrestore(&dev->lock, flags);
4377 +
4378 +- if (skb && !in) {
4379 +- dev_kfree_skb_any(skb);
4380 ++ if (!in) {
4381 ++ if (skb)
4382 ++ dev_kfree_skb_any(skb);
4383 + return NETDEV_TX_OK;
4384 + }
4385 +
4386 +diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c
4387 +index 03959dc86cfd8..dd5cdcdfa4030 100644
4388 +--- a/drivers/usb/gadget/udc/at91_udc.c
4389 ++++ b/drivers/usb/gadget/udc/at91_udc.c
4390 +@@ -1879,7 +1879,9 @@ static int at91udc_probe(struct platform_device *pdev)
4391 + clk_disable(udc->iclk);
4392 +
4393 + /* request UDC and maybe VBUS irqs */
4394 +- udc->udp_irq = platform_get_irq(pdev, 0);
4395 ++ udc->udp_irq = retval = platform_get_irq(pdev, 0);
4396 ++ if (retval < 0)
4397 ++ goto err_unprepare_iclk;
4398 + retval = devm_request_irq(dev, udc->udp_irq, at91_udc_irq, 0,
4399 + driver_name, udc);
4400 + if (retval) {
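
Several hunks in this patch apply the same idiom: platform_get_irq() may fail with a negative errno (for example -EPROBE_DEFER or -ENXIO), and that value must be propagated before the number is ever handed to request_irq(). A hedged sketch of the idiom, with my_isr and my_priv as hypothetical stand-ins:

	int irq, ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* propagate -EPROBE_DEFER, -ENXIO, ... */

	ret = devm_request_irq(&pdev->dev, irq, my_isr, 0,
			       dev_name(&pdev->dev), my_priv);
	if (ret)
		return ret;
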
4401 +diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
4402 +index e174b1b889da5..d04de117bf639 100644
4403 +--- a/drivers/usb/gadget/udc/bdc/bdc_core.c
4404 ++++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
4405 +@@ -568,7 +568,8 @@ static int bdc_probe(struct platform_device *pdev)
4406 + if (ret) {
4407 + dev_err(dev,
4408 + "No suitable DMA config available, abort\n");
4409 +- return -ENOTSUPP;
4410 ++ ret = -ENOTSUPP;
4411 ++ goto phycleanup;
4412 + }
4413 + dev_dbg(dev, "Using 32-bit address\n");
4414 + }
4415 +diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c
4416 +index 35e02a8d0091a..bdba3f48c0527 100644
4417 +--- a/drivers/usb/gadget/udc/mv_u3d_core.c
4418 ++++ b/drivers/usb/gadget/udc/mv_u3d_core.c
4419 +@@ -1922,14 +1922,6 @@ static int mv_u3d_probe(struct platform_device *dev)
4420 + goto err_get_irq;
4421 + }
4422 + u3d->irq = r->start;
4423 +- if (request_irq(u3d->irq, mv_u3d_irq,
4424 +- IRQF_SHARED, driver_name, u3d)) {
4425 +- u3d->irq = 0;
4426 +- dev_err(&dev->dev, "Request irq %d for u3d failed\n",
4427 +- u3d->irq);
4428 +- retval = -ENODEV;
4429 +- goto err_request_irq;
4430 +- }
4431 +
4432 + /* initialize gadget structure */
4433 + u3d->gadget.ops = &mv_u3d_ops; /* usb_gadget_ops */
4434 +@@ -1942,6 +1934,15 @@ static int mv_u3d_probe(struct platform_device *dev)
4435 +
4436 + mv_u3d_eps_init(u3d);
4437 +
4438 ++ if (request_irq(u3d->irq, mv_u3d_irq,
4439 ++ IRQF_SHARED, driver_name, u3d)) {
4440 ++ u3d->irq = 0;
4441 ++ dev_err(&dev->dev, "Request irq %d for u3d failed\n",
4442 ++ u3d->irq);
4443 ++ retval = -ENODEV;
4444 ++ goto err_request_irq;
4445 ++ }
4446 ++
4447 + /* external vbus detection */
4448 + if (u3d->vbus) {
4449 + u3d->clock_gating = 1;
4450 +@@ -1965,8 +1966,8 @@ static int mv_u3d_probe(struct platform_device *dev)
4451 +
4452 + err_unregister:
4453 + free_irq(u3d->irq, u3d);
4454 +-err_request_irq:
4455 + err_get_irq:
4456 ++err_request_irq:
4457 + kfree(u3d->status_req);
4458 + err_alloc_status_req:
4459 + kfree(u3d->eps);
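
The reordering matters because the line is requested with IRQF_SHARED: a shared handler may be invoked as soon as request_irq() returns, even for another device's interrupt, so everything it dereferences must already be initialized. A condensed sketch of the safe ordering (function names are illustrative):

	/* 1. Set up everything the handler touches first ... */
	my_init_endpoints(u3d);		/* the handler walks the ep array */

	/* 2. ... only then expose the handler on the shared line. */
	ret = request_irq(u3d->irq, my_irq_handler, IRQF_SHARED,
			  driver_name, u3d);
	if (ret)
		goto err_request_irq;
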
4460 +diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
4461 +index 1ad72647a0691..da0f36af0b380 100644
4462 +--- a/drivers/usb/host/ehci-orion.c
4463 ++++ b/drivers/usb/host/ehci-orion.c
4464 +@@ -250,8 +250,11 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
4465 + * the clock does not exist.

4466 + */
4467 + priv->clk = devm_clk_get(&pdev->dev, NULL);
4468 +- if (!IS_ERR(priv->clk))
4469 +- clk_prepare_enable(priv->clk);
4470 ++ if (!IS_ERR(priv->clk)) {
4471 ++ err = clk_prepare_enable(priv->clk);
4472 ++ if (err)
4473 ++ goto err_put_hcd;
4474 ++ }
4475 +
4476 + priv->phy = devm_phy_optional_get(&pdev->dev, "usb");
4477 + if (IS_ERR(priv->phy)) {
4478 +@@ -312,6 +315,7 @@ err_phy_init:
4479 + err_phy_get:
4480 + if (!IS_ERR(priv->clk))
4481 + clk_disable_unprepare(priv->clk);
4482 ++err_put_hcd:
4483 + usb_put_hcd(hcd);
4484 + err:
4485 + dev_err(&pdev->dev, "init %s fail, %d\n",
4486 +diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
4487 +index 226b38274a6ef..1577424319613 100644
4488 +--- a/drivers/usb/host/fotg210-hcd.c
4489 ++++ b/drivers/usb/host/fotg210-hcd.c
4490 +@@ -2509,11 +2509,6 @@ retry_xacterr:
4491 + return count;
4492 + }
4493 +
4494 +-/* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */
4495 +-#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
4496 +-/* ... and packet size, for any kind of endpoint descriptor */
4497 +-#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
4498 +-
4499 + /* reverse of qh_urb_transaction: free a list of TDs.
4500 + * used for cleanup after errors, before HC sees an URB's TDs.
4501 + */
4502 +@@ -2599,7 +2594,7 @@ static struct list_head *qh_urb_transaction(struct fotg210_hcd *fotg210,
4503 + token |= (1 /* "in" */ << 8);
4504 + /* else it's already initted to "out" pid (0 << 8) */
4505 +
4506 +- maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));
4507 ++ maxpacket = usb_maxpacket(urb->dev, urb->pipe, !is_input);
4508 +
4509 + /*
4510 + * buffer gets wrapped in one or more qtds;
4511 +@@ -2713,9 +2708,11 @@ static struct fotg210_qh *qh_make(struct fotg210_hcd *fotg210, struct urb *urb,
4512 + gfp_t flags)
4513 + {
4514 + struct fotg210_qh *qh = fotg210_qh_alloc(fotg210, flags);
4515 ++ struct usb_host_endpoint *ep;
4516 + u32 info1 = 0, info2 = 0;
4517 + int is_input, type;
4518 + int maxp = 0;
4519 ++ int mult;
4520 + struct usb_tt *tt = urb->dev->tt;
4521 + struct fotg210_qh_hw *hw;
4522 +
4523 +@@ -2730,14 +2727,15 @@ static struct fotg210_qh *qh_make(struct fotg210_hcd *fotg210, struct urb *urb,
4524 +
4525 + is_input = usb_pipein(urb->pipe);
4526 + type = usb_pipetype(urb->pipe);
4527 +- maxp = usb_maxpacket(urb->dev, urb->pipe, !is_input);
4528 ++ ep = usb_pipe_endpoint(urb->dev, urb->pipe);
4529 ++ maxp = usb_endpoint_maxp(&ep->desc);
4530 ++ mult = usb_endpoint_maxp_mult(&ep->desc);
4531 +
4532 + /* 1024 byte maxpacket is a hardware ceiling. High bandwidth
4533 + * acts like up to 3KB, but is built from smaller packets.
4534 + */
4535 +- if (max_packet(maxp) > 1024) {
4536 +- fotg210_dbg(fotg210, "bogus qh maxpacket %d\n",
4537 +- max_packet(maxp));
4538 ++ if (maxp > 1024) {
4539 ++ fotg210_dbg(fotg210, "bogus qh maxpacket %d\n", maxp);
4540 + goto done;
4541 + }
4542 +
4543 +@@ -2751,8 +2749,7 @@ static struct fotg210_qh *qh_make(struct fotg210_hcd *fotg210, struct urb *urb,
4544 + */
4545 + if (type == PIPE_INTERRUPT) {
4546 + qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
4547 +- is_input, 0,
4548 +- hb_mult(maxp) * max_packet(maxp)));
4549 ++ is_input, 0, mult * maxp));
4550 + qh->start = NO_FRAME;
4551 +
4552 + if (urb->dev->speed == USB_SPEED_HIGH) {
4553 +@@ -2789,7 +2786,7 @@ static struct fotg210_qh *qh_make(struct fotg210_hcd *fotg210, struct urb *urb,
4554 + think_time = tt ? tt->think_time : 0;
4555 + qh->tt_usecs = NS_TO_US(think_time +
4556 + usb_calc_bus_time(urb->dev->speed,
4557 +- is_input, 0, max_packet(maxp)));
4558 ++ is_input, 0, maxp));
4559 + qh->period = urb->interval;
4560 + if (qh->period > fotg210->periodic_size) {
4561 + qh->period = fotg210->periodic_size;
4562 +@@ -2852,11 +2849,11 @@ static struct fotg210_qh *qh_make(struct fotg210_hcd *fotg210, struct urb *urb,
4563 + * to help them do so. So now people expect to use
4564 + * such nonconformant devices with Linux too; sigh.
4565 + */
4566 +- info1 |= max_packet(maxp) << 16;
4567 ++ info1 |= maxp << 16;
4568 + info2 |= (FOTG210_TUNE_MULT_HS << 30);
4569 + } else { /* PIPE_INTERRUPT */
4570 +- info1 |= max_packet(maxp) << 16;
4571 +- info2 |= hb_mult(maxp) << 30;
4572 ++ info1 |= maxp << 16;
4573 ++ info2 |= mult << 30;
4574 + }
4575 + break;
4576 + default:
4577 +@@ -3926,6 +3923,7 @@ static void iso_stream_init(struct fotg210_hcd *fotg210,
4578 + int is_input;
4579 + long bandwidth;
4580 + unsigned multi;
4581 ++ struct usb_host_endpoint *ep;
4582 +
4583 + /*
4584 + * this might be a "high bandwidth" highspeed endpoint,
4585 +@@ -3933,14 +3931,14 @@ static void iso_stream_init(struct fotg210_hcd *fotg210,
4586 + */
4587 + epnum = usb_pipeendpoint(pipe);
4588 + is_input = usb_pipein(pipe) ? USB_DIR_IN : 0;
4589 +- maxp = usb_maxpacket(dev, pipe, !is_input);
4590 ++ ep = usb_pipe_endpoint(dev, pipe);
4591 ++ maxp = usb_endpoint_maxp(&ep->desc);
4592 + if (is_input)
4593 + buf1 = (1 << 11);
4594 + else
4595 + buf1 = 0;
4596 +
4597 +- maxp = max_packet(maxp);
4598 +- multi = hb_mult(maxp);
4599 ++ multi = usb_endpoint_maxp_mult(&ep->desc);
4600 + buf1 |= maxp;
4601 + maxp *= multi;
4602 +
4603 +@@ -4461,13 +4459,12 @@ static bool itd_complete(struct fotg210_hcd *fotg210, struct fotg210_itd *itd)
4604 +
4605 + /* HC need not update length with this error */
4606 + if (!(t & FOTG210_ISOC_BABBLE)) {
4607 +- desc->actual_length =
4608 +- fotg210_itdlen(urb, desc, t);
4609 ++ desc->actual_length = FOTG210_ITD_LENGTH(t);
4610 + urb->actual_length += desc->actual_length;
4611 + }
4612 + } else if (likely((t & FOTG210_ISOC_ACTIVE) == 0)) {
4613 + desc->status = 0;
4614 +- desc->actual_length = fotg210_itdlen(urb, desc, t);
4615 ++ desc->actual_length = FOTG210_ITD_LENGTH(t);
4616 + urb->actual_length += desc->actual_length;
4617 + } else {
4618 + /* URB was too late */
4619 +diff --git a/drivers/usb/host/fotg210.h b/drivers/usb/host/fotg210.h
4620 +index 7fcd785c7bc85..0f1da9503bc67 100644
4621 +--- a/drivers/usb/host/fotg210.h
4622 ++++ b/drivers/usb/host/fotg210.h
4623 +@@ -683,11 +683,6 @@ static inline unsigned fotg210_read_frame_index(struct fotg210_hcd *fotg210)
4624 + return fotg210_readl(fotg210, &fotg210->regs->frame_index);
4625 + }
4626 +
4627 +-#define fotg210_itdlen(urb, desc, t) ({ \
4628 +- usb_pipein((urb)->pipe) ? \
4629 +- (desc)->length - FOTG210_ITD_LENGTH(t) : \
4630 +- FOTG210_ITD_LENGTH(t); \
4631 +-})
4632 + /*-------------------------------------------------------------------------*/
4633 +
4634 + #endif /* __LINUX_FOTG210_H */
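
The deleted max_packet()/hb_mult() macros open-coded the layout of wMaxPacketSize that usb_endpoint_maxp() and usb_endpoint_maxp_mult() now decode for the driver: bits 10..0 hold the packet size and bits 12..11 the number of additional transactions per microframe. A self-contained illustration:

#include <stdio.h>

/* wMaxPacketSize layout for high-speed, high-bandwidth endpoints:
 * bits 10..0 = max packet size, bits 12..11 = extra transactions
 * per microframe (so the multiplier is that field plus one). */
static unsigned int maxp(unsigned int w)    { return w & 0x7ff; }
static unsigned int hb_mult(unsigned int w) { return 1 + ((w >> 11) & 0x3); }

int main(void)
{
	unsigned int w = 0x1400;	/* 1024 bytes, 3 transactions/uframe */

	printf("maxp=%u mult=%u per-uframe=%u\n",
	       maxp(w), hb_mult(w), maxp(w) * hb_mult(w)); /* 1024 3 3072 */
	return 0;
}
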
4635 +diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
4636 +index a631dbb369d76..983a00e2988dc 100644
4637 +--- a/drivers/usb/host/ohci-tmio.c
4638 ++++ b/drivers/usb/host/ohci-tmio.c
4639 +@@ -199,6 +199,9 @@ static int ohci_hcd_tmio_drv_probe(struct platform_device *dev)
4640 + if (!cell)
4641 + return -EINVAL;
4642 +
4643 ++ if (irq < 0)
4644 ++ return irq;
4645 ++
4646 + hcd = usb_create_hcd(&ohci_tmio_hc_driver, &dev->dev, dev_name(&dev->dev));
4647 + if (!hcd) {
4648 + ret = -ENOMEM;
4649 +diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
4650 +index 2b0ccd150209f..4ebbe2c232926 100644
4651 +--- a/drivers/usb/host/xhci-rcar.c
4652 ++++ b/drivers/usb/host/xhci-rcar.c
4653 +@@ -143,6 +143,13 @@ static int xhci_rcar_download_firmware(struct usb_hcd *hcd)
4654 + const struct soc_device_attribute *attr;
4655 + const char *firmware_name;
4656 +
4657 ++ /*
4658 ++ * According to the datasheet, "Upon the completion of FW Download,
4659 ++ * there is no need to write or reload FW".
4660 ++ */
4661 ++ if (readl(regs + RCAR_USB3_DL_CTRL) & RCAR_USB3_DL_CTRL_FW_SUCCESS)
4662 ++ return 0;
4663 ++
4664 + attr = soc_device_match(rcar_quirks_match);
4665 + if (attr)
4666 + quirks = (uintptr_t)attr->data;
4667 +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
4668 +index c4e3760abd5b4..bebab0ec29786 100644
4669 +--- a/drivers/usb/host/xhci.c
4670 ++++ b/drivers/usb/host/xhci.c
4671 +@@ -4580,19 +4580,19 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
4672 + {
4673 + unsigned long long timeout_ns;
4674 +
4675 +- if (xhci->quirks & XHCI_INTEL_HOST)
4676 +- timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
4677 +- else
4678 +- timeout_ns = udev->u1_params.sel;
4679 +-
4680 + /* Prevent U1 if service interval is shorter than U1 exit latency */
4681 + if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
4682 +- if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
4683 ++ if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
4684 + dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
4685 + return USB3_LPM_DISABLED;
4686 + }
4687 + }
4688 +
4689 ++ if (xhci->quirks & XHCI_INTEL_HOST)
4690 ++ timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
4691 ++ else
4692 ++ timeout_ns = udev->u1_params.sel;
4693 ++
4694 + /* The U1 timeout is encoded in 1us intervals.
4695 + * Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
4696 + */
4697 +@@ -4644,19 +4644,19 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
4698 + {
4699 + unsigned long long timeout_ns;
4700 +
4701 +- if (xhci->quirks & XHCI_INTEL_HOST)
4702 +- timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
4703 +- else
4704 +- timeout_ns = udev->u2_params.sel;
4705 +-
4706 + /* Prevent U2 if service interval is shorter than U2 exit latency */
4707 + if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
4708 +- if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
4709 ++ if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
4710 + dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
4711 + return USB3_LPM_DISABLED;
4712 + }
4713 + }
4714 +
4715 ++ if (xhci->quirks & XHCI_INTEL_HOST)
4716 ++ timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
4717 ++ else
4718 ++ timeout_ns = udev->u2_params.sel;
4719 ++
4720 + /* The U2 timeout is encoded in 256us intervals */
4721 + timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
4722 + /* If the necessary timeout value is bigger than what we can set in the
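
Both hunks move the timeout computation below the periodic-endpoint check so that the service interval is compared against the exit latency (u1_params.mel/u2_params.mel) rather than against the not-yet-chosen timeout. A hedged restatement of the fixed control flow, with illustrative names and the quirk path and zero-timeout clamp omitted:

/* Returns a timeout in 1 us units, or -1 for "LPM disabled". */
static long long u1_timeout_us(unsigned long long esit_ns,
			       unsigned long long mel_ns,
			       unsigned long long sel_ns,
			       int periodic_endpoint)
{
	/* Never allow U1 if the endpoint is serviced more often than
	 * the link could even wake up from U1. */
	if (periodic_endpoint && esit_ns <= mel_ns)
		return -1;

	/* Only now pick the timeout; vendor quirks would slot in here. */
	return (long long)((sel_ns + 999) / 1000);
}
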
4723 +diff --git a/drivers/usb/mtu3/mtu3_gadget.c b/drivers/usb/mtu3/mtu3_gadget.c
4724 +index 3828ed6d38a21..270721cf229f7 100644
4725 +--- a/drivers/usb/mtu3/mtu3_gadget.c
4726 ++++ b/drivers/usb/mtu3/mtu3_gadget.c
4727 +@@ -69,14 +69,12 @@ static int mtu3_ep_enable(struct mtu3_ep *mep)
4728 + u32 interval = 0;
4729 + u32 mult = 0;
4730 + u32 burst = 0;
4731 +- int max_packet;
4732 + int ret;
4733 +
4734 + desc = mep->desc;
4735 + comp_desc = mep->comp_desc;
4736 + mep->type = usb_endpoint_type(desc);
4737 +- max_packet = usb_endpoint_maxp(desc);
4738 +- mep->maxp = max_packet & GENMASK(10, 0);
4739 ++ mep->maxp = usb_endpoint_maxp(desc);
4740 +
4741 + switch (mtu->g.speed) {
4742 + case USB_SPEED_SUPER:
4743 +@@ -97,7 +95,7 @@ static int mtu3_ep_enable(struct mtu3_ep *mep)
4744 + usb_endpoint_xfer_int(desc)) {
4745 + interval = desc->bInterval;
4746 + interval = clamp_val(interval, 1, 16) - 1;
4747 +- burst = (max_packet & GENMASK(12, 11)) >> 11;
4748 ++ mult = usb_endpoint_maxp_mult(desc) - 1;
4749 + }
4750 + break;
4751 + default:
4752 +diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
4753 +index 403eb97915f8a..2f6708b8b5c2f 100644
4754 +--- a/drivers/usb/musb/musb_dsps.c
4755 ++++ b/drivers/usb/musb/musb_dsps.c
4756 +@@ -892,23 +892,22 @@ static int dsps_probe(struct platform_device *pdev)
4757 + if (!glue->usbss_base)
4758 + return -ENXIO;
4759 +
4760 +- if (usb_get_dr_mode(&pdev->dev) == USB_DR_MODE_PERIPHERAL) {
4761 +- ret = dsps_setup_optional_vbus_irq(pdev, glue);
4762 +- if (ret)
4763 +- goto err_iounmap;
4764 +- }
4765 +-
4766 + platform_set_drvdata(pdev, glue);
4767 + pm_runtime_enable(&pdev->dev);
4768 + ret = dsps_create_musb_pdev(glue, pdev);
4769 + if (ret)
4770 + goto err;
4771 +
4772 ++ if (usb_get_dr_mode(&pdev->dev) == USB_DR_MODE_PERIPHERAL) {
4773 ++ ret = dsps_setup_optional_vbus_irq(pdev, glue);
4774 ++ if (ret)
4775 ++ goto err;
4776 ++ }
4777 ++
4778 + return 0;
4779 +
4780 + err:
4781 + pm_runtime_disable(&pdev->dev);
4782 +-err_iounmap:
4783 + iounmap(glue->usbss_base);
4784 + return ret;
4785 + }
4786 +diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c
4787 +index f7c96d209eda7..981db219234e4 100644
4788 +--- a/drivers/usb/phy/phy-fsl-usb.c
4789 ++++ b/drivers/usb/phy/phy-fsl-usb.c
4790 +@@ -873,6 +873,8 @@ int usb_otg_start(struct platform_device *pdev)
4791 +
4792 + /* request irq */
4793 + p_otg->irq = platform_get_irq(pdev, 0);
4794 ++ if (p_otg->irq < 0)
4795 ++ return p_otg->irq;
4796 + status = request_irq(p_otg->irq, fsl_otg_isr,
4797 + IRQF_SHARED, driver_name, p_otg);
4798 + if (status) {
4799 +diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c
4800 +index 0981abc3d1adc..60d390e28289f 100644
4801 +--- a/drivers/usb/phy/phy-tahvo.c
4802 ++++ b/drivers/usb/phy/phy-tahvo.c
4803 +@@ -396,7 +396,9 @@ static int tahvo_usb_probe(struct platform_device *pdev)
4804 +
4805 + dev_set_drvdata(&pdev->dev, tu);
4806 +
4807 +- tu->irq = platform_get_irq(pdev, 0);
4808 ++ tu->irq = ret = platform_get_irq(pdev, 0);
4809 ++ if (ret < 0)
4810 ++ return ret;
4811 + ret = request_threaded_irq(tu->irq, NULL, tahvo_usb_vbus_interrupt,
4812 + IRQF_ONESHOT,
4813 + "tahvo-vbus", tu);
4814 +diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
4815 +index dade34d704198..859af6113b450 100644
4816 +--- a/drivers/usb/phy/phy-twl6030-usb.c
4817 ++++ b/drivers/usb/phy/phy-twl6030-usb.c
4818 +@@ -342,6 +342,11 @@ static int twl6030_usb_probe(struct platform_device *pdev)
4819 + twl->irq2 = platform_get_irq(pdev, 1);
4820 + twl->linkstat = MUSB_UNKNOWN;
4821 +
4822 ++ if (twl->irq1 < 0)
4823 ++ return twl->irq1;
4824 ++ if (twl->irq2 < 0)
4825 ++ return twl->irq2;
4826 ++
4827 + twl->comparator.set_vbus = twl6030_set_vbus;
4828 + twl->comparator.start_srp = twl6030_start_srp;
4829 +
4830 +diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
4831 +index 1e3ee2bfbcd03..d0c8d3800254d 100644
4832 +--- a/drivers/usb/serial/mos7720.c
4833 ++++ b/drivers/usb/serial/mos7720.c
4834 +@@ -226,8 +226,10 @@ static int read_mos_reg(struct usb_serial *serial, unsigned int serial_portnum,
4835 + int status;
4836 +
4837 + buf = kmalloc(1, GFP_KERNEL);
4838 +- if (!buf)
4839 ++ if (!buf) {
4840 ++ *data = 0;
4841 + return -ENOMEM;
4842 ++ }
4843 +
4844 + status = usb_control_msg(usbdev, pipe, request, requesttype, value,
4845 + index, buf, 1, MOS_WDR_TIMEOUT);
4846 +diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
4847 +index b13ed5a7618d8..202dc76f7beb9 100644
4848 +--- a/drivers/usb/usbip/vhci_hcd.c
4849 ++++ b/drivers/usb/usbip/vhci_hcd.c
4850 +@@ -455,8 +455,14 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
4851 + vhci_hcd->port_status[rhport] &= ~(1 << USB_PORT_FEAT_RESET);
4852 + vhci_hcd->re_timeout = 0;
4853 +
4854 ++ /*
4855 ++ * A few drivers issue a USB reset during probe, when
4856 ++ * the device may be in the VDEV_ST_USED state

4857 ++ */
4858 + if (vhci_hcd->vdev[rhport].ud.status ==
4859 +- VDEV_ST_NOTASSIGNED) {
4860 ++ VDEV_ST_NOTASSIGNED ||
4861 ++ vhci_hcd->vdev[rhport].ud.status ==
4862 ++ VDEV_ST_USED) {
4863 + usbip_dbg_vhci_rh(
4864 + " enable rhport %d (status %u)\n",
4865 + rhport,
4866 +@@ -957,8 +963,32 @@ static void vhci_device_unlink_cleanup(struct vhci_device *vdev)
4867 + spin_lock(&vdev->priv_lock);
4868 +
4869 + list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) {
4870 ++ struct urb *urb;
4871 ++
4872 ++ /* give back urb of unsent unlink request */
4873 + pr_info("unlink cleanup tx %lu\n", unlink->unlink_seqnum);
4874 ++
4875 ++ urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum);
4876 ++ if (!urb) {
4877 ++ list_del(&unlink->list);
4878 ++ kfree(unlink);
4879 ++ continue;
4880 ++ }
4881 ++
4882 ++ urb->status = -ENODEV;
4883 ++
4884 ++ usb_hcd_unlink_urb_from_ep(hcd, urb);
4885 ++
4886 + list_del(&unlink->list);
4887 ++
4888 ++ spin_unlock(&vdev->priv_lock);
4889 ++ spin_unlock_irqrestore(&vhci->lock, flags);
4890 ++
4891 ++ usb_hcd_giveback_urb(hcd, urb, urb->status);
4892 ++
4893 ++ spin_lock_irqsave(&vhci->lock, flags);
4894 ++ spin_lock(&vdev->priv_lock);
4895 ++
4896 + kfree(unlink);
4897 + }
4898 +
4899 +diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig
4900 +index c84333eb5eb59..b7765271d0fb6 100644
4901 +--- a/drivers/vfio/Kconfig
4902 ++++ b/drivers/vfio/Kconfig
4903 +@@ -29,7 +29,7 @@ menuconfig VFIO
4904 +
4905 + If you don't know what to do here, say N.
4906 +
4907 +-menuconfig VFIO_NOIOMMU
4908 ++config VFIO_NOIOMMU
4909 + bool "VFIO No-IOMMU support"
4910 + depends on VFIO
4911 + help
4912 +diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
4913 +index 3a3098d4873be..6a8dd2f16b780 100644
4914 +--- a/drivers/video/backlight/pwm_bl.c
4915 ++++ b/drivers/video/backlight/pwm_bl.c
4916 +@@ -400,6 +400,33 @@ int pwm_backlight_brightness_default(struct device *dev,
4917 + static int pwm_backlight_initial_power_state(const struct pwm_bl_data *pb)
4918 + {
4919 + struct device_node *node = pb->dev->of_node;
4920 ++ bool active = true;
4921 ++
4922 ++ /*
4923 ++ * If the enable GPIO is present, observable (either as input
4924 ++ * or output) and off, then the backlight is not currently active.
4925 ++ */
4926 ++ if (pb->enable_gpio && gpiod_get_value_cansleep(pb->enable_gpio) == 0)
4927 ++ active = false;
4928 ++
4929 ++ if (!regulator_is_enabled(pb->power_supply))
4930 ++ active = false;
4931 ++
4932 ++ if (!pwm_is_enabled(pb->pwm))
4933 ++ active = false;
4934 ++
4935 ++ /*
4936 ++ * Synchronize the enable_gpio with the observed state of the
4937 ++ * hardware.
4938 ++ */
4939 ++ if (pb->enable_gpio)
4940 ++ gpiod_direction_output(pb->enable_gpio, active);
4941 ++
4942 ++ /*
4943 ++ * Do not change pb->enabled here! pb->enabled essentially
4944 ++ * tells us if we own one of the regulator's use counts and
4945 ++ * right now we do not.
4946 ++ */
4947 +
4948 + /* Not booted with device tree or no phandle link to the node */
4949 + if (!node || !node->phandle)
4950 +@@ -411,20 +438,7 @@ static int pwm_backlight_initial_power_state(const struct pwm_bl_data *pb)
4951 + * assume that another driver will enable the backlight at the
4952 + * appropriate time. Therefore, if it is disabled, keep it so.
4953 + */
4954 +-
4955 +- /* if the enable GPIO is disabled, do not enable the backlight */
4956 +- if (pb->enable_gpio && gpiod_get_value_cansleep(pb->enable_gpio) == 0)
4957 +- return FB_BLANK_POWERDOWN;
4958 +-
4959 +- /* The regulator is disabled, do not enable the backlight */
4960 +- if (!regulator_is_enabled(pb->power_supply))
4961 +- return FB_BLANK_POWERDOWN;
4962 +-
4963 +- /* The PWM is disabled, keep it like this */
4964 +- if (!pwm_is_enabled(pb->pwm))
4965 +- return FB_BLANK_POWERDOWN;
4966 +-
4967 +- return FB_BLANK_UNBLANK;
4968 ++ return active ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
4969 + }
4970 +
4971 + static int pwm_backlight_probe(struct platform_device *pdev)
4972 +@@ -494,18 +508,6 @@ static int pwm_backlight_probe(struct platform_device *pdev)
4973 + pb->enable_gpio = gpio_to_desc(data->enable_gpio);
4974 + }
4975 +
4976 +- /*
4977 +- * If the GPIO is not known to be already configured as output, that
4978 +- * is, if gpiod_get_direction returns either 1 or -EINVAL, change the
4979 +- * direction to output and set the GPIO as active.
4980 +- * Do not force the GPIO to active when it was already output as it
4981 +- * could cause backlight flickering or we would enable the backlight too
4982 +- * early. Leave the decision of the initial backlight state for later.
4983 +- */
4984 +- if (pb->enable_gpio &&
4985 +- gpiod_get_direction(pb->enable_gpio) != 0)
4986 +- gpiod_direction_output(pb->enable_gpio, 1);
4987 +-
4988 + pb->power_supply = devm_regulator_get(&pdev->dev, "power");
4989 + if (IS_ERR(pb->power_supply)) {
4990 + ret = PTR_ERR(pb->power_supply);
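
The rewrite folds the three early returns into a single active flag so that, in addition to picking the initial power state, the enable GPIO's direction can be synchronized with the observed hardware state. A condensed boolean restatement, with plain bools standing in for the gpiod/regulator/pwm queries:

#include <stdbool.h>
#include <stdio.h>

static bool backlight_active(bool gpio_present, bool gpio_high,
			     bool supply_enabled, bool pwm_enabled)
{
	bool active = true;

	if (gpio_present && !gpio_high)
		active = false;
	if (!supply_enabled || !pwm_enabled)
		active = false;
	return active;		/* FB_BLANK_UNBLANK iff true */
}

int main(void)
{
	/* GPIO present and high, but PWM off: not active. */
	printf("%d\n", backlight_active(true, true, true, false)); /* 0 */
	return 0;
}
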
4991 +diff --git a/drivers/video/fbdev/asiliantfb.c b/drivers/video/fbdev/asiliantfb.c
4992 +index ea31054a28ca8..c1d6e63362259 100644
4993 +--- a/drivers/video/fbdev/asiliantfb.c
4994 ++++ b/drivers/video/fbdev/asiliantfb.c
4995 +@@ -227,6 +227,9 @@ static int asiliantfb_check_var(struct fb_var_screeninfo *var,
4996 + {
4997 + unsigned long Ftarget, ratio, remainder;
4998 +
4999 ++ if (!var->pixclock)
5000 ++ return -EINVAL;
5001 ++
5002 + ratio = 1000000 / var->pixclock;
5003 + remainder = 1000000 % var->pixclock;
5004 + Ftarget = 1000000 * ratio + (1000000 * remainder) / var->pixclock;
5005 +diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
5006 +index de04c097d67c7..2297dfb494d6a 100644
5007 +--- a/drivers/video/fbdev/core/fbmem.c
5008 ++++ b/drivers/video/fbdev/core/fbmem.c
5009 +@@ -971,6 +971,7 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
5010 + if ((var->activate & FB_ACTIVATE_FORCE) ||
5011 + memcmp(&info->var, var, sizeof(struct fb_var_screeninfo))) {
5012 + u32 activate = var->activate;
5013 ++ u32 unused;
5014 +
5015 + /* When using FOURCC mode, make sure the red, green, blue and
5016 + * transp fields are set to 0.
5017 +@@ -995,6 +996,11 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
5018 + if (var->xres < 8 || var->yres < 8)
5019 + return -EINVAL;
5020 +
5021 ++ /* Too huge resolution causes multiplication overflow. */
5022 ++ if (check_mul_overflow(var->xres, var->yres, &unused) ||
5023 ++ check_mul_overflow(var->xres_virtual, var->yres_virtual, &unused))
5024 ++ return -EINVAL;
5025 ++
5026 + ret = info->fbops->fb_check_var(var, info);
5027 +
5028 + if (ret)
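
check_mul_overflow() rejects the product before it can wrap; the same guard is available in userspace as the compiler builtin the kernel helper is built on. A minimal sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t xres = 0x10000, yres = 0x10000, pixels;

	/* Same guard the hunk adds: refuse sizes whose product does
	 * not fit in u32 instead of silently wrapping. */
	if (__builtin_mul_overflow(xres, yres, &pixels))
		puts("-EINVAL: resolution overflows u32");
	else
		printf("%u pixels\n", pixels);
	return 0;
}
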
5029 +diff --git a/drivers/video/fbdev/kyro/fbdev.c b/drivers/video/fbdev/kyro/fbdev.c
5030 +index a7bd9f25911b5..74bf26b527b91 100644
5031 +--- a/drivers/video/fbdev/kyro/fbdev.c
5032 ++++ b/drivers/video/fbdev/kyro/fbdev.c
5033 +@@ -372,6 +372,11 @@ static int kyro_dev_overlay_viewport_set(u32 x, u32 y, u32 ulWidth, u32 ulHeight
5034 + /* probably haven't called CreateOverlay yet */
5035 + return -EINVAL;
5036 +
5037 ++ if (ulWidth == 0 || ulWidth == 0xffffffff ||
5038 ++ ulHeight == 0 || ulHeight == 0xffffffff ||
5039 ++ (x < 2 && ulWidth + 2 == 0))
5040 ++ return -EINVAL;
5041 ++
5042 + /* Stop Ramdac Output */
5043 + DisableRamdacOutput(deviceInfo.pSTGReg);
5044 +
5045 +@@ -394,6 +399,9 @@ static int kyrofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
5046 + {
5047 + struct kyrofb_info *par = info->par;
5048 +
5049 ++ if (!var->pixclock)
5050 ++ return -EINVAL;
5051 ++
5052 + if (var->bits_per_pixel != 16 && var->bits_per_pixel != 32) {
5053 + printk(KERN_WARNING "kyrofb: depth not supported: %u\n", var->bits_per_pixel);
5054 + return -EINVAL;
5055 +diff --git a/drivers/video/fbdev/riva/fbdev.c b/drivers/video/fbdev/riva/fbdev.c
5056 +index cc242ba057d3e..dfa81b641f9fe 100644
5057 +--- a/drivers/video/fbdev/riva/fbdev.c
5058 ++++ b/drivers/video/fbdev/riva/fbdev.c
5059 +@@ -1088,6 +1088,9 @@ static int rivafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
5060 + int mode_valid = 0;
5061 +
5062 + NVTRACE_ENTER();
5063 ++ if (!var->pixclock)
5064 ++ return -EINVAL;
5065 ++
5066 + switch (var->bits_per_pixel) {
5067 + case 1 ... 8:
5068 + var->red.offset = var->green.offset = var->blue.offset = 0;
5069 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
5070 +index 6f02a3f77fa83..f314b2c2d1487 100644
5071 +--- a/fs/btrfs/inode.c
5072 ++++ b/fs/btrfs/inode.c
5073 +@@ -530,7 +530,7 @@ again:
5074 + * inode has not been flagged as nocompress. This flag can
5075 + * change at any time if we discover bad compression ratios.
5076 + */
5077 +- if (nr_pages > 1 && inode_need_compress(inode, start, end)) {
5078 ++ if (inode_need_compress(inode, start, end)) {
5079 + WARN_ON(pages);
5080 + pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
5081 + if (!pages) {
5082 +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
5083 +index 5a19f5ee70386..6cb4896256106 100644
5084 +--- a/fs/btrfs/volumes.c
5085 ++++ b/fs/btrfs/volumes.c
5086 +@@ -1048,6 +1048,9 @@ static void btrfs_close_one_device(struct btrfs_device *device)
5087 + fs_devices->rw_devices--;
5088 + }
5089 +
5090 ++ if (device->devid == BTRFS_DEV_REPLACE_DEVID)
5091 ++ clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
5092 ++
5093 + if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
5094 + fs_devices->missing_devices--;
5095 +
5096 +diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
5097 +index 9986817532b10..7932e20555d2b 100644
5098 +--- a/fs/cifs/cifs_unicode.c
5099 ++++ b/fs/cifs/cifs_unicode.c
5100 +@@ -371,14 +371,9 @@ cifs_strndup_from_utf16(const char *src, const int maxlen,
5101 + if (!dst)
5102 + return NULL;
5103 + cifs_from_utf16(dst, (__le16 *) src, len, maxlen, codepage,
5104 +- NO_MAP_UNI_RSVD);
5105 ++ NO_MAP_UNI_RSVD);
5106 + } else {
5107 +- len = strnlen(src, maxlen);
5108 +- len++;
5109 +- dst = kmalloc(len, GFP_KERNEL);
5110 +- if (!dst)
5111 +- return NULL;
5112 +- strlcpy(dst, src, len);
5113 ++ dst = kstrndup(src, maxlen, GFP_KERNEL);
5114 + }
5115 +
5116 + return dst;
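
kstrndup() performs the bounded duplicate-and-terminate that the removed strnlen()/kmalloc()/strlcpy() sequence spelled out by hand. Its userspace analogue is strndup():

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *src = "not-unicode-source-string";
	char *dst = strndup(src, 11);	/* copy at most 11 chars + NUL */

	if (!dst)
		return 1;
	printf("%s\n", dst);		/* "not-unicode" */
	free(dst);
	return 0;
}
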
5117 +diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
5118 +index aa23c00367ec4..0113dba28eb09 100644
5119 +--- a/fs/cifs/sess.c
5120 ++++ b/fs/cifs/sess.c
5121 +@@ -602,7 +602,7 @@ sess_alloc_buffer(struct sess_data *sess_data, int wct)
5122 + return 0;
5123 +
5124 + out_free_smb_buf:
5125 +- kfree(smb_buf);
5126 ++ cifs_small_buf_release(smb_buf);
5127 + sess_data->iov[0].iov_base = NULL;
5128 + sess_data->iov[0].iov_len = 0;
5129 + sess_data->buf0_type = CIFS_NO_BUFFER;
5130 +diff --git a/fs/crypto/hooks.c b/fs/crypto/hooks.c
5131 +index aa86cb2db8236..ae7a413c0bdde 100644
5132 +--- a/fs/crypto/hooks.c
5133 ++++ b/fs/crypto/hooks.c
5134 +@@ -279,3 +279,47 @@ err_kfree:
5135 + return ERR_PTR(err);
5136 + }
5137 + EXPORT_SYMBOL_GPL(fscrypt_get_symlink);
5138 ++
5139 ++/**
5140 ++ * fscrypt_symlink_getattr() - set the correct st_size for encrypted symlinks
5141 ++ * @path: the path for the encrypted symlink being queried
5142 ++ * @stat: the struct being filled with the symlink's attributes
5143 ++ *
5144 ++ * Override st_size of encrypted symlinks to be the length of the decrypted
5145 ++ * symlink target (or the no-key encoded symlink target, if the key is
5146 ++ * unavailable) rather than the length of the encrypted symlink target. This is
5147 ++ * necessary for st_size to match the symlink target that userspace actually
5148 ++ * sees. POSIX requires this, and some userspace programs depend on it.
5149 ++ *
5150 ++ * This requires reading the symlink target from disk if needed, setting up the
5151 ++ * inode's encryption key if possible, and then decrypting or encoding the
5152 ++ * symlink target. This makes lstat() more heavyweight than is normally the
5153 ++ * case. However, decrypted symlink targets will be cached in ->i_link, so
5154 ++ * usually the symlink won't have to be read and decrypted again later if/when
5155 ++ * it is actually followed, readlink() is called, or lstat() is called again.
5156 ++ *
5157 ++ * Return: 0 on success, -errno on failure
5158 ++ */
5159 ++int fscrypt_symlink_getattr(const struct path *path, struct kstat *stat)
5160 ++{
5161 ++ struct dentry *dentry = path->dentry;
5162 ++ struct inode *inode = d_inode(dentry);
5163 ++ const char *link;
5164 ++ DEFINE_DELAYED_CALL(done);
5165 ++
5166 ++ /*
5167 ++ * To get the symlink target that userspace will see (whether it's the
5168 ++ * decrypted target or the no-key encoded target), we can just get it in
5169 ++ * the same way the VFS does during path resolution and readlink().
5170 ++ */
5171 ++ link = READ_ONCE(inode->i_link);
5172 ++ if (!link) {
5173 ++ link = inode->i_op->get_link(dentry, inode, &done);
5174 ++ if (IS_ERR(link))
5175 ++ return PTR_ERR(link);
5176 ++ }
5177 ++ stat->size = strlen(link);
5178 ++ do_delayed_call(&done);
5179 ++ return 0;
5180 ++}
5181 ++EXPORT_SYMBOL_GPL(fscrypt_symlink_getattr);
5182 +diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
5183 +index c952461876595..c64e4a1be2f29 100644
5184 +--- a/fs/ext4/inline.c
5185 ++++ b/fs/ext4/inline.c
5186 +@@ -750,6 +750,12 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
5187 + ext4_write_lock_xattr(inode, &no_expand);
5188 + BUG_ON(!ext4_has_inline_data(inode));
5189 +
5190 ++ /*
5191 ++ * ei->i_inline_off may have changed since ext4_write_begin()
5192 ++ * called ext4_try_to_write_inline_data()
5193 ++ */
5194 ++ (void) ext4_find_inline_data_nolock(inode);
5195 ++
5196 + kaddr = kmap_atomic(page);
5197 + ext4_write_inline_data(inode, &iloc, kaddr, pos, len);
5198 + kunmap_atomic(kaddr);
5199 +diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c
5200 +index dd05af983092d..a9457fed351ed 100644
5201 +--- a/fs/ext4/symlink.c
5202 ++++ b/fs/ext4/symlink.c
5203 +@@ -52,10 +52,19 @@ static const char *ext4_encrypted_get_link(struct dentry *dentry,
5204 + return paddr;
5205 + }
5206 +
5207 ++static int ext4_encrypted_symlink_getattr(const struct path *path,
5208 ++ struct kstat *stat, u32 request_mask,
5209 ++ unsigned int query_flags)
5210 ++{
5211 ++ ext4_getattr(path, stat, request_mask, query_flags);
5212 ++
5213 ++ return fscrypt_symlink_getattr(path, stat);
5214 ++}
5215 ++
5216 + const struct inode_operations ext4_encrypted_symlink_inode_operations = {
5217 + .get_link = ext4_encrypted_get_link,
5218 + .setattr = ext4_setattr,
5219 +- .getattr = ext4_getattr,
5220 ++ .getattr = ext4_encrypted_symlink_getattr,
5221 + .listxattr = ext4_listxattr,
5222 + };
5223 +
5224 +diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
5225 +index 95330dfdbb1af..2a7249496c57e 100644
5226 +--- a/fs/f2fs/file.c
5227 ++++ b/fs/f2fs/file.c
5228 +@@ -957,7 +957,6 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
5229 + }
5230 +
5231 + if (pg_start < pg_end) {
5232 +- struct address_space *mapping = inode->i_mapping;
5233 + loff_t blk_start, blk_end;
5234 + struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
5235 +
5236 +@@ -969,8 +968,7 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
5237 + down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
5238 + down_write(&F2FS_I(inode)->i_mmap_sem);
5239 +
5240 +- truncate_inode_pages_range(mapping, blk_start,
5241 +- blk_end - 1);
5242 ++ truncate_pagecache_range(inode, blk_start, blk_end - 1);
5243 +
5244 + f2fs_lock_op(sbi);
5245 + ret = f2fs_truncate_hole(inode, pg_start, pg_end);
5246 +diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
5247 +index 700c39ec99f5a..38299ccfdf6ef 100644
5248 +--- a/fs/f2fs/gc.c
5249 ++++ b/fs/f2fs/gc.c
5250 +@@ -998,8 +998,10 @@ next_step:
5251 + bool locked = false;
5252 +
5253 + if (S_ISREG(inode->i_mode)) {
5254 +- if (!down_write_trylock(&fi->i_gc_rwsem[READ]))
5255 ++ if (!down_write_trylock(&fi->i_gc_rwsem[READ])) {
5256 ++ sbi->skipped_gc_rwsem++;
5257 + continue;
5258 ++ }
5259 + if (!down_write_trylock(
5260 + &fi->i_gc_rwsem[WRITE])) {
5261 + sbi->skipped_gc_rwsem++;
5262 +diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
5263 +index e20a0f9e68455..edc80855974ac 100644
5264 +--- a/fs/f2fs/namei.c
5265 ++++ b/fs/f2fs/namei.c
5266 +@@ -1219,9 +1219,18 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
5267 + return target;
5268 + }
5269 +
5270 ++static int f2fs_encrypted_symlink_getattr(const struct path *path,
5271 ++ struct kstat *stat, u32 request_mask,
5272 ++ unsigned int query_flags)
5273 ++{
5274 ++ f2fs_getattr(path, stat, request_mask, query_flags);
5275 ++
5276 ++ return fscrypt_symlink_getattr(path, stat);
5277 ++}
5278 ++
5279 + const struct inode_operations f2fs_encrypted_symlink_inode_operations = {
5280 + .get_link = f2fs_encrypted_get_link,
5281 +- .getattr = f2fs_getattr,
5282 ++ .getattr = f2fs_encrypted_symlink_getattr,
5283 + .setattr = f2fs_setattr,
5284 + #ifdef CONFIG_F2FS_FS_XATTR
5285 + .listxattr = f2fs_listxattr,
5286 +diff --git a/fs/fcntl.c b/fs/fcntl.c
5287 +index e039af1872ab2..dffb5245ae728 100644
5288 +--- a/fs/fcntl.c
5289 ++++ b/fs/fcntl.c
5290 +@@ -993,13 +993,14 @@ static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
5291 + {
5292 + while (fa) {
5293 + struct fown_struct *fown;
5294 ++ unsigned long flags;
5295 +
5296 + if (fa->magic != FASYNC_MAGIC) {
5297 + printk(KERN_ERR "kill_fasync: bad magic number in "
5298 + "fasync_struct!\n");
5299 + return;
5300 + }
5301 +- read_lock(&fa->fa_lock);
5302 ++ read_lock_irqsave(&fa->fa_lock, flags);
5303 + if (fa->fa_file) {
5304 + fown = &fa->fa_file->f_owner;
5305 + /* Don't send SIGURG to processes which have not set a
5306 +@@ -1008,7 +1009,7 @@ static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
5307 + if (!(sig == SIGURG && fown->signum == 0))
5308 + send_sigio(fown, fa->fa_fd, band);
5309 + }
5310 +- read_unlock(&fa->fa_lock);
5311 ++ read_unlock_irqrestore(&fa->fa_lock, flags);
5312 + fa = rcu_dereference(fa->fa_next);
5313 + }
5314 + }
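
The switch to the _irqsave variants is needed because kill_fasync() can be called from interrupt context: if a CPU took fa_lock with interrupts enabled and that interrupt then fired on the same CPU, the handler would spin forever on a lock its own CPU already holds. The resulting idiom, restated from the hunk:

	unsigned long flags;

	read_lock_irqsave(&fa->fa_lock, flags);	/* IRQs off on this CPU */
	/* ... deliver SIGIO/SIGURG while the entry cannot go away ... */
	read_unlock_irqrestore(&fa->fa_lock, flags);	/* restore IRQ state */
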
5315 +diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
5316 +index c550512ce3350..2ff05adfc22a4 100644
5317 +--- a/fs/fscache/cookie.c
5318 ++++ b/fs/fscache/cookie.c
5319 +@@ -78,10 +78,8 @@ void fscache_free_cookie(struct fscache_cookie *cookie)
5320 + static int fscache_set_key(struct fscache_cookie *cookie,
5321 + const void *index_key, size_t index_key_len)
5322 + {
5323 +- unsigned long long h;
5324 + u32 *buf;
5325 + int bufs;
5326 +- int i;
5327 +
5328 + bufs = DIV_ROUND_UP(index_key_len, sizeof(*buf));
5329 +
5330 +@@ -95,17 +93,7 @@ static int fscache_set_key(struct fscache_cookie *cookie,
5331 + }
5332 +
5333 + memcpy(buf, index_key, index_key_len);
5334 +-
5335 +- /* Calculate a hash and combine this with the length in the first word
5336 +- * or first half word
5337 +- */
5338 +- h = (unsigned long)cookie->parent;
5339 +- h += index_key_len + cookie->type;
5340 +-
5341 +- for (i = 0; i < bufs; i++)
5342 +- h += buf[i];
5343 +-
5344 +- cookie->key_hash = h ^ (h >> 32);
5345 ++ cookie->key_hash = fscache_hash(0, buf, bufs);
5346 + return 0;
5347 + }
5348 +
5349 +diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
5350 +index d6209022e9658..cc87288a5448c 100644
5351 +--- a/fs/fscache/internal.h
5352 ++++ b/fs/fscache/internal.h
5353 +@@ -101,6 +101,8 @@ extern struct workqueue_struct *fscache_object_wq;
5354 + extern struct workqueue_struct *fscache_op_wq;
5355 + DECLARE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait);
5356 +
5357 ++extern unsigned int fscache_hash(unsigned int salt, unsigned int *data, unsigned int n);
5358 ++
5359 + static inline bool fscache_object_congested(void)
5360 + {
5361 + return workqueue_congested(WORK_CPU_UNBOUND, fscache_object_wq);
5362 +diff --git a/fs/fscache/main.c b/fs/fscache/main.c
5363 +index 30ad89db1efcc..aa49234e9520a 100644
5364 +--- a/fs/fscache/main.c
5365 ++++ b/fs/fscache/main.c
5366 +@@ -98,6 +98,45 @@ static struct ctl_table fscache_sysctls_root[] = {
5367 + };
5368 + #endif
5369 +
5370 ++/*
5371 ++ * Mixing scores (in bits) for (7,20):
5372 ++ * Input delta: 1-bit 2-bit
5373 ++ * 1 round: 330.3 9201.6
5374 ++ * 2 rounds: 1246.4 25475.4
5375 ++ * 3 rounds: 1907.1 31295.1
5376 ++ * 4 rounds: 2042.3 31718.6
5377 ++ * Perfect: 2048 31744
5378 ++ * (32*64) (32*31/2 * 64)
5379 ++ */
5380 ++#define HASH_MIX(x, y, a) \
5381 ++ ( x ^= (a), \
5382 ++ y ^= x, x = rol32(x, 7),\
5383 ++ x += y, y = rol32(y,20),\
5384 ++ y *= 9 )
5385 ++
5386 ++static inline unsigned int fold_hash(unsigned long x, unsigned long y)
5387 ++{
5388 ++ /* Use arch-optimized multiply if one exists */
5389 ++ return __hash_32(y ^ __hash_32(x));
5390 ++}
5391 ++
5392 ++/*
5393 ++ * Generate a hash. This is derived from full_name_hash(), but we want to be
5394 ++ * sure it is arch independent and that it doesn't change as bits of the
5395 ++ * computed hash value might appear on disk. The caller also guarantees that
5396 ++ * the hashed data will be a series of aligned 32-bit words.
5397 ++ */
5398 ++unsigned int fscache_hash(unsigned int salt, unsigned int *data, unsigned int n)
5399 ++{
5400 ++ unsigned int a, x = 0, y = salt;
5401 ++
5402 ++ for (; n; n--) {
5403 ++ a = *data++;
5404 ++ HASH_MIX(x, y, a);
5405 ++ }
5406 ++ return fold_hash(x, y);
5407 ++}
5408 ++
5409 + /*
5410 + * initialise the fs caching module
5411 + */
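
Because the hash now influences on-disk state, it must be stable across architectures, hence the fixed-rotation mixer instead of full_name_hash(). A userspace replica for experimentation; it assumes __hash_32() is the kernel's golden-ratio multiply by 0x61C88647, which is stated here as an assumption rather than taken from the patch:

#include <stdint.h>
#include <stdio.h>

static inline uint32_t rol32(uint32_t x, unsigned int r)
{
	return (x << r) | (x >> (32 - r));
}

#define HASH_MIX(x, y, a) \
	( x ^= (a), \
	  y ^= x, x = rol32(x, 7), \
	  x += y, y = rol32(y, 20), \
	  y *= 9 )

static uint32_t hash_32(uint32_t v)	/* kernel __hash_32() equivalent */
{
	return v * 0x61C88647u;		/* GOLDEN_RATIO_32 */
}

static uint32_t fscache_hash(uint32_t salt, const uint32_t *data,
			     unsigned int n)
{
	uint32_t a, x = 0, y = salt;

	for (; n; n--) {
		a = *data++;
		HASH_MIX(x, y, a);
	}
	return hash_32(y ^ hash_32(x));
}

int main(void)
{
	uint32_t key[2] = { 0x12345678, 0x9abcdef0 };

	printf("%08x\n", fscache_hash(0, key, 2));
	return 0;
}
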
5412 +diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
5413 +index 56dddc1f8dddc..9e90e42c495ed 100644
5414 +--- a/fs/gfs2/lock_dlm.c
5415 ++++ b/fs/gfs2/lock_dlm.c
5416 +@@ -295,6 +295,11 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
5417 + gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
5418 + gfs2_update_request_times(gl);
5419 +
5420 ++ /* don't want to call dlm if we've unmounted the lock protocol */
5421 ++ if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
5422 ++ gfs2_glock_free(gl);
5423 ++ return;
5424 ++ }
5425 + /* don't want to skip dlm_unlock writing the lvb when lock has one */
5426 +
5427 + if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
5428 +diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
5429 +index 488a9e7f8f660..2355ad62b81fa 100644
5430 +--- a/fs/isofs/inode.c
5431 ++++ b/fs/isofs/inode.c
5432 +@@ -157,7 +157,6 @@ struct iso9660_options{
5433 + unsigned int overriderockperm:1;
5434 + unsigned int uid_set:1;
5435 + unsigned int gid_set:1;
5436 +- unsigned int utf8:1;
5437 + unsigned char map;
5438 + unsigned char check;
5439 + unsigned int blocksize;
5440 +@@ -357,7 +356,6 @@ static int parse_options(char *options, struct iso9660_options *popt)
5441 + popt->gid = GLOBAL_ROOT_GID;
5442 + popt->uid = GLOBAL_ROOT_UID;
5443 + popt->iocharset = NULL;
5444 +- popt->utf8 = 0;
5445 + popt->overriderockperm = 0;
5446 + popt->session=-1;
5447 + popt->sbsector=-1;
5448 +@@ -390,10 +388,13 @@ static int parse_options(char *options, struct iso9660_options *popt)
5449 + case Opt_cruft:
5450 + popt->cruft = 1;
5451 + break;
5452 ++#ifdef CONFIG_JOLIET
5453 + case Opt_utf8:
5454 +- popt->utf8 = 1;
5455 ++ kfree(popt->iocharset);
5456 ++ popt->iocharset = kstrdup("utf8", GFP_KERNEL);
5457 ++ if (!popt->iocharset)
5458 ++ return 0;
5459 + break;
5460 +-#ifdef CONFIG_JOLIET
5461 + case Opt_iocharset:
5462 + kfree(popt->iocharset);
5463 + popt->iocharset = match_strdup(&args[0]);
5464 +@@ -496,7 +497,6 @@ static int isofs_show_options(struct seq_file *m, struct dentry *root)
5465 + if (sbi->s_nocompress) seq_puts(m, ",nocompress");
5466 + if (sbi->s_overriderockperm) seq_puts(m, ",overriderockperm");
5467 + if (sbi->s_showassoc) seq_puts(m, ",showassoc");
5468 +- if (sbi->s_utf8) seq_puts(m, ",utf8");
5469 +
5470 + if (sbi->s_check) seq_printf(m, ",check=%c", sbi->s_check);
5471 + if (sbi->s_mapping) seq_printf(m, ",map=%c", sbi->s_mapping);
5472 +@@ -519,9 +519,10 @@ static int isofs_show_options(struct seq_file *m, struct dentry *root)
5473 + seq_printf(m, ",fmode=%o", sbi->s_fmode);
5474 +
5475 + #ifdef CONFIG_JOLIET
5476 +- if (sbi->s_nls_iocharset &&
5477 +- strcmp(sbi->s_nls_iocharset->charset, CONFIG_NLS_DEFAULT) != 0)
5478 ++ if (sbi->s_nls_iocharset)
5479 + seq_printf(m, ",iocharset=%s", sbi->s_nls_iocharset->charset);
5480 ++ else
5481 ++ seq_puts(m, ",iocharset=utf8");
5482 + #endif
5483 + return 0;
5484 + }
5485 +@@ -865,14 +866,13 @@ root_found:
5486 + sbi->s_nls_iocharset = NULL;
5487 +
5488 + #ifdef CONFIG_JOLIET
5489 +- if (joliet_level && opt.utf8 == 0) {
5490 ++ if (joliet_level) {
5491 + char *p = opt.iocharset ? opt.iocharset : CONFIG_NLS_DEFAULT;
5492 +- sbi->s_nls_iocharset = load_nls(p);
5493 +- if (! sbi->s_nls_iocharset) {
5494 +- /* Fail only if explicit charset specified */
5495 +- if (opt.iocharset)
5496 ++ if (strcmp(p, "utf8") != 0) {
5497 ++ sbi->s_nls_iocharset = opt.iocharset ?
5498 ++ load_nls(opt.iocharset) : load_nls_default();
5499 ++ if (!sbi->s_nls_iocharset)
5500 + goto out_freesbi;
5501 +- sbi->s_nls_iocharset = load_nls_default();
5502 + }
5503 + }
5504 + #endif
5505 +@@ -888,7 +888,6 @@ root_found:
5506 + sbi->s_gid = opt.gid;
5507 + sbi->s_uid_set = opt.uid_set;
5508 + sbi->s_gid_set = opt.gid_set;
5509 +- sbi->s_utf8 = opt.utf8;
5510 + sbi->s_nocompress = opt.nocompress;
5511 + sbi->s_overriderockperm = opt.overriderockperm;
5512 + /*
5513 +diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h
5514 +index 055ec6c586f7f..dcdc191ed1834 100644
5515 +--- a/fs/isofs/isofs.h
5516 ++++ b/fs/isofs/isofs.h
5517 +@@ -44,7 +44,6 @@ struct isofs_sb_info {
5518 + unsigned char s_session;
5519 + unsigned int s_high_sierra:1;
5520 + unsigned int s_rock:2;
5521 +- unsigned int s_utf8:1;
5522 + unsigned int s_cruft:1; /* Broken disks with high byte of length
5523 + * containing junk */
5524 + unsigned int s_nocompress:1;
5525 +diff --git a/fs/isofs/joliet.c b/fs/isofs/joliet.c
5526 +index be8b6a9d0b926..c0f04a1e7f695 100644
5527 +--- a/fs/isofs/joliet.c
5528 ++++ b/fs/isofs/joliet.c
5529 +@@ -41,14 +41,12 @@ uni16_to_x8(unsigned char *ascii, __be16 *uni, int len, struct nls_table *nls)
5530 + int
5531 + get_joliet_filename(struct iso_directory_record * de, unsigned char *outname, struct inode * inode)
5532 + {
5533 +- unsigned char utf8;
5534 + struct nls_table *nls;
5535 + unsigned char len = 0;
5536 +
5537 +- utf8 = ISOFS_SB(inode->i_sb)->s_utf8;
5538 + nls = ISOFS_SB(inode->i_sb)->s_nls_iocharset;
5539 +
5540 +- if (utf8) {
5541 ++ if (!nls) {
5542 + len = utf16s_to_utf8s((const wchar_t *) de->name,
5543 + de->name_len[0] >> 1, UTF16_BIG_ENDIAN,
5544 + outname, PAGE_SIZE);
5545 +diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
5546 +index a87a562734077..57558a8d92e9b 100644
5547 +--- a/fs/nfs/callback_xdr.c
5548 ++++ b/fs/nfs/callback_xdr.c
5549 +@@ -991,7 +991,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp)
5550 +
5551 + out_invalidcred:
5552 + pr_warn_ratelimited("NFS: NFSv4 callback contains invalid cred\n");
5553 +- return rpc_autherr_badcred;
5554 ++ return svc_return_autherr(rqstp, rpc_autherr_badcred);
5555 + }
5556 +
5557 + /*
5558 +diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
5559 +index 25be2ab6ff6cc..0b4ee1ab25df0 100644
5560 +--- a/fs/overlayfs/dir.c
5561 ++++ b/fs/overlayfs/dir.c
5562 +@@ -517,8 +517,10 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
5563 + goto out_cleanup;
5564 + }
5565 + err = ovl_instantiate(dentry, inode, newdentry, hardlink);
5566 +- if (err)
5567 +- goto out_cleanup;
5568 ++ if (err) {
5569 ++ ovl_cleanup(udir, newdentry);
5570 ++ dput(newdentry);
5571 ++ }
5572 + out_dput:
5573 + dput(upper);
5574 + out_unlock:
5575 +diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
5576 +index d7d2fdda4bbd0..3dbb5ac630e42 100644
5577 +--- a/fs/ubifs/file.c
5578 ++++ b/fs/ubifs/file.c
5579 +@@ -1642,6 +1642,16 @@ static const char *ubifs_get_link(struct dentry *dentry,
5580 + return fscrypt_get_symlink(inode, ui->data, ui->data_len, done);
5581 + }
5582 +
5583 ++static int ubifs_symlink_getattr(const struct path *path, struct kstat *stat,
5584 ++ u32 request_mask, unsigned int query_flags)
5585 ++{
5586 ++ ubifs_getattr(path, stat, request_mask, query_flags);
5587 ++
5588 ++ if (IS_ENCRYPTED(d_inode(path->dentry)))
5589 ++ return fscrypt_symlink_getattr(path, stat);
5590 ++ return 0;
5591 ++}
5592 ++
5593 + const struct address_space_operations ubifs_file_address_operations = {
5594 + .readpage = ubifs_readpage,
5595 + .writepage = ubifs_writepage,
5596 +@@ -1669,7 +1679,7 @@ const struct inode_operations ubifs_file_inode_operations = {
5597 + const struct inode_operations ubifs_symlink_inode_operations = {
5598 + .get_link = ubifs_get_link,
5599 + .setattr = ubifs_setattr,
5600 +- .getattr = ubifs_getattr,
5601 ++ .getattr = ubifs_symlink_getattr,
5602 + #ifdef CONFIG_UBIFS_FS_XATTR
5603 + .listxattr = ubifs_listxattr,
5604 + #endif
5605 +diff --git a/fs/udf/misc.c b/fs/udf/misc.c
5606 +index 401e64cde1be0..853bcff51043f 100644
5607 +--- a/fs/udf/misc.c
5608 ++++ b/fs/udf/misc.c
5609 +@@ -173,13 +173,22 @@ struct genericFormat *udf_get_extendedattr(struct inode *inode, uint32_t type,
5610 + else
5611 + offset = le32_to_cpu(eahd->appAttrLocation);
5612 +
5613 +- while (offset < iinfo->i_lenEAttr) {
5614 ++ while (offset + sizeof(*gaf) < iinfo->i_lenEAttr) {
5615 ++ uint32_t attrLength;
5616 ++
5617 + gaf = (struct genericFormat *)&ea[offset];
5618 ++ attrLength = le32_to_cpu(gaf->attrLength);
5619 ++
5620 ++ /* Detect undersized elements and buffer overflows */
5621 ++ if ((attrLength < sizeof(*gaf)) ||
5622 ++ (attrLength > (iinfo->i_lenEAttr - offset)))
5623 ++ break;
5624 ++
5625 + if (le32_to_cpu(gaf->attrType) == type &&
5626 + gaf->attrSubtype == subtype)
5627 + return gaf;
5628 + else
5629 +- offset += le32_to_cpu(gaf->attrLength);
5630 ++ offset += attrLength;
5631 + }
5632 + }
5633 +
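
The loop is a classic TLV walk, and the added tests are the standard hardening for one: validate an element's self-declared length before using it to index or advance. A generic, self-contained version of the pattern (the struct and names are illustrative, not UDF's on-disk layout, and real code must also convert the length from little-endian):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv {			/* stand-in for UDF's genericFormat */
	uint32_t type;
	uint32_t length;	/* total element length, header included */
};

static const struct tlv *find_tlv(const uint8_t *buf, size_t len,
				  uint32_t type)
{
	size_t off = 0;

	while (off + sizeof(struct tlv) <= len) {
		const struct tlv *t = (const struct tlv *)(buf + off);

		/* Reject undersized elements and lengths running past
		 * the end of the buffer, as the hunk does. */
		if (t->length < sizeof(*t) || t->length > len - off)
			return NULL;
		if (t->type == type)
			return t;
		off += t->length;	/* always advances: length >= 8 */
	}
	return NULL;
}

int main(void)
{
	uint8_t buf[32] = { 0 };
	struct tlv t = { .type = 7, .length = sizeof(struct tlv) };

	memcpy(buf, &t, sizeof(t));
	printf("found: %s\n", find_tlv(buf, sizeof(buf), 7) ? "yes" : "no");
	return 0;
}
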
5634 +diff --git a/fs/udf/super.c b/fs/udf/super.c
5635 +index c7f6243f318bb..9c71246e6d602 100644
5636 +--- a/fs/udf/super.c
5637 ++++ b/fs/udf/super.c
5638 +@@ -112,16 +112,10 @@ struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb)
5639 + return NULL;
5640 + lvid = (struct logicalVolIntegrityDesc *)UDF_SB(sb)->s_lvid_bh->b_data;
5641 + partnum = le32_to_cpu(lvid->numOfPartitions);
5642 +- if ((sb->s_blocksize - sizeof(struct logicalVolIntegrityDescImpUse) -
5643 +- offsetof(struct logicalVolIntegrityDesc, impUse)) /
5644 +- (2 * sizeof(uint32_t)) < partnum) {
5645 +- udf_err(sb, "Logical volume integrity descriptor corrupted "
5646 +- "(numOfPartitions = %u)!\n", partnum);
5647 +- return NULL;
5648 +- }
5649 + /* The offset is to skip freeSpaceTable and sizeTable arrays */
5650 + offset = partnum * 2 * sizeof(uint32_t);
5651 +- return (struct logicalVolIntegrityDescImpUse *)&(lvid->impUse[offset]);
5652 ++ return (struct logicalVolIntegrityDescImpUse *)
5653 ++ (((uint8_t *)(lvid + 1)) + offset);
5654 + }
5655 +
5656 + /* UDF filesystem type */
5657 +@@ -1529,6 +1523,7 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_
5658 + struct udf_sb_info *sbi = UDF_SB(sb);
5659 + struct logicalVolIntegrityDesc *lvid;
5660 + int indirections = 0;
5661 ++ u32 parts, impuselen;
5662 +
5663 + while (++indirections <= UDF_MAX_LVID_NESTING) {
5664 + final_bh = NULL;
5665 +@@ -1555,15 +1550,27 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_
5666 +
5667 + lvid = (struct logicalVolIntegrityDesc *)final_bh->b_data;
5668 + if (lvid->nextIntegrityExt.extLength == 0)
5669 +- return;
5670 ++ goto check;
5671 +
5672 + loc = leea_to_cpu(lvid->nextIntegrityExt);
5673 + }
5674 +
5675 + udf_warn(sb, "Too many LVID indirections (max %u), ignoring.\n",
5676 + UDF_MAX_LVID_NESTING);
5677 ++out_err:
5678 + brelse(sbi->s_lvid_bh);
5679 + sbi->s_lvid_bh = NULL;
5680 ++ return;
5681 ++check:
5682 ++ parts = le32_to_cpu(lvid->numOfPartitions);
5683 ++ impuselen = le32_to_cpu(lvid->lengthOfImpUse);
5684 ++ if (parts >= sb->s_blocksize || impuselen >= sb->s_blocksize ||
5685 ++ sizeof(struct logicalVolIntegrityDesc) + impuselen +
5686 ++ 2 * parts * sizeof(u32) > sb->s_blocksize) {
5687 ++ udf_warn(sb, "Corrupted LVID (parts=%u, impuselen=%u), "
5688 ++ "ignoring.\n", parts, impuselen);
5689 ++ goto out_err;
5690 ++ }
5691 + }
5692 +
5693 + /*
5694 +diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
5695 +index d269d1139f7ff..23c8efc967af2 100644
5696 +--- a/fs/userfaultfd.c
5697 ++++ b/fs/userfaultfd.c
5698 +@@ -32,11 +32,6 @@
5699 +
5700 + static struct kmem_cache *userfaultfd_ctx_cachep __read_mostly;
5701 +
5702 +-enum userfaultfd_state {
5703 +- UFFD_STATE_WAIT_API,
5704 +- UFFD_STATE_RUNNING,
5705 +-};
5706 +-
5707 + /*
5708 + * Start with fault_pending_wqh and fault_wqh so they're more likely
5709 + * to be in the same cacheline.
5710 +@@ -68,8 +63,6 @@ struct userfaultfd_ctx {
5711 + unsigned int flags;
5712 + /* features requested from the userspace */
5713 + unsigned int features;
5714 +- /* state machine */
5715 +- enum userfaultfd_state state;
5716 + /* released */
5717 + bool released;
5718 + /* memory mappings are changing because of non-cooperative event */
5719 +@@ -103,6 +96,14 @@ struct userfaultfd_wake_range {
5720 + unsigned long len;
5721 + };
5722 +
5723 ++/* internal indication that UFFD_API ioctl was successfully executed */
5724 ++#define UFFD_FEATURE_INITIALIZED (1u << 31)
5725 ++
5726 ++static bool userfaultfd_is_initialized(struct userfaultfd_ctx *ctx)
5727 ++{
5728 ++ return ctx->features & UFFD_FEATURE_INITIALIZED;
5729 ++}
5730 ++
5731 + static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
5732 + int wake_flags, void *key)
5733 + {
5734 +@@ -700,7 +701,6 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
5735 +
5736 + atomic_set(&ctx->refcount, 1);
5737 + ctx->flags = octx->flags;
5738 +- ctx->state = UFFD_STATE_RUNNING;
5739 + ctx->features = octx->features;
5740 + ctx->released = false;
5741 + ctx->mmap_changing = false;
5742 +@@ -981,38 +981,33 @@ static __poll_t userfaultfd_poll(struct file *file, poll_table *wait)
5743 +
5744 + poll_wait(file, &ctx->fd_wqh, wait);
5745 +
5746 +- switch (ctx->state) {
5747 +- case UFFD_STATE_WAIT_API:
5748 ++ if (!userfaultfd_is_initialized(ctx))
5749 + return EPOLLERR;
5750 +- case UFFD_STATE_RUNNING:
5751 +- /*
5752 +- * poll() never guarantees that read won't block.
5753 +- * userfaults can be waken before they're read().
5754 +- */
5755 +- if (unlikely(!(file->f_flags & O_NONBLOCK)))
5756 +- return EPOLLERR;
5757 +- /*
5758 +- * lockless access to see if there are pending faults
5759 +- * __pollwait last action is the add_wait_queue but
5760 +- * the spin_unlock would allow the waitqueue_active to
5761 +- * pass above the actual list_add inside
5762 +- * add_wait_queue critical section. So use a full
5763 +- * memory barrier to serialize the list_add write of
5764 +- * add_wait_queue() with the waitqueue_active read
5765 +- * below.
5766 +- */
5767 +- ret = 0;
5768 +- smp_mb();
5769 +- if (waitqueue_active(&ctx->fault_pending_wqh))
5770 +- ret = EPOLLIN;
5771 +- else if (waitqueue_active(&ctx->event_wqh))
5772 +- ret = EPOLLIN;
5773 +-
5774 +- return ret;
5775 +- default:
5776 +- WARN_ON_ONCE(1);
5777 ++
5778 ++ /*
5779 ++ * poll() never guarantees that read won't block.
5780 ++ * userfaults can be woken before they're read().
5781 ++ */
5782 ++ if (unlikely(!(file->f_flags & O_NONBLOCK)))
5783 + return EPOLLERR;
5784 +- }
5785 ++ /*
5786 ++ * lockless access to see if there are pending faults
5787 ++ * __pollwait last action is the add_wait_queue but
5788 ++ * the spin_unlock would allow the waitqueue_active to
5789 ++ * pass above the actual list_add inside
5790 ++ * add_wait_queue critical section. So use a full
5791 ++ * memory barrier to serialize the list_add write of
5792 ++ * add_wait_queue() with the waitqueue_active read
5793 ++ * below.
5794 ++ */
5795 ++ ret = 0;
5796 ++ smp_mb();
5797 ++ if (waitqueue_active(&ctx->fault_pending_wqh))
5798 ++ ret = EPOLLIN;
5799 ++ else if (waitqueue_active(&ctx->event_wqh))
5800 ++ ret = EPOLLIN;
5801 ++
5802 ++ return ret;
5803 + }
5804 +
5805 + static const struct file_operations userfaultfd_fops;
5806 +@@ -1206,7 +1201,7 @@ static ssize_t userfaultfd_read(struct file *file, char __user *buf,
5807 + struct uffd_msg msg;
5808 + int no_wait = file->f_flags & O_NONBLOCK;
5809 +
5810 +- if (ctx->state == UFFD_STATE_WAIT_API)
5811 ++ if (!userfaultfd_is_initialized(ctx))
5812 + return -EINVAL;
5813 +
5814 + for (;;) {
5815 +@@ -1808,9 +1803,10 @@ out:
5816 + static inline unsigned int uffd_ctx_features(__u64 user_features)
5817 + {
5818 + /*
5819 +- * For the current set of features the bits just coincide
5820 ++ * For the current set of features the bits just coincide. Set
5821 ++ * UFFD_FEATURE_INITIALIZED to mark the features as enabled.
5822 + */
5823 +- return (unsigned int)user_features;
5824 ++ return (unsigned int)user_features | UFFD_FEATURE_INITIALIZED;
5825 + }
5826 +
5827 + /*
5828 +@@ -1823,12 +1819,10 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
5829 + {
5830 + struct uffdio_api uffdio_api;
5831 + void __user *buf = (void __user *)arg;
5832 ++ unsigned int ctx_features;
5833 + int ret;
5834 + __u64 features;
5835 +
5836 +- ret = -EINVAL;
5837 +- if (ctx->state != UFFD_STATE_WAIT_API)
5838 +- goto out;
5839 + ret = -EFAULT;
5840 + if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
5841 + goto out;
5842 +@@ -1845,9 +1839,13 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
5843 + ret = -EFAULT;
5844 + if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
5845 + goto out;
5846 +- ctx->state = UFFD_STATE_RUNNING;
5847 ++
5848 + /* only enable the requested features for this uffd context */
5849 +- ctx->features = uffd_ctx_features(features);
5850 ++ ctx_features = uffd_ctx_features(features);
5851 ++ ret = -EINVAL;
5852 ++ if (cmpxchg(&ctx->features, 0, ctx_features) != 0)
5853 ++ goto err_out;
5854 ++
5855 + ret = 0;
5856 + out:
5857 + return ret;
5858 +@@ -1864,7 +1862,7 @@ static long userfaultfd_ioctl(struct file *file, unsigned cmd,
5859 + int ret = -EINVAL;
5860 + struct userfaultfd_ctx *ctx = file->private_data;
5861 +
5862 +- if (cmd != UFFDIO_API && ctx->state == UFFD_STATE_WAIT_API)
5863 ++ if (cmd != UFFDIO_API && !userfaultfd_is_initialized(ctx))
5864 + return -EINVAL;
5865 +
5866 + switch(cmd) {
5867 +@@ -1962,7 +1960,6 @@ SYSCALL_DEFINE1(userfaultfd, int, flags)
5868 + atomic_set(&ctx->refcount, 1);
5869 + ctx->flags = flags;
5870 + ctx->features = 0;
5871 +- ctx->state = UFFD_STATE_WAIT_API;
5872 + ctx->released = false;
5873 + ctx->mmap_changing = false;
5874 + ctx->mm = current->mm;
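
The userfaultfd rework above collapses the WAIT_API/RUNNING state machine into one bit: UFFD_FEATURE_INITIALIZED occupies bit 31 of ctx->features, and the UFFDIO_API handler claims it with cmpxchg() so racing callers cannot both complete the handshake. A minimal user-space sketch of the same one-shot pattern, using GCC's __sync_val_compare_and_swap() in place of the kernel's cmpxchg() (the names below are illustrative, not taken from the patch):

    #include <stdio.h>

    #define FEATURE_INITIALIZED (1u << 31)  /* internal, never shown to users */

    static unsigned int ctx_features;       /* 0 means "handshake not done" */

    /* Returns 0 on success, -1 if another caller already initialized. */
    static int api_handshake(unsigned int requested)
    {
        unsigned int newval = requested | FEATURE_INITIALIZED;

        /* Atomically move 0 -> newval; only one caller can win. */
        if (__sync_val_compare_and_swap(&ctx_features, 0, newval) != 0)
            return -1;
        return 0;
    }

    int main(void)
    {
        printf("first:  %d\n", api_handshake(0x3));  /* 0  */
        printf("second: %d\n", api_handshake(0x3));  /* -1 */
        return 0;
    }

Folding the marker into the features word also lets every later ioctl, poll() and read() test initialization with a plain load instead of a separate state field.
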
5875 +diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
5876 +index e0b681a717bac..052e26fda2e6c 100644
5877 +--- a/include/crypto/public_key.h
5878 ++++ b/include/crypto/public_key.h
5879 +@@ -35,9 +35,9 @@ extern void public_key_free(struct public_key *key);
5880 + struct public_key_signature {
5881 + struct asymmetric_key_id *auth_ids[2];
5882 + u8 *s; /* Signature */
5883 +- u32 s_size; /* Number of bytes in signature */
5884 + u8 *digest;
5885 +- u8 digest_size; /* Number of bytes in digest */
5886 ++ u32 s_size; /* Number of bytes in signature */
5887 ++ u32 digest_size; /* Number of bytes in digest */
5888 + const char *pkey_algo;
5889 + const char *hash_algo;
5890 + };
5891 +diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
5892 +index 1c8517320ea64..4acd06cca703d 100644
5893 +--- a/include/linux/bpf_verifier.h
5894 ++++ b/include/linux/bpf_verifier.h
5895 +@@ -41,6 +41,7 @@ enum bpf_reg_liveness {
5896 + };
5897 +
5898 + struct bpf_reg_state {
5899 ++ /* Ordering of fields matters. See states_equal() */
5900 + enum bpf_reg_type type;
5901 + union {
5902 + /* valid when type == PTR_TO_PACKET */
5903 +@@ -62,7 +63,6 @@ struct bpf_reg_state {
5904 + * came from, when one is tested for != NULL.
5905 + */
5906 + u32 id;
5907 +- /* Ordering of fields matters. See states_equal() */
5908 + /* For scalar types (SCALAR_VALUE), this represents our knowledge of
5909 + * the actual value.
5910 + * For pointer types, this represents the variable part of the offset
5911 +@@ -79,15 +79,15 @@ struct bpf_reg_state {
5912 + s64 smax_value; /* maximum possible (s64)value */
5913 + u64 umin_value; /* minimum possible (u64)value */
5914 + u64 umax_value; /* maximum possible (u64)value */
5915 ++ /* parentage chain for liveness checking */
5916 ++ struct bpf_reg_state *parent;
5917 + /* Inside the callee two registers can be both PTR_TO_STACK like
5918 + * R1=fp-8 and R2=fp-8, but one of them points to this function stack
5919 + * while another to the caller's stack. To differentiate them 'frameno'
5920 + * is used which is an index in bpf_verifier_state->frame[] array
5921 + * pointing to bpf_func_state.
5922 +- * This field must be second to last, for states_equal() reasons.
5923 + */
5924 + u32 frameno;
5925 +- /* This field must be last, for states_equal() reasons. */
5926 + enum bpf_reg_liveness live;
5927 + };
5928 +
5929 +@@ -110,7 +110,6 @@ struct bpf_stack_state {
5930 + */
5931 + struct bpf_func_state {
5932 + struct bpf_reg_state regs[MAX_BPF_REG];
5933 +- struct bpf_verifier_state *parent;
5934 + /* index of call instruction that called into this func */
5935 + int callsite;
5936 + /* stack frame number of this function state from pov of
5937 +@@ -128,11 +127,17 @@ struct bpf_func_state {
5938 + struct bpf_stack_state *stack;
5939 + };
5940 +
5941 ++struct bpf_id_pair {
5942 ++ u32 old;
5943 ++ u32 cur;
5944 ++};
5945 ++
5946 ++/* Maximum number of register states that can exist at once */
5947 ++#define BPF_ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
5948 + #define MAX_CALL_FRAMES 8
5949 + struct bpf_verifier_state {
5950 + /* call stack tracking */
5951 + struct bpf_func_state *frame[MAX_CALL_FRAMES];
5952 +- struct bpf_verifier_state *parent;
5953 + u32 curframe;
5954 + bool speculative;
5955 + };
5956 +@@ -160,8 +165,8 @@ struct bpf_insn_aux_data {
5957 + u32 alu_limit; /* limit for add/sub register with pointer */
5958 + };
5959 + int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
5960 +- int sanitize_stack_off; /* stack slot to be cleared */
5961 + bool seen; /* this insn was processed by the verifier */
5962 ++ bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */
5963 + u8 alu_state; /* used in combination with alu_limit */
5964 + };
5965 +
5966 +@@ -210,11 +215,13 @@ struct bpf_verifier_env {
5967 + struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
5968 + u32 used_map_cnt; /* number of used maps */
5969 + u32 id_gen; /* used to generate unique reg IDs */
5970 ++ bool explore_alu_limits;
5971 + bool allow_ptr_leaks;
5972 + bool seen_direct_write;
5973 + struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
5974 + struct bpf_verifier_log log;
5975 + struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
5976 ++ struct bpf_id_pair idmap_scratch[BPF_ID_MAP_SIZE];
5977 + u32 subprog_cnt;
5978 + };
5979 +
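
The bpf_verifier.h changes move liveness bookkeeping from one parent pointer per verifier state to one parent pointer per register, so a read mark can be propagated by walking just that register's own parentage chain until a write screens it off. A stripped-down sketch of the walk the new mark_reg_read() performs (two liveness bits and a parent pointer; not the kernel structures):

    #include <stdio.h>

    enum { LIVE_NONE = 0, LIVE_READ = 1, LIVE_WRITTEN = 2 };

    struct reg {
        int live;
        struct reg *parent;  /* same register in the parent state */
    };

    static void mark_reg_read(const struct reg *state, struct reg *parent)
    {
        int writes = (parent == state->parent);  /* observe write marks */

        while (parent) {
            /* A write in this state screens the read from its parents. */
            if (writes && (state->live & LIVE_WRITTEN))
                break;
            parent->live |= LIVE_READ;
            state = parent;
            parent = state->parent;
            writes = 1;
        }
    }

    int main(void)
    {
        struct reg grandparent = { LIVE_NONE, NULL };
        struct reg parent = { LIVE_NONE, &grandparent };
        struct reg cur = { LIVE_NONE, &parent };

        mark_reg_read(&cur, cur.parent);
        printf("parent=%d grandparent=%d\n", parent.live, grandparent.live);
        return 0;
    }
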
5980 +diff --git a/include/linux/filter.h b/include/linux/filter.h
5981 +index 7c84762cb59e5..e981bd92a4e3a 100644
5982 +--- a/include/linux/filter.h
5983 ++++ b/include/linux/filter.h
5984 +@@ -64,6 +64,11 @@ struct sock_reuseport;
5985 + /* unused opcode to mark call to interpreter with arguments */
5986 + #define BPF_CALL_ARGS 0xe0
5987 +
5988 ++/* unused opcode to mark speculation barrier for mitigating
5989 ++ * Speculative Store Bypass
5990 ++ */
5991 ++#define BPF_NOSPEC 0xc0
5992 ++
5993 + /* As per nm, we expose JITed images as text (code) section for
5994 + * kallsyms. That way, tools like perf can find it to match
5995 + * addresses.
5996 +@@ -354,6 +359,16 @@ struct sock_reuseport;
5997 + .off = 0, \
5998 + .imm = 0 })
5999 +
6000 ++/* Speculation barrier */
6001 ++
6002 ++#define BPF_ST_NOSPEC() \
6003 ++ ((struct bpf_insn) { \
6004 ++ .code = BPF_ST | BPF_NOSPEC, \
6005 ++ .dst_reg = 0, \
6006 ++ .src_reg = 0, \
6007 ++ .off = 0, \
6008 ++ .imm = 0 })
6009 ++
6010 + /* Internal classic blocks for direct assignment */
6011 +
6012 + #define __BPF_STMT(CODE, K) \
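
BPF_NOSPEC deliberately squats on mode 0xc0, which no UAPI store instruction uses, so the verifier can inject a barrier pseudo-instruction without colliding with any program loaded from user space. A sketch of how such an instruction is encoded and inspected, with struct bpf_insn reduced to plain fields (the real UAPI layout uses 4-bit register bitfields):

    #include <stdio.h>
    #include <stdint.h>

    #define BPF_CLASS(code) ((code) & 0x07)
    #define BPF_MODE(code)  ((code) & 0xe0)
    #define BPF_ST     0x02
    #define BPF_NOSPEC 0xc0  /* unused opcode space, kernel-internal only */

    struct bpf_insn {
        uint8_t code;
        uint8_t dst_reg, src_reg;
        int16_t off;
        int32_t imm;
    };

    #define BPF_ST_NOSPEC() \
        ((struct bpf_insn){ .code = BPF_ST | BPF_NOSPEC })

    int main(void)
    {
        struct bpf_insn insn = BPF_ST_NOSPEC();

        printf("code=0x%02x class=0x%02x mode=0x%02x\n",
               (unsigned)insn.code, (unsigned)BPF_CLASS(insn.code),
               (unsigned)BPF_MODE(insn.code));
        return 0;
    }
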
6013 +diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h
6014 +index 93304cfeb6016..6e95c5a9b7fa1 100644
6015 +--- a/include/linux/fscrypt_notsupp.h
6016 ++++ b/include/linux/fscrypt_notsupp.h
6017 +@@ -234,4 +234,10 @@ static inline const char *fscrypt_get_symlink(struct inode *inode,
6018 + return ERR_PTR(-EOPNOTSUPP);
6019 + }
6020 +
6021 ++static inline int fscrypt_symlink_getattr(const struct path *path,
6022 ++ struct kstat *stat)
6023 ++{
6024 ++ return -EOPNOTSUPP;
6025 ++}
6026 ++
6027 + #endif /* _LINUX_FSCRYPT_NOTSUPP_H */
6028 +diff --git a/include/linux/fscrypt_supp.h b/include/linux/fscrypt_supp.h
6029 +index 0409c14ae1de4..f4cb4871a987f 100644
6030 +--- a/include/linux/fscrypt_supp.h
6031 ++++ b/include/linux/fscrypt_supp.h
6032 +@@ -231,5 +231,6 @@ extern int __fscrypt_encrypt_symlink(struct inode *inode, const char *target,
6033 + extern const char *fscrypt_get_symlink(struct inode *inode, const void *caddr,
6034 + unsigned int max_size,
6035 + struct delayed_call *done);
6036 ++int fscrypt_symlink_getattr(const struct path *path, struct kstat *stat);
6037 +
6038 + #endif /* _LINUX_FSCRYPT_SUPP_H */
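
The pairing of fscrypt_supp.h and fscrypt_notsupp.h is the usual config-stub idiom: the same declaration resolves either to the real fscrypt_symlink_getattr() or to an inline stub returning -EOPNOTSUPP, so call sites need no #ifdefs of their own. The idiom in miniature (CONFIG_FEATURE_X and feature_x_getattr() are made-up stand-ins):

    #include <stdio.h>
    #include <errno.h>

    /* #define CONFIG_FEATURE_X 1 */  /* toggle to select the real version */

    #ifdef CONFIG_FEATURE_X
    static int feature_x_getattr(int obj)
    {
        return obj * 2;              /* stand-in for a real implementation */
    }
    #else
    static inline int feature_x_getattr(int obj)
    {
        (void)obj;
        return -EOPNOTSUPP;          /* feature compiled out */
    }
    #endif

    int main(void)
    {
        int ret = feature_x_getattr(21);

        if (ret < 0)
            printf("unsupported: %d\n", ret);
        else
            printf("value: %d\n", ret);
        return 0;
    }
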
6039 +diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
6040 +index 2df83a6598182..cb7dc38e9c779 100644
6041 +--- a/include/linux/hugetlb.h
6042 ++++ b/include/linux/hugetlb.h
6043 +@@ -513,6 +513,11 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
6044 +
6045 + void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);
6046 +
6047 ++static inline void hugetlb_count_init(struct mm_struct *mm)
6048 ++{
6049 ++ atomic_long_set(&mm->hugetlb_usage, 0);
6050 ++}
6051 ++
6052 + static inline void hugetlb_count_add(long l, struct mm_struct *mm)
6053 + {
6054 + atomic_long_add(l, &mm->hugetlb_usage);
6055 +@@ -593,6 +598,10 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
6056 + return &mm->page_table_lock;
6057 + }
6058 +
6059 ++static inline void hugetlb_count_init(struct mm_struct *mm)
6060 ++{
6061 ++}
6062 ++
6063 + static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
6064 + {
6065 + }
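
hugetlb_count_init() is needed because a forked mm_struct starts life as a byte copy of the parent's, so any counter that is not explicitly reset is silently inherited. A toy version of the copy-then-reinit pattern, with a plain long standing in for the kernel's atomic_long_t:

    #include <stdio.h>
    #include <string.h>

    struct mm {
        long hugetlb_usage;
        /* ... many other fields copied wholesale on fork ... */
    };

    static void mm_init(struct mm *mm)
    {
        /* Fields that must not be inherited are re-initialized here. */
        mm->hugetlb_usage = 0;
    }

    int main(void)
    {
        struct mm parent = { .hugetlb_usage = 42 };
        struct mm child;

        memcpy(&child, &parent, sizeof(child));  /* dup_mm()-style copy */
        mm_init(&child);                 /* without this: child sees 42 */
        printf("child usage = %ld\n", child.hugetlb_usage);
        return 0;
    }
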
6066 +diff --git a/include/linux/list.h b/include/linux/list.h
6067 +index de04cc5ed5367..d2c12ef7a4e32 100644
6068 +--- a/include/linux/list.h
6069 ++++ b/include/linux/list.h
6070 +@@ -484,6 +484,15 @@ static inline void list_splice_tail_init(struct list_head *list,
6071 + pos != (head); \
6072 + pos = n, n = pos->prev)
6073 +
6074 ++/**
6075 ++ * list_entry_is_head - test if the entry points to the head of the list
6076 ++ * @pos: the type * to cursor
6077 ++ * @head: the head for your list.
6078 ++ * @member: the name of the list_head within the struct.
6079 ++ */
6080 ++#define list_entry_is_head(pos, head, member) \
6081 ++ (&pos->member == (head))
6082 ++
6083 + /**
6084 + * list_for_each_entry - iterate over list of given type
6085 + * @pos: the type * to use as a loop cursor.
6086 +@@ -492,7 +501,7 @@ static inline void list_splice_tail_init(struct list_head *list,
6087 + */
6088 + #define list_for_each_entry(pos, head, member) \
6089 + for (pos = list_first_entry(head, typeof(*pos), member); \
6090 +- &pos->member != (head); \
6091 ++ !list_entry_is_head(pos, head, member); \
6092 + pos = list_next_entry(pos, member))
6093 +
6094 + /**
6095 +@@ -503,7 +512,7 @@ static inline void list_splice_tail_init(struct list_head *list,
6096 + */
6097 + #define list_for_each_entry_reverse(pos, head, member) \
6098 + for (pos = list_last_entry(head, typeof(*pos), member); \
6099 +- &pos->member != (head); \
6100 ++ !list_entry_is_head(pos, head, member); \
6101 + pos = list_prev_entry(pos, member))
6102 +
6103 + /**
6104 +@@ -528,7 +537,7 @@ static inline void list_splice_tail_init(struct list_head *list,
6105 + */
6106 + #define list_for_each_entry_continue(pos, head, member) \
6107 + for (pos = list_next_entry(pos, member); \
6108 +- &pos->member != (head); \
6109 ++ !list_entry_is_head(pos, head, member); \
6110 + pos = list_next_entry(pos, member))
6111 +
6112 + /**
6113 +@@ -542,7 +551,7 @@ static inline void list_splice_tail_init(struct list_head *list,
6114 + */
6115 + #define list_for_each_entry_continue_reverse(pos, head, member) \
6116 + for (pos = list_prev_entry(pos, member); \
6117 +- &pos->member != (head); \
6118 ++ !list_entry_is_head(pos, head, member); \
6119 + pos = list_prev_entry(pos, member))
6120 +
6121 + /**
6122 +@@ -554,7 +563,7 @@ static inline void list_splice_tail_init(struct list_head *list,
6123 + * Iterate over list of given type, continuing from current position.
6124 + */
6125 + #define list_for_each_entry_from(pos, head, member) \
6126 +- for (; &pos->member != (head); \
6127 ++ for (; !list_entry_is_head(pos, head, member); \
6128 + pos = list_next_entry(pos, member))
6129 +
6130 + /**
6131 +@@ -567,7 +576,7 @@ static inline void list_splice_tail_init(struct list_head *list,
6132 + * Iterate backwards over list of given type, continuing from current position.
6133 + */
6134 + #define list_for_each_entry_from_reverse(pos, head, member) \
6135 +- for (; &pos->member != (head); \
6136 ++ for (; !list_entry_is_head(pos, head, member); \
6137 + pos = list_prev_entry(pos, member))
6138 +
6139 + /**
6140 +@@ -580,7 +589,7 @@ static inline void list_splice_tail_init(struct list_head *list,
6141 + #define list_for_each_entry_safe(pos, n, head, member) \
6142 + for (pos = list_first_entry(head, typeof(*pos), member), \
6143 + n = list_next_entry(pos, member); \
6144 +- &pos->member != (head); \
6145 ++ !list_entry_is_head(pos, head, member); \
6146 + pos = n, n = list_next_entry(n, member))
6147 +
6148 + /**
6149 +@@ -596,7 +605,7 @@ static inline void list_splice_tail_init(struct list_head *list,
6150 + #define list_for_each_entry_safe_continue(pos, n, head, member) \
6151 + for (pos = list_next_entry(pos, member), \
6152 + n = list_next_entry(pos, member); \
6153 +- &pos->member != (head); \
6154 ++ !list_entry_is_head(pos, head, member); \
6155 + pos = n, n = list_next_entry(n, member))
6156 +
6157 + /**
6158 +@@ -611,7 +620,7 @@ static inline void list_splice_tail_init(struct list_head *list,
6159 + */
6160 + #define list_for_each_entry_safe_from(pos, n, head, member) \
6161 + for (n = list_next_entry(pos, member); \
6162 +- &pos->member != (head); \
6163 ++ !list_entry_is_head(pos, head, member); \
6164 + pos = n, n = list_next_entry(n, member))
6165 +
6166 + /**
6167 +@@ -627,7 +636,7 @@ static inline void list_splice_tail_init(struct list_head *list,
6168 + #define list_for_each_entry_safe_reverse(pos, n, head, member) \
6169 + for (pos = list_last_entry(head, typeof(*pos), member), \
6170 + n = list_prev_entry(pos, member); \
6171 +- &pos->member != (head); \
6172 ++ !list_entry_is_head(pos, head, member); \
6173 + pos = n, n = list_prev_entry(n, member))
6174 +
6175 + /**
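
list_entry_is_head() just gives a name to the termination test every list_for_each_entry* variant already spelled out inline: the cursor has wrapped around once the embedded list_head it designates is the list head itself. A self-contained miniature of the idiom (list_head and container_of() reduced to essentials):

    #include <stdio.h>
    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
    #define list_entry_is_head(pos, head, member) \
        (&(pos)->member == (head))

    struct item { int val; struct list_head node; };

    static void list_add_tail(struct list_head *n, struct list_head *h)
    {
        n->prev = h->prev; n->next = h;
        h->prev->next = n; h->prev = n;
    }

    int main(void)
    {
        struct list_head head = { &head, &head };
        struct item a = { 1 }, b = { 2 };
        struct item *pos;

        list_add_tail(&a.node, &head);
        list_add_tail(&b.node, &head);

        for (pos = container_of(head.next, struct item, node);
             !list_entry_is_head(pos, &head, node);
             pos = container_of(pos->node.next, struct item, node))
            printf("%d\n", pos->val);
        return 0;
    }

Note the loop never dereferences the bogus item computed from the head; it only compares the address of its embedded node, which is why the test is safe.
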
6176 +diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
6177 +index d17d45c41a0b0..5653178768227 100644
6178 +--- a/include/linux/memory_hotplug.h
6179 ++++ b/include/linux/memory_hotplug.h
6180 +@@ -344,6 +344,6 @@ extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
6181 + unsigned long pnum);
6182 + extern bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages,
6183 + int online_type);
6184 +-extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
6185 +- unsigned long nr_pages);
6186 ++extern struct zone *zone_for_pfn_range(int online_type, int nid,
6187 ++ unsigned long start_pfn, unsigned long nr_pages);
6188 + #endif /* __LINUX_MEMORY_HOTPLUG_H */
6189 +diff --git a/include/linux/pci.h b/include/linux/pci.h
6190 +index 2fda9893962d1..6f9ca2f278b32 100644
6191 +--- a/include/linux/pci.h
6192 ++++ b/include/linux/pci.h
6193 +@@ -1644,8 +1644,9 @@ static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
6194 + static inline void pci_disable_device(struct pci_dev *dev) { }
6195 + static inline int pci_assign_resource(struct pci_dev *dev, int i)
6196 + { return -EBUSY; }
6197 +-static inline int __pci_register_driver(struct pci_driver *drv,
6198 +- struct module *owner)
6199 ++static inline int __must_check __pci_register_driver(struct pci_driver *drv,
6200 ++ struct module *owner,
6201 ++ const char *mod_name)
6202 + { return 0; }
6203 + static inline int pci_register_driver(struct pci_driver *drv)
6204 + { return 0; }
6205 +diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h
6206 +index a7ed29baf44a6..86e5ad8aeee4e 100644
6207 +--- a/include/linux/power/max17042_battery.h
6208 ++++ b/include/linux/power/max17042_battery.h
6209 +@@ -82,7 +82,7 @@ enum max17042_register {
6210 + MAX17042_RelaxCFG = 0x2A,
6211 + MAX17042_MiscCFG = 0x2B,
6212 + MAX17042_TGAIN = 0x2C,
6213 +- MAx17042_TOFF = 0x2D,
6214 ++ MAX17042_TOFF = 0x2D,
6215 + MAX17042_CGAIN = 0x2E,
6216 + MAX17042_COFF = 0x2F,
6217 +
6218 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
6219 +index 5f2e6451ece54..f97734f34746a 100644
6220 +--- a/include/linux/skbuff.h
6221 ++++ b/include/linux/skbuff.h
6222 +@@ -1761,7 +1761,7 @@ static inline void __skb_insert(struct sk_buff *newsk,
6223 + WRITE_ONCE(newsk->prev, prev);
6224 + WRITE_ONCE(next->prev, newsk);
6225 + WRITE_ONCE(prev->next, newsk);
6226 +- list->qlen++;
6227 ++ WRITE_ONCE(list->qlen, list->qlen + 1);
6228 + }
6229 +
6230 + static inline void __skb_queue_splice(const struct sk_buff_head *list,
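
Switching qlen++ to WRITE_ONCE() matters for lockless readers that sample the queue length with READ_ONCE(): the annotated store cannot be torn or re-fabricated by the compiler. The two macros come down to volatile accesses, roughly as below (a simplification of the kernel's actual definitions):

    #include <stdio.h>

    #define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
    #define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

    struct queue { unsigned int qlen; };

    static void enqueue(struct queue *q)
    {
        /* writer holds the queue lock; readers may peek without it */
        WRITE_ONCE(q->qlen, q->qlen + 1);
    }

    int main(void)
    {
        struct queue q = { 0 };

        enqueue(&q);
        enqueue(&q);
        printf("qlen=%u\n", READ_ONCE(q.qlen));
        return 0;
    }
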
6231 +diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
6232 +index fdb6b317d9747..c46abf35c9bb6 100644
6233 +--- a/include/linux/sunrpc/svc.h
6234 ++++ b/include/linux/sunrpc/svc.h
6235 +@@ -271,6 +271,7 @@ struct svc_rqst {
6236 + #define RQ_VICTIM (5) /* about to be shut down */
6237 + #define RQ_BUSY (6) /* request is busy */
6238 + #define RQ_DATA (7) /* request has data */
6239 ++#define RQ_AUTHERR (8) /* Request status is auth error */
6240 + unsigned long rq_flags; /* flags field */
6241 + ktime_t rq_qtime; /* enqueue time */
6242 +
6243 +@@ -504,6 +505,7 @@ unsigned int svc_fill_write_vector(struct svc_rqst *rqstp,
6244 + char *svc_fill_symlink_pathname(struct svc_rqst *rqstp,
6245 + struct kvec *first, void *p,
6246 + size_t total);
6247 ++__be32 svc_return_autherr(struct svc_rqst *rqstp, __be32 auth_err);
6248 +
6249 + #define RPC_MAX_ADDRBUFLEN (63U)
6250 +
6251 +diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
6252 +index 8975fd1a1421f..24ce2aab89469 100644
6253 +--- a/include/uapi/linux/pkt_sched.h
6254 ++++ b/include/uapi/linux/pkt_sched.h
6255 +@@ -779,6 +779,8 @@ struct tc_codel_xstats {
6256 +
6257 + /* FQ_CODEL */
6258 +
6259 ++#define FQ_CODEL_QUANTUM_MAX (1 << 20)
6260 ++
6261 + enum {
6262 + TCA_FQ_CODEL_UNSPEC,
6263 + TCA_FQ_CODEL_TARGET,
6264 +diff --git a/include/uapi/linux/serial_reg.h b/include/uapi/linux/serial_reg.h
6265 +index be07b5470f4bb..f51bc8f368134 100644
6266 +--- a/include/uapi/linux/serial_reg.h
6267 ++++ b/include/uapi/linux/serial_reg.h
6268 +@@ -62,6 +62,7 @@
6269 + * ST16C654: 8 16 56 60 8 16 32 56 PORT_16654
6270 + * TI16C750: 1 16 32 56 xx xx xx xx PORT_16750
6271 + * TI16C752: 8 16 56 60 8 16 32 56
6272 ++ * OX16C950: 16 32 112 120 16 32 64 112 PORT_16C950
6273 + * Tegra: 1 4 8 14 16 8 4 1 PORT_TEGRA
6274 + */
6275 + #define UART_FCR_R_TRIG_00 0x00
6276 +diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
6277 +index d2b6d2459aad4..341402bc1202d 100644
6278 +--- a/kernel/bpf/core.c
6279 ++++ b/kernel/bpf/core.c
6280 +@@ -33,6 +33,7 @@
6281 + #include <linux/rcupdate.h>
6282 + #include <linux/perf_event.h>
6283 +
6284 ++#include <asm/barrier.h>
6285 + #include <asm/unaligned.h>
6286 +
6287 + /* Registers */
6288 +@@ -1050,6 +1051,7 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
6289 + /* Non-UAPI available opcodes. */
6290 + [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
6291 + [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
6292 ++ [BPF_ST | BPF_NOSPEC] = &&ST_NOSPEC,
6293 + };
6294 + #undef BPF_INSN_3_LBL
6295 + #undef BPF_INSN_2_LBL
6296 +@@ -1356,7 +1358,21 @@ out:
6297 + JMP_EXIT:
6298 + return BPF_R0;
6299 +
6300 +- /* STX and ST and LDX*/
6301 ++ /* ST, STX and LDX*/
6302 ++ ST_NOSPEC:
6303 ++ /* Speculation barrier for mitigating Speculative Store Bypass.
6304 ++ * In case of arm64, we rely on the firmware mitigation as
6305 ++ * controlled via the ssbd kernel parameter. Whenever the
6306 ++ * mitigation is enabled, it works for all of the kernel code
6307 ++ * with no need to provide any additional instructions here.
6308 ++ * In case of x86, we use 'lfence' insn for mitigation. We
6309 ++ * reuse preexisting logic from Spectre v1 mitigation that
6310 ++ * happens to produce the required code on x86 for v4 as well.
6311 ++ */
6312 ++#ifdef CONFIG_X86
6313 ++ barrier_nospec();
6314 ++#endif
6315 ++ CONT;
6316 + #define LDST(SIZEOP, SIZE) \
6317 + STX_MEM_##SIZEOP: \
6318 + *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \
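
___bpf_prog_run() dispatches with the labels-as-values extension: each opcode indexes a table of label addresses, and the new ST_NOSPEC target is where the x86 build places barrier_nospec(). A toy interpreter in the same style, with the fence reduced to a comment (requires GCC or Clang for computed goto):

    #include <stdio.h>

    enum { OP_ADD, OP_NOSPEC, OP_EXIT };

    static int run(const unsigned char *prog)
    {
        static const void *jumptable[] = {
            [OP_ADD]    = &&do_add,
            [OP_NOSPEC] = &&do_nospec,
            [OP_EXIT]   = &&do_exit,
        };
        int acc = 0;

    #define CONT goto *jumptable[*prog++]
        CONT;
    do_add:
        acc += 1;
        CONT;
    do_nospec:
        /* real interpreter: barrier_nospec() on x86, no-op elsewhere */
        CONT;
    do_exit:
        return acc;
    #undef CONT
    }

    int main(void)
    {
        const unsigned char prog[] = { OP_ADD, OP_NOSPEC, OP_ADD, OP_EXIT };

        printf("%d\n", run(prog));  /* 2 */
        return 0;
    }
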
6319 +diff --git a/kernel/bpf/disasm.c b/kernel/bpf/disasm.c
6320 +index d6b76377cb6ee..cbd75dd5992ef 100644
6321 +--- a/kernel/bpf/disasm.c
6322 ++++ b/kernel/bpf/disasm.c
6323 +@@ -171,15 +171,17 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
6324 + else
6325 + verbose(cbs->private_data, "BUG_%02x\n", insn->code);
6326 + } else if (class == BPF_ST) {
6327 +- if (BPF_MODE(insn->code) != BPF_MEM) {
6328 ++ if (BPF_MODE(insn->code) == BPF_MEM) {
6329 ++ verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = %d\n",
6330 ++ insn->code,
6331 ++ bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
6332 ++ insn->dst_reg,
6333 ++ insn->off, insn->imm);
6334 ++ } else if (BPF_MODE(insn->code) == 0xc0 /* BPF_NOSPEC, no UAPI */) {
6335 ++ verbose(cbs->private_data, "(%02x) nospec\n", insn->code);
6336 ++ } else {
6337 + verbose(cbs->private_data, "BUG_st_%02x\n", insn->code);
6338 +- return;
6339 + }
6340 +- verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = %d\n",
6341 +- insn->code,
6342 +- bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
6343 +- insn->dst_reg,
6344 +- insn->off, insn->imm);
6345 + } else if (class == BPF_LDX) {
6346 + if (BPF_MODE(insn->code) != BPF_MEM) {
6347 + verbose(cbs->private_data, "BUG_ldx_%02x\n", insn->code);
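
print_bpf_insn() now has to tell three BPF_ST shapes apart: ordinary BPF_MEM stores, the kernel-internal 0xc0 nospec mode, and everything else as a bug. The dissection relies only on the fixed opcode bit layout, sketched here:

    #include <stdio.h>

    #define BPF_CLASS(code) ((code) & 0x07)  /* low 3 bits: insn class */
    #define BPF_MODE(code)  ((code) & 0xe0)  /* top 3 bits: mode       */
    #define BPF_ST  0x02
    #define BPF_MEM 0x60

    static void print_st(unsigned char code)
    {
        if (BPF_CLASS(code) != BPF_ST)
            return;
        if (BPF_MODE(code) == BPF_MEM)
            printf("(%02x) store\n", code);
        else if (BPF_MODE(code) == 0xc0)     /* BPF_NOSPEC, no UAPI */
            printf("(%02x) nospec\n", code);
        else
            printf("BUG_st_%02x\n", code);
    }

    int main(void)
    {
        print_st(0x62);  /* BPF_ST | BPF_MEM | BPF_W -> store  */
        print_st(0xc2);  /* BPF_ST | BPF_NOSPEC      -> nospec */
        print_st(0x22);  /* unknown mode             -> bug    */
        return 0;
    }
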
6348 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
6349 +index abdc9eca463c5..9a671f604ebfe 100644
6350 +--- a/kernel/bpf/verifier.c
6351 ++++ b/kernel/bpf/verifier.c
6352 +@@ -380,9 +380,9 @@ static int copy_stack_state(struct bpf_func_state *dst,
6353 + /* do_check() starts with zero-sized stack in struct bpf_verifier_state to
6354 + * make it consume minimal amount of memory. check_stack_write() access from
6355 + * the program calls into realloc_func_state() to grow the stack size.
6356 +- * Note there is a non-zero 'parent' pointer inside bpf_verifier_state
6357 +- * which this function copies over. It points to previous bpf_verifier_state
6358 +- * which is never reallocated
6359 ++ * Note there is a non-zero parent pointer inside each reg of bpf_verifier_state
6360 ++ * which this function copies over. It points to corresponding reg in previous
6361 ++ * bpf_verifier_state which is never reallocated
6362 + */
6363 + static int realloc_func_state(struct bpf_func_state *state, int size,
6364 + bool copy_old)
6365 +@@ -467,7 +467,6 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
6366 + }
6367 + dst_state->speculative = src->speculative;
6368 + dst_state->curframe = src->curframe;
6369 +- dst_state->parent = src->parent;
6370 + for (i = 0; i <= src->curframe; i++) {
6371 + dst = dst_state->frame[i];
6372 + if (!dst) {
6373 +@@ -739,6 +738,7 @@ static void init_reg_state(struct bpf_verifier_env *env,
6374 + for (i = 0; i < MAX_BPF_REG; i++) {
6375 + mark_reg_not_init(env, regs, i);
6376 + regs[i].live = REG_LIVE_NONE;
6377 ++ regs[i].parent = NULL;
6378 + }
6379 +
6380 + /* frame pointer */
6381 +@@ -883,74 +883,21 @@ next:
6382 + return 0;
6383 + }
6384 +
6385 +-static
6386 +-struct bpf_verifier_state *skip_callee(struct bpf_verifier_env *env,
6387 +- const struct bpf_verifier_state *state,
6388 +- struct bpf_verifier_state *parent,
6389 +- u32 regno)
6390 +-{
6391 +- struct bpf_verifier_state *tmp = NULL;
6392 +-
6393 +- /* 'parent' could be a state of caller and
6394 +- * 'state' could be a state of callee. In such case
6395 +- * parent->curframe < state->curframe
6396 +- * and it's ok for r1 - r5 registers
6397 +- *
6398 +- * 'parent' could be a callee's state after it bpf_exit-ed.
6399 +- * In such case parent->curframe > state->curframe
6400 +- * and it's ok for r0 only
6401 +- */
6402 +- if (parent->curframe == state->curframe ||
6403 +- (parent->curframe < state->curframe &&
6404 +- regno >= BPF_REG_1 && regno <= BPF_REG_5) ||
6405 +- (parent->curframe > state->curframe &&
6406 +- regno == BPF_REG_0))
6407 +- return parent;
6408 +-
6409 +- if (parent->curframe > state->curframe &&
6410 +- regno >= BPF_REG_6) {
6411 +- /* for callee saved regs we have to skip the whole chain
6412 +- * of states that belong to callee and mark as LIVE_READ
6413 +- * the registers before the call
6414 +- */
6415 +- tmp = parent;
6416 +- while (tmp && tmp->curframe != state->curframe) {
6417 +- tmp = tmp->parent;
6418 +- }
6419 +- if (!tmp)
6420 +- goto bug;
6421 +- parent = tmp;
6422 +- } else {
6423 +- goto bug;
6424 +- }
6425 +- return parent;
6426 +-bug:
6427 +- verbose(env, "verifier bug regno %d tmp %p\n", regno, tmp);
6428 +- verbose(env, "regno %d parent frame %d current frame %d\n",
6429 +- regno, parent->curframe, state->curframe);
6430 +- return NULL;
6431 +-}
6432 +-
6433 ++/* Parentage chain of this register (or stack slot) should take care of all
6434 ++ * issues like callee-saved registers, stack slot allocation time, etc.
6435 ++ */
6436 + static int mark_reg_read(struct bpf_verifier_env *env,
6437 +- const struct bpf_verifier_state *state,
6438 +- struct bpf_verifier_state *parent,
6439 +- u32 regno)
6440 ++ const struct bpf_reg_state *state,
6441 ++ struct bpf_reg_state *parent)
6442 + {
6443 + bool writes = parent == state->parent; /* Observe write marks */
6444 +
6445 +- if (regno == BPF_REG_FP)
6446 +- /* We don't need to worry about FP liveness because it's read-only */
6447 +- return 0;
6448 +-
6449 + while (parent) {
6450 + /* if read wasn't screened by an earlier write ... */
6451 +- if (writes && state->frame[state->curframe]->regs[regno].live & REG_LIVE_WRITTEN)
6452 ++ if (writes && state->live & REG_LIVE_WRITTEN)
6453 + break;
6454 +- parent = skip_callee(env, state, parent, regno);
6455 +- if (!parent)
6456 +- return -EFAULT;
6457 + /* ... then we depend on parent's value */
6458 +- parent->frame[parent->curframe]->regs[regno].live |= REG_LIVE_READ;
6459 ++ parent->live |= REG_LIVE_READ;
6460 + state = parent;
6461 + parent = state->parent;
6462 + writes = true;
6463 +@@ -976,7 +923,10 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
6464 + verbose(env, "R%d !read_ok\n", regno);
6465 + return -EACCES;
6466 + }
6467 +- return mark_reg_read(env, vstate, vstate->parent, regno);
6468 ++ /* We don't need to worry about FP liveness because it's read-only */
6469 ++ if (regno != BPF_REG_FP)
6470 ++ return mark_reg_read(env, &regs[regno],
6471 ++ regs[regno].parent);
6472 + } else {
6473 + /* check whether register used as dest operand can be written to */
6474 + if (regno == BPF_REG_FP) {
6475 +@@ -1013,6 +963,23 @@ static bool register_is_null(struct bpf_reg_state *reg)
6476 + return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
6477 + }
6478 +
6479 ++static bool register_is_const(struct bpf_reg_state *reg)
6480 ++{
6481 ++ return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off);
6482 ++}
6483 ++
6484 ++static void save_register_state(struct bpf_func_state *state,
6485 ++ int spi, struct bpf_reg_state *reg)
6486 ++{
6487 ++ int i;
6488 ++
6489 ++ state->stack[spi].spilled_ptr = *reg;
6490 ++ state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
6491 ++
6492 ++ for (i = 0; i < BPF_REG_SIZE; i++)
6493 ++ state->stack[spi].slot_type[i] = STACK_SPILL;
6494 ++}
6495 ++
6496 + /* check_stack_read/write functions track spill/fill of registers,
6497 + * stack boundary and alignment are checked in check_mem_access()
6498 + */
6499 +@@ -1022,7 +989,7 @@ static int check_stack_write(struct bpf_verifier_env *env,
6500 + {
6501 + struct bpf_func_state *cur; /* state of the current function */
6502 + int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
6503 +- enum bpf_reg_type type;
6504 ++ struct bpf_reg_state *reg = NULL;
6505 +
6506 + err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE),
6507 + true);
6508 +@@ -1039,56 +1006,45 @@ static int check_stack_write(struct bpf_verifier_env *env,
6509 + }
6510 +
6511 + cur = env->cur_state->frame[env->cur_state->curframe];
6512 +- if (value_regno >= 0 &&
6513 +- is_spillable_regtype((type = cur->regs[value_regno].type))) {
6514 ++ if (value_regno >= 0)
6515 ++ reg = &cur->regs[value_regno];
6516 ++ if (!env->allow_ptr_leaks) {
6517 ++ bool sanitize = reg && is_spillable_regtype(reg->type);
6518 +
6519 ++ for (i = 0; i < size; i++) {
6520 ++ if (state->stack[spi].slot_type[i] == STACK_INVALID) {
6521 ++ sanitize = true;
6522 ++ break;
6523 ++ }
6524 ++ }
6525 ++
6526 ++ if (sanitize)
6527 ++ env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
6528 ++ }
6529 ++
6530 ++ if (reg && size == BPF_REG_SIZE && register_is_const(reg) &&
6531 ++ !register_is_null(reg) && env->allow_ptr_leaks) {
6532 ++ save_register_state(state, spi, reg);
6533 ++ } else if (reg && is_spillable_regtype(reg->type)) {
6534 + /* register containing pointer is being spilled into stack */
6535 + if (size != BPF_REG_SIZE) {
6536 + verbose(env, "invalid size of register spill\n");
6537 + return -EACCES;
6538 + }
6539 +-
6540 +- if (state != cur && type == PTR_TO_STACK) {
6541 ++ if (state != cur && reg->type == PTR_TO_STACK) {
6542 + verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
6543 + return -EINVAL;
6544 + }
6545 +-
6546 +- /* save register state */
6547 +- state->stack[spi].spilled_ptr = cur->regs[value_regno];
6548 +- state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
6549 +-
6550 +- for (i = 0; i < BPF_REG_SIZE; i++) {
6551 +- if (state->stack[spi].slot_type[i] == STACK_MISC &&
6552 +- !env->allow_ptr_leaks) {
6553 +- int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
6554 +- int soff = (-spi - 1) * BPF_REG_SIZE;
6555 +-
6556 +- /* detected reuse of integer stack slot with a pointer
6557 +- * which means either llvm is reusing stack slot or
6558 +- * an attacker is trying to exploit CVE-2018-3639
6559 +- * (speculative store bypass)
6560 +- * Have to sanitize that slot with preemptive
6561 +- * store of zero.
6562 +- */
6563 +- if (*poff && *poff != soff) {
6564 +- /* disallow programs where single insn stores
6565 +- * into two different stack slots, since verifier
6566 +- * cannot sanitize them
6567 +- */
6568 +- verbose(env,
6569 +- "insn %d cannot access two stack slots fp%d and fp%d",
6570 +- insn_idx, *poff, soff);
6571 +- return -EINVAL;
6572 +- }
6573 +- *poff = soff;
6574 +- }
6575 +- state->stack[spi].slot_type[i] = STACK_SPILL;
6576 +- }
6577 ++ save_register_state(state, spi, reg);
6578 + } else {
6579 + u8 type = STACK_MISC;
6580 +
6581 +- /* regular write of data into stack */
6582 +- state->stack[spi].spilled_ptr = (struct bpf_reg_state) {};
6583 ++ /* regular write of data into stack destroys any spilled ptr */
6584 ++ state->stack[spi].spilled_ptr.type = NOT_INIT;
6585 ++ /* Mark slots as STACK_MISC if they belonged to spilled ptr. */
6586 ++ if (state->stack[spi].slot_type[0] == STACK_SPILL)
6587 ++ for (i = 0; i < BPF_REG_SIZE; i++)
6588 ++ state->stack[spi].slot_type[i] = STACK_MISC;
6589 +
6590 + /* only mark the slot as written if all 8 bytes were written
6591 + * otherwise read propagation may incorrectly stop too soon
6592 +@@ -1102,10 +1058,10 @@ static int check_stack_write(struct bpf_verifier_env *env,
6593 + state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
6594 +
6595 + /* when we zero initialize stack slots mark them as such */
6596 +- if (value_regno >= 0 &&
6597 +- register_is_null(&cur->regs[value_regno]))
6598 ++ if (reg && register_is_null(reg))
6599 + type = STACK_ZERO;
6600 +
6601 ++ /* Mark slots affected by this stack write. */
6602 + for (i = 0; i < size; i++)
6603 + state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
6604 + type;
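
For unprivileged (!allow_ptr_leaks) programs, check_stack_write() no longer remembers a single offset to pre-zero; it simply flags the instruction for a trailing barrier whenever it spills a pointer or writes through a not-yet-initialized slot. The decision, reduced to a sketch (function and names here are illustrative):

    #include <stdio.h>
    #include <stdbool.h>

    enum slot { STACK_INVALID, STACK_MISC, STACK_SPILL };

    /* Should this stack-write insn get a speculation barrier appended? */
    static bool needs_sanitize(bool spills_ptr, const enum slot *slots, int size)
    {
        bool sanitize = spills_ptr;
        int i;

        for (i = 0; i < size; i++) {
            if (slots[i] == STACK_INVALID) {  /* fresh, attacker-steerable */
                sanitize = true;
                break;
            }
        }
        return sanitize;
    }

    int main(void)
    {
        enum slot fresh[4] = { STACK_INVALID };
        enum slot used[4]  = { STACK_MISC, STACK_MISC, STACK_MISC, STACK_MISC };

        printf("%d %d %d\n",
               needs_sanitize(false, fresh, 4),   /* 1: uninit slot   */
               needs_sanitize(true,  used,  4),   /* 1: pointer spill */
               needs_sanitize(false, used,  4));  /* 0: plain rewrite */
        return 0;
    }
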
6605 +@@ -1113,61 +1069,6 @@ static int check_stack_write(struct bpf_verifier_env *env,
6606 + return 0;
6607 + }
6608 +
6609 +-/* registers of every function are unique and mark_reg_read() propagates
6610 +- * the liveness in the following cases:
6611 +- * - from callee into caller for R1 - R5 that were used as arguments
6612 +- * - from caller into callee for R0 that used as result of the call
6613 +- * - from caller to the same caller skipping states of the callee for R6 - R9,
6614 +- * since R6 - R9 are callee saved by implicit function prologue and
6615 +- * caller's R6 != callee's R6, so when we propagate liveness up to
6616 +- * parent states we need to skip callee states for R6 - R9.
6617 +- *
6618 +- * stack slot marking is different, since stacks of caller and callee are
6619 +- * accessible in both (since caller can pass a pointer to caller's stack to
6620 +- * callee which can pass it to another function), hence mark_stack_slot_read()
6621 +- * has to propagate the stack liveness to all parent states at given frame number.
6622 +- * Consider code:
6623 +- * f1() {
6624 +- * ptr = fp - 8;
6625 +- * *ptr = ctx;
6626 +- * call f2 {
6627 +- * .. = *ptr;
6628 +- * }
6629 +- * .. = *ptr;
6630 +- * }
6631 +- * First *ptr is reading from f1's stack and mark_stack_slot_read() has
6632 +- * to mark liveness at the f1's frame and not f2's frame.
6633 +- * Second *ptr is also reading from f1's stack and mark_stack_slot_read() has
6634 +- * to propagate liveness to f2 states at f1's frame level and further into
6635 +- * f1 states at f1's frame level until write into that stack slot
6636 +- */
6637 +-static void mark_stack_slot_read(struct bpf_verifier_env *env,
6638 +- const struct bpf_verifier_state *state,
6639 +- struct bpf_verifier_state *parent,
6640 +- int slot, int frameno)
6641 +-{
6642 +- bool writes = parent == state->parent; /* Observe write marks */
6643 +-
6644 +- while (parent) {
6645 +- if (parent->frame[frameno]->allocated_stack <= slot * BPF_REG_SIZE)
6646 +- /* since LIVE_WRITTEN mark is only done for full 8-byte
6647 +- * write the read marks are conservative and parent
6648 +- * state may not even have the stack allocated. In such case
6649 +- * end the propagation, since the loop reached beginning
6650 +- * of the function
6651 +- */
6652 +- break;
6653 +- /* if read wasn't screened by an earlier write ... */
6654 +- if (writes && state->frame[frameno]->stack[slot].spilled_ptr.live & REG_LIVE_WRITTEN)
6655 +- break;
6656 +- /* ... then we depend on parent's value */
6657 +- parent->frame[frameno]->stack[slot].spilled_ptr.live |= REG_LIVE_READ;
6658 +- state = parent;
6659 +- parent = state->parent;
6660 +- writes = true;
6661 +- }
6662 +-}
6663 +-
6664 + static int check_stack_read(struct bpf_verifier_env *env,
6665 + struct bpf_func_state *reg_state /* func where register points to */,
6666 + int off, int size, int value_regno)
6667 +@@ -1175,6 +1076,7 @@ static int check_stack_read(struct bpf_verifier_env *env,
6668 + struct bpf_verifier_state *vstate = env->cur_state;
6669 + struct bpf_func_state *state = vstate->frame[vstate->curframe];
6670 + int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
6671 ++ struct bpf_reg_state *reg;
6672 + u8 *stype;
6673 +
6674 + if (reg_state->allocated_stack <= slot) {
6675 +@@ -1183,11 +1085,20 @@ static int check_stack_read(struct bpf_verifier_env *env,
6676 + return -EACCES;
6677 + }
6678 + stype = reg_state->stack[spi].slot_type;
6679 ++ reg = &reg_state->stack[spi].spilled_ptr;
6680 +
6681 + if (stype[0] == STACK_SPILL) {
6682 + if (size != BPF_REG_SIZE) {
6683 +- verbose(env, "invalid size of register spill\n");
6684 +- return -EACCES;
6685 ++ if (reg->type != SCALAR_VALUE) {
6686 ++ verbose(env, "invalid size of register fill\n");
6687 ++ return -EACCES;
6688 ++ }
6689 ++ if (value_regno >= 0) {
6690 ++ mark_reg_unknown(env, state->regs, value_regno);
6691 ++ state->regs[value_regno].live |= REG_LIVE_WRITTEN;
6692 ++ }
6693 ++ mark_reg_read(env, reg, reg->parent);
6694 ++ return 0;
6695 + }
6696 + for (i = 1; i < BPF_REG_SIZE; i++) {
6697 + if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) {
6698 +@@ -1198,16 +1109,14 @@ static int check_stack_read(struct bpf_verifier_env *env,
6699 +
6700 + if (value_regno >= 0) {
6701 + /* restore register state from stack */
6702 +- state->regs[value_regno] = reg_state->stack[spi].spilled_ptr;
6703 ++ state->regs[value_regno] = *reg;
6704 + /* mark reg as written since spilled pointer state likely
6705 + * has its liveness marks cleared by is_state_visited()
6706 + * which resets stack/reg liveness for state transitions
6707 + */
6708 + state->regs[value_regno].live |= REG_LIVE_WRITTEN;
6709 + }
6710 +- mark_stack_slot_read(env, vstate, vstate->parent, spi,
6711 +- reg_state->frameno);
6712 +- return 0;
6713 ++ mark_reg_read(env, reg, reg->parent);
6714 + } else {
6715 + int zeros = 0;
6716 +
6717 +@@ -1222,8 +1131,7 @@ static int check_stack_read(struct bpf_verifier_env *env,
6718 + off, i, size);
6719 + return -EACCES;
6720 + }
6721 +- mark_stack_slot_read(env, vstate, vstate->parent, spi,
6722 +- reg_state->frameno);
6723 ++ mark_reg_read(env, reg, reg->parent);
6724 + if (value_regno >= 0) {
6725 + if (zeros == size) {
6726 + /* any size read into register is zero extended,
6727 +@@ -1236,8 +1144,8 @@ static int check_stack_read(struct bpf_verifier_env *env,
6728 + }
6729 + state->regs[value_regno].live |= REG_LIVE_WRITTEN;
6730 + }
6731 +- return 0;
6732 + }
6733 ++ return 0;
6734 + }
6735 +
6736 + static int check_stack_access(struct bpf_verifier_env *env,
6737 +@@ -1855,6 +1763,29 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
6738 + BPF_SIZE(insn->code), BPF_WRITE, -1, true);
6739 + }
6740 +
6741 ++static int __check_stack_boundary(struct bpf_verifier_env *env, u32 regno,
6742 ++ int off, int access_size,
6743 ++ bool zero_size_allowed)
6744 ++{
6745 ++ struct bpf_reg_state *reg = cur_regs(env) + regno;
6746 ++
6747 ++ if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
6748 ++ access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
6749 ++ if (tnum_is_const(reg->var_off)) {
6750 ++ verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
6751 ++ regno, off, access_size);
6752 ++ } else {
6753 ++ char tn_buf[48];
6754 ++
6755 ++ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
6756 ++ verbose(env, "invalid stack type R%d var_off=%s access_size=%d\n",
6757 ++ regno, tn_buf, access_size);
6758 ++ }
6759 ++ return -EACCES;
6760 ++ }
6761 ++ return 0;
6762 ++}
6763 ++
6764 + /* when register 'regno' is passed into function that will read 'access_size'
6765 + * bytes from that pointer, make sure that it's within stack boundary
6766 + * and all elements of stack are initialized.
6767 +@@ -1867,7 +1798,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
6768 + {
6769 + struct bpf_reg_state *reg = cur_regs(env) + regno;
6770 + struct bpf_func_state *state = func(env, reg);
6771 +- int off, i, slot, spi;
6772 ++ int err, min_off, max_off, i, j, slot, spi;
6773 +
6774 + if (reg->type != PTR_TO_STACK) {
6775 + /* Allow zero-byte read from NULL, regardless of pointer type */
6776 +@@ -1881,21 +1812,57 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
6777 + return -EACCES;
6778 + }
6779 +
6780 +- /* Only allow fixed-offset stack reads */
6781 +- if (!tnum_is_const(reg->var_off)) {
6782 +- char tn_buf[48];
6783 ++ if (tnum_is_const(reg->var_off)) {
6784 ++ min_off = max_off = reg->var_off.value + reg->off;
6785 ++ err = __check_stack_boundary(env, regno, min_off, access_size,
6786 ++ zero_size_allowed);
6787 ++ if (err)
6788 ++ return err;
6789 ++ } else {
6790 ++ /* Variable offset is prohibited for unprivileged mode for
6791 ++ * simplicity since it requires corresponding support in
6792 ++ * Spectre masking for stack ALU.
6793 ++ * See also retrieve_ptr_limit().
6794 ++ */
6795 ++ if (!env->allow_ptr_leaks) {
6796 ++ char tn_buf[48];
6797 +
6798 +- tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
6799 +- verbose(env, "invalid variable stack read R%d var_off=%s\n",
6800 +- regno, tn_buf);
6801 +- return -EACCES;
6802 +- }
6803 +- off = reg->off + reg->var_off.value;
6804 +- if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
6805 +- access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
6806 +- verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
6807 +- regno, off, access_size);
6808 +- return -EACCES;
6809 ++ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
6810 ++ verbose(env, "R%d indirect variable offset stack access prohibited for !root, var_off=%s\n",
6811 ++ regno, tn_buf);
6812 ++ return -EACCES;
6813 ++ }
6814 ++ /* Only initialized buffer on stack is allowed to be accessed
6815 ++ * with variable offset. With uninitialized buffer it's hard to
6816 ++ * guarantee that whole memory is marked as initialized on
6817 ++ * helper return since specific bounds are unknown what may
6818 ++ * cause uninitialized stack leaking.
6819 ++ */
6820 ++ if (meta && meta->raw_mode)
6821 ++ meta = NULL;
6822 ++
6823 ++ if (reg->smax_value >= BPF_MAX_VAR_OFF ||
6824 ++ reg->smax_value <= -BPF_MAX_VAR_OFF) {
6825 ++ verbose(env, "R%d unbounded indirect variable offset stack access\n",
6826 ++ regno);
6827 ++ return -EACCES;
6828 ++ }
6829 ++ min_off = reg->smin_value + reg->off;
6830 ++ max_off = reg->smax_value + reg->off;
6831 ++ err = __check_stack_boundary(env, regno, min_off, access_size,
6832 ++ zero_size_allowed);
6833 ++ if (err) {
6834 ++ verbose(env, "R%d min value is outside of stack bound\n",
6835 ++ regno);
6836 ++ return err;
6837 ++ }
6838 ++ err = __check_stack_boundary(env, regno, max_off, access_size,
6839 ++ zero_size_allowed);
6840 ++ if (err) {
6841 ++ verbose(env, "R%d max value is outside of stack bound\n",
6842 ++ regno);
6843 ++ return err;
6844 ++ }
6845 + }
6846 +
6847 + if (meta && meta->raw_mode) {
6848 +@@ -1904,10 +1871,10 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
6849 + return 0;
6850 + }
6851 +
6852 +- for (i = 0; i < access_size; i++) {
6853 ++ for (i = min_off; i < max_off + access_size; i++) {
6854 + u8 *stype;
6855 +
6856 +- slot = -(off + i) - 1;
6857 ++ slot = -i - 1;
6858 + spi = slot / BPF_REG_SIZE;
6859 + if (state->allocated_stack <= slot)
6860 + goto err;
6861 +@@ -1919,18 +1886,34 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
6862 + *stype = STACK_MISC;
6863 + goto mark;
6864 + }
6865 ++ if (state->stack[spi].slot_type[0] == STACK_SPILL &&
6866 ++ state->stack[spi].spilled_ptr.type == SCALAR_VALUE) {
6867 ++ __mark_reg_unknown(&state->stack[spi].spilled_ptr);
6868 ++ for (j = 0; j < BPF_REG_SIZE; j++)
6869 ++ state->stack[spi].slot_type[j] = STACK_MISC;
6870 ++ goto mark;
6871 ++ }
6872 ++
6873 + err:
6874 +- verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
6875 +- off, i, access_size);
6876 ++ if (tnum_is_const(reg->var_off)) {
6877 ++ verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
6878 ++ min_off, i - min_off, access_size);
6879 ++ } else {
6880 ++ char tn_buf[48];
6881 ++
6882 ++ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
6883 ++ verbose(env, "invalid indirect read from stack var_off %s+%d size %d\n",
6884 ++ tn_buf, i - min_off, access_size);
6885 ++ }
6886 + return -EACCES;
6887 + mark:
6888 + /* reading any byte out of 8-byte 'spill_slot' will cause
6889 + * the whole slot to be marked as 'read'
6890 + */
6891 +- mark_stack_slot_read(env, env->cur_state, env->cur_state->parent,
6892 +- spi, state->frameno);
6893 ++ mark_reg_read(env, &state->stack[spi].spilled_ptr,
6894 ++ state->stack[spi].spilled_ptr.parent);
6895 + }
6896 +- return update_stack_depth(env, state, off);
6897 ++ return update_stack_depth(env, state, min_off);
6898 + }
6899 +
6900 + static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
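
Once a stack offset is variable, no single address can be validated, so the verifier bounds the whole interval: it runs the boundary check at both smin_value and smax_value plus the fixed offset, and refuses variable offsets outright for unprivileged programs. The two-endpoint check in isolation (zero_size_allowed simplified away; a sketch, not the kernel function):

    #include <stdio.h>

    #define MAX_STACK 512

    /* Mirrors __check_stack_boundary(): off must stay in [-MAX_STACK, 0). */
    static int check_boundary(int off, int access_size)
    {
        if (off >= 0 || off < -MAX_STACK || off + access_size > 0 ||
            access_size <= 0)
            return -1;
        return 0;
    }

    static int check_var_offset(int smin, int smax, int fixed_off, int size)
    {
        if (check_boundary(smin + fixed_off, size))
            return -1;  /* min value outside of stack bound */
        if (check_boundary(smax + fixed_off, size))
            return -1;  /* max value outside of stack bound */
        return 0;
    }

    int main(void)
    {
        /*  0: both endpoints of the access range stay in the frame */
        printf("%d\n", check_var_offset(-16, -4, -64, 8));
        /* -1: max endpoint (-4 + 8) runs past the frame pointer    */
        printf("%d\n", check_var_offset(-16, 60, -64, 8));
        return 0;
    }
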
6901 +@@ -2384,11 +2367,13 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
6902 + state->curframe + 1 /* frameno within this callchain */,
6903 + subprog /* subprog number within this prog */);
6904 +
6905 +- /* copy r1 - r5 args that callee can access */
6906 ++ /* copy r1 - r5 args that callee can access. The copy includes parent
6907 ++ * pointers, which connects us up to the liveness chain
6908 ++ */
6909 + for (i = BPF_REG_1; i <= BPF_REG_5; i++)
6910 + callee->regs[i] = caller->regs[i];
6911 +
6912 +- /* after the call regsiters r0 - r5 were scratched */
6913 ++ /* after the call registers r0 - r5 were scratched */
6914 + for (i = 0; i < CALLER_SAVED_REGS; i++) {
6915 + mark_reg_not_init(env, caller->regs, caller_saved[i]);
6916 + check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
6917 +@@ -2886,6 +2871,12 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
6918 + alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
6919 + alu_state |= ptr_is_dst_reg ?
6920 + BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
6921 ++
6922 ++ /* Limit pruning on unknown scalars to enable deep search for
6923 ++ * potential masking differences from other program paths.
6924 ++ */
6925 ++ if (!off_is_imm)
6926 ++ env->explore_alu_limits = true;
6927 + }
6928 +
6929 + err = update_alu_sanitation_state(aux, alu_state, alu_limit);
6930 +@@ -4798,13 +4789,6 @@ static bool range_within(struct bpf_reg_state *old,
6931 + old->smax_value >= cur->smax_value;
6932 + }
6933 +
6934 +-/* Maximum number of register states that can exist at once */
6935 +-#define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
6936 +-struct idpair {
6937 +- u32 old;
6938 +- u32 cur;
6939 +-};
6940 +-
6941 + /* If in the old state two registers had the same id, then they need to have
6942 + * the same id in the new state as well. But that id could be different from
6943 + * the old state, so we need to track the mapping from old to new ids.
6944 +@@ -4815,11 +4799,11 @@ struct idpair {
6945 + * So we look through our idmap to see if this old id has been seen before. If
6946 + * so, we require the new id to match; otherwise, we add the id pair to the map.
6947 + */
6948 +-static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
6949 ++static bool check_ids(u32 old_id, u32 cur_id, struct bpf_id_pair *idmap)
6950 + {
6951 + unsigned int i;
6952 +
6953 +- for (i = 0; i < ID_MAP_SIZE; i++) {
6954 ++ for (i = 0; i < BPF_ID_MAP_SIZE; i++) {
6955 + if (!idmap[i].old) {
6956 + /* Reached an empty slot; haven't seen this id before */
6957 + idmap[i].old = old_id;
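
check_ids() enforces a consistent renaming between the old and current state: the first sighting of an old id records its pairing, and every later sighting must reproduce it. With idmap_scratch now embedded in bpf_verifier_env, the per-comparison kcalloc()/kfree() pair disappears as well. The core lookup, keeping the kernel's convention that a zero old id marks an empty slot:

    #include <stdio.h>
    #include <string.h>
    #include <stdbool.h>

    #define ID_MAP_SIZE 8

    struct id_pair { unsigned int old, cur; };

    static bool check_ids(unsigned int old_id, unsigned int cur_id,
                          struct id_pair *idmap)
    {
        int i;

        for (i = 0; i < ID_MAP_SIZE; i++) {
            if (!idmap[i].old) {
                /* empty slot: first sighting, record the mapping */
                idmap[i].old = old_id;
                idmap[i].cur = cur_id;
                return true;
            }
            if (idmap[i].old == old_id)
                return idmap[i].cur == cur_id;
        }
        /* table full: be conservative, treat the states as different */
        return false;
    }

    int main(void)
    {
        struct id_pair map[ID_MAP_SIZE];

        memset(map, 0, sizeof(map));
        printf("%d\n", check_ids(5, 9, map));  /* 1: records 5 -> 9  */
        printf("%d\n", check_ids(5, 9, map));  /* 1: consistent      */
        printf("%d\n", check_ids(5, 7, map));  /* 0: 5 already -> 9  */
        return 0;
    }
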
6958 +@@ -4835,8 +4819,8 @@ static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
6959 + }
6960 +
6961 + /* Returns true if (rold safe implies rcur safe) */
6962 +-static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
6963 +- struct idpair *idmap)
6964 ++static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
6965 ++ struct bpf_reg_state *rcur, struct bpf_id_pair *idmap)
6966 + {
6967 + bool equal;
6968 +
6969 +@@ -4844,7 +4828,7 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
6970 + /* explored state didn't use this */
6971 + return true;
6972 +
6973 +- equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, frameno)) == 0;
6974 ++ equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0;
6975 +
6976 + if (rold->type == PTR_TO_STACK)
6977 + /* two stack pointers are equal only if they're pointing to
6978 +@@ -4862,6 +4846,8 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
6979 + return false;
6980 + switch (rold->type) {
6981 + case SCALAR_VALUE:
6982 ++ if (env->explore_alu_limits)
6983 ++ return false;
6984 + if (rcur->type == SCALAR_VALUE) {
6985 + /* new val must satisfy old val knowledge */
6986 + return range_within(rold, rcur) &&
6987 +@@ -4938,9 +4924,8 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
6988 + return false;
6989 + }
6990 +
6991 +-static bool stacksafe(struct bpf_func_state *old,
6992 +- struct bpf_func_state *cur,
6993 +- struct idpair *idmap)
6994 ++static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
6995 ++ struct bpf_func_state *cur, struct bpf_id_pair *idmap)
6996 + {
6997 + int i, spi;
6998 +
6999 +@@ -4982,9 +4967,8 @@ static bool stacksafe(struct bpf_func_state *old,
7000 + continue;
7001 + if (old->stack[spi].slot_type[0] != STACK_SPILL)
7002 + continue;
7003 +- if (!regsafe(&old->stack[spi].spilled_ptr,
7004 +- &cur->stack[spi].spilled_ptr,
7005 +- idmap))
7006 ++ if (!regsafe(env, &old->stack[spi].spilled_ptr,
7007 ++ &cur->stack[spi].spilled_ptr, idmap))
7008 + /* when explored and current stack slot are both storing
7009 + * spilled registers, check that stored pointers types
7010 + * are the same as well.
7011 +@@ -5026,29 +5010,21 @@ static bool stacksafe(struct bpf_func_state *old,
7012 + * whereas register type in current state is meaningful, it means that
7013 + * the current state will reach 'bpf_exit' instruction safely
7014 + */
7015 +-static bool func_states_equal(struct bpf_func_state *old,
7016 ++static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old,
7017 + struct bpf_func_state *cur)
7018 + {
7019 +- struct idpair *idmap;
7020 +- bool ret = false;
7021 + int i;
7022 +
7023 +- idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
7024 +- /* If we failed to allocate the idmap, just say it's not safe */
7025 +- if (!idmap)
7026 +- return false;
7027 ++ memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch));
7028 ++ for (i = 0; i < MAX_BPF_REG; i++)
7029 ++ if (!regsafe(env, &old->regs[i], &cur->regs[i],
7030 ++ env->idmap_scratch))
7031 ++ return false;
7032 +
7033 +- for (i = 0; i < MAX_BPF_REG; i++) {
7034 +- if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
7035 +- goto out_free;
7036 +- }
7037 ++ if (!stacksafe(env, old, cur, env->idmap_scratch))
7038 ++ return false;
7039 +
7040 +- if (!stacksafe(old, cur, idmap))
7041 +- goto out_free;
7042 +- ret = true;
7043 +-out_free:
7044 +- kfree(idmap);
7045 +- return ret;
7046 ++ return true;
7047 + }
7048 +
7049 + static bool states_equal(struct bpf_verifier_env *env,
7050 +@@ -5072,7 +5048,7 @@ static bool states_equal(struct bpf_verifier_env *env,
7051 + for (i = 0; i <= old->curframe; i++) {
7052 + if (old->frame[i]->callsite != cur->frame[i]->callsite)
7053 + return false;
7054 +- if (!func_states_equal(old->frame[i], cur->frame[i]))
7055 ++ if (!func_states_equal(env, old->frame[i], cur->frame[i]))
7056 + return false;
7057 + }
7058 + return true;
7059 +@@ -5083,7 +5059,7 @@ static bool states_equal(struct bpf_verifier_env *env,
7060 + * equivalent state (jump target or such) we didn't arrive by the straight-line
7061 + * code, so read marks in the state must propagate to the parent regardless
7062 + * of the state's write marks. That's what 'parent == state->parent' comparison
7063 +- * in mark_reg_read() and mark_stack_slot_read() is for.
7064 ++ * in mark_reg_read() is for.
7065 + */
7066 + static int propagate_liveness(struct bpf_verifier_env *env,
7067 + const struct bpf_verifier_state *vstate,
7068 +@@ -5104,7 +5080,8 @@ static int propagate_liveness(struct bpf_verifier_env *env,
7069 + if (vparent->frame[vparent->curframe]->regs[i].live & REG_LIVE_READ)
7070 + continue;
7071 + if (vstate->frame[vstate->curframe]->regs[i].live & REG_LIVE_READ) {
7072 +- err = mark_reg_read(env, vstate, vparent, i);
7073 ++ err = mark_reg_read(env, &vstate->frame[vstate->curframe]->regs[i],
7074 ++ &vparent->frame[vstate->curframe]->regs[i]);
7075 + if (err)
7076 + return err;
7077 + }
7078 +@@ -5119,7 +5096,8 @@ static int propagate_liveness(struct bpf_verifier_env *env,
7079 + if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ)
7080 + continue;
7081 + if (state->stack[i].spilled_ptr.live & REG_LIVE_READ)
7082 +- mark_stack_slot_read(env, vstate, vparent, i, frame);
7083 ++ mark_reg_read(env, &state->stack[i].spilled_ptr,
7084 ++ &parent->stack[i].spilled_ptr);
7085 + }
7086 + }
7087 + return err;
7088 +@@ -5129,7 +5107,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
7089 + {
7090 + struct bpf_verifier_state_list *new_sl;
7091 + struct bpf_verifier_state_list *sl;
7092 +- struct bpf_verifier_state *cur = env->cur_state;
7093 ++ struct bpf_verifier_state *cur = env->cur_state, *new;
7094 + int i, j, err, states_cnt = 0;
7095 +
7096 + sl = env->explored_states[insn_idx];
7097 +@@ -5175,16 +5153,18 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
7098 + return -ENOMEM;
7099 +
7100 + /* add new state to the head of linked list */
7101 +- err = copy_verifier_state(&new_sl->state, cur);
7102 ++ new = &new_sl->state;
7103 ++ err = copy_verifier_state(new, cur);
7104 + if (err) {
7105 +- free_verifier_state(&new_sl->state, false);
7106 ++ free_verifier_state(new, false);
7107 + kfree(new_sl);
7108 + return err;
7109 + }
7110 + new_sl->next = env->explored_states[insn_idx];
7111 + env->explored_states[insn_idx] = new_sl;
7112 + /* connect new state to parentage chain */
7113 +- cur->parent = &new_sl->state;
7114 ++ for (i = 0; i < BPF_REG_FP; i++)
7115 ++ cur_regs(env)[i].parent = &new->frame[new->curframe]->regs[i];
7116 + /* clear write marks in current state: the writes we did are not writes
7117 + * our child did, so they don't screen off its reads from us.
7118 + * (There are no read marks in current state, because reads always mark
7119 +@@ -5197,9 +5177,13 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
7120 + /* all stack frames are accessible from callee, clear them all */
7121 + for (j = 0; j <= cur->curframe; j++) {
7122 + struct bpf_func_state *frame = cur->frame[j];
7123 ++ struct bpf_func_state *newframe = new->frame[j];
7124 +
7125 +- for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++)
7126 ++ for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) {
7127 + frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
7128 ++ frame->stack[i].spilled_ptr.parent =
7129 ++ &newframe->stack[i].spilled_ptr;
7130 ++ }
7131 + }
7132 + return 0;
7133 + }
7134 +@@ -5850,34 +5834,33 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
7135 + insn = env->prog->insnsi + delta;
7136 +
7137 + for (i = 0; i < insn_cnt; i++, insn++) {
7138 ++ bool ctx_access;
7139 ++
7140 + if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
7141 + insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
7142 + insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
7143 +- insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
7144 ++ insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) {
7145 + type = BPF_READ;
7146 +- else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
7147 +- insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
7148 +- insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
7149 +- insn->code == (BPF_STX | BPF_MEM | BPF_DW))
7150 ++ ctx_access = true;
7151 ++ } else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
7152 ++ insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
7153 ++ insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
7154 ++ insn->code == (BPF_STX | BPF_MEM | BPF_DW) ||
7155 ++ insn->code == (BPF_ST | BPF_MEM | BPF_B) ||
7156 ++ insn->code == (BPF_ST | BPF_MEM | BPF_H) ||
7157 ++ insn->code == (BPF_ST | BPF_MEM | BPF_W) ||
7158 ++ insn->code == (BPF_ST | BPF_MEM | BPF_DW)) {
7159 + type = BPF_WRITE;
7160 +- else
7161 ++ ctx_access = BPF_CLASS(insn->code) == BPF_STX;
7162 ++ } else {
7163 + continue;
7164 ++ }
7165 +
7166 + if (type == BPF_WRITE &&
7167 +- env->insn_aux_data[i + delta].sanitize_stack_off) {
7168 ++ env->insn_aux_data[i + delta].sanitize_stack_spill) {
7169 + struct bpf_insn patch[] = {
7170 +- /* Sanitize suspicious stack slot with zero.
7171 +- * There are no memory dependencies for this store,
7172 +- * since it's only using frame pointer and immediate
7173 +- * constant of zero
7174 +- */
7175 +- BPF_ST_MEM(BPF_DW, BPF_REG_FP,
7176 +- env->insn_aux_data[i + delta].sanitize_stack_off,
7177 +- 0),
7178 +- /* the original STX instruction will immediately
7179 +- * overwrite the same stack slot with appropriate value
7180 +- */
7181 + *insn,
7182 ++ BPF_ST_NOSPEC(),
7183 + };
7184 +
7185 + cnt = ARRAY_SIZE(patch);
7186 +@@ -5891,6 +5874,9 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
7187 + continue;
7188 + }
7189 +
7190 ++ if (!ctx_access)
7191 ++ continue;
7192 ++
7193 + if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
7194 + continue;
7195 +
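
The convert_ctx_accesses() rework patches each flagged store into a two-instruction sequence, the original insn followed by BPF_ST_NOSPEC(), instead of prefixing a zeroing store at a remembered offset. Patching an eBPF program means splicing a longer run into the instruction array; the splice itself over plain ints (the kernel's bpf_patch_insn_data() additionally fixes up jump offsets, omitted here):

    #include <stdio.h>
    #include <string.h>
    #include <stdlib.h>

    /* Replace prog[idx] with the cnt insns in patch; returns a new buffer. */
    static int *patch_insn(const int *prog, int len, int idx,
                           const int *patch, int cnt, int *new_len)
    {
        int *out = malloc(sizeof(int) * (len + cnt - 1));

        if (!out)
            return NULL;
        memcpy(out, prog, sizeof(int) * idx);
        memcpy(out + idx, patch, sizeof(int) * cnt);
        memcpy(out + idx + cnt, prog + idx + 1,
               sizeof(int) * (len - idx - 1));
        *new_len = len + cnt - 1;
        return out;
    }

    int main(void)
    {
        int prog[] = { 10, 20, 30 };
        int patch[] = { 20, 99 };         /* original insn, then barrier */
        int new_len, i;
        int *out = patch_insn(prog, 3, 1, patch, 2, &new_len);

        for (i = 0; out && i < new_len; i++)
            printf("%d ", out[i]);        /* 10 20 99 30 */
        printf("\n");
        free(out);
        return 0;
    }
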
7196 +diff --git a/kernel/events/core.c b/kernel/events/core.c
7197 +index dd740f91de478..4a8c3f5313f96 100644
7198 +--- a/kernel/events/core.c
7199 ++++ b/kernel/events/core.c
7200 +@@ -8914,7 +8914,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
7201 + return;
7202 +
7203 + if (ifh->nr_file_filters) {
7204 +- mm = get_task_mm(event->ctx->task);
7205 ++ mm = get_task_mm(task);
7206 + if (!mm)
7207 + goto restart;
7208 +
7209 +diff --git a/kernel/fork.c b/kernel/fork.c
7210 +index cf535b9d5db75..b658716005077 100644
7211 +--- a/kernel/fork.c
7212 ++++ b/kernel/fork.c
7213 +@@ -964,6 +964,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
7214 + mm->pmd_huge_pte = NULL;
7215 + #endif
7216 + mm_init_uprobes_state(mm);
7217 ++ hugetlb_count_init(mm);
7218 +
7219 + if (current->mm) {
7220 + mm->flags = current->mm->flags & MMF_INIT_MASK;
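The hugetlb_count_init() call added above ensures a freshly initialized mm starts with a zero hugetlb page counter instead of whatever a recycled allocation left behind. A toy sketch of that reset-on-init discipline (mm_sketch is an illustrative stand-in, not the kernel's mm_struct):

    #include <stdio.h>

    /* Illustrative stand-in for mm_struct; the real counter is an
     * atomic_long_t inside mm_struct. */
    struct mm_sketch {
        long hugetlb_usage;
        unsigned long flags;
    };

    static void hugetlb_count_init_sketch(struct mm_sketch *mm)
    {
        mm->hugetlb_usage = 0;  /* a new mm owns no hugetlb pages yet */
    }

    static void mm_init_sketch(struct mm_sketch *mm)
    {
        /* Every per-mm counter must be reset here: the struct may come
         * from a cache still carrying the previous owner's values. */
        hugetlb_count_init_sketch(mm);
        mm->flags = 0;
    }

    int main(void)
    {
        struct mm_sketch mm = { .hugetlb_usage = 42 };  /* stale value */

        mm_init_sketch(&mm);
        printf("hugetlb_usage after init: %ld\n", mm.hugetlb_usage);
        return 0;
    }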
7221 +diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
7222 +index 354151fef06ae..fbc62d360419d 100644
7223 +--- a/kernel/locking/mutex.c
7224 ++++ b/kernel/locking/mutex.c
7225 +@@ -911,7 +911,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
7226 + struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
7227 + {
7228 + struct mutex_waiter waiter;
7229 +- bool first = false;
7230 + struct ww_mutex *ww;
7231 + int ret;
7232 +
7233 +@@ -986,6 +985,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
7234 +
7235 + set_current_state(state);
7236 + for (;;) {
7237 ++ bool first;
7238 ++
7239 + /*
7240 + * Once we hold wait_lock, we're serialized against
7241 + * mutex_unlock() handing the lock off to us, do a trylock
7242 +@@ -1014,15 +1015,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
7243 + spin_unlock(&lock->wait_lock);
7244 + schedule_preempt_disabled();
7245 +
7246 +- /*
7247 +- * ww_mutex needs to always recheck its position since its waiter
7248 +- * list is not FIFO ordered.
7249 +- */
7250 +- if (ww_ctx || !first) {
7251 +- first = __mutex_waiter_is_first(lock, &waiter);
7252 +- if (first)
7253 +- __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
7254 +- }
7255 ++ first = __mutex_waiter_is_first(lock, &waiter);
7256 ++ if (first)
7257 ++ __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
7258 +
7259 + set_current_state(state);
7260 + /*
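The mutex fix above recomputes `first` on every pass through the wait loop instead of caching it, because a ww_mutex wait list is not FIFO and a waiter's position can change while it sleeps. A toy sketch of per-wakeup rechecking (the position sequence is simulated, not a real lock):

    #include <stdbool.h>
    #include <stdio.h>

    /* Simulated head-of-queue status of one waiter across wakeups; with
     * ww_mutexes the wait list can be reordered at any time. */
    static const bool is_first_now[] = { false, true, false, true };

    int main(void)
    {
        for (int wakeup = 0; wakeup < 4; wakeup++) {
            /* Recompute on every iteration, as the patched loop does. */
            bool first = is_first_now[wakeup];

            printf("wakeup %d: %s\n", wakeup,
                   first ? "set MUTEX_FLAG_HANDOFF" : "wait again");
        }
        return 0;
    }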
7261 +diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
7262 +index 95271f180687e..33de14435c1f6 100644
7263 +--- a/kernel/pid_namespace.c
7264 ++++ b/kernel/pid_namespace.c
7265 +@@ -52,7 +52,8 @@ static struct kmem_cache *create_pid_cachep(unsigned int level)
7266 + mutex_lock(&pid_caches_mutex);
7267 + /* Name collision forces to do allocation under mutex. */
7268 + if (!*pkc)
7269 +- *pkc = kmem_cache_create(name, len, 0, SLAB_HWCACHE_ALIGN, 0);
7270 ++ *pkc = kmem_cache_create(name, len, 0,
7271 ++ SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, 0);
7272 + mutex_unlock(&pid_caches_mutex);
7273 + /* current can fail, but someone else can succeed. */
7274 + return READ_ONCE(*pkc);
7275 +diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
7276 +index aa592dc3cb401..beec5081a55af 100644
7277 +--- a/kernel/sched/deadline.c
7278 ++++ b/kernel/sched/deadline.c
7279 +@@ -1654,6 +1654,7 @@ static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused
7280 + */
7281 + raw_spin_lock(&rq->lock);
7282 + if (p->dl.dl_non_contending) {
7283 ++ update_rq_clock(rq);
7284 + sub_running_bw(&p->dl, &rq->dl);
7285 + p->dl.dl_non_contending = 0;
7286 + /*
7287 +@@ -2615,7 +2616,7 @@ void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
7288 + dl_se->dl_runtime = attr->sched_runtime;
7289 + dl_se->dl_deadline = attr->sched_deadline;
7290 + dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
7291 +- dl_se->flags = attr->sched_flags;
7292 ++ dl_se->flags = attr->sched_flags & SCHED_DL_FLAGS;
7293 + dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
7294 + dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
7295 + }
7296 +@@ -2628,7 +2629,8 @@ void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
7297 + attr->sched_runtime = dl_se->dl_runtime;
7298 + attr->sched_deadline = dl_se->dl_deadline;
7299 + attr->sched_period = dl_se->dl_period;
7300 +- attr->sched_flags = dl_se->flags;
7301 ++ attr->sched_flags &= ~SCHED_DL_FLAGS;
7302 ++ attr->sched_flags |= dl_se->flags;
7303 + }
7304 +
7305 + /*
7306 +@@ -2703,7 +2705,7 @@ bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
7307 + if (dl_se->dl_runtime != attr->sched_runtime ||
7308 + dl_se->dl_deadline != attr->sched_deadline ||
7309 + dl_se->dl_period != attr->sched_period ||
7310 +- dl_se->flags != attr->sched_flags)
7311 ++ dl_se->flags != (attr->sched_flags & SCHED_DL_FLAGS))
7312 + return true;
7313 +
7314 + return false;
7315 +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
7316 +index 7b7ba91e319bb..55e695080fc6b 100644
7317 +--- a/kernel/sched/sched.h
7318 ++++ b/kernel/sched/sched.h
7319 +@@ -209,6 +209,8 @@ static inline int task_has_dl_policy(struct task_struct *p)
7320 + */
7321 + #define SCHED_FLAG_SUGOV 0x10000000
7322 +
7323 ++#define SCHED_DL_FLAGS (SCHED_FLAG_RECLAIM | SCHED_FLAG_DL_OVERRUN | SCHED_FLAG_SUGOV)
7324 ++
7325 + static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
7326 + {
7327 + #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
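With SCHED_DL_FLAGS applied on both paths above, only deadline-relevant bits are stored in dl_se->flags and unrelated sched_flags bits survive a set/get round trip. A standalone sketch of that masking (the flag values mirror the uapi constants, but treat them as illustrative):

    #include <stdio.h>

    #define SCHED_FLAG_RECLAIM    0x02
    #define SCHED_FLAG_DL_OVERRUN 0x04
    #define SCHED_FLAG_SUGOV      0x10000000
    #define SCHED_DL_FLAGS (SCHED_FLAG_RECLAIM | SCHED_FLAG_DL_OVERRUN | \
                            SCHED_FLAG_SUGOV)

    static unsigned int dl_se_flags;  /* stands in for dl_se->flags */

    static void setparam(unsigned int sched_flags)
    {
        /* Store only the deadline-relevant bits. */
        dl_se_flags = sched_flags & SCHED_DL_FLAGS;
    }

    static unsigned int getparam(unsigned int sched_flags)
    {
        /* Replace the deadline bits, keep everything else untouched. */
        sched_flags &= ~SCHED_DL_FLAGS;
        sched_flags |= dl_se_flags;
        return sched_flags;
    }

    int main(void)
    {
        unsigned int in = SCHED_FLAG_RECLAIM | 0x01; /* 0x01: unrelated bit */

        setparam(in);
        printf("stored: %#x, round trip: %#x\n", dl_se_flags, getparam(0x01));
        return 0;
    }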
7328 +diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
7329 +index 0e04b24cec818..32ee24f5142ab 100644
7330 +--- a/kernel/time/hrtimer.c
7331 ++++ b/kernel/time/hrtimer.c
7332 +@@ -1020,12 +1020,13 @@ static void __remove_hrtimer(struct hrtimer *timer,
7333 + * remove hrtimer, called with base lock held
7334 + */
7335 + static inline int
7336 +-remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
7337 ++remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base,
7338 ++ bool restart, bool keep_local)
7339 + {
7340 + u8 state = timer->state;
7341 +
7342 + if (state & HRTIMER_STATE_ENQUEUED) {
7343 +- int reprogram;
7344 ++ bool reprogram;
7345 +
7346 + /*
7347 + * Remove the timer and force reprogramming when high
7348 +@@ -1038,8 +1039,16 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool rest
7349 + debug_deactivate(timer);
7350 + reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
7351 +
7352 ++ /*
7353 ++ * If the timer is not restarted then reprogramming is
7354 ++ * required if the timer is local. If it is local and about
7355 ++ * to be restarted, avoid programming it twice (on removal
7356 ++ * and a moment later when it's requeued).
7357 ++ */
7358 + if (!restart)
7359 + state = HRTIMER_STATE_INACTIVE;
7360 ++ else
7361 ++ reprogram &= !keep_local;
7362 +
7363 + __remove_hrtimer(timer, base, state, reprogram);
7364 + return 1;
7365 +@@ -1093,9 +1102,31 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
7366 + struct hrtimer_clock_base *base)
7367 + {
7368 + struct hrtimer_clock_base *new_base;
7369 ++ bool force_local, first;
7370 +
7371 +- /* Remove an active timer from the queue: */
7372 +- remove_hrtimer(timer, base, true);
7373 ++ /*
7374 ++ * If the timer is on the local cpu base and is the first expiring
7375 ++ * timer then this might end up reprogramming the hardware twice
7376 ++ * (on removal and on enqueue). To avoid that, prevent the
7377 ++ * reprogram on removal: keep the timer local to the current CPU
7378 ++ * and enforce reprogramming after it is queued no matter whether
7379 ++ * it is the new first expiring timer again or not.
7380 ++ */
7381 ++ force_local = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
7382 ++ force_local &= base->cpu_base->next_timer == timer;
7383 ++
7384 ++ /*
7385 ++ * Remove an active timer from the queue. In case it is not queued
7386 ++ * on the current CPU, make sure that remove_hrtimer() updates the
7387 ++ * remote data correctly.
7388 ++ *
7389 ++ * If it's on the current CPU and the first expiring timer, then
7390 ++ * skip reprogramming, keep the timer local and enforce
7391 ++ * reprogramming later if it was the first expiring timer. This
7392 ++ * avoids programming the underlying clock event twice (once at
7393 ++ * removal and once after enqueue).
7394 ++ */
7395 ++ remove_hrtimer(timer, base, true, force_local);
7396 +
7397 + if (mode & HRTIMER_MODE_REL)
7398 + tim = ktime_add_safe(tim, base->get_time());
7399 +@@ -1105,9 +1136,24 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
7400 + hrtimer_set_expires_range_ns(timer, tim, delta_ns);
7401 +
7402 + /* Switch the timer base, if necessary: */
7403 +- new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
7404 ++ if (!force_local) {
7405 ++ new_base = switch_hrtimer_base(timer, base,
7406 ++ mode & HRTIMER_MODE_PINNED);
7407 ++ } else {
7408 ++ new_base = base;
7409 ++ }
7410 ++
7411 ++ first = enqueue_hrtimer(timer, new_base, mode);
7412 ++ if (!force_local)
7413 ++ return first;
7414 +
7415 +- return enqueue_hrtimer(timer, new_base, mode);
7416 ++ /*
7417 ++ * Timer was forced to stay on the current CPU to avoid
7418 ++ * reprogramming on removal and enqueue. Force reprogram the
7419 ++ * hardware by evaluating the new first expiring timer.
7420 ++ */
7421 ++ hrtimer_force_reprogram(new_base->cpu_base, 1);
7422 ++ return 0;
7423 + }
7424 +
7425 + /**
7426 +@@ -1168,7 +1214,7 @@ int hrtimer_try_to_cancel(struct hrtimer *timer)
7427 + base = lock_hrtimer_base(timer, &flags);
7428 +
7429 + if (!hrtimer_callback_running(timer))
7430 +- ret = remove_hrtimer(timer, base, false);
7431 ++ ret = remove_hrtimer(timer, base, false, false);
7432 +
7433 + unlock_hrtimer_base(timer, &flags);
7434 +
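The hrtimer change threads two booleans through remove_hrtimer(): whether the timer is being re-armed and whether it is kept local as the first expiring timer, deferring the reprogram to the enqueue step in that case. A small truth-table sketch of the resulting decision (pure logic, no kernel types):

    #include <stdbool.h>
    #include <stdio.h>

    /* Mirror of the remove_hrtimer() decision: reprogram the clock event
     * device on removal only when the timer sits on this CPU's base and
     * the reprogram is not being deferred to the enqueue step. */
    static bool reprogram_on_removal(bool is_local, bool restart, bool keep_local)
    {
        bool reprogram = is_local;

        if (restart)
            reprogram &= !keep_local;  /* defer to enqueue instead */
        return reprogram;
    }

    int main(void)
    {
        for (int local = 0; local <= 1; local++)
            for (int restart = 0; restart <= 1; restart++)
                for (int keep = 0; keep <= 1; keep++)
                    printf("local=%d restart=%d keep_local=%d -> %s\n",
                           local, restart, keep,
                           reprogram_on_removal(local, restart, keep) ?
                           "reprogram now" : "skip");
        return 0;
    }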
7435 +diff --git a/lib/mpi/mpiutil.c b/lib/mpi/mpiutil.c
7436 +index 20ed0f7667871..00825028cc847 100644
7437 +--- a/lib/mpi/mpiutil.c
7438 ++++ b/lib/mpi/mpiutil.c
7439 +@@ -91,7 +91,7 @@ int mpi_resize(MPI a, unsigned nlimbs)
7440 + return 0; /* no need to do it */
7441 +
7442 + if (a->d) {
7443 +- p = kmalloc_array(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
7444 ++ p = kcalloc(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
7445 + if (!p)
7446 + return -ENOMEM;
7447 + memcpy(p, a->d, a->alloced * sizeof(mpi_limb_t));
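Switching to kcalloc() above matters because the limbs past the memcpy'd region would otherwise hold uninitialized heap bytes. A userspace analogue of the resize path using calloc():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        size_t old_n = 2, new_n = 4;
        unsigned long old_limbs[2] = { 0xAAAA, 0xBBBB };

        /* calloc() zero-fills: the tail beyond the copied region is
         * guaranteed to read back as 0, not stale heap contents. */
        unsigned long *p = calloc(new_n, sizeof(*p));
        if (!p)
            return 1;
        memcpy(p, old_limbs, old_n * sizeof(*p));

        for (size_t i = 0; i < new_n; i++)
            printf("limb[%zu] = %#lx\n", i, p[i]);
        free(p);
        return 0;
    }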
7448 +diff --git a/lib/test_bpf.c b/lib/test_bpf.c
7449 +index 08d3d59dca173..49d79079e8b3e 100644
7450 +--- a/lib/test_bpf.c
7451 ++++ b/lib/test_bpf.c
7452 +@@ -4293,8 +4293,8 @@ static struct bpf_test tests[] = {
7453 + .u.insns_int = {
7454 + BPF_LD_IMM64(R0, 0),
7455 + BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
7456 +- BPF_STX_MEM(BPF_W, R10, R1, -40),
7457 +- BPF_LDX_MEM(BPF_W, R0, R10, -40),
7458 ++ BPF_STX_MEM(BPF_DW, R10, R1, -40),
7459 ++ BPF_LDX_MEM(BPF_DW, R0, R10, -40),
7460 + BPF_EXIT_INSN(),
7461 + },
7462 + INTERNAL,
7463 +@@ -6687,7 +6687,14 @@ static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
7464 + u64 duration;
7465 + u32 ret;
7466 +
7467 +- if (test->test[i].data_size == 0 &&
7468 ++ /*
7469 ++ * NOTE: Several sub-tests may be present, in which case
7470 ++ * a zero {data_size, result} tuple indicates the end of
7471 ++ * the sub-test array. The first test is always run,
7472 ++ * even if both data_size and result happen to be zero.
7473 ++ */
7474 ++ if (i > 0 &&
7475 ++ test->test[i].data_size == 0 &&
7476 + test->test[i].result == 0)
7477 + break;
7478 +
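The run_one() guard above makes the all-zero {data_size, result} tuple act as a terminator only from the second entry onward, so a legitimately all-zero first sub-test still executes. A small sketch of that sentinel convention:

    #include <stdio.h>

    struct subtest { int data_size; int result; };

    int main(void)
    {
        /* The first entry is legitimately all zero; the trailing {0, 0}
         * is the terminator. */
        struct subtest tests[] = { {0, 0}, {4, 1}, {0, 0} };

        for (int i = 0; ; i++) {
            if (i > 0 && tests[i].data_size == 0 && tests[i].result == 0)
                break;  /* sentinel reached, but never at i == 0 */
            printf("running sub-test %d\n", i);
        }
        return 0;
    }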
7479 +diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
7480 +index e60e28131f679..20f079c81b335 100644
7481 +--- a/mm/memory_hotplug.c
7482 ++++ b/mm/memory_hotplug.c
7483 +@@ -783,8 +783,8 @@ static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn
7484 + return movable_node_enabled ? movable_zone : kernel_zone;
7485 + }
7486 +
7487 +-struct zone * zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
7488 +- unsigned long nr_pages)
7489 ++struct zone *zone_for_pfn_range(int online_type, int nid,
7490 ++ unsigned long start_pfn, unsigned long nr_pages)
7491 + {
7492 + if (online_type == MMOP_ONLINE_KERNEL)
7493 + return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages);
7494 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
7495 +index 4446a523e684e..afcaa657a0229 100644
7496 +--- a/mm/page_alloc.c
7497 ++++ b/mm/page_alloc.c
7498 +@@ -807,7 +807,7 @@ static inline void __free_one_page(struct page *page,
7499 + struct page *buddy;
7500 + unsigned int max_order;
7501 +
7502 +- max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
7503 ++ max_order = min_t(unsigned int, MAX_ORDER - 1, pageblock_order);
7504 +
7505 + VM_BUG_ON(!zone_is_initialized(zone));
7506 + VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
7507 +@@ -820,7 +820,7 @@ static inline void __free_one_page(struct page *page,
7508 + VM_BUG_ON_PAGE(bad_range(zone, page), page);
7509 +
7510 + continue_merging:
7511 +- while (order < max_order - 1) {
7512 ++ while (order < max_order) {
7513 + buddy_pfn = __find_buddy_pfn(pfn, order);
7514 + buddy = page + (buddy_pfn - pfn);
7515 +
7516 +@@ -844,7 +844,7 @@ continue_merging:
7517 + pfn = combined_pfn;
7518 + order++;
7519 + }
7520 +- if (max_order < MAX_ORDER) {
7521 ++ if (order < MAX_ORDER - 1) {
7522 + /* If we are here, it means order is >= pageblock_order.
7523 + * We want to prevent merge between freepages on isolate
7524 + * pageblock and normal pageblock. Without this, pageblock
7525 +@@ -865,7 +865,7 @@ continue_merging:
7526 + is_migrate_isolate(buddy_mt)))
7527 + goto done_merging;
7528 + }
7529 +- max_order++;
7530 ++ max_order = order + 1;
7531 + goto continue_merging;
7532 + }
7533 +
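After the change, max_order is an inclusive bound on the merge loop, recomputed from the current order when crossing pageblock_order, and each buddy is found by flipping one pfn bit as __find_buddy_pfn() does. A userspace sketch of the merge walk (pfns only, no page structs):

    #include <stdio.h>

    /* Same formula the kernel uses in __find_buddy_pfn(). */
    static unsigned long find_buddy_pfn(unsigned long pfn, unsigned int order)
    {
        return pfn ^ (1UL << order);
    }

    int main(void)
    {
        unsigned long pfn = 0x40;
        unsigned int order = 0, max_order = 3;  /* inclusive bound */

        while (order < max_order) {
            unsigned long buddy = find_buddy_pfn(pfn, order);

            printf("order %u: page %#lx merges with buddy %#lx\n",
                   order, pfn, buddy);
            pfn &= buddy;  /* combined page starts at the lower pfn */
            order++;
        }
        printf("final free page: pfn %#lx, order %u\n", pfn, order);
        return 0;
    }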
7534 +diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
7535 +index 9daab0dd833b3..21132bf3d8503 100644
7536 +--- a/net/9p/trans_xen.c
7537 ++++ b/net/9p/trans_xen.c
7538 +@@ -138,7 +138,7 @@ static bool p9_xen_write_todo(struct xen_9pfs_dataring *ring, RING_IDX size)
7539 +
7540 + static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
7541 + {
7542 +- struct xen_9pfs_front_priv *priv = NULL;
7543 ++ struct xen_9pfs_front_priv *priv;
7544 + RING_IDX cons, prod, masked_cons, masked_prod;
7545 + unsigned long flags;
7546 + u32 size = p9_req->tc.size;
7547 +@@ -151,7 +151,7 @@ static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
7548 + break;
7549 + }
7550 + read_unlock(&xen_9pfs_lock);
7551 +- if (!priv || priv->client != client)
7552 ++ if (list_entry_is_head(priv, &xen_9pfs_devs, list))
7553 + return -EINVAL;
7554 +
7555 + num = p9_req->tc.tag % priv->num_rings;
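The check above works because after list_for_each_entry() runs to completion the cursor points at the list head reinterpreted as an entry, never NULL, so the old `!priv` test could not fire. A minimal container_of-style demo of the head-sentinel test:

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next; };

    struct item { int id; struct list_head node; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
    #define list_entry_is_head(pos, head, member) \
        (&(pos)->member == (head))

    int main(void)
    {
        struct list_head head;
        struct item a = { .id = 1 }, *pos;

        head.next = &a.node;
        a.node.next = &head;  /* circular list: tail points back to head */

        /* Walk the whole list without finding id == 2 ... */
        for (pos = container_of(head.next, struct item, node);
             !list_entry_is_head(pos, &head, node);
             pos = container_of(pos->node.next, struct item, node))
            if (pos->id == 2)
                break;

        /* ... pos is now the head disguised as an item, never NULL. */
        printf("found: %s\n",
               list_entry_is_head(pos, &head, node) ? "no (head reached)"
                                                    : "yes");
        return 0;
    }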
7556 +diff --git a/net/bluetooth/cmtp/cmtp.h b/net/bluetooth/cmtp/cmtp.h
7557 +index c32638dddbf94..f6b9dc4e408f2 100644
7558 +--- a/net/bluetooth/cmtp/cmtp.h
7559 ++++ b/net/bluetooth/cmtp/cmtp.h
7560 +@@ -26,7 +26,7 @@
7561 + #include <linux/types.h>
7562 + #include <net/bluetooth/bluetooth.h>
7563 +
7564 +-#define BTNAMSIZ 18
7565 ++#define BTNAMSIZ 21
7566 +
7567 + /* CMTP ioctl defines */
7568 + #define CMTPCONNADD _IOW('C', 200, int)
7569 +diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
7570 +index 7a85f215da45c..26acacb2fa95f 100644
7571 +--- a/net/bluetooth/hci_core.c
7572 ++++ b/net/bluetooth/hci_core.c
7573 +@@ -1296,6 +1296,12 @@ int hci_inquiry(void __user *arg)
7574 + goto done;
7575 + }
7576 +
7577 ++ /* Restrict maximum inquiry length to 60 seconds */
7578 ++ if (ir.length > 60) {
7579 ++ err = -EINVAL;
7580 ++ goto done;
7581 ++ }
7582 ++
7583 + hci_dev_lock(hdev);
7584 + if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
7585 + inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
7586 +@@ -1622,6 +1628,14 @@ int hci_dev_do_close(struct hci_dev *hdev)
7587 + hci_request_cancel_all(hdev);
7588 + hci_req_sync_lock(hdev);
7589 +
7590 ++ if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
7591 ++ !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
7592 ++ test_bit(HCI_UP, &hdev->flags)) {
7593 ++ /* Execute vendor specific shutdown routine */
7594 ++ if (hdev->shutdown)
7595 ++ hdev->shutdown(hdev);
7596 ++ }
7597 ++
7598 + if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
7599 + cancel_delayed_work_sync(&hdev->cmd_timer);
7600 + hci_req_sync_unlock(hdev);
7601 +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
7602 +index 45cc864cf2b38..937cada5595ee 100644
7603 +--- a/net/bluetooth/hci_event.c
7604 ++++ b/net/bluetooth/hci_event.c
7605 +@@ -4083,6 +4083,21 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
7606 +
7607 + switch (ev->status) {
7608 + case 0x00:
7609 ++ /* The synchronous connection complete event should only be
7610 ++ * sent once per new connection. Receiving a successful
7611 ++ * complete event when the connection status is already
7612 ++ * BT_CONNECTED means that the device is misbehaving and sent
7613 ++ * multiple complete event packets for the same new connection.
7614 ++ *
7615 ++ * Registering the device more than once can corrupt kernel
7616 ++ * memory, hence upon detecting this invalid event, we report
7617 ++ * an error and ignore the packet.
7618 ++ */
7619 ++ if (conn->state == BT_CONNECTED) {
7620 ++ bt_dev_err(hdev, "Ignoring connect complete event for existing connection");
7621 ++ goto unlock;
7622 ++ }
7623 ++
7624 + conn->handle = __le16_to_cpu(ev->handle);
7625 + conn->state = BT_CONNECTED;
7626 + conn->type = ev->link_type;
7627 +@@ -4786,9 +4801,64 @@ static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
7628 + }
7629 + #endif
7630 +
7631 ++static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
7632 ++ u8 bdaddr_type, bdaddr_t *local_rpa)
7633 ++{
7634 ++ if (conn->out) {
7635 ++ conn->dst_type = bdaddr_type;
7636 ++ conn->resp_addr_type = bdaddr_type;
7637 ++ bacpy(&conn->resp_addr, bdaddr);
7638 ++
7639 ++		/* If the controller has set a Local RPA then it must be
7640 ++		 * used instead of hdev->rpa.
7641 ++ */
7642 ++ if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
7643 ++ conn->init_addr_type = ADDR_LE_DEV_RANDOM;
7644 ++ bacpy(&conn->init_addr, local_rpa);
7645 ++ } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
7646 ++ conn->init_addr_type = ADDR_LE_DEV_RANDOM;
7647 ++ bacpy(&conn->init_addr, &conn->hdev->rpa);
7648 ++ } else {
7649 ++ hci_copy_identity_address(conn->hdev, &conn->init_addr,
7650 ++ &conn->init_addr_type);
7651 ++ }
7652 ++ } else {
7653 ++ conn->resp_addr_type = conn->hdev->adv_addr_type;
7654 ++		/* If the controller has set a Local RPA then it must be
7655 ++		 * used instead of hdev->rpa.
7656 ++ */
7657 ++ if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
7658 ++ conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
7659 ++ bacpy(&conn->resp_addr, local_rpa);
7660 ++ } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
7661 ++ /* In case of ext adv, resp_addr will be updated in
7662 ++ * Adv Terminated event.
7663 ++ */
7664 ++ if (!ext_adv_capable(conn->hdev))
7665 ++ bacpy(&conn->resp_addr,
7666 ++ &conn->hdev->random_addr);
7667 ++ } else {
7668 ++ bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
7669 ++ }
7670 ++
7671 ++ conn->init_addr_type = bdaddr_type;
7672 ++ bacpy(&conn->init_addr, bdaddr);
7673 ++
7674 ++ /* For incoming connections, set the default minimum
7675 ++ * and maximum connection interval. They will be used
7676 ++ * to check if the parameters are in range and if not
7677 ++ * trigger the connection update procedure.
7678 ++ */
7679 ++ conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
7680 ++ conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
7681 ++ }
7682 ++}
7683 ++
7684 + static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
7685 +- bdaddr_t *bdaddr, u8 bdaddr_type, u8 role, u16 handle,
7686 +- u16 interval, u16 latency, u16 supervision_timeout)
7687 ++ bdaddr_t *bdaddr, u8 bdaddr_type,
7688 ++ bdaddr_t *local_rpa, u8 role, u16 handle,
7689 ++ u16 interval, u16 latency,
7690 ++ u16 supervision_timeout)
7691 + {
7692 + struct hci_conn_params *params;
7693 + struct hci_conn *conn;
7694 +@@ -4836,32 +4906,7 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
7695 + cancel_delayed_work(&conn->le_conn_timeout);
7696 + }
7697 +
7698 +- if (!conn->out) {
7699 +- /* Set the responder (our side) address type based on
7700 +- * the advertising address type.
7701 +- */
7702 +- conn->resp_addr_type = hdev->adv_addr_type;
7703 +- if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
7704 +- /* In case of ext adv, resp_addr will be updated in
7705 +- * Adv Terminated event.
7706 +- */
7707 +- if (!ext_adv_capable(hdev))
7708 +- bacpy(&conn->resp_addr, &hdev->random_addr);
7709 +- } else {
7710 +- bacpy(&conn->resp_addr, &hdev->bdaddr);
7711 +- }
7712 +-
7713 +- conn->init_addr_type = bdaddr_type;
7714 +- bacpy(&conn->init_addr, bdaddr);
7715 +-
7716 +- /* For incoming connections, set the default minimum
7717 +- * and maximum connection interval. They will be used
7718 +- * to check if the parameters are in range and if not
7719 +- * trigger the connection update procedure.
7720 +- */
7721 +- conn->le_conn_min_interval = hdev->le_conn_min_interval;
7722 +- conn->le_conn_max_interval = hdev->le_conn_max_interval;
7723 +- }
7724 ++ le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
7725 +
7726 + /* Lookup the identity address from the stored connection
7727 + * address and address type.
7728 +@@ -4959,7 +5004,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
7729 + BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
7730 +
7731 + le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
7732 +- ev->role, le16_to_cpu(ev->handle),
7733 ++ NULL, ev->role, le16_to_cpu(ev->handle),
7734 + le16_to_cpu(ev->interval),
7735 + le16_to_cpu(ev->latency),
7736 + le16_to_cpu(ev->supervision_timeout));
7737 +@@ -4973,7 +5018,7 @@ static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
7738 + BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
7739 +
7740 + le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
7741 +- ev->role, le16_to_cpu(ev->handle),
7742 ++ &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
7743 + le16_to_cpu(ev->interval),
7744 + le16_to_cpu(ev->latency),
7745 + le16_to_cpu(ev->supervision_timeout));
7746 +@@ -5004,7 +5049,8 @@ static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
7747 + if (conn) {
7748 + struct adv_info *adv_instance;
7749 +
7750 +- if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM)
7751 ++ if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
7752 ++ bacmp(&conn->resp_addr, BDADDR_ANY))
7753 + return;
7754 +
7755 + if (!hdev->cur_adv_instance) {
7756 +diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
7757 +index a4ca55df73908..007a01b08dbe9 100644
7758 +--- a/net/bluetooth/sco.c
7759 ++++ b/net/bluetooth/sco.c
7760 +@@ -48,6 +48,8 @@ struct sco_conn {
7761 + spinlock_t lock;
7762 + struct sock *sk;
7763 +
7764 ++ struct delayed_work timeout_work;
7765 ++
7766 + unsigned int mtu;
7767 + };
7768 +
7769 +@@ -73,9 +75,20 @@ struct sco_pinfo {
7770 + #define SCO_CONN_TIMEOUT (HZ * 40)
7771 + #define SCO_DISCONN_TIMEOUT (HZ * 2)
7772 +
7773 +-static void sco_sock_timeout(struct timer_list *t)
7774 ++static void sco_sock_timeout(struct work_struct *work)
7775 + {
7776 +- struct sock *sk = from_timer(sk, t, sk_timer);
7777 ++ struct sco_conn *conn = container_of(work, struct sco_conn,
7778 ++ timeout_work.work);
7779 ++ struct sock *sk;
7780 ++
7781 ++ sco_conn_lock(conn);
7782 ++ sk = conn->sk;
7783 ++ if (sk)
7784 ++ sock_hold(sk);
7785 ++ sco_conn_unlock(conn);
7786 ++
7787 ++ if (!sk)
7788 ++ return;
7789 +
7790 + BT_DBG("sock %p state %d", sk, sk->sk_state);
7791 +
7792 +@@ -84,20 +97,26 @@ static void sco_sock_timeout(struct timer_list *t)
7793 + sk->sk_state_change(sk);
7794 + bh_unlock_sock(sk);
7795 +
7796 +- sco_sock_kill(sk);
7797 + sock_put(sk);
7798 + }
7799 +
7800 + static void sco_sock_set_timer(struct sock *sk, long timeout)
7801 + {
7802 ++ if (!sco_pi(sk)->conn)
7803 ++ return;
7804 ++
7805 + BT_DBG("sock %p state %d timeout %ld", sk, sk->sk_state, timeout);
7806 +- sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
7807 ++ cancel_delayed_work(&sco_pi(sk)->conn->timeout_work);
7808 ++ schedule_delayed_work(&sco_pi(sk)->conn->timeout_work, timeout);
7809 + }
7810 +
7811 + static void sco_sock_clear_timer(struct sock *sk)
7812 + {
7813 ++ if (!sco_pi(sk)->conn)
7814 ++ return;
7815 ++
7816 + BT_DBG("sock %p state %d", sk, sk->sk_state);
7817 +- sk_stop_timer(sk, &sk->sk_timer);
7818 ++ cancel_delayed_work(&sco_pi(sk)->conn->timeout_work);
7819 + }
7820 +
7821 + /* ---- SCO connections ---- */
7822 +@@ -176,8 +195,10 @@ static void sco_conn_del(struct hci_conn *hcon, int err)
7823 + sco_sock_clear_timer(sk);
7824 + sco_chan_del(sk, err);
7825 + bh_unlock_sock(sk);
7826 +- sco_sock_kill(sk);
7827 + sock_put(sk);
7828 ++
7829 ++ /* Ensure no more work items will run before freeing conn. */
7830 ++ cancel_delayed_work_sync(&conn->timeout_work);
7831 + }
7832 +
7833 + hcon->sco_data = NULL;
7834 +@@ -192,6 +213,8 @@ static void __sco_chan_add(struct sco_conn *conn, struct sock *sk,
7835 + sco_pi(sk)->conn = conn;
7836 + conn->sk = sk;
7837 +
7838 ++ INIT_DELAYED_WORK(&conn->timeout_work, sco_sock_timeout);
7839 ++
7840 + if (parent)
7841 + bt_accept_enqueue(parent, sk, true);
7842 + }
7843 +@@ -211,44 +234,32 @@ static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
7844 + return err;
7845 + }
7846 +
7847 +-static int sco_connect(struct sock *sk)
7848 ++static int sco_connect(struct hci_dev *hdev, struct sock *sk)
7849 + {
7850 + struct sco_conn *conn;
7851 + struct hci_conn *hcon;
7852 +- struct hci_dev *hdev;
7853 + int err, type;
7854 +
7855 + BT_DBG("%pMR -> %pMR", &sco_pi(sk)->src, &sco_pi(sk)->dst);
7856 +
7857 +- hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src, BDADDR_BREDR);
7858 +- if (!hdev)
7859 +- return -EHOSTUNREACH;
7860 +-
7861 +- hci_dev_lock(hdev);
7862 +-
7863 + if (lmp_esco_capable(hdev) && !disable_esco)
7864 + type = ESCO_LINK;
7865 + else
7866 + type = SCO_LINK;
7867 +
7868 + if (sco_pi(sk)->setting == BT_VOICE_TRANSPARENT &&
7869 +- (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev))) {
7870 +- err = -EOPNOTSUPP;
7871 +- goto done;
7872 +- }
7873 ++ (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev)))
7874 ++ return -EOPNOTSUPP;
7875 +
7876 + hcon = hci_connect_sco(hdev, type, &sco_pi(sk)->dst,
7877 + sco_pi(sk)->setting);
7878 +- if (IS_ERR(hcon)) {
7879 +- err = PTR_ERR(hcon);
7880 +- goto done;
7881 +- }
7882 ++ if (IS_ERR(hcon))
7883 ++ return PTR_ERR(hcon);
7884 +
7885 + conn = sco_conn_add(hcon);
7886 + if (!conn) {
7887 + hci_conn_drop(hcon);
7888 +- err = -ENOMEM;
7889 +- goto done;
7890 ++ return -ENOMEM;
7891 + }
7892 +
7893 + /* Update source addr of the socket */
7894 +@@ -256,7 +267,7 @@ static int sco_connect(struct sock *sk)
7895 +
7896 + err = sco_chan_add(conn, sk, NULL);
7897 + if (err)
7898 +- goto done;
7899 ++ return err;
7900 +
7901 + if (hcon->state == BT_CONNECTED) {
7902 + sco_sock_clear_timer(sk);
7903 +@@ -266,9 +277,6 @@ static int sco_connect(struct sock *sk)
7904 + sco_sock_set_timer(sk, sk->sk_sndtimeo);
7905 + }
7906 +
7907 +-done:
7908 +- hci_dev_unlock(hdev);
7909 +- hci_dev_put(hdev);
7910 + return err;
7911 + }
7912 +
7913 +@@ -393,8 +401,7 @@ static void sco_sock_cleanup_listen(struct sock *parent)
7914 + */
7915 + static void sco_sock_kill(struct sock *sk)
7916 + {
7917 +- if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket ||
7918 +- sock_flag(sk, SOCK_DEAD))
7919 ++ if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
7920 + return;
7921 +
7922 + BT_DBG("sk %p state %d", sk, sk->sk_state);
7923 +@@ -446,7 +453,6 @@ static void sco_sock_close(struct sock *sk)
7924 + lock_sock(sk);
7925 + __sco_sock_close(sk);
7926 + release_sock(sk);
7927 +- sco_sock_kill(sk);
7928 + }
7929 +
7930 + static void sco_sock_init(struct sock *sk, struct sock *parent)
7931 +@@ -488,8 +494,6 @@ static struct sock *sco_sock_alloc(struct net *net, struct socket *sock,
7932 +
7933 + sco_pi(sk)->setting = BT_VOICE_CVSD_16BIT;
7934 +
7935 +- timer_setup(&sk->sk_timer, sco_sock_timeout, 0);
7936 +-
7937 + bt_sock_link(&sco_sk_list, sk);
7938 + return sk;
7939 + }
7940 +@@ -554,6 +558,7 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
7941 + {
7942 + struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
7943 + struct sock *sk = sock->sk;
7944 ++ struct hci_dev *hdev;
7945 + int err;
7946 +
7947 + BT_DBG("sk %p", sk);
7948 +@@ -568,12 +573,19 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
7949 + if (sk->sk_type != SOCK_SEQPACKET)
7950 + return -EINVAL;
7951 +
7952 ++ hdev = hci_get_route(&sa->sco_bdaddr, &sco_pi(sk)->src, BDADDR_BREDR);
7953 ++ if (!hdev)
7954 ++ return -EHOSTUNREACH;
7955 ++ hci_dev_lock(hdev);
7956 ++
7957 + lock_sock(sk);
7958 +
7959 + /* Set destination address and psm */
7960 + bacpy(&sco_pi(sk)->dst, &sa->sco_bdaddr);
7961 +
7962 +- err = sco_connect(sk);
7963 ++ err = sco_connect(hdev, sk);
7964 ++ hci_dev_unlock(hdev);
7965 ++ hci_dev_put(hdev);
7966 + if (err)
7967 + goto done;
7968 +
7969 +@@ -761,6 +773,11 @@ static void sco_conn_defer_accept(struct hci_conn *conn, u16 setting)
7970 + cp.max_latency = cpu_to_le16(0xffff);
7971 + cp.retrans_effort = 0xff;
7972 + break;
7973 ++ default:
7974 ++ /* use CVSD settings as fallback */
7975 ++ cp.max_latency = cpu_to_le16(0xffff);
7976 ++ cp.retrans_effort = 0xff;
7977 ++ break;
7978 + }
7979 +
7980 + hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
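The SCO timeout now runs as delayed work that takes its own socket reference under the connection lock before touching the socket, so it stays safe even if the owner drops its reference concurrently. A pthread sketch of that hold-under-lock pattern (the types are illustrative, not the kernel's):

    #include <pthread.h>
    #include <stdio.h>

    struct sock_sketch { int refcnt; };

    struct conn_sketch {
        pthread_mutex_t lock;
        struct sock_sketch *sk;  /* may be cleared concurrently */
    };

    static void timeout_work(struct conn_sketch *conn)
    {
        struct sock_sketch *sk;

        pthread_mutex_lock(&conn->lock);
        sk = conn->sk;
        if (sk)
            sk->refcnt++;  /* sock_hold() while the pointer is stable */
        pthread_mutex_unlock(&conn->lock);

        if (!sk)
            return;        /* socket already detached: nothing to do */

        printf("handling timeout, refcnt=%d\n", sk->refcnt);
        sk->refcnt--;      /* sock_put() */
    }

    int main(void)
    {
        struct sock_sketch sk = { .refcnt = 1 };
        struct conn_sketch conn = { PTHREAD_MUTEX_INITIALIZER, &sk };

        timeout_work(&conn);
        conn.sk = NULL;        /* socket detached */
        timeout_work(&conn);   /* now a safe no-op */
        return 0;
    }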
7981 +diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
7982 +index 13e2ae6be620c..8aeece7aa9e97 100644
7983 +--- a/net/caif/chnl_net.c
7984 ++++ b/net/caif/chnl_net.c
7985 +@@ -53,20 +53,6 @@ struct chnl_net {
7986 + enum caif_states state;
7987 + };
7988 +
7989 +-static void robust_list_del(struct list_head *delete_node)
7990 +-{
7991 +- struct list_head *list_node;
7992 +- struct list_head *n;
7993 +- ASSERT_RTNL();
7994 +- list_for_each_safe(list_node, n, &chnl_net_list) {
7995 +- if (list_node == delete_node) {
7996 +- list_del(list_node);
7997 +- return;
7998 +- }
7999 +- }
8000 +- WARN_ON(1);
8001 +-}
8002 +-
8003 + static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
8004 + {
8005 + struct sk_buff *skb;
8006 +@@ -368,6 +354,7 @@ static int chnl_net_init(struct net_device *dev)
8007 + ASSERT_RTNL();
8008 + priv = netdev_priv(dev);
8009 + strncpy(priv->name, dev->name, sizeof(priv->name));
8010 ++ INIT_LIST_HEAD(&priv->list_field);
8011 + return 0;
8012 + }
8013 +
8014 +@@ -376,7 +363,7 @@ static void chnl_net_uninit(struct net_device *dev)
8015 + struct chnl_net *priv;
8016 + ASSERT_RTNL();
8017 + priv = netdev_priv(dev);
8018 +- robust_list_del(&priv->list_field);
8019 ++ list_del_init(&priv->list_field);
8020 + }
8021 +
8022 + static const struct net_device_ops netdev_ops = {
8023 +@@ -541,7 +528,7 @@ static void __exit chnl_exit_module(void)
8024 + rtnl_lock();
8025 + list_for_each_safe(list_node, _tmp, &chnl_net_list) {
8026 + dev = list_entry(list_node, struct chnl_net, list_field);
8027 +- list_del(list_node);
8028 ++ list_del_init(list_node);
8029 + delete_device(dev);
8030 + }
8031 + rtnl_unlock();
8032 +diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
8033 +index 994dd1520f07a..949694c70cbc6 100644
8034 +--- a/net/core/flow_dissector.c
8035 ++++ b/net/core/flow_dissector.c
8036 +@@ -694,8 +694,10 @@ proto_again:
8037 + FLOW_DISSECTOR_KEY_IPV4_ADDRS,
8038 + target_container);
8039 +
8040 +- memcpy(&key_addrs->v4addrs, &iph->saddr,
8041 +- sizeof(key_addrs->v4addrs));
8042 ++ memcpy(&key_addrs->v4addrs.src, &iph->saddr,
8043 ++ sizeof(key_addrs->v4addrs.src));
8044 ++ memcpy(&key_addrs->v4addrs.dst, &iph->daddr,
8045 ++ sizeof(key_addrs->v4addrs.dst));
8046 + key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
8047 + }
8048 +
8049 +@@ -744,8 +746,10 @@ proto_again:
8050 + FLOW_DISSECTOR_KEY_IPV6_ADDRS,
8051 + target_container);
8052 +
8053 +- memcpy(&key_addrs->v6addrs, &iph->saddr,
8054 +- sizeof(key_addrs->v6addrs));
8055 ++ memcpy(&key_addrs->v6addrs.src, &iph->saddr,
8056 ++ sizeof(key_addrs->v6addrs.src));
8057 ++ memcpy(&key_addrs->v6addrs.dst, &iph->daddr,
8058 ++ sizeof(key_addrs->v6addrs.dst));
8059 + key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
8060 + }
8061 +
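Both hunks above copy src and dst individually instead of one memcpy() spanning two adjacent fields, removing the silent dependency on struct layout. A sketch of the field-wise copy plus the compile-time adjacency check a combined copy would need (mirroring the BUILD_BUG_ON idiom the kernel uses elsewhere):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct v4_addrs { uint32_t src, dst; };

    int main(void)
    {
        uint32_t saddr = 0x01020304, daddr = 0x05060708;
        struct v4_addrs out;

        /* Field-wise copies: correct regardless of padding or ordering. */
        memcpy(&out.src, &saddr, sizeof(out.src));
        memcpy(&out.dst, &daddr, sizeof(out.dst));

        /* The old single-memcpy trick is only valid when the two fields
         * are provably adjacent, e.g. guarded by a static check: */
        static_assert(offsetof(struct v4_addrs, dst) ==
                      offsetof(struct v4_addrs, src) + sizeof(uint32_t),
                      "src/dst must be adjacent for a combined copy");

        printf("src=%#x dst=%#x\n", out.src, out.dst);
        return 0;
    }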
8062 +diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
8063 +index 939d8a31eb82a..26d70c00b0545 100644
8064 +--- a/net/core/net_namespace.c
8065 ++++ b/net/core/net_namespace.c
8066 +@@ -192,9 +192,9 @@ static int net_eq_idr(int id, void *net, void *peer)
8067 + return 0;
8068 + }
8069 +
8070 +-/* Should be called with nsid_lock held. If a new id is assigned, the bool alloc
8071 +- * is set to true, thus the caller knows that the new id must be notified via
8072 +- * rtnl.
8073 ++/* Must be called from RCU-critical section or with nsid_lock held. If
8074 ++ * a new id is assigned, the bool alloc is set to true, thus the
8075 ++ * caller knows that the new id must be notified via rtnl.
8076 + */
8077 + static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
8078 + {
8079 +@@ -218,7 +218,7 @@ static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
8080 + return NETNSA_NSID_NOT_ASSIGNED;
8081 + }
8082 +
8083 +-/* should be called with nsid_lock held */
8084 ++/* Must be called from RCU-critical section or with nsid_lock held */
8085 + static int __peernet2id(struct net *net, struct net *peer)
8086 + {
8087 + bool no = false;
8088 +@@ -261,9 +261,10 @@ int peernet2id(struct net *net, struct net *peer)
8089 + {
8090 + int id;
8091 +
8092 +- spin_lock_bh(&net->nsid_lock);
8093 ++ rcu_read_lock();
8094 + id = __peernet2id(net, peer);
8095 +- spin_unlock_bh(&net->nsid_lock);
8096 ++ rcu_read_unlock();
8097 ++
8098 + return id;
8099 + }
8100 + EXPORT_SYMBOL(peernet2id);
8101 +@@ -837,6 +838,7 @@ struct rtnl_net_dump_cb {
8102 + int s_idx;
8103 + };
8104 +
8105 ++/* Runs in RCU-critical section. */
8106 + static int rtnl_net_dumpid_one(int id, void *peer, void *data)
8107 + {
8108 + struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
8109 +@@ -867,9 +869,9 @@ static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
8110 + .s_idx = cb->args[0],
8111 + };
8112 +
8113 +- spin_lock_bh(&net->nsid_lock);
8114 ++ rcu_read_lock();
8115 + idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
8116 +- spin_unlock_bh(&net->nsid_lock);
8117 ++ rcu_read_unlock();
8118 +
8119 + cb->args[0] = net_cb.idx;
8120 + return skb->len;
8121 +diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
8122 +index ba6fc3c1186b9..e91838a7b8497 100644
8123 +--- a/net/dccp/minisocks.c
8124 ++++ b/net/dccp/minisocks.c
8125 +@@ -98,6 +98,8 @@ struct sock *dccp_create_openreq_child(const struct sock *sk,
8126 + newdp->dccps_role = DCCP_ROLE_SERVER;
8127 + newdp->dccps_hc_rx_ackvec = NULL;
8128 + newdp->dccps_service_list = NULL;
8129 ++ newdp->dccps_hc_rx_ccid = NULL;
8130 ++ newdp->dccps_hc_tx_ccid = NULL;
8131 + newdp->dccps_service = dreq->dreq_service;
8132 + newdp->dccps_timestamp_echo = dreq->dreq_timestamp_echo;
8133 + newdp->dccps_timestamp_time = dreq->dreq_timestamp_time;
8134 +diff --git a/net/dsa/slave.c b/net/dsa/slave.c
8135 +index b887d9edb9c38..f7c122357a966 100644
8136 +--- a/net/dsa/slave.c
8137 ++++ b/net/dsa/slave.c
8138 +@@ -1226,13 +1226,11 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev)
8139 + * use the switch internal MDIO bus instead
8140 + */
8141 + ret = dsa_slave_phy_connect(slave_dev, dp->index);
8142 +- if (ret) {
8143 +- netdev_err(slave_dev,
8144 +- "failed to connect to port %d: %d\n",
8145 +- dp->index, ret);
8146 +- phylink_destroy(dp->pl);
8147 +- return ret;
8148 +- }
8149 ++ }
8150 ++ if (ret) {
8151 ++ netdev_err(slave_dev, "failed to connect to PHY: %pe\n",
8152 ++ ERR_PTR(ret));
8153 ++ phylink_destroy(dp->pl);
8154 + }
8155 +
8156 + return ret;
8157 +diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
8158 +index dde6cf82e9f0a..fe10a565b7d85 100644
8159 +--- a/net/ipv4/icmp.c
8160 ++++ b/net/ipv4/icmp.c
8161 +@@ -465,6 +465,23 @@ out_bh_enable:
8162 + local_bh_enable();
8163 + }
8164 +
8165 ++/*
8166 ++ * The device used for looking up which routing table to use when sending an
8167 ++ * ICMP error is preferably the source device whenever it is set, which should
8168 ++ * ensure the ICMP error can be sent back to the source host; otherwise look it
8169 ++ * up via the destination device, else use the main routing table (index 0).
8170 ++ */
8171 ++static struct net_device *icmp_get_route_lookup_dev(struct sk_buff *skb)
8172 ++{
8173 ++ struct net_device *route_lookup_dev = NULL;
8174 ++
8175 ++ if (skb->dev)
8176 ++ route_lookup_dev = skb->dev;
8177 ++ else if (skb_dst(skb))
8178 ++ route_lookup_dev = skb_dst(skb)->dev;
8179 ++ return route_lookup_dev;
8180 ++}
8181 ++
8182 + static struct rtable *icmp_route_lookup(struct net *net,
8183 + struct flowi4 *fl4,
8184 + struct sk_buff *skb_in,
8185 +@@ -473,6 +490,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
8186 + int type, int code,
8187 + struct icmp_bxm *param)
8188 + {
8189 ++ struct net_device *route_lookup_dev;
8190 + struct rtable *rt, *rt2;
8191 + struct flowi4 fl4_dec;
8192 + int err;
8193 +@@ -487,7 +505,8 @@ static struct rtable *icmp_route_lookup(struct net *net,
8194 + fl4->flowi4_proto = IPPROTO_ICMP;
8195 + fl4->fl4_icmp_type = type;
8196 + fl4->fl4_icmp_code = code;
8197 +- fl4->flowi4_oif = l3mdev_master_ifindex(skb_dst(skb_in)->dev);
8198 ++ route_lookup_dev = icmp_get_route_lookup_dev(skb_in);
8199 ++ fl4->flowi4_oif = l3mdev_master_ifindex(route_lookup_dev);
8200 +
8201 + security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4));
8202 + rt = ip_route_output_key_hash(net, fl4, skb_in);
8203 +@@ -511,7 +530,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
8204 + if (err)
8205 + goto relookup_failed;
8206 +
8207 +- if (inet_addr_type_dev_table(net, skb_dst(skb_in)->dev,
8208 ++ if (inet_addr_type_dev_table(net, route_lookup_dev,
8209 + fl4_dec.saddr) == RTN_LOCAL) {
8210 + rt2 = __ip_route_output_key(net, &fl4_dec);
8211 + if (IS_ERR(rt2))
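icmp_get_route_lookup_dev() above centralizes the device choice, preferring skb->dev over skb_dst(skb)->dev and tolerating both being absent, unlike the old unconditional skb_dst() dereference. A null-safe sketch of that fallback chain (the struct names are illustrative):

    #include <stdio.h>

    struct dev_sketch { const char *name; };
    struct skb_sketch { struct dev_sketch *dev; struct dev_sketch *dst_dev; };

    static struct dev_sketch *route_lookup_dev(const struct skb_sketch *skb)
    {
        if (skb->dev)
            return skb->dev;      /* prefer the source device */
        if (skb->dst_dev)
            return skb->dst_dev;  /* else the destination's device */
        return NULL;              /* else: caller falls back to main table */
    }

    int main(void)
    {
        struct dev_sketch eth0 = { "eth0" };
        struct skb_sketch with_src = { &eth0, NULL };
        struct skb_sketch with_none = { NULL, NULL };
        struct dev_sketch *d;

        d = route_lookup_dev(&with_src);
        printf("lookup dev: %s\n", d ? d->name : "(none, main table)");
        d = route_lookup_dev(&with_none);
        printf("lookup dev: %s\n", d ? d->name : "(none, main table)");
        return 0;
    }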
8212 +diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
8213 +index dca7fe0ae24ad..15804cfc19a8c 100644
8214 +--- a/net/ipv4/igmp.c
8215 ++++ b/net/ipv4/igmp.c
8216 +@@ -2743,6 +2743,7 @@ int ip_check_mc_rcu(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u
8217 + rv = 1;
8218 + } else if (im) {
8219 + if (src_addr) {
8220 ++ spin_lock_bh(&im->lock);
8221 + for (psf = im->sources; psf; psf = psf->sf_next) {
8222 + if (psf->sf_inaddr == src_addr)
8223 + break;
8224 +@@ -2753,6 +2754,7 @@ int ip_check_mc_rcu(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u
8225 + im->sfcount[MCAST_EXCLUDE];
8226 + else
8227 + rv = im->sfcount[MCAST_EXCLUDE] != 0;
8228 ++ spin_unlock_bh(&im->lock);
8229 + } else
8230 + rv = 1; /* unspecified source; tentatively allow */
8231 + }
8232 +diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
8233 +index a8a37d1128201..0c431fd4b1200 100644
8234 +--- a/net/ipv4/ip_gre.c
8235 ++++ b/net/ipv4/ip_gre.c
8236 +@@ -449,8 +449,6 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
8237 +
8238 + static int gre_handle_offloads(struct sk_buff *skb, bool csum)
8239 + {
8240 +- if (csum && skb_checksum_start(skb) < skb->data)
8241 +- return -EINVAL;
8242 + return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
8243 + }
8244 +
8245 +@@ -682,15 +680,20 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
8246 + }
8247 +
8248 + if (dev->header_ops) {
8249 ++ const int pull_len = tunnel->hlen + sizeof(struct iphdr);
8250 ++
8251 + if (skb_cow_head(skb, 0))
8252 + goto free_skb;
8253 +
8254 + tnl_params = (const struct iphdr *)skb->data;
8255 +
8256 ++ if (pull_len > skb_transport_offset(skb))
8257 ++ goto free_skb;
8258 ++
8259 + /* Pull skb since ip_tunnel_xmit() needs skb->data pointing
8260 + * to gre header.
8261 + */
8262 +- skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
8263 ++ skb_pull(skb, pull_len);
8264 + skb_reset_mac_header(skb);
8265 + } else {
8266 + if (skb_cow_head(skb, dev->needed_headroom))
8267 +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
8268 +index e63905f7f6f95..25beecee89494 100644
8269 +--- a/net/ipv4/ip_output.c
8270 ++++ b/net/ipv4/ip_output.c
8271 +@@ -419,8 +419,9 @@ static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
8272 + {
8273 + BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
8274 + offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));
8275 +- memcpy(&iph->saddr, &fl4->saddr,
8276 +- sizeof(fl4->saddr) + sizeof(fl4->daddr));
8277 ++
8278 ++ iph->saddr = fl4->saddr;
8279 ++ iph->daddr = fl4->daddr;
8280 + }
8281 +
8282 + /* Note: skb->sk can be different from sk, in case of tunnels */
8283 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
8284 +index 1491d239385e5..730a15fc497ca 100644
8285 +--- a/net/ipv4/route.c
8286 ++++ b/net/ipv4/route.c
8287 +@@ -604,18 +604,25 @@ static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
8288 + }
8289 + }
8290 +
8291 +-static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
8292 ++static void fnhe_remove_oldest(struct fnhe_hash_bucket *hash)
8293 + {
8294 +- struct fib_nh_exception *fnhe, *oldest;
8295 ++ struct fib_nh_exception __rcu **fnhe_p, **oldest_p;
8296 ++ struct fib_nh_exception *fnhe, *oldest = NULL;
8297 +
8298 +- oldest = rcu_dereference(hash->chain);
8299 +- for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
8300 +- fnhe = rcu_dereference(fnhe->fnhe_next)) {
8301 +- if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
8302 ++ for (fnhe_p = &hash->chain; ; fnhe_p = &fnhe->fnhe_next) {
8303 ++ fnhe = rcu_dereference_protected(*fnhe_p,
8304 ++ lockdep_is_held(&fnhe_lock));
8305 ++ if (!fnhe)
8306 ++ break;
8307 ++ if (!oldest ||
8308 ++ time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp)) {
8309 + oldest = fnhe;
8310 ++ oldest_p = fnhe_p;
8311 ++ }
8312 + }
8313 + fnhe_flush_routes(oldest);
8314 +- return oldest;
8315 ++ *oldest_p = oldest->fnhe_next;
8316 ++ kfree_rcu(oldest, rcu);
8317 + }
8318 +
8319 + static inline u32 fnhe_hashfun(__be32 daddr)
8320 +@@ -692,16 +699,21 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
8321 + if (rt)
8322 + fill_route_from_fnhe(rt, fnhe);
8323 + } else {
8324 +- if (depth > FNHE_RECLAIM_DEPTH)
8325 +- fnhe = fnhe_oldest(hash);
8326 +- else {
8327 +- fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
8328 +- if (!fnhe)
8329 +- goto out_unlock;
8330 +-
8331 +- fnhe->fnhe_next = hash->chain;
8332 +- rcu_assign_pointer(hash->chain, fnhe);
8333 ++		/* Randomize max depth to avoid some side channel attacks. */
8334 ++ int max_depth = FNHE_RECLAIM_DEPTH +
8335 ++ prandom_u32_max(FNHE_RECLAIM_DEPTH);
8336 ++
8337 ++ while (depth > max_depth) {
8338 ++ fnhe_remove_oldest(hash);
8339 ++ depth--;
8340 + }
8341 ++
8342 ++ fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
8343 ++ if (!fnhe)
8344 ++ goto out_unlock;
8345 ++
8346 ++ fnhe->fnhe_next = hash->chain;
8347 ++
8348 + fnhe->fnhe_genid = genid;
8349 + fnhe->fnhe_daddr = daddr;
8350 + fnhe->fnhe_gw = gw;
8351 +@@ -709,6 +721,8 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
8352 + fnhe->fnhe_mtu_locked = lock;
8353 + fnhe->fnhe_expires = max(1UL, expires);
8354 +
8355 ++ rcu_assign_pointer(hash->chain, fnhe);
8356 ++
8357 + /* Exception created; mark the cached routes for the nexthop
8358 + * stale, so anyone caching it rechecks if this exception
8359 + * applies to them.
8360 +@@ -2801,7 +2815,7 @@ static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst,
8361 + udph = skb_put_zero(skb, sizeof(struct udphdr));
8362 + udph->source = sport;
8363 + udph->dest = dport;
8364 +- udph->len = sizeof(struct udphdr);
8365 ++ udph->len = htons(sizeof(struct udphdr));
8366 + udph->check = 0;
8367 + break;
8368 + }
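fnhe_remove_oldest() above tracks the address of the link leading to each node, so the oldest exception can be unlinked in O(1) once found and then freed via kfree_rcu(). The same indirect-pointer idiom in plain C:

    #include <stdio.h>

    struct node { int stamp; struct node *next; };

    /* Unlink the node with the smallest stamp by tracking the address of
     * the pointer that leads to each node (the **pp idiom). */
    static struct node *remove_oldest(struct node **head)
    {
        struct node **pp, **oldest_p = NULL, *n, *oldest = NULL;

        for (pp = head; (n = *pp) != NULL; pp = &n->next) {
            if (!oldest || n->stamp < oldest->stamp) {
                oldest = n;
                oldest_p = pp;
            }
        }
        if (oldest)
            *oldest_p = oldest->next;  /* O(1) unlink */
        return oldest;  /* caller frees it (kfree_rcu() in the kernel) */
    }

    int main(void)
    {
        struct node c = { 30, NULL }, b = { 10, &c }, a = { 20, &b };
        struct node *head = &a, *gone = remove_oldest(&head);

        printf("removed stamp %d\n", gone->stamp);
        for (struct node *n = head; n; n = n->next)
            printf("kept stamp %d\n", n->stamp);
        return 0;
    }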
8369 +diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
8370 +index 2ab371f555250..119d2c2f3b047 100644
8371 +--- a/net/ipv4/tcp_fastopen.c
8372 ++++ b/net/ipv4/tcp_fastopen.c
8373 +@@ -342,8 +342,7 @@ struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
8374 + return NULL;
8375 + }
8376 +
8377 +- if (syn_data &&
8378 +- tcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD))
8379 ++ if (tcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD))
8380 + goto fastopen;
8381 +
8382 + if (foc->len >= 0 && /* Client presents or requests a cookie */
8383 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
8384 +index 36bff9291530b..5117e0aeea1af 100644
8385 +--- a/net/ipv4/tcp_input.c
8386 ++++ b/net/ipv4/tcp_input.c
8387 +@@ -1195,7 +1195,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
8388 + if (dup_sack && (sacked & TCPCB_RETRANS)) {
8389 + if (tp->undo_marker && tp->undo_retrans > 0 &&
8390 + after(end_seq, tp->undo_marker))
8391 +- tp->undo_retrans--;
8392 ++ tp->undo_retrans = max_t(int, 0, tp->undo_retrans - pcount);
8393 + if ((sacked & TCPCB_SACKED_ACKED) &&
8394 + before(start_seq, state->reord))
8395 + state->reord = start_seq;
8396 +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
8397 +index 71236aa7388d7..de4edfbc9e466 100644
8398 +--- a/net/ipv4/tcp_ipv4.c
8399 ++++ b/net/ipv4/tcp_ipv4.c
8400 +@@ -2177,6 +2177,7 @@ static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
8401 + static void *tcp_seek_last_pos(struct seq_file *seq)
8402 + {
8403 + struct tcp_iter_state *st = seq->private;
8404 ++ int bucket = st->bucket;
8405 + int offset = st->offset;
8406 + int orig_num = st->num;
8407 + void *rc = NULL;
8408 +@@ -2187,7 +2188,7 @@ static void *tcp_seek_last_pos(struct seq_file *seq)
8409 + break;
8410 + st->state = TCP_SEQ_STATE_LISTENING;
8411 + rc = listening_get_next(seq, NULL);
8412 +- while (offset-- && rc)
8413 ++ while (offset-- && rc && bucket == st->bucket)
8414 + rc = listening_get_next(seq, rc);
8415 + if (rc)
8416 + break;
8417 +@@ -2198,7 +2199,7 @@ static void *tcp_seek_last_pos(struct seq_file *seq)
8418 + if (st->bucket > tcp_hashinfo.ehash_mask)
8419 + break;
8420 + rc = established_get_first(seq);
8421 +- while (offset-- && rc)
8422 ++ while (offset-- && rc && bucket == st->bucket)
8423 + rc = established_get_next(seq, rc);
8424 + }
8425 +
8426 +diff --git a/net/ipv6/netfilter/nf_socket_ipv6.c b/net/ipv6/netfilter/nf_socket_ipv6.c
8427 +index f14de4b6d639d..58e839e2ce1d3 100644
8428 +--- a/net/ipv6/netfilter/nf_socket_ipv6.c
8429 ++++ b/net/ipv6/netfilter/nf_socket_ipv6.c
8430 +@@ -104,7 +104,7 @@ struct sock *nf_sk_lookup_slow_v6(struct net *net, const struct sk_buff *skb,
8431 + {
8432 + __be16 uninitialized_var(dport), uninitialized_var(sport);
8433 + const struct in6_addr *daddr = NULL, *saddr = NULL;
8434 +- struct ipv6hdr *iph = ipv6_hdr(skb);
8435 ++ struct ipv6hdr *iph = ipv6_hdr(skb), ipv6_var;
8436 + struct sk_buff *data_skb = NULL;
8437 + int doff = 0;
8438 + int thoff = 0, tproto;
8439 +@@ -134,8 +134,6 @@ struct sock *nf_sk_lookup_slow_v6(struct net *net, const struct sk_buff *skb,
8440 + thoff + sizeof(*hp);
8441 +
8442 + } else if (tproto == IPPROTO_ICMPV6) {
8443 +- struct ipv6hdr ipv6_var;
8444 +-
8445 + if (extract_icmp6_fields(skb, thoff, &tproto, &saddr, &daddr,
8446 + &sport, &dport, &ipv6_var))
8447 + return NULL;
8448 +diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
8449 +index 9abdc0a04d993..bf2a53d455a18 100644
8450 +--- a/net/l2tp/l2tp_core.c
8451 ++++ b/net/l2tp/l2tp_core.c
8452 +@@ -889,8 +889,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
8453 + }
8454 +
8455 + if (tunnel->version == L2TP_HDR_VER_3 &&
8456 +- l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
8457 ++ l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) {
8458 ++ l2tp_session_dec_refcount(session);
8459 + goto error;
8460 ++ }
8461 +
8462 + l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
8463 + l2tp_session_dec_refcount(session);
8464 +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
8465 +index 3530d1a5fc98e..5c5908127fcb5 100644
8466 +--- a/net/mac80211/tx.c
8467 ++++ b/net/mac80211/tx.c
8468 +@@ -3142,7 +3142,9 @@ static bool ieee80211_amsdu_prepare_head(struct ieee80211_sub_if_data *sdata,
8469 + if (info->control.flags & IEEE80211_TX_CTRL_AMSDU)
8470 + return true;
8471 +
8472 +- if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr)))
8473 ++ if (!ieee80211_amsdu_realloc_pad(local, skb,
8474 ++ sizeof(*amsdu_hdr) +
8475 ++ local->hw.extra_tx_headroom))
8476 + return false;
8477 +
8478 + data = skb_push(skb, sizeof(*amsdu_hdr));
8479 +diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
8480 +index 3e3494c8d42f8..e252f62bb8c20 100644
8481 +--- a/net/netlabel/netlabel_cipso_v4.c
8482 ++++ b/net/netlabel/netlabel_cipso_v4.c
8483 +@@ -156,8 +156,8 @@ static int netlbl_cipsov4_add_std(struct genl_info *info,
8484 + return -ENOMEM;
8485 + doi_def->map.std = kzalloc(sizeof(*doi_def->map.std), GFP_KERNEL);
8486 + if (doi_def->map.std == NULL) {
8487 +- ret_val = -ENOMEM;
8488 +- goto add_std_failure;
8489 ++ kfree(doi_def);
8490 ++ return -ENOMEM;
8491 + }
8492 + doi_def->type = CIPSO_V4_MAP_TRANS;
8493 +
8494 +@@ -198,14 +198,14 @@ static int netlbl_cipsov4_add_std(struct genl_info *info,
8495 + }
8496 + doi_def->map.std->lvl.local = kcalloc(doi_def->map.std->lvl.local_size,
8497 + sizeof(u32),
8498 +- GFP_KERNEL);
8499 ++ GFP_KERNEL | __GFP_NOWARN);
8500 + if (doi_def->map.std->lvl.local == NULL) {
8501 + ret_val = -ENOMEM;
8502 + goto add_std_failure;
8503 + }
8504 + doi_def->map.std->lvl.cipso = kcalloc(doi_def->map.std->lvl.cipso_size,
8505 + sizeof(u32),
8506 +- GFP_KERNEL);
8507 ++ GFP_KERNEL | __GFP_NOWARN);
8508 + if (doi_def->map.std->lvl.cipso == NULL) {
8509 + ret_val = -ENOMEM;
8510 + goto add_std_failure;
8511 +@@ -273,7 +273,7 @@ static int netlbl_cipsov4_add_std(struct genl_info *info,
8512 + doi_def->map.std->cat.local = kcalloc(
8513 + doi_def->map.std->cat.local_size,
8514 + sizeof(u32),
8515 +- GFP_KERNEL);
8516 ++ GFP_KERNEL | __GFP_NOWARN);
8517 + if (doi_def->map.std->cat.local == NULL) {
8518 + ret_val = -ENOMEM;
8519 + goto add_std_failure;
8520 +@@ -281,7 +281,7 @@ static int netlbl_cipsov4_add_std(struct genl_info *info,
8521 + doi_def->map.std->cat.cipso = kcalloc(
8522 + doi_def->map.std->cat.cipso_size,
8523 + sizeof(u32),
8524 +- GFP_KERNEL);
8525 ++ GFP_KERNEL | __GFP_NOWARN);
8526 + if (doi_def->map.std->cat.cipso == NULL) {
8527 + ret_val = -ENOMEM;
8528 + goto add_std_failure;
8529 +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
8530 +index ac3fe507bc1c4..b0fd268ed65e5 100644
8531 +--- a/net/netlink/af_netlink.c
8532 ++++ b/net/netlink/af_netlink.c
8533 +@@ -2498,13 +2498,15 @@ int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
8534 + /* errors reported via destination sk->sk_err, but propagate
8535 + * delivery errors if NETLINK_BROADCAST_ERROR flag is set */
8536 + err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
8537 ++ if (err == -ESRCH)
8538 ++ err = 0;
8539 + }
8540 +
8541 + if (report) {
8542 + int err2;
8543 +
8544 + err2 = nlmsg_unicast(sk, skb, portid);
8545 +- if (!err || err == -ESRCH)
8546 ++ if (!err)
8547 + err = err2;
8548 + }
8549 +
8550 +diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
8551 +index ebc3c8c7e6661..bc62e1b246539 100644
8552 +--- a/net/sched/sch_cbq.c
8553 ++++ b/net/sched/sch_cbq.c
8554 +@@ -1616,7 +1616,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
8555 + err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
8556 + if (err) {
8557 + kfree(cl);
8558 +- return err;
8559 ++ goto failure;
8560 + }
8561 +
8562 + if (tca[TCA_RATE]) {
8563 +diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
8564 +index a862d9990be74..e4f69c779b8cf 100644
8565 +--- a/net/sched/sch_fq_codel.c
8566 ++++ b/net/sched/sch_fq_codel.c
8567 +@@ -382,6 +382,7 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
8568 + {
8569 + struct fq_codel_sched_data *q = qdisc_priv(sch);
8570 + struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
8571 ++ u32 quantum = 0;
8572 + int err;
8573 +
8574 + if (!opt)
8575 +@@ -399,6 +400,13 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
8576 + q->flows_cnt > 65536)
8577 + return -EINVAL;
8578 + }
8579 ++ if (tb[TCA_FQ_CODEL_QUANTUM]) {
8580 ++ quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
8581 ++ if (quantum > FQ_CODEL_QUANTUM_MAX) {
8582 ++ NL_SET_ERR_MSG(extack, "Invalid quantum");
8583 ++ return -EINVAL;
8584 ++ }
8585 ++ }
8586 + sch_tree_lock(sch);
8587 +
8588 + if (tb[TCA_FQ_CODEL_TARGET]) {
8589 +@@ -425,8 +433,8 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
8590 + if (tb[TCA_FQ_CODEL_ECN])
8591 + q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);
8592 +
8593 +- if (tb[TCA_FQ_CODEL_QUANTUM])
8594 +- q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
8595 ++ if (quantum)
8596 ++ q->quantum = quantum;
8597 +
8598 + if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
8599 + q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));
8600 +diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
8601 +index a85d78d2bdb73..d9d03881e4de5 100644
8602 +--- a/net/sunrpc/auth_gss/svcauth_gss.c
8603 ++++ b/net/sunrpc/auth_gss/svcauth_gss.c
8604 +@@ -1914,7 +1914,7 @@ gss_svc_init_net(struct net *net)
8605 + goto out2;
8606 + return 0;
8607 + out2:
8608 +- destroy_use_gss_proxy_proc_entry(net);
8609 ++ rsi_cache_destroy_net(net);
8610 + out1:
8611 + rsc_cache_destroy_net(net);
8612 + return rv;
8613 +diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
8614 +index 11d393c047728..6a606af8de819 100644
8615 +--- a/net/sunrpc/svc.c
8616 ++++ b/net/sunrpc/svc.c
8617 +@@ -1146,6 +1146,22 @@ static __printf(2,3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ..
8618 +
8619 + extern void svc_tcp_prep_reply_hdr(struct svc_rqst *);
8620 +
8621 ++__be32
8622 ++svc_return_autherr(struct svc_rqst *rqstp, __be32 auth_err)
8623 ++{
8624 ++ set_bit(RQ_AUTHERR, &rqstp->rq_flags);
8625 ++ return auth_err;
8626 ++}
8627 ++EXPORT_SYMBOL_GPL(svc_return_autherr);
8628 ++
8629 ++static __be32
8630 ++svc_get_autherr(struct svc_rqst *rqstp, __be32 *statp)
8631 ++{
8632 ++ if (test_and_clear_bit(RQ_AUTHERR, &rqstp->rq_flags))
8633 ++ return *statp;
8634 ++ return rpc_auth_ok;
8635 ++}
8636 ++
8637 + /*
8638 + * Common routine for processing the RPC request.
8639 + */
8640 +@@ -1296,11 +1312,9 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
8641 + procp->pc_release(rqstp);
8642 + goto dropit;
8643 + }
8644 +- if (*statp == rpc_autherr_badcred) {
8645 +- if (procp->pc_release)
8646 +- procp->pc_release(rqstp);
8647 +- goto err_bad_auth;
8648 +- }
8649 ++ auth_stat = svc_get_autherr(rqstp, statp);
8650 ++ if (auth_stat != rpc_auth_ok)
8651 ++ goto err_release_bad_auth;
8652 + if (*statp == rpc_success && procp->pc_encode &&
8653 + !procp->pc_encode(rqstp, resv->iov_base + resv->iov_len)) {
8654 + dprintk("svc: failed to encode reply\n");
8655 +@@ -1359,6 +1373,9 @@ err_bad_rpc:
8656 + svc_putnl(resv, 2);
8657 + goto sendit;
8658 +
8659 ++err_release_bad_auth:
8660 ++ if (procp->pc_release)
8661 ++ procp->pc_release(rqstp);
8662 + err_bad_auth:
8663 + dprintk("svc: authentication failed (%d)\n", ntohl(auth_stat));
8664 + serv->sv_stats->rpcbadauth++;
8665 +diff --git a/net/tipc/socket.c b/net/tipc/socket.c
8666 +index 6aead6deaa6c4..848ae6dcbd822 100644
8667 +--- a/net/tipc/socket.c
8668 ++++ b/net/tipc/socket.c
8669 +@@ -1716,6 +1716,7 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
8670 + bool connected = !tipc_sk_type_connectionless(sk);
8671 + struct tipc_sock *tsk = tipc_sk(sk);
8672 + int rc, err, hlen, dlen, copy;
8673 ++ struct tipc_skb_cb *skb_cb;
8674 + struct sk_buff_head xmitq;
8675 + struct tipc_msg *hdr;
8676 + struct sk_buff *skb;
8677 +@@ -1739,6 +1740,7 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
8678 + if (unlikely(rc))
8679 + goto exit;
8680 + skb = skb_peek(&sk->sk_receive_queue);
8681 ++ skb_cb = TIPC_SKB_CB(skb);
8682 + hdr = buf_msg(skb);
8683 + dlen = msg_data_sz(hdr);
8684 + hlen = msg_hdr_sz(hdr);
8685 +@@ -1758,18 +1760,33 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
8686 +
8687 + /* Capture data if non-error msg, otherwise just set return value */
8688 + if (likely(!err)) {
8689 +- copy = min_t(int, dlen, buflen);
8690 +- if (unlikely(copy != dlen))
8691 +- m->msg_flags |= MSG_TRUNC;
8692 +- rc = skb_copy_datagram_msg(skb, hlen, m, copy);
8693 ++ int offset = skb_cb->bytes_read;
8694 ++
8695 ++ copy = min_t(int, dlen - offset, buflen);
8696 ++ rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
8697 ++ if (unlikely(rc))
8698 ++ goto exit;
8699 ++ if (unlikely(offset + copy < dlen)) {
8700 ++ if (flags & MSG_EOR) {
8701 ++ if (!(flags & MSG_PEEK))
8702 ++ skb_cb->bytes_read = offset + copy;
8703 ++ } else {
8704 ++ m->msg_flags |= MSG_TRUNC;
8705 ++ skb_cb->bytes_read = 0;
8706 ++ }
8707 ++ } else {
8708 ++ if (flags & MSG_EOR)
8709 ++ m->msg_flags |= MSG_EOR;
8710 ++ skb_cb->bytes_read = 0;
8711 ++ }
8712 + } else {
8713 + copy = 0;
8714 + rc = 0;
8715 +- if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control)
8716 ++ if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control) {
8717 + rc = -ECONNRESET;
8718 ++ goto exit;
8719 ++ }
8720 + }
8721 +- if (unlikely(rc))
8722 +- goto exit;
8723 +
8724 + /* Mark message as group event if applicable */
8725 + if (unlikely(grp_evt)) {
8726 +@@ -1792,6 +1809,9 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
8727 + tipc_node_distr_xmit(sock_net(sk), &xmitq);
8728 + }
8729 +
8730 ++ if (skb_cb->bytes_read)
8731 ++ goto exit;
8732 ++
8733 + tsk_advance_rx_queue(sk);
8734 +
8735 + if (likely(!connected))
8736 +@@ -2203,7 +2223,7 @@ static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
8737 + static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
8738 + u32 dport, struct sk_buff_head *xmitq)
8739 + {
8740 +- unsigned long time_limit = jiffies + 2;
8741 ++ unsigned long time_limit = jiffies + usecs_to_jiffies(20000);
8742 + struct sk_buff *skb;
8743 + unsigned int lim;
8744 + atomic_t *dcnt;
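
The tipc_recvmsg() rework above changes what happens when a message is larger than the caller's buffer: with MSG_EOR in the recvmsg() flags the remainder stays queued (progress is tracked in skb_cb->bytes_read) and subsequent calls continue from that offset, with MSG_EOR reported in msg_flags on the final chunk; without the flag the old truncating behaviour is kept. The last hunk independently replaces the 2-jiffy enqueue budget, whose real length depends on HZ, with a fixed 20 ms. A user-space sketch of chunked reception, assuming fd is an already-connected TIPC socket:

    #include <stdio.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    /* Drain one (possibly large) TIPC message in 64-byte chunks. */
    static void read_in_chunks(int fd)
    {
            char buf[64];

            for (;;) {
                    struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
                    struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
                    ssize_t n = recvmsg(fd, &msg, MSG_EOR); /* opt in to chunking */

                    if (n <= 0)
                            break;
                    printf("got %zd byte(s)%s\n", n,
                           (msg.msg_flags & MSG_EOR) ? " - end of message" : "");
                    if (msg.msg_flags & MSG_EOR)    /* final chunk delivered */
                            break;
            }
    }
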
8745 +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
8746 +index 98c253afa0db2..c293a558b0d4f 100644
8747 +--- a/net/unix/af_unix.c
8748 ++++ b/net/unix/af_unix.c
8749 +@@ -2739,7 +2739,7 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
8750 +
8751 + other = unix_peer(sk);
8752 + if (other && unix_peer(other) != sk &&
8753 +- unix_recvq_full(other) &&
8754 ++ unix_recvq_full_lockless(other) &&
8755 + unix_dgram_peer_wake_me(sk, other))
8756 + writable = 0;
8757 +
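
unix_dgram_poll() runs without holding the peer's lock, so the queue-full test it uses must not assume a stable queue; the hunk above switches it to unix_recvq_full_lockless(), defined elsewhere in af_unix.c. Its shape is roughly the following (reproduced here only for context, not part of this hunk):

    static bool unix_recvq_full_lockless(const struct sock *sk)
    {
            /* skb_queue_len_lockless() and READ_ONCE() tolerate concurrent
             * writers, avoiding the torn reads possible with the locked
             * variants when called without the peer's lock. */
            return skb_queue_len_lockless(&sk->sk_receive_queue) >
                   READ_ONCE(sk->sk_max_ack_backlog);
    }
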
8758 +diff --git a/samples/bpf/test_override_return.sh b/samples/bpf/test_override_return.sh
8759 +index e68b9ee6814b8..35db26f736b9d 100755
8760 +--- a/samples/bpf/test_override_return.sh
8761 ++++ b/samples/bpf/test_override_return.sh
8762 +@@ -1,5 +1,6 @@
8763 + #!/bin/bash
8764 +
8765 ++rm -r tmpmnt
8766 + rm -f testfile.img
8767 + dd if=/dev/zero of=testfile.img bs=1M seek=1000 count=1
8768 + DEVICE=$(losetup --show -f testfile.img)
8769 +diff --git a/samples/bpf/tracex7_user.c b/samples/bpf/tracex7_user.c
8770 +index ea6dae78f0dff..2ed13e9f3fcb0 100644
8771 +--- a/samples/bpf/tracex7_user.c
8772 ++++ b/samples/bpf/tracex7_user.c
8773 +@@ -13,6 +13,11 @@ int main(int argc, char **argv)
8774 + char command[256];
8775 + int ret;
8776 +
8777 ++ if (!argv[1]) {
8778 ++ fprintf(stderr, "ERROR: Run with the btrfs device argument!\n");
8779 ++ return 0;
8780 ++ }
8781 ++
8782 + snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
8783 +
8784 + if (load_bpf_file(filename)) {
8785 +diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
8786 +index 13b446328dda0..5095b2e8fceeb 100644
8787 +--- a/security/integrity/ima/Kconfig
8788 ++++ b/security/integrity/ima/Kconfig
8789 +@@ -5,7 +5,6 @@ config IMA
8790 + select SECURITYFS
8791 + select CRYPTO
8792 + select CRYPTO_HMAC
8793 +- select CRYPTO_MD5
8794 + select CRYPTO_SHA1
8795 + select CRYPTO_HASH_INFO
8796 + select TCG_TPM if HAS_IOMEM && !UML
8797 +diff --git a/security/integrity/ima/ima_mok.c b/security/integrity/ima/ima_mok.c
8798 +index 3e7a1523663b8..daad75ee74d9f 100644
8799 +--- a/security/integrity/ima/ima_mok.c
8800 ++++ b/security/integrity/ima/ima_mok.c
8801 +@@ -26,7 +26,7 @@ struct key *ima_blacklist_keyring;
8802 + /*
8803 + * Allocate the IMA blacklist keyring
8804 + */
8805 +-__init int ima_mok_init(void)
8806 ++static __init int ima_mok_init(void)
8807 + {
8808 + struct key_restriction *restriction;
8809 +
8810 +diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
8811 +index a7855c61c05cd..07d23b4f76f3b 100644
8812 +--- a/security/smack/smack_access.c
8813 ++++ b/security/smack/smack_access.c
8814 +@@ -85,23 +85,22 @@ int log_policy = SMACK_AUDIT_DENIED;
8815 + int smk_access_entry(char *subject_label, char *object_label,
8816 + struct list_head *rule_list)
8817 + {
8818 +- int may = -ENOENT;
8819 + struct smack_rule *srp;
8820 +
8821 + list_for_each_entry_rcu(srp, rule_list, list) {
8822 + if (srp->smk_object->smk_known == object_label &&
8823 + srp->smk_subject->smk_known == subject_label) {
8824 +- may = srp->smk_access;
8825 +- break;
8826 ++ int may = srp->smk_access;
8827 ++ /*
8828 ++ * MAY_WRITE implies MAY_LOCK.
8829 ++ */
8830 ++ if ((may & MAY_WRITE) == MAY_WRITE)
8831 ++ may |= MAY_LOCK;
8832 ++ return may;
8833 + }
8834 + }
8835 +
8836 +- /*
8837 +- * MAY_WRITE implies MAY_LOCK.
8838 +- */
8839 +- if ((may & MAY_WRITE) == MAY_WRITE)
8840 +- may |= MAY_LOCK;
8841 +- return may;
8842 ++ return -ENOENT;
8843 + }
8844 +
8845 + /**
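
A note on the smk_access_entry() change: the old code could reach the MAY_WRITE fold with may still holding the -ENOENT sentinel, and only the two's-complement representation (all high bits of -2 already set) kept the fold from corrupting the return value. Hoisting the fold into the match branch makes the sentinel path explicit rather than accidental. The arithmetic can be verified stand-alone; the flag values below are assumed for illustration:

    #include <errno.h>
    #include <stdio.h>

    #define MAY_WRITE 0x00000002    /* assumed value, for illustration */
    #define MAY_LOCK  0x00002000    /* assumed value, for illustration */

    int main(void)
    {
            int may = -ENOENT;      /* the "no rule matched" sentinel */

            /* What the old code did even when no rule matched: */
            if ((may & MAY_WRITE) == MAY_WRITE)     /* true: -2 has bit 1 set */
                    may |= MAY_LOCK;                /* no-op: bit already set */

            printf("may = %d\n", may);              /* still -2 (-ENOENT) */
            return 0;
    }
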
8846 +diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
8847 +index 56295936387c3..da454eeee5c91 100644
8848 +--- a/sound/core/pcm_lib.c
8849 ++++ b/sound/core/pcm_lib.c
8850 +@@ -1751,7 +1751,7 @@ static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream,
8851 + channels = params_channels(params);
8852 + frame_size = snd_pcm_format_size(format, channels);
8853 + if (frame_size > 0)
8854 +- params->fifo_size /= (unsigned)frame_size;
8855 ++ params->fifo_size /= frame_size;
8856 + }
8857 + return 0;
8858 + }
8859 +diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
8860 +index 186c0ee059da7..c4d19b88d17dc 100644
8861 +--- a/sound/soc/intel/boards/bytcr_rt5640.c
8862 ++++ b/sound/soc/intel/boards/bytcr_rt5640.c
8863 +@@ -293,9 +293,6 @@ static const struct snd_soc_dapm_widget byt_rt5640_widgets[] = {
8864 + static const struct snd_soc_dapm_route byt_rt5640_audio_map[] = {
8865 + {"Headphone", NULL, "Platform Clock"},
8866 + {"Headset Mic", NULL, "Platform Clock"},
8867 +- {"Internal Mic", NULL, "Platform Clock"},
8868 +- {"Speaker", NULL, "Platform Clock"},
8869 +-
8870 + {"Headset Mic", NULL, "MICBIAS1"},
8871 + {"IN2P", NULL, "Headset Mic"},
8872 + {"Headphone", NULL, "HPOL"},
8873 +@@ -303,19 +300,23 @@ static const struct snd_soc_dapm_route byt_rt5640_audio_map[] = {
8874 + };
8875 +
8876 + static const struct snd_soc_dapm_route byt_rt5640_intmic_dmic1_map[] = {
8877 ++ {"Internal Mic", NULL, "Platform Clock"},
8878 + {"DMIC1", NULL, "Internal Mic"},
8879 + };
8880 +
8881 + static const struct snd_soc_dapm_route byt_rt5640_intmic_dmic2_map[] = {
8882 ++ {"Internal Mic", NULL, "Platform Clock"},
8883 + {"DMIC2", NULL, "Internal Mic"},
8884 + };
8885 +
8886 + static const struct snd_soc_dapm_route byt_rt5640_intmic_in1_map[] = {
8887 ++ {"Internal Mic", NULL, "Platform Clock"},
8888 + {"Internal Mic", NULL, "MICBIAS1"},
8889 + {"IN1P", NULL, "Internal Mic"},
8890 + };
8891 +
8892 + static const struct snd_soc_dapm_route byt_rt5640_intmic_in3_map[] = {
8893 ++ {"Internal Mic", NULL, "Platform Clock"},
8894 + {"Internal Mic", NULL, "MICBIAS1"},
8895 + {"IN3P", NULL, "Internal Mic"},
8896 + };
8897 +@@ -357,6 +358,7 @@ static const struct snd_soc_dapm_route byt_rt5640_ssp0_aif2_map[] = {
8898 + };
8899 +
8900 + static const struct snd_soc_dapm_route byt_rt5640_stereo_spk_map[] = {
8901 ++ {"Speaker", NULL, "Platform Clock"},
8902 + {"Speaker", NULL, "SPOLP"},
8903 + {"Speaker", NULL, "SPOLN"},
8904 + {"Speaker", NULL, "SPORP"},
8905 +@@ -364,6 +366,7 @@ static const struct snd_soc_dapm_route byt_rt5640_stereo_spk_map[] = {
8906 + };
8907 +
8908 + static const struct snd_soc_dapm_route byt_rt5640_mono_spk_map[] = {
8909 ++ {"Speaker", NULL, "Platform Clock"},
8910 + {"Speaker", NULL, "SPOLP"},
8911 + {"Speaker", NULL, "SPOLN"},
8912 + };
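
The bytcr_rt5640 hunks move the "Platform Clock" supply routes out of the always-applied map and into the per-quirk internal-mic and speaker maps, so boards whose quirk never registers a given widget no longer tie the platform clock to it. Only the table matching the board's wiring is registered at init time; the selection looks roughly like the driver logic condensed below (names as in the file, control flow abbreviated):

    /* Condensed sketch of the route selection in byt_rt5640_init(). */
    const struct snd_soc_dapm_route *custom_map;
    int num_routes;

    switch (BYT_RT5640_MAP(byt_rt5640_quirk)) {
    case BYT_RT5640_IN1_MAP:
            custom_map = byt_rt5640_intmic_in1_map;
            num_routes = ARRAY_SIZE(byt_rt5640_intmic_in1_map);
            break;
    default:
            custom_map = byt_rt5640_intmic_dmic1_map;
            num_routes = ARRAY_SIZE(byt_rt5640_intmic_dmic1_map);
            break;
    }
    ret = snd_soc_dapm_add_routes(&card->dapm, custom_map, num_routes);
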
8913 +diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
8914 +index b86f76c3598cd..c6d11a59310fc 100644
8915 +--- a/sound/soc/rockchip/rockchip_i2s.c
8916 ++++ b/sound/soc/rockchip/rockchip_i2s.c
8917 +@@ -189,7 +189,9 @@ static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
8918 + {
8919 + struct rk_i2s_dev *i2s = to_info(cpu_dai);
8920 + unsigned int mask = 0, val = 0;
8921 ++ int ret = 0;
8922 +
8923 ++ pm_runtime_get_sync(cpu_dai->dev);
8924 + mask = I2S_CKR_MSS_MASK;
8925 + switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
8926 + case SND_SOC_DAIFMT_CBS_CFS:
8927 +@@ -202,7 +204,8 @@ static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
8928 + i2s->is_master_mode = false;
8929 + break;
8930 + default:
8931 +- return -EINVAL;
8932 ++ ret = -EINVAL;
8933 ++ goto err_pm_put;
8934 + }
8935 +
8936 + regmap_update_bits(i2s->regmap, I2S_CKR, mask, val);
8937 +@@ -216,7 +219,8 @@ static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
8938 + val = I2S_CKR_CKP_POS;
8939 + break;
8940 + default:
8941 +- return -EINVAL;
8942 ++ ret = -EINVAL;
8943 ++ goto err_pm_put;
8944 + }
8945 +
8946 + regmap_update_bits(i2s->regmap, I2S_CKR, mask, val);
8947 +@@ -232,14 +236,15 @@ static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
8948 + case SND_SOC_DAIFMT_I2S:
8949 + val = I2S_TXCR_IBM_NORMAL;
8950 + break;
8951 +- case SND_SOC_DAIFMT_DSP_A: /* PCM no delay mode */
8952 +- val = I2S_TXCR_TFS_PCM;
8953 +- break;
8954 +- case SND_SOC_DAIFMT_DSP_B: /* PCM delay 1 mode */
8955 ++ case SND_SOC_DAIFMT_DSP_A: /* PCM delay 1 bit mode */
8956 + val = I2S_TXCR_TFS_PCM | I2S_TXCR_PBM_MODE(1);
8957 + break;
8958 ++ case SND_SOC_DAIFMT_DSP_B: /* PCM no delay mode */
8959 ++ val = I2S_TXCR_TFS_PCM;
8960 ++ break;
8961 + default:
8962 +- return -EINVAL;
8963 ++ ret = -EINVAL;
8964 ++ goto err_pm_put;
8965 + }
8966 +
8967 + regmap_update_bits(i2s->regmap, I2S_TXCR, mask, val);
8968 +@@ -255,19 +260,23 @@ static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
8969 + case SND_SOC_DAIFMT_I2S:
8970 + val = I2S_RXCR_IBM_NORMAL;
8971 + break;
8972 +- case SND_SOC_DAIFMT_DSP_A: /* PCM no delay mode */
8973 +- val = I2S_RXCR_TFS_PCM;
8974 +- break;
8975 +- case SND_SOC_DAIFMT_DSP_B: /* PCM delay 1 mode */
8976 ++ case SND_SOC_DAIFMT_DSP_A: /* PCM delay 1 bit mode */
8977 + val = I2S_RXCR_TFS_PCM | I2S_RXCR_PBM_MODE(1);
8978 + break;
8979 ++ case SND_SOC_DAIFMT_DSP_B: /* PCM no delay mode */
8980 ++ val = I2S_RXCR_TFS_PCM;
8981 ++ break;
8982 + default:
8983 +- return -EINVAL;
8984 ++ ret = -EINVAL;
8985 ++ goto err_pm_put;
8986 + }
8987 +
8988 + regmap_update_bits(i2s->regmap, I2S_RXCR, mask, val);
8989 +
8990 +- return 0;
8991 ++err_pm_put:
8992 ++ pm_runtime_put(cpu_dai->dev);
8993 ++
8994 ++ return ret;
8995 + }
8996 +
8997 + static int rockchip_i2s_hw_params(struct snd_pcm_substream *substream,
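
The rockchip_i2s_set_fmt() change takes a runtime-PM reference before touching the regmap, because the DAI format can be set while the controller is runtime-suspended, and rewrites every early return so success and failure alike funnel through one pm_runtime_put(). (The DSP_A/DSP_B hunks also swap the two modes' values, fixing which one carries the 1-bit delay.) The error-path pattern, reduced to its skeleton with a placeholder for the register work:

    #include <linux/pm_runtime.h>

    static bool configure_registers(struct device *dev);    /* placeholder */

    static int set_fmt_like(struct device *dev)
    {
            int ret = 0;

            pm_runtime_get_sync(dev);       /* power the block up for MMIO */

            if (!configure_registers(dev)) {
                    ret = -EINVAL;
                    goto err_pm_put;        /* every failure funnels here */
            }

    err_pm_put:
            pm_runtime_put(dev);            /* balanced on every path */
            return ret;
    }
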
8998 +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
8999 +index 4114594e57a30..f4bdcff82f5cc 100644
9000 +--- a/sound/usb/quirks.c
9001 ++++ b/sound/usb/quirks.c
9002 +@@ -1554,6 +1554,7 @@ static const struct registration_quirk registration_quirks[] = {
9003 + REG_QUIRK_ENTRY(0x0951, 0x16ed, 2), /* Kingston HyperX Cloud Alpha S */
9004 + REG_QUIRK_ENTRY(0x0951, 0x16ea, 2), /* Kingston HyperX Cloud Flight S */
9005 + REG_QUIRK_ENTRY(0x0ecb, 0x1f46, 2), /* JBL Quantum 600 */
9006 ++ REG_QUIRK_ENTRY(0x0ecb, 0x1f47, 2), /* JBL Quantum 800 */
9007 + REG_QUIRK_ENTRY(0x0ecb, 0x2039, 2), /* JBL Quantum 400 */
9008 + REG_QUIRK_ENTRY(0x0ecb, 0x203c, 2), /* JBL Quantum 600 */
9009 + REG_QUIRK_ENTRY(0x0ecb, 0x203e, 2), /* JBL Quantum 800 */
9010 +diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
9011 +index ab208400ea140..4ada233b37ed1 100644
9012 +--- a/tools/perf/util/machine.c
9013 ++++ b/tools/perf/util/machine.c
9014 +@@ -1893,6 +1893,7 @@ static int add_callchain_ip(struct thread *thread,
9015 +
9016 + al.filtered = 0;
9017 + al.sym = NULL;
9018 ++ al.srcline = NULL;
9019 + if (!cpumode) {
9020 + thread__find_cpumode_addr_location(thread, ip, &al);
9021 + } else {
9022 +diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
9023 +index 4e202217fae10..87ba89df98022 100644
9024 +--- a/tools/testing/selftests/bpf/test_maps.c
9025 ++++ b/tools/testing/selftests/bpf/test_maps.c
9026 +@@ -796,7 +796,7 @@ static void test_sockmap(int tasks, void *data)
9027 +
9028 + FD_ZERO(&w);
9029 + FD_SET(sfd[3], &w);
9030 +- to.tv_sec = 1;
9031 ++ to.tv_sec = 30;
9032 + to.tv_usec = 0;
9033 + s = select(sfd[3] + 1, &w, NULL, NULL, &to);
9034 + if (s == -1) {
9035 +diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
9036 +index c7d17781dbfee..858e551432339 100644
9037 +--- a/tools/testing/selftests/bpf/test_verifier.c
9038 ++++ b/tools/testing/selftests/bpf/test_verifier.c
9039 +@@ -956,15 +956,45 @@ static struct bpf_test tests[] = {
9040 + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
9041 + /* mess up with R1 pointer on stack */
9042 + BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
9043 +- /* fill back into R0 should fail */
9044 ++ /* fill back into R0 is fine for priv.
9045 ++ * R0 now becomes SCALAR_VALUE.
9046 ++ */
9047 + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
9048 ++ /* Load from R0 should fail. */
9049 ++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
9050 + BPF_EXIT_INSN(),
9051 + },
9052 + .errstr_unpriv = "attempt to corrupt spilled",
9053 +- .errstr = "corrupted spill",
9054 ++ .errstr = "R0 invalid mem access 'inv",
9055 + .result = REJECT,
9056 + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9057 + },
9058 ++ {
9059 ++ "check corrupted spill/fill, LSB",
9060 ++ .insns = {
9061 ++ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
9062 ++ BPF_ST_MEM(BPF_H, BPF_REG_10, -8, 0xcafe),
9063 ++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
9064 ++ BPF_EXIT_INSN(),
9065 ++ },
9066 ++ .errstr_unpriv = "attempt to corrupt spilled",
9067 ++ .result_unpriv = REJECT,
9068 ++ .result = ACCEPT,
9069 ++ .retval = POINTER_VALUE,
9070 ++ },
9071 ++ {
9072 ++ "check corrupted spill/fill, MSB",
9073 ++ .insns = {
9074 ++ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
9075 ++ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x12345678),
9076 ++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
9077 ++ BPF_EXIT_INSN(),
9078 ++ },
9079 ++ .errstr_unpriv = "attempt to corrupt spilled",
9080 ++ .result_unpriv = REJECT,
9081 ++ .result = ACCEPT,
9082 ++ .retval = POINTER_VALUE,
9083 ++ },
9084 + {
9085 + "invalid src register in STX",
9086 + .insns = {
9087 +@@ -3858,7 +3888,8 @@ static struct bpf_test tests[] = {
9088 + offsetof(struct __sk_buff, data)),
9089 + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9090 + offsetof(struct __sk_buff, data_end)),
9091 +- BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
9092 ++ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9093 ++ offsetof(struct __sk_buff, mark)),
9094 + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
9095 + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
9096 + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
9097 +@@ -6530,9 +6561,9 @@ static struct bpf_test tests[] = {
9098 + {
9099 + "helper access to variable memory: stack, bitwise AND, zero included",
9100 + .insns = {
9101 ++ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
9102 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9103 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
9104 +- BPF_MOV64_IMM(BPF_REG_2, 16),
9105 + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
9106 + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
9107 + BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
9108 +@@ -6547,9 +6578,9 @@ static struct bpf_test tests[] = {
9109 + {
9110 + "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
9111 + .insns = {
9112 ++ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
9113 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9114 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
9115 +- BPF_MOV64_IMM(BPF_REG_2, 16),
9116 + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
9117 + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
9118 + BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
9119 +@@ -6623,9 +6654,9 @@ static struct bpf_test tests[] = {
9120 + {
9121 + "helper access to variable memory: stack, JMP, bounds + offset",
9122 + .insns = {
9123 ++ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
9124 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9125 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
9126 +- BPF_MOV64_IMM(BPF_REG_2, 16),
9127 + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
9128 + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
9129 + BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
9130 +@@ -6644,9 +6675,9 @@ static struct bpf_test tests[] = {
9131 + {
9132 + "helper access to variable memory: stack, JMP, wrong max",
9133 + .insns = {
9134 ++ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
9135 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9136 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
9137 +- BPF_MOV64_IMM(BPF_REG_2, 16),
9138 + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
9139 + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
9140 + BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
9141 +@@ -6664,9 +6695,9 @@ static struct bpf_test tests[] = {
9142 + {
9143 + "helper access to variable memory: stack, JMP, no max check",
9144 + .insns = {
9145 ++ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
9146 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9147 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
9148 +- BPF_MOV64_IMM(BPF_REG_2, 16),
9149 + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
9150 + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
9151 + BPF_MOV64_IMM(BPF_REG_4, 0),
9152 +@@ -6684,9 +6715,9 @@ static struct bpf_test tests[] = {
9153 + {
9154 + "helper access to variable memory: stack, JMP, no min check",
9155 + .insns = {
9156 ++ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
9157 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9158 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
9159 +- BPF_MOV64_IMM(BPF_REG_2, 16),
9160 + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
9161 + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
9162 + BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
9163 +@@ -6702,9 +6733,9 @@ static struct bpf_test tests[] = {
9164 + {
9165 + "helper access to variable memory: stack, JMP (signed), no min check",
9166 + .insns = {
9167 ++ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
9168 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9169 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
9170 +- BPF_MOV64_IMM(BPF_REG_2, 16),
9171 + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
9172 + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
9173 + BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
9174 +@@ -6746,6 +6777,7 @@ static struct bpf_test tests[] = {
9175 + {
9176 + "helper access to variable memory: map, JMP, wrong max",
9177 + .insns = {
9178 ++ BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
9179 + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9180 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9181 + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
9182 +@@ -6753,7 +6785,7 @@ static struct bpf_test tests[] = {
9183 + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
9184 + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
9185 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
9186 +- BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
9187 ++ BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
9188 + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
9189 + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
9190 + BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
9191 +@@ -6765,7 +6797,7 @@ static struct bpf_test tests[] = {
9192 + BPF_MOV64_IMM(BPF_REG_0, 0),
9193 + BPF_EXIT_INSN(),
9194 + },
9195 +- .fixup_map2 = { 3 },
9196 ++ .fixup_map2 = { 4 },
9197 + .errstr = "invalid access to map value, value_size=48 off=0 size=49",
9198 + .result = REJECT,
9199 + .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9200 +@@ -6800,6 +6832,7 @@ static struct bpf_test tests[] = {
9201 + {
9202 + "helper access to variable memory: map adjusted, JMP, wrong max",
9203 + .insns = {
9204 ++ BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
9205 + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9206 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9207 + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
9208 +@@ -6808,7 +6841,7 @@ static struct bpf_test tests[] = {
9209 + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
9210 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
9211 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
9212 +- BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
9213 ++ BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
9214 + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
9215 + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
9216 + BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
9217 +@@ -6820,7 +6853,7 @@ static struct bpf_test tests[] = {
9218 + BPF_MOV64_IMM(BPF_REG_0, 0),
9219 + BPF_EXIT_INSN(),
9220 + },
9221 +- .fixup_map2 = { 3 },
9222 ++ .fixup_map2 = { 4 },
9223 + .errstr = "R1 min value is outside of the array range",
9224 + .result = REJECT,
9225 + .prog_type = BPF_PROG_TYPE_TRACEPOINT,
9226 +@@ -6842,8 +6875,8 @@ static struct bpf_test tests[] = {
9227 + {
9228 + "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
9229 + .insns = {
9230 ++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
9231 + BPF_MOV64_IMM(BPF_REG_1, 0),
9232 +- BPF_MOV64_IMM(BPF_REG_2, 1),
9233 + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
9234 + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
9235 + BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
9236 +@@ -7070,6 +7103,7 @@ static struct bpf_test tests[] = {
9237 + {
9238 + "helper access to variable memory: 8 bytes leak",
9239 + .insns = {
9240 ++ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
9241 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9242 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
9243 + BPF_MOV64_IMM(BPF_REG_0, 0),
9244 +@@ -7080,7 +7114,6 @@ static struct bpf_test tests[] = {
9245 + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
9246 + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
9247 + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
9248 +- BPF_MOV64_IMM(BPF_REG_2, 1),
9249 + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
9250 + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
9251 + BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
9252 +@@ -8465,7 +8498,7 @@ static struct bpf_test tests[] = {
9253 + .prog_type = BPF_PROG_TYPE_LWT_IN,
9254 + },
9255 + {
9256 +- "indirect variable-offset stack access",
9257 ++ "indirect variable-offset stack access, out of bound",
9258 + .insns = {
9259 + /* Fill the top 8 bytes of the stack */
9260 + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9261 +@@ -8486,10 +8519,85 @@ static struct bpf_test tests[] = {
9262 + BPF_EXIT_INSN(),
9263 + },
9264 + .fixup_map1 = { 5 },
9265 +- .errstr = "variable stack read R2",
9266 ++ .errstr = "invalid stack type R2 var_off",
9267 ++ .result = REJECT,
9268 ++ .prog_type = BPF_PROG_TYPE_LWT_IN,
9269 ++ },
9270 ++ {
9271 ++ "indirect variable-offset stack access, max_off+size > max_initialized",
9272 ++ .insns = {
9273 ++ /* Fill only the second from top 8 bytes of the stack. */
9274 ++ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
9275 ++ /* Get an unknown value. */
9276 ++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
9277 ++ /* Make it small and 4-byte aligned. */
9278 ++ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
9279 ++ BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16),
9280 ++ /* Add it to fp. We now have either fp-12 or fp-16, but we don't know
9281 ++ * which. fp-12 size 8 is partially uninitialized stack.
9282 ++ */
9283 ++ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
9284 ++ /* Dereference it indirectly. */
9285 ++ BPF_LD_MAP_FD(BPF_REG_1, 0),
9286 ++ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
9287 ++ BPF_MOV64_IMM(BPF_REG_0, 0),
9288 ++ BPF_EXIT_INSN(),
9289 ++ },
9290 ++ .fixup_map1 = { 5 },
9291 ++ .errstr = "invalid indirect read from stack var_off",
9292 + .result = REJECT,
9293 + .prog_type = BPF_PROG_TYPE_LWT_IN,
9294 + },
9295 ++ {
9296 ++ "indirect variable-offset stack access, min_off < min_initialized",
9297 ++ .insns = {
9298 ++ /* Fill only the top 8 bytes of the stack. */
9299 ++ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9300 ++ /* Get an unknown value */
9301 ++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
9302 ++ /* Make it small and 4-byte aligned. */
9303 ++ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
9304 ++ BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16),
9305 ++ /* Add it to fp. We now have either fp-12 or fp-16, but we don't know
9306 ++ * which. fp-16 size 8 is partially uninitialized stack.
9307 ++ */
9308 ++ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
9309 ++ /* Dereference it indirectly. */
9310 ++ BPF_LD_MAP_FD(BPF_REG_1, 0),
9311 ++ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
9312 ++ BPF_MOV64_IMM(BPF_REG_0, 0),
9313 ++ BPF_EXIT_INSN(),
9314 ++ },
9315 ++ .fixup_map1 = { 5 },
9316 ++ .errstr = "invalid indirect read from stack var_off",
9317 ++ .result = REJECT,
9318 ++ .prog_type = BPF_PROG_TYPE_LWT_IN,
9319 ++ },
9320 ++ {
9321 ++ "indirect variable-offset stack access, ok",
9322 ++ .insns = {
9323 ++ /* Fill the top 16 bytes of the stack. */
9324 ++ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
9325 ++ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9326 ++ /* Get an unknown value. */
9327 ++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
9328 ++ /* Make it small and 4-byte aligned. */
9329 ++ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
9330 ++ BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16),
9331 ++ /* Add it to fp. We now have either fp-12 or fp-16, we don't know
9332 ++ * which, but either way it points to initialized stack.
9333 ++ */
9334 ++ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
9335 ++ /* Dereference it indirectly. */
9336 ++ BPF_LD_MAP_FD(BPF_REG_1, 0),
9337 ++ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
9338 ++ BPF_MOV64_IMM(BPF_REG_0, 0),
9339 ++ BPF_EXIT_INSN(),
9340 ++ },
9341 ++ .fixup_map1 = { 6 },
9342 ++ .result = ACCEPT,
9343 ++ .prog_type = BPF_PROG_TYPE_LWT_IN,
9344 ++ },
9345 + {
9346 + "direct stack access with 32-bit wraparound. test1",
9347 + .insns = {
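
Two details in the reworked verifier tests are easy to miss. First, filling a corrupted spill slot back into a register is now legal for privileged programs and simply yields a SCALAR_VALUE, which is why the new LSB/MSB tests ACCEPT with retval = POINTER_VALUE while a subsequent dereference still REJECTs ("R0 invalid mem access 'inv"). Second, all the variable-offset stack tests derive their offset the same way, and the arithmetic can be spot-checked in plain C:

    #include <stdio.h>

    int main(void)
    {
            /* r2 = (unknown & 4) - 16, as in the tests: whatever the
             * unknown word was, the offset is -16 or -12, i.e. the
             * pointer ends up at fp-16 or fp-12 after adding fp. */
            for (int unknown = 0; unknown < 8; unknown++)
                    printf("unknown=%d -> fp%+d\n", unknown, (unknown & 4) - 16);
            return 0;
    }
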
9348 +diff --git a/tools/thermal/tmon/Makefile b/tools/thermal/tmon/Makefile
9349 +index 735a510230c3f..45751648aacda 100644
9350 +--- a/tools/thermal/tmon/Makefile
9351 ++++ b/tools/thermal/tmon/Makefile
9352 +@@ -10,7 +10,7 @@ CFLAGS+= -O1 ${WARNFLAGS}
9353 + # Add "-fstack-protector" only if toolchain supports it.
9354 + CFLAGS+= $(call cc-option,-fstack-protector)
9355 + CC?= $(CROSS_COMPILE)gcc
9356 +-PKG_CONFIG?= pkg-config
9357 ++PKG_CONFIG?= $(CROSS_COMPILE)pkg-config
9358 +
9359 + CFLAGS+=-D VERSION=\"$(VERSION)\"
9360 + LDFLAGS+=
9361 +diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
9362 +index 39706799ecdf8..b943ec5345cbd 100644
9363 +--- a/virt/kvm/arm/arm.c
9364 ++++ b/virt/kvm/arm/arm.c
9365 +@@ -1137,6 +1137,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
9366 + if (copy_from_user(&reg, argp, sizeof(reg)))
9367 + break;
9368 +
9369 ++ /*
9370 ++ * We could owe a reset due to PSCI. Handle the pending reset
9371 ++ * here to ensure userspace register accesses are ordered after
9372 ++ * the reset.
9373 ++ */
9374 ++ if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
9375 ++ kvm_reset_vcpu(vcpu);
9376 ++
9377 + if (ioctl == KVM_SET_ONE_REG)
9378 + r = kvm_arm_set_reg(vcpu, &reg);
9379 + else
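
The KVM/arm change closes an ordering hole: a PSCI request from another vCPU can leave KVM_REQ_VCPU_RESET pending, and if userspace then writes a register through KVM_SET_ONE_REG before the vCPU next runs, the deferred reset would wipe that write. Consuming the request at the top of the ioctl orders the reset before the register access. The request API pairs a producer with a consume-once check, roughly:

    /* Producer side (e.g. the PSCI CPU_ON path), illustrative only: */
    kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);

    /* Consumer side, as added above: kvm_check_request() tests and
     * clears the bit atomically, so the reset runs exactly once. */
    if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
            kvm_reset_vcpu(vcpu);
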