From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Wed, 22 Sep 2021 11:39:34
Message-Id: 1632310758.f1ad5dc0b5f6809f86a86e23b3fe3b3592722da7.mpagano@gentoo
commit: f1ad5dc0b5f6809f86a86e23b3fe3b3592722da7
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Sep 22 11:39:18 2021 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Sep 22 11:39:18 2021 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=f1ad5dc0

Linux patch 5.4.148

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README | 4 +
 1147_linux-5.4.148.patch | 8519 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 8523 insertions(+)

diff --git a/0000_README b/0000_README
index 4d2be88..b620b8f 100644
--- a/0000_README
+++ b/0000_README
@@ -631,6 +631,10 @@ Patch: 1146_linux-5.4.147.patch
 From: http://www.kernel.org
 Desc: Linux 5.4.147

+Patch: 1147_linux-5.4.148.patch
+From: http://www.kernel.org
+Desc: Linux 5.4.148
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1147_linux-5.4.148.patch b/1147_linux-5.4.148.patch
new file mode 100644
index 0000000..e4f197b
--- /dev/null
+++ b/1147_linux-5.4.148.patch
@@ -0,0 +1,8519 @@
+diff --git a/Documentation/admin-guide/devices.txt b/Documentation/admin-guide/devices.txt
+index 1c5d2281efc97..771d9e7ae082b 100644
+--- a/Documentation/admin-guide/devices.txt
++++ b/Documentation/admin-guide/devices.txt
+@@ -3002,10 +3002,10 @@
+ 65 = /dev/infiniband/issm1 Second InfiniBand IsSM device
+ ...
+ 127 = /dev/infiniband/issm63 63rd InfiniBand IsSM device
+- 128 = /dev/infiniband/uverbs0 First InfiniBand verbs device
+- 129 = /dev/infiniband/uverbs1 Second InfiniBand verbs device
++ 192 = /dev/infiniband/uverbs0 First InfiniBand verbs device
++ 193 = /dev/infiniband/uverbs1 Second InfiniBand verbs device
+ ...
+- 159 = /dev/infiniband/uverbs31 31st InfiniBand verbs device
++ 223 = /dev/infiniband/uverbs31 31st InfiniBand verbs device
+
+ 232 char Biometric Devices
+ 0 = /dev/biometric/sensor0/fingerprint first fingerprint sensor on first device
+diff --git a/Documentation/devicetree/bindings/arm/tegra.yaml b/Documentation/devicetree/bindings/arm/tegra.yaml
+index 60b38eb5c61ab..56e1945911f1e 100644
+--- a/Documentation/devicetree/bindings/arm/tegra.yaml
++++ b/Documentation/devicetree/bindings/arm/tegra.yaml
+@@ -49,7 +49,7 @@ properties:
+ - const: toradex,apalis_t30
+ - const: nvidia,tegra30
+ - items:
+- - const: toradex,apalis_t30-eval-v1.1
++ - const: toradex,apalis_t30-v1.1-eval
+ - const: toradex,apalis_t30-eval
+ - const: toradex,apalis_t30-v1.1
+ - const: toradex,apalis_t30
+diff --git a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
+index 44919d48d2415..c459f169a9044 100644
+--- a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
++++ b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
+@@ -122,7 +122,7 @@ on various other factors also like;
+ so the device should have enough free bytes available its OOB/Spare
+ area to accommodate ECC for entire page. In general following expression
+ helps in determining if given device can accommodate ECC syndrome:
+- "2 + (PAGESIZE / 512) * ECC_BYTES" >= OOBSIZE"
++ "2 + (PAGESIZE / 512) * ECC_BYTES" <= OOBSIZE"
+ where
+ OOBSIZE number of bytes in OOB/spare area
+ PAGESIZE number of bytes in main-area of device page
+diff --git a/Makefile b/Makefile
+index 98227dae34947..b84706c6d6248 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 147
++SUBLEVEL = 148
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
+index a2fbea3ee07c7..102418ac5ff4a 100644
+--- a/arch/arc/mm/cache.c
++++ b/arch/arc/mm/cache.c
+@@ -1123,7 +1123,7 @@ void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
+ clear_page(to);
+ clear_bit(PG_dc_clean, &page->flags);
+ }
+-
++EXPORT_SYMBOL(clear_user_page);
+
+ /**********************************************************************
+ * Explicit Cache flush request from user space via syscall
+diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
+index f0b3a9281d69b..fb6cb24bde5c9 100644
+--- a/arch/arm/boot/compressed/Makefile
++++ b/arch/arm/boot/compressed/Makefile
+@@ -90,6 +90,8 @@ $(addprefix $(obj)/,$(libfdt_objs) atags_to_fdt.o): \
+ $(addprefix $(obj)/,$(libfdt_hdrs))
+
+ ifeq ($(CONFIG_ARM_ATAG_DTB_COMPAT),y)
++CFLAGS_REMOVE_atags_to_fdt.o += -Wframe-larger-than=${CONFIG_FRAME_WARN}
++CFLAGS_atags_to_fdt.o += -Wframe-larger-than=1280
+ OBJS += $(libfdt_objs) atags_to_fdt.o
+ endif
+
+diff --git a/arch/arm/boot/dts/imx53-ppd.dts b/arch/arm/boot/dts/imx53-ppd.dts
+index 5ff9a179c83c3..c80d1700e0949 100644
+--- a/arch/arm/boot/dts/imx53-ppd.dts
++++ b/arch/arm/boot/dts/imx53-ppd.dts
+@@ -70,6 +70,12 @@
+ clock-frequency = <11289600>;
+ };
+
++ achc_24M: achc-clock {
++ compatible = "fixed-clock";
++ #clock-cells = <0>;
++ clock-frequency = <24000000>;
++ };
++
+ sgtlsound: sound {
+ compatible = "fsl,imx53-cpuvo-sgtl5000",
+ "fsl,imx-audio-sgtl5000";
+@@ -287,16 +293,13 @@
+ &gpio4 12 GPIO_ACTIVE_LOW>;
+ status = "okay";
+
+- spidev0: spi@0 {
+- compatible = "ge,achc";
+- reg = <0>;
+- spi-max-frequency = <1000000>;
+- };
+-
+- spidev1: spi@1 {
+- compatible = "ge,achc";
+- reg = <1>;
+- spi-max-frequency = <1000000>;
++ spidev0: spi@1 {
++ compatible = "ge,achc", "nxp,kinetis-k20";
++ reg = <1>, <0>;
++ vdd-supply = <&reg_3v3>;
++ vdda-supply = <&reg_3v3>;
++ clocks = <&achc_24M>;
++ reset-gpios = <&gpio3 6 GPIO_ACTIVE_LOW>;
+ };
+
+ gpioxra0: gpio@2 {
+diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
+index 8b79b4112ee1a..2b075e287610f 100644
+--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
++++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
+@@ -1261,9 +1261,9 @@
+ <&mmcc DSI1_BYTE_CLK>,
+ <&mmcc DSI_PIXEL_CLK>,
+ <&mmcc DSI1_ESC_CLK>;
+- clock-names = "iface_clk", "bus_clk", "core_mmss_clk",
+- "src_clk", "byte_clk", "pixel_clk",
+- "core_clk";
++ clock-names = "iface", "bus", "core_mmss",
++ "src", "byte", "pixel",
++ "core";
+
+ assigned-clocks = <&mmcc DSI1_BYTE_SRC>,
+ <&mmcc DSI1_ESC_SRC>,
+diff --git a/arch/arm/boot/dts/tegra20-tamonten.dtsi b/arch/arm/boot/dts/tegra20-tamonten.dtsi
+index 20137fc578b1b..394a6b4dc69d5 100644
+--- a/arch/arm/boot/dts/tegra20-tamonten.dtsi
++++ b/arch/arm/boot/dts/tegra20-tamonten.dtsi
+@@ -185,8 +185,9 @@
+ nvidia,pins = "ata", "atb", "atc", "atd", "ate",
+ "cdev1", "cdev2", "dap1", "dtb", "gma",
+ "gmb", "gmc", "gmd", "gme", "gpu7",
+- "gpv", "i2cp", "pta", "rm", "slxa",
+- "slxk", "spia", "spib", "uac";
++ "gpv", "i2cp", "irrx", "irtx", "pta",
++ "rm", "slxa", "slxk", "spia", "spib",
++ "uac";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ };
+@@ -211,7 +212,7 @@
+ conf_ddc {
+ nvidia,pins = "ddc", "dta", "dtd", "kbca",
+ "kbcb", "kbcc", "kbcd", "kbce", "kbcf",
+- "sdc";
++ "sdc", "uad", "uca";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ };
+@@ -221,10 +222,9 @@
+ "lvp0", "owc", "sdb";
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ };
+- conf_irrx {
+- nvidia,pins = "irrx", "irtx", "sdd", "spic",
+- "spie", "spih", "uaa", "uab", "uad",
+- "uca", "ucb";
++ conf_sdd {
++ nvidia,pins = "sdd", "spic", "spie", "spih",
++ "uaa", "uab", "ucb";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ };
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a-frwy.dts b/arch/arm64/boot/dts/freescale/fsl-ls1046a-frwy.dts
+index 3595be0f25277..2d6c73d7d397c 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a-frwy.dts
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-frwy.dts
+@@ -83,15 +83,9 @@
+ };
+
+ eeprom@52 {
+- compatible = "atmel,24c512";
++ compatible = "onnn,cat24c04", "atmel,24c04";
+ reg = <0x52>;
+ };
+-
+- eeprom@53 {
+- compatible = "atmel,24c512";
+- reg = <0x53>;
+- };
+-
+ };
+ };
+ };
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts
+index 2743397591141..8858c1e92f23c 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts
+@@ -58,14 +58,9 @@
+ };
+
+ eeprom@52 {
+- compatible = "atmel,24c512";
++ compatible = "onnn,cat24c05", "atmel,24c04";
+ reg = <0x52>;
+ };
+-
+- eeprom@53 {
+- compatible = "atmel,24c512";
+- reg = <0x53>;
+- };
+ };
+
+ &i2c3 {
+diff --git a/arch/arm64/boot/dts/nvidia/tegra132.dtsi b/arch/arm64/boot/dts/nvidia/tegra132.dtsi
+index 631a7f77c3869..0b3eb8c0b8df0 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra132.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra132.dtsi
+@@ -1082,13 +1082,13 @@
+
+ cpu@0 {
+ device_type = "cpu";
+- compatible = "nvidia,denver";
++ compatible = "nvidia,tegra132-denver";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ device_type = "cpu";
+- compatible = "nvidia,denver";
++ compatible = "nvidia,tegra132-denver";
+ reg = <1>;
+ };
+ };
+diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
+index 0821754f0fd6d..90adff8aa9baf 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
+@@ -1434,7 +1434,7 @@
+ };
+
+ pcie_ep@14160000 {
+- compatible = "nvidia,tegra194-pcie-ep", "snps,dw-pcie-ep";
++ compatible = "nvidia,tegra194-pcie-ep";
+ power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX4A>;
+ reg = <0x00 0x14160000 0x0 0x00020000 /* appl registers (128K) */
+ 0x00 0x36040000 0x0 0x00040000 /* iATU_DMA reg space (256K) */
+@@ -1466,7 +1466,7 @@
+ };
+
+ pcie_ep@14180000 {
+- compatible = "nvidia,tegra194-pcie-ep", "snps,dw-pcie-ep";
++ compatible = "nvidia,tegra194-pcie-ep";
+ power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8B>;
+ reg = <0x00 0x14180000 0x0 0x00020000 /* appl registers (128K) */
+ 0x00 0x38040000 0x0 0x00040000 /* iATU_DMA reg space (256K) */
+@@ -1498,7 +1498,7 @@
+ };
+
+ pcie_ep@141a0000 {
+- compatible = "nvidia,tegra194-pcie-ep", "snps,dw-pcie-ep";
++ compatible = "nvidia,tegra194-pcie-ep";
+ power-domains = <&bpmp TEGRA194_POWER_DOMAIN_PCIEX8A>;
+ reg = <0x00 0x141a0000 0x0 0x00020000 /* appl registers (128K) */
+ 0x00 0x3a040000 0x0 0x00040000 /* iATU_DMA reg space (256K) */
+diff --git a/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts b/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts
+index 70be3f95209bc..830d9f2c1e5f2 100644
+--- a/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts
++++ b/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts
+@@ -20,7 +20,7 @@
+ stdout-path = "serial0";
+ };
+
+- memory {
++ memory@40000000 {
+ device_type = "memory";
+ reg = <0x0 0x40000000 0x0 0x20000000>;
+ };
+diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
+index 817efd95d539f..9679b74a20817 100644
+--- a/arch/arm64/include/asm/kernel-pgtable.h
++++ b/arch/arm64/include/asm/kernel-pgtable.h
+@@ -65,8 +65,8 @@
+ #define EARLY_KASLR (0)
+ #endif
+
+-#define EARLY_ENTRIES(vstart, vend, shift) (((vend) >> (shift)) \
+- - ((vstart) >> (shift)) + 1 + EARLY_KASLR)
++#define EARLY_ENTRIES(vstart, vend, shift) \
++ ((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1 + EARLY_KASLR)
+
+ #define EARLY_PGDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT))
+
+diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
+index 04b982a2799eb..e62c9cbf99f46 100644
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -498,7 +498,7 @@ size_t sve_state_size(struct task_struct const *task)
+ void sve_alloc(struct task_struct *task)
+ {
+ if (task->thread.sve_state) {
+- memset(task->thread.sve_state, 0, sve_state_size(current));
++ memset(task->thread.sve_state, 0, sve_state_size(task));
+ return;
+ }
+
+diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
+index a2e0b37549433..2f784d3b4b390 100644
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -194,7 +194,7 @@ ENDPROC(preserve_boot_args)
+ * to be composed of multiple pages. (This effectively scales the end index).
+ *
+ * vstart: virtual address of start of range
+- * vend: virtual address of end of range
++ * vend: virtual address of end of range - we map [vstart, vend]
+ * shift: shift used to transform virtual address into index
+ * ptrs: number of entries in page table
+ * istart: index in table corresponding to vstart
+@@ -231,17 +231,18 @@ ENDPROC(preserve_boot_args)
+ *
+ * tbl: location of page table
+ * rtbl: address to be used for first level page table entry (typically tbl + PAGE_SIZE)
+- * vstart: start address to map
+- * vend: end address to map - we map [vstart, vend]
++ * vstart: virtual address of start of range
++ * vend: virtual address of end of range - we map [vstart, vend - 1]
+ * flags: flags to use to map last level entries
+ * phys: physical address corresponding to vstart - physical memory is contiguous
+ * pgds: the number of pgd entries
+ *
+ * Temporaries: istart, iend, tmp, count, sv - these need to be different registers
+- * Preserves: vstart, vend, flags
+- * Corrupts: tbl, rtbl, istart, iend, tmp, count, sv
++ * Preserves: vstart, flags
++ * Corrupts: tbl, rtbl, vend, istart, iend, tmp, count, sv
+ */
+ .macro map_memory, tbl, rtbl, vstart, vend, flags, phys, pgds, istart, iend, tmp, count, sv
++ sub \vend, \vend, #1
+ add \rtbl, \tbl, #PAGE_SIZE
+ mov \sv, \rtbl
+ mov \count, #0
+diff --git a/arch/m68k/Kconfig.bus b/arch/m68k/Kconfig.bus
+index 9d0a3a23d50e5..355c51309ed85 100644
+--- a/arch/m68k/Kconfig.bus
++++ b/arch/m68k/Kconfig.bus
+@@ -63,7 +63,7 @@ source "drivers/zorro/Kconfig"
+
+ endif
+
+-if !MMU
++if COLDFIRE
+
+ config ISA_DMA_API
+ def_bool !M5272
+diff --git a/arch/mips/mti-malta/malta-dtshim.c b/arch/mips/mti-malta/malta-dtshim.c
+index 98a063093b69a..0be28adff5572 100644
+--- a/arch/mips/mti-malta/malta-dtshim.c
++++ b/arch/mips/mti-malta/malta-dtshim.c
+@@ -22,7 +22,7 @@
+ #define ROCIT_CONFIG_GEN1_MEMMAP_SHIFT 8
+ #define ROCIT_CONFIG_GEN1_MEMMAP_MASK (0xf << 8)
+
+-static unsigned char fdt_buf[16 << 10] __initdata;
++static unsigned char fdt_buf[16 << 10] __initdata __aligned(8);
+
+ /* determined physical memory size, not overridden by command line args */
+ extern unsigned long physical_memsize;
+diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
+index c6481cfc5220f..6b27cf4a0d786 100644
+--- a/arch/openrisc/kernel/entry.S
++++ b/arch/openrisc/kernel/entry.S
+@@ -547,6 +547,7 @@ EXCEPTION_ENTRY(_external_irq_handler)
+ l.bnf 1f // ext irq enabled, all ok.
+ l.nop
+
++#ifdef CONFIG_PRINTK
+ l.addi r1,r1,-0x8
+ l.movhi r3,hi(42f)
+ l.ori r3,r3,lo(42f)
+@@ -560,6 +561,7 @@ EXCEPTION_ENTRY(_external_irq_handler)
+ .string "\n\rESR interrupt bug: in _external_irq_handler (ESR %x)\n\r"
+ .align 4
+ .previous
++#endif
+
+ l.ori r4,r4,SPR_SR_IEE // fix the bug
+ // l.sw PT_SR(r1),r4
+diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
+index 02895a8f2c551..92223f9ff05c7 100644
+--- a/arch/parisc/kernel/signal.c
++++ b/arch/parisc/kernel/signal.c
+@@ -238,6 +238,12 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs,
+ #endif
+
+ usp = (regs->gr[30] & ~(0x01UL));
++#ifdef CONFIG_64BIT
++ if (is_compat_task()) {
++ /* The gcc alloca implementation leaves garbage in the upper 32 bits of sp */
++ usp = (compat_uint_t)usp;
++ }
++#endif
+ /*FIXME: frame_size parameter is unused, remove it. */
+ frame = get_sigframe(&ksig->ka, usp, sizeof(*frame));
+
+diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig
+index 285d506c5a769..2f5e06309f096 100644
+--- a/arch/powerpc/configs/mpc885_ads_defconfig
++++ b/arch/powerpc/configs/mpc885_ads_defconfig
+@@ -39,6 +39,7 @@ CONFIG_MTD_CFI_GEOMETRY=y
+ # CONFIG_MTD_CFI_I2 is not set
+ CONFIG_MTD_CFI_I4=y
+ CONFIG_MTD_CFI_AMDSTD=y
++CONFIG_MTD_PHYSMAP=y
+ CONFIG_MTD_PHYSMAP_OF=y
+ # CONFIG_BLK_DEV is not set
+ CONFIG_NETDEVICES=y
+diff --git a/arch/powerpc/include/asm/pmc.h b/arch/powerpc/include/asm/pmc.h
+index c6bbe9778d3cd..3c09109e708ef 100644
+--- a/arch/powerpc/include/asm/pmc.h
++++ b/arch/powerpc/include/asm/pmc.h
+@@ -34,6 +34,13 @@ static inline void ppc_set_pmu_inuse(int inuse)
+ #endif
+ }
+
++#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
++static inline int ppc_get_pmu_inuse(void)
++{
++ return get_paca()->pmcregs_in_use;
++}
++#endif
++
+ extern void power4_enable_pmcs(void);
+
+ #else /* CONFIG_PPC64 */
+diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
+index b13c6213b0d9b..890f95151fb44 100644
+--- a/arch/powerpc/kernel/stacktrace.c
++++ b/arch/powerpc/kernel/stacktrace.c
+@@ -8,6 +8,7 @@
+ * Copyright 2018 Nick Piggin, Michael Ellerman, IBM Corp.
+ */
+
++#include <linux/delay.h>
+ #include <linux/export.h>
+ #include <linux/kallsyms.h>
+ #include <linux/module.h>
+diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
+index ab6eeb8e753e5..35fd67b4ceb41 100644
+--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
++++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
+@@ -177,10 +177,13 @@ static void kvmppc_rm_tce_put(struct kvmppc_spapr_tce_table *stt,
+ idx -= stt->offset;
+ page = stt->pages[idx / TCES_PER_PAGE];
+ /*
+- * page must not be NULL in real mode,
+- * kvmppc_rm_ioba_validate() must have taken care of this.
++ * kvmppc_rm_ioba_validate() allows pages not be allocated if TCE is
++ * being cleared, otherwise it returns H_TOO_HARD and we skip this.
+ */
+- WARN_ON_ONCE_RM(!page);
++ if (!page) {
++ WARN_ON_ONCE_RM(tce != 0);
++ return;
++ }
+ tbl = kvmppc_page_address(page);
+
+ tbl[idx % TCES_PER_PAGE] = tce;
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index bba358f134718..6c99ccc3bfcb0 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -58,6 +58,7 @@
+ #include <asm/kvm_book3s.h>
+ #include <asm/mmu_context.h>
+ #include <asm/lppaca.h>
++#include <asm/pmc.h>
+ #include <asm/processor.h>
+ #include <asm/cputhreads.h>
+ #include <asm/page.h>
+@@ -3559,6 +3560,18 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
+ cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+ kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
+
++#ifdef CONFIG_PPC_PSERIES
++ if (kvmhv_on_pseries()) {
++ barrier();
++ if (vcpu->arch.vpa.pinned_addr) {
++ struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
++ get_lppaca()->pmcregs_in_use = lp->pmcregs_in_use;
++ } else {
++ get_lppaca()->pmcregs_in_use = 1;
++ }
++ barrier();
++ }
++#endif
+ kvmhv_load_guest_pmu(vcpu);
+
+ msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
+@@ -3693,6 +3706,13 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
+ save_pmu |= nesting_enabled(vcpu->kvm);
+
+ kvmhv_save_guest_pmu(vcpu, save_pmu);
++#ifdef CONFIG_PPC_PSERIES
++ if (kvmhv_on_pseries()) {
++ barrier();
++ get_lppaca()->pmcregs_in_use = ppc_get_pmu_inuse();
++ barrier();
++ }
++#endif
+
+ vc->entry_exit_map = 0x101;
+ vc->in_guest = 0;
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index c6fbbd29bd871..feaf6ca2e76c1 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -3137,7 +3137,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
+ /* The following code handles the fake_suspend = 1 case */
+ mflr r0
+ std r0, PPC_LR_STKOFF(r1)
+- stdu r1, -PPC_MIN_STKFRM(r1)
++ stdu r1, -TM_FRAME_SIZE(r1)
+
+ /* Turn on TM. */
+ mfmsr r8
+@@ -3152,10 +3152,42 @@ BEGIN_FTR_SECTION
+ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
+ nop
+
++ /*
++ * It's possible that treclaim. may modify registers, if we have lost
++ * track of fake-suspend state in the guest due to it using rfscv.
++ * Save and restore registers in case this occurs.
++ */
++ mfspr r3, SPRN_DSCR
++ mfspr r4, SPRN_XER
++ mfspr r5, SPRN_AMR
++ /* SPRN_TAR would need to be saved here if the kernel ever used it */
++ mfcr r12
++ SAVE_NVGPRS(r1)
++ SAVE_GPR(2, r1)
++ SAVE_GPR(3, r1)
++ SAVE_GPR(4, r1)
++ SAVE_GPR(5, r1)
++ stw r12, 8(r1)
++ std r1, HSTATE_HOST_R1(r13)
++
+ /* We have to treclaim here because that's the only way to do S->N */
+ li r3, TM_CAUSE_KVM_RESCHED
+ TRECLAIM(R3)
+
++ GET_PACA(r13)
++ ld r1, HSTATE_HOST_R1(r13)
++ REST_GPR(2, r1)
++ REST_GPR(3, r1)
++ REST_GPR(4, r1)
++ REST_GPR(5, r1)
++ lwz r12, 8(r1)
++ REST_NVGPRS(r1)
++ mtspr SPRN_DSCR, r3
++ mtspr SPRN_XER, r4
++ mtspr SPRN_AMR, r5
++ mtcr r12
++ HMT_MEDIUM
++
+ /*
+ * We were in fake suspend, so we are not going to save the
+ * register state as the guest checkpointed state (since
+@@ -3183,7 +3215,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
+ std r5, VCPU_TFHAR(r9)
+ std r6, VCPU_TFIAR(r9)
+
+- addi r1, r1, PPC_MIN_STKFRM
++ addi r1, r1, TM_FRAME_SIZE
+ ld r0, PPC_LR_STKOFF(r1)
+ mtlr r0
+ blr
+diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c
+index 6884d16ec19b9..732cfc53e260d 100644
+--- a/arch/powerpc/perf/hv-gpci.c
++++ b/arch/powerpc/perf/hv-gpci.c
+@@ -164,7 +164,7 @@ static unsigned long single_gpci_request(u32 req, u32 starting_index,
+ */
+ count = 0;
+ for (i = offset; i < offset + length; i++)
+- count |= arg->bytes[i] << (i - offset);
++ count |= (u64)(arg->bytes[i]) << ((length - 1 - (i - offset)) * 8);
+
+ *value = count;
+ out:
+diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
+index 1932088686a68..e6a5007f017d8 100644
+--- a/arch/s390/include/asm/setup.h
++++ b/arch/s390/include/asm/setup.h
+@@ -39,6 +39,7 @@
+ #define MACHINE_FLAG_NX BIT(15)
+ #define MACHINE_FLAG_GS BIT(16)
+ #define MACHINE_FLAG_SCC BIT(17)
++#define MACHINE_FLAG_PCI_MIO BIT(18)
+
+ #define LPP_MAGIC BIT(31)
+ #define LPP_PID_MASK _AC(0xffffffff, UL)
+@@ -106,6 +107,7 @@ extern unsigned long __swsusp_reset_dma;
+ #define MACHINE_HAS_NX (S390_lowcore.machine_flags & MACHINE_FLAG_NX)
+ #define MACHINE_HAS_GS (S390_lowcore.machine_flags & MACHINE_FLAG_GS)
+ #define MACHINE_HAS_SCC (S390_lowcore.machine_flags & MACHINE_FLAG_SCC)
++#define MACHINE_HAS_PCI_MIO (S390_lowcore.machine_flags & MACHINE_FLAG_PCI_MIO)
+
+ /*
+ * Console mode. Override with conmode=
+diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
+index 2531776cf6cf9..eb89cb0aa60b4 100644
+--- a/arch/s390/kernel/early.c
++++ b/arch/s390/kernel/early.c
+@@ -252,6 +252,10 @@ static __init void detect_machine_facilities(void)
+ clock_comparator_max = -1ULL >> 1;
+ __ctl_set_bit(0, 53);
+ }
++ if (IS_ENABLED(CONFIG_PCI) && test_facility(153)) {
++ S390_lowcore.machine_flags |= MACHINE_FLAG_PCI_MIO;
++ /* the control bit is set during PCI initialization */
++ }
+ }
+
+ static inline void save_vector_registers(void)
+diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
+index ab584e8e35275..9156653b56f69 100644
+--- a/arch/s390/kernel/jump_label.c
++++ b/arch/s390/kernel/jump_label.c
+@@ -36,7 +36,7 @@ static void jump_label_bug(struct jump_entry *entry, struct insn *expected,
+ unsigned char *ipe = (unsigned char *)expected;
+ unsigned char *ipn = (unsigned char *)new;
+
+- pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc);
++ pr_emerg("Jump label code mismatch at %pS [%px]\n", ipc, ipc);
+ pr_emerg("Found: %6ph\n", ipc);
+ pr_emerg("Expected: %6ph\n", ipe);
+ pr_emerg("New: %6ph\n", ipn);
+diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
+index c1d96e588152b..5521f593cd20a 100644
+--- a/arch/s390/mm/init.c
++++ b/arch/s390/mm/init.c
+@@ -168,9 +168,9 @@ static void pv_init(void)
+ return;
+
+ /* make sure bounce buffers are shared */
++ swiotlb_force = SWIOTLB_FORCE;
+ swiotlb_init(1);
+ swiotlb_update_mem_attributes();
+- swiotlb_force = SWIOTLB_FORCE;
+ }
+
+ void __init mem_init(void)
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index 3e6612d8b921c..2d29966276296 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -569,10 +569,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ EMIT4(0xb9080000, dst_reg, src_reg);
+ break;
+ case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */
+- if (!imm)
+- break;
+- /* alfi %dst,imm */
+- EMIT6_IMM(0xc20b0000, dst_reg, imm);
++ if (imm != 0) {
++ /* alfi %dst,imm */
++ EMIT6_IMM(0xc20b0000, dst_reg, imm);
++ }
+ EMIT_ZERO(dst_reg);
+ break;
+ case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */
+@@ -594,17 +594,22 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ EMIT4(0xb9090000, dst_reg, src_reg);
+ break;
+ case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */
+- if (!imm)
+- break;
+- /* alfi %dst,-imm */
+- EMIT6_IMM(0xc20b0000, dst_reg, -imm);
++ if (imm != 0) {
++ /* alfi %dst,-imm */
++ EMIT6_IMM(0xc20b0000, dst_reg, -imm);
++ }
+ EMIT_ZERO(dst_reg);
+ break;
+ case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */
+ if (!imm)
+ break;
+- /* agfi %dst,-imm */
+- EMIT6_IMM(0xc2080000, dst_reg, -imm);
++ if (imm == -0x80000000) {
++ /* algfi %dst,0x80000000 */
++ EMIT6_IMM(0xc20a0000, dst_reg, 0x80000000);
++ } else {
++ /* agfi %dst,-imm */
++ EMIT6_IMM(0xc2080000, dst_reg, -imm);
++ }
+ break;
+ /*
+ * BPF_MUL
+@@ -619,10 +624,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ EMIT4(0xb90c0000, dst_reg, src_reg);
+ break;
+ case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */
+- if (imm == 1)
+- break;
+- /* msfi %r5,imm */
+- EMIT6_IMM(0xc2010000, dst_reg, imm);
++ if (imm != 1) {
++ /* msfi %r5,imm */
++ EMIT6_IMM(0xc2010000, dst_reg, imm);
++ }
+ EMIT_ZERO(dst_reg);
+ break;
+ case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */
+@@ -675,6 +680,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ if (BPF_OP(insn->code) == BPF_MOD)
+ /* lhgi %dst,0 */
+ EMIT4_IMM(0xa7090000, dst_reg, 0);
++ else
++ EMIT_ZERO(dst_reg);
+ break;
+ }
+ /* lhi %w0,0 */
+@@ -769,10 +776,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ EMIT4(0xb9820000, dst_reg, src_reg);
+ break;
+ case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */
+- if (!imm)
+- break;
+- /* xilf %dst,imm */
+- EMIT6_IMM(0xc0070000, dst_reg, imm);
++ if (imm != 0) {
++ /* xilf %dst,imm */
++ EMIT6_IMM(0xc0070000, dst_reg, imm);
++ }
+ EMIT_ZERO(dst_reg);
+ break;
+ case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */
+@@ -793,10 +800,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0);
+ break;
+ case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */
+- if (imm == 0)
+- break;
+- /* sll %dst,imm(%r0) */
+- EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
++ if (imm != 0) {
++ /* sll %dst,imm(%r0) */
++ EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
++ }
+ EMIT_ZERO(dst_reg);
+ break;
+ case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */
+@@ -818,10 +825,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0);
+ break;
+ case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */
+- if (imm == 0)
+- break;
+- /* srl %dst,imm(%r0) */
+- EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
++ if (imm != 0) {
++ /* srl %dst,imm(%r0) */
++ EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
++ }
+ EMIT_ZERO(dst_reg);
+ break;
+ case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */
+@@ -843,10 +850,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0);
+ break;
+ case BPF_ALU | BPF_ARSH | BPF_K: /* ((s32) dst >> imm */
+- if (imm == 0)
+- break;
+- /* sra %dst,imm(%r0) */
+- EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
++ if (imm != 0) {
++ /* sra %dst,imm(%r0) */
++ EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
++ }
+ EMIT_ZERO(dst_reg);
+ break;
+ case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */
+diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
+index 6105b1b6e49b7..b8ddacf1efe11 100644
+--- a/arch/s390/pci/pci.c
++++ b/arch/s390/pci/pci.c
+@@ -854,7 +854,6 @@ static void zpci_mem_exit(void)
+ }
+
+ static unsigned int s390_pci_probe __initdata = 1;
+-static unsigned int s390_pci_no_mio __initdata;
+ unsigned int s390_pci_force_floating __initdata;
+ static unsigned int s390_pci_initialized;
+
+@@ -865,7 +864,7 @@ char * __init pcibios_setup(char *str)
+ return NULL;
+ }
+ if (!strcmp(str, "nomio")) {
+- s390_pci_no_mio = 1;
++ S390_lowcore.machine_flags &= ~MACHINE_FLAG_PCI_MIO;
+ return NULL;
+ }
+ if (!strcmp(str, "force_floating")) {
+@@ -890,7 +889,7 @@ static int __init pci_base_init(void)
+ if (!test_facility(69) || !test_facility(71))
+ return 0;
+
+- if (test_facility(153) && !s390_pci_no_mio) {
++ if (MACHINE_HAS_PCI_MIO) {
+ static_branch_enable(&have_mio);
+ ctl_set_bit(2, 5);
+ }
+diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
+index b8541d77452c1..a37ccafe065ba 100644
+--- a/arch/x86/mm/init_64.c
++++ b/arch/x86/mm/init_64.c
+@@ -1355,18 +1355,18 @@ int kern_addr_valid(unsigned long addr)
+ return 0;
+
+ p4d = p4d_offset(pgd, addr);
+- if (p4d_none(*p4d))
++ if (!p4d_present(*p4d))
+ return 0;
+
+ pud = pud_offset(p4d, addr);
+- if (pud_none(*pud))
++ if (!pud_present(*pud))
+ return 0;
+
+ if (pud_large(*pud))
+ return pfn_valid(pud_pfn(*pud));
+
+ pmd = pmd_offset(pud, addr);
+- if (pmd_none(*pmd))
++ if (!pmd_present(*pmd))
+ return 0;
+
+ if (pmd_large(*pmd))
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index 3e66feff524a8..b99074ca5e686 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -1183,6 +1183,11 @@ static void __init xen_dom0_set_legacy_features(void)
+ x86_platform.legacy.rtc = 1;
+ }
+
++static void __init xen_domu_set_legacy_features(void)
++{
++ x86_platform.legacy.rtc = 0;
++}
++
+ /* First C function to be called on Xen boot */
+ asmlinkage __visible void __init xen_start_kernel(void)
+ {
+@@ -1353,6 +1358,8 @@ asmlinkage __visible void __init xen_start_kernel(void)
+ add_preferred_console("xenboot", 0, NULL);
+ if (pci_xen)
+ x86_init.pci.arch_init = pci_xen_init;
++ x86_platform.set_legacy_features =
++ xen_domu_set_legacy_features;
+ } else {
+ const struct dom0_vga_console_info *info =
+ (void *)((char *)xen_start_info +
+diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
+index 12fcb3858303a..8b1e40ec58f65 100644
+--- a/arch/x86/xen/p2m.c
++++ b/arch/x86/xen/p2m.c
+@@ -622,8 +622,8 @@ int xen_alloc_p2m_entry(unsigned long pfn)
+ }
+
+ /* Expanded the p2m? */
+- if (pfn > xen_p2m_last_pfn) {
+- xen_p2m_last_pfn = pfn;
++ if (pfn >= xen_p2m_last_pfn) {
++ xen_p2m_last_pfn = ALIGN(pfn + 1, P2M_PER_PAGE);
+ HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
+ }
+
+diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
+index af81a62faba64..e7faea3d73d3b 100644
+--- a/arch/xtensa/platforms/iss/console.c
++++ b/arch/xtensa/platforms/iss/console.c
+@@ -168,9 +168,13 @@ static const struct tty_operations serial_ops = {
+
+ int __init rs_init(void)
+ {
+- tty_port_init(&serial_port);
++ int ret;
+
+ serial_driver = alloc_tty_driver(SERIAL_MAX_NUM_LINES);
++ if (!serial_driver)
++ return -ENOMEM;
++
++ tty_port_init(&serial_port);
+
+ pr_info("%s %s\n", serial_name, serial_version);
+
+@@ -190,8 +194,15 @@ int __init rs_init(void)
+ tty_set_operations(serial_driver, &serial_ops);
+ tty_port_link_device(&serial_port, serial_driver, 0);
+
+- if (tty_register_driver(serial_driver))
+- panic("Couldn't register serial driver\n");
++ ret = tty_register_driver(serial_driver);
++ if (ret) {
++ pr_err("Couldn't register serial driver\n");
++ tty_driver_kref_put(serial_driver);
++ tty_port_destroy(&serial_port);
++
++ return ret;
++ }
++
+ return 0;
+ }
+
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index 136232a01f715..8dee243e639f0 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -2523,6 +2523,15 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
+ * are likely to increase the throughput.
+ */
+ bfqq->new_bfqq = new_bfqq;
++ /*
++ * The above assignment schedules the following redirections:
++ * each time some I/O for bfqq arrives, the process that
++ * generated that I/O is disassociated from bfqq and
++ * associated with new_bfqq. Here we increases new_bfqq->ref
++ * in advance, adding the number of processes that are
++ * expected to be associated with new_bfqq as they happen to
++ * issue I/O.
++ */
+ new_bfqq->ref += process_refs;
+ return new_bfqq;
+ }
+@@ -2582,6 +2591,10 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ {
+ struct bfq_queue *in_service_bfqq, *new_bfqq;
+
++ /* if a merge has already been setup, then proceed with that first */
++ if (bfqq->new_bfqq)
++ return bfqq->new_bfqq;
++
+ /*
+ * Do not perform queue merging if the device is non
+ * rotational and performs internal queueing. In fact, such a
+@@ -2636,9 +2649,6 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ if (bfq_too_late_for_merging(bfqq))
+ return NULL;
+
+- if (bfqq->new_bfqq)
+- return bfqq->new_bfqq;
+-
+ if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
+ return NULL;
+
+@@ -5004,7 +5014,7 @@ bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
+ if (bfqq->new_ioprio >= IOPRIO_BE_NR) {
+ pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n",
+ bfqq->new_ioprio);
+- bfqq->new_ioprio = IOPRIO_BE_NR;
++ bfqq->new_ioprio = IOPRIO_BE_NR - 1;
+ }
+
+ bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
+diff --git a/block/blk-zoned.c b/block/blk-zoned.c
+index b17c094cb977c..a85d0a06a6ff2 100644
+--- a/block/blk-zoned.c
++++ b/block/blk-zoned.c
+@@ -316,9 +316,6 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
+ if (!blk_queue_is_zoned(q))
+ return -ENOTTY;
+
+- if (!capable(CAP_SYS_ADMIN))
+- return -EACCES;
+-
+ if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
+ return -EFAULT;
+
+@@ -374,9 +371,6 @@ int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
+ if (!blk_queue_is_zoned(q))
+ return -ENOTTY;
+
+- if (!capable(CAP_SYS_ADMIN))
+- return -EACCES;
+-
+ if (!(mode & FMODE_WRITE))
+ return -EBADF;
+
+diff --git a/block/bsg.c b/block/bsg.c
+index 0d012efef5274..c8b9714e69232 100644
+--- a/block/bsg.c
++++ b/block/bsg.c
+@@ -371,10 +371,13 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ case SG_GET_RESERVED_SIZE:
+ case SG_SET_RESERVED_SIZE:
+ case SG_EMULATED_HOST:
+- case SCSI_IOCTL_SEND_COMMAND:
+ return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
+ case SG_IO:
+ return bsg_sg_io(bd->queue, file->f_mode, uarg);
++ case SCSI_IOCTL_SEND_COMMAND:
++ pr_warn_ratelimited("%s: calling unsupported SCSI_IOCTL_SEND_COMMAND\n",
++ current->comm);
++ return -EINVAL;
+ default:
+ return -ENOTTY;
+ }
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 7788af0ca1090..5c354c7aff946 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4556,6 +4556,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM, },
++ { "Samsung SSD 860*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
++ ATA_HORKAGE_ZERO_AFTER_TRIM, },
++ { "Samsung SSD 870*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
++ ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM, },
+
+diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
+index 9dcef6ac643b9..982fe91125322 100644
+--- a/drivers/ata/sata_dwc_460ex.c
++++ b/drivers/ata/sata_dwc_460ex.c
+@@ -1249,24 +1249,20 @@ static int sata_dwc_probe(struct platform_device *ofdev)
+ irq = irq_of_parse_and_map(np, 0);
+ if (irq == NO_IRQ) {
+ dev_err(&ofdev->dev, "no SATA DMA irq\n");
+- err = -ENODEV;
+- goto error_out;
++ return -ENODEV;
+ }
+
+ #ifdef CONFIG_SATA_DWC_OLD_DMA
+ if (!of_find_property(np, "dmas", NULL)) {
+ err = sata_dwc_dma_init_old(ofdev, hsdev);
+ if (err)
+- goto error_out;
++ return err;
+ }
+ #endif
+
+ hsdev->phy = devm_phy_optional_get(hsdev->dev, "sata-phy");
+- if (IS_ERR(hsdev->phy)) {
+- err = PTR_ERR(hsdev->phy);
+- hsdev->phy = NULL;
+- goto error_out;
+- }
++ if (IS_ERR(hsdev->phy))
++ return PTR_ERR(hsdev->phy);
+
+ err = phy_init(hsdev->phy);
+ if (err)
+diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
+index 977d27bd1a220..9a9dec657c166 100644
+--- a/drivers/base/power/trace.c
++++ b/drivers/base/power/trace.c
+@@ -13,6 +13,7 @@
+ #include <linux/export.h>
+ #include <linux/rtc.h>
+ #include <linux/suspend.h>
++#include <linux/init.h>
+
+ #include <linux/mc146818rtc.h>
+
+@@ -165,6 +166,9 @@ void generate_pm_trace(const void *tracedata, unsigned int user)
+ const char *file = *(const char **)(tracedata + 2);
+ unsigned int user_hash_value, file_hash_value;
+
++ if (!x86_platform.legacy.rtc)
++ return;
++
+ user_hash_value = user % USERHASH;
+ file_hash_value = hash_string(lineno, file, FILEHASH);
+ set_magic_time(user_hash_value, file_hash_value, dev_hash_value);
+@@ -267,6 +271,9 @@ static struct notifier_block pm_trace_nb = {
+
+ static int early_resume_init(void)
+ {
++ if (!x86_platform.legacy.rtc)
++ return 0;
++
+ hash_value_early_read = read_magic_time();
+ register_pm_notifier(&pm_trace_nb);
+ return 0;
+@@ -277,6 +284,9 @@ static int late_resume_init(void)
+ unsigned int val = hash_value_early_read;
+ unsigned int user, file, dev;
+
++ if (!x86_platform.legacy.rtc)
++ return 0;
++
+ user = val % USERHASH;
+ val = val / USERHASH;
+ file = val % FILEHASH;
+diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
+index 44a46dcc0518b..d7fe1303f79dc 100644
+--- a/drivers/clk/at91/clk-generated.c
++++ b/drivers/clk/at91/clk-generated.c
+@@ -18,8 +18,6 @@
+
+ #define GENERATED_MAX_DIV 255
+
+-#define GCK_INDEX_DT_AUDIO_PLL 5
+-
+ struct clk_generated {
+ struct clk_hw hw;
+ struct regmap *regmap;
+@@ -29,7 +27,7 @@ struct clk_generated {
+ u32 gckdiv;
+ const struct clk_pcr_layout *layout;
+ u8 parent_id;
+- bool audio_pll_allowed;
++ int chg_pid;
+ };
+
+ #define to_clk_generated(hw) \
+@@ -109,7 +107,7 @@ static void clk_generated_best_diff(struct clk_rate_request *req,
+ tmp_rate = parent_rate / div;
+ tmp_diff = abs(req->rate - tmp_rate);
+
+- if (*best_diff < 0 || *best_diff > tmp_diff) {
++ if (*best_diff < 0 || *best_diff >= tmp_diff) {
+ *best_rate = tmp_rate;
+ *best_diff = tmp_diff;
+ req->best_parent_rate = parent_rate;
+@@ -129,7 +127,16 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
+ int i;
+ u32 div;
+
+- for (i = 0; i < clk_hw_get_num_parents(hw) - 1; i++) {
++ /* do not look for a rate that is outside of our range */
++ if (gck->range.max && req->rate > gck->range.max)
++ req->rate = gck->range.max;
++ if (gck->range.min && req->rate < gck->range.min)
++ req->rate = gck->range.min;
++
++ for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
++ if (gck->chg_pid == i)
++ continue;
++
+ parent = clk_hw_get_parent_by_index(hw, i);
+ if (!parent)
+ continue;
+@@ -161,10 +168,10 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
+ * that the only clks able to modify gck rate are those of audio IPs.
+ */
+
+- if (!gck->audio_pll_allowed)
++ if (gck->chg_pid < 0)
+ goto end;
+
+- parent = clk_hw_get_parent_by_index(hw, GCK_INDEX_DT_AUDIO_PLL);
++ parent = clk_hw_get_parent_by_index(hw, gck->chg_pid);
+ if (!parent)
+ goto end;
+
+@@ -271,8 +278,8 @@ struct clk_hw * __init
+ at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
+ const struct clk_pcr_layout *layout,
+ const char *name, const char **parent_names,
+- u8 num_parents, u8 id, bool pll_audio,
+- const struct clk_range *range)
++ u8 num_parents, u8 id,
++ const struct clk_range *range, int chg_pid)
+ {
+ struct clk_generated *gck;
+ struct clk_init_data init;
+@@ -287,15 +294,16 @@ at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
+ init.ops = &generated_ops;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+- init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+- CLK_SET_RATE_PARENT;
++ init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
++ if (chg_pid >= 0)
++ init.flags |= CLK_SET_RATE_PARENT;
+
+ gck->id = id;
+ gck->hw.init = &init;
+ gck->regmap = regmap;
+ gck->lock = lock;
+ gck->range = *range;
+- gck->audio_pll_allowed = pll_audio;
++ gck->chg_pid = chg_pid;
+ gck->layout = layout;
+
+ clk_generated_startup(gck);
+diff --git a/drivers/clk/at91/dt-compat.c b/drivers/clk/at91/dt-compat.c
+index aa1754eac59ff..8a652c44c25ab 100644
+--- a/drivers/clk/at91/dt-compat.c
++++ b/drivers/clk/at91/dt-compat.c
+@@ -22,6 +22,8 @@
+
+ #define SYSTEM_MAX_ID 31
+
++#define GCK_INDEX_DT_AUDIO_PLL 5
++
+ #ifdef CONFIG_HAVE_AT91_AUDIO_PLL
+ static void __init of_sama5d2_clk_audio_pll_frac_setup(struct device_node *np)
+ {
+@@ -135,7 +137,7 @@ static void __init of_sama5d2_clk_generated_setup(struct device_node *np)
+ return;
+
+ for_each_child_of_node(np, gcknp) {
+- bool pll_audio = false;
++ int chg_pid = INT_MIN;
+
+ if (of_property_read_u32(gcknp, "reg", &id))
+ continue;
+@@ -152,12 +154,12 @@ static void __init of_sama5d2_clk_generated_setup(struct device_node *np)
+ if (of_device_is_compatible(np, "atmel,sama5d2-clk-generated") &&
+ (id == GCK_ID_I2S0 || id == GCK_ID_I2S1 ||
+ id == GCK_ID_CLASSD))
+- pll_audio = true;
++ chg_pid = GCK_INDEX_DT_AUDIO_PLL;
+
+ hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
+ &dt_pcr_layout, name,
+ parent_names, num_parents,
+- id, pll_audio, &range);
++ id, &range, chg_pid);
+ if (IS_ERR(hw))
+ continue;
+
+diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
+index 9b8db9cdcda53..8a88ad2360742 100644
+--- a/drivers/clk/at91/pmc.h
++++ b/drivers/clk/at91/pmc.h
+@@ -118,8 +118,8 @@ struct clk_hw * __init
+ at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
+ const struct clk_pcr_layout *layout,
+ const char *name, const char **parent_names,
+- u8 num_parents, u8 id, bool pll_audio,
+- const struct clk_range *range);
++ u8 num_parents, u8 id,
++ const struct clk_range *range, int chg_pid);
+
+ struct clk_hw * __init
+ at91_clk_register_h32mx(struct regmap *regmap, const char *name,
+diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c
+index e3f4c8f20223a..39923899478f9 100644
+--- a/drivers/clk/at91/sam9x60.c
++++ b/drivers/clk/at91/sam9x60.c
+@@ -124,7 +124,6 @@ static const struct {
+ char *n;
+ u8 id;
+ struct clk_range r;
+- bool pll;
+ } sam9x60_gck[] = {
+ { .n = "flex0_gclk", .id = 5, },
+ { .n = "flex1_gclk", .id = 6, },
+@@ -144,11 +143,9 @@ static const struct {
+ { .n = "sdmmc1_gclk", .id = 26, .r = { .min = 0, .max = 105000000 }, },
+ { .n = "flex11_gclk", .id = 32, },
+ { .n = "flex12_gclk", .id = 33, },
+- { .n = "i2s_gclk", .id = 34, .r = { .min = 0, .max = 105000000 },
+- .pll = true, },
++ { .n = "i2s_gclk", .id = 34, .r = { .min = 0, .max = 105000000 }, },
+ { .n = "pit64b_gclk", .id = 37, },
+- { .n = "classd_gclk", .id = 42, .r = { .min = 0, .max = 100000000 },
+- .pll = true, },
++ { .n = "classd_gclk", .id = 42, .r = { .min = 0, .max = 100000000 }, },
+ { .n = "tcb1_gclk", .id = 45, },
+ { .n = "dbgu_gclk", .id = 47, },
+ };
+@@ -285,8 +282,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
+ sam9x60_gck[i].n,
+ parent_names, 6,
+ sam9x60_gck[i].id,
+- sam9x60_gck[i].pll,
+- &sam9x60_gck[i].r);
++ &sam9x60_gck[i].r, INT_MIN);
+ if (IS_ERR(hw))
+ goto err_free;
+
+diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c
+index ff7e3f727082e..d3c4bceb032d1 100644
+--- a/drivers/clk/at91/sama5d2.c
++++ b/drivers/clk/at91/sama5d2.c
+@@ -115,21 +115,20 @@ static const struct {
+ char *n;
+ u8 id;
+ struct clk_range r;
+- bool pll;
++ int chg_pid;
+ } sama5d2_gck[] = {
+- { .n = "sdmmc0_gclk", .id = 31, },
+- { .n = "sdmmc1_gclk", .id = 32, },
+- { .n = "tcb0_gclk", .id = 35, .r = { .min = 0, .max = 83000000 }, },
+- { .n = "tcb1_gclk", .id = 36, .r = { .min = 0, .max = 83000000 }, },
+- { .n = "pwm_gclk", .id = 38, .r = { .min = 0, .max = 83000000 }, },
+- { .n = "isc_gclk", .id = 46, },
+- { .n = "pdmic_gclk", .id = 48, },
+- { .n = "i2s0_gclk", .id = 54, .pll = true },
+- { .n = "i2s1_gclk", .id = 55, .pll = true },
+- { .n = "can0_gclk", .id = 56, .r = { .min = 0, .max = 80000000 }, },
+- { .n = "can1_gclk", .id = 57, .r = { .min = 0, .max = 80000000 }, },
+- { .n = "classd_gclk", .id = 59, .r = { .min = 0, .max = 100000000 },
+- .pll = true },
++ { .n = "sdmmc0_gclk", .id = 31, .chg_pid = INT_MIN, },
++ { .n = "sdmmc1_gclk", .id = 32, .chg_pid = INT_MIN, },
++ { .n = "tcb0_gclk", .id = 35, .chg_pid = INT_MIN, .r = { .min = 0, .max = 83000000 }, },
++ { .n = "tcb1_gclk", .id = 36, .chg_pid = INT_MIN, .r = { .min = 0, .max = 83000000 }, },
++ { .n = "pwm_gclk", .id = 38, .chg_pid = INT_MIN, .r = { .min = 0, .max = 83000000 }, },
++ { .n = "isc_gclk", .id = 46, .chg_pid = INT_MIN, },
++ { .n = "pdmic_gclk", .id = 48, .chg_pid = INT_MIN, },
++ { .n = "i2s0_gclk", .id = 54, .chg_pid = 5, },
++ { .n = "i2s1_gclk", .id = 55, .chg_pid = 5, },
++ { .n = "can0_gclk", .id = 56, .chg_pid = INT_MIN, .r = { .min = 0, .max = 80000000 }, },
++ { .n = "can1_gclk", .id = 57, .chg_pid = INT_MIN, .r = { .min = 0, .max = 80000000 }, },
++ { .n = "classd_gclk", .id = 59, .chg_pid = 5, .r = { .min = 0, .max = 100000000 }, },
+ };
+
+ static const struct clk_programmable_layout sama5d2_programmable_layout = {
+@@ -317,8 +316,8 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
+ sama5d2_gck[i].n,
+ parent_names, 6,
+ sama5d2_gck[i].id,
+- sama5d2_gck[i].pll,
+- &sama5d2_gck[i].r);
++ &sama5d2_gck[i].r,
++ sama5d2_gck[i].chg_pid);
+ if (IS_ERR(hw))
+ goto err_free;
+
+diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
+index bc6ccf2c7aae0..c636c9ba01008 100644
+--- a/drivers/cpufreq/powernv-cpufreq.c
++++ b/drivers/cpufreq/powernv-cpufreq.c
+@@ -36,6 +36,7 @@
+ #define MAX_PSTATE_SHIFT 32
+ #define LPSTATE_SHIFT 48
+ #define GPSTATE_SHIFT 56
++#define MAX_NR_CHIPS 32
+
+ #define MAX_RAMP_DOWN_TIME 5120
+ /*
+@@ -1050,12 +1051,20 @@ static int init_chip_info(void)
+ unsigned int *chip;
+ unsigned int cpu, i;
+ unsigned int prev_chip_id = UINT_MAX;
++ cpumask_t *chip_cpu_mask;
+ int ret = 0;
+
+ chip = kcalloc(num_possible_cpus(), sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
++ /* Allocate a chip cpu mask large enough to fit mask for all chips */
++ chip_cpu_mask = kcalloc(MAX_NR_CHIPS, sizeof(cpumask_t), GFP_KERNEL);
++ if (!chip_cpu_mask) {
++ ret = -ENOMEM;
++ goto free_and_return;
++ }
++
+ for_each_possible_cpu(cpu) {
+ unsigned int id = cpu_to_chip_id(cpu);
+
+@@ -1063,22 +1072,25 @@ static int init_chip_info(void)
+ prev_chip_id = id;
+ chip[nr_chips++] = id;
+ }
++ cpumask_set_cpu(cpu, &chip_cpu_mask[nr_chips-1]);
+ }
+
+ chips = kcalloc(nr_chips, sizeof(struct chip), GFP_KERNEL);
+ if (!chips) {
+ ret = -ENOMEM;
+- goto free_and_return;
++ goto out_free_chip_cpu_mask;
+ }
+
+ for (i = 0; i < nr_chips; i++) {
+ chips[i].id = chip[i];
+- cpumask_copy(&chips[i].mask, cpumask_of_node(chip[i]));
++ cpumask_copy(&chips[i].mask, &chip_cpu_mask[i]);
+ INIT_WORK(&chips[i].throttle, powernv_cpufreq_work_fn);
+ for_each_cpu(cpu, &chips[i].mask)
+ per_cpu(chip_info, cpu) = &chips[i];
+ }
+
++out_free_chip_cpu_mask:
++ kfree(chip_cpu_mask);
+ free_and_return:
+ kfree(chip);
+ return ret;
+diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
+index 66fa524b6261e..5471110792071 100644
+--- a/drivers/crypto/mxs-dcp.c
++++ b/drivers/crypto/mxs-dcp.c
+@@ -298,21 +298,20 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
+
+ struct scatterlist *dst = req->dst;
+ struct scatterlist *src = req->src;
+- const int nents = sg_nents(req->src);
++ int dst_nents = sg_nents(dst);
+
+ const int out_off = DCP_BUF_SZ;
+ uint8_t *in_buf = sdcp->coh->aes_in_buf;
+ uint8_t *out_buf = sdcp->coh->aes_out_buf;
+
+- uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
+ uint32_t dst_off = 0;
++ uint8_t *src_buf = NULL;
+ uint32_t last_out_len = 0;
+
+ uint8_t *key = sdcp->coh->aes_key;
+
+ int ret = 0;
+- int split = 0;
+- unsigned int i, len, clen, rem = 0, tlen = 0;
++ unsigned int i, len, clen, tlen = 0;
+ int init = 0;
+ bool limit_hit = false;
+
+@@ -330,7 +329,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
+ memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
+ }
+
+- for_each_sg(req->src, src, nents, i) {
++ for_each_sg(req->src, src, sg_nents(src), i) {
+ src_buf = sg_virt(src);
+ len = sg_dma_len(src);
+ tlen += len;
+@@ -355,34 +354,17 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
+ * submit the buffer.
+ */
+ if (actx->fill == out_off || sg_is_last(src) ||
+- limit_hit) {
++ limit_hit) {
+ ret = mxs_dcp_run_aes(actx, req, init);
+ if (ret)
+ return ret;
+ init = 0;
+
+- out_tmp = out_buf;
++ sg_pcopy_from_buffer(dst, dst_nents, out_buf,
++ actx->fill, dst_off);
++ dst_off += actx->fill;
+ last_out_len = actx->fill;
+- while (dst && actx->fill) {
+- if (!split) {
+- dst_buf = sg_virt(dst);
+- dst_off = 0;
+- }
+- rem = min(sg_dma_len(dst) - dst_off,
+- actx->fill);
+-
+- memcpy(dst_buf + dst_off, out_tmp, rem);
+- out_tmp += rem;
+- dst_off += rem;
+- actx->fill -= rem;
+-
+- if (dst_off == sg_dma_len(dst)) {
+- dst = sg_next(dst);
+- split = 0;
+- } else {
+- split = 1;
+- }
+- }
++ actx->fill = 0;
+ }
+ } while (len);
+
+diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
+index 67736c801f3ca..cc70da05db4b5 100644
+--- a/drivers/dma/imx-sdma.c
++++ b/drivers/dma/imx-sdma.c
+@@ -377,7 +377,6 @@ struct sdma_channel {
+ unsigned long watermark_level;
+ u32 shp_addr, per_addr;
+ enum dma_status status;
+- bool context_loaded;
+ struct imx_dma_data data;
+ struct work_struct terminate_worker;
+ };
+@@ -988,9 +987,6 @@ static int sdma_load_context(struct sdma_channel *sdmac)
+ int ret;
+ unsigned long flags;
+
+- if (sdmac->context_loaded)
+- return 0;
+-
+ if (sdmac->direction == DMA_DEV_TO_MEM)
+ load_address = sdmac->pc_from_device;
+ else if (sdmac->direction == DMA_DEV_TO_DEV)
+@@ -1033,8 +1029,6 @@ static int sdma_load_context(struct sdma_channel *sdmac)
+
+ spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
+
+- sdmac->context_loaded = true;
+-
+ return ret;
+ }
+
+@@ -1074,7 +1068,6 @@ static void sdma_channel_terminate_work(struct work_struct *work)
+ sdmac->desc = NULL;
+ spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+ vchan_dma_desc_free_list(&sdmac->vc, &head);
+- sdmac->context_loaded = false;
+ }
+
+ static int sdma_disable_channel_async(struct dma_chan *chan)
+@@ -1141,7 +1134,6 @@ static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
+ static int sdma_config_channel(struct dma_chan *chan)
+ {
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
+- int ret;
+
+ sdma_disable_channel(chan);
+
+@@ -1181,9 +1173,7 @@ static int sdma_config_channel(struct dma_chan *chan)
+ sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
+ }
+
+- ret = sdma_load_context(sdmac);
+-
+- return ret;
++ return 0;
+ }
+
+ static int sdma_set_channel_priority(struct sdma_channel *sdmac,
+@@ -1335,7 +1325,6 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
+
+ sdmac->event_id0 = 0;
+ sdmac->event_id1 = 0;
+- sdmac->context_loaded = false;
+
+ sdma_set_channel_priority(sdmac, 0);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index d1e278e999eeb..1b2fa83798304 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -762,7 +762,7 @@ enum amd_hw_ip_block_type {
+ MAX_HWIP
+ };
+
+-#define HWIP_MAX_INSTANCE 8
++#define HWIP_MAX_INSTANCE 10
+
+ struct amd_powerplay {
+ void *pp_handle;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+index 70dbe343f51df..89cecdba81ace 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+@@ -339,7 +339,7 @@ static void amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus,
+ void
+ amdgpu_i2c_router_select_ddc_port(const struct amdgpu_connector *amdgpu_connector)
+ {
+- u8 val;
++ u8 val = 0;
+
+ if (!amdgpu_connector->router.ddc_valid)
+ return;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index 28361a9c5addc..532d1842f6a30 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -200,7 +200,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
+ c++;
+ }
+
+- BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS);
++ BUG_ON(c > AMDGPU_BO_MAX_PLACEMENTS);
+
+ placement->num_placement = c;
+ placement->placement = places;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+index 8a32b5c93778b..bd7ae3e130b6f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+@@ -138,7 +138,7 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
1627 + return ret;
1628 + }
1629 +
1630 +- __decode_table_header_from_buff(hdr, &buff[2]);
1631 ++ __decode_table_header_from_buff(hdr, buff);
1632 +
1633 + if (hdr->header == EEPROM_TABLE_HDR_VAL) {
1634 + control->num_recs = (hdr->tbl_size - EEPROM_TABLE_HEADER_SIZE) /
1635 +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
1636 +index 88813dad731fa..c021519af8106 100644
1637 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
1638 ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
1639 +@@ -98,36 +98,78 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
1640 + uint32_t *se_mask)
1641 + {
1642 + struct kfd_cu_info cu_info;
1643 +- uint32_t cu_per_se[KFD_MAX_NUM_SE] = {0};
1644 +- int i, se, sh, cu = 0;
1645 +-
1646 ++ uint32_t cu_per_sh[KFD_MAX_NUM_SE][KFD_MAX_NUM_SH_PER_SE] = {0};
1647 ++ int i, se, sh, cu;
1648 + amdgpu_amdkfd_get_cu_info(mm->dev->kgd, &cu_info);
1649 +
1650 + if (cu_mask_count > cu_info.cu_active_number)
1651 + cu_mask_count = cu_info.cu_active_number;
1652 +
1653 ++ /* Exceeding these bounds corrupts the stack and indicates a coding error.
1654 ++ * Returning with no CU's enabled will hang the queue, which should be
1655 ++ * attention grabbing.
1656 ++ */
1657 ++ if (cu_info.num_shader_engines > KFD_MAX_NUM_SE) {
1658 ++ pr_err("Exceeded KFD_MAX_NUM_SE, chip reports %d\n", cu_info.num_shader_engines);
1659 ++ return;
1660 ++ }
1661 ++ if (cu_info.num_shader_arrays_per_engine > KFD_MAX_NUM_SH_PER_SE) {
1662 ++ pr_err("Exceeded KFD_MAX_NUM_SH, chip reports %d\n",
1663 ++ cu_info.num_shader_arrays_per_engine * cu_info.num_shader_engines);
1664 ++ return;
1665 ++ }
1666 ++ /* Count active CUs per SH.
1667 ++ *
1668 ++ * Some CUs in an SH may be disabled. HW expects disabled CUs to be
1669 ++ * represented in the high bits of each SH's enable mask (the upper and lower
1670 ++ * 16 bits of se_mask) and will take care of the actual distribution of
1671 ++ * disabled CUs within each SH automatically.
1672 ++ * Each half of se_mask must be filled only on bits 0-cu_per_sh[se][sh]-1.
1673 ++ *
1674 ++ * See note on Arcturus cu_bitmap layout in gfx_v9_0_get_cu_info.
1675 ++ */
1676 + for (se = 0; se < cu_info.num_shader_engines; se++)
1677 + for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++)
1678 +- cu_per_se[se] += hweight32(cu_info.cu_bitmap[se % 4][sh + (se / 4)]);
1679 +-
1680 +- /* Symmetrically map cu_mask to all SEs:
1681 +- * cu_mask[0] bit0 -> se_mask[0] bit0;
1682 +- * cu_mask[0] bit1 -> se_mask[1] bit0;
1683 +- * ... (if # SE is 4)
1684 +- * cu_mask[0] bit4 -> se_mask[0] bit1;
1685 ++ cu_per_sh[se][sh] = hweight32(cu_info.cu_bitmap[se % 4][sh + (se / 4)]);
1686 ++
1687 ++ /* Symmetrically map cu_mask to all SEs & SHs:
1688 ++ * se_mask programs up to 2 SH in the upper and lower 16 bits.
1689 ++ *
1690 ++ * Examples
1691 ++ * Assuming 1 SH/SE, 4 SEs:
1692 ++ * cu_mask[0] bit0 -> se_mask[0] bit0
1693 ++ * cu_mask[0] bit1 -> se_mask[1] bit0
1694 ++ * ...
1695 ++ * cu_mask[0] bit4 -> se_mask[0] bit1
1696 ++ * ...
1697 ++ *
1698 ++ * Assuming 2 SH/SE, 4 SEs
1699 ++ * cu_mask[0] bit0 -> se_mask[0] bit0 (SE0,SH0,CU0)
1700 ++ * cu_mask[0] bit1 -> se_mask[1] bit0 (SE1,SH0,CU0)
1701 ++ * ...
1702 ++ * cu_mask[0] bit4 -> se_mask[0] bit16 (SE0,SH1,CU0)
1703 ++ * cu_mask[0] bit5 -> se_mask[1] bit16 (SE1,SH1,CU0)
1704 ++ * ...
1705 ++ * cu_mask[0] bit8 -> se_mask[0] bit1 (SE0,SH0,CU1)
1706 + * ...
1707 ++ *
1708 ++ * First ensure all CUs are disabled, then enable user specified CUs.
1709 + */
1710 +- se = 0;
1711 +- for (i = 0; i < cu_mask_count; i++) {
1712 +- if (cu_mask[i / 32] & (1 << (i % 32)))
1713 +- se_mask[se] |= 1 << cu;
1714 +-
1715 +- do {
1716 +- se++;
1717 +- if (se == cu_info.num_shader_engines) {
1718 +- se = 0;
1719 +- cu++;
1720 ++ for (i = 0; i < cu_info.num_shader_engines; i++)
1721 ++ se_mask[i] = 0;
1722 ++
1723 ++ i = 0;
1724 ++ for (cu = 0; cu < 16; cu++) {
1725 ++ for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++) {
1726 ++ for (se = 0; se < cu_info.num_shader_engines; se++) {
1727 ++ if (cu_per_sh[se][sh] > cu) {
1728 ++ if (cu_mask[i / 32] & (1 << (i % 32)))
1729 ++ se_mask[se] |= 1 << (cu + sh * 16);
1730 ++ i++;
1731 ++ if (i == cu_mask_count)
1732 ++ return;
1733 ++ }
1734 + }
1735 +- } while (cu >= cu_per_se[se] && cu < 32);
1736 ++ }
1737 + }
1738 + }
1739 +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
1740 +index fbdb16418847c..4edc012e31387 100644
1741 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
1742 ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
1743 +@@ -27,6 +27,7 @@
1744 + #include "kfd_priv.h"
1745 +
1746 + #define KFD_MAX_NUM_SE 8
1747 ++#define KFD_MAX_NUM_SH_PER_SE 2
1748 +
1749 + /**
1750 + * struct mqd_manager
1751 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
1752 +index f3dfb2887ae0b..2cdcefab2d7d4 100644
1753 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
1754 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
1755 +@@ -95,29 +95,29 @@ static ssize_t dp_link_settings_read(struct file *f, char __user *buf,
1756 +
1757 + rd_buf_ptr = rd_buf;
1758 +
1759 +- str_len = strlen("Current: %d %d %d ");
1760 +- snprintf(rd_buf_ptr, str_len, "Current: %d %d %d ",
1761 ++ str_len = strlen("Current: %d 0x%x %d ");
1762 ++ snprintf(rd_buf_ptr, str_len, "Current: %d 0x%x %d ",
1763 + link->cur_link_settings.lane_count,
1764 + link->cur_link_settings.link_rate,
1765 + link->cur_link_settings.link_spread);
1766 + rd_buf_ptr += str_len;
1767 +
1768 +- str_len = strlen("Verified: %d %d %d ");
1769 +- snprintf(rd_buf_ptr, str_len, "Verified: %d %d %d ",
1770 ++ str_len = strlen("Verified: %d 0x%x %d ");
1771 ++ snprintf(rd_buf_ptr, str_len, "Verified: %d 0x%x %d ",
1772 + link->verified_link_cap.lane_count,
1773 + link->verified_link_cap.link_rate,
1774 + link->verified_link_cap.link_spread);
1775 + rd_buf_ptr += str_len;
1776 +
1777 +- str_len = strlen("Reported: %d %d %d ");
1778 +- snprintf(rd_buf_ptr, str_len, "Reported: %d %d %d ",
1779 ++ str_len = strlen("Reported: %d 0x%x %d ");
1780 ++ snprintf(rd_buf_ptr, str_len, "Reported: %d 0x%x %d ",
1781 + link->reported_link_cap.lane_count,
1782 + link->reported_link_cap.link_rate,
1783 + link->reported_link_cap.link_spread);
1784 + rd_buf_ptr += str_len;
1785 +
1786 +- str_len = strlen("Preferred: %d %d %d ");
1787 +- snprintf(rd_buf_ptr, str_len, "Preferred: %d %d %d\n",
1788 ++ str_len = strlen("Preferred: %d 0x%x %d ");
1789 ++ snprintf(rd_buf_ptr, str_len, "Preferred: %d 0x%x %d\n",
1790 + link->preferred_link_setting.lane_count,
1791 + link->preferred_link_setting.link_rate,
1792 + link->preferred_link_setting.link_spread);
1793 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
1794 +index 60123db7ba02f..bc5ebea1abede 100644
1795 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
1796 ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
1797 +@@ -3264,13 +3264,12 @@ static enum dc_status dcn10_set_clock(struct dc *dc,
1798 + struct dc_clock_config clock_cfg = {0};
1799 + struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
1800 +
1801 +- if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
1802 +- dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
1803 +- context, clock_type, &clock_cfg);
1804 +-
1805 +- if (!dc->clk_mgr->funcs->get_clock)
1806 ++ if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
1807 + return DC_FAIL_UNSUPPORTED_1;
1808 +
1809 ++ dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
1810 ++ context, clock_type, &clock_cfg);
1811 ++
1812 + if (clk_khz > clock_cfg.max_clock_khz)
1813 + return DC_FAIL_CLK_EXCEED_MAX;
1814 +
1815 +@@ -3288,7 +3287,7 @@ static enum dc_status dcn10_set_clock(struct dc *dc,
1816 + else
1817 + return DC_ERROR_UNEXPECTED;
1818 +
1819 +- if (dc->clk_mgr && dc->clk_mgr->funcs->update_clocks)
1820 ++ if (dc->clk_mgr->funcs->update_clocks)
1821 + dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
1822 + context, true);
1823 + return DC_OK;
1824 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
1825 +index 2b1175bb2daee..d2ea4c003d442 100644
1826 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
1827 ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
1828 +@@ -2232,7 +2232,7 @@ void dcn20_set_mcif_arb_params(
1829 + wb_arb_params->cli_watermark[k] = get_wm_writeback_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1830 + wb_arb_params->pstate_watermark[k] = get_wm_writeback_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1831 + }
1832 +- wb_arb_params->time_per_pixel = 16.0 / context->res_ctx.pipe_ctx[i].stream->phy_pix_clk; /* 4 bit fraction, ms */
1833 ++ wb_arb_params->time_per_pixel = 16.0 * 1000 / (context->res_ctx.pipe_ctx[i].stream->phy_pix_clk / 1000); /* 4 bit fraction, ms */
1834 + wb_arb_params->slice_lines = 32;
1835 + wb_arb_params->arbitration_slice = 2;
1836 + wb_arb_params->max_scaled_time = dcn20_calc_max_scaled_time(wb_arb_params->time_per_pixel,
1837 +diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
1838 +index 00debd02c3220..0ba92428ef560 100644
1839 +--- a/drivers/gpu/drm/drm_debugfs.c
1840 ++++ b/drivers/gpu/drm/drm_debugfs.c
1841 +@@ -91,6 +91,7 @@ static int drm_clients_info(struct seq_file *m, void *data)
1842 + mutex_lock(&dev->filelist_mutex);
1843 + list_for_each_entry_reverse(priv, &dev->filelist, lhead) {
1844 + struct task_struct *task;
1845 ++ bool is_current_master = drm_is_current_master(priv);
1846 +
1847 + rcu_read_lock(); /* locks pid_task()->comm */
1848 + task = pid_task(priv->pid, PIDTYPE_PID);
1849 +@@ -99,7 +100,7 @@ static int drm_clients_info(struct seq_file *m, void *data)
1850 + task ? task->comm : "<unknown>",
1851 + pid_vnr(priv->pid),
1852 + priv->minor->index,
1853 +- drm_is_current_master(priv) ? 'y' : 'n',
1854 ++ is_current_master ? 'y' : 'n',
1855 + priv->authenticated ? 'y' : 'n',
1856 + from_kuid_munged(seq_user_ns(m), uid),
1857 + priv->magic);
1858 +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
1859 +index 0c9c40720ca9a..35225ff8792dd 100644
1860 +--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
1861 ++++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
1862 +@@ -397,8 +397,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
1863 + if (switch_mmu_context) {
1864 + struct etnaviv_iommu_context *old_context = gpu->mmu_context;
1865 +
1866 +- etnaviv_iommu_context_get(mmu_context);
1867 +- gpu->mmu_context = mmu_context;
1868 ++ gpu->mmu_context = etnaviv_iommu_context_get(mmu_context);
1869 + etnaviv_iommu_context_put(old_context);
1870 + }
1871 +
1872 +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
1873 +index cb1faaac380a3..519948637186e 100644
1874 +--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
1875 ++++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
1876 +@@ -304,8 +304,7 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
1877 + list_del(&mapping->obj_node);
1878 + }
1879 +
1880 +- etnaviv_iommu_context_get(mmu_context);
1881 +- mapping->context = mmu_context;
1882 ++ mapping->context = etnaviv_iommu_context_get(mmu_context);
1883 + mapping->use = 1;
1884 +
1885 + ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
1886 +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
1887 +index 1ba83a90cdef6..7085b08b1db42 100644
1888 +--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
1889 ++++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
1890 +@@ -534,8 +534,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
1891 + goto err_submit_objects;
1892 +
1893 + submit->ctx = file->driver_priv;
1894 +- etnaviv_iommu_context_get(submit->ctx->mmu);
1895 +- submit->mmu_context = submit->ctx->mmu;
1896 ++ submit->mmu_context = etnaviv_iommu_context_get(submit->ctx->mmu);
1897 + submit->exec_state = args->exec_state;
1898 + submit->flags = args->flags;
1899 +
1900 +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
1901 +index 85de8551ce866..db35736d47af2 100644
1902 +--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
1903 ++++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
1904 +@@ -545,6 +545,12 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
1905 + /* We rely on the GPU running, so program the clock */
1906 + etnaviv_gpu_update_clock(gpu);
1907 +
1908 ++ gpu->fe_running = false;
1909 ++ gpu->exec_state = -1;
1910 ++ if (gpu->mmu_context)
1911 ++ etnaviv_iommu_context_put(gpu->mmu_context);
1912 ++ gpu->mmu_context = NULL;
1913 ++
1914 + return 0;
1915 + }
1916 +
1917 +@@ -607,19 +613,23 @@ void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
1918 + VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE |
1919 + VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch));
1920 + }
1921 ++
1922 ++ gpu->fe_running = true;
1923 + }
1924 +
1925 +-static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu)
1926 ++static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
1927 ++ struct etnaviv_iommu_context *context)
1928 + {
1929 +- u32 address = etnaviv_cmdbuf_get_va(&gpu->buffer,
1930 +- &gpu->mmu_context->cmdbuf_mapping);
1931 + u16 prefetch;
1932 ++ u32 address;
1933 +
1934 + /* setup the MMU */
1935 +- etnaviv_iommu_restore(gpu, gpu->mmu_context);
1936 ++ etnaviv_iommu_restore(gpu, context);
1937 +
1938 + /* Start command processor */
1939 + prefetch = etnaviv_buffer_init(gpu);
1940 ++ address = etnaviv_cmdbuf_get_va(&gpu->buffer,
1941 ++ &gpu->mmu_context->cmdbuf_mapping);
1942 +
1943 + etnaviv_gpu_start_fe(gpu, address, prefetch);
1944 + }
1945 +@@ -790,7 +800,6 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
1946 + /* Now program the hardware */
1947 + mutex_lock(&gpu->lock);
1948 + etnaviv_gpu_hw_init(gpu);
1949 +- gpu->exec_state = -1;
1950 + mutex_unlock(&gpu->lock);
1951 +
1952 + pm_runtime_mark_last_busy(gpu->dev);
1953 +@@ -994,8 +1003,6 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
1954 + spin_unlock(&gpu->event_spinlock);
1955 +
1956 + etnaviv_gpu_hw_init(gpu);
1957 +- gpu->exec_state = -1;
1958 +- gpu->mmu_context = NULL;
1959 +
1960 + mutex_unlock(&gpu->lock);
1961 + pm_runtime_mark_last_busy(gpu->dev);
1962 +@@ -1306,14 +1313,12 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
1963 + goto out_unlock;
1964 + }
1965 +
1966 +- if (!gpu->mmu_context) {
1967 +- etnaviv_iommu_context_get(submit->mmu_context);
1968 +- gpu->mmu_context = submit->mmu_context;
1969 +- etnaviv_gpu_start_fe_idleloop(gpu);
1970 +- } else {
1971 +- etnaviv_iommu_context_get(gpu->mmu_context);
1972 +- submit->prev_mmu_context = gpu->mmu_context;
1973 +- }
1974 ++ if (!gpu->fe_running)
1975 ++ etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context);
1976 ++
1977 ++ if (submit->prev_mmu_context)
1978 ++ etnaviv_iommu_context_put(submit->prev_mmu_context);
1979 ++ submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context);
1980 +
1981 + if (submit->nr_pmrs) {
1982 + gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
1983 +@@ -1530,7 +1535,7 @@ int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
1984 +
1985 + static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
1986 + {
1987 +- if (gpu->initialized && gpu->mmu_context) {
1988 ++ if (gpu->initialized && gpu->fe_running) {
1989 + /* Replace the last WAIT with END */
1990 + mutex_lock(&gpu->lock);
1991 + etnaviv_buffer_end(gpu);
1992 +@@ -1543,8 +1548,7 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
1993 + */
1994 + etnaviv_gpu_wait_idle(gpu, 100);
1995 +
1996 +- etnaviv_iommu_context_put(gpu->mmu_context);
1997 +- gpu->mmu_context = NULL;
1998 ++ gpu->fe_running = false;
1999 + }
2000 +
2001 + gpu->exec_state = -1;
2002 +@@ -1692,6 +1696,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
2003 + etnaviv_gpu_hw_suspend(gpu);
2004 + #endif
2005 +
2006 ++ if (gpu->mmu_context)
2007 ++ etnaviv_iommu_context_put(gpu->mmu_context);
2008 ++
2009 + if (gpu->initialized) {
2010 + etnaviv_cmdbuf_free(&gpu->buffer);
2011 + etnaviv_iommu_global_fini(gpu);
2012 +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
2013 +index 8f9bd4edc96a5..02478c75f8968 100644
2014 +--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
2015 ++++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
2016 +@@ -101,6 +101,7 @@ struct etnaviv_gpu {
2017 + struct workqueue_struct *wq;
2018 + struct drm_gpu_scheduler sched;
2019 + bool initialized;
2020 ++ bool fe_running;
2021 +
2022 + /* 'ring'-buffer: */
2023 + struct etnaviv_cmdbuf buffer;
2024 +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
2025 +index 1a7c89a67bea3..afe5dd6a9925b 100644
2026 +--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
2027 ++++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
2028 +@@ -92,6 +92,10 @@ static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu,
2029 + struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
2030 + u32 pgtable;
2031 +
2032 ++ if (gpu->mmu_context)
2033 ++ etnaviv_iommu_context_put(gpu->mmu_context);
2034 ++ gpu->mmu_context = etnaviv_iommu_context_get(context);
2035 ++
2036 + /* set base addresses */
2037 + gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base);
2038 + gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base);
2039 +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
2040 +index f8bf488e9d717..d664ae29ae209 100644
2041 +--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
2042 ++++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
2043 +@@ -172,6 +172,10 @@ static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
2044 + if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
2045 + return;
2046 +
2047 ++ if (gpu->mmu_context)
2048 ++ etnaviv_iommu_context_put(gpu->mmu_context);
2049 ++ gpu->mmu_context = etnaviv_iommu_context_get(context);
2050 ++
2051 + prefetch = etnaviv_buffer_config_mmuv2(gpu,
2052 + (u32)v2_context->mtlb_dma,
2053 + (u32)context->global->bad_page_dma);
2054 +@@ -192,6 +196,10 @@ static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu,
2055 + if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
2056 + return;
2057 +
2058 ++ if (gpu->mmu_context)
2059 ++ etnaviv_iommu_context_put(gpu->mmu_context);
2060 ++ gpu->mmu_context = etnaviv_iommu_context_get(context);
2061 ++
2062 + gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
2063 + lower_32_bits(context->global->v2.pta_dma));
2064 + gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
2065 +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
2066 +index 3607d348c2980..707f5c1a58740 100644
2067 +--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
2068 ++++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
2069 +@@ -204,6 +204,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
2070 + */
2071 + list_for_each_entry_safe(m, n, &list, scan_node) {
2072 + etnaviv_iommu_remove_mapping(context, m);
2073 ++ etnaviv_iommu_context_put(m->context);
2074 + m->context = NULL;
2075 + list_del_init(&m->mmu_node);
2076 + list_del_init(&m->scan_node);
2077 +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
2078 +index d1d6902fd13be..e4a0b7d09c2ea 100644
2079 +--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
2080 ++++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
2081 +@@ -105,9 +105,11 @@ void etnaviv_iommu_dump(struct etnaviv_iommu_context *ctx, void *buf);
2082 + struct etnaviv_iommu_context *
2083 + etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
2084 + struct etnaviv_cmdbuf_suballoc *suballoc);
2085 +-static inline void etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
2086 ++static inline struct etnaviv_iommu_context *
2087 ++etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
2088 + {
2089 + kref_get(&ctx->refcount);
2090 ++ return ctx;
2091 + }
2092 + void etnaviv_iommu_context_put(struct etnaviv_iommu_context *ctx);
2093 + void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
2094 +diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c
2095 +index 58b89ec11b0eb..a3c9d8b9e1a18 100644
2096 +--- a/drivers/gpu/drm/exynos/exynos_drm_dma.c
2097 ++++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c
2098 +@@ -140,6 +140,8 @@ int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
2099 + EXYNOS_DEV_ADDR_START, EXYNOS_DEV_ADDR_SIZE);
2100 + else if (IS_ENABLED(CONFIG_IOMMU_DMA))
2101 + mapping = iommu_get_domain_for_dev(priv->dma_dev);
2102 ++ else
2103 ++ mapping = ERR_PTR(-ENODEV);
2104 +
2105 + if (IS_ERR(mapping))
2106 + return PTR_ERR(mapping);
2107 +diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
2108 +index 20194d86d0339..4f0c6d58e06fa 100644
2109 +--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
2110 ++++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
2111 +@@ -108,13 +108,6 @@ static void mdp4_disable_commit(struct msm_kms *kms)
2112 +
2113 + static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
2114 + {
2115 +- int i;
2116 +- struct drm_crtc *crtc;
2117 +- struct drm_crtc_state *crtc_state;
2118 +-
2119 +- /* see 119ecb7fd */
2120 +- for_each_new_crtc_in_state(state, crtc, crtc_state, i)
2121 +- drm_crtc_vblank_get(crtc);
2122 + }
2123 +
2124 + static void mdp4_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
2125 +@@ -133,12 +126,6 @@ static void mdp4_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
2126 +
2127 + static void mdp4_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
2128 + {
2129 +- struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
2130 +- struct drm_crtc *crtc;
2131 +-
2132 +- /* see 119ecb7fd */
2133 +- for_each_crtc_mask(mdp4_kms->dev, crtc, crtc_mask)
2134 +- drm_crtc_vblank_put(crtc);
2135 + }
2136 +
2137 + static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
2138 +@@ -418,6 +405,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
2139 + {
2140 + struct platform_device *pdev = to_platform_device(dev->dev);
2141 + struct mdp4_platform_config *config = mdp4_get_config(pdev);
2142 ++ struct msm_drm_private *priv = dev->dev_private;
2143 + struct mdp4_kms *mdp4_kms;
2144 + struct msm_kms *kms = NULL;
2145 + struct msm_gem_address_space *aspace;
2146 +@@ -432,7 +420,8 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
2147 +
2148 + mdp_kms_init(&mdp4_kms->base, &kms_funcs);
2149 +
2150 +- kms = &mdp4_kms->base.base;
2151 ++ priv->kms = &mdp4_kms->base.base;
2152 ++ kms = priv->kms;
2153 +
2154 + mdp4_kms->dev = dev;
2155 +
2156 +diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
2157 +index bfd503d220881..8a014dc115712 100644
2158 +--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
2159 ++++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
2160 +@@ -52,25 +52,16 @@ static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
2161 + }
2162 +
2163 + static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
2164 +- u64 iova, size_t size)
2165 ++ u64 iova, u64 size)
2166 + {
2167 + u8 region_width;
2168 + u64 region = iova & PAGE_MASK;
2169 +- /*
2170 +- * fls returns:
2171 +- * 1 .. 32
2172 +- *
2173 +- * 10 + fls(num_pages)
2174 +- * results in the range (11 .. 42)
2175 +- */
2176 +-
2177 +- size = round_up(size, PAGE_SIZE);
2178 +
2179 +- region_width = 10 + fls(size >> PAGE_SHIFT);
2180 +- if ((size >> PAGE_SHIFT) != (1ul << (region_width - 11))) {
2181 +- /* not pow2, so must go up to the next pow2 */
2182 +- region_width += 1;
2183 +- }
2184 ++ /* The size is encoded as ceil(log2) minus(1), which may be calculated
2185 ++ * with fls. The size must be clamped to hardware bounds.
2186 ++ */
2187 ++ size = max_t(u64, size, AS_LOCK_REGION_MIN_SIZE);
2188 ++ region_width = fls64(size - 1) - 1;
2189 + region |= region_width;
2190 +
2191 + /* Lock the region that needs to be updated */
2192 +@@ -81,7 +72,7 @@ static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
2193 +
2194 +
2195 + static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
2196 +- u64 iova, size_t size, u32 op)
2197 ++ u64 iova, u64 size, u32 op)
2198 + {
2199 + if (as_nr < 0)
2200 + return 0;
2201 +@@ -98,7 +89,7 @@ static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
2202 +
2203 + static int mmu_hw_do_operation(struct panfrost_device *pfdev,
2204 + struct panfrost_mmu *mmu,
2205 +- u64 iova, size_t size, u32 op)
2206 ++ u64 iova, u64 size, u32 op)
2207 + {
2208 + int ret;
2209 +
2210 +@@ -115,7 +106,7 @@ static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_m
2211 + u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
2212 + u64 memattr = cfg->arm_mali_lpae_cfg.memattr;
2213 +
2214 +- mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
2215 ++ mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
2216 +
2217 + mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
2218 + mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);
2219 +@@ -131,7 +122,7 @@ static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_m
2220 +
2221 + static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
2222 + {
2223 +- mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
2224 ++ mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
2225 +
2226 + mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
2227 + mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);
2228 +@@ -231,7 +222,7 @@ static size_t get_pgsize(u64 addr, size_t size)
2229 +
2230 + static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
2231 + struct panfrost_mmu *mmu,
2232 +- u64 iova, size_t size)
2233 ++ u64 iova, u64 size)
2234 + {
2235 + if (mmu->as < 0)
2236 + return;
2237 +diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h
2238 +index eddaa62ad8b0e..2ae3a4d301d39 100644
2239 +--- a/drivers/gpu/drm/panfrost/panfrost_regs.h
2240 ++++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
2241 +@@ -318,6 +318,8 @@
2242 + #define AS_FAULTSTATUS_ACCESS_TYPE_READ (0x2 << 8)
2243 + #define AS_FAULTSTATUS_ACCESS_TYPE_WRITE (0x3 << 8)
2244 +
2245 ++#define AS_LOCK_REGION_MIN_SIZE (1ULL << 15)
2246 ++
2247 + #define gpu_write(dev, reg, data) writel(data, dev->iomem + reg)
2248 + #define gpu_read(dev, reg) readl(dev->iomem + reg)
2249 +
2250 +diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
2251 +index 6d551ae251c0a..ea4c97f5b0736 100644
2252 +--- a/drivers/hid/hid-input.c
2253 ++++ b/drivers/hid/hid-input.c
2254 +@@ -415,8 +415,6 @@ static int hidinput_get_battery_property(struct power_supply *psy,
2255 +
2256 + if (dev->battery_status == HID_BATTERY_UNKNOWN)
2257 + val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
2258 +- else if (dev->battery_capacity == 100)
2259 +- val->intval = POWER_SUPPLY_STATUS_FULL;
2260 + else
2261 + val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
2262 + break;
2263 +diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
2264 +index 6f7a3702b5fba..ac076ac73de5d 100644
2265 +--- a/drivers/hid/i2c-hid/i2c-hid-core.c
2266 ++++ b/drivers/hid/i2c-hid/i2c-hid-core.c
2267 +@@ -178,8 +178,6 @@ static const struct i2c_hid_quirks {
2268 + I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
2269 + { I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_3118,
2270 + I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
2271 +- { USB_VENDOR_ID_ELAN, HID_ANY_ID,
2272 +- I2C_HID_QUIRK_BOGUS_IRQ },
2273 + { USB_VENDOR_ID_ALPS_JP, HID_ANY_ID,
2274 + I2C_HID_QUIRK_RESET_ON_RESUME },
2275 + { I2C_VENDOR_ID_SYNAPTICS, I2C_PRODUCT_ID_SYNAPTICS_SYNA2393,
2276 +@@ -190,7 +188,8 @@ static const struct i2c_hid_quirks {
2277 + * Sending the wakeup after reset actually break ELAN touchscreen controller
2278 + */
2279 + { USB_VENDOR_ID_ELAN, HID_ANY_ID,
2280 +- I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET },
2281 ++ I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET |
2282 ++ I2C_HID_QUIRK_BOGUS_IRQ },
2283 + { 0, 0 }
2284 + };
2285 +
2286 +diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c
2287 +index e6c022e1dc1cf..17cc8b3fc5d82 100644
2288 +--- a/drivers/iio/dac/ad5624r_spi.c
2289 ++++ b/drivers/iio/dac/ad5624r_spi.c
2290 +@@ -229,7 +229,7 @@ static int ad5624r_probe(struct spi_device *spi)
2291 + if (!indio_dev)
2292 + return -ENOMEM;
2293 + st = iio_priv(indio_dev);
2294 +- st->reg = devm_regulator_get(&spi->dev, "vcc");
2295 ++ st->reg = devm_regulator_get_optional(&spi->dev, "vref");
2296 + if (!IS_ERR(st->reg)) {
2297 + ret = regulator_enable(st->reg);
2298 + if (ret)
2299 +@@ -240,6 +240,22 @@ static int ad5624r_probe(struct spi_device *spi)
2300 + goto error_disable_reg;
2301 +
2302 + voltage_uv = ret;
2303 ++ } else {
2304 ++ if (PTR_ERR(st->reg) != -ENODEV)
2305 ++ return PTR_ERR(st->reg);
2306 ++ /* Backwards compatibility. This naming is not correct */
2307 ++ st->reg = devm_regulator_get_optional(&spi->dev, "vcc");
2308 ++ if (!IS_ERR(st->reg)) {
2309 ++ ret = regulator_enable(st->reg);
2310 ++ if (ret)
2311 ++ return ret;
2312 ++
2313 ++ ret = regulator_get_voltage(st->reg);
2314 ++ if (ret < 0)
2315 ++ goto error_disable_reg;
2316 ++
2317 ++ voltage_uv = ret;
2318 ++ }
2319 + }
2320 +
2321 + spi_set_drvdata(spi, indio_dev);
2322 +diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
2323 +index da8adadf47559..75b6da00065a3 100644
2324 +--- a/drivers/infiniband/core/iwcm.c
2325 ++++ b/drivers/infiniband/core/iwcm.c
2326 +@@ -1187,29 +1187,34 @@ static int __init iw_cm_init(void)
2327 +
2328 + ret = iwpm_init(RDMA_NL_IWCM);
2329 + if (ret)
2330 +- pr_err("iw_cm: couldn't init iwpm\n");
2331 +- else
2332 +- rdma_nl_register(RDMA_NL_IWCM, iwcm_nl_cb_table);
2333 ++ return ret;
2334 ++
2335 + iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", 0);
2336 + if (!iwcm_wq)
2337 +- return -ENOMEM;
2338 ++ goto err_alloc;
2339 +
2340 + iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm",
2341 + iwcm_ctl_table);
2342 + if (!iwcm_ctl_table_hdr) {
2343 + pr_err("iw_cm: couldn't register sysctl paths\n");
2344 +- destroy_workqueue(iwcm_wq);
2345 +- return -ENOMEM;
2346 ++ goto err_sysctl;
2347 + }
2348 +
2349 ++ rdma_nl_register(RDMA_NL_IWCM, iwcm_nl_cb_table);
2350 + return 0;
2351 ++
2352 ++err_sysctl:
2353 ++ destroy_workqueue(iwcm_wq);
2354 ++err_alloc:
2355 ++ iwpm_exit(RDMA_NL_IWCM);
2356 ++ return -ENOMEM;
2357 + }
2358 +
2359 + static void __exit iw_cm_cleanup(void)
2360 + {
2361 ++ rdma_nl_unregister(RDMA_NL_IWCM);
2362 + unregister_net_sysctl_table(iwcm_ctl_table_hdr);
2363 + destroy_workqueue(iwcm_wq);
2364 +- rdma_nl_unregister(RDMA_NL_IWCM);
2365 + iwpm_exit(RDMA_NL_IWCM);
2366 + }
2367 +
2368 +diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
2369 +index 4edae89e8e3ca..17f1e59ab12ee 100644
2370 +--- a/drivers/infiniband/hw/efa/efa_verbs.c
2371 ++++ b/drivers/infiniband/hw/efa/efa_verbs.c
2372 +@@ -745,7 +745,6 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
2373 + rq_entry_inserted = true;
2374 + qp->qp_handle = create_qp_resp.qp_handle;
2375 + qp->ibqp.qp_num = create_qp_resp.qp_num;
2376 +- qp->ibqp.qp_type = init_attr->qp_type;
2377 + qp->max_send_wr = init_attr->cap.max_send_wr;
2378 + qp->max_recv_wr = init_attr->cap.max_recv_wr;
2379 + qp->max_send_sge = init_attr->cap.max_send_sge;
2380 +diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
2381 +index fbff6b2f00e71..1256dbd5b2ef0 100644
2382 +--- a/drivers/infiniband/hw/hfi1/init.c
2383 ++++ b/drivers/infiniband/hw/hfi1/init.c
2384 +@@ -664,12 +664,7 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
2385 +
2386 + ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
2387 + ppd->part_enforce |= HFI1_PART_ENFORCE_IN;
2388 +-
2389 +- if (loopback) {
2390 +- dd_dev_err(dd, "Faking data partition 0x8001 in idx %u\n",
2391 +- !default_pkey_idx);
2392 +- ppd->pkeys[!default_pkey_idx] = 0x8001;
2393 +- }
2394 ++ ppd->pkeys[0] = 0x8001;
2395 +
2396 + INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
2397 + INIT_WORK(&ppd->link_up_work, handle_link_up);
2398 +diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
2399 +index d85648b9c247a..571c04e70343a 100644
2400 +--- a/drivers/md/dm-crypt.c
2401 ++++ b/drivers/md/dm-crypt.c
2402 +@@ -2092,7 +2092,12 @@ static void *crypt_page_alloc(gfp_t gfp_mask, void *pool_data)
2403 + struct crypt_config *cc = pool_data;
2404 + struct page *page;
2405 +
2406 +- if (unlikely(percpu_counter_compare(&cc->n_allocated_pages, dm_crypt_pages_per_client) >= 0) &&
2407 ++ /*
2408 ++ * Note, percpu_counter_read_positive() may over (and under) estimate
2409 ++ * the current usage by at most (batch - 1) * num_online_cpus() pages,
2410 ++ * but avoids potential spinlock contention of an exact result.
2411 ++ */
2412 ++ if (unlikely(percpu_counter_read_positive(&cc->n_allocated_pages) >= dm_crypt_pages_per_client) &&
2413 + likely(gfp_mask & __GFP_NORETRY))
2414 + return NULL;
2415 +
2416 +diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
2417 +index 082796534b0ae..bb02354a48b81 100644
2418 +--- a/drivers/media/dvb-frontends/dib8000.c
2419 ++++ b/drivers/media/dvb-frontends/dib8000.c
2420 +@@ -2107,32 +2107,55 @@ static void dib8000_load_ana_fe_coefs(struct dib8000_state *state, const s16 *an
2421 + dib8000_write_word(state, 117 + mode, ana_fe[mode]);
2422 + }
2423 +
2424 +-static const u16 lut_prbs_2k[14] = {
2425 +- 0, 0x423, 0x009, 0x5C7, 0x7A6, 0x3D8, 0x527, 0x7FF, 0x79B, 0x3D6, 0x3A2, 0x53B, 0x2F4, 0x213
2426 ++static const u16 lut_prbs_2k[13] = {
2427 ++ 0x423, 0x009, 0x5C7,
2428 ++ 0x7A6, 0x3D8, 0x527,
2429 ++ 0x7FF, 0x79B, 0x3D6,
2430 ++ 0x3A2, 0x53B, 0x2F4,
2431 ++ 0x213
2432 + };
2433 +-static const u16 lut_prbs_4k[14] = {
2434 +- 0, 0x208, 0x0C3, 0x7B9, 0x423, 0x5C7, 0x3D8, 0x7FF, 0x3D6, 0x53B, 0x213, 0x029, 0x0D0, 0x48E
2435 ++
2436 ++static const u16 lut_prbs_4k[13] = {
2437 ++ 0x208, 0x0C3, 0x7B9,
2438 ++ 0x423, 0x5C7, 0x3D8,
2439 ++ 0x7FF, 0x3D6, 0x53B,
2440 ++ 0x213, 0x029, 0x0D0,
2441 ++ 0x48E
2442 + };
2443 +-static const u16 lut_prbs_8k[14] = {
2444 +- 0, 0x740, 0x069, 0x7DD, 0x208, 0x7B9, 0x5C7, 0x7FF, 0x53B, 0x029, 0x48E, 0x4C4, 0x367, 0x684
2445 ++
2446 ++static const u16 lut_prbs_8k[13] = {
2447 ++ 0x740, 0x069, 0x7DD,
2448 ++ 0x208, 0x7B9, 0x5C7,
2449 ++ 0x7FF, 0x53B, 0x029,
2450 ++ 0x48E, 0x4C4, 0x367,
2451 ++ 0x684
2452 + };
2453 +
2454 + static u16 dib8000_get_init_prbs(struct dib8000_state *state, u16 subchannel)
2455 + {
2456 + int sub_channel_prbs_group = 0;
2457 ++ int prbs_group;
2458 +
2459 +- sub_channel_prbs_group = (subchannel / 3) + 1;
2460 +- dprintk("sub_channel_prbs_group = %d , subchannel =%d prbs = 0x%04x\n", sub_channel_prbs_group, subchannel, lut_prbs_8k[sub_channel_prbs_group]);
2461 ++ sub_channel_prbs_group = subchannel / 3;
2462 ++ if (sub_channel_prbs_group >= ARRAY_SIZE(lut_prbs_2k))
2463 ++ return 0;
2464 +
2465 + switch (state->fe[0]->dtv_property_cache.transmission_mode) {
2466 + case TRANSMISSION_MODE_2K:
2467 +- return lut_prbs_2k[sub_channel_prbs_group];
2468 ++ prbs_group = lut_prbs_2k[sub_channel_prbs_group];
2469 ++ break;
2470 + case TRANSMISSION_MODE_4K:
2471 +- return lut_prbs_4k[sub_channel_prbs_group];
2472 ++ prbs_group = lut_prbs_4k[sub_channel_prbs_group];
2473 ++ break;
2474 + default:
2475 + case TRANSMISSION_MODE_8K:
2476 +- return lut_prbs_8k[sub_channel_prbs_group];
2477 ++ prbs_group = lut_prbs_8k[sub_channel_prbs_group];
2478 + }
2479 ++
2480 ++ dprintk("sub_channel_prbs_group = %d , subchannel =%d prbs = 0x%04x\n",
2481 ++ sub_channel_prbs_group, subchannel, prbs_group);
2482 ++
2483 ++ return prbs_group;
2484 + }
2485 +
2486 + static void dib8000_set_13seg_channel(struct dib8000_state *state)
2487 +@@ -2409,10 +2432,8 @@ static void dib8000_set_isdbt_common_channel(struct dib8000_state *state, u8 seq
2488 + /* TSB or ISDBT ? apply it now */
2489 + if (c->isdbt_sb_mode) {
2490 + dib8000_set_sb_channel(state);
2491 +- if (c->isdbt_sb_subchannel < 14)
2492 +- init_prbs = dib8000_get_init_prbs(state, c->isdbt_sb_subchannel);
2493 +- else
2494 +- init_prbs = 0;
2495 ++ init_prbs = dib8000_get_init_prbs(state,
2496 ++ c->isdbt_sb_subchannel);
2497 + } else {
2498 + dib8000_set_13seg_channel(state);
2499 + init_prbs = 0xfff;
2500 +@@ -3004,6 +3025,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
2501 +
2502 + unsigned long *timeout = &state->timeout;
2503 + unsigned long now = jiffies;
2504 ++ u16 init_prbs;
2505 + #ifdef DIB8000_AGC_FREEZE
2506 + u16 agc1, agc2;
2507 + #endif
2508 +@@ -3302,8 +3324,10 @@ static int dib8000_tune(struct dvb_frontend *fe)
2509 + break;
2510 +
2511 + case CT_DEMOD_STEP_11: /* 41 : init prbs autosearch */
2512 +- if (state->subchannel <= 41) {
2513 +- dib8000_set_subchannel_prbs(state, dib8000_get_init_prbs(state, state->subchannel));
2514 ++ init_prbs = dib8000_get_init_prbs(state, state->subchannel);
2515 ++
2516 ++ if (init_prbs) {
2517 ++ dib8000_set_subchannel_prbs(state, init_prbs);
2518 + *tune_state = CT_DEMOD_STEP_9;
2519 + } else {
2520 + *tune_state = CT_DEMOD_STOP;
2521 +diff --git a/drivers/media/i2c/imx258.c b/drivers/media/i2c/imx258.c
2522 +index f86ae18bc104b..ffaa4a91e5713 100644
2523 +--- a/drivers/media/i2c/imx258.c
2524 ++++ b/drivers/media/i2c/imx258.c
2525 +@@ -22,7 +22,7 @@
2526 + #define IMX258_CHIP_ID 0x0258
2527 +
2528 + /* V_TIMING internal */
2529 +-#define IMX258_VTS_30FPS 0x0c98
2530 ++#define IMX258_VTS_30FPS 0x0c50
2531 + #define IMX258_VTS_30FPS_2K 0x0638
2532 + #define IMX258_VTS_30FPS_VGA 0x034c
2533 + #define IMX258_VTS_MAX 0xffff
2534 +@@ -46,7 +46,7 @@
2535 + /* Analog gain control */
2536 + #define IMX258_REG_ANALOG_GAIN 0x0204
2537 + #define IMX258_ANA_GAIN_MIN 0
2538 +-#define IMX258_ANA_GAIN_MAX 0x1fff
2539 ++#define IMX258_ANA_GAIN_MAX 480
2540 + #define IMX258_ANA_GAIN_STEP 1
2541 + #define IMX258_ANA_GAIN_DEFAULT 0x0
2542 +
2543 +diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
2544 +index 1088161498df0..18a2027ba1450 100644
2545 +--- a/drivers/media/i2c/tda1997x.c
2546 ++++ b/drivers/media/i2c/tda1997x.c
2547 +@@ -1695,14 +1695,15 @@ static int tda1997x_query_dv_timings(struct v4l2_subdev *sd,
2548 + struct v4l2_dv_timings *timings)
2549 + {
2550 + struct tda1997x_state *state = to_state(sd);
2551 ++ int ret;
2552 +
2553 + v4l_dbg(1, debug, state->client, "%s\n", __func__);
2554 + memset(timings, 0, sizeof(struct v4l2_dv_timings));
2555 + mutex_lock(&state->lock);
2556 +- tda1997x_detect_std(state, timings);
2557 ++ ret = tda1997x_detect_std(state, timings);
2558 + mutex_unlock(&state->lock);
2559 +
2560 +- return 0;
2561 ++ return ret;
2562 + }
2563 +
2564 + static const struct v4l2_subdev_video_ops tda1997x_video_ops = {
2565 +diff --git a/drivers/media/platform/tegra-cec/tegra_cec.c b/drivers/media/platform/tegra-cec/tegra_cec.c
2566 +index a632602131f21..efb80a78d2fa2 100644
2567 +--- a/drivers/media/platform/tegra-cec/tegra_cec.c
2568 ++++ b/drivers/media/platform/tegra-cec/tegra_cec.c
2569 +@@ -366,7 +366,11 @@ static int tegra_cec_probe(struct platform_device *pdev)
2570 + return -ENOENT;
2571 + }
2572 +
2573 +- clk_prepare_enable(cec->clk);
2574 ++ ret = clk_prepare_enable(cec->clk);
2575 ++ if (ret) {
2576 ++ dev_err(&pdev->dev, "Unable to prepare clock for CEC\n");
2577 ++ return ret;
2578 ++ }
2579 +
2580 + /* set context info. */
2581 + cec->dev = &pdev->dev;
2582 +@@ -446,9 +450,7 @@ static int tegra_cec_resume(struct platform_device *pdev)
2583 +
2584 + dev_notice(&pdev->dev, "Resuming\n");
2585 +
2586 +- clk_prepare_enable(cec->clk);
2587 +-
2588 +- return 0;
2589 ++ return clk_prepare_enable(cec->clk);
2590 + }
2591 + #endif
2592 +
2593 +diff --git a/drivers/media/rc/rc-loopback.c b/drivers/media/rc/rc-loopback.c
2594 +index ef8b83b707df0..13ab7312fa3b5 100644
2595 +--- a/drivers/media/rc/rc-loopback.c
2596 ++++ b/drivers/media/rc/rc-loopback.c
2597 +@@ -42,7 +42,7 @@ static int loop_set_tx_mask(struct rc_dev *dev, u32 mask)
2598 +
2599 + if ((mask & (RXMASK_REGULAR | RXMASK_LEARNING)) != mask) {
2600 + dprintk("invalid tx mask: %u\n", mask);
2601 +- return -EINVAL;
2602 ++ return 2;
2603 + }
2604 +
2605 + dprintk("setting tx mask: %u\n", mask);
2606 +diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
2607 +index 7d60dd3b0bd85..db7f8f8ee2f9f 100644
2608 +--- a/drivers/media/usb/uvc/uvc_v4l2.c
2609 ++++ b/drivers/media/usb/uvc/uvc_v4l2.c
2610 +@@ -894,8 +894,8 @@ static int uvc_ioctl_g_input(struct file *file, void *fh, unsigned int *input)
2611 + {
2612 + struct uvc_fh *handle = fh;
2613 + struct uvc_video_chain *chain = handle->chain;
2614 ++ u8 *buf;
2615 + int ret;
2616 +- u8 i;
2617 +
2618 + if (chain->selector == NULL ||
2619 + (chain->dev->quirks & UVC_QUIRK_IGNORE_SELECTOR_UNIT)) {
2620 +@@ -903,22 +903,27 @@ static int uvc_ioctl_g_input(struct file *file, void *fh, unsigned int *input)
2621 + return 0;
2622 + }
2623 +
2624 ++ buf = kmalloc(1, GFP_KERNEL);
2625 ++ if (!buf)
2626 ++ return -ENOMEM;
2627 ++
2628 + ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR, chain->selector->id,
2629 + chain->dev->intfnum, UVC_SU_INPUT_SELECT_CONTROL,
2630 +- &i, 1);
2631 +- if (ret < 0)
2632 +- return ret;
2633 ++ buf, 1);
2634 ++ if (!ret)
2635 ++ *input = *buf - 1;
2636 +
2637 +- *input = i - 1;
2638 +- return 0;
2639 ++ kfree(buf);
2640 ++
2641 ++ return ret;
2642 + }
2643 +
2644 + static int uvc_ioctl_s_input(struct file *file, void *fh, unsigned int input)
2645 + {
2646 + struct uvc_fh *handle = fh;
2647 + struct uvc_video_chain *chain = handle->chain;
2648 ++ u8 *buf;
2649 + int ret;
2650 +- u32 i;
2651 +
2652 + ret = uvc_acquire_privileges(handle);
2653 + if (ret < 0)
2654 +@@ -934,10 +939,17 @@ static int uvc_ioctl_s_input(struct file *file, void *fh, unsigned int input)
2655 + if (input >= chain->selector->bNrInPins)
2656 + return -EINVAL;
2657 +
2658 +- i = input + 1;
2659 +- return uvc_query_ctrl(chain->dev, UVC_SET_CUR, chain->selector->id,
2660 +- chain->dev->intfnum, UVC_SU_INPUT_SELECT_CONTROL,
2661 +- &i, 1);
2662 ++ buf = kmalloc(1, GFP_KERNEL);
2663 ++ if (!buf)
2664 ++ return -ENOMEM;
2665 ++
2666 ++ *buf = input + 1;
2667 ++ ret = uvc_query_ctrl(chain->dev, UVC_SET_CUR, chain->selector->id,
2668 ++ chain->dev->intfnum, UVC_SU_INPUT_SELECT_CONTROL,
2669 ++ buf, 1);
2670 ++ kfree(buf);
2671 ++
2672 ++ return ret;
2673 + }
2674 +
2675 + static int uvc_ioctl_queryctrl(struct file *file, void *fh,
2676 +diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
2677 +index 4f23e939ead0b..60454e1b727e9 100644
2678 +--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
2679 ++++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
2680 +@@ -196,7 +196,7 @@ bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t,
2681 + if (!v4l2_valid_dv_timings(t, cap, fnc, fnc_handle))
2682 + return false;
2683 +
2684 +- for (i = 0; i < v4l2_dv_timings_presets[i].bt.width; i++) {
2685 ++ for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) {
2686 + if (v4l2_valid_dv_timings(v4l2_dv_timings_presets + i, cap,
2687 + fnc, fnc_handle) &&
2688 + v4l2_match_dv_timings(t, v4l2_dv_timings_presets + i,
2689 +@@ -218,7 +218,7 @@ bool v4l2_find_dv_timings_cea861_vic(struct v4l2_dv_timings *t, u8 vic)
2690 + {
2691 + unsigned int i;
2692 +
2693 +- for (i = 0; i < v4l2_dv_timings_presets[i].bt.width; i++) {
2694 ++ for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) {
2695 + const struct v4l2_bt_timings *bt =
2696 + &v4l2_dv_timings_presets[i].bt;
2697 +
2698 +diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
2699 +index 3e9dc92cb467b..842de1f352dfc 100644
2700 +--- a/drivers/mfd/ab8500-core.c
2701 ++++ b/drivers/mfd/ab8500-core.c
2702 +@@ -493,7 +493,7 @@ static int ab8500_handle_hierarchical_line(struct ab8500 *ab8500,
2703 + if (line == AB8540_INT_GPIO43F || line == AB8540_INT_GPIO44F)
2704 + line += 1;
2705 +
2706 +- handle_nested_irq(irq_create_mapping(ab8500->domain, line));
2707 ++ handle_nested_irq(irq_find_mapping(ab8500->domain, line));
2708 + }
2709 +
2710 + return 0;
2711 +diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
2712 +index aa59496e43768..9db1000944c34 100644
2713 +--- a/drivers/mfd/axp20x.c
2714 ++++ b/drivers/mfd/axp20x.c
2715 +@@ -125,12 +125,13 @@ static const struct regmap_range axp288_writeable_ranges[] = {
2716 +
2717 + static const struct regmap_range axp288_volatile_ranges[] = {
2718 + regmap_reg_range(AXP20X_PWR_INPUT_STATUS, AXP288_POWER_REASON),
2719 ++ regmap_reg_range(AXP22X_PWR_OUT_CTRL1, AXP22X_ALDO3_V_OUT),
2720 + regmap_reg_range(AXP288_BC_GLOBAL, AXP288_BC_GLOBAL),
2721 + regmap_reg_range(AXP288_BC_DET_STAT, AXP20X_VBUS_IPSOUT_MGMT),
2722 + regmap_reg_range(AXP20X_CHRG_BAK_CTRL, AXP20X_CHRG_BAK_CTRL),
2723 + regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IPSOUT_V_HIGH_L),
2724 + regmap_reg_range(AXP20X_TIMER_CTRL, AXP20X_TIMER_CTRL),
2725 +- regmap_reg_range(AXP22X_GPIO_STATE, AXP22X_GPIO_STATE),
2726 ++ regmap_reg_range(AXP20X_GPIO1_CTRL, AXP22X_GPIO_STATE),
2727 + regmap_reg_range(AXP288_RT_BATT_V_H, AXP288_RT_BATT_V_L),
2728 + regmap_reg_range(AXP20X_FG_RES, AXP288_FG_CC_CAP_REG),
2729 + };
2730 +diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
2731 +index dfac6afa82ca5..f1f2ad9ff0b34 100644
2732 +--- a/drivers/mfd/db8500-prcmu.c
2733 ++++ b/drivers/mfd/db8500-prcmu.c
2734 +@@ -1695,22 +1695,20 @@ static long round_clock_rate(u8 clock, unsigned long rate)
2735 + }
2736 +
2737 + static const unsigned long db8500_armss_freqs[] = {
2738 +- 200000000,
2739 +- 400000000,
2740 +- 800000000,
2741 ++ 199680000,
2742 ++ 399360000,
2743 ++ 798720000,
2744 + 998400000
2745 + };
2746 +
2747 + /* The DB8520 has slightly higher ARMSS max frequency */
2748 + static const unsigned long db8520_armss_freqs[] = {
2749 +- 200000000,
2750 +- 400000000,
2751 +- 800000000,
2752 ++ 199680000,
2753 ++ 399360000,
2754 ++ 798720000,
2755 + 1152000000
2756 + };
2757 +
2758 +-
2759 +-
2760 + static long round_armss_rate(unsigned long rate)
2761 + {
2762 + unsigned long freq = 0;
2763 +diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
2764 +index 1aee3b3253fc9..508349399f8af 100644
2765 +--- a/drivers/mfd/stmpe.c
2766 ++++ b/drivers/mfd/stmpe.c
2767 +@@ -1091,7 +1091,7 @@ static irqreturn_t stmpe_irq(int irq, void *data)
2768 +
2769 + if (variant->id_val == STMPE801_ID ||
2770 + variant->id_val == STMPE1600_ID) {
2771 +- int base = irq_create_mapping(stmpe->domain, 0);
2772 ++ int base = irq_find_mapping(stmpe->domain, 0);
2773 +
2774 + handle_nested_irq(base);
2775 + return IRQ_HANDLED;
2776 +@@ -1119,7 +1119,7 @@ static irqreturn_t stmpe_irq(int irq, void *data)
2777 + while (status) {
2778 + int bit = __ffs(status);
2779 + int line = bank * 8 + bit;
2780 +- int nestedirq = irq_create_mapping(stmpe->domain, line);
2781 ++ int nestedirq = irq_find_mapping(stmpe->domain, line);
2782 +
2783 + handle_nested_irq(nestedirq);
2784 + status &= ~(1 << bit);
2785 +diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
2786 +index 67c9995bb1aa6..23cfbd050120d 100644
2787 +--- a/drivers/mfd/tc3589x.c
2788 ++++ b/drivers/mfd/tc3589x.c
2789 +@@ -187,7 +187,7 @@ again:
2790 +
2791 + while (status) {
2792 + int bit = __ffs(status);
2793 +- int virq = irq_create_mapping(tc3589x->domain, bit);
2794 ++ int virq = irq_find_mapping(tc3589x->domain, bit);
2795 +
2796 + handle_nested_irq(virq);
2797 + status &= ~(1 << bit);
2798 +diff --git a/drivers/mfd/tqmx86.c b/drivers/mfd/tqmx86.c
2799 +index 22d2f02d855c2..ccc5a9ac788c1 100644
2800 +--- a/drivers/mfd/tqmx86.c
2801 ++++ b/drivers/mfd/tqmx86.c
2802 +@@ -210,6 +210,8 @@ static int tqmx86_probe(struct platform_device *pdev)
2803 +
2804 + /* Assumes the IRQ resource is first. */
2805 + tqmx_gpio_resources[0].start = gpio_irq;
2806 ++ } else {
2807 ++ tqmx_gpio_resources[0].flags = 0;
2808 + }
2809 +
2810 + ocores_platfom_data.clock_khz = tqmx86_board_id_to_clk_rate(board_id);
2811 +diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c
2812 +index 6c3a619e26286..651a028bc519a 100644
2813 +--- a/drivers/mfd/wm8994-irq.c
2814 ++++ b/drivers/mfd/wm8994-irq.c
2815 +@@ -154,7 +154,7 @@ static irqreturn_t wm8994_edge_irq(int irq, void *data)
2816 + struct wm8994 *wm8994 = data;
2817 +
2818 + while (gpio_get_value_cansleep(wm8994->pdata.irq_gpio))
2819 +- handle_nested_irq(irq_create_mapping(wm8994->edge_irq, 0));
2820 ++ handle_nested_irq(irq_find_mapping(wm8994->edge_irq, 0));
2821 +
2822 + return IRQ_HANDLED;
2823 + }
2824 +diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
2825 +index c2338750313c4..a49782dd903cd 100644
2826 +--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
2827 ++++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
2828 +@@ -2238,7 +2238,8 @@ int vmci_qp_broker_map(struct vmci_handle handle,
2829 +
2830 + result = VMCI_SUCCESS;
2831 +
2832 +- if (context_id != VMCI_HOST_CONTEXT_ID) {
2833 ++ if (context_id != VMCI_HOST_CONTEXT_ID &&
2834 ++ !QPBROKERSTATE_HAS_MEM(entry)) {
2835 + struct vmci_qp_page_store page_store;
2836 +
2837 + page_store.pages = guest_mem;
2838 +@@ -2345,7 +2346,8 @@ int vmci_qp_broker_unmap(struct vmci_handle handle,
2839 + goto out;
2840 + }
2841 +
2842 +- if (context_id != VMCI_HOST_CONTEXT_ID) {
2843 ++ if (context_id != VMCI_HOST_CONTEXT_ID &&
2844 ++ QPBROKERSTATE_HAS_MEM(entry)) {
2845 + qp_acquire_queue_mutex(entry->produce_q);
2846 + result = qp_save_headers(entry);
2847 + if (result < VMCI_SUCCESS)
2848 +diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
2849 +index 8322d22a59c45..e92f9373e2274 100644
2850 +--- a/drivers/mmc/core/block.c
2851 ++++ b/drivers/mmc/core/block.c
2852 +@@ -591,6 +591,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
2853 + }
2854 +
2855 + mmc_wait_for_req(card->host, &mrq);
2856 ++ memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp));
2857 +
2858 + if (cmd.error) {
2859 + dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
2860 +@@ -640,8 +641,6 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
2861 + if (idata->ic.postsleep_min_us)
2862 + usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
2863 +
2864 +- memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
2865 +-
2866 + if (idata->rpmb || (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
2867 + /*
2868 + * Ensure RPMB/R1B command has completed by polling CMD13
2869 +diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
2870 +index 11087976ab19c..9ff718b61c72e 100644
2871 +--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
2872 ++++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
2873 +@@ -539,9 +539,22 @@ static int sd_write_long_data(struct realtek_pci_sdmmc *host,
2874 + return 0;
2875 + }
2876 +
2877 ++static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host)
2878 ++{
2879 ++ rtsx_pci_write_register(host->pcr, SD_CFG1,
2880 ++ SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_128);
2881 ++}
2882 ++
2883 ++static inline void sd_disable_initial_mode(struct realtek_pci_sdmmc *host)
2884 ++{
2885 ++ rtsx_pci_write_register(host->pcr, SD_CFG1,
2886 ++ SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_0);
2887 ++}
2888 ++
2889 + static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
2890 + {
2891 + struct mmc_data *data = mrq->data;
2892 ++ int err;
2893 +
2894 + if (host->sg_count < 0) {
2895 + data->error = host->sg_count;
2896 +@@ -550,22 +563,19 @@ static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
2897 + return data->error;
2898 + }
2899 +
2900 +- if (data->flags & MMC_DATA_READ)
2901 +- return sd_read_long_data(host, mrq);
2902 ++ if (data->flags & MMC_DATA_READ) {
2903 ++ if (host->initial_mode)
2904 ++ sd_disable_initial_mode(host);
2905 +
2906 +- return sd_write_long_data(host, mrq);
2907 +-}
2908 ++ err = sd_read_long_data(host, mrq);
2909 +
2910 +-static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host)
2911 +-{
2912 +- rtsx_pci_write_register(host->pcr, SD_CFG1,
2913 +- SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_128);
2914 +-}
2915 ++ if (host->initial_mode)
2916 ++ sd_enable_initial_mode(host);
2917 +
2918 +-static inline void sd_disable_initial_mode(struct realtek_pci_sdmmc *host)
2919 +-{
2920 +- rtsx_pci_write_register(host->pcr, SD_CFG1,
2921 +- SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_0);
2922 ++ return err;
2923 ++ }
2924 ++
2925 ++ return sd_write_long_data(host, mrq);
2926 + }
2927 +
2928 + static void sd_normal_rw(struct realtek_pci_sdmmc *host,
2929 +diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
2930 +index 7023cbec4017b..dd10f7abf5a71 100644
2931 +--- a/drivers/mmc/host/sdhci-of-arasan.c
2932 ++++ b/drivers/mmc/host/sdhci-of-arasan.c
2933 +@@ -192,7 +192,12 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
2934 + * through low speeds without power cycling.
2935 + */
2936 + sdhci_set_clock(host, host->max_clk);
2937 +- phy_power_on(sdhci_arasan->phy);
2938 ++ if (phy_power_on(sdhci_arasan->phy)) {
2939 ++ pr_err("%s: Cannot power on phy.\n",
2940 ++ mmc_hostname(host->mmc));
2941 ++ return;
2942 ++ }
2943 ++
2944 + sdhci_arasan->is_phy_on = true;
2945 +
2946 + /*
2947 +@@ -228,7 +233,12 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
2948 + msleep(20);
2949 +
2950 + if (ctrl_phy) {
2951 +- phy_power_on(sdhci_arasan->phy);
2952 ++ if (phy_power_on(sdhci_arasan->phy)) {
2953 ++ pr_err("%s: Cannot power on phy.\n",
2954 ++ mmc_hostname(host->mmc));
2955 ++ return;
2956 ++ }
2957 ++
2958 + sdhci_arasan->is_phy_on = true;
2959 + }
2960 + }
2961 +@@ -416,7 +426,9 @@ static int sdhci_arasan_suspend(struct device *dev)
2962 + ret = phy_power_off(sdhci_arasan->phy);
2963 + if (ret) {
2964 + dev_err(dev, "Cannot power off phy.\n");
2965 +- sdhci_resume_host(host);
2966 ++ if (sdhci_resume_host(host))
2967 ++ dev_err(dev, "Cannot resume host.\n");
2968 ++
2969 + return ret;
2970 + }
2971 + sdhci_arasan->is_phy_on = false;
2972 +diff --git a/drivers/mtd/nand/raw/cafe_nand.c b/drivers/mtd/nand/raw/cafe_nand.c
2973 +index 2d1c22dc88c15..cc5009200cc23 100644
2974 +--- a/drivers/mtd/nand/raw/cafe_nand.c
2975 ++++ b/drivers/mtd/nand/raw/cafe_nand.c
2976 +@@ -757,7 +757,7 @@ static int cafe_nand_probe(struct pci_dev *pdev,
2977 + "CAFE NAND", mtd);
2978 + if (err) {
2979 + dev_warn(&pdev->dev, "Could not register IRQ %d\n", pdev->irq);
2980 +- goto out_ior;
2981 ++ goto out_free_rs;
2982 + }
2983 +
2984 + /* Disable master reset, enable NAND clock */
2985 +@@ -801,6 +801,8 @@ static int cafe_nand_probe(struct pci_dev *pdev,
2986 + /* Disable NAND IRQ in global IRQ mask register */
2987 + cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
2988 + free_irq(pdev->irq, mtd);
2989 ++ out_free_rs:
2990 ++ free_rs(cafe->rs);
2991 + out_ior:
2992 + pci_iounmap(pdev, cafe->mmio);
2993 + out_free_mtd:
2994 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
2995 +index e21643377162b..1949f631e1bc5 100644
2996 +--- a/drivers/net/bonding/bond_main.c
2997 ++++ b/drivers/net/bonding/bond_main.c
2998 +@@ -1926,7 +1926,6 @@ static int __bond_release_one(struct net_device *bond_dev,
2999 + /* recompute stats just before removing the slave */
3000 + bond_get_stats(bond->dev, &bond->bond_stats);
3001 +
3002 +- bond_upper_dev_unlink(bond, slave);
3003 + /* unregister rx_handler early so bond_handle_frame wouldn't be called
3004 + * for this slave anymore.
3005 + */
3006 +@@ -1935,6 +1934,8 @@ static int __bond_release_one(struct net_device *bond_dev,
3007 + if (BOND_MODE(bond) == BOND_MODE_8023AD)
3008 + bond_3ad_unbind_slave(slave);
3009 +
3010 ++ bond_upper_dev_unlink(bond, slave);
3011 ++
3012 + if (bond_mode_can_use_xmit_hash(bond))
3013 + bond_update_slave_arr(bond, slave);
3014 +
3015 +diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
3016 +index e78b683f73052..825d840cdb8c3 100644
3017 +--- a/drivers/net/dsa/b53/b53_common.c
3018 ++++ b/drivers/net/dsa/b53/b53_common.c
3019 +@@ -2353,9 +2353,8 @@ static int b53_switch_init(struct b53_device *dev)
3020 + dev->cpu_port = 5;
3021 + }
3022 +
3023 +- /* cpu port is always last */
3024 +- dev->num_ports = dev->cpu_port + 1;
3025 + dev->enabled_ports |= BIT(dev->cpu_port);
3026 ++ dev->num_ports = fls(dev->enabled_ports);
3027 +
3028 + /* Include non standard CPU port built-in PHYs to be probed */
3029 + if (is539x(dev) || is531x5(dev)) {
3030 +diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
3031 +index af3d56636a076..3225de0f655f2 100644
3032 +--- a/drivers/net/dsa/lantiq_gswip.c
3033 ++++ b/drivers/net/dsa/lantiq_gswip.c
3034 +@@ -837,7 +837,8 @@ static int gswip_setup(struct dsa_switch *ds)
3035 +
3036 + gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
3037 + GSWIP_MAC_CTRL_2p(cpu_port));
3038 +- gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8, GSWIP_MAC_FLEN);
3039 ++ gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8 + ETH_FCS_LEN,
3040 ++ GSWIP_MAC_FLEN);
3041 + gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD,
3042 + GSWIP_BM_QUEUE_GCTRL);
3043 +
3044 +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
3045 +index cf39623b828b7..4630998d47fd4 100644
3046 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
3047 ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
3048 +@@ -1246,7 +1246,7 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
3049 +
3050 + /* SR-IOV capability was enabled but there are no VFs*/
3051 + if (iov->total == 0) {
3052 +- err = -EINVAL;
3053 ++ err = 0;
3054 + goto failed;
3055 + }
3056 +
3057 +diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
3058 +index 0ccdde366ae17..540d99f59226e 100644
3059 +--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
3060 ++++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
3061 +@@ -1153,6 +1153,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3062 + if (!adapter->registered_device_map) {
3063 + pr_err("%s: could not register any net devices\n",
3064 + pci_name(pdev));
3065 ++ err = -EINVAL;
3066 + goto out_release_adapter_res;
3067 + }
3068 +
3069 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3070 +index e64e175162068..db9c8f943811b 100644
3071 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3072 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3073 +@@ -56,6 +56,7 @@ MODULE_PARM_DESC(debug, " Network interface message level setting");
3074 + #define HNS3_OUTER_VLAN_TAG 2
3075 +
3076 + #define HNS3_MIN_TX_LEN 33U
3077 ++#define HNS3_MIN_TUN_PKT_LEN 65U
3078 +
3079 + /* hns3_pci_tbl - PCI Device ID Table
3080 + *
3081 +@@ -931,8 +932,11 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
3082 + l4.tcp->doff);
3083 + break;
3084 + case IPPROTO_UDP:
3085 +- if (hns3_tunnel_csum_bug(skb))
3086 +- return skb_checksum_help(skb);
3087 ++ if (hns3_tunnel_csum_bug(skb)) {
3088 ++ int ret = skb_put_padto(skb, HNS3_MIN_TUN_PKT_LEN);
3089 ++
3090 ++ return ret ? ret : skb_checksum_help(skb);
3091 ++ }
3092 +
3093 + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
3094 + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
3095 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
3096 +index aa402e2671212..f44e8401496b1 100644
3097 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
3098 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
3099 +@@ -1328,9 +1328,10 @@ static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
3100 +
3101 + static int hclge_configure(struct hclge_dev *hdev)
3102 + {
3103 ++ const struct cpumask *cpumask = cpu_online_mask;
3104 + struct hclge_cfg cfg;
3105 + unsigned int i;
3106 +- int ret;
3107 ++ int node, ret;
3108 +
3109 + ret = hclge_get_cfg(hdev, &cfg);
3110 + if (ret) {
3111 +@@ -1390,11 +1391,12 @@ static int hclge_configure(struct hclge_dev *hdev)
3112 +
3113 + hclge_init_kdump_kernel_config(hdev);
3114 +
3115 +- /* Set the init affinity based on pci func number */
3116 +- i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
3117 +- i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
3118 +- cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
3119 +- &hdev->affinity_mask);
3120 ++ /* Set the affinity based on numa node */
3121 ++ node = dev_to_node(&hdev->pdev->dev);
3122 ++ if (node != NUMA_NO_NODE)
3123 ++ cpumask = cpumask_of_node(node);
3124 ++
3125 ++ cpumask_copy(&hdev->affinity_mask, cpumask);
3126 +
3127 + return ret;
3128 + }
3129 +@@ -6683,11 +6685,12 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
3130 + hclge_clear_arfs_rules(handle);
3131 + spin_unlock_bh(&hdev->fd_rule_lock);
3132 +
3133 +- /* If it is not PF reset, the firmware will disable the MAC,
3134 ++ /* If it is not PF reset or FLR, the firmware will disable the MAC,
3135 + * so it only need to stop phy here.
3136 + */
3137 + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
3138 +- hdev->reset_type != HNAE3_FUNC_RESET) {
3139 ++ hdev->reset_type != HNAE3_FUNC_RESET &&
3140 ++ hdev->reset_type != HNAE3_FLR_RESET) {
3141 + hclge_mac_stop_phy(hdev);
3142 + hclge_update_link_status(hdev);
3143 + return;
3144 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
3145 +index ea348ebbbf2e9..db2e9dd5681eb 100644
3146 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
3147 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
3148 +@@ -1956,6 +1956,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
3149 +
3150 + hclgevf_enable_vector(&hdev->misc_vector, false);
3151 + event_cause = hclgevf_check_evt_cause(hdev, &clearval);
3152 ++ if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
3153 ++ hclgevf_clear_event_cause(hdev, clearval);
3154 +
3155 + switch (event_cause) {
3156 + case HCLGEVF_VECTOR0_EVENT_RST:
3157 +@@ -1968,10 +1970,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
3158 + break;
3159 + }
3160 +
3161 +- if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
3162 +- hclgevf_clear_event_cause(hdev, clearval);
3163 ++ if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
3164 + hclgevf_enable_vector(&hdev->misc_vector, true);
3165 +- }
3166 +
3167 + return IRQ_HANDLED;
3168 + }
3169 +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
3170 +index ecfe588f330ef..cfe7229593ead 100644
3171 +--- a/drivers/net/ethernet/ibm/ibmvnic.c
3172 ++++ b/drivers/net/ethernet/ibm/ibmvnic.c
3173 +@@ -4277,6 +4277,14 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
3174 + return 0;
3175 + }
3176 +
3177 ++ if (adapter->failover_pending) {
3178 ++ adapter->init_done_rc = -EAGAIN;
3179 ++ netdev_dbg(netdev, "Failover pending, ignoring login response\n");
3180 ++ complete(&adapter->init_done);
3181 ++ /* login response buffer will be released on reset */
3182 ++ return 0;
3183 ++ }
3184 ++
3185 + netdev->mtu = adapter->req_mtu - ETH_HLEN;
3186 +
3187 + netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
3188 +diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
3189 +index 94a3f000e999b..bc46c262b42d8 100644
3190 +--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
3191 ++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
3192 +@@ -142,6 +142,30 @@ enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
3193 + return 0;
3194 + }
3195 +
3196 ++/**
3197 ++ * iavf_lock_timeout - try to set bit but give up after timeout
3198 ++ * @adapter: board private structure
3199 ++ * @bit: bit to set
3200 ++ * @msecs: timeout in msecs
3201 ++ *
3202 ++ * Returns 0 on success, negative on failure
3203 ++ **/
3204 ++static int iavf_lock_timeout(struct iavf_adapter *adapter,
3205 ++ enum iavf_critical_section_t bit,
3206 ++ unsigned int msecs)
3207 ++{
3208 ++ unsigned int wait, delay = 10;
3209 ++
3210 ++ for (wait = 0; wait < msecs; wait += delay) {
3211 ++ if (!test_and_set_bit(bit, &adapter->crit_section))
3212 ++ return 0;
3213 ++
3214 ++ msleep(delay);
3215 ++ }
3216 ++
3217 ++ return -1;
3218 ++}
3219 ++
3220 + /**
3221 + * iavf_schedule_reset - Set the flags and schedule a reset event
3222 + * @adapter: board private structure
3223 +@@ -1961,7 +1985,6 @@ static void iavf_watchdog_task(struct work_struct *work)
3224 + /* check for hw reset */
3225 + reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
3226 + if (!reg_val) {
3227 +- adapter->state = __IAVF_RESETTING;
3228 + adapter->flags |= IAVF_FLAG_RESET_PENDING;
3229 + adapter->aq_required = 0;
3230 + adapter->current_op = VIRTCHNL_OP_UNKNOWN;
3231 +@@ -2077,6 +2100,10 @@ static void iavf_reset_task(struct work_struct *work)
3232 + if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
3233 + return;
3234 +
3235 ++ if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 200)) {
3236 ++ schedule_work(&adapter->reset_task);
3237 ++ return;
3238 ++ }
3239 + while (test_and_set_bit(__IAVF_IN_CLIENT_TASK,
3240 + &adapter->crit_section))
3241 + usleep_range(500, 1000);
3242 +@@ -2291,6 +2318,8 @@ static void iavf_adminq_task(struct work_struct *work)
3243 + if (!event.msg_buf)
3244 + goto out;
3245 +
3246 ++ if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 200))
3247 ++ goto freedom;
3248 + do {
3249 + ret = iavf_clean_arq_element(hw, &event, &pending);
3250 + v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
3251 +@@ -2304,6 +2333,7 @@ static void iavf_adminq_task(struct work_struct *work)
3252 + if (pending != 0)
3253 + memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
3254 + } while (pending);
3255 ++ clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
3256 +
3257 + if ((adapter->flags &
3258 + (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
3259 +@@ -3600,6 +3630,10 @@ static void iavf_init_task(struct work_struct *work)
3260 + init_task.work);
3261 + struct iavf_hw *hw = &adapter->hw;
3262 +
3263 ++ if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 5000)) {
3264 ++ dev_warn(&adapter->pdev->dev, "failed to set __IAVF_IN_CRITICAL_TASK in %s\n", __FUNCTION__);
3265 ++ return;
3266 ++ }
3267 + switch (adapter->state) {
3268 + case __IAVF_STARTUP:
3269 + if (iavf_startup(adapter) < 0)
3270 +@@ -3612,14 +3646,14 @@ static void iavf_init_task(struct work_struct *work)
3271 + case __IAVF_INIT_GET_RESOURCES:
3272 + if (iavf_init_get_resources(adapter) < 0)
3273 + goto init_failed;
3274 +- return;
3275 ++ goto out;
3276 + default:
3277 + goto init_failed;
3278 + }
3279 +
3280 + queue_delayed_work(iavf_wq, &adapter->init_task,
3281 + msecs_to_jiffies(30));
3282 +- return;
3283 ++ goto out;
3284 + init_failed:
3285 + if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
3286 + dev_err(&adapter->pdev->dev,
3287 +@@ -3628,9 +3662,11 @@ init_failed:
3288 + iavf_shutdown_adminq(hw);
3289 + adapter->state = __IAVF_STARTUP;
3290 + queue_delayed_work(iavf_wq, &adapter->init_task, HZ * 5);
3291 +- return;
3292 ++ goto out;
3293 + }
3294 + queue_delayed_work(iavf_wq, &adapter->init_task, HZ);
3295 ++out:
3296 ++ clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
3297 + }
3298 +
3299 + /**
3300 +@@ -3647,9 +3683,12 @@ static void iavf_shutdown(struct pci_dev *pdev)
3301 + if (netif_running(netdev))
3302 + iavf_close(netdev);
3303 +
3304 ++ if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 5000))
3305 ++ dev_warn(&adapter->pdev->dev, "failed to set __IAVF_IN_CRITICAL_TASK in %s\n", __FUNCTION__);
3306 + /* Prevent the watchdog from running. */
3307 + adapter->state = __IAVF_REMOVE;
3308 + adapter->aq_required = 0;
3309 ++ clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
3310 +
3311 + #ifdef CONFIG_PM
3312 + pci_save_state(pdev);
3313 +@@ -3878,10 +3917,6 @@ static void iavf_remove(struct pci_dev *pdev)
3314 + err);
3315 + }
3316 +
3317 +- /* Shut down all the garbage mashers on the detention level */
3318 +- adapter->state = __IAVF_REMOVE;
3319 +- adapter->aq_required = 0;
3320 +- adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
3321 + iavf_request_reset(adapter);
3322 + msleep(50);
3323 + /* If the FW isn't responding, kick it once, but only once. */
3324 +@@ -3889,6 +3924,13 @@ static void iavf_remove(struct pci_dev *pdev)
3325 + iavf_request_reset(adapter);
3326 + msleep(50);
3327 + }
3328 ++ if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 5000))
3329 ++ dev_warn(&adapter->pdev->dev, "failed to set __IAVF_IN_CRITICAL_TASK in %s\n", __FUNCTION__);
3330 ++
3331 ++ /* Shut down all the garbage mashers on the detention level */
3332 ++ adapter->state = __IAVF_REMOVE;
3333 ++ adapter->aq_required = 0;
3334 ++ adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
3335 + iavf_free_all_tx_resources(adapter);
3336 + iavf_free_all_rx_resources(adapter);
3337 + iavf_misc_irq_disable(adapter);
3338 +diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
3339 +index 084cf4a4114ad..9ba05d9aa8e08 100644
3340 +--- a/drivers/net/ethernet/intel/igc/igc_main.c
3341 ++++ b/drivers/net/ethernet/intel/igc/igc_main.c
3342 +@@ -2693,6 +2693,7 @@ static irqreturn_t igc_msix_ring(int irq, void *data)
3343 + */
3344 + static int igc_request_msix(struct igc_adapter *adapter)
3345 + {
3346 ++ unsigned int num_q_vectors = adapter->num_q_vectors;
3347 + int i = 0, err = 0, vector = 0, free_vector = 0;
3348 + struct net_device *netdev = adapter->netdev;
3349 +
3350 +@@ -2701,7 +2702,13 @@ static int igc_request_msix(struct igc_adapter *adapter)
3351 + if (err)
3352 + goto err_out;
3353 +
3354 +- for (i = 0; i < adapter->num_q_vectors; i++) {
3355 ++ if (num_q_vectors > MAX_Q_VECTORS) {
3356 ++ num_q_vectors = MAX_Q_VECTORS;
3357 ++ dev_warn(&adapter->pdev->dev,
3358 ++ "The number of queue vectors (%d) is higher than max allowed (%d)\n",
3359 ++ adapter->num_q_vectors, MAX_Q_VECTORS);
3360 ++ }
3361 ++ for (i = 0; i < num_q_vectors; i++) {
3362 + struct igc_q_vector *q_vector = adapter->q_vector[i];
3363 +
3364 + vector++;
3365 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
3366 +index 76547d35cd0e1..bf091a6c0cd2d 100644
3367 +--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
3368 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
3369 +@@ -865,7 +865,7 @@ static void cb_timeout_handler(struct work_struct *work)
3370 + ent->ret = -ETIMEDOUT;
3371 + mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, timeout. Will cause a leak of a command resource\n",
3372 + ent->idx, mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
3373 +- mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
3374 ++ mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
3375 +
3376 + out:
3377 + cmd_ent_put(ent); /* for the cmd_ent_get() took on schedule delayed work */
3378 +@@ -977,7 +977,7 @@ static void cmd_work_handler(struct work_struct *work)
3379 + MLX5_SET(mbox_out, ent->out, status, status);
3380 + MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
3381 +
3382 +- mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
3383 ++ mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
3384 + return;
3385 + }
3386 +
3387 +@@ -991,7 +991,7 @@ static void cmd_work_handler(struct work_struct *work)
3388 + poll_timeout(ent);
3389 + /* make sure we read the descriptor after ownership is SW */
3390 + rmb();
3391 +- mlx5_cmd_comp_handler(dev, 1UL << ent->idx, (ent->ret == -ETIMEDOUT));
3392 ++ mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, (ent->ret == -ETIMEDOUT));
3393 + }
3394 + }
3395 +
3396 +@@ -1051,7 +1051,7 @@ static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev,
3397 + mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
3398 +
3399 + ent->ret = -ETIMEDOUT;
3400 +- mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
3401 ++ mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
3402 + }
3403 +
3404 + static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
3405 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
3406 +index dc36b0db37222..97359417c6e7f 100644
3407 +--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
3408 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
3409 +@@ -1005,7 +1005,7 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
3410 + err = mlx5_core_alloc_pd(dev, &tracer->buff.pdn);
3411 + if (err) {
3412 + mlx5_core_warn(dev, "FWTracer: Failed to allocate PD %d\n", err);
3413 +- return err;
3414 ++ goto err_cancel_work;
3415 + }
3416 +
3417 + err = mlx5_fw_tracer_create_mkey(tracer);
3418 +@@ -1029,6 +1029,7 @@ err_notifier_unregister:
3419 + mlx5_core_destroy_mkey(dev, &tracer->buff.mkey);
3420 + err_dealloc_pd:
3421 + mlx5_core_dealloc_pd(dev, tracer->buff.pdn);
3422 ++err_cancel_work:
3423 + cancel_work_sync(&tracer->read_fw_strings_work);
3424 + return err;
3425 + }
3426 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
3427 +index 739bf5dc5a252..5fe4e028567a9 100644
3428 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
3429 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
3430 +@@ -1606,9 +1606,9 @@ static int build_match_list(struct match_list_head *match_head,
3431 +
3432 + curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
3433 + if (!curr_match) {
3434 ++ rcu_read_unlock();
3435 + free_match_list(match_head, ft_locked);
3436 +- err = -ENOMEM;
3437 +- goto out;
3438 ++ return -ENOMEM;
3439 + }
3440 + if (!tree_get_node(&g->node)) {
3441 + kfree(curr_match);
3442 +@@ -1617,7 +1617,6 @@ static int build_match_list(struct match_list_head *match_head,
3443 + curr_match->g = g;
3444 + list_add_tail(&curr_match->list, &match_head->list);
3445 + }
3446 +-out:
3447 + rcu_read_unlock();
3448 + return err;
3449 + }
3450 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
3451 +index f012aac83b10e..401564b94eb10 100644
3452 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
3453 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
3454 +@@ -603,6 +603,7 @@ static int dr_cmd_modify_qp_rtr2rts(struct mlx5_core_dev *mdev,
3455 + MLX5_SET(qpc, qpc, log_ack_req_freq, 0);
3456 + MLX5_SET(qpc, qpc, retry_count, attr->retry_cnt);
3457 + MLX5_SET(qpc, qpc, rnr_retry, attr->rnr_retry);
3458 ++ MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 0x8); /* ~1ms */
3459 +
3460 + return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, 0, qpc,
3461 + &dr_qp->mqp);
3462 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
3463 +index 5d85ae59bc51e..3769b15b04b3b 100644
3464 +--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
3465 ++++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
3466 +@@ -3173,6 +3173,7 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
3467 + struct qed_nvm_image_att *p_image_att)
3468 + {
3469 + enum nvm_image_type type;
3470 ++ int rc;
3471 + u32 i;
3472 +
3473 + /* Translate image_id into MFW definitions */
3474 +@@ -3198,7 +3199,10 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
3475 + return -EINVAL;
3476 + }
3477 +
3478 +- qed_mcp_nvm_info_populate(p_hwfn);
3479 ++ rc = qed_mcp_nvm_info_populate(p_hwfn);
3480 ++ if (rc)
3481 ++ return rc;
3482 ++
3483 + for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
3484 + if (type == p_hwfn->nvm_info.image_att[i].image_type)
3485 + break;
3486 +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
3487 +index c48a0e2d4d7ef..6a009d51ec510 100644
3488 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
3489 ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
3490 +@@ -440,7 +440,6 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
3491 + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c, 1);
3492 + msleep(20);
3493 +
3494 +- qlcnic_rom_unlock(adapter);
3495 + /* big hammer don't reset CAM block on reset */
3496 + QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff);
3497 +
3498 +diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
3499 +index 274e5b4bc4ac8..f158fdf3aab2c 100644
3500 +--- a/drivers/net/ethernet/rdc/r6040.c
3501 ++++ b/drivers/net/ethernet/rdc/r6040.c
3502 +@@ -119,6 +119,8 @@
3503 + #define PHY_ST 0x8A /* PHY status register */
3504 + #define MAC_SM 0xAC /* MAC status machine */
3505 + #define MAC_SM_RST 0x0002 /* MAC status machine reset */
3506 ++#define MD_CSC 0xb6 /* MDC speed control register */
3507 ++#define MD_CSC_DEFAULT 0x0030
3508 + #define MAC_ID 0xBE /* Identifier register */
3509 +
3510 + #define TX_DCNT 0x80 /* TX descriptor count */
3511 +@@ -354,8 +356,9 @@ static void r6040_reset_mac(struct r6040_private *lp)
3512 + {
3513 + void __iomem *ioaddr = lp->base;
3514 + int limit = MAC_DEF_TIMEOUT;
3515 +- u16 cmd;
3516 ++ u16 cmd, md_csc;
3517 +
3518 ++ md_csc = ioread16(ioaddr + MD_CSC);
3519 + iowrite16(MAC_RST, ioaddr + MCR1);
3520 + while (limit--) {
3521 + cmd = ioread16(ioaddr + MCR1);
3522 +@@ -367,6 +370,10 @@ static void r6040_reset_mac(struct r6040_private *lp)
3523 + iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
3524 + iowrite16(0, ioaddr + MAC_SM);
3525 + mdelay(5);
3526 ++
3527 ++ /* Restore MDIO clock frequency */
3528 ++ if (md_csc != MD_CSC_DEFAULT)
3529 ++ iowrite16(md_csc, ioaddr + MD_CSC);
3530 + }
3531 +
3532 + static void r6040_init_mac_regs(struct net_device *dev)
3533 +diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
3534 +index 931a44fe7afe8..50d85d0372302 100644
3535 +--- a/drivers/net/ethernet/renesas/sh_eth.c
3536 ++++ b/drivers/net/ethernet/renesas/sh_eth.c
3537 +@@ -2567,6 +2567,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
3538 + else
3539 + txdesc->status |= cpu_to_le32(TD_TACT);
3540 +
3541 ++ wmb(); /* cur_tx must be incremented after TACT bit was set */
3542 + mdp->cur_tx++;
3543 +
3544 + if (!(sh_eth_read(ndev, EDTRR) & mdp->cd->edtrr_trns))
3545 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
3546 +index 0f56f8e336917..03b11f191c262 100644
3547 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
3548 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
3549 +@@ -288,10 +288,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
3550 + val &= ~NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL;
3551 + break;
3552 + default:
3553 +- dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
3554 +- phy_modes(gmac->phy_mode));
3555 +- err = -EINVAL;
3556 +- goto err_remove_config_dt;
3557 ++ goto err_unsupported_phy;
3558 + }
3559 + regmap_write(gmac->nss_common, NSS_COMMON_GMAC_CTL(gmac->id), val);
3560 +
3561 +@@ -308,10 +305,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
3562 + NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
3563 + break;
3564 + default:
3565 +- dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
3566 +- phy_modes(gmac->phy_mode));
3567 +- err = -EINVAL;
3568 +- goto err_remove_config_dt;
3569 ++ goto err_unsupported_phy;
3570 + }
3571 + regmap_write(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, val);
3572 +
3573 +@@ -328,8 +322,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
3574 + NSS_COMMON_CLK_GATE_GMII_TX_EN(gmac->id);
3575 + break;
3576 + default:
3577 +- /* We don't get here; the switch above will have errored out */
3578 +- unreachable();
3579 ++ goto err_unsupported_phy;
3580 + }
3581 + regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
3582 +
3583 +@@ -360,6 +353,11 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
3584 +
3585 + return 0;
3586 +
3587 ++err_unsupported_phy:
3588 ++ dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
3589 ++ phy_modes(gmac->phy_mode));
3590 ++ err = -EINVAL;
3591 ++
3592 + err_remove_config_dt:
3593 + stmmac_remove_config_dt(pdev, plat_dat);
3594 +
3595 +diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
3596 +index bede1ff289c59..a65b7291e12a2 100644
3597 +--- a/drivers/net/ethernet/wiznet/w5100.c
3598 ++++ b/drivers/net/ethernet/wiznet/w5100.c
3599 +@@ -1052,6 +1052,8 @@ static int w5100_mmio_probe(struct platform_device *pdev)
3600 + mac_addr = data->mac_addr;
3601 +
3602 + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3603 ++ if (!mem)
3604 ++ return -EINVAL;
3605 + if (resource_size(mem) < W5100_BUS_DIRECT_SIZE)
3606 + ops = &w5100_mmio_indirect_ops;
3607 + else
3608 +diff --git a/drivers/net/phy/dp83640_reg.h b/drivers/net/phy/dp83640_reg.h
3609 +index 21aa24c741b96..daae7fa58fb82 100644
3610 +--- a/drivers/net/phy/dp83640_reg.h
3611 ++++ b/drivers/net/phy/dp83640_reg.h
3612 +@@ -5,7 +5,7 @@
3613 + #ifndef HAVE_DP83640_REGISTERS
3614 + #define HAVE_DP83640_REGISTERS
3615 +
3616 +-#define PAGE0 0x0000
3617 ++/* #define PAGE0 0x0000 */
3618 + #define PHYCR2 0x001c /* PHY Control Register 2 */
3619 +
3620 + #define PAGE4 0x0004
3621 +diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
3622 +index eb100eb33de3d..77ac5a721e7b6 100644
3623 +--- a/drivers/net/usb/cdc_mbim.c
3624 ++++ b/drivers/net/usb/cdc_mbim.c
3625 +@@ -653,6 +653,11 @@ static const struct usb_device_id mbim_devs[] = {
3626 + .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
3627 + },
3628 +
3629 ++ /* Telit LN920 */
3630 ++ { USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1061, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
3631 ++ .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
3632 ++ },
3633 ++
3634 + /* default entry */
3635 + { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
3636 + .driver_info = (unsigned long)&cdc_mbim_info_zlp,
3637 +diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
3638 +index b4885a700296e..b0a4ca3559fd8 100644
3639 +--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
3640 ++++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
3641 +@@ -3351,7 +3351,8 @@ found:
3642 + "Found block at %x: code=%d ref=%d length=%d major=%d minor=%d\n",
3643 + cptr, code, reference, length, major, minor);
3644 + if ((!AR_SREV_9485(ah) && length >= 1024) ||
3645 +- (AR_SREV_9485(ah) && length > EEPROM_DATA_LEN_9485)) {
3646 ++ (AR_SREV_9485(ah) && length > EEPROM_DATA_LEN_9485) ||
3647 ++ (length > cptr)) {
3648 + ath_dbg(common, EEPROM, "Skipping bad header\n");
3649 + cptr -= COMP_HDR_LEN;
3650 + continue;
3651 +diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
3652 +index 9fd8e64288ffa..7e2e22b6bbbc5 100644
3653 +--- a/drivers/net/wireless/ath/ath9k/hw.c
3654 ++++ b/drivers/net/wireless/ath/ath9k/hw.c
3655 +@@ -1622,7 +1622,6 @@ static void ath9k_hw_apply_gpio_override(struct ath_hw *ah)
3656 + ath9k_hw_gpio_request_out(ah, i, NULL,
3657 + AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
3658 + ath9k_hw_set_gpio(ah, i, !!(ah->gpio_val & BIT(i)));
3659 +- ath9k_hw_gpio_free(ah, i);
3660 + }
3661 + }
3662 +
3663 +@@ -2730,14 +2729,17 @@ static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah, u32 gpio, u32 type)
3664 + static void ath9k_hw_gpio_cfg_soc(struct ath_hw *ah, u32 gpio, bool out,
3665 + const char *label)
3666 + {
3667 ++ int err;
3668 ++
3669 + if (ah->caps.gpio_requested & BIT(gpio))
3670 + return;
3671 +
3672 +- /* may be requested by BSP, free anyway */
3673 +- gpio_free(gpio);
3674 +-
3675 +- if (gpio_request_one(gpio, out ? GPIOF_OUT_INIT_LOW : GPIOF_IN, label))
3676 ++ err = gpio_request_one(gpio, out ? GPIOF_OUT_INIT_LOW : GPIOF_IN, label);
3677 ++ if (err) {
3678 ++ ath_err(ath9k_hw_common(ah), "request GPIO%d failed:%d\n",
3679 ++ gpio, err);
3680 + return;
3681 ++ }
3682 +
3683 + ah->caps.gpio_requested |= BIT(gpio);
3684 + }
3685 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
3686 +index 9c417dd062913..7736621dca653 100644
3687 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
3688 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
3689 +@@ -1043,8 +1043,10 @@ int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
3690 + return -ENOMEM;
3691 +
3692 + #ifdef CONFIG_IWLWIFI_DEBUGFS
3693 +- if (mvm->beacon_inject_active)
3694 ++ if (mvm->beacon_inject_active) {
3695 ++ dev_kfree_skb(beacon);
3696 + return -EBUSY;
3697 ++ }
3698 + #endif
3699 +
3700 + ret = iwl_mvm_mac_ctxt_send_beacon(mvm, vif, beacon);
3701 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
3702 +index 09b1a6beee77c..081cbc9ec7368 100644
3703 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
3704 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
3705 +@@ -2970,16 +2970,20 @@ static void iwl_mvm_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy,
3706 + void *_data)
3707 + {
3708 + struct iwl_mvm_he_obss_narrow_bw_ru_data *data = _data;
3709 ++ const struct cfg80211_bss_ies *ies;
3710 + const struct element *elem;
3711 +
3712 +- elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, bss->ies->data,
3713 +- bss->ies->len);
3714 ++ rcu_read_lock();
3715 ++ ies = rcu_dereference(bss->ies);
3716 ++ elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, ies->data,
3717 ++ ies->len);
3718 +
3719 + if (!elem || elem->datalen < 10 ||
3720 + !(elem->data[10] &
3721 + WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT)) {
3722 + data->tolerated = false;
3723 + }
3724 ++ rcu_read_unlock();
3725 + }
3726 +
3727 + static void iwl_mvm_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw,
3728 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
3729 +index 8b0576cde797e..a9aab6c690e85 100644
3730 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
3731 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
3732 +@@ -687,10 +687,26 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
3733 +
3734 + mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0;
3735 +
3736 +- mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
3737 +- mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE;
3738 +- mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
3739 +- mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
3740 ++ if (iwl_mvm_has_new_tx_api(mvm)) {
3741 ++ /*
3742 ++ * If we have the new TX/queue allocation API initialize them
3743 ++ * all to invalid numbers. We'll rewrite the ones that we need
3744 ++ * later, but that doesn't happen for all of them all of the
3745 ++ * time (e.g. P2P Device is optional), and if a dynamic queue
3746 ++ * ends up getting number 2 (IWL_MVM_DQA_P2P_DEVICE_QUEUE) then
3747 ++ * iwl_mvm_is_static_queue() erroneously returns true, and we
3748 ++ * might have things getting stuck.
3749 ++ */
3750 ++ mvm->aux_queue = IWL_MVM_INVALID_QUEUE;
3751 ++ mvm->snif_queue = IWL_MVM_INVALID_QUEUE;
3752 ++ mvm->probe_queue = IWL_MVM_INVALID_QUEUE;
3753 ++ mvm->p2p_dev_queue = IWL_MVM_INVALID_QUEUE;
3754 ++ } else {
3755 ++ mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
3756 ++ mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE;
3757 ++ mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
3758 ++ mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
3759 ++ }
3760 +
3761 + mvm->sf_state = SF_UNINIT;
3762 + if (iwl_mvm_has_unified_ucode(mvm))
3763 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
3764 +index 40cafcf40ccf0..5df4bbb6c6de3 100644
3765 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
3766 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
3767 +@@ -346,8 +346,9 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
3768 + }
3769 +
3770 + static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
3771 +- int queue, u8 tid, u8 flags)
3772 ++ u16 *queueptr, u8 tid, u8 flags)
3773 + {
3774 ++ int queue = *queueptr;
3775 + struct iwl_scd_txq_cfg_cmd cmd = {
3776 + .scd_queue = queue,
3777 + .action = SCD_CFG_DISABLE_QUEUE,
3778 +@@ -356,6 +357,7 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
3779 +
3780 + if (iwl_mvm_has_new_tx_api(mvm)) {
3781 + iwl_trans_txq_free(mvm->trans, queue);
3782 ++ *queueptr = IWL_MVM_INVALID_QUEUE;
3783 +
3784 + return 0;
3785 + }
3786 +@@ -517,6 +519,7 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
3787 + u8 sta_id, tid;
3788 + unsigned long disable_agg_tids = 0;
3789 + bool same_sta;
3790 ++ u16 queue_tmp = queue;
3791 + int ret;
3792 +
3793 + lockdep_assert_held(&mvm->mutex);
3794 +@@ -539,7 +542,7 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
3795 + iwl_mvm_invalidate_sta_queue(mvm, queue,
3796 + disable_agg_tids, false);
3797 +
3798 +- ret = iwl_mvm_disable_txq(mvm, old_sta, queue, tid, 0);
3799 ++ ret = iwl_mvm_disable_txq(mvm, old_sta, &queue_tmp, tid, 0);
3800 + if (ret) {
3801 + IWL_ERR(mvm,
3802 + "Failed to free inactive queue %d (ret=%d)\n",
3803 +@@ -1209,6 +1212,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
3804 + unsigned int wdg_timeout =
3805 + iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
3806 + int queue = -1;
3807 ++ u16 queue_tmp;
3808 + unsigned long disable_agg_tids = 0;
3809 + enum iwl_mvm_agg_state queue_state;
3810 + bool shared_queue = false, inc_ssn;
3811 +@@ -1357,7 +1361,8 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
3812 + return 0;
3813 +
3814 + out_err:
3815 +- iwl_mvm_disable_txq(mvm, sta, queue, tid, 0);
3816 ++ queue_tmp = queue;
3817 ++ iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid, 0);
3818 +
3819 + return ret;
3820 + }
3821 +@@ -1795,7 +1800,7 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
3822 + if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
3823 + continue;
3824 +
3825 +- iwl_mvm_disable_txq(mvm, sta, mvm_sta->tid_data[i].txq_id, i,
3826 ++ iwl_mvm_disable_txq(mvm, sta, &mvm_sta->tid_data[i].txq_id, i,
3827 + 0);
3828 + mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
3829 + }
3830 +@@ -2005,7 +2010,7 @@ static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
3831 + ret = iwl_mvm_add_int_sta_common(mvm, sta, NULL, macidx, maccolor);
3832 + if (ret) {
3833 + if (!iwl_mvm_has_new_tx_api(mvm))
3834 +- iwl_mvm_disable_txq(mvm, NULL, *queue,
3835 ++ iwl_mvm_disable_txq(mvm, NULL, queue,
3836 + IWL_MAX_TID_COUNT, 0);
3837 + return ret;
3838 + }
3839 +@@ -2073,7 +2078,7 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
3840 + if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA))
3841 + return -EINVAL;
3842 +
3843 +- iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
3844 ++ iwl_mvm_disable_txq(mvm, NULL, &mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
3845 + ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
3846 + if (ret)
3847 + IWL_WARN(mvm, "Failed sending remove station\n");
3848 +@@ -2090,7 +2095,7 @@ int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
3849 + if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA))
3850 + return -EINVAL;
3851 +
3852 +- iwl_mvm_disable_txq(mvm, NULL, mvm->aux_queue, IWL_MAX_TID_COUNT, 0);
3853 ++ iwl_mvm_disable_txq(mvm, NULL, &mvm->aux_queue, IWL_MAX_TID_COUNT, 0);
3854 + ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
3855 + if (ret)
3856 + IWL_WARN(mvm, "Failed sending remove station\n");
3857 +@@ -2186,7 +2191,7 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
3858 + struct ieee80211_vif *vif)
3859 + {
3860 + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3861 +- int queue;
3862 ++ u16 *queueptr, queue;
3863 +
3864 + lockdep_assert_held(&mvm->mutex);
3865 +
3866 +@@ -2195,10 +2200,10 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
3867 + switch (vif->type) {
3868 + case NL80211_IFTYPE_AP:
3869 + case NL80211_IFTYPE_ADHOC:
3870 +- queue = mvm->probe_queue;
3871 ++ queueptr = &mvm->probe_queue;
3872 + break;
3873 + case NL80211_IFTYPE_P2P_DEVICE:
3874 +- queue = mvm->p2p_dev_queue;
3875 ++ queueptr = &mvm->p2p_dev_queue;
3876 + break;
3877 + default:
3878 + WARN(1, "Can't free bcast queue on vif type %d\n",
3879 +@@ -2206,7 +2211,8 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
3880 + return;
3881 + }
3882 +
3883 +- iwl_mvm_disable_txq(mvm, NULL, queue, IWL_MAX_TID_COUNT, 0);
3884 ++ queue = *queueptr;
3885 ++ iwl_mvm_disable_txq(mvm, NULL, queueptr, IWL_MAX_TID_COUNT, 0);
3886 + if (iwl_mvm_has_new_tx_api(mvm))
3887 + return;
3888 +
3889 +@@ -2441,7 +2447,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
3890 +
3891 + iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
3892 +
3893 +- iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0);
3894 ++ iwl_mvm_disable_txq(mvm, NULL, &mvmvif->cab_queue, 0, 0);
3895 +
3896 + ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
3897 + if (ret)
3898 +diff --git a/drivers/ntb/test/ntb_msi_test.c b/drivers/ntb/test/ntb_msi_test.c
3899 +index 99d826ed9c341..662067dc9ce2c 100644
3900 +--- a/drivers/ntb/test/ntb_msi_test.c
3901 ++++ b/drivers/ntb/test/ntb_msi_test.c
3902 +@@ -372,8 +372,10 @@ static int ntb_msit_probe(struct ntb_client *client, struct ntb_dev *ntb)
3903 + if (ret)
3904 + goto remove_dbgfs;
3905 +
3906 +- if (!nm->isr_ctx)
3907 ++ if (!nm->isr_ctx) {
3908 ++ ret = -ENOMEM;
3909 + goto remove_dbgfs;
3910 ++ }
3911 +
3912 + ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
3913 +
3914 +diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
3915 +index 5ce4766a6c9eb..251fe75798c13 100644
3916 +--- a/drivers/ntb/test/ntb_perf.c
3917 ++++ b/drivers/ntb/test/ntb_perf.c
3918 +@@ -597,6 +597,7 @@ static int perf_setup_inbuf(struct perf_peer *peer)
3919 + return -ENOMEM;
3920 + }
3921 + if (!IS_ALIGNED(peer->inbuf_xlat, xlat_align)) {
3922 ++ ret = -EINVAL;
3923 + dev_err(&perf->ntb->dev, "Unaligned inbuf allocated\n");
3924 + goto err_free_inbuf;
3925 + }
3926 +diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
3927 +index f6427a10a9908..38bbbbbc6f47f 100644
3928 +--- a/drivers/nvme/host/tcp.c
3929 ++++ b/drivers/nvme/host/tcp.c
3930 +@@ -642,17 +642,9 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
3931 + unsigned int *offset, size_t *len)
3932 + {
3933 + struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
3934 +- struct nvme_tcp_request *req;
3935 +- struct request *rq;
3936 +-
3937 +- rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
3938 +- if (!rq) {
3939 +- dev_err(queue->ctrl->ctrl.device,
3940 +- "queue %d tag %#x not found\n",
3941 +- nvme_tcp_queue_id(queue), pdu->command_id);
3942 +- return -ENOENT;
3943 +- }
3944 +- req = blk_mq_rq_to_pdu(rq);
3945 ++ struct request *rq =
3946 ++ blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
3947 ++ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
3948 +
3949 + while (true) {
3950 + int recv_len, ret;
3951 +diff --git a/drivers/of/kobj.c b/drivers/of/kobj.c
3952 +index a32e60b024b8d..6675b5e56960c 100644
3953 +--- a/drivers/of/kobj.c
3954 ++++ b/drivers/of/kobj.c
3955 +@@ -119,7 +119,7 @@ int __of_attach_node_sysfs(struct device_node *np)
3956 + struct property *pp;
3957 + int rc;
3958 +
3959 +- if (!of_kset)
3960 ++ if (!IS_ENABLED(CONFIG_SYSFS) || !of_kset)
3961 + return 0;
3962 +
3963 + np->kobj.kset = of_kset;
3964 +diff --git a/drivers/opp/of.c b/drivers/opp/of.c
3965 +index 603c688fe23dc..30cc407c8f93f 100644
3966 +--- a/drivers/opp/of.c
3967 ++++ b/drivers/opp/of.c
3968 +@@ -95,15 +95,7 @@ static struct dev_pm_opp *_find_opp_of_np(struct opp_table *opp_table,
3969 + static struct device_node *of_parse_required_opp(struct device_node *np,
3970 + int index)
3971 + {
3972 +- struct device_node *required_np;
3973 +-
3974 +- required_np = of_parse_phandle(np, "required-opps", index);
3975 +- if (unlikely(!required_np)) {
3976 +- pr_err("%s: Unable to parse required-opps: %pOF, index: %d\n",
3977 +- __func__, np, index);
3978 +- }
3979 +-
3980 +- return required_np;
3981 ++ return of_parse_phandle(np, "required-opps", index);
3982 + }
3983 +
3984 + /* The caller must call dev_pm_opp_put_opp_table() after the table is used */
3985 +@@ -996,7 +988,7 @@ int of_get_required_opp_performance_state(struct device_node *np, int index)
3986 +
3987 + required_np = of_parse_required_opp(np, index);
3988 + if (!required_np)
3989 +- return -EINVAL;
3990 ++ return -ENODEV;
3991 +
3992 + opp_table = _find_table_of_opp_np(required_np);
3993 + if (IS_ERR(opp_table)) {
3994 +diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c
3995 +index 5d41dda6da4e7..75daa16f38b7f 100644
3996 +--- a/drivers/parport/ieee1284_ops.c
3997 ++++ b/drivers/parport/ieee1284_ops.c
3998 +@@ -535,7 +535,7 @@ size_t parport_ieee1284_ecp_read_data (struct parport *port,
3999 + goto out;
4000 +
4001 + /* Yield the port for a while. */
4002 +- if (count && dev->port->irq != PARPORT_IRQ_NONE) {
4003 ++ if (dev->port->irq != PARPORT_IRQ_NONE) {
4004 + parport_release (dev);
4005 + schedule_timeout_interruptible(msecs_to_jiffies(40));
4006 + parport_claim_or_block (dev);
4007 +diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
4008 +index 0a2902569f140..0538348ed843f 100644
4009 +--- a/drivers/pci/controller/pci-aardvark.c
4010 ++++ b/drivers/pci/controller/pci-aardvark.c
4011 +@@ -62,6 +62,7 @@
4012 + #define PIO_COMPLETION_STATUS_CRS 2
4013 + #define PIO_COMPLETION_STATUS_CA 4
4014 + #define PIO_NON_POSTED_REQ BIT(10)
4015 ++#define PIO_ERR_STATUS BIT(11)
4016 + #define PIO_ADDR_LS (PIO_BASE_ADDR + 0x8)
4017 + #define PIO_ADDR_MS (PIO_BASE_ADDR + 0xc)
4018 + #define PIO_WR_DATA (PIO_BASE_ADDR + 0x10)
4019 +@@ -176,7 +177,7 @@
4020 + (PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn)) | \
4021 + PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where))
4022 +
4023 +-#define PIO_RETRY_CNT 500
4024 ++#define PIO_RETRY_CNT 750000 /* 1.5 s */
4025 + #define PIO_RETRY_DELAY 2 /* 2 us*/
4026 +
4027 + #define LINK_WAIT_MAX_RETRIES 10
4028 +@@ -193,6 +194,7 @@ struct advk_pcie {
4029 + struct list_head resources;
4030 + struct irq_domain *irq_domain;
4031 + struct irq_chip irq_chip;
4032 ++ raw_spinlock_t irq_lock;
4033 + struct irq_domain *msi_domain;
4034 + struct irq_domain *msi_inner_domain;
4035 + struct irq_chip msi_bottom_irq_chip;
4036 +@@ -363,7 +365,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
4037 + advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);
4038 + }
4039 +
4040 +-static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
4041 ++static int advk_pcie_check_pio_status(struct advk_pcie *pcie, u32 *val)
4042 + {
4043 + struct device *dev = &pcie->pdev->dev;
4044 + u32 reg;
4045 +@@ -374,14 +376,49 @@ static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
4046 + status = (reg & PIO_COMPLETION_STATUS_MASK) >>
4047 + PIO_COMPLETION_STATUS_SHIFT;
4048 +
4049 +- if (!status)
4050 +- return;
4051 +-
4052 ++ /*
4053 ++ * According to HW spec, the PIO status check sequence as below:
4054 ++ * 1) even if COMPLETION_STATUS(bit9:7) indicates successful,
4055 ++ * it still needs to check Error Status(bit11), only when this bit
4056 ++ * indicates no error happen, the operation is successful.
4057 ++ * 2) value Unsupported Request(1) of COMPLETION_STATUS(bit9:7) only
4058 ++ * means a PIO write error, and for PIO read it is successful with
4059 ++ * a read value of 0xFFFFFFFF.
4060 ++ * 3) value Completion Retry Status(CRS) of COMPLETION_STATUS(bit9:7)
4061 ++ * only means a PIO write error, and for PIO read it is successful
4062 ++ * with a read value of 0xFFFF0001.
4063 ++ * 4) value Completer Abort (CA) of COMPLETION_STATUS(bit9:7) means
4064 ++ * error for both PIO read and PIO write operation.
4065 ++ * 5) other errors are indicated as 'unknown'.
4066 ++ */
4067 + switch (status) {
4068 ++ case PIO_COMPLETION_STATUS_OK:
4069 ++ if (reg & PIO_ERR_STATUS) {
4070 ++ strcomp_status = "COMP_ERR";
4071 ++ break;
4072 ++ }
4073 ++ /* Get the read result */
4074 ++ if (val)
4075 ++ *val = advk_readl(pcie, PIO_RD_DATA);
4076 ++ /* No error */
4077 ++ strcomp_status = NULL;
4078 ++ break;
4079 + case PIO_COMPLETION_STATUS_UR:
4080 + strcomp_status = "UR";
4081 + break;
4082 + case PIO_COMPLETION_STATUS_CRS:
4083 ++ /* PCIe r4.0, sec 2.3.2, says:
4084 ++ * If CRS Software Visibility is not enabled, the Root Complex
4085 ++ * must re-issue the Configuration Request as a new Request.
4086 ++ * A Root Complex implementation may choose to limit the number
4087 ++ * of Configuration Request/CRS Completion Status loops before
4088 ++ * determining that something is wrong with the target of the
4089 ++ * Request and taking appropriate action, e.g., complete the
4090 ++ * Request to the host as a failed transaction.
4091 ++ *
4092 ++ * To simplify implementation do not re-issue the Configuration
4093 ++ * Request and complete the Request as a failed transaction.
4094 ++ */
4095 + strcomp_status = "CRS";
4096 + break;
4097 + case PIO_COMPLETION_STATUS_CA:
4098 +@@ -392,6 +429,9 @@ static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
4099 + break;
4100 + }
4101 +
4102 ++ if (!strcomp_status)
4103 ++ return 0;
4104 ++
4105 + if (reg & PIO_NON_POSTED_REQ)
4106 + str_posted = "Non-posted";
4107 + else
4108 +@@ -399,6 +439,8 @@ static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
4109 +
4110 + dev_err(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
4111 + str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));
4112 ++
4113 ++ return -EFAULT;
4114 + }
4115 +
4116 + static int advk_pcie_wait_pio(struct advk_pcie *pcie)
4117 +@@ -625,10 +667,13 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
4118 + if (ret < 0)
4119 + return PCIBIOS_SET_FAILED;
4120 +
4121 +- advk_pcie_check_pio_status(pcie);
4122 ++ /* Check PIO status and get the read result */
4123 ++ ret = advk_pcie_check_pio_status(pcie, val);
4124 ++ if (ret < 0) {
4125 ++ *val = 0xffffffff;
4126 ++ return PCIBIOS_SET_FAILED;
4127 ++ }
4128 +
4129 +- /* Get the read result */
4130 +- *val = advk_readl(pcie, PIO_RD_DATA);
4131 + if (size == 1)
4132 + *val = (*val >> (8 * (where & 3))) & 0xff;
4133 + else if (size == 2)
4134 +@@ -692,7 +737,9 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
4135 + if (ret < 0)
4136 + return PCIBIOS_SET_FAILED;
4137 +
4138 +- advk_pcie_check_pio_status(pcie);
4139 ++ ret = advk_pcie_check_pio_status(pcie, NULL);
4140 ++ if (ret < 0)
4141 ++ return PCIBIOS_SET_FAILED;
4142 +
4143 + return PCIBIOS_SUCCESSFUL;
4144 + }
4145 +@@ -766,22 +813,28 @@ static void advk_pcie_irq_mask(struct irq_data *d)
4146 + {
4147 + struct advk_pcie *pcie = d->domain->host_data;
4148 + irq_hw_number_t hwirq = irqd_to_hwirq(d);
4149 ++ unsigned long flags;
4150 + u32 mask;
4151 +
4152 ++ raw_spin_lock_irqsave(&pcie->irq_lock, flags);
4153 + mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
4154 + mask |= PCIE_ISR1_INTX_ASSERT(hwirq);
4155 + advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
4156 ++ raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
4157 + }
4158 +
4159 + static void advk_pcie_irq_unmask(struct irq_data *d)
4160 + {
4161 + struct advk_pcie *pcie = d->domain->host_data;
4162 + irq_hw_number_t hwirq = irqd_to_hwirq(d);
4163 ++ unsigned long flags;
4164 + u32 mask;
4165 +
4166 ++ raw_spin_lock_irqsave(&pcie->irq_lock, flags);
4167 + mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
4168 + mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq);
4169 + advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
4170 ++ raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
4171 + }
4172 +
4173 + static int advk_pcie_irq_map(struct irq_domain *h,
4174 +@@ -865,6 +918,8 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
4175 + struct irq_chip *irq_chip;
4176 + int ret = 0;
4177 +
4178 ++ raw_spin_lock_init(&pcie->irq_lock);
4179 ++
4180 + pcie_intc_node = of_get_next_child(node, NULL);
4181 + if (!pcie_intc_node) {
4182 + dev_err(dev, "No PCIe Intc node found\n");
4183 +diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
4184 +index 45c0f344ccd16..11b046b20b92a 100644
4185 +--- a/drivers/pci/controller/pcie-xilinx-nwl.c
4186 ++++ b/drivers/pci/controller/pcie-xilinx-nwl.c
4187 +@@ -6,6 +6,7 @@
4188 + * (C) Copyright 2014 - 2015, Xilinx, Inc.
4189 + */
4190 +
4191 ++#include <linux/clk.h>
4192 + #include <linux/delay.h>
4193 + #include <linux/interrupt.h>
4194 + #include <linux/irq.h>
4195 +@@ -169,6 +170,7 @@ struct nwl_pcie {
4196 + u8 root_busno;
4197 + struct nwl_msi msi;
4198 + struct irq_domain *legacy_irq_domain;
4199 ++ struct clk *clk;
4200 + raw_spinlock_t leg_mask_lock;
4201 + };
4202 +
4203 +@@ -839,6 +841,16 @@ static int nwl_pcie_probe(struct platform_device *pdev)
4204 + return err;
4205 + }
4206 +
4207 ++ pcie->clk = devm_clk_get(dev, NULL);
4208 ++ if (IS_ERR(pcie->clk))
4209 ++ return PTR_ERR(pcie->clk);
4210 ++
4211 ++ err = clk_prepare_enable(pcie->clk);
4212 ++ if (err) {
4213 ++ dev_err(dev, "can't enable PCIe ref clock\n");
4214 ++ return err;
4215 ++ }
4216 ++
4217 + err = nwl_pcie_bridge_init(pcie);
4218 + if (err) {
4219 + dev_err(dev, "HW Initialization failed\n");
4220 +diff --git a/drivers/pci/hotplug/TODO b/drivers/pci/hotplug/TODO
4221 +index a32070be5adf9..cc6194aa24c15 100644
4222 +--- a/drivers/pci/hotplug/TODO
4223 ++++ b/drivers/pci/hotplug/TODO
4224 +@@ -40,9 +40,6 @@ ibmphp:
4225 +
4226 + * The return value of pci_hp_register() is not checked.
4227 +
4228 +-* iounmap(io_mem) is called in the error path of ebda_rsrc_controller()
4229 +- and once more in the error path of its caller ibmphp_access_ebda().
4230 +-
4231 + * The various slot data structures are difficult to follow and need to be
4232 + simplified. A lot of functions are too large and too complex, they need
4233 + to be broken up into smaller, manageable pieces. Negative examples are
4234 +diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
4235 +index 11a2661dc0627..7fb75401ad8a7 100644
4236 +--- a/drivers/pci/hotplug/ibmphp_ebda.c
4237 ++++ b/drivers/pci/hotplug/ibmphp_ebda.c
4238 +@@ -714,8 +714,7 @@ static int __init ebda_rsrc_controller(void)
4239 + /* init hpc structure */
4240 + hpc_ptr = alloc_ebda_hpc(slot_num, bus_num);
4241 + if (!hpc_ptr) {
4242 +- rc = -ENOMEM;
4243 +- goto error_no_hpc;
4244 ++ return -ENOMEM;
4245 + }
4246 + hpc_ptr->ctlr_id = ctlr_id;
4247 + hpc_ptr->ctlr_relative_id = ctlr;
4248 +@@ -910,8 +909,6 @@ error:
4249 + kfree(tmp_slot);
4250 + error_no_slot:
4251 + free_ebda_hpc(hpc_ptr);
4252 +-error_no_hpc:
4253 +- iounmap(io_mem);
4254 + return rc;
4255 + }
4256 +
4257 +diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
4258 +index 5bb37671a86ad..c8bd243717b7b 100644
4259 +--- a/drivers/pci/msi.c
4260 ++++ b/drivers/pci/msi.c
4261 +@@ -782,6 +782,9 @@ static void msix_mask_all(void __iomem *base, int tsize)
4262 + u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT;
4263 + int i;
4264 +
4265 ++ if (pci_msi_ignore_mask)
4266 ++ return;
4267 ++
4268 + for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE)
4269 + writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
4270 + }
4271 +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
4272 +index 58c33b65d451a..b9550cd4280ca 100644
4273 +--- a/drivers/pci/pci.c
4274 ++++ b/drivers/pci/pci.c
4275 +@@ -224,7 +224,7 @@ static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
4276 +
4277 + *endptr = strchrnul(path, ';');
4278 +
4279 +- wpath = kmemdup_nul(path, *endptr - path, GFP_KERNEL);
4280 ++ wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC);
4281 + if (!wpath)
4282 + return -ENOMEM;
4283 +
4284 +@@ -1672,11 +1672,7 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
4285 + * so that things like MSI message writing will behave as expected
4286 + * (e.g. if the device really is in D0 at enable time).
4287 + */
4288 +- if (dev->pm_cap) {
4289 +- u16 pmcsr;
4290 +- pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
4291 +- dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
4292 +- }
4293 ++ pci_update_current_state(dev, dev->current_state);
4294 +
4295 + if (atomic_inc_return(&dev->enable_cnt) > 1)
4296 + return 0; /* already enabled */
4297 +diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
4298 +index 1b330129089fe..8637f6068f9c2 100644
4299 +--- a/drivers/pci/pcie/portdrv_core.c
4300 ++++ b/drivers/pci/pcie/portdrv_core.c
4301 +@@ -255,8 +255,13 @@ static int get_port_device_capability(struct pci_dev *dev)
4302 + services |= PCIE_PORT_SERVICE_DPC;
4303 +
4304 + if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM ||
4305 +- pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
4306 +- services |= PCIE_PORT_SERVICE_BWNOTIF;
4307 ++ pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) {
4308 ++ u32 linkcap;
4309 ++
4310 ++ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &linkcap);
4311 ++ if (linkcap & PCI_EXP_LNKCAP_LBNC)
4312 ++ services |= PCIE_PORT_SERVICE_BWNOTIF;
4313 ++ }
4314 +
4315 + return services;
4316 + }
4317 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
4318 +index 97c343d31f989..686298c0f6cda 100644
4319 +--- a/drivers/pci/quirks.c
4320 ++++ b/drivers/pci/quirks.c
4321 +@@ -3252,6 +3252,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
4322 + PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
4323 + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
4324 + PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
4325 ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ASMEDIA, 0x0612, fixup_mpss_256);
4326 +
4327 + /*
4328 + * Intel 5000 and 5100 Memory controllers have an erratum with read completion
4329 +@@ -4683,6 +4684,18 @@ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
4330 + PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
4331 + }
4332 +
4333 ++/*
4334 ++ * Each of these NXP Root Ports is in a Root Complex with a unique segment
4335 ++ * number and does provide isolation features to disable peer transactions
4336 ++ * and validate bus numbers in requests, but does not provide an ACS
4337 ++ * capability.
4338 ++ */
4339 ++static int pci_quirk_nxp_rp_acs(struct pci_dev *dev, u16 acs_flags)
4340 ++{
4341 ++ return pci_acs_ctrl_enabled(acs_flags,
4342 ++ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
4343 ++}
4344 ++
4345 + static int pci_quirk_al_acs(struct pci_dev *dev, u16 acs_flags)
4346 + {
4347 + if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
4348 +@@ -4909,6 +4922,10 @@ static const struct pci_dev_acs_enabled {
4349 + { 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */
4350 + /* Cavium ThunderX */
4351 + { PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, pci_quirk_cavium_acs },
4352 ++ /* Cavium multi-function devices */
4353 ++ { PCI_VENDOR_ID_CAVIUM, 0xA026, pci_quirk_mf_endpoint_acs },
4354 ++ { PCI_VENDOR_ID_CAVIUM, 0xA059, pci_quirk_mf_endpoint_acs },
4355 ++ { PCI_VENDOR_ID_CAVIUM, 0xA060, pci_quirk_mf_endpoint_acs },
4356 + /* APM X-Gene */
4357 + { PCI_VENDOR_ID_AMCC, 0xE004, pci_quirk_xgene_acs },
4358 + /* Ampere Computing */
4359 +@@ -4929,6 +4946,39 @@ static const struct pci_dev_acs_enabled {
4360 + { PCI_VENDOR_ID_ZHAOXIN, 0x3038, pci_quirk_mf_endpoint_acs },
4361 + { PCI_VENDOR_ID_ZHAOXIN, 0x3104, pci_quirk_mf_endpoint_acs },
4362 + { PCI_VENDOR_ID_ZHAOXIN, 0x9083, pci_quirk_mf_endpoint_acs },
4363 ++ /* NXP root ports, xx=16, 12, or 08 cores */
4364 ++ /* LX2xx0A : without security features + CAN-FD */
4365 ++ { PCI_VENDOR_ID_NXP, 0x8d81, pci_quirk_nxp_rp_acs },
4366 ++ { PCI_VENDOR_ID_NXP, 0x8da1, pci_quirk_nxp_rp_acs },
4367 ++ { PCI_VENDOR_ID_NXP, 0x8d83, pci_quirk_nxp_rp_acs },
4368 ++ /* LX2xx0C : security features + CAN-FD */
4369 ++ { PCI_VENDOR_ID_NXP, 0x8d80, pci_quirk_nxp_rp_acs },
4370 ++ { PCI_VENDOR_ID_NXP, 0x8da0, pci_quirk_nxp_rp_acs },
4371 ++ { PCI_VENDOR_ID_NXP, 0x8d82, pci_quirk_nxp_rp_acs },
4372 ++ /* LX2xx0E : security features + CAN */
4373 ++ { PCI_VENDOR_ID_NXP, 0x8d90, pci_quirk_nxp_rp_acs },
4374 ++ { PCI_VENDOR_ID_NXP, 0x8db0, pci_quirk_nxp_rp_acs },
4375 ++ { PCI_VENDOR_ID_NXP, 0x8d92, pci_quirk_nxp_rp_acs },
4376 ++ /* LX2xx0N : without security features + CAN */
4377 ++ { PCI_VENDOR_ID_NXP, 0x8d91, pci_quirk_nxp_rp_acs },
4378 ++ { PCI_VENDOR_ID_NXP, 0x8db1, pci_quirk_nxp_rp_acs },
4379 ++ { PCI_VENDOR_ID_NXP, 0x8d93, pci_quirk_nxp_rp_acs },
4380 ++ /* LX2xx2A : without security features + CAN-FD */
4381 ++ { PCI_VENDOR_ID_NXP, 0x8d89, pci_quirk_nxp_rp_acs },
4382 ++ { PCI_VENDOR_ID_NXP, 0x8da9, pci_quirk_nxp_rp_acs },
4383 ++ { PCI_VENDOR_ID_NXP, 0x8d8b, pci_quirk_nxp_rp_acs },
4384 ++ /* LX2xx2C : security features + CAN-FD */
4385 ++ { PCI_VENDOR_ID_NXP, 0x8d88, pci_quirk_nxp_rp_acs },
4386 ++ { PCI_VENDOR_ID_NXP, 0x8da8, pci_quirk_nxp_rp_acs },
4387 ++ { PCI_VENDOR_ID_NXP, 0x8d8a, pci_quirk_nxp_rp_acs },
4388 ++ /* LX2xx2E : security features + CAN */
4389 ++ { PCI_VENDOR_ID_NXP, 0x8d98, pci_quirk_nxp_rp_acs },
4390 ++ { PCI_VENDOR_ID_NXP, 0x8db8, pci_quirk_nxp_rp_acs },
4391 ++ { PCI_VENDOR_ID_NXP, 0x8d9a, pci_quirk_nxp_rp_acs },
4392 ++ /* LX2xx2N : without security features + CAN */
4393 ++ { PCI_VENDOR_ID_NXP, 0x8d99, pci_quirk_nxp_rp_acs },
4394 ++ { PCI_VENDOR_ID_NXP, 0x8db9, pci_quirk_nxp_rp_acs },
4395 ++ { PCI_VENDOR_ID_NXP, 0x8d9b, pci_quirk_nxp_rp_acs },
4396 + /* Zhaoxin Root/Downstream Ports */
4397 + { PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs },
4398 + { 0 }
4399 +@@ -5393,7 +5443,7 @@ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
4400 + PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
4401 +
4402 + /*
4403 +- * Create device link for NVIDIA GPU with integrated USB xHCI Host
4404 ++ * Create device link for GPUs with integrated USB xHCI Host
4405 + * controller to VGA.
4406 + */
4407 + static void quirk_gpu_usb(struct pci_dev *usb)
4408 +@@ -5402,9 +5452,11 @@ static void quirk_gpu_usb(struct pci_dev *usb)
4409 + }
4410 + DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
4411 + PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
4412 ++DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
4413 ++ PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
4414 +
4415 + /*
4416 +- * Create device link for NVIDIA GPU with integrated Type-C UCSI controller
4417 ++ * Create device link for GPUs with integrated Type-C UCSI controller
4418 + * to VGA. Currently there is no class code defined for UCSI device over PCI
4419 + * so using UNKNOWN class for now and it will be updated when UCSI
4420 + * over PCI gets a class code.
4421 +@@ -5417,6 +5469,9 @@ static void quirk_gpu_usb_typec_ucsi(struct pci_dev *ucsi)
4422 + DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
4423 + PCI_CLASS_SERIAL_UNKNOWN, 8,
4424 + quirk_gpu_usb_typec_ucsi);
4425 ++DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
4426 ++ PCI_CLASS_SERIAL_UNKNOWN, 8,
4427 ++ quirk_gpu_usb_typec_ucsi);
4428 +
4429 + /*
4430 + * Enable the NVIDIA GPU integrated HDA controller if the BIOS left it
4431 +diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c
4432 +index 8b003c890b87b..c9f03418e71e0 100644
4433 +--- a/drivers/pci/syscall.c
4434 ++++ b/drivers/pci/syscall.c
4435 +@@ -22,8 +22,10 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn,
4436 + long err;
4437 + int cfg_ret;
4438 +
4439 ++ err = -EPERM;
4440 ++ dev = NULL;
4441 + if (!capable(CAP_SYS_ADMIN))
4442 +- return -EPERM;
4443 ++ goto error;
4444 +
4445 + err = -ENODEV;
4446 + dev = pci_get_domain_bus_and_slot(0, bus, dfn);
4447 +diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
4448 +index 91596eee0bda1..ba078a7098468 100644
4449 +--- a/drivers/pinctrl/pinctrl-ingenic.c
4450 ++++ b/drivers/pinctrl/pinctrl-ingenic.c
4451 +@@ -348,7 +348,7 @@ static const struct ingenic_chip_info jz4725b_chip_info = {
4452 + };
4453 +
4454 + static const u32 jz4760_pull_ups[6] = {
4455 +- 0xffffffff, 0xfffcf3ff, 0xffffffff, 0xffffcfff, 0xfffffb7c, 0xfffff00f,
4456 ++ 0xffffffff, 0xfffcf3ff, 0xffffffff, 0xffffcfff, 0xfffffb7c, 0x0000000f,
4457 + };
4458 +
4459 + static const u32 jz4760_pull_downs[6] = {
4460 +@@ -611,11 +611,11 @@ static const struct ingenic_chip_info jz4760b_chip_info = {
4461 + };
4462 +
4463 + static const u32 jz4770_pull_ups[6] = {
4464 +- 0x3fffffff, 0xfff0030c, 0xffffffff, 0xffff4fff, 0xfffffb7c, 0xffa7f00f,
4465 ++ 0x3fffffff, 0xfff0f3fc, 0xffffffff, 0xffff4fff, 0xfffffb7c, 0x0024f00f,
4466 + };
4467 +
4468 + static const u32 jz4770_pull_downs[6] = {
4469 +- 0x00000000, 0x000f0c03, 0x00000000, 0x0000b000, 0x00000483, 0x00580ff0,
4470 ++ 0x00000000, 0x000f0c03, 0x00000000, 0x0000b000, 0x00000483, 0x005b0ff0,
4471 + };
4472 +
4473 + static int jz4770_uart0_data_pins[] = { 0xa0, 0xa3, };
4474 +diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
4475 +index a9d511982780c..fb1c8965cb991 100644
4476 +--- a/drivers/pinctrl/pinctrl-single.c
4477 ++++ b/drivers/pinctrl/pinctrl-single.c
4478 +@@ -1201,6 +1201,7 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
4479 +
4480 + if (PCS_HAS_PINCONF) {
4481 + dev_err(pcs->dev, "pinconf not supported\n");
4482 ++ res = -ENOTSUPP;
4483 + goto free_pingroups;
4484 + }
4485 +
4486 +diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c
4487 +index ccdf0bb214149..835c14bb315bc 100644
4488 +--- a/drivers/pinctrl/pinctrl-stmfx.c
4489 ++++ b/drivers/pinctrl/pinctrl-stmfx.c
4490 +@@ -540,7 +540,7 @@ static irqreturn_t stmfx_pinctrl_irq_thread_fn(int irq, void *dev_id)
4491 + u8 pending[NR_GPIO_REGS];
4492 + u8 src[NR_GPIO_REGS] = {0, 0, 0};
4493 + unsigned long n, status;
4494 +- int ret;
4495 ++ int i, ret;
4496 +
4497 + ret = regmap_bulk_read(pctl->stmfx->map, STMFX_REG_IRQ_GPI_PENDING,
4498 + &pending, NR_GPIO_REGS);
4499 +@@ -550,7 +550,9 @@ static irqreturn_t stmfx_pinctrl_irq_thread_fn(int irq, void *dev_id)
4500 + regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_SRC,
4501 + src, NR_GPIO_REGS);
4502 +
4503 +- status = *(unsigned long *)pending;
4504 ++ BUILD_BUG_ON(NR_GPIO_REGS > sizeof(status));
4505 ++ for (i = 0, status = 0; i < NR_GPIO_REGS; i++)
4506 ++ status |= (unsigned long)pending[i] << (i * 8);
4507 + for_each_set_bit(n, &status, gc->ngpio) {
4508 + handle_nested_irq(irq_find_mapping(gc->irq.domain, n));
4509 + stmfx_pinctrl_irq_toggle_trigger(pctl, n);
4510 +diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
4511 +index f26574ef234ab..601fffeba39fe 100644
4512 +--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
4513 ++++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
4514 +@@ -918,7 +918,7 @@ static int samsung_pinctrl_register(struct platform_device *pdev,
4515 + pin_bank->grange.pin_base = drvdata->pin_base
4516 + + pin_bank->pin_base;
4517 + pin_bank->grange.base = pin_bank->grange.pin_base;
4518 +- pin_bank->grange.npins = pin_bank->gpio_chip.ngpio;
4519 ++ pin_bank->grange.npins = pin_bank->nr_pins;
4520 + pin_bank->grange.gc = &pin_bank->gpio_chip;
4521 + pinctrl_add_gpio_range(drvdata->pctl_dev, &pin_bank->grange);
4522 + }
4523 +diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
4524 +index f659f96bda128..9b575e9dd71c5 100644
4525 +--- a/drivers/platform/chrome/cros_ec_proto.c
4526 ++++ b/drivers/platform/chrome/cros_ec_proto.c
4527 +@@ -213,6 +213,15 @@ static int cros_ec_host_command_proto_query(struct cros_ec_device *ec_dev,
4528 + msg->insize = sizeof(struct ec_response_get_protocol_info);
4529 +
4530 + ret = send_command(ec_dev, msg);
4531 ++ /*
4532 ++ * Send command once again when timeout occurred.
4533 ++ * Fingerprint MCU (FPMCU) is restarted during system boot which
4534 ++ * introduces small window in which FPMCU won't respond for any
4535 ++ * messages sent by kernel. There is no need to wait before next
4536 ++ * attempt because we waited at least EC_MSG_DEADLINE_MS.
4537 ++ */
4538 ++ if (ret == -ETIMEDOUT)
4539 ++ ret = send_command(ec_dev, msg);
4540 +
4541 + if (ret < 0) {
4542 + dev_dbg(ec_dev->dev,
4543 +diff --git a/drivers/platform/x86/dell-smbios-wmi.c b/drivers/platform/x86/dell-smbios-wmi.c
4544 +index c97bd4a452422..5821e9d9a4ce4 100644
4545 +--- a/drivers/platform/x86/dell-smbios-wmi.c
4546 ++++ b/drivers/platform/x86/dell-smbios-wmi.c
4547 +@@ -69,6 +69,7 @@ static int run_smbios_call(struct wmi_device *wdev)
4548 + if (obj->type == ACPI_TYPE_INTEGER)
4549 + dev_dbg(&wdev->dev, "SMBIOS call failed: %llu\n",
4550 + obj->integer.value);
4551 ++ kfree(output.pointer);
4552 + return -EIO;
4553 + }
4554 + memcpy(&priv->buf->std, obj->buffer.pointer, obj->buffer.length);
4555 +diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c
4556 +index ab4740c3bf573..f8f8207a1895e 100644
4557 +--- a/drivers/power/supply/max17042_battery.c
4558 ++++ b/drivers/power/supply/max17042_battery.c
4559 +@@ -842,8 +842,12 @@ static irqreturn_t max17042_thread_handler(int id, void *dev)
4560 + {
4561 + struct max17042_chip *chip = dev;
4562 + u32 val;
4563 ++ int ret;
4564 ++
4565 ++ ret = regmap_read(chip->regmap, MAX17042_STATUS, &val);
4566 ++ if (ret)
4567 ++ return IRQ_HANDLED;
4568 +
4569 +- regmap_read(chip->regmap, MAX17042_STATUS, &val);
4570 + if ((val & STATUS_INTR_SOCMIN_BIT) ||
4571 + (val & STATUS_INTR_SOCMAX_BIT)) {
4572 + dev_info(&chip->client->dev, "SOC threshold INTR\n");
4573 +diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
4574 +index 2c0467a9e7179..8d1b1fda62dd1 100644
4575 +--- a/drivers/rtc/rtc-tps65910.c
4576 ++++ b/drivers/rtc/rtc-tps65910.c
4577 +@@ -460,6 +460,6 @@ static struct platform_driver tps65910_rtc_driver = {
4578 + };
4579 +
4580 + module_platform_driver(tps65910_rtc_driver);
4581 +-MODULE_ALIAS("platform:rtc-tps65910");
4582 ++MODULE_ALIAS("platform:tps65910-rtc");
4583 + MODULE_AUTHOR("Venu Byravarasu <vbyravarasu@××××××.com>");
4584 + MODULE_LICENSE("GPL");
4585 +diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
4586 +index cc5e84b80c699..faa3a4b8ed91d 100644
4587 +--- a/drivers/s390/char/sclp_early.c
4588 ++++ b/drivers/s390/char/sclp_early.c
4589 +@@ -40,13 +40,14 @@ static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb)
4590 + sclp.has_gisaf = !!(sccb->fac118 & 0x08);
4591 + sclp.has_hvs = !!(sccb->fac119 & 0x80);
4592 + sclp.has_kss = !!(sccb->fac98 & 0x01);
4593 +- sclp.has_sipl = !!(sccb->cbl & 0x4000);
4594 + if (sccb->fac85 & 0x02)
4595 + S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
4596 + if (sccb->fac91 & 0x40)
4597 + S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_GUEST;
4598 + if (sccb->cpuoff > 134)
4599 + sclp.has_diag318 = !!(sccb->byte_134 & 0x80);
4600 ++ if (sccb->cpuoff > 137)
4601 ++ sclp.has_sipl = !!(sccb->cbl & 0x4000);
4602 + sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
4603 + sclp.rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
4604 + sclp.rzm <<= 20;
4605 +diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
4606 +index 6e988233fb81f..6a54556119dd6 100644
4607 +--- a/drivers/scsi/BusLogic.c
4608 ++++ b/drivers/scsi/BusLogic.c
4609 +@@ -3601,7 +3601,7 @@ static void blogic_msg(enum blogic_msglevel msglevel, char *fmt,
4610 + if (buf[0] != '\n' || len > 1)
4611 + printk("%sscsi%d: %s", blogic_msglevelmap[msglevel], adapter->host_no, buf);
4612 + } else
4613 +- printk("%s", buf);
4614 ++ pr_cont("%s", buf);
4615 + } else {
4616 + if (begin) {
4617 + if (adapter != NULL && adapter->adapter_initd)
4618 +@@ -3609,7 +3609,7 @@ static void blogic_msg(enum blogic_msglevel msglevel, char *fmt,
4619 + else
4620 + printk("%s%s", blogic_msglevelmap[msglevel], buf);
4621 + } else
4622 +- printk("%s", buf);
4623 ++ pr_cont("%s", buf);
4624 + }
4625 + begin = (buf[len - 1] == '\n');
4626 + }
4627 +diff --git a/drivers/scsi/pcmcia/fdomain_cs.c b/drivers/scsi/pcmcia/fdomain_cs.c
4628 +index e42acf314d068..33df6a9ba9b5f 100644
4629 +--- a/drivers/scsi/pcmcia/fdomain_cs.c
4630 ++++ b/drivers/scsi/pcmcia/fdomain_cs.c
4631 +@@ -45,8 +45,10 @@ static int fdomain_probe(struct pcmcia_device *link)
4632 + goto fail_disable;
4633 +
4634 + if (!request_region(link->resource[0]->start, FDOMAIN_REGION_SIZE,
4635 +- "fdomain_cs"))
4636 ++ "fdomain_cs")) {
4637 ++ ret = -EBUSY;
4638 + goto fail_disable;
4639 ++ }
4640 +
4641 + sh = fdomain_create(link->resource[0]->start, link->irq, 7, &link->dev);
4642 + if (!sh) {
4643 +diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
4644 +index 7a6306f8483ec..c95e04cc64240 100644
4645 +--- a/drivers/scsi/qedf/qedf_main.c
4646 ++++ b/drivers/scsi/qedf/qedf_main.c
4647 +@@ -2894,7 +2894,7 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
4648 + {
4649 + u32 *list;
4650 + int i;
4651 +- int status = 0, rc;
4652 ++ int status;
4653 + u32 *pbl;
4654 + dma_addr_t page;
4655 + int num_pages;
4656 +@@ -2906,7 +2906,7 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
4657 + */
4658 + if (!qedf->num_queues) {
4659 + QEDF_ERR(&(qedf->dbg_ctx), "No MSI-X vectors available!\n");
4660 +- return 1;
4661 ++ return -ENOMEM;
4662 + }
4663 +
4664 + /*
4665 +@@ -2914,7 +2914,7 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
4666 + * addresses of our queues
4667 + */
4668 + if (!qedf->p_cpuq) {
4669 +- status = 1;
4670 ++ status = -EINVAL;
4671 + QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n");
4672 + goto mem_alloc_failure;
4673 + }
4674 +@@ -2930,8 +2930,8 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
4675 + "qedf->global_queues=%p.\n", qedf->global_queues);
4676 +
4677 + /* Allocate DMA coherent buffers for BDQ */
4678 +- rc = qedf_alloc_bdq(qedf);
4679 +- if (rc) {
4680 ++ status = qedf_alloc_bdq(qedf);
4681 ++ if (status) {
4682 + QEDF_ERR(&qedf->dbg_ctx, "Unable to allocate bdq.\n");
4683 + goto mem_alloc_failure;
4684 + }
4685 +diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
4686 +index 1ec42c5f0b2a0..92c4a367b7bd7 100644
4687 +--- a/drivers/scsi/qedi/qedi_main.c
4688 ++++ b/drivers/scsi/qedi/qedi_main.c
4689 +@@ -1553,7 +1553,7 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
4690 + {
4691 + u32 *list;
4692 + int i;
4693 +- int status = 0, rc;
4694 ++ int status;
4695 + u32 *pbl;
4696 + dma_addr_t page;
4697 + int num_pages;
4698 +@@ -1564,14 +1564,14 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
4699 + */
4700 + if (!qedi->num_queues) {
4701 + QEDI_ERR(&qedi->dbg_ctx, "No MSI-X vectors available!\n");
4702 +- return 1;
4703 ++ return -ENOMEM;
4704 + }
4705 +
4706 + /* Make sure we allocated the PBL that will contain the physical
4707 + * addresses of our queues
4708 + */
4709 + if (!qedi->p_cpuq) {
4710 +- status = 1;
4711 ++ status = -EINVAL;
4712 + goto mem_alloc_failure;
4713 + }
4714 +
4715 +@@ -1586,13 +1586,13 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
4716 + "qedi->global_queues=%p.\n", qedi->global_queues);
4717 +
4718 + /* Allocate DMA coherent buffers for BDQ */
4719 +- rc = qedi_alloc_bdq(qedi);
4720 +- if (rc)
4721 ++ status = qedi_alloc_bdq(qedi);
4722 ++ if (status)
4723 + goto mem_alloc_failure;
4724 +
4725 + /* Allocate DMA coherent buffers for NVM_ISCSI_CFG */
4726 +- rc = qedi_alloc_nvm_iscsi_cfg(qedi);
4727 +- if (rc)
4728 ++ status = qedi_alloc_nvm_iscsi_cfg(qedi);
4729 ++ if (status)
4730 + goto mem_alloc_failure;
4731 +
4732 + /* Allocate a CQ and an associated PBL for each MSI-X
4733 +diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
4734 +index 11656e864fca9..97453c12b7358 100644
4735 +--- a/drivers/scsi/qla2xxx/qla_nvme.c
4736 ++++ b/drivers/scsi/qla2xxx/qla_nvme.c
4737 +@@ -84,8 +84,9 @@ static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
4738 + struct qla_hw_data *ha;
4739 + struct qla_qpair *qpair;
4740 +
4741 +- if (!qidx)
4742 +- qidx++;
4743 ++ /* Map admin queue and 1st IO queue to index 0 */
4744 ++ if (qidx)
4745 ++ qidx--;
4746 +
4747 + vha = (struct scsi_qla_host *)lport->private;
4748 + ha = vha->hw;
4749 +diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
4750 +index 052ce78814075..28cbefe715e59 100644
4751 +--- a/drivers/scsi/qla2xxx/qla_os.c
4752 ++++ b/drivers/scsi/qla2xxx/qla_os.c
4753 +@@ -15,6 +15,7 @@
4754 + #include <linux/slab.h>
4755 + #include <linux/blk-mq-pci.h>
4756 + #include <linux/refcount.h>
4757 ++#include <linux/crash_dump.h>
4758 +
4759 + #include <scsi/scsi_tcq.h>
4760 + #include <scsi/scsicam.h>
4761 +@@ -2799,6 +2800,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4762 + return ret;
4763 + }
4764 +
4765 ++ if (is_kdump_kernel()) {
4766 ++ ql2xmqsupport = 0;
4767 ++ ql2xallocfwdump = 0;
4768 ++ }
4769 ++
4770 + /* This may fail but that's ok */
4771 + pci_enable_pcie_error_reporting(pdev);
4772 +
4773 +diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
4774 +index 9bc451004184f..80ff00025c03d 100644
4775 +--- a/drivers/scsi/smartpqi/smartpqi_init.c
4776 ++++ b/drivers/scsi/smartpqi/smartpqi_init.c
4777 +@@ -1192,6 +1192,7 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
4778 + "Requested %d bytes, received %d bytes",
4779 + raid_map_size,
4780 + get_unaligned_le32(&raid_map->structure_size));
4781 ++ rc = -EINVAL;
4782 + goto error;
4783 + }
4784 + }
4785 +diff --git a/drivers/soc/aspeed/aspeed-lpc-ctrl.c b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
4786 +index 01ed21e8bfee5..040c7dc1d4792 100644
4787 +--- a/drivers/soc/aspeed/aspeed-lpc-ctrl.c
4788 ++++ b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
4789 +@@ -46,7 +46,7 @@ static int aspeed_lpc_ctrl_mmap(struct file *file, struct vm_area_struct *vma)
4790 + unsigned long vsize = vma->vm_end - vma->vm_start;
4791 + pgprot_t prot = vma->vm_page_prot;
4792 +
4793 +- if (vma->vm_pgoff + vsize > lpc_ctrl->mem_base + lpc_ctrl->mem_size)
4794 ++ if (vma->vm_pgoff + vma_pages(vma) > lpc_ctrl->mem_size >> PAGE_SHIFT)
4795 + return -EINVAL;
4796 +
4797 + /* ast2400/2500 AHB accesses are not cache coherent */
4798 +diff --git a/drivers/soc/aspeed/aspeed-p2a-ctrl.c b/drivers/soc/aspeed/aspeed-p2a-ctrl.c
4799 +index b60fbeaffcbd0..20b5fb2a207cc 100644
4800 +--- a/drivers/soc/aspeed/aspeed-p2a-ctrl.c
4801 ++++ b/drivers/soc/aspeed/aspeed-p2a-ctrl.c
4802 +@@ -110,7 +110,7 @@ static int aspeed_p2a_mmap(struct file *file, struct vm_area_struct *vma)
4803 + vsize = vma->vm_end - vma->vm_start;
4804 + prot = vma->vm_page_prot;
4805 +
4806 +- if (vma->vm_pgoff + vsize > ctrl->mem_base + ctrl->mem_size)
4807 ++ if (vma->vm_pgoff + vma_pages(vma) > ctrl->mem_size >> PAGE_SHIFT)
4808 + return -EINVAL;
4809 +
4810 + /* ast2400/2500 AHB accesses are not cache coherent */
4811 +diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
4812 +index 33a27e6c6d67d..45c5aa712edac 100644
4813 +--- a/drivers/soc/qcom/qcom_aoss.c
4814 ++++ b/drivers/soc/qcom/qcom_aoss.c
4815 +@@ -472,12 +472,12 @@ static int qmp_cooling_device_add(struct qmp *qmp,
4816 + static int qmp_cooling_devices_register(struct qmp *qmp)
4817 + {
4818 + struct device_node *np, *child;
4819 +- int count = QMP_NUM_COOLING_RESOURCES;
4820 ++ int count = 0;
4821 + int ret;
4822 +
4823 + np = qmp->dev->of_node;
4824 +
4825 +- qmp->cooling_devs = devm_kcalloc(qmp->dev, count,
4826 ++ qmp->cooling_devs = devm_kcalloc(qmp->dev, QMP_NUM_COOLING_RESOURCES,
4827 + sizeof(*qmp->cooling_devs),
4828 + GFP_KERNEL);
4829 +
4830 +@@ -493,12 +493,16 @@ static int qmp_cooling_devices_register(struct qmp *qmp)
4831 + goto unroll;
4832 + }
4833 +
4834 ++ if (!count)
4835 ++ devm_kfree(qmp->dev, qmp->cooling_devs);
4836 ++
4837 + return 0;
4838 +
4839 + unroll:
4840 + while (--count >= 0)
4841 + thermal_cooling_device_unregister
4842 + (qmp->cooling_devs[count].cdev);
4843 ++ devm_kfree(qmp->dev, qmp->cooling_devs);
4844 +
4845 + return ret;
4846 + }
4847 +diff --git a/drivers/staging/board/board.c b/drivers/staging/board/board.c
4848 +index cb6feb34dd401..f980af0373452 100644
4849 +--- a/drivers/staging/board/board.c
4850 ++++ b/drivers/staging/board/board.c
4851 +@@ -136,6 +136,7 @@ int __init board_staging_register_clock(const struct board_staging_clk *bsc)
4852 + static int board_staging_add_dev_domain(struct platform_device *pdev,
4853 + const char *domain)
4854 + {
4855 ++ struct device *dev = &pdev->dev;
4856 + struct of_phandle_args pd_args;
4857 + struct device_node *np;
4858 +
4859 +@@ -148,7 +149,11 @@ static int board_staging_add_dev_domain(struct platform_device *pdev,
4860 + pd_args.np = np;
4861 + pd_args.args_count = 0;
4862 +
4863 +- return of_genpd_add_device(&pd_args, &pdev->dev);
4864 ++ /* Initialization similar to device_pm_init_common() */
4865 ++ spin_lock_init(&dev->power.lock);
4866 ++ dev->power.early_init = true;
4867 ++
4868 ++ return of_genpd_add_device(&pd_args, dev);
4869 + }
4870 + #else
4871 + static inline int board_staging_add_dev_domain(struct platform_device *pdev,
4872 +diff --git a/drivers/staging/ks7010/ks7010_sdio.c b/drivers/staging/ks7010/ks7010_sdio.c
4873 +index 4b379542ecd50..3fbe223d59b8e 100644
4874 +--- a/drivers/staging/ks7010/ks7010_sdio.c
4875 ++++ b/drivers/staging/ks7010/ks7010_sdio.c
4876 +@@ -938,9 +938,9 @@ static void ks7010_private_init(struct ks_wlan_private *priv,
4877 + memset(&priv->wstats, 0, sizeof(priv->wstats));
4878 +
4879 + /* sleep mode */
4880 ++ atomic_set(&priv->sleepstatus.status, 0);
4881 + atomic_set(&priv->sleepstatus.doze_request, 0);
4882 + atomic_set(&priv->sleepstatus.wakeup_request, 0);
4883 +- atomic_set(&priv->sleepstatus.wakeup_request, 0);
4884 +
4885 + trx_device_init(priv);
4886 + hostif_init(priv);
4887 +diff --git a/drivers/staging/rts5208/rtsx_scsi.c b/drivers/staging/rts5208/rtsx_scsi.c
4888 +index 1deb74112ad43..11d9d9155eef2 100644
4889 +--- a/drivers/staging/rts5208/rtsx_scsi.c
4890 ++++ b/drivers/staging/rts5208/rtsx_scsi.c
4891 +@@ -2802,10 +2802,10 @@ static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4892 + }
4893 +
4894 + if (dev_info_id == 0x15) {
4895 +- buf_len = 0x3A;
4896 ++ buf_len = 0x3C;
4897 + data_len = 0x3A;
4898 + } else {
4899 +- buf_len = 0x6A;
4900 ++ buf_len = 0x6C;
4901 + data_len = 0x6A;
4902 + }
4903 +
4904 +@@ -2855,11 +2855,7 @@ static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip)
4905 + }
4906 +
4907 + rtsx_stor_set_xfer_buf(buf, buf_len, srb);
4908 +-
4909 +- if (dev_info_id == 0x15)
4910 +- scsi_set_resid(srb, scsi_bufflen(srb) - 0x3C);
4911 +- else
4912 +- scsi_set_resid(srb, scsi_bufflen(srb) - 0x6C);
4913 ++ scsi_set_resid(srb, scsi_bufflen(srb) - buf_len);
4914 +
4915 + kfree(buf);
4916 + return STATUS_SUCCESS;
4917 +diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
4918 +index 596ad3edec9c0..48fabece76443 100644
4919 +--- a/drivers/target/target_core_xcopy.c
4920 ++++ b/drivers/target/target_core_xcopy.c
4921 +@@ -533,7 +533,6 @@ void target_xcopy_release_pt(void)
4922 + * @cdb: SCSI CDB to be copied into @xpt_cmd.
4923 + * @remote_port: If false, use the LUN through which the XCOPY command has
4924 + * been received. If true, use @se_dev->xcopy_lun.
4925 +- * @alloc_mem: Whether or not to allocate an SGL list.
4926 + *
4927 + * Set up a SCSI command (READ or WRITE) that will be used to execute an
4928 + * XCOPY command.
4929 +@@ -543,12 +542,9 @@ static int target_xcopy_setup_pt_cmd(
4930 + struct xcopy_op *xop,
4931 + struct se_device *se_dev,
4932 + unsigned char *cdb,
4933 +- bool remote_port,
4934 +- bool alloc_mem)
4935 ++ bool remote_port)
4936 + {
4937 + struct se_cmd *cmd = &xpt_cmd->se_cmd;
4938 +- sense_reason_t sense_rc;
4939 +- int ret = 0, rc;
4940 +
4941 + /*
4942 + * Setup LUN+port to honor reservations based upon xop->op_origin for
4943 +@@ -564,46 +560,17 @@ static int target_xcopy_setup_pt_cmd(
4944 + cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
4945 +
4946 + cmd->tag = 0;
4947 +- sense_rc = target_setup_cmd_from_cdb(cmd, cdb);
4948 +- if (sense_rc) {
4949 +- ret = -EINVAL;
4950 +- goto out;
4951 +- }
4952 ++ if (target_setup_cmd_from_cdb(cmd, cdb))
4953 ++ return -EINVAL;
4954 +
4955 +- if (alloc_mem) {
4956 +- rc = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
4957 +- cmd->data_length, false, false);
4958 +- if (rc < 0) {
4959 +- ret = rc;
4960 +- goto out;
4961 +- }
4962 +- /*
4963 +- * Set this bit so that transport_free_pages() allows the
4964 +- * caller to release SGLs + physical memory allocated by
4965 +- * transport_generic_get_mem()..
4966 +- */
4967 +- cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
4968 +- } else {
4969 +- /*
4970 +- * Here the previously allocated SGLs for the internal READ
4971 +- * are mapped zero-copy to the internal WRITE.
4972 +- */
4973 +- sense_rc = transport_generic_map_mem_to_cmd(cmd,
4974 +- xop->xop_data_sg, xop->xop_data_nents,
4975 +- NULL, 0);
4976 +- if (sense_rc) {
4977 +- ret = -EINVAL;
4978 +- goto out;
4979 +- }
4980 ++ if (transport_generic_map_mem_to_cmd(cmd, xop->xop_data_sg,
4981 ++ xop->xop_data_nents, NULL, 0))
4982 ++ return -EINVAL;
4983 +
4984 +- pr_debug("Setup PASSTHROUGH_NOALLOC t_data_sg: %p t_data_nents:"
4985 +- " %u\n", cmd->t_data_sg, cmd->t_data_nents);
4986 +- }
4987 ++ pr_debug("Setup PASSTHROUGH_NOALLOC t_data_sg: %p t_data_nents:"
4988 ++ " %u\n", cmd->t_data_sg, cmd->t_data_nents);
4989 +
4990 + return 0;
4991 +-
4992 +-out:
4993 +- return ret;
4994 + }
4995 +
4996 + static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd)
4997 +@@ -660,15 +627,13 @@ static int target_xcopy_read_source(
4998 + xop->src_pt_cmd = xpt_cmd;
4999 +
5000 + rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
5001 +- remote_port, true);
5002 ++ remote_port);
5003 + if (rc < 0) {
5004 + ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
5005 + transport_generic_free_cmd(se_cmd, 0);
5006 + return rc;
5007 + }
5008 +
5009 +- xop->xop_data_sg = se_cmd->t_data_sg;
5010 +- xop->xop_data_nents = se_cmd->t_data_nents;
5011 + pr_debug("XCOPY-READ: Saved xop->xop_data_sg: %p, num: %u for READ"
5012 + " memory\n", xop->xop_data_sg, xop->xop_data_nents);
5013 +
5014 +@@ -678,12 +643,6 @@ static int target_xcopy_read_source(
5015 + transport_generic_free_cmd(se_cmd, 0);
5016 + return rc;
5017 + }
5018 +- /*
5019 +- * Clear off the allocated t_data_sg, that has been saved for
5020 +- * zero-copy WRITE submission reuse in struct xcopy_op..
5021 +- */
5022 +- se_cmd->t_data_sg = NULL;
5023 +- se_cmd->t_data_nents = 0;
5024 +
5025 + return 0;
5026 + }
5027 +@@ -722,19 +681,9 @@ static int target_xcopy_write_destination(
5028 + xop->dst_pt_cmd = xpt_cmd;
5029 +
5030 + rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0],
5031 +- remote_port, false);
5032 ++ remote_port);
5033 + if (rc < 0) {
5034 +- struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd;
5035 + ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
5036 +- /*
5037 +- * If the failure happened before the t_mem_list hand-off in
5038 +- * target_xcopy_setup_pt_cmd(), Reset memory + clear flag so that
5039 +- * core releases this memory on error during X-COPY WRITE I/O.
5040 +- */
5041 +- src_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
5042 +- src_cmd->t_data_sg = xop->xop_data_sg;
5043 +- src_cmd->t_data_nents = xop->xop_data_nents;
5044 +-
5045 + transport_generic_free_cmd(se_cmd, 0);
5046 + return rc;
5047 + }
5048 +@@ -742,7 +691,6 @@ static int target_xcopy_write_destination(
5049 + rc = target_xcopy_issue_pt_cmd(xpt_cmd);
5050 + if (rc < 0) {
5051 + ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
5052 +- se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
5053 + transport_generic_free_cmd(se_cmd, 0);
5054 + return rc;
5055 + }
5056 +@@ -758,7 +706,7 @@ static void target_xcopy_do_work(struct work_struct *work)
5057 + sector_t src_lba, dst_lba, end_lba;
5058 + unsigned int max_sectors;
5059 + int rc = 0;
5060 +- unsigned short nolb, cur_nolb, max_nolb, copied_nolb = 0;
5061 ++ unsigned short nolb, max_nolb, copied_nolb = 0;
5062 +
5063 + if (target_parse_xcopy_cmd(xop) != TCM_NO_SENSE)
5064 + goto err_free;
5065 +@@ -788,7 +736,23 @@ static void target_xcopy_do_work(struct work_struct *work)
5066 + (unsigned long long)src_lba, (unsigned long long)dst_lba);
5067 +
5068 + while (src_lba < end_lba) {
5069 +- cur_nolb = min(nolb, max_nolb);
5070 ++ unsigned short cur_nolb = min(nolb, max_nolb);
5071 ++ u32 cur_bytes = cur_nolb * src_dev->dev_attrib.block_size;
5072 ++
5073 ++ if (cur_bytes != xop->xop_data_bytes) {
5074 ++ /*
5075 ++ * (Re)allocate a buffer large enough to hold the XCOPY
5076 ++ * I/O size, which can be reused each read / write loop.
5077 ++ */
5078 ++ target_free_sgl(xop->xop_data_sg, xop->xop_data_nents);
5079 ++ rc = target_alloc_sgl(&xop->xop_data_sg,
5080 ++ &xop->xop_data_nents,
5081 ++ cur_bytes,
5082 ++ false, false);
5083 ++ if (rc < 0)
5084 ++ goto out;
5085 ++ xop->xop_data_bytes = cur_bytes;
5086 ++ }
5087 +
5088 + pr_debug("target_xcopy_do_work: Calling read src_dev: %p src_lba: %llu,"
5089 + " cur_nolb: %hu\n", src_dev, (unsigned long long)src_lba, cur_nolb);
5090 +@@ -819,12 +783,11 @@ static void target_xcopy_do_work(struct work_struct *work)
5091 + nolb -= cur_nolb;
5092 +
5093 + transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
5094 +- xop->dst_pt_cmd->se_cmd.se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
5095 +-
5096 + transport_generic_free_cmd(&xop->dst_pt_cmd->se_cmd, 0);
5097 + }
5098 +
5099 + xcopy_pt_undepend_remotedev(xop);
5100 ++ target_free_sgl(xop->xop_data_sg, xop->xop_data_nents);
5101 + kfree(xop);
5102 +
5103 + pr_debug("target_xcopy_do_work: Final src_lba: %llu, dst_lba: %llu\n",
5104 +@@ -838,6 +801,7 @@ static void target_xcopy_do_work(struct work_struct *work)
5105 +
5106 + out:
5107 + xcopy_pt_undepend_remotedev(xop);
5108 ++ target_free_sgl(xop->xop_data_sg, xop->xop_data_nents);
5109 +
5110 + err_free:
5111 + kfree(xop);
5112 +diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h
5113 +index 974bc1e19ff2b..a1805a14eea07 100644
5114 +--- a/drivers/target/target_core_xcopy.h
5115 ++++ b/drivers/target/target_core_xcopy.h
5116 +@@ -41,6 +41,7 @@ struct xcopy_op {
5117 + struct xcopy_pt_cmd *src_pt_cmd;
5118 + struct xcopy_pt_cmd *dst_pt_cmd;
5119 +
5120 ++ u32 xop_data_bytes;
5121 + u32 xop_data_nents;
5122 + struct scatterlist *xop_data_sg;
5123 + struct work_struct xop_work;
5124 +diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
5125 +index 66f95f758be05..73226337f5610 100644
5126 +--- a/drivers/tty/hvc/hvsi.c
5127 ++++ b/drivers/tty/hvc/hvsi.c
5128 +@@ -1038,7 +1038,7 @@ static const struct tty_operations hvsi_ops = {
5129 +
5130 + static int __init hvsi_init(void)
5131 + {
5132 +- int i;
5133 ++ int i, ret;
5134 +
5135 + hvsi_driver = alloc_tty_driver(hvsi_count);
5136 + if (!hvsi_driver)
5137 +@@ -1069,12 +1069,25 @@ static int __init hvsi_init(void)
5138 + }
5139 + hvsi_wait = wait_for_state; /* irqs active now */
5140 +
5141 +- if (tty_register_driver(hvsi_driver))
5142 +- panic("Couldn't register hvsi console driver\n");
5143 ++ ret = tty_register_driver(hvsi_driver);
5144 ++ if (ret) {
5145 ++ pr_err("Couldn't register hvsi console driver\n");
5146 ++ goto err_free_irq;
5147 ++ }
5148 +
5149 + printk(KERN_DEBUG "HVSI: registered %i devices\n", hvsi_count);
5150 +
5151 + return 0;
5152 ++err_free_irq:
5153 ++ hvsi_wait = poll_for_state;
5154 ++ for (i = 0; i < hvsi_count; i++) {
5155 ++ struct hvsi_struct *hp = &hvsi_ports[i];
5156 ++
5157 ++ free_irq(hp->virq, hp);
5158 ++ }
5159 ++ tty_driver_kref_put(hvsi_driver);
5160 ++
5161 ++ return ret;
5162 + }
5163 + device_initcall(hvsi_init);
5164 +
5165 +diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
5166 +index 43fc5b6a25d35..a2bb103f22fc6 100644
5167 +--- a/drivers/tty/serial/8250/8250_pci.c
5168 ++++ b/drivers/tty/serial/8250/8250_pci.c
5169 +@@ -89,7 +89,7 @@ static void moan_device(const char *str, struct pci_dev *dev)
5170 +
5171 + static int
5172 + setup_port(struct serial_private *priv, struct uart_8250_port *port,
5173 +- int bar, int offset, int regshift)
5174 ++ u8 bar, unsigned int offset, int regshift)
5175 + {
5176 + struct pci_dev *dev = priv->dev;
5177 +
5178 +diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
5179 +index 8a7c6d65f10ef..777ef1a9591c0 100644
5180 +--- a/drivers/tty/serial/8250/8250_port.c
5181 ++++ b/drivers/tty/serial/8250/8250_port.c
5182 +@@ -125,7 +125,8 @@ static const struct serial8250_config uart_config[] = {
5183 + .name = "16C950/954",
5184 + .fifo_size = 128,
5185 + .tx_loadsz = 128,
5186 +- .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
5187 ++ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01,
5188 ++ .rxtrig_bytes = {16, 32, 112, 120},
5189 + /* UART_CAP_EFR breaks billionon CF bluetooth card. */
5190 + .flags = UART_CAP_FIFO | UART_CAP_SLEEP,
5191 + },
5192 +diff --git a/drivers/tty/serial/jsm/jsm_neo.c b/drivers/tty/serial/jsm/jsm_neo.c
5193 +index bf0e2a4cb0cef..c6f927a76c3be 100644
5194 +--- a/drivers/tty/serial/jsm/jsm_neo.c
5195 ++++ b/drivers/tty/serial/jsm/jsm_neo.c
5196 +@@ -815,7 +815,9 @@ static void neo_parse_isr(struct jsm_board *brd, u32 port)
5197 + /* Parse any modem signal changes */
5198 + jsm_dbg(INTR, &ch->ch_bd->pci_dev,
5199 + "MOD_STAT: sending to parse_modem_sigs\n");
5200 ++ spin_lock_irqsave(&ch->uart_port.lock, lock_flags);
5201 + neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr));
5202 ++ spin_unlock_irqrestore(&ch->uart_port.lock, lock_flags);
5203 + }
5204 + }
5205 +
5206 +diff --git a/drivers/tty/serial/jsm/jsm_tty.c b/drivers/tty/serial/jsm/jsm_tty.c
5207 +index 689774c073ca4..8438454ca653f 100644
5208 +--- a/drivers/tty/serial/jsm/jsm_tty.c
5209 ++++ b/drivers/tty/serial/jsm/jsm_tty.c
5210 +@@ -187,6 +187,7 @@ static void jsm_tty_break(struct uart_port *port, int break_state)
5211 +
5212 + static int jsm_tty_open(struct uart_port *port)
5213 + {
5214 ++ unsigned long lock_flags;
5215 + struct jsm_board *brd;
5216 + struct jsm_channel *channel =
5217 + container_of(port, struct jsm_channel, uart_port);
5218 +@@ -240,6 +241,7 @@ static int jsm_tty_open(struct uart_port *port)
5219 + channel->ch_cached_lsr = 0;
5220 + channel->ch_stops_sent = 0;
5221 +
5222 ++ spin_lock_irqsave(&port->lock, lock_flags);
5223 + termios = &port->state->port.tty->termios;
5224 + channel->ch_c_cflag = termios->c_cflag;
5225 + channel->ch_c_iflag = termios->c_iflag;
5226 +@@ -259,6 +261,7 @@ static int jsm_tty_open(struct uart_port *port)
5227 + jsm_carrier(channel);
5228 +
5229 + channel->ch_open_count++;
5230 ++ spin_unlock_irqrestore(&port->lock, lock_flags);
5231 +
5232 + jsm_dbg(OPEN, &channel->ch_bd->pci_dev, "finish\n");
5233 + return 0;
5234 +diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
5235 +index 97ee1fc1cd247..ecff9b2088087 100644
5236 +--- a/drivers/tty/serial/sh-sci.c
5237 ++++ b/drivers/tty/serial/sh-sci.c
5238 +@@ -1763,6 +1763,10 @@ static irqreturn_t sci_br_interrupt(int irq, void *ptr)
5239 +
5240 + /* Handle BREAKs */
5241 + sci_handle_breaks(port);
5242 ++
5243 ++ /* drop invalid character received before break was detected */
5244 ++ serial_port_in(port, SCxRDR);
5245 ++
5246 + sci_clear_SCxSR(port, SCxSR_BREAK_CLEAR(port));
5247 +
5248 + return IRQ_HANDLED;
5249 +@@ -1842,7 +1846,8 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
5250 + ret = sci_er_interrupt(irq, ptr);
5251 +
5252 + /* Break Interrupt */
5253 +- if ((ssr_status & SCxSR_BRK(port)) && err_enabled)
5254 ++ if (s->irqs[SCIx_ERI_IRQ] != s->irqs[SCIx_BRI_IRQ] &&
5255 ++ (ssr_status & SCxSR_BRK(port)) && err_enabled)
5256 + ret = sci_br_interrupt(irq, ptr);
5257 +
5258 + /* Overrun Interrupt */
5259 +diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
5260 +index 48e4a5ca18359..f5f56ee07729f 100644
5261 +--- a/drivers/usb/chipidea/host.c
5262 ++++ b/drivers/usb/chipidea/host.c
5263 +@@ -233,18 +233,26 @@ static int ci_ehci_hub_control(
5264 + )
5265 + {
5266 + struct ehci_hcd *ehci = hcd_to_ehci(hcd);
5267 ++ unsigned int ports = HCS_N_PORTS(ehci->hcs_params);
5268 + u32 __iomem *status_reg;
5269 +- u32 temp;
5270 ++ u32 temp, port_index;
5271 + unsigned long flags;
5272 + int retval = 0;
5273 + struct device *dev = hcd->self.controller;
5274 + struct ci_hdrc *ci = dev_get_drvdata(dev);
5275 +
5276 +- status_reg = &ehci->regs->port_status[(wIndex & 0xff) - 1];
5277 ++ port_index = wIndex & 0xff;
5278 ++ port_index -= (port_index > 0);
5279 ++ status_reg = &ehci->regs->port_status[port_index];
5280 +
5281 + spin_lock_irqsave(&ehci->lock, flags);
5282 +
5283 + if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) {
5284 ++ if (!wIndex || wIndex > ports) {
5285 ++ retval = -EPIPE;
5286 ++ goto done;
5287 ++ }
5288 ++
5289 + temp = ehci_readl(ehci, status_reg);
5290 + if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) {
5291 + retval = -EPIPE;
5292 +@@ -273,7 +281,7 @@ static int ci_ehci_hub_control(
5293 + ehci_writel(ehci, temp, status_reg);
5294 + }
5295 +
5296 +- set_bit((wIndex & 0xff) - 1, &ehci->suspended_ports);
5297 ++ set_bit(port_index, &ehci->suspended_ports);
5298 + goto done;
5299 + }
5300 +
5301 +diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
5302 +index 24dad1d78d1ea..6bd3fdb925cd9 100644
5303 +--- a/drivers/usb/gadget/composite.c
5304 ++++ b/drivers/usb/gadget/composite.c
5305 +@@ -481,7 +481,7 @@ static u8 encode_bMaxPower(enum usb_device_speed speed,
5306 + {
5307 + unsigned val;
5308 +
5309 +- if (c->MaxPower)
5310 ++ if (c->MaxPower || (c->bmAttributes & USB_CONFIG_ATT_SELFPOWER))
5311 + val = c->MaxPower;
5312 + else
5313 + val = CONFIG_USB_GADGET_VBUS_DRAW;
5314 +@@ -905,7 +905,11 @@ static int set_config(struct usb_composite_dev *cdev,
5315 + }
5316 +
5317 + /* when we return, be sure our power usage is valid */
5318 +- power = c->MaxPower ? c->MaxPower : CONFIG_USB_GADGET_VBUS_DRAW;
5319 ++ if (c->MaxPower || (c->bmAttributes & USB_CONFIG_ATT_SELFPOWER))
5320 ++ power = c->MaxPower;
5321 ++ else
5322 ++ power = CONFIG_USB_GADGET_VBUS_DRAW;
5323 ++
5324 + if (gadget->speed < USB_SPEED_SUPER)
5325 + power = min(power, 500U);
5326 + else
5327 +diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
5328 +index 99b840daf3d94..57da62e331848 100644
5329 +--- a/drivers/usb/gadget/function/u_ether.c
5330 ++++ b/drivers/usb/gadget/function/u_ether.c
5331 +@@ -491,8 +491,9 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
5332 + }
5333 + spin_unlock_irqrestore(&dev->lock, flags);
5334 +
5335 +- if (skb && !in) {
5336 +- dev_kfree_skb_any(skb);
5337 ++ if (!in) {
5338 ++ if (skb)
5339 ++ dev_kfree_skb_any(skb);
5340 + return NETDEV_TX_OK;
5341 + }
5342 +
5343 +diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
5344 +index b6f196f5e252e..b0e0f8ea98a9c 100644
5345 +--- a/drivers/usb/host/ehci-mv.c
5346 ++++ b/drivers/usb/host/ehci-mv.c
5347 +@@ -41,26 +41,25 @@ struct ehci_hcd_mv {
5348 + int (*set_vbus)(unsigned int vbus);
5349 + };
5350 +
5351 +-static void ehci_clock_enable(struct ehci_hcd_mv *ehci_mv)
5352 ++static int mv_ehci_enable(struct ehci_hcd_mv *ehci_mv)
5353 + {
5354 +- clk_prepare_enable(ehci_mv->clk);
5355 +-}
5356 ++ int retval;
5357 +
5358 +-static void ehci_clock_disable(struct ehci_hcd_mv *ehci_mv)
5359 +-{
5360 +- clk_disable_unprepare(ehci_mv->clk);
5361 +-}
5362 ++ retval = clk_prepare_enable(ehci_mv->clk);
5363 ++ if (retval)
5364 ++ return retval;
5365 +
5366 +-static int mv_ehci_enable(struct ehci_hcd_mv *ehci_mv)
5367 +-{
5368 +- ehci_clock_enable(ehci_mv);
5369 +- return phy_init(ehci_mv->phy);
5370 ++ retval = phy_init(ehci_mv->phy);
5371 ++ if (retval)
5372 ++ clk_disable_unprepare(ehci_mv->clk);
5373 ++
5374 ++ return retval;
5375 + }
5376 +
5377 + static void mv_ehci_disable(struct ehci_hcd_mv *ehci_mv)
5378 + {
5379 + phy_exit(ehci_mv->phy);
5380 +- ehci_clock_disable(ehci_mv);
5381 ++ clk_disable_unprepare(ehci_mv->clk);
5382 + }
5383 +
5384 + static int mv_ehci_reset(struct usb_hcd *hcd)
5385 +diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
5386 +index c3f74d6674e1d..f457e083a6f89 100644
5387 +--- a/drivers/usb/host/fotg210-hcd.c
5388 ++++ b/drivers/usb/host/fotg210-hcd.c
5389 +@@ -2511,11 +2511,6 @@ retry_xacterr:
5390 + return count;
5391 + }
5392 +
5393 +-/* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */
5394 +-#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
5395 +-/* ... and packet size, for any kind of endpoint descriptor */
5396 +-#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
5397 +-
5398 + /* reverse of qh_urb_transaction: free a list of TDs.
5399 + * used for cleanup after errors, before HC sees an URB's TDs.
5400 + */
5401 +@@ -2601,7 +2596,7 @@ static struct list_head *qh_urb_transaction(struct fotg210_hcd *fotg210,
5402 + token |= (1 /* "in" */ << 8);
5403 + /* else it's already initted to "out" pid (0 << 8) */
5404 +
5405 +- maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));
5406 ++ maxpacket = usb_maxpacket(urb->dev, urb->pipe, !is_input);
5407 +
5408 + /*
5409 + * buffer gets wrapped in one or more qtds;
5410 +@@ -2715,9 +2710,11 @@ static struct fotg210_qh *qh_make(struct fotg210_hcd *fotg210, struct urb *urb,
5411 + gfp_t flags)
5412 + {
5413 + struct fotg210_qh *qh = fotg210_qh_alloc(fotg210, flags);
5414 ++ struct usb_host_endpoint *ep;
5415 + u32 info1 = 0, info2 = 0;
5416 + int is_input, type;
5417 + int maxp = 0;
5418 ++ int mult;
5419 + struct usb_tt *tt = urb->dev->tt;
5420 + struct fotg210_qh_hw *hw;
5421 +
5422 +@@ -2732,14 +2729,15 @@ static struct fotg210_qh *qh_make(struct fotg210_hcd *fotg210, struct urb *urb,
5423 +
5424 + is_input = usb_pipein(urb->pipe);
5425 + type = usb_pipetype(urb->pipe);
5426 +- maxp = usb_maxpacket(urb->dev, urb->pipe, !is_input);
5427 ++ ep = usb_pipe_endpoint(urb->dev, urb->pipe);
5428 ++ maxp = usb_endpoint_maxp(&ep->desc);
5429 ++ mult = usb_endpoint_maxp_mult(&ep->desc);
5430 +
5431 + /* 1024 byte maxpacket is a hardware ceiling. High bandwidth
5432 + * acts like up to 3KB, but is built from smaller packets.
5433 + */
5434 +- if (max_packet(maxp) > 1024) {
5435 +- fotg210_dbg(fotg210, "bogus qh maxpacket %d\n",
5436 +- max_packet(maxp));
5437 ++ if (maxp > 1024) {
5438 ++ fotg210_dbg(fotg210, "bogus qh maxpacket %d\n", maxp);
5439 + goto done;
5440 + }
5441 +
5442 +@@ -2753,8 +2751,7 @@ static struct fotg210_qh *qh_make(struct fotg210_hcd *fotg210, struct urb *urb,
5443 + */
5444 + if (type == PIPE_INTERRUPT) {
5445 + qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
5446 +- is_input, 0,
5447 +- hb_mult(maxp) * max_packet(maxp)));
5448 ++ is_input, 0, mult * maxp));
5449 + qh->start = NO_FRAME;
5450 +
5451 + if (urb->dev->speed == USB_SPEED_HIGH) {
5452 +@@ -2791,7 +2788,7 @@ static struct fotg210_qh *qh_make(struct fotg210_hcd *fotg210, struct urb *urb,
5453 + think_time = tt ? tt->think_time : 0;
5454 + qh->tt_usecs = NS_TO_US(think_time +
5455 + usb_calc_bus_time(urb->dev->speed,
5456 +- is_input, 0, max_packet(maxp)));
5457 ++ is_input, 0, maxp));
5458 + qh->period = urb->interval;
5459 + if (qh->period > fotg210->periodic_size) {
5460 + qh->period = fotg210->periodic_size;
5461 +@@ -2854,11 +2851,11 @@ static struct fotg210_qh *qh_make(struct fotg210_hcd *fotg210, struct urb *urb,
5462 + * to help them do so. So now people expect to use
5463 + * such nonconformant devices with Linux too; sigh.
5464 + */
5465 +- info1 |= max_packet(maxp) << 16;
5466 ++ info1 |= maxp << 16;
5467 + info2 |= (FOTG210_TUNE_MULT_HS << 30);
5468 + } else { /* PIPE_INTERRUPT */
5469 +- info1 |= max_packet(maxp) << 16;
5470 +- info2 |= hb_mult(maxp) << 30;
5471 ++ info1 |= maxp << 16;
5472 ++ info2 |= mult << 30;
5473 + }
5474 + break;
5475 + default:
5476 +@@ -3928,6 +3925,7 @@ static void iso_stream_init(struct fotg210_hcd *fotg210,
5477 + int is_input;
5478 + long bandwidth;
5479 + unsigned multi;
5480 ++ struct usb_host_endpoint *ep;
5481 +
5482 + /*
5483 + * this might be a "high bandwidth" highspeed endpoint,
5484 +@@ -3935,14 +3933,14 @@ static void iso_stream_init(struct fotg210_hcd *fotg210,
5485 + */
5486 + epnum = usb_pipeendpoint(pipe);
5487 + is_input = usb_pipein(pipe) ? USB_DIR_IN : 0;
5488 +- maxp = usb_maxpacket(dev, pipe, !is_input);
5489 ++ ep = usb_pipe_endpoint(dev, pipe);
5490 ++ maxp = usb_endpoint_maxp(&ep->desc);
5491 + if (is_input)
5492 + buf1 = (1 << 11);
5493 + else
5494 + buf1 = 0;
5495 +
5496 +- maxp = max_packet(maxp);
5497 +- multi = hb_mult(maxp);
5498 ++ multi = usb_endpoint_maxp_mult(&ep->desc);
5499 + buf1 |= maxp;
5500 + maxp *= multi;
5501 +
5502 +@@ -4463,13 +4461,12 @@ static bool itd_complete(struct fotg210_hcd *fotg210, struct fotg210_itd *itd)
5503 +
5504 + /* HC need not update length with this error */
5505 + if (!(t & FOTG210_ISOC_BABBLE)) {
5506 +- desc->actual_length =
5507 +- fotg210_itdlen(urb, desc, t);
5508 ++ desc->actual_length = FOTG210_ITD_LENGTH(t);
5509 + urb->actual_length += desc->actual_length;
5510 + }
5511 + } else if (likely((t & FOTG210_ISOC_ACTIVE) == 0)) {
5512 + desc->status = 0;
5513 +- desc->actual_length = fotg210_itdlen(urb, desc, t);
5514 ++ desc->actual_length = FOTG210_ITD_LENGTH(t);
5515 + urb->actual_length += desc->actual_length;
5516 + } else {
5517 + /* URB was too late */
5518 +diff --git a/drivers/usb/host/fotg210.h b/drivers/usb/host/fotg210.h
5519 +index 1b4db95e5c43a..291add93d84ee 100644
5520 +--- a/drivers/usb/host/fotg210.h
5521 ++++ b/drivers/usb/host/fotg210.h
5522 +@@ -686,11 +686,6 @@ static inline unsigned fotg210_read_frame_index(struct fotg210_hcd *fotg210)
5523 + return fotg210_readl(fotg210, &fotg210->regs->frame_index);
5524 + }
5525 +
5526 +-#define fotg210_itdlen(urb, desc, t) ({ \
5527 +- usb_pipein((urb)->pipe) ? \
5528 +- (desc)->length - FOTG210_ITD_LENGTH(t) : \
5529 +- FOTG210_ITD_LENGTH(t); \
5530 +-})
5531 + /*-------------------------------------------------------------------------*/
5532 +
5533 + #endif /* __LINUX_FOTG210_H */
5534 +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
5535 +index a3813c75a3de8..505da4999e208 100644
5536 +--- a/drivers/usb/host/xhci.c
5537 ++++ b/drivers/usb/host/xhci.c
5538 +@@ -4662,19 +4662,19 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
5539 + {
5540 + unsigned long long timeout_ns;
5541 +
5542 +- if (xhci->quirks & XHCI_INTEL_HOST)
5543 +- timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
5544 +- else
5545 +- timeout_ns = udev->u1_params.sel;
5546 +-
5547 + /* Prevent U1 if service interval is shorter than U1 exit latency */
5548 + if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
5549 +- if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
5550 ++ if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
5551 + dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
5552 + return USB3_LPM_DISABLED;
5553 + }
5554 + }
5555 +
5556 ++ if (xhci->quirks & XHCI_INTEL_HOST)
5557 ++ timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
5558 ++ else
5559 ++ timeout_ns = udev->u1_params.sel;
5560 ++
5561 + /* The U1 timeout is encoded in 1us intervals.
5562 + * Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
5563 + */
5564 +@@ -4726,19 +4726,19 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
5565 + {
5566 + unsigned long long timeout_ns;
5567 +
5568 +- if (xhci->quirks & XHCI_INTEL_HOST)
5569 +- timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
5570 +- else
5571 +- timeout_ns = udev->u2_params.sel;
5572 +-
5573 + /* Prevent U2 if service interval is shorter than U2 exit latency */
5574 + if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
5575 +- if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
5576 ++ if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
5577 + dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
5578 + return USB3_LPM_DISABLED;
5579 + }
5580 + }
5581 +
5582 ++ if (xhci->quirks & XHCI_INTEL_HOST)
5583 ++ timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
5584 ++ else
5585 ++ timeout_ns = udev->u2_params.sel;
5586 ++
5587 + /* The U2 timeout is encoded in 256us intervals */
5588 + timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
5589 + /* If the necessary timeout value is bigger than what we can set in the
5590 +diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
5591 +index 327d4f7baaf7c..89d659cef5c63 100644
5592 +--- a/drivers/usb/musb/musb_dsps.c
5593 ++++ b/drivers/usb/musb/musb_dsps.c
5594 +@@ -890,23 +890,22 @@ static int dsps_probe(struct platform_device *pdev)
5595 + if (!glue->usbss_base)
5596 + return -ENXIO;
5597 +
5598 +- if (usb_get_dr_mode(&pdev->dev) == USB_DR_MODE_PERIPHERAL) {
5599 +- ret = dsps_setup_optional_vbus_irq(pdev, glue);
5600 +- if (ret)
5601 +- goto err_iounmap;
5602 +- }
5603 +-
5604 + platform_set_drvdata(pdev, glue);
5605 + pm_runtime_enable(&pdev->dev);
5606 + ret = dsps_create_musb_pdev(glue, pdev);
5607 + if (ret)
5608 + goto err;
5609 +
5610 ++ if (usb_get_dr_mode(&pdev->dev) == USB_DR_MODE_PERIPHERAL) {
5611 ++ ret = dsps_setup_optional_vbus_irq(pdev, glue);
5612 ++ if (ret)
5613 ++ goto err;
5614 ++ }
5615 ++
5616 + return 0;
5617 +
5618 + err:
5619 + pm_runtime_disable(&pdev->dev);
5620 +-err_iounmap:
5621 + iounmap(glue->usbss_base);
5622 + return ret;
5623 + }
5624 +diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
5625 +index 98636fbf71882..170abb06a8a4d 100644
5626 +--- a/drivers/usb/usbip/vhci_hcd.c
5627 ++++ b/drivers/usb/usbip/vhci_hcd.c
5628 +@@ -455,8 +455,14 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
5629 + vhci_hcd->port_status[rhport] &= ~(1 << USB_PORT_FEAT_RESET);
5630 + vhci_hcd->re_timeout = 0;
5631 +
5632 ++ /*
5633 ++ * A few drivers do usb reset during probe when
5634 ++ * the device could be in VDEV_ST_USED state
5635 ++ */
5636 + if (vhci_hcd->vdev[rhport].ud.status ==
5637 +- VDEV_ST_NOTASSIGNED) {
5638 ++ VDEV_ST_NOTASSIGNED ||
5639 ++ vhci_hcd->vdev[rhport].ud.status ==
5640 ++ VDEV_ST_USED) {
5641 + usbip_dbg_vhci_rh(
5642 + " enable rhport %d (status %u)\n",
5643 + rhport,
5644 +@@ -952,8 +958,32 @@ static void vhci_device_unlink_cleanup(struct vhci_device *vdev)
5645 + spin_lock(&vdev->priv_lock);
5646 +
5647 + list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) {
5648 ++ struct urb *urb;
5649 ++
5650 ++ /* give back urb of unsent unlink request */
5651 + pr_info("unlink cleanup tx %lu\n", unlink->unlink_seqnum);
5652 ++
5653 ++ urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum);
5654 ++ if (!urb) {
5655 ++ list_del(&unlink->list);
5656 ++ kfree(unlink);
5657 ++ continue;
5658 ++ }
5659 ++
5660 ++ urb->status = -ENODEV;
5661 ++
5662 ++ usb_hcd_unlink_urb_from_ep(hcd, urb);
5663 ++
5664 + list_del(&unlink->list);
5665 ++
5666 ++ spin_unlock(&vdev->priv_lock);
5667 ++ spin_unlock_irqrestore(&vhci->lock, flags);
5668 ++
5669 ++ usb_hcd_giveback_urb(hcd, urb, urb->status);
5670 ++
5671 ++ spin_lock_irqsave(&vhci->lock, flags);
5672 ++ spin_lock(&vdev->priv_lock);
5673 ++
5674 + kfree(unlink);
5675 + }
5676 +
5677 +diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig
5678 +index 503ed2f3fbb5e..65743de8aad11 100644
5679 +--- a/drivers/vfio/Kconfig
5680 ++++ b/drivers/vfio/Kconfig
5681 +@@ -29,7 +29,7 @@ menuconfig VFIO
5682 +
5683 + If you don't know what to do here, say N.
5684 +
5685 +-menuconfig VFIO_NOIOMMU
5686 ++config VFIO_NOIOMMU
5687 + bool "VFIO No-IOMMU support"
5688 + depends on VFIO
5689 + help
5690 +diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
5691 +index 48e574ae60330..cec9173aac6f5 100644
5692 +--- a/drivers/vhost/net.c
5693 ++++ b/drivers/vhost/net.c
5694 +@@ -466,7 +466,7 @@ static void vhost_tx_batch(struct vhost_net *net,
5695 + .num = nvq->batched_xdp,
5696 + .ptr = nvq->xdp,
5697 + };
5698 +- int err;
5699 ++ int i, err;
5700 +
5701 + if (nvq->batched_xdp == 0)
5702 + goto signal_used;
5703 +@@ -475,6 +475,15 @@ static void vhost_tx_batch(struct vhost_net *net,
5704 + err = sock->ops->sendmsg(sock, msghdr, 0);
5705 + if (unlikely(err < 0)) {
5706 + vq_err(&nvq->vq, "Fail to batch sending packets\n");
5707 ++
5708 ++ /* free pages owned by XDP; since this is an unlikely error path,
5709 ++ * keep it simple and avoid more complex bulk update for the
5710 ++ * used pages
5711 ++ */
5712 ++ for (i = 0; i < nvq->batched_xdp; ++i)
5713 ++ put_page(virt_to_head_page(nvq->xdp[i].data));
5714 ++ nvq->batched_xdp = 0;
5715 ++ nvq->done_idx = 0;
5716 + return;
5717 + }
5718 +
5719 +diff --git a/drivers/video/fbdev/asiliantfb.c b/drivers/video/fbdev/asiliantfb.c
5720 +index ea31054a28ca8..c1d6e63362259 100644
5721 +--- a/drivers/video/fbdev/asiliantfb.c
5722 ++++ b/drivers/video/fbdev/asiliantfb.c
5723 +@@ -227,6 +227,9 @@ static int asiliantfb_check_var(struct fb_var_screeninfo *var,
5724 + {
5725 + unsigned long Ftarget, ratio, remainder;
5726 +
5727 ++ if (!var->pixclock)
5728 ++ return -EINVAL;
5729 ++
5730 + ratio = 1000000 / var->pixclock;
5731 + remainder = 1000000 % var->pixclock;
5732 + Ftarget = 1000000 * ratio + (1000000 * remainder) / var->pixclock;
5733 +diff --git a/drivers/video/fbdev/kyro/fbdev.c b/drivers/video/fbdev/kyro/fbdev.c
5734 +index a7bd9f25911b5..74bf26b527b91 100644
5735 +--- a/drivers/video/fbdev/kyro/fbdev.c
5736 ++++ b/drivers/video/fbdev/kyro/fbdev.c
5737 +@@ -372,6 +372,11 @@ static int kyro_dev_overlay_viewport_set(u32 x, u32 y, u32 ulWidth, u32 ulHeight
5738 + /* probably haven't called CreateOverlay yet */
5739 + return -EINVAL;
5740 +
5741 ++ if (ulWidth == 0 || ulWidth == 0xffffffff ||
5742 ++ ulHeight == 0 || ulHeight == 0xffffffff ||
5743 ++ (x < 2 && ulWidth + 2 == 0))
5744 ++ return -EINVAL;
5745 ++
5746 + /* Stop Ramdac Output */
5747 + DisableRamdacOutput(deviceInfo.pSTGReg);
5748 +
5749 +@@ -394,6 +399,9 @@ static int kyrofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
5750 + {
5751 + struct kyrofb_info *par = info->par;
5752 +
5753 ++ if (!var->pixclock)
5754 ++ return -EINVAL;
5755 ++
5756 + if (var->bits_per_pixel != 16 && var->bits_per_pixel != 32) {
5757 + printk(KERN_WARNING "kyrofb: depth not supported: %u\n", var->bits_per_pixel);
5758 + return -EINVAL;
5759 +diff --git a/drivers/video/fbdev/riva/fbdev.c b/drivers/video/fbdev/riva/fbdev.c
5760 +index ca593a3e41d74..51c9d9508c0b0 100644
5761 +--- a/drivers/video/fbdev/riva/fbdev.c
5762 ++++ b/drivers/video/fbdev/riva/fbdev.c
5763 +@@ -1088,6 +1088,9 @@ static int rivafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
5764 + int mode_valid = 0;
5765 +
5766 + NVTRACE_ENTER();
5767 ++ if (!var->pixclock)
5768 ++ return -EINVAL;
5769 ++
5770 + switch (var->bits_per_pixel) {
5771 + case 1 ... 8:
5772 + var->red.offset = var->green.offset = var->blue.offset = 0;
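
The three fbdev hunks above (asiliantfb, kyro, riva) share one fix: a
userspace-controlled var->pixclock feeds integer division, so a zero value
must be rejected before it can trigger a divide-by-zero oops. Schematic of
the guard in plain C (check_var() and struct var are illustrative
stand-ins for the fb_check_var handlers):

#include <errno.h>
#include <stdio.h>

struct var { unsigned long pixclock; };

static int check_var(const struct var *v)
{
	unsigned long ratio;

	if (!v->pixclock)
		return -EINVAL;		/* reject before dividing below */

	ratio = 1000000 / v->pixclock;
	printf("ratio = %lu\n", ratio);
	return 0;
}

int main(void)
{
	struct var bad = { .pixclock = 0 }, good = { .pixclock = 39721 };

	printf("bad:  %d\n", check_var(&bad));	/* -EINVAL, no division */
	printf("good: %d\n", check_var(&good));
	return 0;
}
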
5773 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
5774 +index dacd67dca43fe..946ae198b3449 100644
5775 +--- a/fs/btrfs/disk-io.c
5776 ++++ b/fs/btrfs/disk-io.c
5777 +@@ -2894,6 +2894,29 @@ int open_ctree(struct super_block *sb,
5778 + */
5779 + fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
5780 +
5781 ++ /*
5782 ++ * Flag our filesystem as having big metadata blocks if they are bigger
5783 ++ * than the page size
5784 ++ */
5785 ++ if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
5786 ++ if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
5787 ++ btrfs_info(fs_info,
5788 ++ "flagging fs with big metadata feature");
5789 ++ features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
5790 ++ }
5791 ++
5792 ++ /* Set up fs_info before parsing mount options */
5793 ++ nodesize = btrfs_super_nodesize(disk_super);
5794 ++ sectorsize = btrfs_super_sectorsize(disk_super);
5795 ++ stripesize = sectorsize;
5796 ++ fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
5797 ++ fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
5798 ++
5799 ++ /* Cache block sizes */
5800 ++ fs_info->nodesize = nodesize;
5801 ++ fs_info->sectorsize = sectorsize;
5802 ++ fs_info->stripesize = stripesize;
5803 ++
5804 + ret = btrfs_parse_options(fs_info, options, sb->s_flags);
5805 + if (ret) {
5806 + err = ret;
5807 +@@ -2920,28 +2943,6 @@ int open_ctree(struct super_block *sb,
5808 + if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
5809 + btrfs_info(fs_info, "has skinny extents");
5810 +
5811 +- /*
5812 +- * flag our filesystem as having big metadata blocks if
5813 +- * they are bigger than the page size
5814 +- */
5815 +- if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
5816 +- if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
5817 +- btrfs_info(fs_info,
5818 +- "flagging fs with big metadata feature");
5819 +- features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
5820 +- }
5821 +-
5822 +- nodesize = btrfs_super_nodesize(disk_super);
5823 +- sectorsize = btrfs_super_sectorsize(disk_super);
5824 +- stripesize = sectorsize;
5825 +- fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
5826 +- fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
5827 +-
5828 +- /* Cache block sizes */
5829 +- fs_info->nodesize = nodesize;
5830 +- fs_info->sectorsize = sectorsize;
5831 +- fs_info->stripesize = stripesize;
5832 +-
5833 + /*
5834 + * mixed block groups end up with duplicate but slightly offset
5835 + * extent buffers for the same range. It leads to corruptions
5836 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
5837 +index 33b8fedab6c67..b859ed50cf46c 100644
5838 +--- a/fs/btrfs/inode.c
5839 ++++ b/fs/btrfs/inode.c
5840 +@@ -1200,11 +1200,6 @@ static noinline void async_cow_submit(struct btrfs_work *work)
5841 + nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
5842 + PAGE_SHIFT;
5843 +
5844 +- /* atomic_sub_return implies a barrier */
5845 +- if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
5846 +- 5 * SZ_1M)
5847 +- cond_wake_up_nomb(&fs_info->async_submit_wait);
5848 +-
5849 + /*
5850 + * ->inode could be NULL if async_chunk_start has failed to compress,
5851 + * in which case we don't have anything to submit, yet we need to
5852 +@@ -1213,6 +1208,11 @@ static noinline void async_cow_submit(struct btrfs_work *work)
5853 + */
5854 + if (async_chunk->inode)
5855 + submit_compressed_extents(async_chunk);
5856 ++
5857 ++ /* atomic_sub_return implies a barrier */
5858 ++ if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
5859 ++ 5 * SZ_1M)
5860 ++ cond_wake_up_nomb(&fs_info->async_submit_wait);
5861 + }
5862 +
5863 + static noinline void async_cow_free(struct btrfs_work *work)
5864 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
5865 +index 5412361d0c270..8ea4b3da85d1a 100644
5866 +--- a/fs/btrfs/tree-log.c
5867 ++++ b/fs/btrfs/tree-log.c
5868 +@@ -719,7 +719,9 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
5869 + */
5870 + ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
5871 + ins.offset);
5872 +- if (ret == 0) {
5873 ++ if (ret < 0) {
5874 ++ goto out;
5875 ++ } else if (ret == 0) {
5876 + btrfs_init_generic_ref(&ref,
5877 + BTRFS_ADD_DELAYED_REF,
5878 + ins.objectid, ins.offset, 0);
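
The tree-log hunk above is the classic tri-state return-value fix:
btrfs_lookup_data_extent() can return <0 (error), 0 (found) or >0 (not
found), and only the middle case was handled before, so a real error fell
through as "not found". Shape of the corrected dispatch, as a hedged
userspace sketch (lookup() and replay() are stand-ins):

#include <stdio.h>

static int lookup(int key)
{
	return key < 0 ? -5 : key;	/* error, found, or not found */
}

static int replay(int key)
{
	int ret = lookup(key);

	if (ret < 0)
		return ret;		/* propagate the error */
	else if (ret == 0)
		printf("extent exists, just add a ref\n");
	else
		printf("extent missing, allocate it\n");
	return 0;
}

int main(void)
{
	replay(0);
	replay(1);
	return replay(-1) < 0 ? 1 : 0;
}
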
5879 +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
5880 +index e882c790292f9..8deee49a6b3fa 100644
5881 +--- a/fs/btrfs/volumes.c
5882 ++++ b/fs/btrfs/volumes.c
5883 +@@ -1311,6 +1311,9 @@ static void btrfs_close_one_device(struct btrfs_device *device)
5884 + fs_devices->rw_devices--;
5885 + }
5886 +
5887 ++ if (device->devid == BTRFS_DEV_REPLACE_DEVID)
5888 ++ clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
5889 ++
5890 + if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
5891 + fs_devices->missing_devices--;
5892 +
5893 +diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
5894 +index 85bd644f9773b..30f841a880acd 100644
5895 +--- a/fs/cifs/sess.c
5896 ++++ b/fs/cifs/sess.c
5897 +@@ -610,7 +610,7 @@ sess_alloc_buffer(struct sess_data *sess_data, int wct)
5898 + return 0;
5899 +
5900 + out_free_smb_buf:
5901 +- kfree(smb_buf);
5902 ++ cifs_small_buf_release(smb_buf);
5903 + sess_data->iov[0].iov_base = NULL;
5904 + sess_data->iov[0].iov_len = 0;
5905 + sess_data->buf0_type = CIFS_NO_BUFFER;
5906 +diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
5907 +index a57219c51c01a..f7d27cbbeb860 100644
5908 +--- a/fs/f2fs/checkpoint.c
5909 ++++ b/fs/f2fs/checkpoint.c
5910 +@@ -583,7 +583,7 @@ int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi)
5911 +
5912 + if (time_to_inject(sbi, FAULT_ORPHAN)) {
5913 + spin_unlock(&im->ino_lock);
5914 +- f2fs_show_injection_info(FAULT_ORPHAN);
5915 ++ f2fs_show_injection_info(sbi, FAULT_ORPHAN);
5916 + return -ENOSPC;
5917 + }
5918 +
5919 +diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
5920 +index 64ee2a064e339..1679f9c0b63b3 100644
5921 +--- a/fs/f2fs/data.c
5922 ++++ b/fs/f2fs/data.c
5923 +@@ -167,9 +167,10 @@ static bool f2fs_bio_post_read_required(struct bio *bio)
5924 +
5925 + static void f2fs_read_end_io(struct bio *bio)
5926 + {
5927 +- if (time_to_inject(F2FS_P_SB(bio_first_page_all(bio)),
5928 +- FAULT_READ_IO)) {
5929 +- f2fs_show_injection_info(FAULT_READ_IO);
5930 ++ struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
5931 ++
5932 ++ if (time_to_inject(sbi, FAULT_READ_IO)) {
5933 ++ f2fs_show_injection_info(sbi, FAULT_READ_IO);
5934 + bio->bi_status = BLK_STS_IOERR;
5935 + }
5936 +
5937 +@@ -191,7 +192,7 @@ static void f2fs_write_end_io(struct bio *bio)
5938 + struct bvec_iter_all iter_all;
5939 +
5940 + if (time_to_inject(sbi, FAULT_WRITE_IO)) {
5941 +- f2fs_show_injection_info(FAULT_WRITE_IO);
5942 ++ f2fs_show_injection_info(sbi, FAULT_WRITE_IO);
5943 + bio->bi_status = BLK_STS_IOERR;
5944 + }
5945 +
5946 +@@ -1190,7 +1191,21 @@ next_dnode:
5947 + if (err) {
5948 + if (flag == F2FS_GET_BLOCK_BMAP)
5949 + map->m_pblk = 0;
5950 ++
5951 + if (err == -ENOENT) {
5952 ++ /*
5953 ++			 * There is one exceptional case where read_node_page()
5954 ++			 * may return -ENOENT because the filesystem has been
5955 ++			 * shut down or hit cp_error, so force the error
5956 ++			 * number to EIO in that case.
5957 ++ */
5958 ++ if (map->m_may_create &&
5959 ++ (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
5960 ++ f2fs_cp_error(sbi))) {
5961 ++ err = -EIO;
5962 ++ goto unlock_out;
5963 ++ }
5964 ++
5965 + err = 0;
5966 + if (map->m_next_pgofs)
5967 + *map->m_next_pgofs =
5968 +diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
5969 +index 78d041f9775a4..99c4a868d73b0 100644
5970 +--- a/fs/f2fs/dir.c
5971 ++++ b/fs/f2fs/dir.c
5972 +@@ -618,7 +618,7 @@ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
5973 +
5974 + start:
5975 + if (time_to_inject(F2FS_I_SB(dir), FAULT_DIR_DEPTH)) {
5976 +- f2fs_show_injection_info(FAULT_DIR_DEPTH);
5977 ++ f2fs_show_injection_info(F2FS_I_SB(dir), FAULT_DIR_DEPTH);
5978 + return -ENOSPC;
5979 + }
5980 +
5981 +@@ -892,6 +892,7 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
5982 + struct f2fs_sb_info *sbi = F2FS_I_SB(d->inode);
5983 + struct blk_plug plug;
5984 + bool readdir_ra = sbi->readdir_ra == 1;
5985 ++ bool found_valid_dirent = false;
5986 + int err = 0;
5987 +
5988 + bit_pos = ((unsigned long)ctx->pos % d->max);
5989 +@@ -906,12 +907,15 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
5990 +
5991 + de = &d->dentry[bit_pos];
5992 + if (de->name_len == 0) {
5993 ++ if (found_valid_dirent || !bit_pos) {
5994 ++ printk_ratelimited(
5995 ++ "%sF2FS-fs (%s): invalid namelen(0), ino:%u, run fsck to fix.",
5996 ++ KERN_WARNING, sbi->sb->s_id,
5997 ++ le32_to_cpu(de->ino));
5998 ++ set_sbi_flag(sbi, SBI_NEED_FSCK);
5999 ++ }
6000 + bit_pos++;
6001 + ctx->pos = start_pos + bit_pos;
6002 +- printk_ratelimited(
6003 +- "%s, invalid namelen(0), ino:%u, run fsck to fix.",
6004 +- KERN_WARNING, le32_to_cpu(de->ino));
6005 +- set_sbi_flag(sbi, SBI_NEED_FSCK);
6006 + continue;
6007 + }
6008 +
6009 +@@ -954,6 +958,7 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
6010 + f2fs_ra_node_page(sbi, le32_to_cpu(de->ino));
6011 +
6012 + ctx->pos = start_pos + bit_pos;
6013 ++ found_valid_dirent = true;
6014 + }
6015 + out:
6016 + if (readdir_ra)
6017 +diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
6018 +index 4ca3c2a0a0f5b..031a17bf52a24 100644
6019 +--- a/fs/f2fs/f2fs.h
6020 ++++ b/fs/f2fs/f2fs.h
6021 +@@ -1374,9 +1374,10 @@ struct f2fs_private_dio {
6022 + };
6023 +
6024 + #ifdef CONFIG_F2FS_FAULT_INJECTION
6025 +-#define f2fs_show_injection_info(type) \
6026 +- printk_ratelimited("%sF2FS-fs : inject %s in %s of %pS\n", \
6027 +- KERN_INFO, f2fs_fault_name[type], \
6028 ++#define f2fs_show_injection_info(sbi, type) \
6029 ++ printk_ratelimited("%sF2FS-fs (%s) : inject %s in %s of %pS\n", \
6030 ++ KERN_INFO, sbi->sb->s_id, \
6031 ++ f2fs_fault_name[type], \
6032 + __func__, __builtin_return_address(0))
6033 + static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
6034 + {
6035 +@@ -1396,7 +1397,7 @@ static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
6036 + return false;
6037 + }
6038 + #else
6039 +-#define f2fs_show_injection_info(type) do { } while (0)
6040 ++#define f2fs_show_injection_info(sbi, type) do { } while (0)
6041 + static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
6042 + {
6043 + return false;
6044 +@@ -1781,7 +1782,7 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
6045 + return ret;
6046 +
6047 + if (time_to_inject(sbi, FAULT_BLOCK)) {
6048 +- f2fs_show_injection_info(FAULT_BLOCK);
6049 ++ f2fs_show_injection_info(sbi, FAULT_BLOCK);
6050 + release = *count;
6051 + goto release_quota;
6052 + }
6053 +@@ -2033,7 +2034,7 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
6054 + }
6055 +
6056 + if (time_to_inject(sbi, FAULT_BLOCK)) {
6057 +- f2fs_show_injection_info(FAULT_BLOCK);
6058 ++ f2fs_show_injection_info(sbi, FAULT_BLOCK);
6059 + goto enospc;
6060 + }
6061 +
6062 +@@ -2148,7 +2149,8 @@ static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
6063 + return page;
6064 +
6065 + if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) {
6066 +- f2fs_show_injection_info(FAULT_PAGE_ALLOC);
6067 ++ f2fs_show_injection_info(F2FS_M_SB(mapping),
6068 ++ FAULT_PAGE_ALLOC);
6069 + return NULL;
6070 + }
6071 + }
6072 +@@ -2163,7 +2165,7 @@ static inline struct page *f2fs_pagecache_get_page(
6073 + int fgp_flags, gfp_t gfp_mask)
6074 + {
6075 + if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) {
6076 +- f2fs_show_injection_info(FAULT_PAGE_GET);
6077 ++ f2fs_show_injection_info(F2FS_M_SB(mapping), FAULT_PAGE_GET);
6078 + return NULL;
6079 + }
6080 +
6081 +@@ -2232,7 +2234,7 @@ static inline struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi,
6082 + return bio;
6083 + }
6084 + if (time_to_inject(sbi, FAULT_ALLOC_BIO)) {
6085 +- f2fs_show_injection_info(FAULT_ALLOC_BIO);
6086 ++ f2fs_show_injection_info(sbi, FAULT_ALLOC_BIO);
6087 + return NULL;
6088 + }
6089 +
6090 +@@ -2797,7 +2799,7 @@ static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
6091 + size_t size, gfp_t flags)
6092 + {
6093 + if (time_to_inject(sbi, FAULT_KMALLOC)) {
6094 +- f2fs_show_injection_info(FAULT_KMALLOC);
6095 ++ f2fs_show_injection_info(sbi, FAULT_KMALLOC);
6096 + return NULL;
6097 + }
6098 +
6099 +@@ -2814,7 +2816,7 @@ static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi,
6100 + size_t size, gfp_t flags)
6101 + {
6102 + if (time_to_inject(sbi, FAULT_KVMALLOC)) {
6103 +- f2fs_show_injection_info(FAULT_KVMALLOC);
6104 ++ f2fs_show_injection_info(sbi, FAULT_KVMALLOC);
6105 + return NULL;
6106 + }
6107 +
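
All the f2fs_show_injection_info() call sites in this patch gain an sbi
argument for one reason: the log line can now name which mount the fault
was injected into (sb->s_id) instead of leaving multi-device systems
guessing. Rough userspace analogue of the macro (struct sb_info and the
message text here are illustrative, not the kernel definitions):

#include <stdio.h>

struct sb_info { const char *id; };

#define show_injection_info(sbi, type) \
	fprintf(stderr, "F2FS-fs (%s) : inject %s in %s\n", \
		(sbi)->id, (type), __func__)

int main(void)
{
	struct sb_info sbi = { .id = "sda1" };

	show_injection_info(&sbi, "FAULT_KMALLOC");
	return 0;
}
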
6108 +diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
6109 +index 6e58b2e62b189..516007bb1ced1 100644
6110 +--- a/fs/f2fs/file.c
6111 ++++ b/fs/f2fs/file.c
6112 +@@ -682,7 +682,7 @@ int f2fs_truncate(struct inode *inode)
6113 + trace_f2fs_truncate(inode);
6114 +
6115 + if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
6116 +- f2fs_show_injection_info(FAULT_TRUNCATE);
6117 ++ f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
6118 + return -EIO;
6119 + }
6120 +
6121 +@@ -981,7 +981,6 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
6122 + }
6123 +
6124 + if (pg_start < pg_end) {
6125 +- struct address_space *mapping = inode->i_mapping;
6126 + loff_t blk_start, blk_end;
6127 + struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
6128 +
6129 +@@ -993,8 +992,7 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
6130 + down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
6131 + down_write(&F2FS_I(inode)->i_mmap_sem);
6132 +
6133 +- truncate_inode_pages_range(mapping, blk_start,
6134 +- blk_end - 1);
6135 ++ truncate_pagecache_range(inode, blk_start, blk_end - 1);
6136 +
6137 + f2fs_lock_op(sbi);
6138 + ret = f2fs_truncate_hole(inode, pg_start, pg_end);
6139 +diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
6140 +index a78aa5480454f..4b6c36208f552 100644
6141 +--- a/fs/f2fs/gc.c
6142 ++++ b/fs/f2fs/gc.c
6143 +@@ -54,7 +54,7 @@ static int gc_thread_func(void *data)
6144 + }
6145 +
6146 + if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
6147 +- f2fs_show_injection_info(FAULT_CHECKPOINT);
6148 ++ f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
6149 + f2fs_stop_checkpoint(sbi, false);
6150 + }
6151 +
6152 +@@ -1095,8 +1095,10 @@ next_step:
6153 + int err;
6154 +
6155 + if (S_ISREG(inode->i_mode)) {
6156 +- if (!down_write_trylock(&fi->i_gc_rwsem[READ]))
6157 ++ if (!down_write_trylock(&fi->i_gc_rwsem[READ])) {
6158 ++ sbi->skipped_gc_rwsem++;
6159 + continue;
6160 ++ }
6161 + if (!down_write_trylock(
6162 + &fi->i_gc_rwsem[WRITE])) {
6163 + sbi->skipped_gc_rwsem++;
6164 +diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
6165 +index 386ad54c13c3a..502bd491336a8 100644
6166 +--- a/fs/f2fs/inode.c
6167 ++++ b/fs/f2fs/inode.c
6168 +@@ -681,7 +681,7 @@ retry:
6169 + err = f2fs_truncate(inode);
6170 +
6171 + if (time_to_inject(sbi, FAULT_EVICT_INODE)) {
6172 +- f2fs_show_injection_info(FAULT_EVICT_INODE);
6173 ++ f2fs_show_injection_info(sbi, FAULT_EVICT_INODE);
6174 + err = -EIO;
6175 + }
6176 +
6177 +diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
6178 +index 48bb5d3c709db..4cb182c20eedd 100644
6179 +--- a/fs/f2fs/node.c
6180 ++++ b/fs/f2fs/node.c
6181 +@@ -2406,7 +2406,7 @@ bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
6182 + struct free_nid *i = NULL;
6183 + retry:
6184 + if (time_to_inject(sbi, FAULT_ALLOC_NID)) {
6185 +- f2fs_show_injection_info(FAULT_ALLOC_NID);
6186 ++ f2fs_show_injection_info(sbi, FAULT_ALLOC_NID);
6187 + return false;
6188 + }
6189 +
6190 +diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
6191 +index 5ba677f85533c..78c54bb7898df 100644
6192 +--- a/fs/f2fs/segment.c
6193 ++++ b/fs/f2fs/segment.c
6194 +@@ -489,7 +489,7 @@ int f2fs_commit_inmem_pages(struct inode *inode)
6195 + void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
6196 + {
6197 + if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
6198 +- f2fs_show_injection_info(FAULT_CHECKPOINT);
6199 ++ f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
6200 + f2fs_stop_checkpoint(sbi, false);
6201 + }
6202 +
6203 +@@ -1017,8 +1017,9 @@ static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
6204 +
6205 + if (dc->error)
6206 + printk_ratelimited(
6207 +- "%sF2FS-fs: Issue discard(%u, %u, %u) failed, ret: %d",
6208 +- KERN_INFO, dc->lstart, dc->start, dc->len, dc->error);
6209 ++ "%sF2FS-fs (%s): Issue discard(%u, %u, %u) failed, ret: %d",
6210 ++ KERN_INFO, sbi->sb->s_id,
6211 ++ dc->lstart, dc->start, dc->len, dc->error);
6212 + __detach_discard_cmd(dcc, dc);
6213 + }
6214 +
6215 +@@ -1158,7 +1159,7 @@ static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
6216 + dc->len += len;
6217 +
6218 + if (time_to_inject(sbi, FAULT_DISCARD)) {
6219 +- f2fs_show_injection_info(FAULT_DISCARD);
6220 ++ f2fs_show_injection_info(sbi, FAULT_DISCARD);
6221 + err = -EIO;
6222 + goto submit;
6223 + }
6224 +diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
6225 +index 6d904dc9bd199..41bf656658ba8 100644
6226 +--- a/fs/f2fs/super.c
6227 ++++ b/fs/f2fs/super.c
6228 +@@ -1994,6 +1994,33 @@ static int f2fs_enable_quotas(struct super_block *sb)
6229 + return 0;
6230 + }
6231 +
6232 ++static int f2fs_quota_sync_file(struct f2fs_sb_info *sbi, int type)
6233 ++{
6234 ++ struct quota_info *dqopt = sb_dqopt(sbi->sb);
6235 ++ struct address_space *mapping = dqopt->files[type]->i_mapping;
6236 ++ int ret = 0;
6237 ++
6238 ++ ret = dquot_writeback_dquots(sbi->sb, type);
6239 ++ if (ret)
6240 ++ goto out;
6241 ++
6242 ++ ret = filemap_fdatawrite(mapping);
6243 ++ if (ret)
6244 ++ goto out;
6245 ++
6246 ++ /* if we are using journalled quota */
6247 ++ if (is_journalled_quota(sbi))
6248 ++ goto out;
6249 ++
6250 ++ ret = filemap_fdatawait(mapping);
6251 ++
6252 ++ truncate_inode_pages(&dqopt->files[type]->i_data, 0);
6253 ++out:
6254 ++ if (ret)
6255 ++ set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
6256 ++ return ret;
6257 ++}
6258 ++
6259 + int f2fs_quota_sync(struct super_block *sb, int type)
6260 + {
6261 + struct f2fs_sb_info *sbi = F2FS_SB(sb);
6262 +@@ -2001,57 +2028,42 @@ int f2fs_quota_sync(struct super_block *sb, int type)
6263 + int cnt;
6264 + int ret;
6265 +
6266 +- /*
6267 +- * do_quotactl
6268 +- * f2fs_quota_sync
6269 +- * down_read(quota_sem)
6270 +- * dquot_writeback_dquots()
6271 +- * f2fs_dquot_commit
6272 +- * block_operation
6273 +- * down_read(quota_sem)
6274 +- */
6275 +- f2fs_lock_op(sbi);
6276 +-
6277 +- down_read(&sbi->quota_sem);
6278 +- ret = dquot_writeback_dquots(sb, type);
6279 +- if (ret)
6280 +- goto out;
6281 +-
6282 + /*
6283 + * Now when everything is written we can discard the pagecache so
6284 + * that userspace sees the changes.
6285 + */
6286 + for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
6287 +- struct address_space *mapping;
6288 +
6289 + if (type != -1 && cnt != type)
6290 + continue;
6291 +- if (!sb_has_quota_active(sb, cnt))
6292 +- continue;
6293 +
6294 +- mapping = dqopt->files[cnt]->i_mapping;
6295 ++ if (!sb_has_quota_active(sb, type))
6296 ++ return 0;
6297 +
6298 +- ret = filemap_fdatawrite(mapping);
6299 +- if (ret)
6300 +- goto out;
6301 ++ inode_lock(dqopt->files[cnt]);
6302 +
6303 +- /* if we are using journalled quota */
6304 +- if (is_journalled_quota(sbi))
6305 +- continue;
6306 ++ /*
6307 ++ * do_quotactl
6308 ++ * f2fs_quota_sync
6309 ++ * down_read(quota_sem)
6310 ++ * dquot_writeback_dquots()
6311 ++ * f2fs_dquot_commit
6312 ++ * block_operation
6313 ++ * down_read(quota_sem)
6314 ++ */
6315 ++ f2fs_lock_op(sbi);
6316 ++ down_read(&sbi->quota_sem);
6317 +
6318 +- ret = filemap_fdatawait(mapping);
6319 +- if (ret)
6320 +- set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
6321 ++ ret = f2fs_quota_sync_file(sbi, cnt);
6322 ++
6323 ++ up_read(&sbi->quota_sem);
6324 ++ f2fs_unlock_op(sbi);
6325 +
6326 +- inode_lock(dqopt->files[cnt]);
6327 +- truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
6328 + inode_unlock(dqopt->files[cnt]);
6329 ++
6330 ++ if (ret)
6331 ++ break;
6332 + }
6333 +-out:
6334 +- if (ret)
6335 +- set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
6336 +- up_read(&sbi->quota_sem);
6337 +- f2fs_unlock_op(sbi);
6338 + return ret;
6339 + }
6340 +
6341 +diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
6342 +index 0ce39658a6200..44a426c8ea01e 100644
6343 +--- a/fs/fscache/cookie.c
6344 ++++ b/fs/fscache/cookie.c
6345 +@@ -74,10 +74,8 @@ void fscache_free_cookie(struct fscache_cookie *cookie)
6346 + static int fscache_set_key(struct fscache_cookie *cookie,
6347 + const void *index_key, size_t index_key_len)
6348 + {
6349 +- unsigned long long h;
6350 + u32 *buf;
6351 + int bufs;
6352 +- int i;
6353 +
6354 + bufs = DIV_ROUND_UP(index_key_len, sizeof(*buf));
6355 +
6356 +@@ -91,17 +89,7 @@ static int fscache_set_key(struct fscache_cookie *cookie,
6357 + }
6358 +
6359 + memcpy(buf, index_key, index_key_len);
6360 +-
6361 +- /* Calculate a hash and combine this with the length in the first word
6362 +- * or first half word
6363 +- */
6364 +- h = (unsigned long)cookie->parent;
6365 +- h += index_key_len + cookie->type;
6366 +-
6367 +- for (i = 0; i < bufs; i++)
6368 +- h += buf[i];
6369 +-
6370 +- cookie->key_hash = h ^ (h >> 32);
6371 ++ cookie->key_hash = fscache_hash(0, buf, bufs);
6372 + return 0;
6373 + }
6374 +
6375 +diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
6376 +index 9616af3768e11..d09d4e69c818e 100644
6377 +--- a/fs/fscache/internal.h
6378 ++++ b/fs/fscache/internal.h
6379 +@@ -97,6 +97,8 @@ extern struct workqueue_struct *fscache_object_wq;
6380 + extern struct workqueue_struct *fscache_op_wq;
6381 + DECLARE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait);
6382 +
6383 ++extern unsigned int fscache_hash(unsigned int salt, unsigned int *data, unsigned int n);
6384 ++
6385 + static inline bool fscache_object_congested(void)
6386 + {
6387 + return workqueue_congested(WORK_CPU_UNBOUND, fscache_object_wq);
6388 +diff --git a/fs/fscache/main.c b/fs/fscache/main.c
6389 +index 59c2494efda34..3aa3756c71761 100644
6390 +--- a/fs/fscache/main.c
6391 ++++ b/fs/fscache/main.c
6392 +@@ -94,6 +94,45 @@ static struct ctl_table fscache_sysctls_root[] = {
6393 + };
6394 + #endif
6395 +
6396 ++/*
6397 ++ * Mixing scores (in bits) for (7,20):
6398 ++ * Input delta: 1-bit 2-bit
6399 ++ * 1 round: 330.3 9201.6
6400 ++ * 2 rounds: 1246.4 25475.4
6401 ++ * 3 rounds: 1907.1 31295.1
6402 ++ * 4 rounds: 2042.3 31718.6
6403 ++ * Perfect: 2048 31744
6404 ++ * (32*64) (32*31/2 * 64)
6405 ++ */
6406 ++#define HASH_MIX(x, y, a) \
6407 ++ ( x ^= (a), \
6408 ++ y ^= x, x = rol32(x, 7),\
6409 ++ x += y, y = rol32(y,20),\
6410 ++ y *= 9 )
6411 ++
6412 ++static inline unsigned int fold_hash(unsigned long x, unsigned long y)
6413 ++{
6414 ++ /* Use arch-optimized multiply if one exists */
6415 ++ return __hash_32(y ^ __hash_32(x));
6416 ++}
6417 ++
6418 ++/*
6419 ++ * Generate a hash. This is derived from full_name_hash(), but we want to be
6420 ++ * sure it is arch independent and that it doesn't change, since bits of the
6421 ++ * computed hash value might appear on disk. The caller also guarantees that
6422 ++ * the hashed data will be a series of aligned 32-bit words.
6423 ++ */
6424 ++unsigned int fscache_hash(unsigned int salt, unsigned int *data, unsigned int n)
6425 ++{
6426 ++ unsigned int a, x = 0, y = salt;
6427 ++
6428 ++ for (; n; n--) {
6429 ++ a = *data++;
6430 ++ HASH_MIX(x, y, a);
6431 ++ }
6432 ++ return fold_hash(x, y);
6433 ++}
6434 ++
6435 + /*
6436 + * initialise the fs caching module
6437 + */
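
Because fscache_hash() output can end up on disk, the function above must
produce identical results on every architecture and kernel build. A
standalone rendering that can be compiled and run off-tree to verify that
stability (rol32 and the 0x61C88647 golden-ratio multiply mirror the
kernel helpers the patch relies on; the test key is arbitrary):

#include <stdint.h>
#include <stdio.h>

static inline uint32_t rol32(uint32_t x, unsigned int n)
{
	return (x << n) | (x >> (32 - n));
}

static inline uint32_t hash_32(uint32_t x)
{
	return x * 0x61C88647u;		/* GOLDEN_RATIO_32 */
}

#define HASH_MIX(x, y, a) \
	( x ^= (a), \
	  y ^= x, x = rol32(x, 7), \
	  x += y, y = rol32(y, 20), \
	  y *= 9 )

static uint32_t fscache_hash(uint32_t salt, const uint32_t *data,
			     unsigned int n)
{
	uint32_t a, x = 0, y = salt;

	for (; n; n--) {
		a = *data++;
		HASH_MIX(x, y, a);
	}
	return hash_32(y ^ hash_32(x));	/* fold_hash() */
}

int main(void)
{
	uint32_t key[2] = { 0xdeadbeef, 0x12345678 };

	/* The same input must hash identically on every architecture. */
	printf("%08x\n", fscache_hash(0, key, 2));
	return 0;
}
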
6438 +diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
6439 +index 16aa55b73ccf5..7205a89fbb5f3 100644
6440 +--- a/fs/fuse/dev.c
6441 ++++ b/fs/fuse/dev.c
6442 +@@ -282,10 +282,10 @@ void fuse_request_end(struct fuse_conn *fc, struct fuse_req *req)
6443 +
6444 + /*
6445 + * test_and_set_bit() implies smp_mb() between bit
6446 +- * changing and below intr_entry check. Pairs with
6447 ++ * changing and below FR_INTERRUPTED check. Pairs with
6448 + * smp_mb() from queue_interrupt().
6449 + */
6450 +- if (!list_empty(&req->intr_entry)) {
6451 ++ if (test_bit(FR_INTERRUPTED, &req->flags)) {
6452 + spin_lock(&fiq->lock);
6453 + list_del_init(&req->intr_entry);
6454 + spin_unlock(&fiq->lock);
6455 +diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
6456 +index 72dec177b3494..94c290a333a0a 100644
6457 +--- a/fs/gfs2/lock_dlm.c
6458 ++++ b/fs/gfs2/lock_dlm.c
6459 +@@ -292,6 +292,11 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
6460 + gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
6461 + gfs2_update_request_times(gl);
6462 +
6463 ++ /* don't want to call dlm if we've unmounted the lock protocol */
6464 ++ if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
6465 ++ gfs2_glock_free(gl);
6466 ++ return;
6467 ++ }
6468 + /* don't want to skip dlm_unlock writing the lvb when lock has one */
6469 +
6470 + if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
6471 +diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
6472 +index 498cb70c2c0d0..273a81971ed57 100644
6473 +--- a/fs/lockd/svclock.c
6474 ++++ b/fs/lockd/svclock.c
6475 +@@ -395,28 +395,10 @@ nlmsvc_release_lockowner(struct nlm_lock *lock)
6476 + nlmsvc_put_lockowner(lock->fl.fl_owner);
6477 + }
6478 +
6479 +-static void nlmsvc_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
6480 +-{
6481 +- struct nlm_lockowner *nlm_lo = (struct nlm_lockowner *)fl->fl_owner;
6482 +- new->fl_owner = nlmsvc_get_lockowner(nlm_lo);
6483 +-}
6484 +-
6485 +-static void nlmsvc_locks_release_private(struct file_lock *fl)
6486 +-{
6487 +- nlmsvc_put_lockowner((struct nlm_lockowner *)fl->fl_owner);
6488 +-}
6489 +-
6490 +-static const struct file_lock_operations nlmsvc_lock_ops = {
6491 +- .fl_copy_lock = nlmsvc_locks_copy_lock,
6492 +- .fl_release_private = nlmsvc_locks_release_private,
6493 +-};
6494 +-
6495 + void nlmsvc_locks_init_private(struct file_lock *fl, struct nlm_host *host,
6496 + pid_t pid)
6497 + {
6498 + fl->fl_owner = nlmsvc_find_lockowner(host, pid);
6499 +- if (fl->fl_owner != NULL)
6500 +- fl->fl_ops = &nlmsvc_lock_ops;
6501 + }
6502 +
6503 + /*
6504 +@@ -788,9 +770,21 @@ nlmsvc_notify_blocked(struct file_lock *fl)
6505 + printk(KERN_WARNING "lockd: notification for unknown block!\n");
6506 + }
6507 +
6508 ++static fl_owner_t nlmsvc_get_owner(fl_owner_t owner)
6509 ++{
6510 ++ return nlmsvc_get_lockowner(owner);
6511 ++}
6512 ++
6513 ++static void nlmsvc_put_owner(fl_owner_t owner)
6514 ++{
6515 ++ nlmsvc_put_lockowner(owner);
6516 ++}
6517 ++
6518 + const struct lock_manager_operations nlmsvc_lock_operations = {
6519 + .lm_notify = nlmsvc_notify_blocked,
6520 + .lm_grant = nlmsvc_grant_deferred,
6521 ++ .lm_get_owner = nlmsvc_get_owner,
6522 ++ .lm_put_owner = nlmsvc_put_owner,
6523 + };
6524 +
6525 + /*
6526 +diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
6527 +index 6509ec3cb3730..073be36b0686c 100644
6528 +--- a/fs/overlayfs/dir.c
6529 ++++ b/fs/overlayfs/dir.c
6530 +@@ -513,8 +513,10 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
6531 + goto out_cleanup;
6532 + }
6533 + err = ovl_instantiate(dentry, inode, newdentry, hardlink);
6534 +- if (err)
6535 +- goto out_cleanup;
6536 ++ if (err) {
6537 ++ ovl_cleanup(udir, newdentry);
6538 ++ dput(newdentry);
6539 ++ }
6540 + out_dput:
6541 + dput(upper);
6542 + out_unlock:
6543 +diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
6544 +index 2c807283115d7..ec57bbb6bb05c 100644
6545 +--- a/fs/userfaultfd.c
6546 ++++ b/fs/userfaultfd.c
6547 +@@ -32,11 +32,6 @@ int sysctl_unprivileged_userfaultfd __read_mostly = 1;
6548 +
6549 + static struct kmem_cache *userfaultfd_ctx_cachep __read_mostly;
6550 +
6551 +-enum userfaultfd_state {
6552 +- UFFD_STATE_WAIT_API,
6553 +- UFFD_STATE_RUNNING,
6554 +-};
6555 +-
6556 + /*
6557 + * Start with fault_pending_wqh and fault_wqh so they're more likely
6558 + * to be in the same cacheline.
6559 +@@ -68,8 +63,6 @@ struct userfaultfd_ctx {
6560 + unsigned int flags;
6561 + /* features requested from the userspace */
6562 + unsigned int features;
6563 +- /* state machine */
6564 +- enum userfaultfd_state state;
6565 + /* released */
6566 + bool released;
6567 + /* memory mappings are changing because of non-cooperative event */
6568 +@@ -103,6 +96,14 @@ struct userfaultfd_wake_range {
6569 + unsigned long len;
6570 + };
6571 +
6572 ++/* internal indication that UFFD_API ioctl was successfully executed */
6573 ++#define UFFD_FEATURE_INITIALIZED (1u << 31)
6574 ++
6575 ++static bool userfaultfd_is_initialized(struct userfaultfd_ctx *ctx)
6576 ++{
6577 ++ return ctx->features & UFFD_FEATURE_INITIALIZED;
6578 ++}
6579 ++
6580 + static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
6581 + int wake_flags, void *key)
6582 + {
6583 +@@ -699,7 +700,6 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
6584 +
6585 + refcount_set(&ctx->refcount, 1);
6586 + ctx->flags = octx->flags;
6587 +- ctx->state = UFFD_STATE_RUNNING;
6588 + ctx->features = octx->features;
6589 + ctx->released = false;
6590 + ctx->mmap_changing = false;
6591 +@@ -980,38 +980,33 @@ static __poll_t userfaultfd_poll(struct file *file, poll_table *wait)
6592 +
6593 + poll_wait(file, &ctx->fd_wqh, wait);
6594 +
6595 +- switch (ctx->state) {
6596 +- case UFFD_STATE_WAIT_API:
6597 ++ if (!userfaultfd_is_initialized(ctx))
6598 + return EPOLLERR;
6599 +- case UFFD_STATE_RUNNING:
6600 +- /*
6601 +- * poll() never guarantees that read won't block.
6602 +- * userfaults can be waken before they're read().
6603 +- */
6604 +- if (unlikely(!(file->f_flags & O_NONBLOCK)))
6605 +- return EPOLLERR;
6606 +- /*
6607 +- * lockless access to see if there are pending faults
6608 +- * __pollwait last action is the add_wait_queue but
6609 +- * the spin_unlock would allow the waitqueue_active to
6610 +- * pass above the actual list_add inside
6611 +- * add_wait_queue critical section. So use a full
6612 +- * memory barrier to serialize the list_add write of
6613 +- * add_wait_queue() with the waitqueue_active read
6614 +- * below.
6615 +- */
6616 +- ret = 0;
6617 +- smp_mb();
6618 +- if (waitqueue_active(&ctx->fault_pending_wqh))
6619 +- ret = EPOLLIN;
6620 +- else if (waitqueue_active(&ctx->event_wqh))
6621 +- ret = EPOLLIN;
6622 +-
6623 +- return ret;
6624 +- default:
6625 +- WARN_ON_ONCE(1);
6626 ++
6627 ++ /*
6628 ++ * poll() never guarantees that read won't block.
6629 ++ * userfaults can be waken before they're read().
6630 ++ */
6631 ++ if (unlikely(!(file->f_flags & O_NONBLOCK)))
6632 + return EPOLLERR;
6633 +- }
6634 ++ /*
6635 ++ * lockless access to see if there are pending faults
6636 ++ * __pollwait last action is the add_wait_queue but
6637 ++ * the spin_unlock would allow the waitqueue_active to
6638 ++ * pass above the actual list_add inside
6639 ++ * add_wait_queue critical section. So use a full
6640 ++ * memory barrier to serialize the list_add write of
6641 ++ * add_wait_queue() with the waitqueue_active read
6642 ++ * below.
6643 ++ */
6644 ++ ret = 0;
6645 ++ smp_mb();
6646 ++ if (waitqueue_active(&ctx->fault_pending_wqh))
6647 ++ ret = EPOLLIN;
6648 ++ else if (waitqueue_active(&ctx->event_wqh))
6649 ++ ret = EPOLLIN;
6650 ++
6651 ++ return ret;
6652 + }
6653 +
6654 + static const struct file_operations userfaultfd_fops;
6655 +@@ -1205,7 +1200,7 @@ static ssize_t userfaultfd_read(struct file *file, char __user *buf,
6656 + struct uffd_msg msg;
6657 + int no_wait = file->f_flags & O_NONBLOCK;
6658 +
6659 +- if (ctx->state == UFFD_STATE_WAIT_API)
6660 ++ if (!userfaultfd_is_initialized(ctx))
6661 + return -EINVAL;
6662 +
6663 + for (;;) {
6664 +@@ -1807,9 +1802,10 @@ out:
6665 + static inline unsigned int uffd_ctx_features(__u64 user_features)
6666 + {
6667 + /*
6668 +- * For the current set of features the bits just coincide
6669 ++ * For the current set of features the bits just coincide. Set
6670 ++ * UFFD_FEATURE_INITIALIZED to mark the features as enabled.
6671 + */
6672 +- return (unsigned int)user_features;
6673 ++ return (unsigned int)user_features | UFFD_FEATURE_INITIALIZED;
6674 + }
6675 +
6676 + /*
6677 +@@ -1822,12 +1818,10 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
6678 + {
6679 + struct uffdio_api uffdio_api;
6680 + void __user *buf = (void __user *)arg;
6681 ++ unsigned int ctx_features;
6682 + int ret;
6683 + __u64 features;
6684 +
6685 +- ret = -EINVAL;
6686 +- if (ctx->state != UFFD_STATE_WAIT_API)
6687 +- goto out;
6688 + ret = -EFAULT;
6689 + if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
6690 + goto out;
6691 +@@ -1844,9 +1838,13 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
6692 + ret = -EFAULT;
6693 + if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
6694 + goto out;
6695 +- ctx->state = UFFD_STATE_RUNNING;
6696 ++
6697 + /* only enable the requested features for this uffd context */
6698 +- ctx->features = uffd_ctx_features(features);
6699 ++ ctx_features = uffd_ctx_features(features);
6700 ++ ret = -EINVAL;
6701 ++ if (cmpxchg(&ctx->features, 0, ctx_features) != 0)
6702 ++ goto err_out;
6703 ++
6704 + ret = 0;
6705 + out:
6706 + return ret;
6707 +@@ -1863,7 +1861,7 @@ static long userfaultfd_ioctl(struct file *file, unsigned cmd,
6708 + int ret = -EINVAL;
6709 + struct userfaultfd_ctx *ctx = file->private_data;
6710 +
6711 +- if (cmd != UFFDIO_API && ctx->state == UFFD_STATE_WAIT_API)
6712 ++ if (cmd != UFFDIO_API && !userfaultfd_is_initialized(ctx))
6713 + return -EINVAL;
6714 +
6715 + switch(cmd) {
6716 +@@ -1964,7 +1962,6 @@ SYSCALL_DEFINE1(userfaultfd, int, flags)
6717 + refcount_set(&ctx->refcount, 1);
6718 + ctx->flags = flags;
6719 + ctx->features = 0;
6720 +- ctx->state = UFFD_STATE_WAIT_API;
6721 + ctx->released = false;
6722 + ctx->mmap_changing = false;
6723 + ctx->mm = current->mm;
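
The userfaultfd rework above replaces the two-value state machine with a
single reserved feature bit, set exactly once with cmpxchg() so that two
racing UFFDIO_API callers cannot both initialise the context. A compact
C11 analogue of that one-shot initialisation (api_init() and the feature
values are illustrative):

#include <stdatomic.h>
#include <stdio.h>

#define FEATURE_INITIALIZED (1u << 31)

static _Atomic unsigned int features;

static int api_init(unsigned int requested)
{
	unsigned int expected = 0;

	/* Only the caller that moves features from 0 wins the race. */
	if (!atomic_compare_exchange_strong(&features, &expected,
					    requested | FEATURE_INITIALIZED))
		return -1;		/* -EINVAL in the kernel version */
	return 0;
}

int main(void)
{
	printf("first:  %d\n", api_init(0x3));	/* succeeds */
	printf("second: %d\n", api_init(0x3));	/* rejected */
	return 0;
}
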
6724 +diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
6725 +index 0588ef3bc6ff6..48722f2b8543b 100644
6726 +--- a/include/crypto/public_key.h
6727 ++++ b/include/crypto/public_key.h
6728 +@@ -38,9 +38,9 @@ extern void public_key_free(struct public_key *key);
6729 + struct public_key_signature {
6730 + struct asymmetric_key_id *auth_ids[2];
6731 + u8 *s; /* Signature */
6732 +- u32 s_size; /* Number of bytes in signature */
6733 + u8 *digest;
6734 +- u8 digest_size; /* Number of bytes in digest */
6735 ++ u32 s_size; /* Number of bytes in signature */
6736 ++ u32 digest_size; /* Number of bytes in digest */
6737 + const char *pkey_algo;
6738 + const char *hash_algo;
6739 + const char *encoding;
6740 +diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
6741 +index a0513c444446d..cef70d6e1657c 100644
6742 +--- a/include/linux/hugetlb.h
6743 ++++ b/include/linux/hugetlb.h
6744 +@@ -542,6 +542,11 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
6745 +
6746 + void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);
6747 +
6748 ++static inline void hugetlb_count_init(struct mm_struct *mm)
6749 ++{
6750 ++ atomic_long_set(&mm->hugetlb_usage, 0);
6751 ++}
6752 ++
6753 + static inline void hugetlb_count_add(long l, struct mm_struct *mm)
6754 + {
6755 + atomic_long_add(l, &mm->hugetlb_usage);
6756 +@@ -711,6 +716,10 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
6757 + return &mm->page_table_lock;
6758 + }
6759 +
6760 ++static inline void hugetlb_count_init(struct mm_struct *mm)
6761 ++{
6762 ++}
6763 ++
6764 + static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
6765 + {
6766 + }
6767 +diff --git a/include/linux/list.h b/include/linux/list.h
6768 +index 85c92555e31f8..ce19c6b632a59 100644
6769 +--- a/include/linux/list.h
6770 ++++ b/include/linux/list.h
6771 +@@ -567,6 +567,15 @@ static inline void list_splice_tail_init(struct list_head *list,
6772 + pos != (head); \
6773 + pos = n, n = pos->prev)
6774 +
6775 ++/**
6776 ++ * list_entry_is_head - test if the entry points to the head of the list
6777 ++ * @pos: the type * to cursor
6778 ++ * @head: the head for your list.
6779 ++ * @member: the name of the list_head within the struct.
6780 ++ */
6781 ++#define list_entry_is_head(pos, head, member) \
6782 ++ (&pos->member == (head))
6783 ++
6784 + /**
6785 + * list_for_each_entry - iterate over list of given type
6786 + * @pos: the type * to use as a loop cursor.
6787 +@@ -575,7 +584,7 @@ static inline void list_splice_tail_init(struct list_head *list,
6788 + */
6789 + #define list_for_each_entry(pos, head, member) \
6790 + for (pos = list_first_entry(head, typeof(*pos), member); \
6791 +- &pos->member != (head); \
6792 ++ !list_entry_is_head(pos, head, member); \
6793 + pos = list_next_entry(pos, member))
6794 +
6795 + /**
6796 +@@ -586,7 +595,7 @@ static inline void list_splice_tail_init(struct list_head *list,
6797 + */
6798 + #define list_for_each_entry_reverse(pos, head, member) \
6799 + for (pos = list_last_entry(head, typeof(*pos), member); \
6800 +- &pos->member != (head); \
6801 ++ !list_entry_is_head(pos, head, member); \
6802 + pos = list_prev_entry(pos, member))
6803 +
6804 + /**
6805 +@@ -611,7 +620,7 @@ static inline void list_splice_tail_init(struct list_head *list,
6806 + */
6807 + #define list_for_each_entry_continue(pos, head, member) \
6808 + for (pos = list_next_entry(pos, member); \
6809 +- &pos->member != (head); \
6810 ++ !list_entry_is_head(pos, head, member); \
6811 + pos = list_next_entry(pos, member))
6812 +
6813 + /**
6814 +@@ -625,7 +634,7 @@ static inline void list_splice_tail_init(struct list_head *list,
6815 + */
6816 + #define list_for_each_entry_continue_reverse(pos, head, member) \
6817 + for (pos = list_prev_entry(pos, member); \
6818 +- &pos->member != (head); \
6819 ++ !list_entry_is_head(pos, head, member); \
6820 + pos = list_prev_entry(pos, member))
6821 +
6822 + /**
6823 +@@ -637,7 +646,7 @@ static inline void list_splice_tail_init(struct list_head *list,
6824 + * Iterate over list of given type, continuing from current position.
6825 + */
6826 + #define list_for_each_entry_from(pos, head, member) \
6827 +- for (; &pos->member != (head); \
6828 ++ for (; !list_entry_is_head(pos, head, member); \
6829 + pos = list_next_entry(pos, member))
6830 +
6831 + /**
6832 +@@ -650,7 +659,7 @@ static inline void list_splice_tail_init(struct list_head *list,
6833 + * Iterate backwards over list of given type, continuing from current position.
6834 + */
6835 + #define list_for_each_entry_from_reverse(pos, head, member) \
6836 +- for (; &pos->member != (head); \
6837 ++ for (; !list_entry_is_head(pos, head, member); \
6838 + pos = list_prev_entry(pos, member))
6839 +
6840 + /**
6841 +@@ -663,7 +672,7 @@ static inline void list_splice_tail_init(struct list_head *list,
6842 + #define list_for_each_entry_safe(pos, n, head, member) \
6843 + for (pos = list_first_entry(head, typeof(*pos), member), \
6844 + n = list_next_entry(pos, member); \
6845 +- &pos->member != (head); \
6846 ++ !list_entry_is_head(pos, head, member); \
6847 + pos = n, n = list_next_entry(n, member))
6848 +
6849 + /**
6850 +@@ -679,7 +688,7 @@ static inline void list_splice_tail_init(struct list_head *list,
6851 + #define list_for_each_entry_safe_continue(pos, n, head, member) \
6852 + for (pos = list_next_entry(pos, member), \
6853 + n = list_next_entry(pos, member); \
6854 +- &pos->member != (head); \
6855 ++ !list_entry_is_head(pos, head, member); \
6856 + pos = n, n = list_next_entry(n, member))
6857 +
6858 + /**
6859 +@@ -694,7 +703,7 @@ static inline void list_splice_tail_init(struct list_head *list,
6860 + */
6861 + #define list_for_each_entry_safe_from(pos, n, head, member) \
6862 + for (n = list_next_entry(pos, member); \
6863 +- &pos->member != (head); \
6864 ++ !list_entry_is_head(pos, head, member); \
6865 + pos = n, n = list_next_entry(n, member))
6866 +
6867 + /**
6868 +@@ -710,7 +719,7 @@ static inline void list_splice_tail_init(struct list_head *list,
6869 + #define list_for_each_entry_safe_reverse(pos, n, head, member) \
6870 + for (pos = list_last_entry(head, typeof(*pos), member), \
6871 + n = list_prev_entry(pos, member); \
6872 +- &pos->member != (head); \
6873 ++ !list_entry_is_head(pos, head, member); \
6874 + pos = n, n = list_prev_entry(n, member))
6875 +
6876 + /**
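
Why list_entry_is_head() matters: when a list_for_each_entry() loop runs
to completion, the cursor is left pointing at an offset computed from the
list head, not at a real entry, and comparing it against NULL (as the
net/9p/trans_xen.c hunk later in this patch used to do) can never detect
that. A minimal userspace sketch of the intrusive-list termination test
(the extra type parameter stands in for the kernel's typeof()):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry_is_head(pos, head, member) \
	(&(pos)->member == (head))
#define list_for_each_entry(pos, head, member, type) \
	for (pos = container_of((head)->next, type, member); \
	     !list_entry_is_head(pos, head, member); \
	     pos = container_of(pos->member.next, type, member))

struct item { int val; struct list_head node; };

int main(void)
{
	struct list_head head = { &head, &head };	/* empty list */
	struct item *it;

	list_for_each_entry(it, &head, node, struct item)
		;				/* body never runs */

	/* 'it' is now a fiction derived from 'head', never NULL. */
	printf("found: %s\n",
	       list_entry_is_head(it, &head, node) ? "no" : "yes");
	return 0;
}
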
6877 +diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
6878 +index 451efd4499cc5..961e35c68e413 100644
6879 +--- a/include/linux/memory_hotplug.h
6880 ++++ b/include/linux/memory_hotplug.h
6881 +@@ -358,6 +358,6 @@ extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
6882 + unsigned long pnum);
6883 + extern bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages,
6884 + int online_type);
6885 +-extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
6886 +- unsigned long nr_pages);
6887 ++extern struct zone *zone_for_pfn_range(int online_type, int nid,
6888 ++ unsigned long start_pfn, unsigned long nr_pages);
6889 + #endif /* __LINUX_MEMORY_HOTPLUG_H */
6890 +diff --git a/include/linux/pci.h b/include/linux/pci.h
6891 +index 6a6a819c5b49b..9a937f8b27838 100644
6892 +--- a/include/linux/pci.h
6893 ++++ b/include/linux/pci.h
6894 +@@ -1688,8 +1688,9 @@ static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
6895 + static inline void pci_disable_device(struct pci_dev *dev) { }
6896 + static inline int pci_assign_resource(struct pci_dev *dev, int i)
6897 + { return -EBUSY; }
6898 +-static inline int __pci_register_driver(struct pci_driver *drv,
6899 +- struct module *owner)
6900 ++static inline int __must_check __pci_register_driver(struct pci_driver *drv,
6901 ++ struct module *owner,
6902 ++ const char *mod_name)
6903 + { return 0; }
6904 + static inline int pci_register_driver(struct pci_driver *drv)
6905 + { return 0; }
6906 +diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
6907 +index 0ad57693f3926..42588645478d9 100644
6908 +--- a/include/linux/pci_ids.h
6909 ++++ b/include/linux/pci_ids.h
6910 +@@ -2476,7 +2476,8 @@
6911 + #define PCI_VENDOR_ID_TDI 0x192E
6912 + #define PCI_DEVICE_ID_TDI_EHCI 0x0101
6913 +
6914 +-#define PCI_VENDOR_ID_FREESCALE 0x1957
6915 ++#define PCI_VENDOR_ID_FREESCALE 0x1957 /* duplicate: NXP */
6916 ++#define PCI_VENDOR_ID_NXP 0x1957 /* duplicate: FREESCALE */
6917 + #define PCI_DEVICE_ID_MPC8308 0xc006
6918 + #define PCI_DEVICE_ID_MPC8315E 0x00b4
6919 + #define PCI_DEVICE_ID_MPC8315 0x00b5
6920 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
6921 +index 6493c98c86317..b04b5bd43f541 100644
6922 +--- a/include/linux/skbuff.h
6923 ++++ b/include/linux/skbuff.h
6924 +@@ -1887,7 +1887,7 @@ static inline void __skb_insert(struct sk_buff *newsk,
6925 + WRITE_ONCE(newsk->prev, prev);
6926 + WRITE_ONCE(next->prev, newsk);
6927 + WRITE_ONCE(prev->next, newsk);
6928 +- list->qlen++;
6929 ++ WRITE_ONCE(list->qlen, list->qlen + 1);
6930 + }
6931 +
6932 + static inline void __skb_queue_splice(const struct sk_buff_head *list,
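
The __skb_insert() hunk turns a plain qlen++ into WRITE_ONCE() so lockless
readers such as skb_queue_len() observe a single, untorn store rather than
whatever the compiler felt like emitting. Rough C11 analogue of the
READ_ONCE/WRITE_ONCE pairing (relaxed atomics play the same role here):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int qlen;

static void writer(void)
{
	/* WRITE_ONCE(list->qlen, list->qlen + 1) */
	unsigned int v = atomic_load_explicit(&qlen, memory_order_relaxed);

	atomic_store_explicit(&qlen, v + 1, memory_order_relaxed);
}

static unsigned int reader(void)
{
	/* READ_ONCE(list->qlen) on the lockless side */
	return atomic_load_explicit(&qlen, memory_order_relaxed);
}

int main(void)
{
	writer();
	printf("qlen = %u\n", reader());
	return 0;
}
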
6933 +diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
6934 +index d7ef5b97174ce..3c6c4b1dbf1a4 100644
6935 +--- a/include/linux/sunrpc/xprt.h
6936 ++++ b/include/linux/sunrpc/xprt.h
6937 +@@ -419,6 +419,7 @@ void xprt_unlock_connect(struct rpc_xprt *, void *);
6938 + #define XPRT_CONGESTED (9)
6939 + #define XPRT_CWND_WAIT (10)
6940 + #define XPRT_WRITE_SPACE (11)
6941 ++#define XPRT_SND_IS_COOKIE (12)
6942 +
6943 + static inline void xprt_set_connected(struct rpc_xprt *xprt)
6944 + {
6945 +diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
6946 +index edbbf4bfdd9e5..4a245d7a5c8d6 100644
6947 +--- a/include/uapi/linux/pkt_sched.h
6948 ++++ b/include/uapi/linux/pkt_sched.h
6949 +@@ -807,6 +807,8 @@ struct tc_codel_xstats {
6950 +
6951 + /* FQ_CODEL */
6952 +
6953 ++#define FQ_CODEL_QUANTUM_MAX (1 << 20)
6954 ++
6955 + enum {
6956 + TCA_FQ_CODEL_UNSPEC,
6957 + TCA_FQ_CODEL_TARGET,
6958 +diff --git a/include/uapi/linux/serial_reg.h b/include/uapi/linux/serial_reg.h
6959 +index be07b5470f4bb..f51bc8f368134 100644
6960 +--- a/include/uapi/linux/serial_reg.h
6961 ++++ b/include/uapi/linux/serial_reg.h
6962 +@@ -62,6 +62,7 @@
6963 + * ST16C654: 8 16 56 60 8 16 32 56 PORT_16654
6964 + * TI16C750: 1 16 32 56 xx xx xx xx PORT_16750
6965 + * TI16C752: 8 16 56 60 8 16 32 56
6966 ++ * OX16C950: 16 32 112 120 16 32 64 112 PORT_16C950
6967 + * Tegra: 1 4 8 14 16 8 4 1 PORT_TEGRA
6968 + */
6969 + #define UART_FCR_R_TRIG_00 0x00
6970 +diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
6971 +index cb6425e52bf7a..01e893cf9b9f7 100644
6972 +--- a/kernel/dma/debug.c
6973 ++++ b/kernel/dma/debug.c
6974 +@@ -846,7 +846,7 @@ static int dump_show(struct seq_file *seq, void *v)
6975 + }
6976 + DEFINE_SHOW_ATTRIBUTE(dump);
6977 +
6978 +-static void dma_debug_fs_init(void)
6979 ++static int __init dma_debug_fs_init(void)
6980 + {
6981 + struct dentry *dentry = debugfs_create_dir("dma-api", NULL);
6982 +
6983 +@@ -859,7 +859,10 @@ static void dma_debug_fs_init(void)
6984 + debugfs_create_u32("nr_total_entries", 0444, dentry, &nr_total_entries);
6985 + debugfs_create_file("driver_filter", 0644, dentry, NULL, &filter_fops);
6986 + debugfs_create_file("dump", 0444, dentry, NULL, &dump_fops);
6987 ++
6988 ++ return 0;
6989 + }
6990 ++core_initcall_sync(dma_debug_fs_init);
6991 +
6992 + static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
6993 + {
6994 +@@ -944,8 +947,6 @@ static int dma_debug_init(void)
6995 + spin_lock_init(&dma_entry_hash[i].lock);
6996 + }
6997 +
6998 +- dma_debug_fs_init();
6999 +-
7000 + nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES);
7001 + for (i = 0; i < nr_pages; ++i)
7002 + dma_debug_create_entries(GFP_KERNEL);
7003 +diff --git a/kernel/events/core.c b/kernel/events/core.c
7004 +index 2f848123cdae8..1993a741d2dc5 100644
7005 +--- a/kernel/events/core.c
7006 ++++ b/kernel/events/core.c
7007 +@@ -9259,7 +9259,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
7008 + return;
7009 +
7010 + if (ifh->nr_file_filters) {
7011 +- mm = get_task_mm(event->ctx->task);
7012 ++ mm = get_task_mm(task);
7013 + if (!mm)
7014 + goto restart;
7015 +
7016 +diff --git a/kernel/fork.c b/kernel/fork.c
7017 +index 50f37d5afb32b..cf2cebd214b92 100644
7018 +--- a/kernel/fork.c
7019 ++++ b/kernel/fork.c
7020 +@@ -1028,6 +1028,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
7021 + mm->pmd_huge_pte = NULL;
7022 + #endif
7023 + mm_init_uprobes_state(mm);
7024 ++ hugetlb_count_init(mm);
7025 +
7026 + if (current->mm) {
7027 + mm->flags = current->mm->flags & MMF_INIT_MASK;
7028 +diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
7029 +index a6a79f85c81a8..f26415341c752 100644
7030 +--- a/kernel/pid_namespace.c
7031 ++++ b/kernel/pid_namespace.c
7032 +@@ -53,7 +53,8 @@ static struct kmem_cache *create_pid_cachep(unsigned int level)
7033 + mutex_lock(&pid_caches_mutex);
7034 + /* Name collision forces to do allocation under mutex. */
7035 + if (!*pkc)
7036 +- *pkc = kmem_cache_create(name, len, 0, SLAB_HWCACHE_ALIGN, 0);
7037 ++ *pkc = kmem_cache_create(name, len, 0,
7038 ++ SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, 0);
7039 + mutex_unlock(&pid_caches_mutex);
7040 + /* current can fail, but someone else can succeed. */
7041 + return READ_ONCE(*pkc);
7042 +diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
7043 +index 233322c77b76c..5de084dab4fa6 100644
7044 +--- a/kernel/trace/trace_kprobe.c
7045 ++++ b/kernel/trace/trace_kprobe.c
7046 +@@ -646,7 +646,11 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
7047 + /* Register new event */
7048 + ret = register_kprobe_event(tk);
7049 + if (ret) {
7050 +- pr_warn("Failed to register probe event(%d)\n", ret);
7051 ++ if (ret == -EEXIST) {
7052 ++ trace_probe_log_set_index(0);
7053 ++ trace_probe_log_err(0, EVENT_EXIST);
7054 ++ } else
7055 ++ pr_warn("Failed to register probe event(%d)\n", ret);
7056 + goto end;
7057 + }
7058 +
7059 +diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
7060 +index f98d6d94cbbf7..23e85cb151346 100644
7061 +--- a/kernel/trace/trace_probe.c
7062 ++++ b/kernel/trace/trace_probe.c
7063 +@@ -1029,11 +1029,36 @@ error:
7064 + return ret;
7065 + }
7066 +
7067 ++static struct trace_event_call *
7068 ++find_trace_event_call(const char *system, const char *event_name)
7069 ++{
7070 ++ struct trace_event_call *tp_event;
7071 ++ const char *name;
7072 ++
7073 ++ list_for_each_entry(tp_event, &ftrace_events, list) {
7074 ++ if (!tp_event->class->system ||
7075 ++ strcmp(system, tp_event->class->system))
7076 ++ continue;
7077 ++ name = trace_event_name(tp_event);
7078 ++ if (!name || strcmp(event_name, name))
7079 ++ continue;
7080 ++ return tp_event;
7081 ++ }
7082 ++
7083 ++ return NULL;
7084 ++}
7085 ++
7086 + int trace_probe_register_event_call(struct trace_probe *tp)
7087 + {
7088 + struct trace_event_call *call = trace_probe_event_call(tp);
7089 + int ret;
7090 +
7091 ++ lockdep_assert_held(&event_mutex);
7092 ++
7093 ++ if (find_trace_event_call(trace_probe_group_name(tp),
7094 ++ trace_probe_name(tp)))
7095 ++ return -EEXIST;
7096 ++
7097 + ret = register_trace_event(&call->event);
7098 + if (!ret)
7099 + return -ENODEV;
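
find_trace_event_call() above is a deliberate linear scan keyed on the
(system, event) pair, run under event_mutex, so a duplicate probe name
fails fast with -EEXIST instead of half-registering. Userspace schematic
of that pre-registration check (the table and register_event() are
illustrative):

#include <stdio.h>
#include <string.h>

struct ev { const char *system, *name; };

static const struct ev registered[] = {
	{ "kprobes", "myprobe" },
};

static int register_event(const char *system, const char *name)
{
	for (size_t i = 0; i < sizeof(registered) / sizeof(registered[0]); i++)
		if (!strcmp(registered[i].system, system) &&
		    !strcmp(registered[i].name, name))
			return -17;	/* -EEXIST */
	return 0;			/* free to register */
}

int main(void)
{
	printf("dup: %d\n", register_event("kprobes", "myprobe"));
	printf("new: %d\n", register_event("kprobes", "other"));
	return 0;
}
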
7100 +diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
7101 +index a0ff9e200ef6f..bab9e0dba9af2 100644
7102 +--- a/kernel/trace/trace_probe.h
7103 ++++ b/kernel/trace/trace_probe.h
7104 +@@ -410,6 +410,7 @@ extern int traceprobe_define_arg_fields(struct trace_event_call *event_call,
7105 + C(NO_EVENT_NAME, "Event name is not specified"), \
7106 + C(EVENT_TOO_LONG, "Event name is too long"), \
7107 + C(BAD_EVENT_NAME, "Event name must follow the same rules as C identifiers"), \
7108 ++ C(EVENT_EXIST, "Given group/event name is already used by another event"), \
7109 + C(RETVAL_ON_PROBE, "$retval is not available on probe"), \
7110 + C(BAD_STACK_NUM, "Invalid stack number"), \
7111 + C(BAD_ARG_NUM, "Invalid argument number"), \
7112 +diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
7113 +index 5294843de6efd..b515db036becc 100644
7114 +--- a/kernel/trace/trace_uprobe.c
7115 ++++ b/kernel/trace/trace_uprobe.c
7116 +@@ -514,7 +514,11 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
7117 +
7118 + ret = register_uprobe_event(tu);
7119 + if (ret) {
7120 +- pr_warn("Failed to register probe event(%d)\n", ret);
7121 ++ if (ret == -EEXIST) {
7122 ++ trace_probe_log_set_index(0);
7123 ++ trace_probe_log_err(0, EVENT_EXIST);
7124 ++ } else
7125 ++ pr_warn("Failed to register probe event(%d)\n", ret);
7126 + goto end;
7127 + }
7128 +
7129 +diff --git a/kernel/workqueue.c b/kernel/workqueue.c
7130 +index 6aeb53b4e19f8..885d4792abdfc 100644
7131 +--- a/kernel/workqueue.c
7132 ++++ b/kernel/workqueue.c
7133 +@@ -5869,6 +5869,13 @@ static void __init wq_numa_init(void)
7134 + return;
7135 + }
7136 +
7137 ++ for_each_possible_cpu(cpu) {
7138 ++ if (WARN_ON(cpu_to_node(cpu) == NUMA_NO_NODE)) {
7139 ++ pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
7140 ++ return;
7141 ++ }
7142 ++ }
7143 ++
7144 + wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs();
7145 + BUG_ON(!wq_update_unbound_numa_attrs_buf);
7146 +
7147 +@@ -5886,11 +5893,6 @@ static void __init wq_numa_init(void)
7148 +
7149 + for_each_possible_cpu(cpu) {
7150 + node = cpu_to_node(cpu);
7151 +- if (WARN_ON(node == NUMA_NO_NODE)) {
7152 +- pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
7153 +- /* happens iff arch is bonkers, let's just proceed */
7154 +- return;
7155 +- }
7156 + cpumask_set_cpu(cpu, tbl[node]);
7157 + }
7158 +
7159 +diff --git a/lib/test_bpf.c b/lib/test_bpf.c
7160 +index 5ef3eccee27cb..3ae002ced4c7a 100644
7161 +--- a/lib/test_bpf.c
7162 ++++ b/lib/test_bpf.c
7163 +@@ -4286,8 +4286,8 @@ static struct bpf_test tests[] = {
7164 + .u.insns_int = {
7165 + BPF_LD_IMM64(R0, 0),
7166 + BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
7167 +- BPF_STX_MEM(BPF_W, R10, R1, -40),
7168 +- BPF_LDX_MEM(BPF_W, R0, R10, -40),
7169 ++ BPF_STX_MEM(BPF_DW, R10, R1, -40),
7170 ++ BPF_LDX_MEM(BPF_DW, R0, R10, -40),
7171 + BPF_EXIT_INSN(),
7172 + },
7173 + INTERNAL,
7174 +@@ -6684,7 +6684,14 @@ static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
7175 + u64 duration;
7176 + u32 ret;
7177 +
7178 +- if (test->test[i].data_size == 0 &&
7179 ++ /*
7180 ++ * NOTE: Several sub-tests may be present, in which case
7181 ++ * a zero {data_size, result} tuple indicates the end of
7182 ++ * the sub-test array. The first test is always run,
7183 ++ * even if both data_size and result happen to be zero.
7184 ++ */
7185 ++ if (i > 0 &&
7186 ++ test->test[i].data_size == 0 &&
7187 + test->test[i].result == 0)
7188 + break;
7189 +
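
The run_one() change above encodes a subtle sentinel rule: a {0, 0}
data_size/result tuple terminates the subtest array, yet a legitimate
first subtest may itself be all zeroes, so the terminator check must skip
index 0. Sketch of the corrected loop (struct sub and run_all() are
stand-ins):

#include <stdio.h>

struct sub { int data_size, result; };

static void run_all(const struct sub *t, int max)
{
	for (int i = 0; i < max; i++) {
		if (i > 0 && t[i].data_size == 0 && t[i].result == 0)
			break;		/* terminator, not a subtest */
		printf("running subtest %d\n", i);
	}
}

int main(void)
{
	struct sub tests[4] = { {0, 0}, {4, 1}, {0, 0}, {8, 2} };

	run_all(tests, 4);		/* runs subtests 0 and 1 only */
	return 0;
}
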
7190 +diff --git a/lib/test_stackinit.c b/lib/test_stackinit.c
7191 +index 2d7d257a430e6..35d398b065e4f 100644
7192 +--- a/lib/test_stackinit.c
7193 ++++ b/lib/test_stackinit.c
7194 +@@ -67,10 +67,10 @@ static bool range_contains(char *haystack_start, size_t haystack_size,
7195 + #define INIT_STRUCT_none /**/
7196 + #define INIT_STRUCT_zero = { }
7197 + #define INIT_STRUCT_static_partial = { .two = 0, }
7198 +-#define INIT_STRUCT_static_all = { .one = arg->one, \
7199 +- .two = arg->two, \
7200 +- .three = arg->three, \
7201 +- .four = arg->four, \
7202 ++#define INIT_STRUCT_static_all = { .one = 0, \
7203 ++ .two = 0, \
7204 ++ .three = 0, \
7205 ++ .four = 0, \
7206 + }
7207 + #define INIT_STRUCT_dynamic_partial = { .two = arg->two, }
7208 + #define INIT_STRUCT_dynamic_all = { .one = arg->one, \
7209 +@@ -84,8 +84,7 @@ static bool range_contains(char *haystack_start, size_t haystack_size,
7210 + var.one = 0; \
7211 + var.two = 0; \
7212 + var.three = 0; \
7213 +- memset(&var.four, 0, \
7214 +- sizeof(var.four))
7215 ++ var.four = 0
7216 +
7217 + /*
7218 + * @name: unique string name for the test
7219 +@@ -208,18 +207,13 @@ struct test_small_hole {
7220 + unsigned long four;
7221 + };
7222 +
7223 +-/* Try to trigger unhandled padding in a structure. */
7224 +-struct test_aligned {
7225 +- u32 internal1;
7226 +- u64 internal2;
7227 +-} __aligned(64);
7228 +-
7229 ++/* Trigger unhandled padding in a structure. */
7230 + struct test_big_hole {
7231 + u8 one;
7232 + u8 two;
7233 + u8 three;
7234 + /* 61 byte padding hole here. */
7235 +- struct test_aligned four;
7236 ++ u8 four __aligned(64);
7237 + } __aligned(64);
7238 +
7239 + struct test_trailing_hole {
7240 +diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
7241 +index 308beca3ffebc..bcc2686bd0a1b 100644
7242 +--- a/mm/memory_hotplug.c
7243 ++++ b/mm/memory_hotplug.c
7244 +@@ -775,8 +775,8 @@ static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn
7245 + return movable_node_enabled ? movable_zone : kernel_zone;
7246 + }
7247 +
7248 +-struct zone * zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
7249 +- unsigned long nr_pages)
7250 ++struct zone *zone_for_pfn_range(int online_type, int nid,
7251 ++ unsigned long start_pfn, unsigned long nr_pages)
7252 + {
7253 + if (online_type == MMOP_ONLINE_KERNEL)
7254 + return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages);
7255 +diff --git a/mm/vmscan.c b/mm/vmscan.c
7256 +index fad9be4703ece..de94881eaa927 100644
7257 +--- a/mm/vmscan.c
7258 ++++ b/mm/vmscan.c
7259 +@@ -2513,7 +2513,7 @@ out:
7260 + cgroup_size = max(cgroup_size, protection);
7261 +
7262 + scan = lruvec_size - lruvec_size * protection /
7263 +- cgroup_size;
7264 ++ (cgroup_size + 1);
7265 +
7266 + /*
7267 + * Minimally target SWAP_CLUSTER_MAX pages to keep
7268 +diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
7269 +index 3963eb11c3fbd..44e6c74ed4288 100644
7270 +--- a/net/9p/trans_xen.c
7271 ++++ b/net/9p/trans_xen.c
7272 +@@ -138,7 +138,7 @@ static bool p9_xen_write_todo(struct xen_9pfs_dataring *ring, RING_IDX size)
7273 +
7274 + static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
7275 + {
7276 +- struct xen_9pfs_front_priv *priv = NULL;
7277 ++ struct xen_9pfs_front_priv *priv;
7278 + RING_IDX cons, prod, masked_cons, masked_prod;
7279 + unsigned long flags;
7280 + u32 size = p9_req->tc.size;
7281 +@@ -151,7 +151,7 @@ static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
7282 + break;
7283 + }
7284 + read_unlock(&xen_9pfs_lock);
7285 +- if (!priv || priv->client != client)
7286 ++ if (list_entry_is_head(priv, &xen_9pfs_devs, list))
7287 + return -EINVAL;
7288 +
7289 + num = p9_req->tc.tag % priv->num_rings;
7290 +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
7291 +index e8e7f108b0161..31469ff084cd3 100644
7292 +--- a/net/bluetooth/hci_event.c
7293 ++++ b/net/bluetooth/hci_event.c
7294 +@@ -4202,6 +4202,21 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
7295 +
7296 + switch (ev->status) {
7297 + case 0x00:
7298 ++ /* The synchronous connection complete event should only be
7299 ++ * sent once per new connection. Receiving a successful
7300 ++ * complete event when the connection status is already
7301 ++ * BT_CONNECTED means that the device is misbehaving and sent
7302 ++ * multiple complete event packets for the same new connection.
7303 ++ *
7304 ++ * Registering the device more than once can corrupt kernel
7305 ++ * memory, hence upon detecting this invalid event, we report
7306 ++ * an error and ignore the packet.
7307 ++ */
7308 ++ if (conn->state == BT_CONNECTED) {
7309 ++ bt_dev_err(hdev, "Ignoring connect complete event for existing connection");
7310 ++ goto unlock;
7311 ++ }
7312 ++
7313 + conn->handle = __le16_to_cpu(ev->handle);
7314 + conn->state = BT_CONNECTED;
7315 + conn->type = ev->link_type;
7316 +@@ -4905,9 +4920,64 @@ static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
7317 + }
7318 + #endif
7319 +
7320 ++static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
7321 ++ u8 bdaddr_type, bdaddr_t *local_rpa)
7322 ++{
7323 ++ if (conn->out) {
7324 ++ conn->dst_type = bdaddr_type;
7325 ++ conn->resp_addr_type = bdaddr_type;
7326 ++ bacpy(&conn->resp_addr, bdaddr);
7327 ++
7328 ++ /* Check if the controller has set a Local RPA then it must be
7329 ++ * used instead of hdev->rpa.
7330 ++ */
7331 ++ if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
7332 ++ conn->init_addr_type = ADDR_LE_DEV_RANDOM;
7333 ++ bacpy(&conn->init_addr, local_rpa);
7334 ++ } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
7335 ++ conn->init_addr_type = ADDR_LE_DEV_RANDOM;
7336 ++ bacpy(&conn->init_addr, &conn->hdev->rpa);
7337 ++ } else {
7338 ++ hci_copy_identity_address(conn->hdev, &conn->init_addr,
7339 ++ &conn->init_addr_type);
7340 ++ }
7341 ++ } else {
7342 ++ conn->resp_addr_type = conn->hdev->adv_addr_type;
7343 ++ /* Check if the controller has set a Local RPA then it must be
7344 ++ * used instead of hdev->rpa.
7345 ++ */
7346 ++ if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
7347 ++ conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
7348 ++ bacpy(&conn->resp_addr, local_rpa);
7349 ++ } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
7350 ++ /* In case of ext adv, resp_addr will be updated in
7351 ++ * Adv Terminated event.
7352 ++ */
7353 ++ if (!ext_adv_capable(conn->hdev))
7354 ++ bacpy(&conn->resp_addr,
7355 ++ &conn->hdev->random_addr);
7356 ++ } else {
7357 ++ bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
7358 ++ }
7359 ++
7360 ++ conn->init_addr_type = bdaddr_type;
7361 ++ bacpy(&conn->init_addr, bdaddr);
7362 ++
7363 ++ /* For incoming connections, set the default minimum
7364 ++ * and maximum connection interval. They will be used
7365 ++ * to check if the parameters are in range and if not
7366 ++ * trigger the connection update procedure.
7367 ++ */
7368 ++ conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
7369 ++ conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
7370 ++ }
7371 ++}
7372 ++
7373 + static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
7374 +- bdaddr_t *bdaddr, u8 bdaddr_type, u8 role, u16 handle,
7375 +- u16 interval, u16 latency, u16 supervision_timeout)
7376 ++ bdaddr_t *bdaddr, u8 bdaddr_type,
7377 ++ bdaddr_t *local_rpa, u8 role, u16 handle,
7378 ++ u16 interval, u16 latency,
7379 ++ u16 supervision_timeout)
7380 + {
7381 + struct hci_conn_params *params;
7382 + struct hci_conn *conn;
7383 +@@ -4955,32 +5025,7 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
7384 + cancel_delayed_work(&conn->le_conn_timeout);
7385 + }
7386 +
7387 +- if (!conn->out) {
7388 +- /* Set the responder (our side) address type based on
7389 +- * the advertising address type.
7390 +- */
7391 +- conn->resp_addr_type = hdev->adv_addr_type;
7392 +- if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
7393 +- /* In case of ext adv, resp_addr will be updated in
7394 +- * Adv Terminated event.
7395 +- */
7396 +- if (!ext_adv_capable(hdev))
7397 +- bacpy(&conn->resp_addr, &hdev->random_addr);
7398 +- } else {
7399 +- bacpy(&conn->resp_addr, &hdev->bdaddr);
7400 +- }
7401 +-
7402 +- conn->init_addr_type = bdaddr_type;
7403 +- bacpy(&conn->init_addr, bdaddr);
7404 +-
7405 +- /* For incoming connections, set the default minimum
7406 +- * and maximum connection interval. They will be used
7407 +- * to check if the parameters are in range and if not
7408 +- * trigger the connection update procedure.
7409 +- */
7410 +- conn->le_conn_min_interval = hdev->le_conn_min_interval;
7411 +- conn->le_conn_max_interval = hdev->le_conn_max_interval;
7412 +- }
7413 ++ le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
7414 +
7415 + /* Lookup the identity address from the stored connection
7416 + * address and address type.
7417 +@@ -5074,7 +5119,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
7418 + BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
7419 +
7420 + le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
7421 +- ev->role, le16_to_cpu(ev->handle),
7422 ++ NULL, ev->role, le16_to_cpu(ev->handle),
7423 + le16_to_cpu(ev->interval),
7424 + le16_to_cpu(ev->latency),
7425 + le16_to_cpu(ev->supervision_timeout));
7426 +@@ -5088,7 +5133,7 @@ static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
7427 + BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
7428 +
7429 + le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
7430 +- ev->role, le16_to_cpu(ev->handle),
7431 ++ &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
7432 + le16_to_cpu(ev->interval),
7433 + le16_to_cpu(ev->latency),
7434 + le16_to_cpu(ev->supervision_timeout));
7435 +@@ -5119,7 +5164,8 @@ static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
7436 + if (conn) {
7437 + struct adv_info *adv_instance;
7438 +
7439 +- if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM)
7440 ++ if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
7441 ++ bacmp(&conn->resp_addr, BDADDR_ANY))
7442 + return;
7443 +
7444 + if (!hdev->cur_adv_instance) {
7445 +diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
7446 +index 1b7540cb8e5c4..1915943bb646a 100644
7447 +--- a/net/bluetooth/sco.c
7448 ++++ b/net/bluetooth/sco.c
7449 +@@ -48,6 +48,8 @@ struct sco_conn {
7450 + spinlock_t lock;
7451 + struct sock *sk;
7452 +
7453 ++ struct delayed_work timeout_work;
7454 ++
7455 + unsigned int mtu;
7456 + };
7457 +
7458 +@@ -73,9 +75,20 @@ struct sco_pinfo {
7459 + #define SCO_CONN_TIMEOUT (HZ * 40)
7460 + #define SCO_DISCONN_TIMEOUT (HZ * 2)
7461 +
7462 +-static void sco_sock_timeout(struct timer_list *t)
7463 ++static void sco_sock_timeout(struct work_struct *work)
7464 + {
7465 +- struct sock *sk = from_timer(sk, t, sk_timer);
7466 ++ struct sco_conn *conn = container_of(work, struct sco_conn,
7467 ++ timeout_work.work);
7468 ++ struct sock *sk;
7469 ++
7470 ++ sco_conn_lock(conn);
7471 ++ sk = conn->sk;
7472 ++ if (sk)
7473 ++ sock_hold(sk);
7474 ++ sco_conn_unlock(conn);
7475 ++
7476 ++ if (!sk)
7477 ++ return;
7478 +
7479 + BT_DBG("sock %p state %d", sk, sk->sk_state);
7480 +
7481 +@@ -89,14 +102,21 @@ static void sco_sock_timeout(struct timer_list *t)
7482 +
7483 + static void sco_sock_set_timer(struct sock *sk, long timeout)
7484 + {
7485 ++ if (!sco_pi(sk)->conn)
7486 ++ return;
7487 ++
7488 + BT_DBG("sock %p state %d timeout %ld", sk, sk->sk_state, timeout);
7489 +- sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
7490 ++ cancel_delayed_work(&sco_pi(sk)->conn->timeout_work);
7491 ++ schedule_delayed_work(&sco_pi(sk)->conn->timeout_work, timeout);
7492 + }
7493 +
7494 + static void sco_sock_clear_timer(struct sock *sk)
7495 + {
7496 ++ if (!sco_pi(sk)->conn)
7497 ++ return;
7498 ++
7499 + BT_DBG("sock %p state %d", sk, sk->sk_state);
7500 +- sk_stop_timer(sk, &sk->sk_timer);
7501 ++ cancel_delayed_work(&sco_pi(sk)->conn->timeout_work);
7502 + }
7503 +
7504 + /* ---- SCO connections ---- */
7505 +@@ -176,6 +196,9 @@ static void sco_conn_del(struct hci_conn *hcon, int err)
7506 + sco_chan_del(sk, err);
7507 + bh_unlock_sock(sk);
7508 + sock_put(sk);
7509 ++
7510 ++ /* Ensure no more work items will run before freeing conn. */
7511 ++ cancel_delayed_work_sync(&conn->timeout_work);
7512 + }
7513 +
7514 + hcon->sco_data = NULL;
7515 +@@ -190,6 +213,8 @@ static void __sco_chan_add(struct sco_conn *conn, struct sock *sk,
7516 + sco_pi(sk)->conn = conn;
7517 + conn->sk = sk;
7518 +
7519 ++ INIT_DELAYED_WORK(&conn->timeout_work, sco_sock_timeout);
7520 ++
7521 + if (parent)
7522 + bt_accept_enqueue(parent, sk, true);
7523 + }
7524 +@@ -209,44 +234,32 @@ static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
7525 + return err;
7526 + }
7527 +
7528 +-static int sco_connect(struct sock *sk)
7529 ++static int sco_connect(struct hci_dev *hdev, struct sock *sk)
7530 + {
7531 + struct sco_conn *conn;
7532 + struct hci_conn *hcon;
7533 +- struct hci_dev *hdev;
7534 + int err, type;
7535 +
7536 + BT_DBG("%pMR -> %pMR", &sco_pi(sk)->src, &sco_pi(sk)->dst);
7537 +
7538 +- hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src, BDADDR_BREDR);
7539 +- if (!hdev)
7540 +- return -EHOSTUNREACH;
7541 +-
7542 +- hci_dev_lock(hdev);
7543 +-
7544 + if (lmp_esco_capable(hdev) && !disable_esco)
7545 + type = ESCO_LINK;
7546 + else
7547 + type = SCO_LINK;
7548 +
7549 + if (sco_pi(sk)->setting == BT_VOICE_TRANSPARENT &&
7550 +- (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev))) {
7551 +- err = -EOPNOTSUPP;
7552 +- goto done;
7553 +- }
7554 ++ (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev)))
7555 ++ return -EOPNOTSUPP;
7556 +
7557 + hcon = hci_connect_sco(hdev, type, &sco_pi(sk)->dst,
7558 + sco_pi(sk)->setting);
7559 +- if (IS_ERR(hcon)) {
7560 +- err = PTR_ERR(hcon);
7561 +- goto done;
7562 +- }
7563 ++ if (IS_ERR(hcon))
7564 ++ return PTR_ERR(hcon);
7565 +
7566 + conn = sco_conn_add(hcon);
7567 + if (!conn) {
7568 + hci_conn_drop(hcon);
7569 +- err = -ENOMEM;
7570 +- goto done;
7571 ++ return -ENOMEM;
7572 + }
7573 +
7574 + /* Update source addr of the socket */
7575 +@@ -254,7 +267,7 @@ static int sco_connect(struct sock *sk)
7576 +
7577 + err = sco_chan_add(conn, sk, NULL);
7578 + if (err)
7579 +- goto done;
7580 ++ return err;
7581 +
7582 + if (hcon->state == BT_CONNECTED) {
7583 + sco_sock_clear_timer(sk);
7584 +@@ -264,9 +277,6 @@ static int sco_connect(struct sock *sk)
7585 + sco_sock_set_timer(sk, sk->sk_sndtimeo);
7586 + }
7587 +
7588 +-done:
7589 +- hci_dev_unlock(hdev);
7590 +- hci_dev_put(hdev);
7591 + return err;
7592 + }
7593 +
7594 +@@ -484,8 +494,6 @@ static struct sock *sco_sock_alloc(struct net *net, struct socket *sock,
7595 +
7596 + sco_pi(sk)->setting = BT_VOICE_CVSD_16BIT;
7597 +
7598 +- timer_setup(&sk->sk_timer, sco_sock_timeout, 0);
7599 +-
7600 + bt_sock_link(&sco_sk_list, sk);
7601 + return sk;
7602 + }
7603 +@@ -550,6 +558,7 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
7604 + {
7605 + struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
7606 + struct sock *sk = sock->sk;
7607 ++ struct hci_dev *hdev;
7608 + int err;
7609 +
7610 + BT_DBG("sk %p", sk);
7611 +@@ -564,12 +573,19 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
7612 + if (sk->sk_type != SOCK_SEQPACKET)
7613 + return -EINVAL;
7614 +
7615 ++ hdev = hci_get_route(&sa->sco_bdaddr, &sco_pi(sk)->src, BDADDR_BREDR);
7616 ++ if (!hdev)
7617 ++ return -EHOSTUNREACH;
7618 ++ hci_dev_lock(hdev);
7619 ++
7620 + lock_sock(sk);
7621 +
7622 + /* Set destination address and psm */
7623 + bacpy(&sco_pi(sk)->dst, &sa->sco_bdaddr);
7624 +
7625 +- err = sco_connect(sk);
7626 ++ err = sco_connect(hdev, sk);
7627 ++ hci_dev_unlock(hdev);
7628 ++ hci_dev_put(hdev);
7629 + if (err)
7630 + goto done;
7631 +
7632 +diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
7633 +index a566289628522..910f164dd20cb 100644
7634 +--- a/net/caif/chnl_net.c
7635 ++++ b/net/caif/chnl_net.c
7636 +@@ -53,20 +53,6 @@ struct chnl_net {
7637 + enum caif_states state;
7638 + };
7639 +
7640 +-static void robust_list_del(struct list_head *delete_node)
7641 +-{
7642 +- struct list_head *list_node;
7643 +- struct list_head *n;
7644 +- ASSERT_RTNL();
7645 +- list_for_each_safe(list_node, n, &chnl_net_list) {
7646 +- if (list_node == delete_node) {
7647 +- list_del(list_node);
7648 +- return;
7649 +- }
7650 +- }
7651 +- WARN_ON(1);
7652 +-}
7653 +-
7654 + static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
7655 + {
7656 + struct sk_buff *skb;
7657 +@@ -368,6 +354,7 @@ static int chnl_net_init(struct net_device *dev)
7658 + ASSERT_RTNL();
7659 + priv = netdev_priv(dev);
7660 + strncpy(priv->name, dev->name, sizeof(priv->name));
7661 ++ INIT_LIST_HEAD(&priv->list_field);
7662 + return 0;
7663 + }
7664 +
7665 +@@ -376,7 +363,7 @@ static void chnl_net_uninit(struct net_device *dev)
7666 + struct chnl_net *priv;
7667 + ASSERT_RTNL();
7668 + priv = netdev_priv(dev);
7669 +- robust_list_del(&priv->list_field);
7670 ++ list_del_init(&priv->list_field);
7671 + }
7672 +
7673 + static const struct net_device_ops netdev_ops = {
7674 +@@ -541,7 +528,7 @@ static void __exit chnl_exit_module(void)
7675 + rtnl_lock();
7676 + list_for_each_safe(list_node, _tmp, &chnl_net_list) {
7677 + dev = list_entry(list_node, struct chnl_net, list_field);
7678 +- list_del(list_node);
7679 ++ list_del_init(list_node);
7680 + delete_device(dev);
7681 + }
7682 + rtnl_unlock();
7683 +diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
7684 +index 96957a7c732fa..b740a74f06f22 100644
7685 +--- a/net/core/flow_dissector.c
7686 ++++ b/net/core/flow_dissector.c
7687 +@@ -1025,8 +1025,10 @@ proto_again:
7688 + FLOW_DISSECTOR_KEY_IPV4_ADDRS,
7689 + target_container);
7690 +
7691 +- memcpy(&key_addrs->v4addrs, &iph->saddr,
7692 +- sizeof(key_addrs->v4addrs));
7693 ++ memcpy(&key_addrs->v4addrs.src, &iph->saddr,
7694 ++ sizeof(key_addrs->v4addrs.src));
7695 ++ memcpy(&key_addrs->v4addrs.dst, &iph->daddr,
7696 ++ sizeof(key_addrs->v4addrs.dst));
7697 + key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
7698 + }
7699 +
7700 +@@ -1070,8 +1072,10 @@ proto_again:
7701 + FLOW_DISSECTOR_KEY_IPV6_ADDRS,
7702 + target_container);
7703 +
7704 +- memcpy(&key_addrs->v6addrs, &iph->saddr,
7705 +- sizeof(key_addrs->v6addrs));
7706 ++ memcpy(&key_addrs->v6addrs.src, &iph->saddr,
7707 ++ sizeof(key_addrs->v6addrs.src));
7708 ++ memcpy(&key_addrs->v6addrs.dst, &iph->daddr,
7709 ++ sizeof(key_addrs->v6addrs.dst));
7710 + key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
7711 + }
7712 +
7713 +diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
7714 +index 25187528c308a..1f352d669c944 100644
7715 +--- a/net/dccp/minisocks.c
7716 ++++ b/net/dccp/minisocks.c
7717 +@@ -94,6 +94,8 @@ struct sock *dccp_create_openreq_child(const struct sock *sk,
7718 + newdp->dccps_role = DCCP_ROLE_SERVER;
7719 + newdp->dccps_hc_rx_ackvec = NULL;
7720 + newdp->dccps_service_list = NULL;
7721 ++ newdp->dccps_hc_rx_ccid = NULL;
7722 ++ newdp->dccps_hc_tx_ccid = NULL;
7723 + newdp->dccps_service = dreq->dreq_service;
7724 + newdp->dccps_timestamp_echo = dreq->dreq_timestamp_echo;
7725 + newdp->dccps_timestamp_time = dreq->dreq_timestamp_time;
7726 +diff --git a/net/dsa/slave.c b/net/dsa/slave.c
7727 +index 75b4cd4bcafb9..59759ceb426ac 100644
7728 +--- a/net/dsa/slave.c
7729 ++++ b/net/dsa/slave.c
7730 +@@ -1327,13 +1327,11 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev)
7731 + * use the switch internal MDIO bus instead
7732 + */
7733 + ret = dsa_slave_phy_connect(slave_dev, dp->index);
7734 +- if (ret) {
7735 +- netdev_err(slave_dev,
7736 +- "failed to connect to port %d: %d\n",
7737 +- dp->index, ret);
7738 +- phylink_destroy(dp->pl);
7739 +- return ret;
7740 +- }
7741 ++ }
7742 ++ if (ret) {
7743 ++ netdev_err(slave_dev, "failed to connect to PHY: %pe\n",
7744 ++ ERR_PTR(ret));
7745 ++ phylink_destroy(dp->pl);
7746 + }
7747 +
7748 + return ret;
7749 +diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
7750 +index fd8298b8b1c52..c4989e5903e43 100644
7751 +--- a/net/ipv4/ip_gre.c
7752 ++++ b/net/ipv4/ip_gre.c
7753 +@@ -446,8 +446,6 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
7754 +
7755 + static int gre_handle_offloads(struct sk_buff *skb, bool csum)
7756 + {
7757 +- if (csum && skb_checksum_start(skb) < skb->data)
7758 +- return -EINVAL;
7759 + return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
7760 + }
7761 +
7762 +@@ -605,15 +603,20 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
7763 + }
7764 +
7765 + if (dev->header_ops) {
7766 ++ const int pull_len = tunnel->hlen + sizeof(struct iphdr);
7767 ++
7768 + if (skb_cow_head(skb, 0))
7769 + goto free_skb;
7770 +
7771 + tnl_params = (const struct iphdr *)skb->data;
7772 +
7773 ++ if (pull_len > skb_transport_offset(skb))
7774 ++ goto free_skb;
7775 ++
7776 + /* Pull skb since ip_tunnel_xmit() needs skb->data pointing
7777 + * to gre header.
7778 + */
7779 +- skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
7780 ++ skb_pull(skb, pull_len);
7781 + skb_reset_mac_header(skb);
7782 + } else {
7783 + if (skb_cow_head(skb, dev->needed_headroom))
7784 +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
7785 +index f52bc9c22e5b8..0ec529d77a56e 100644
7786 +--- a/net/ipv4/ip_output.c
7787 ++++ b/net/ipv4/ip_output.c
7788 +@@ -446,8 +446,9 @@ static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
7789 + {
7790 + BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
7791 + offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));
7792 +- memcpy(&iph->saddr, &fl4->saddr,
7793 +- sizeof(fl4->saddr) + sizeof(fl4->daddr));
7794 ++
7795 ++ iph->saddr = fl4->saddr;
7796 ++ iph->daddr = fl4->daddr;
7797 + }
7798 +
7799 + /* Note: skb->sk can be different from sk, in case of tunnels */
7800 +diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
7801 +index f5f4369c131c9..858bb10d8341e 100644
7802 +--- a/net/ipv4/nexthop.c
7803 ++++ b/net/ipv4/nexthop.c
7804 +@@ -1183,6 +1183,7 @@ static int nh_create_ipv4(struct net *net, struct nexthop *nh,
7805 + .fc_gw4 = cfg->gw.ipv4,
7806 + .fc_gw_family = cfg->gw.ipv4 ? AF_INET : 0,
7807 + .fc_flags = cfg->nh_flags,
7808 ++ .fc_nlinfo = cfg->nlinfo,
7809 + .fc_encap = cfg->nh_encap,
7810 + .fc_encap_type = cfg->nh_encap_type,
7811 + };
7812 +@@ -1218,6 +1219,7 @@ static int nh_create_ipv6(struct net *net, struct nexthop *nh,
7813 + .fc_ifindex = cfg->nh_ifindex,
7814 + .fc_gateway = cfg->gw.ipv6,
7815 + .fc_flags = cfg->nh_flags,
7816 ++ .fc_nlinfo = cfg->nlinfo,
7817 + .fc_encap = cfg->nh_encap,
7818 + .fc_encap_type = cfg->nh_encap_type,
7819 + };
7820 +diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
7821 +index 8af4fefe371f2..a5ec77a5ad6f5 100644
7822 +--- a/net/ipv4/tcp_fastopen.c
7823 ++++ b/net/ipv4/tcp_fastopen.c
7824 +@@ -379,8 +379,7 @@ struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
7825 + return NULL;
7826 + }
7827 +
7828 +- if (syn_data &&
7829 +- tcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD))
7830 ++ if (tcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD))
7831 + goto fastopen;
7832 +
7833 + if (foc->len == 0) {
7834 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
7835 +index a1768ded2d545..c0fcfa2964686 100644
7836 +--- a/net/ipv4/tcp_input.c
7837 ++++ b/net/ipv4/tcp_input.c
7838 +@@ -1209,7 +1209,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
7839 + if (dup_sack && (sacked & TCPCB_RETRANS)) {
7840 + if (tp->undo_marker && tp->undo_retrans > 0 &&
7841 + after(end_seq, tp->undo_marker))
7842 +- tp->undo_retrans--;
7843 ++ tp->undo_retrans = max_t(int, 0, tp->undo_retrans - pcount);
7844 + if ((sacked & TCPCB_SACKED_ACKED) &&
7845 + before(start_seq, state->reord))
7846 + state->reord = start_seq;
7847 +diff --git a/net/ipv6/netfilter/nf_socket_ipv6.c b/net/ipv6/netfilter/nf_socket_ipv6.c
7848 +index b9df879c48d3f..69c021704abd7 100644
7849 +--- a/net/ipv6/netfilter/nf_socket_ipv6.c
7850 ++++ b/net/ipv6/netfilter/nf_socket_ipv6.c
7851 +@@ -99,7 +99,7 @@ struct sock *nf_sk_lookup_slow_v6(struct net *net, const struct sk_buff *skb,
7852 + {
7853 + __be16 uninitialized_var(dport), uninitialized_var(sport);
7854 + const struct in6_addr *daddr = NULL, *saddr = NULL;
7855 +- struct ipv6hdr *iph = ipv6_hdr(skb);
7856 ++ struct ipv6hdr *iph = ipv6_hdr(skb), ipv6_var;
7857 + struct sk_buff *data_skb = NULL;
7858 + int doff = 0;
7859 + int thoff = 0, tproto;
7860 +@@ -129,8 +129,6 @@ struct sock *nf_sk_lookup_slow_v6(struct net *net, const struct sk_buff *skb,
7861 + thoff + sizeof(*hp);
7862 +
7863 + } else if (tproto == IPPROTO_ICMPV6) {
7864 +- struct ipv6hdr ipv6_var;
7865 +-
7866 + if (extract_icmp6_fields(skb, thoff, &tproto, &saddr, &daddr,
7867 + &sport, &dport, &ipv6_var))
7868 + return NULL;
7869 +diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
7870 +index 95805a6331be2..421b2c89ce12a 100644
7871 +--- a/net/l2tp/l2tp_core.c
7872 ++++ b/net/l2tp/l2tp_core.c
7873 +@@ -886,8 +886,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
7874 + }
7875 +
7876 + if (tunnel->version == L2TP_HDR_VER_3 &&
7877 +- l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
7878 ++ l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) {
7879 ++ l2tp_session_dec_refcount(session);
7880 + goto error;
7881 ++ }
7882 +
7883 + l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
7884 + l2tp_session_dec_refcount(session);
7885 +diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
7886 +index 6f576306a4d74..ddc001ad90555 100644
7887 +--- a/net/mac80211/iface.c
7888 ++++ b/net/mac80211/iface.c
7889 +@@ -1875,9 +1875,16 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
7890 +
7891 + netdev_set_default_ethtool_ops(ndev, &ieee80211_ethtool_ops);
7892 +
7893 +- /* MTU range: 256 - 2304 */
7894 ++ /* MTU range is normally 256 - 2304, where the upper limit is
7895 ++ * the maximum MSDU size. Monitor interfaces send and receive
7896 ++ * MPDU and A-MSDU frames which may be much larger so we do
7897 ++ * not impose an upper limit in that case.
7898 ++ */
7899 + ndev->min_mtu = 256;
7900 +- ndev->max_mtu = local->hw.max_mtu;
7901 ++ if (type == NL80211_IFTYPE_MONITOR)
7902 ++ ndev->max_mtu = 0;
7903 ++ else
7904 ++ ndev->max_mtu = local->hw.max_mtu;
7905 +
7906 + ret = register_netdevice(ndev);
7907 + if (ret) {
7908 +diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
7909 +index 8cd3daf0e3db6..1778e4e8ce247 100644
7910 +--- a/net/netlabel/netlabel_cipso_v4.c
7911 ++++ b/net/netlabel/netlabel_cipso_v4.c
7912 +@@ -144,8 +144,8 @@ static int netlbl_cipsov4_add_std(struct genl_info *info,
7913 + return -ENOMEM;
7914 + doi_def->map.std = kzalloc(sizeof(*doi_def->map.std), GFP_KERNEL);
7915 + if (doi_def->map.std == NULL) {
7916 +- ret_val = -ENOMEM;
7917 +- goto add_std_failure;
7918 ++ kfree(doi_def);
7919 ++ return -ENOMEM;
7920 + }
7921 + doi_def->type = CIPSO_V4_MAP_TRANS;
7922 +
7923 +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
7924 +index 9d993b4cf1aff..acc76a738cfd8 100644
7925 +--- a/net/netlink/af_netlink.c
7926 ++++ b/net/netlink/af_netlink.c
7927 +@@ -2521,13 +2521,15 @@ int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
7928 + /* errors reported via destination sk->sk_err, but propagate
7929 + * delivery errors if NETLINK_BROADCAST_ERROR flag is set */
7930 + err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
7931 ++ if (err == -ESRCH)
7932 ++ err = 0;
7933 + }
7934 +
7935 + if (report) {
7936 + int err2;
7937 +
7938 + err2 = nlmsg_unicast(sk, skb, portid);
7939 +- if (!err || err == -ESRCH)
7940 ++ if (!err)
7941 + err = err2;
7942 + }
7943 +
7944 +diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
7945 +index 76d72c3f52eda..86fb2f953bd5b 100644
7946 +--- a/net/sched/sch_fq_codel.c
7947 ++++ b/net/sched/sch_fq_codel.c
7948 +@@ -370,6 +370,7 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
7949 + {
7950 + struct fq_codel_sched_data *q = qdisc_priv(sch);
7951 + struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
7952 ++ u32 quantum = 0;
7953 + int err;
7954 +
7955 + if (!opt)
7956 +@@ -387,6 +388,13 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
7957 + q->flows_cnt > 65536)
7958 + return -EINVAL;
7959 + }
7960 ++ if (tb[TCA_FQ_CODEL_QUANTUM]) {
7961 ++ quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
7962 ++ if (quantum > FQ_CODEL_QUANTUM_MAX) {
7963 ++ NL_SET_ERR_MSG(extack, "Invalid quantum");
7964 ++ return -EINVAL;
7965 ++ }
7966 ++ }
7967 + sch_tree_lock(sch);
7968 +
7969 + if (tb[TCA_FQ_CODEL_TARGET]) {
7970 +@@ -413,8 +421,8 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
7971 + if (tb[TCA_FQ_CODEL_ECN])
7972 + q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);
7973 +
7974 +- if (tb[TCA_FQ_CODEL_QUANTUM])
7975 +- q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
7976 ++ if (quantum)
7977 ++ q->quantum = quantum;
7978 +
7979 + if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
7980 + q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));
7981 +diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
7982 +index a4de4853c79de..da9ed0613eb7b 100644
7983 +--- a/net/sched/sch_taprio.c
7984 ++++ b/net/sched/sch_taprio.c
7985 +@@ -1503,7 +1503,9 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
7986 + taprio_set_picos_per_byte(dev, q);
7987 +
7988 + if (mqprio) {
7989 +- netdev_set_num_tc(dev, mqprio->num_tc);
7990 ++ err = netdev_set_num_tc(dev, mqprio->num_tc);
7991 ++ if (err)
7992 ++ goto free_sched;
7993 + for (i = 0; i < mqprio->num_tc; i++)
7994 + netdev_set_tc_queue(dev, i,
7995 + mqprio->count[i],
7996 +diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
7997 +index d5470c7fe8792..c0016473a255a 100644
7998 +--- a/net/sunrpc/auth_gss/svcauth_gss.c
7999 ++++ b/net/sunrpc/auth_gss/svcauth_gss.c
8000 +@@ -1937,7 +1937,7 @@ gss_svc_init_net(struct net *net)
8001 + goto out2;
8002 + return 0;
8003 + out2:
8004 +- destroy_use_gss_proxy_proc_entry(net);
8005 ++ rsi_cache_destroy_net(net);
8006 + out1:
8007 + rsc_cache_destroy_net(net);
8008 + return rv;
8009 +diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
8010 +index 639837b3a5d90..3653898f465ff 100644
8011 +--- a/net/sunrpc/xprt.c
8012 ++++ b/net/sunrpc/xprt.c
8013 +@@ -729,9 +729,9 @@ void xprt_force_disconnect(struct rpc_xprt *xprt)
8014 + /* Try to schedule an autoclose RPC call */
8015 + if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
8016 + queue_work(xprtiod_workqueue, &xprt->task_cleanup);
8017 +- else if (xprt->snd_task)
8018 ++ else if (xprt->snd_task && !test_bit(XPRT_SND_IS_COOKIE, &xprt->state))
8019 + rpc_wake_up_queued_task_set_status(&xprt->pending,
8020 +- xprt->snd_task, -ENOTCONN);
8021 ++ xprt->snd_task, -ENOTCONN);
8022 + spin_unlock(&xprt->transport_lock);
8023 + }
8024 + EXPORT_SYMBOL_GPL(xprt_force_disconnect);
8025 +@@ -820,6 +820,7 @@ bool xprt_lock_connect(struct rpc_xprt *xprt,
8026 + goto out;
8027 + if (xprt->snd_task != task)
8028 + goto out;
8029 ++ set_bit(XPRT_SND_IS_COOKIE, &xprt->state);
8030 + xprt->snd_task = cookie;
8031 + ret = true;
8032 + out:
8033 +@@ -835,6 +836,7 @@ void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
8034 + if (!test_bit(XPRT_LOCKED, &xprt->state))
8035 + goto out;
8036 + xprt->snd_task =NULL;
8037 ++ clear_bit(XPRT_SND_IS_COOKIE, &xprt->state);
8038 + xprt->ops->release_xprt(xprt, NULL);
8039 + xprt_schedule_autodisconnect(xprt);
8040 + out:
8041 +diff --git a/net/tipc/socket.c b/net/tipc/socket.c
8042 +index a5922ce9109cf..fbbac9ba2862f 100644
8043 +--- a/net/tipc/socket.c
8044 ++++ b/net/tipc/socket.c
8045 +@@ -1756,6 +1756,7 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
8046 + bool connected = !tipc_sk_type_connectionless(sk);
8047 + struct tipc_sock *tsk = tipc_sk(sk);
8048 + int rc, err, hlen, dlen, copy;
8049 ++ struct tipc_skb_cb *skb_cb;
8050 + struct sk_buff_head xmitq;
8051 + struct tipc_msg *hdr;
8052 + struct sk_buff *skb;
8053 +@@ -1779,6 +1780,7 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
8054 + if (unlikely(rc))
8055 + goto exit;
8056 + skb = skb_peek(&sk->sk_receive_queue);
8057 ++ skb_cb = TIPC_SKB_CB(skb);
8058 + hdr = buf_msg(skb);
8059 + dlen = msg_data_sz(hdr);
8060 + hlen = msg_hdr_sz(hdr);
8061 +@@ -1798,18 +1800,33 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
8062 +
8063 + /* Capture data if non-error msg, otherwise just set return value */
8064 + if (likely(!err)) {
8065 +- copy = min_t(int, dlen, buflen);
8066 +- if (unlikely(copy != dlen))
8067 +- m->msg_flags |= MSG_TRUNC;
8068 +- rc = skb_copy_datagram_msg(skb, hlen, m, copy);
8069 ++ int offset = skb_cb->bytes_read;
8070 ++
8071 ++ copy = min_t(int, dlen - offset, buflen);
8072 ++ rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
8073 ++ if (unlikely(rc))
8074 ++ goto exit;
8075 ++ if (unlikely(offset + copy < dlen)) {
8076 ++ if (flags & MSG_EOR) {
8077 ++ if (!(flags & MSG_PEEK))
8078 ++ skb_cb->bytes_read = offset + copy;
8079 ++ } else {
8080 ++ m->msg_flags |= MSG_TRUNC;
8081 ++ skb_cb->bytes_read = 0;
8082 ++ }
8083 ++ } else {
8084 ++ if (flags & MSG_EOR)
8085 ++ m->msg_flags |= MSG_EOR;
8086 ++ skb_cb->bytes_read = 0;
8087 ++ }
8088 + } else {
8089 + copy = 0;
8090 + rc = 0;
8091 +- if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control)
8092 ++ if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control) {
8093 + rc = -ECONNRESET;
8094 ++ goto exit;
8095 ++ }
8096 + }
8097 +- if (unlikely(rc))
8098 +- goto exit;
8099 +
8100 + /* Mark message as group event if applicable */
8101 + if (unlikely(grp_evt)) {
8102 +@@ -1832,6 +1849,9 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
8103 + tipc_node_distr_xmit(sock_net(sk), &xmitq);
8104 + }
8105 +
8106 ++ if (skb_cb->bytes_read)
8107 ++ goto exit;
8108 ++
8109 + tsk_advance_rx_queue(sk);
8110 +
8111 + if (likely(!connected))
8112 +@@ -2255,7 +2275,7 @@ static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
8113 + static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
8114 + u32 dport, struct sk_buff_head *xmitq)
8115 + {
8116 +- unsigned long time_limit = jiffies + 2;
8117 ++ unsigned long time_limit = jiffies + usecs_to_jiffies(20000);
8118 + struct sk_buff *skb;
8119 + unsigned int lim;
8120 + atomic_t *dcnt;
8121 +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
8122 +index 52ee3a9bb7093..3098710c9c344 100644
8123 +--- a/net/unix/af_unix.c
8124 ++++ b/net/unix/af_unix.c
8125 +@@ -2734,7 +2734,7 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
8126 +
8127 + other = unix_peer(sk);
8128 + if (other && unix_peer(other) != sk &&
8129 +- unix_recvq_full(other) &&
8130 ++ unix_recvq_full_lockless(other) &&
8131 + unix_dgram_peer_wake_me(sk, other))
8132 + writable = 0;
8133 +
8134 +diff --git a/samples/bpf/test_override_return.sh b/samples/bpf/test_override_return.sh
8135 +index e68b9ee6814b8..35db26f736b9d 100755
8136 +--- a/samples/bpf/test_override_return.sh
8137 ++++ b/samples/bpf/test_override_return.sh
8138 +@@ -1,5 +1,6 @@
8139 + #!/bin/bash
8140 +
8141 ++rm -r tmpmnt
8142 + rm -f testfile.img
8143 + dd if=/dev/zero of=testfile.img bs=1M seek=1000 count=1
8144 + DEVICE=$(losetup --show -f testfile.img)
8145 +diff --git a/samples/bpf/tracex7_user.c b/samples/bpf/tracex7_user.c
8146 +index ea6dae78f0dff..2ed13e9f3fcb0 100644
8147 +--- a/samples/bpf/tracex7_user.c
8148 ++++ b/samples/bpf/tracex7_user.c
8149 +@@ -13,6 +13,11 @@ int main(int argc, char **argv)
8150 + char command[256];
8151 + int ret;
8152 +
8153 ++ if (!argv[1]) {
8154 ++ fprintf(stderr, "ERROR: Run with the btrfs device argument!\n");
8155 ++ return 0;
8156 ++ }
8157 ++
8158 + snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
8159 +
8160 + if (load_bpf_file(filename)) {
8161 +diff --git a/scripts/gen_ksymdeps.sh b/scripts/gen_ksymdeps.sh
8162 +index 1324986e1362c..725e8c9c1b53f 100755
8163 +--- a/scripts/gen_ksymdeps.sh
8164 ++++ b/scripts/gen_ksymdeps.sh
8165 +@@ -4,7 +4,13 @@
8166 + set -e
8167 +
8168 + # List of exported symbols
8169 +-ksyms=$($NM $1 | sed -n 's/.*__ksym_marker_\(.*\)/\1/p' | tr A-Z a-z)
8170 ++#
8171 ++# If the object has no symbol, $NM warns 'no symbols'.
8172 ++# Suppress the stderr.
8173 ++# TODO:
8174 ++# Use -q instead of 2>/dev/null when we upgrade the minimum version of
8175 ++# binutils to 2.37, llvm to 13.0.0.
8176 ++ksyms=$($NM $1 2>/dev/null | sed -n 's/.*__ksym_marker_\(.*\)/\1/p' | tr A-Z a-z)
8177 +
8178 + if [ -z "$ksyms" ]; then
8179 + exit 0
8180 +diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
8181 +index 38ac3da4e791e..beeba1a9be170 100644
8182 +--- a/security/smack/smack_access.c
8183 ++++ b/security/smack/smack_access.c
8184 +@@ -81,23 +81,22 @@ int log_policy = SMACK_AUDIT_DENIED;
8185 + int smk_access_entry(char *subject_label, char *object_label,
8186 + struct list_head *rule_list)
8187 + {
8188 +- int may = -ENOENT;
8189 + struct smack_rule *srp;
8190 +
8191 + list_for_each_entry_rcu(srp, rule_list, list) {
8192 + if (srp->smk_object->smk_known == object_label &&
8193 + srp->smk_subject->smk_known == subject_label) {
8194 +- may = srp->smk_access;
8195 +- break;
8196 ++ int may = srp->smk_access;
8197 ++ /*
8198 ++ * MAY_WRITE implies MAY_LOCK.
8199 ++ */
8200 ++ if ((may & MAY_WRITE) == MAY_WRITE)
8201 ++ may |= MAY_LOCK;
8202 ++ return may;
8203 + }
8204 + }
8205 +
8206 +- /*
8207 +- * MAY_WRITE implies MAY_LOCK.
8208 +- */
8209 +- if ((may & MAY_WRITE) == MAY_WRITE)
8210 +- may |= MAY_LOCK;
8211 +- return may;
8212 ++ return -ENOENT;
8213 + }
8214 +
8215 + /**
8216 +diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig
8217 +index 71f2d42188c46..51e75b7819682 100644
8218 +--- a/sound/soc/atmel/Kconfig
8219 ++++ b/sound/soc/atmel/Kconfig
8220 +@@ -11,7 +11,6 @@ if SND_ATMEL_SOC
8221 +
8222 + config SND_ATMEL_SOC_PDC
8223 + bool
8224 +- depends on HAS_DMA
8225 +
8226 + config SND_ATMEL_SOC_DMA
8227 + bool
8228 +diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
8229 +index c67b86e2d0c0a..7830d014d9247 100644
8230 +--- a/sound/soc/intel/boards/bytcr_rt5640.c
8231 ++++ b/sound/soc/intel/boards/bytcr_rt5640.c
8232 +@@ -284,9 +284,6 @@ static const struct snd_soc_dapm_widget byt_rt5640_widgets[] = {
8233 + static const struct snd_soc_dapm_route byt_rt5640_audio_map[] = {
8234 + {"Headphone", NULL, "Platform Clock"},
8235 + {"Headset Mic", NULL, "Platform Clock"},
8236 +- {"Internal Mic", NULL, "Platform Clock"},
8237 +- {"Speaker", NULL, "Platform Clock"},
8238 +-
8239 + {"Headset Mic", NULL, "MICBIAS1"},
8240 + {"IN2P", NULL, "Headset Mic"},
8241 + {"Headphone", NULL, "HPOL"},
8242 +@@ -294,19 +291,23 @@ static const struct snd_soc_dapm_route byt_rt5640_audio_map[] = {
8243 + };
8244 +
8245 + static const struct snd_soc_dapm_route byt_rt5640_intmic_dmic1_map[] = {
8246 ++ {"Internal Mic", NULL, "Platform Clock"},
8247 + {"DMIC1", NULL, "Internal Mic"},
8248 + };
8249 +
8250 + static const struct snd_soc_dapm_route byt_rt5640_intmic_dmic2_map[] = {
8251 ++ {"Internal Mic", NULL, "Platform Clock"},
8252 + {"DMIC2", NULL, "Internal Mic"},
8253 + };
8254 +
8255 + static const struct snd_soc_dapm_route byt_rt5640_intmic_in1_map[] = {
8256 ++ {"Internal Mic", NULL, "Platform Clock"},
8257 + {"Internal Mic", NULL, "MICBIAS1"},
8258 + {"IN1P", NULL, "Internal Mic"},
8259 + };
8260 +
8261 + static const struct snd_soc_dapm_route byt_rt5640_intmic_in3_map[] = {
8262 ++ {"Internal Mic", NULL, "Platform Clock"},
8263 + {"Internal Mic", NULL, "MICBIAS1"},
8264 + {"IN3P", NULL, "Internal Mic"},
8265 + };
8266 +@@ -348,6 +349,7 @@ static const struct snd_soc_dapm_route byt_rt5640_ssp0_aif2_map[] = {
8267 + };
8268 +
8269 + static const struct snd_soc_dapm_route byt_rt5640_stereo_spk_map[] = {
8270 ++ {"Speaker", NULL, "Platform Clock"},
8271 + {"Speaker", NULL, "SPOLP"},
8272 + {"Speaker", NULL, "SPOLN"},
8273 + {"Speaker", NULL, "SPORP"},
8274 +@@ -355,6 +357,7 @@ static const struct snd_soc_dapm_route byt_rt5640_stereo_spk_map[] = {
8275 + };
8276 +
8277 + static const struct snd_soc_dapm_route byt_rt5640_mono_spk_map[] = {
8278 ++ {"Speaker", NULL, "Platform Clock"},
8279 + {"Speaker", NULL, "SPOLP"},
8280 + {"Speaker", NULL, "SPOLN"},
8281 + };
8282 +diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c
8283 +index 476ef1897961d..79c6cf2c14bfb 100644
8284 +--- a/sound/soc/intel/skylake/skl-messages.c
8285 ++++ b/sound/soc/intel/skylake/skl-messages.c
8286 +@@ -802,9 +802,12 @@ static u16 skl_get_module_param_size(struct skl_dev *skl,
8287 +
8288 + case SKL_MODULE_TYPE_BASE_OUTFMT:
8289 + case SKL_MODULE_TYPE_MIC_SELECT:
8290 +- case SKL_MODULE_TYPE_KPB:
8291 + return sizeof(struct skl_base_outfmt_cfg);
8292 +
8293 ++ case SKL_MODULE_TYPE_MIXER:
8294 ++ case SKL_MODULE_TYPE_KPB:
8295 ++ return sizeof(struct skl_base_cfg);
8296 ++
8297 + default:
8298 + /*
8299 + * return only base cfg when no specific module type is
8300 +@@ -857,10 +860,14 @@ static int skl_set_module_format(struct skl_dev *skl,
8301 +
8302 + case SKL_MODULE_TYPE_BASE_OUTFMT:
8303 + case SKL_MODULE_TYPE_MIC_SELECT:
8304 +- case SKL_MODULE_TYPE_KPB:
8305 + skl_set_base_outfmt_format(skl, module_config, *param_data);
8306 + break;
8307 +
8308 ++ case SKL_MODULE_TYPE_MIXER:
8309 ++ case SKL_MODULE_TYPE_KPB:
8310 ++ skl_set_base_module_format(skl, module_config, *param_data);
8311 ++ break;
8312 ++
8313 + default:
8314 + skl_set_base_module_format(skl, module_config, *param_data);
8315 + break;
8316 +diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
8317 +index 7f287424af9b7..439dd4ba690c4 100644
8318 +--- a/sound/soc/intel/skylake/skl-pcm.c
8319 ++++ b/sound/soc/intel/skylake/skl-pcm.c
8320 +@@ -1333,21 +1333,6 @@ static int skl_get_module_info(struct skl_dev *skl,
8321 + return -EIO;
8322 + }
8323 +
8324 +- list_for_each_entry(module, &skl->uuid_list, list) {
8325 +- if (guid_equal(uuid_mod, &module->uuid)) {
8326 +- mconfig->id.module_id = module->id;
8327 +- if (mconfig->module)
8328 +- mconfig->module->loadable = module->is_loadable;
8329 +- ret = 0;
8330 +- break;
8331 +- }
8332 +- }
8333 +-
8334 +- if (ret)
8335 +- return ret;
8336 +-
8337 +- uuid_mod = &module->uuid;
8338 +- ret = -EIO;
8339 + for (i = 0; i < skl->nr_modules; i++) {
8340 + skl_module = skl->modules[i];
8341 + uuid_tplg = &skl_module->uuid;
8342 +@@ -1357,10 +1342,18 @@ static int skl_get_module_info(struct skl_dev *skl,
8343 + break;
8344 + }
8345 + }
8346 ++
8347 + if (skl->nr_modules && ret)
8348 + return ret;
8349 +
8350 ++ ret = -EIO;
8351 + list_for_each_entry(module, &skl->uuid_list, list) {
8352 ++ if (guid_equal(uuid_mod, &module->uuid)) {
8353 ++ mconfig->id.module_id = module->id;
8354 ++ mconfig->module->loadable = module->is_loadable;
8355 ++ ret = 0;
8356 ++ }
8357 ++
8358 + for (i = 0; i < MAX_IN_QUEUE; i++) {
8359 + pin_id = &mconfig->m_in_pin[i].id;
8360 + if (guid_equal(&pin_id->mod_uuid, &module->uuid))
8361 +@@ -1374,7 +1367,7 @@ static int skl_get_module_info(struct skl_dev *skl,
8362 + }
8363 + }
8364 +
8365 +- return 0;
8366 ++ return ret;
8367 + }
8368 +
8369 + static int skl_populate_modules(struct skl_dev *skl)
8370 +diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
8371 +index 61c984f10d8e6..086c90e095770 100644
8372 +--- a/sound/soc/rockchip/rockchip_i2s.c
8373 ++++ b/sound/soc/rockchip/rockchip_i2s.c
8374 +@@ -186,7 +186,9 @@ static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
8375 + {
8376 + struct rk_i2s_dev *i2s = to_info(cpu_dai);
8377 + unsigned int mask = 0, val = 0;
8378 ++ int ret = 0;
8379 +
8380 ++ pm_runtime_get_sync(cpu_dai->dev);
8381 + mask = I2S_CKR_MSS_MASK;
8382 + switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
8383 + case SND_SOC_DAIFMT_CBS_CFS:
8384 +@@ -199,7 +201,8 @@ static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
8385 + i2s->is_master_mode = false;
8386 + break;
8387 + default:
8388 +- return -EINVAL;
8389 ++ ret = -EINVAL;
8390 ++ goto err_pm_put;
8391 + }
8392 +
8393 + regmap_update_bits(i2s->regmap, I2S_CKR, mask, val);
8394 +@@ -213,7 +216,8 @@ static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
8395 + val = I2S_CKR_CKP_POS;
8396 + break;
8397 + default:
8398 +- return -EINVAL;
8399 ++ ret = -EINVAL;
8400 ++ goto err_pm_put;
8401 + }
8402 +
8403 + regmap_update_bits(i2s->regmap, I2S_CKR, mask, val);
8404 +@@ -229,14 +233,15 @@ static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
8405 + case SND_SOC_DAIFMT_I2S:
8406 + val = I2S_TXCR_IBM_NORMAL;
8407 + break;
8408 +- case SND_SOC_DAIFMT_DSP_A: /* PCM no delay mode */
8409 +- val = I2S_TXCR_TFS_PCM;
8410 +- break;
8411 +- case SND_SOC_DAIFMT_DSP_B: /* PCM delay 1 mode */
8412 ++ case SND_SOC_DAIFMT_DSP_A: /* PCM delay 1 bit mode */
8413 + val = I2S_TXCR_TFS_PCM | I2S_TXCR_PBM_MODE(1);
8414 + break;
8415 ++ case SND_SOC_DAIFMT_DSP_B: /* PCM no delay mode */
8416 ++ val = I2S_TXCR_TFS_PCM;
8417 ++ break;
8418 + default:
8419 +- return -EINVAL;
8420 ++ ret = -EINVAL;
8421 ++ goto err_pm_put;
8422 + }
8423 +
8424 + regmap_update_bits(i2s->regmap, I2S_TXCR, mask, val);
8425 +@@ -252,19 +257,23 @@ static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
8426 + case SND_SOC_DAIFMT_I2S:
8427 + val = I2S_RXCR_IBM_NORMAL;
8428 + break;
8429 +- case SND_SOC_DAIFMT_DSP_A: /* PCM no delay mode */
8430 +- val = I2S_RXCR_TFS_PCM;
8431 +- break;
8432 +- case SND_SOC_DAIFMT_DSP_B: /* PCM delay 1 mode */
8433 ++ case SND_SOC_DAIFMT_DSP_A: /* PCM delay 1 bit mode */
8434 + val = I2S_RXCR_TFS_PCM | I2S_RXCR_PBM_MODE(1);
8435 + break;
8436 ++ case SND_SOC_DAIFMT_DSP_B: /* PCM no delay mode */
8437 ++ val = I2S_RXCR_TFS_PCM;
8438 ++ break;
8439 + default:
8440 +- return -EINVAL;
8441 ++ ret = -EINVAL;
8442 ++ goto err_pm_put;
8443 + }
8444 +
8445 + regmap_update_bits(i2s->regmap, I2S_RXCR, mask, val);
8446 +
8447 +- return 0;
8448 ++err_pm_put:
8449 ++ pm_runtime_put(cpu_dai->dev);
8450 ++
8451 ++ return ret;
8452 + }
8453 +
8454 + static int rockchip_i2s_hw_params(struct snd_pcm_substream *substream,
8455 +diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
8456 +index 9832affd5d54b..c75c9b03d6e77 100644
8457 +--- a/tools/perf/Makefile.config
8458 ++++ b/tools/perf/Makefile.config
8459 +@@ -118,10 +118,10 @@ FEATURE_CHECK_LDFLAGS-libunwind = $(LIBUNWIND_LDFLAGS) $(LIBUNWIND_LIBS)
8460 + FEATURE_CHECK_CFLAGS-libunwind-debug-frame = $(LIBUNWIND_CFLAGS)
8461 + FEATURE_CHECK_LDFLAGS-libunwind-debug-frame = $(LIBUNWIND_LDFLAGS) $(LIBUNWIND_LIBS)
8462 +
8463 +-FEATURE_CHECK_LDFLAGS-libunwind-arm = -lunwind -lunwind-arm
8464 +-FEATURE_CHECK_LDFLAGS-libunwind-aarch64 = -lunwind -lunwind-aarch64
8465 +-FEATURE_CHECK_LDFLAGS-libunwind-x86 = -lunwind -llzma -lunwind-x86
8466 +-FEATURE_CHECK_LDFLAGS-libunwind-x86_64 = -lunwind -llzma -lunwind-x86_64
8467 ++FEATURE_CHECK_LDFLAGS-libunwind-arm += -lunwind -lunwind-arm
8468 ++FEATURE_CHECK_LDFLAGS-libunwind-aarch64 += -lunwind -lunwind-aarch64
8469 ++FEATURE_CHECK_LDFLAGS-libunwind-x86 += -lunwind -llzma -lunwind-x86
8470 ++FEATURE_CHECK_LDFLAGS-libunwind-x86_64 += -lunwind -llzma -lunwind-x86_64
8471 +
8472 + FEATURE_CHECK_LDFLAGS-libcrypto = -lcrypto
8473 +
8474 +diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
8475 +index 767fe1bfd922c..8c3addc2e9e1e 100644
8476 +--- a/tools/perf/util/machine.c
8477 ++++ b/tools/perf/util/machine.c
8478 +@@ -2020,6 +2020,7 @@ static int add_callchain_ip(struct thread *thread,
8479 +
8480 + al.filtered = 0;
8481 + al.sym = NULL;
8482 ++ al.srcline = NULL;
8483 + if (!cpumode) {
8484 + thread__find_cpumode_addr_location(thread, ip, &al);
8485 + } else {
8486 +diff --git a/tools/testing/selftests/bpf/progs/xdp_tx.c b/tools/testing/selftests/bpf/progs/xdp_tx.c
8487 +index 57912e7c94b0a..9ed477776eca8 100644
8488 +--- a/tools/testing/selftests/bpf/progs/xdp_tx.c
8489 ++++ b/tools/testing/selftests/bpf/progs/xdp_tx.c
8490 +@@ -3,7 +3,7 @@
8491 + #include <linux/bpf.h>
8492 + #include "bpf_helpers.h"
8493 +
8494 +-SEC("tx")
8495 ++SEC("xdp")
8496 + int xdp_tx(struct xdp_md *xdp)
8497 + {
8498 + return XDP_TX;
8499 +diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
8500 +index 1c4219ceced2f..45c7a55f0b8b5 100644
8501 +--- a/tools/testing/selftests/bpf/test_maps.c
8502 ++++ b/tools/testing/selftests/bpf/test_maps.c
8503 +@@ -972,7 +972,7 @@ static void test_sockmap(unsigned int tasks, void *data)
8504 +
8505 + FD_ZERO(&w);
8506 + FD_SET(sfd[3], &w);
8507 +- to.tv_sec = 1;
8508 ++ to.tv_sec = 30;
8509 + to.tv_usec = 0;
8510 + s = select(sfd[3] + 1, &w, NULL, NULL, &to);
8511 + if (s == -1) {
8512 +diff --git a/tools/testing/selftests/bpf/test_xdp_veth.sh b/tools/testing/selftests/bpf/test_xdp_veth.sh
8513 +index ba8ffcdaac302..995278e684b6e 100755
8514 +--- a/tools/testing/selftests/bpf/test_xdp_veth.sh
8515 ++++ b/tools/testing/selftests/bpf/test_xdp_veth.sh
8516 +@@ -108,7 +108,7 @@ ip link set dev veth2 xdp pinned $BPF_DIR/progs/redirect_map_1
8517 + ip link set dev veth3 xdp pinned $BPF_DIR/progs/redirect_map_2
8518 +
8519 + ip -n ns1 link set dev veth11 xdp obj xdp_dummy.o sec xdp_dummy
8520 +-ip -n ns2 link set dev veth22 xdp obj xdp_tx.o sec tx
8521 ++ip -n ns2 link set dev veth22 xdp obj xdp_tx.o sec xdp
8522 + ip -n ns3 link set dev veth33 xdp obj xdp_dummy.o sec xdp_dummy
8523 +
8524 + trap cleanup EXIT
8525 +diff --git a/tools/thermal/tmon/Makefile b/tools/thermal/tmon/Makefile
8526 +index 59e417ec3e134..25d7f8f37cfd6 100644
8527 +--- a/tools/thermal/tmon/Makefile
8528 ++++ b/tools/thermal/tmon/Makefile
8529 +@@ -10,7 +10,7 @@ override CFLAGS+= $(call cc-option,-O3,-O1) ${WARNFLAGS}
8530 + # Add "-fstack-protector" only if toolchain supports it.
8531 + override CFLAGS+= $(call cc-option,-fstack-protector-strong)
8532 + CC?= $(CROSS_COMPILE)gcc
8533 +-PKG_CONFIG?= pkg-config
8534 ++PKG_CONFIG?= $(CROSS_COMPILE)pkg-config
8535 +
8536 + override CFLAGS+=-D VERSION=\"$(VERSION)\"
8537 + LDFLAGS+=
8538 +diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
8539 +index 4af85605730e4..f7150fbeeb55e 100644
8540 +--- a/virt/kvm/arm/arm.c
8541 ++++ b/virt/kvm/arm/arm.c
8542 +@@ -1141,6 +1141,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
8543 + if (copy_from_user(&reg, argp, sizeof(reg)))
8544 + break;
8545 +
8546 ++ /*
8547 ++ * We could owe a reset due to PSCI. Handle the pending reset
8548 ++ * here to ensure userspace register accesses are ordered after
8549 ++ * the reset.
8550 ++ */
8551 ++ if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
8552 ++ kvm_reset_vcpu(vcpu);
8553 ++
8554 + if (ioctl == KVM_SET_ONE_REG)
8555 + r = kvm_arm_set_reg(vcpu, &reg);
8556 + else