From: Thomas Deutschmann <whissi@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Sun, 01 Dec 2019 14:06:53 +0000
Message-Id: 1575209178.84ccfd99c9438da3264cfbc161b7bafe5bbe3245.whissi@gentoo
1 commit: 84ccfd99c9438da3264cfbc161b7bafe5bbe3245
2 Author: Thomas Deutschmann <whissi <AT> whissi <DOT> de>
3 AuthorDate: Sun Dec 1 14:06:18 2019 +0000
4 Commit: Thomas Deutschmann <whissi <AT> gentoo <DOT> org>
5 CommitDate: Sun Dec 1 14:06:18 2019 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=84ccfd99
7
8 Linux patch 4.19.87
9
10 Signed-off-by: Thomas Deutschmann <whissi <AT> whissi.de>
11
12 1086_linux-4.19.87.patch | 11996 +++++++++++++++++++++++++++++++++++++++++++++
13 1 file changed, 11996 insertions(+)
14
15 diff --git a/1086_linux-4.19.87.patch b/1086_linux-4.19.87.patch
16 new file mode 100644
17 index 0000000..2cc0d06
18 --- /dev/null
19 +++ b/1086_linux-4.19.87.patch
20 @@ -0,0 +1,11996 @@
21 +diff --git a/Documentation/admin-guide/hw-vuln/mds.rst b/Documentation/admin-guide/hw-vuln/mds.rst
22 +index e3a796c0d3a2..2d19c9f4c1fe 100644
23 +--- a/Documentation/admin-guide/hw-vuln/mds.rst
24 ++++ b/Documentation/admin-guide/hw-vuln/mds.rst
25 +@@ -265,8 +265,11 @@ time with the option "mds=". The valid arguments for this option are:
26 +
27 + ============ =============================================================
28 +
29 +-Not specifying this option is equivalent to "mds=full".
30 +-
31 ++Not specifying this option is equivalent to "mds=full". For processors
32 ++that are affected by both TAA (TSX Asynchronous Abort) and MDS,
33 ++specifying just "mds=off" without an accompanying "tsx_async_abort=off"
34 ++will have no effect as the same mitigation is used for both
35 ++vulnerabilities.
36 +
37 + Mitigation selection guide
38 + --------------------------
39 +diff --git a/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst b/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst
40 +index fddbd7579c53..af6865b822d2 100644
41 +--- a/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst
42 ++++ b/Documentation/admin-guide/hw-vuln/tsx_async_abort.rst
43 +@@ -174,7 +174,10 @@ the option "tsx_async_abort=". The valid arguments for this option are:
44 + CPU is not vulnerable to cross-thread TAA attacks.
45 + ============ =============================================================
46 +
47 +-Not specifying this option is equivalent to "tsx_async_abort=full".
48 ++Not specifying this option is equivalent to "tsx_async_abort=full". For
49 ++processors that are affected by both TAA and MDS, specifying just
50 ++"tsx_async_abort=off" without an accompanying "mds=off" will have no
51 ++effect as the same mitigation is used for both vulnerabilities.
52 +
53 + The kernel command line also allows to control the TSX feature using the
54 + parameter "tsx=" on CPUs which support TSX control. MSR_IA32_TSX_CTRL is used
55 +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
56 +index 475ed980b25b..ce1af89dbbb8 100644
57 +--- a/Documentation/admin-guide/kernel-parameters.txt
58 ++++ b/Documentation/admin-guide/kernel-parameters.txt
59 +@@ -2359,6 +2359,12 @@
60 + SMT on vulnerable CPUs
61 + off - Unconditionally disable MDS mitigation
62 +
63 ++ On TAA-affected machines, mds=off can be prevented by
64 ++ an active TAA mitigation as both vulnerabilities are
65 ++ mitigated with the same mechanism so in order to disable
66 ++ this mitigation, you need to specify tsx_async_abort=off
67 ++ too.
68 ++
69 + Not specifying this option is equivalent to
70 + mds=full.
71 +
72 +@@ -4773,6 +4779,11 @@
73 + vulnerable to cross-thread TAA attacks.
74 + off - Unconditionally disable TAA mitigation
75 +
76 ++ On MDS-affected machines, tsx_async_abort=off can be
77 ++ prevented by an active MDS mitigation as both vulnerabilities
78 ++ are mitigated with the same mechanism so in order to disable
79 ++ this mitigation, you need to specify mds=off too.
80 ++
81 + Not specifying this option is equivalent to
82 + tsx_async_abort=full. On CPUs which are MDS affected
83 + and deploy MDS mitigation, TAA mitigation is not
84 +diff --git a/Documentation/devicetree/bindings/spi/spi-uniphier.txt b/Documentation/devicetree/bindings/spi/spi-uniphier.txt
85 +index 504a4ecfc7b1..b04e66a52de5 100644
86 +--- a/Documentation/devicetree/bindings/spi/spi-uniphier.txt
87 ++++ b/Documentation/devicetree/bindings/spi/spi-uniphier.txt
88 +@@ -5,18 +5,20 @@ UniPhier SoCs have SCSSI which supports SPI single channel.
89 + Required properties:
90 + - compatible: should be "socionext,uniphier-scssi"
91 + - reg: address and length of the spi master registers
92 +- - #address-cells: must be <1>, see spi-bus.txt
93 +- - #size-cells: must be <0>, see spi-bus.txt
94 +- - clocks: A phandle to the clock for the device.
95 +- - resets: A phandle to the reset control for the device.
96 ++ - interrupts: a single interrupt specifier
97 ++ - pinctrl-names: should be "default"
98 ++ - pinctrl-0: pin control state for the default mode
99 ++ - clocks: a phandle to the clock for the device
100 ++ - resets: a phandle to the reset control for the device
101 +
102 + Example:
103 +
104 + spi0: spi@54006000 {
105 + compatible = "socionext,uniphier-scssi";
106 + reg = <0x54006000 0x100>;
107 +- #address-cells = <1>;
108 +- #size-cells = <0>;
109 ++ interrupts = <0 39 4>;
110 ++ pinctrl-names = "default";
111 ++ pinctrl-0 = <&pinctrl_spi0>;
112 + clocks = <&peri_clk 11>;
113 + resets = <&peri_rst 11>;
114 + };
115 +diff --git a/Makefile b/Makefile
116 +index feb0568e9535..9240f36099de 100644
117 +--- a/Makefile
118 ++++ b/Makefile
119 +@@ -1,7 +1,7 @@
120 + # SPDX-License-Identifier: GPL-2.0
121 + VERSION = 4
122 + PATCHLEVEL = 19
123 +-SUBLEVEL = 86
124 ++SUBLEVEL = 87
125 + EXTRAVERSION =
126 + NAME = "People's Front"
127 +
128 +diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
129 +index 8aec462d90fb..30f66b123541 100644
130 +--- a/arch/arc/kernel/perf_event.c
131 ++++ b/arch/arc/kernel/perf_event.c
132 +@@ -490,8 +490,8 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
133 + /* loop thru all available h/w condition indexes */
134 + for (j = 0; j < cc_bcr.c; j++) {
135 + write_aux_reg(ARC_REG_CC_INDEX, j);
136 +- cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0);
137 +- cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1);
138 ++ cc_name.indiv.word0 = le32_to_cpu(read_aux_reg(ARC_REG_CC_NAME0));
139 ++ cc_name.indiv.word1 = le32_to_cpu(read_aux_reg(ARC_REG_CC_NAME1));
140 +
141 + /* See if it has been mapped to a perf event_id */
142 + for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {
143 +diff --git a/arch/arm/boot/dts/imx6sx-sdb.dtsi b/arch/arm/boot/dts/imx6sx-sdb.dtsi
144 +index f8f31872fa14..d6d517e4922f 100644
145 +--- a/arch/arm/boot/dts/imx6sx-sdb.dtsi
146 ++++ b/arch/arm/boot/dts/imx6sx-sdb.dtsi
147 +@@ -115,7 +115,9 @@
148 + regulator-name = "enet_3v3";
149 + regulator-min-microvolt = <3300000>;
150 + regulator-max-microvolt = <3300000>;
151 +- gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
152 ++ gpio = <&gpio2 6 GPIO_ACTIVE_LOW>;
153 ++ regulator-boot-on;
154 ++ regulator-always-on;
155 + };
156 +
157 + reg_pcie_gpio: regulator-pcie-gpio {
158 +@@ -178,6 +180,7 @@
159 + phy-supply = <&reg_enet_3v3>;
160 + phy-mode = "rgmii";
161 + phy-handle = <&ethphy1>;
162 ++ phy-reset-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>;
163 + status = "okay";
164 +
165 + mdio {
166 +@@ -371,6 +374,8 @@
167 + MX6SX_PAD_RGMII1_RD3__ENET1_RX_DATA_3 0x3081
168 + MX6SX_PAD_RGMII1_RX_CTL__ENET1_RX_EN 0x3081
169 + MX6SX_PAD_ENET2_RX_CLK__ENET2_REF_CLK_25M 0x91
170 ++ /* phy reset */
171 ++ MX6SX_PAD_ENET2_CRS__GPIO2_IO_7 0x10b0
172 + >;
173 + };
174 +
175 +diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
176 +index 70e560cf8ca0..d8cbe772f690 100644
177 +--- a/arch/arm/mm/mmu.c
178 ++++ b/arch/arm/mm/mmu.c
179 +@@ -1195,6 +1195,9 @@ void __init adjust_lowmem_bounds(void)
180 + phys_addr_t block_start = reg->base;
181 + phys_addr_t block_end = reg->base + reg->size;
182 +
183 ++ if (memblock_is_nomap(reg))
184 ++ continue;
185 ++
186 + if (reg->base < vmalloc_limit) {
187 + if (block_end > lowmem_limit)
188 + /*
189 +diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
190 +index 5d8787f0ca5f..9a5e28141211 100644
191 +--- a/arch/arm64/Makefile
192 ++++ b/arch/arm64/Makefile
193 +@@ -148,6 +148,7 @@ archclean:
194 + $(Q)$(MAKE) $(clean)=$(boot)
195 + $(Q)$(MAKE) $(clean)=$(boot)/dts
196 +
197 ++ifeq ($(KBUILD_EXTMOD),)
198 + # We need to generate vdso-offsets.h before compiling certain files in kernel/.
199 + # In order to do that, we should use the archprepare target, but we can't since
200 + # asm-offsets.h is included in some files used to generate vdso-offsets.h, and
201 +@@ -157,6 +158,7 @@ archclean:
202 + prepare: vdso_prepare
203 + vdso_prepare: prepare0
204 + $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso include/generated/vdso-offsets.h
205 ++endif
206 +
207 + define archhelp
208 + echo '* Image.gz - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)'
209 +diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h
210 +index dd95d33a5bd5..03a6c256b7ec 100644
211 +--- a/arch/arm64/include/asm/string.h
212 ++++ b/arch/arm64/include/asm/string.h
213 +@@ -16,6 +16,7 @@
214 + #ifndef __ASM_STRING_H
215 + #define __ASM_STRING_H
216 +
217 ++#ifndef CONFIG_KASAN
218 + #define __HAVE_ARCH_STRRCHR
219 + extern char *strrchr(const char *, int c);
220 +
221 +@@ -34,6 +35,13 @@ extern __kernel_size_t strlen(const char *);
222 + #define __HAVE_ARCH_STRNLEN
223 + extern __kernel_size_t strnlen(const char *, __kernel_size_t);
224 +
225 ++#define __HAVE_ARCH_MEMCMP
226 ++extern int memcmp(const void *, const void *, size_t);
227 ++
228 ++#define __HAVE_ARCH_MEMCHR
229 ++extern void *memchr(const void *, int, __kernel_size_t);
230 ++#endif
231 ++
232 + #define __HAVE_ARCH_MEMCPY
233 + extern void *memcpy(void *, const void *, __kernel_size_t);
234 + extern void *__memcpy(void *, const void *, __kernel_size_t);
235 +@@ -42,16 +50,10 @@ extern void *__memcpy(void *, const void *, __kernel_size_t);
236 + extern void *memmove(void *, const void *, __kernel_size_t);
237 + extern void *__memmove(void *, const void *, __kernel_size_t);
238 +
239 +-#define __HAVE_ARCH_MEMCHR
240 +-extern void *memchr(const void *, int, __kernel_size_t);
241 +-
242 + #define __HAVE_ARCH_MEMSET
243 + extern void *memset(void *, int, __kernel_size_t);
244 + extern void *__memset(void *, int, __kernel_size_t);
245 +
246 +-#define __HAVE_ARCH_MEMCMP
247 +-extern int memcmp(const void *, const void *, size_t);
248 +-
249 + #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
250 + #define __HAVE_ARCH_MEMCPY_FLUSHCACHE
251 + void memcpy_flushcache(void *dst, const void *src, size_t cnt);
252 +diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
253 +index d894a20b70b2..72f63a59b008 100644
254 +--- a/arch/arm64/kernel/arm64ksyms.c
255 ++++ b/arch/arm64/kernel/arm64ksyms.c
256 +@@ -44,20 +44,23 @@ EXPORT_SYMBOL(__arch_copy_in_user);
257 + EXPORT_SYMBOL(memstart_addr);
258 +
259 + /* string / mem functions */
260 ++#ifndef CONFIG_KASAN
261 + EXPORT_SYMBOL(strchr);
262 + EXPORT_SYMBOL(strrchr);
263 + EXPORT_SYMBOL(strcmp);
264 + EXPORT_SYMBOL(strncmp);
265 + EXPORT_SYMBOL(strlen);
266 + EXPORT_SYMBOL(strnlen);
267 ++EXPORT_SYMBOL(memcmp);
268 ++EXPORT_SYMBOL(memchr);
269 ++#endif
270 ++
271 + EXPORT_SYMBOL(memset);
272 + EXPORT_SYMBOL(memcpy);
273 + EXPORT_SYMBOL(memmove);
274 + EXPORT_SYMBOL(__memset);
275 + EXPORT_SYMBOL(__memcpy);
276 + EXPORT_SYMBOL(__memmove);
277 +-EXPORT_SYMBOL(memchr);
278 +-EXPORT_SYMBOL(memcmp);
279 +
280 + /* atomic bitops */
281 + EXPORT_SYMBOL(set_bit);
282 +diff --git a/arch/arm64/lib/memchr.S b/arch/arm64/lib/memchr.S
283 +index 4444c1d25f4b..0f164a4baf52 100644
284 +--- a/arch/arm64/lib/memchr.S
285 ++++ b/arch/arm64/lib/memchr.S
286 +@@ -30,7 +30,7 @@
287 + * Returns:
288 + * x0 - address of first occurrence of 'c' or 0
289 + */
290 +-ENTRY(memchr)
291 ++WEAK(memchr)
292 + and w1, w1, #0xff
293 + 1: subs x2, x2, #1
294 + b.mi 2f
295 +diff --git a/arch/arm64/lib/memcmp.S b/arch/arm64/lib/memcmp.S
296 +index 2a4e239bd17a..fb295f52e9f8 100644
297 +--- a/arch/arm64/lib/memcmp.S
298 ++++ b/arch/arm64/lib/memcmp.S
299 +@@ -58,7 +58,7 @@ pos .req x11
300 + limit_wd .req x12
301 + mask .req x13
302 +
303 +-ENTRY(memcmp)
304 ++WEAK(memcmp)
305 + cbz limit, .Lret0
306 + eor tmp1, src1, src2
307 + tst tmp1, #7
308 +diff --git a/arch/arm64/lib/strchr.S b/arch/arm64/lib/strchr.S
309 +index dae0cf5591f9..7c83091d1bcd 100644
310 +--- a/arch/arm64/lib/strchr.S
311 ++++ b/arch/arm64/lib/strchr.S
312 +@@ -29,7 +29,7 @@
313 + * Returns:
314 + * x0 - address of first occurrence of 'c' or 0
315 + */
316 +-ENTRY(strchr)
317 ++WEAK(strchr)
318 + and w1, w1, #0xff
319 + 1: ldrb w2, [x0], #1
320 + cmp w2, w1
321 +diff --git a/arch/arm64/lib/strcmp.S b/arch/arm64/lib/strcmp.S
322 +index 471fe61760ef..7d5d15398bfb 100644
323 +--- a/arch/arm64/lib/strcmp.S
324 ++++ b/arch/arm64/lib/strcmp.S
325 +@@ -60,7 +60,7 @@ tmp3 .req x9
326 + zeroones .req x10
327 + pos .req x11
328 +
329 +-ENTRY(strcmp)
330 ++WEAK(strcmp)
331 + eor tmp1, src1, src2
332 + mov zeroones, #REP8_01
333 + tst tmp1, #7
334 +diff --git a/arch/arm64/lib/strlen.S b/arch/arm64/lib/strlen.S
335 +index 55ccc8e24c08..8e0b14205dcb 100644
336 +--- a/arch/arm64/lib/strlen.S
337 ++++ b/arch/arm64/lib/strlen.S
338 +@@ -56,7 +56,7 @@ pos .req x12
339 + #define REP8_7f 0x7f7f7f7f7f7f7f7f
340 + #define REP8_80 0x8080808080808080
341 +
342 +-ENTRY(strlen)
343 ++WEAK(strlen)
344 + mov zeroones, #REP8_01
345 + bic src, srcin, #15
346 + ands tmp1, srcin, #15
347 +diff --git a/arch/arm64/lib/strncmp.S b/arch/arm64/lib/strncmp.S
348 +index e267044761c6..66bd145935d9 100644
349 +--- a/arch/arm64/lib/strncmp.S
350 ++++ b/arch/arm64/lib/strncmp.S
351 +@@ -64,7 +64,7 @@ limit_wd .req x13
352 + mask .req x14
353 + endloop .req x15
354 +
355 +-ENTRY(strncmp)
356 ++WEAK(strncmp)
357 + cbz limit, .Lret0
358 + eor tmp1, src1, src2
359 + mov zeroones, #REP8_01
360 +diff --git a/arch/arm64/lib/strnlen.S b/arch/arm64/lib/strnlen.S
361 +index eae38da6e0bb..355be04441fe 100644
362 +--- a/arch/arm64/lib/strnlen.S
363 ++++ b/arch/arm64/lib/strnlen.S
364 +@@ -59,7 +59,7 @@ limit_wd .req x14
365 + #define REP8_7f 0x7f7f7f7f7f7f7f7f
366 + #define REP8_80 0x8080808080808080
367 +
368 +-ENTRY(strnlen)
369 ++WEAK(strnlen)
370 + cbz limit, .Lhit_limit
371 + mov zeroones, #REP8_01
372 + bic src, srcin, #15
373 +diff --git a/arch/arm64/lib/strrchr.S b/arch/arm64/lib/strrchr.S
374 +index f8e2784d5752..ea84924d5990 100644
375 +--- a/arch/arm64/lib/strrchr.S
376 ++++ b/arch/arm64/lib/strrchr.S
377 +@@ -29,7 +29,7 @@
378 + * Returns:
379 + * x0 - address of last occurrence of 'c' or 0
380 + */
381 +-ENTRY(strrchr)
382 ++WEAK(strrchr)
383 + mov x3, #0
384 + and w1, w1, #0xff
385 + 1: ldrb w2, [x0], #1
386 +diff --git a/arch/m68k/kernel/uboot.c b/arch/m68k/kernel/uboot.c
387 +index b29c3b241e1b..107082877064 100644
388 +--- a/arch/m68k/kernel/uboot.c
389 ++++ b/arch/m68k/kernel/uboot.c
390 +@@ -102,5 +102,5 @@ __init void process_uboot_commandline(char *commandp, int size)
391 + }
392 +
393 + parse_uboot_commandline(commandp, len);
394 +- commandp[size - 1] = 0;
395 ++ commandp[len - 1] = 0;
396 + }
397 +diff --git a/arch/nds32/include/asm/bitfield.h b/arch/nds32/include/asm/bitfield.h
398 +index 8e84fc385b94..19b2841219ad 100644
399 +--- a/arch/nds32/include/asm/bitfield.h
400 ++++ b/arch/nds32/include/asm/bitfield.h
401 +@@ -692,8 +692,8 @@
402 + #define PFM_CTL_offKU1 13 /* Enable user mode event counting for PFMC1 */
403 + #define PFM_CTL_offKU2 14 /* Enable user mode event counting for PFMC2 */
404 + #define PFM_CTL_offSEL0 15 /* The event selection for PFMC0 */
405 +-#define PFM_CTL_offSEL1 21 /* The event selection for PFMC1 */
406 +-#define PFM_CTL_offSEL2 27 /* The event selection for PFMC2 */
407 ++#define PFM_CTL_offSEL1 16 /* The event selection for PFMC1 */
408 ++#define PFM_CTL_offSEL2 22 /* The event selection for PFMC2 */
409 + /* bit 28:31 reserved */
410 +
411 + #define PFM_CTL_mskEN0 ( 0x01 << PFM_CTL_offEN0 )
412 +diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
413 +index 25e3184f11f7..7d5ddf53750c 100644
414 +--- a/arch/powerpc/boot/Makefile
415 ++++ b/arch/powerpc/boot/Makefile
416 +@@ -32,8 +32,8 @@ else
417 + endif
418 +
419 + BOOTCFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
420 +- -fno-strict-aliasing -Os -msoft-float -pipe \
421 +- -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \
422 ++ -fno-strict-aliasing -Os -msoft-float -mno-altivec -mno-vsx \
423 ++ -pipe -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \
424 + -D$(compress-y)
425 +
426 + ifdef CONFIG_PPC64_BOOT_WRAPPER
427 +diff --git a/arch/powerpc/boot/opal.c b/arch/powerpc/boot/opal.c
428 +index 0272570d02de..dfb199ef5b94 100644
429 +--- a/arch/powerpc/boot/opal.c
430 ++++ b/arch/powerpc/boot/opal.c
431 +@@ -13,8 +13,6 @@
432 + #include <libfdt.h>
433 + #include "../include/asm/opal-api.h"
434 +
435 +-#ifdef CONFIG_PPC64_BOOT_WRAPPER
436 +-
437 + /* Global OPAL struct used by opal-call.S */
438 + struct opal {
439 + u64 base;
440 +@@ -101,9 +99,3 @@ int opal_console_init(void *devp, struct serial_console_data *scdp)
441 +
442 + return 0;
443 + }
444 +-#else
445 +-int opal_console_init(void *devp, struct serial_console_data *scdp)
446 +-{
447 +- return -1;
448 +-}
449 +-#endif /* __powerpc64__ */
450 +diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
451 +index e398173ae67d..d0609c116e4f 100644
452 +--- a/arch/powerpc/include/asm/asm-prototypes.h
453 ++++ b/arch/powerpc/include/asm/asm-prototypes.h
454 +@@ -146,8 +146,11 @@ void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
455 + /* Patch sites */
456 + extern s32 patch__call_flush_count_cache;
457 + extern s32 patch__flush_count_cache_return;
458 ++extern s32 patch__flush_link_stack_return;
459 ++extern s32 patch__call_kvm_flush_link_stack;
460 + extern s32 patch__memset_nocache, patch__memcpy_nocache;
461 +
462 + extern long flush_count_cache;
463 ++extern long kvm_flush_link_stack;
464 +
465 + #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */
466 +diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
467 +index 759597bf0fd8..ccf44c135389 100644
468 +--- a/arch/powerpc/include/asm/security_features.h
469 ++++ b/arch/powerpc/include/asm/security_features.h
470 +@@ -81,6 +81,9 @@ static inline bool security_ftr_enabled(unsigned long feature)
471 + // Software required to flush count cache on context switch
472 + #define SEC_FTR_FLUSH_COUNT_CACHE 0x0000000000000400ull
473 +
474 ++// Software required to flush link stack on context switch
475 ++#define SEC_FTR_FLUSH_LINK_STACK 0x0000000000001000ull
476 ++
477 +
478 + // Features enabled by default
479 + #define SEC_FTR_DEFAULT \
480 +diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
481 +index 110eba400de7..af1f3d5f9a0f 100644
482 +--- a/arch/powerpc/kernel/eeh_driver.c
483 ++++ b/arch/powerpc/kernel/eeh_driver.c
484 +@@ -281,6 +281,10 @@ static void eeh_pe_report_edev(struct eeh_dev *edev, eeh_report_fn fn,
485 + struct pci_driver *driver;
486 + enum pci_ers_result new_result;
487 +
488 ++ if (!edev->pdev) {
489 ++ eeh_edev_info(edev, "no device");
490 ++ return;
491 ++ }
492 + device_lock(&edev->pdev->dev);
493 + if (eeh_edev_actionable(edev)) {
494 + driver = eeh_pcid_get(edev->pdev);
495 +diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
496 +index 1b238ecc553e..210d239a9395 100644
497 +--- a/arch/powerpc/kernel/eeh_pe.c
498 ++++ b/arch/powerpc/kernel/eeh_pe.c
499 +@@ -379,7 +379,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
500 + while (parent) {
501 + if (!(parent->type & EEH_PE_INVALID))
502 + break;
503 +- parent->type &= ~(EEH_PE_INVALID | EEH_PE_KEEP);
504 ++ parent->type &= ~EEH_PE_INVALID;
505 + parent = parent->parent;
506 + }
507 +
508 +diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
509 +index 7a46e0e57a36..58b50967b3e5 100644
510 +--- a/arch/powerpc/kernel/entry_64.S
511 ++++ b/arch/powerpc/kernel/entry_64.S
512 +@@ -533,6 +533,7 @@ flush_count_cache:
513 + /* Save LR into r9 */
514 + mflr r9
515 +
516 ++ // Flush the link stack
517 + .rept 64
518 + bl .+4
519 + .endr
520 +@@ -542,6 +543,11 @@ flush_count_cache:
521 + .balign 32
522 + /* Restore LR */
523 + 1: mtlr r9
524 ++
525 ++ // If we're just flushing the link stack, return here
526 ++3: nop
527 ++ patch_site 3b patch__flush_link_stack_return
528 ++
529 + li r9,0x7fff
530 + mtctr r9
531 +
532 +diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
533 +index 909c9407e392..02b69a68139c 100644
534 +--- a/arch/powerpc/kernel/process.c
535 ++++ b/arch/powerpc/kernel/process.c
536 +@@ -575,12 +575,11 @@ void flush_all_to_thread(struct task_struct *tsk)
537 + if (tsk->thread.regs) {
538 + preempt_disable();
539 + BUG_ON(tsk != current);
540 +- save_all(tsk);
541 +-
542 + #ifdef CONFIG_SPE
543 + if (tsk->thread.regs->msr & MSR_SPE)
544 + tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
545 + #endif
546 ++ save_all(tsk);
547 +
548 + preempt_enable();
549 + }
550 +diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
551 +index 70568ccbd9fd..a5c5940d970a 100644
552 +--- a/arch/powerpc/kernel/security.c
553 ++++ b/arch/powerpc/kernel/security.c
554 +@@ -24,11 +24,12 @@ enum count_cache_flush_type {
555 + COUNT_CACHE_FLUSH_HW = 0x4,
556 + };
557 + static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
558 ++static bool link_stack_flush_enabled;
559 +
560 + bool barrier_nospec_enabled;
561 + static bool no_nospec;
562 + static bool btb_flush_enabled;
563 +-#ifdef CONFIG_PPC_FSL_BOOK3E
564 ++#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64)
565 + static bool no_spectrev2;
566 + #endif
567 +
568 +@@ -106,7 +107,7 @@ static __init int barrier_nospec_debugfs_init(void)
569 + device_initcall(barrier_nospec_debugfs_init);
570 + #endif /* CONFIG_DEBUG_FS */
571 +
572 +-#ifdef CONFIG_PPC_FSL_BOOK3E
573 ++#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64)
574 + static int __init handle_nospectre_v2(char *p)
575 + {
576 + no_spectrev2 = true;
577 +@@ -114,6 +115,9 @@ static int __init handle_nospectre_v2(char *p)
578 + return 0;
579 + }
580 + early_param("nospectre_v2", handle_nospectre_v2);
581 ++#endif /* CONFIG_PPC_FSL_BOOK3E || CONFIG_PPC_BOOK3S_64 */
582 ++
583 ++#ifdef CONFIG_PPC_FSL_BOOK3E
584 + void setup_spectre_v2(void)
585 + {
586 + if (no_spectrev2 || cpu_mitigations_off())
587 +@@ -201,11 +205,19 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
588 +
589 + if (ccd)
590 + seq_buf_printf(&s, "Indirect branch cache disabled");
591 ++
592 ++ if (link_stack_flush_enabled)
593 ++ seq_buf_printf(&s, ", Software link stack flush");
594 ++
595 + } else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
596 + seq_buf_printf(&s, "Mitigation: Software count cache flush");
597 +
598 + if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
599 + seq_buf_printf(&s, " (hardware accelerated)");
600 ++
601 ++ if (link_stack_flush_enabled)
602 ++ seq_buf_printf(&s, ", Software link stack flush");
603 ++
604 + } else if (btb_flush_enabled) {
605 + seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
606 + } else {
607 +@@ -366,18 +378,49 @@ static __init int stf_barrier_debugfs_init(void)
608 + device_initcall(stf_barrier_debugfs_init);
609 + #endif /* CONFIG_DEBUG_FS */
610 +
611 ++static void no_count_cache_flush(void)
612 ++{
613 ++ count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
614 ++ pr_info("count-cache-flush: software flush disabled.\n");
615 ++}
616 ++
617 + static void toggle_count_cache_flush(bool enable)
618 + {
619 +- if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
620 ++ if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE) &&
621 ++ !security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK))
622 ++ enable = false;
623 ++
624 ++ if (!enable) {
625 + patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP);
626 +- count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
627 +- pr_info("count-cache-flush: software flush disabled.\n");
628 ++#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
629 ++ patch_instruction_site(&patch__call_kvm_flush_link_stack, PPC_INST_NOP);
630 ++#endif
631 ++ pr_info("link-stack-flush: software flush disabled.\n");
632 ++ link_stack_flush_enabled = false;
633 ++ no_count_cache_flush();
634 + return;
635 + }
636 +
637 ++ // This enables the branch from _switch to flush_count_cache
638 + patch_branch_site(&patch__call_flush_count_cache,
639 + (u64)&flush_count_cache, BRANCH_SET_LINK);
640 +
641 ++#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
642 ++ // This enables the branch from guest_exit_cont to kvm_flush_link_stack
643 ++ patch_branch_site(&patch__call_kvm_flush_link_stack,
644 ++ (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
645 ++#endif
646 ++
647 ++ pr_info("link-stack-flush: software flush enabled.\n");
648 ++ link_stack_flush_enabled = true;
649 ++
650 ++ // If we just need to flush the link stack, patch an early return
651 ++ if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
652 ++ patch_instruction_site(&patch__flush_link_stack_return, PPC_INST_BLR);
653 ++ no_count_cache_flush();
654 ++ return;
655 ++ }
656 ++
657 + if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
658 + count_cache_flush_type = COUNT_CACHE_FLUSH_SW;
659 + pr_info("count-cache-flush: full software flush sequence enabled.\n");
660 +@@ -391,7 +434,26 @@ static void toggle_count_cache_flush(bool enable)
661 +
662 + void setup_count_cache_flush(void)
663 + {
664 +- toggle_count_cache_flush(true);
665 ++ bool enable = true;
666 ++
667 ++ if (no_spectrev2 || cpu_mitigations_off()) {
668 ++ if (security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED) ||
669 ++ security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED))
670 ++ pr_warn("Spectre v2 mitigations not fully under software control, can't disable\n");
671 ++
672 ++ enable = false;
673 ++ }
674 ++
675 ++ /*
676 ++ * There's no firmware feature flag/hypervisor bit to tell us we need to
677 ++ * flush the link stack on context switch. So we set it here if we see
678 ++ * either of the Spectre v2 mitigations that aim to protect userspace.
679 ++ */
680 ++ if (security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED) ||
681 ++ security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE))
682 ++ security_ftr_set(SEC_FTR_FLUSH_LINK_STACK);
683 ++
684 ++ toggle_count_cache_flush(enable);
685 + }
686 +
687 + #ifdef CONFIG_DEBUG_FS
688 +diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
689 +index f1878e13dd56..7fe3077a1ef6 100644
690 +--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
691 ++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
692 +@@ -18,6 +18,7 @@
693 + */
694 +
695 + #include <asm/ppc_asm.h>
696 ++#include <asm/code-patching-asm.h>
697 + #include <asm/kvm_asm.h>
698 + #include <asm/reg.h>
699 + #include <asm/mmu.h>
700 +@@ -1559,6 +1560,10 @@ mc_cont:
701 + 1:
702 + #endif /* CONFIG_KVM_XICS */
703 +
704 ++ /* Possibly flush the link stack here. */
705 ++1: nop
706 ++ patch_site 1b patch__call_kvm_flush_link_stack
707 ++
708 + /* For hash guest, read the guest SLB and save it away */
709 + ld r5, VCPU_KVM(r9)
710 + lbz r0, KVM_RADIX(r5)
711 +@@ -2107,6 +2112,29 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
712 + mtlr r0
713 + blr
714 +
715 ++.balign 32
716 ++.global kvm_flush_link_stack
717 ++kvm_flush_link_stack:
718 ++ /* Save LR into r0 */
719 ++ mflr r0
720 ++
721 ++ /* Flush the link stack. On Power8 it's up to 32 entries in size. */
722 ++ .rept 32
723 ++ bl .+4
724 ++ .endr
725 ++
726 ++ /* And on Power9 it's up to 64. */
727 ++BEGIN_FTR_SECTION
728 ++ .rept 32
729 ++ bl .+4
730 ++ .endr
731 ++END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
732 ++
733 ++ /* Restore LR */
734 ++ mtlr r0
735 ++ blr
736 ++
737 ++
738 + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
739 + /*
740 + * Softpatch interrupt for transactional memory emulation cases
741 +diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
742 +index 3ea4c1f107d7..69caeb5bccb2 100644
743 +--- a/arch/powerpc/mm/pgtable-radix.c
744 ++++ b/arch/powerpc/mm/pgtable-radix.c
745 +@@ -294,15 +294,15 @@ retry:
746 + }
747 +
748 + if (split_text_mapping && (mapping_size == PUD_SIZE) &&
749 +- (addr <= __pa_symbol(__init_begin)) &&
750 +- (addr + mapping_size) >= __pa_symbol(_stext)) {
751 ++ (addr < __pa_symbol(__init_begin)) &&
752 ++ (addr + mapping_size) > __pa_symbol(__init_begin)) {
753 + max_mapping_size = PMD_SIZE;
754 + goto retry;
755 + }
756 +
757 + if (split_text_mapping && (mapping_size == PMD_SIZE) &&
758 +- (addr <= __pa_symbol(__init_begin)) &&
759 +- (addr + mapping_size) >= __pa_symbol(_stext)) {
760 ++ (addr < __pa_symbol(__init_begin)) &&
761 ++ (addr + mapping_size) > __pa_symbol(__init_begin)) {
762 + mapping_size = PAGE_SIZE;
763 + psize = mmu_virtual_psize;
764 + }
765 +diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
766 +index 796ff5de26d0..1749f15fc070 100644
767 +--- a/arch/powerpc/mm/tlb-radix.c
768 ++++ b/arch/powerpc/mm/tlb-radix.c
769 +@@ -1072,7 +1072,6 @@ void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
770 + goto local;
771 + }
772 + _tlbie_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
773 +- goto local;
774 + } else {
775 + local:
776 + _tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
777 +diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c
778 +index 232bf5987f91..dd3cc4632b9a 100644
779 +--- a/arch/powerpc/platforms/powernv/memtrace.c
780 ++++ b/arch/powerpc/platforms/powernv/memtrace.c
781 +@@ -244,9 +244,11 @@ static int memtrace_online(void)
782 + * we need to online the memory ourselves.
783 + */
784 + if (!memhp_auto_online) {
785 ++ lock_device_hotplug();
786 + walk_memory_range(PFN_DOWN(ent->start),
787 + PFN_UP(ent->start + ent->size - 1),
788 + NULL, online_mem_block);
789 ++ unlock_device_hotplug();
790 + }
791 +
792 + /*
793 +diff --git a/arch/powerpc/platforms/ps3/os-area.c b/arch/powerpc/platforms/ps3/os-area.c
794 +index cdbfc5cfd6f3..f5387ad82279 100644
795 +--- a/arch/powerpc/platforms/ps3/os-area.c
796 ++++ b/arch/powerpc/platforms/ps3/os-area.c
797 +@@ -664,7 +664,7 @@ static int update_flash_db(void)
798 + db_set_64(db, &os_area_db_id_rtc_diff, saved_params.rtc_diff);
799 +
800 + count = os_area_flash_write(db, sizeof(struct os_area_db), pos);
801 +- if (count < sizeof(struct os_area_db)) {
802 ++ if (count < 0 || count < sizeof(struct os_area_db)) {
803 + pr_debug("%s: os_area_flash_write failed %zd\n", __func__,
804 + count);
805 + error = count < 0 ? count : -EIO;
806 +diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
807 +index 2f166136bb50..d93ff494e778 100644
808 +--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
809 ++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
810 +@@ -676,7 +676,7 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
811 + nid = memory_add_physaddr_to_nid(lmb->base_addr);
812 +
813 + /* Add the memory */
814 +- rc = add_memory(nid, lmb->base_addr, block_sz);
815 ++ rc = __add_memory(nid, lmb->base_addr, block_sz);
816 + if (rc) {
817 + invalidate_lmb_associativity_index(lmb);
818 + return rc;
819 +diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
820 +index ea602f7f97ce..49e3a88b6a0c 100644
821 +--- a/arch/powerpc/platforms/pseries/lpar.c
822 ++++ b/arch/powerpc/platforms/pseries/lpar.c
823 +@@ -48,6 +48,7 @@
824 + #include <asm/kexec.h>
825 + #include <asm/fadump.h>
826 + #include <asm/asm-prototypes.h>
827 ++#include <asm/debugfs.h>
828 +
829 + #include "pseries.h"
830 +
831 +@@ -1032,3 +1033,56 @@ static int __init reserve_vrma_context_id(void)
832 + return 0;
833 + }
834 + machine_device_initcall(pseries, reserve_vrma_context_id);
835 ++
836 ++#ifdef CONFIG_DEBUG_FS
837 ++/* debugfs file interface for vpa data */
838 ++static ssize_t vpa_file_read(struct file *filp, char __user *buf, size_t len,
839 ++ loff_t *pos)
840 ++{
841 ++ int cpu = (long)filp->private_data;
842 ++ struct lppaca *lppaca = &lppaca_of(cpu);
843 ++
844 ++ return simple_read_from_buffer(buf, len, pos, lppaca,
845 ++ sizeof(struct lppaca));
846 ++}
847 ++
848 ++static const struct file_operations vpa_fops = {
849 ++ .open = simple_open,
850 ++ .read = vpa_file_read,
851 ++ .llseek = default_llseek,
852 ++};
853 ++
854 ++static int __init vpa_debugfs_init(void)
855 ++{
856 ++ char name[16];
857 ++ long i;
858 ++ static struct dentry *vpa_dir;
859 ++
860 ++ if (!firmware_has_feature(FW_FEATURE_SPLPAR))
861 ++ return 0;
862 ++
863 ++ vpa_dir = debugfs_create_dir("vpa", powerpc_debugfs_root);
864 ++ if (!vpa_dir) {
865 ++ pr_warn("%s: can't create vpa root dir\n", __func__);
866 ++ return -ENOMEM;
867 ++ }
868 ++
869 ++ /* set up the per-cpu vpa file*/
870 ++ for_each_possible_cpu(i) {
871 ++ struct dentry *d;
872 ++
873 ++ sprintf(name, "cpu-%ld", i);
874 ++
875 ++ d = debugfs_create_file(name, 0400, vpa_dir, (void *)i,
876 ++ &vpa_fops);
877 ++ if (!d) {
878 ++ pr_warn("%s: can't create per-cpu vpa file\n",
879 ++ __func__);
880 ++ return -ENOMEM;
881 ++ }
882 ++ }
883 ++
884 ++ return 0;
885 ++}
886 ++machine_arch_initcall(pseries, vpa_debugfs_init);
887 ++#endif /* CONFIG_DEBUG_FS */
888 +diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile
889 +index 9d7d8e6d705c..9ba44e190e5e 100644
890 +--- a/arch/powerpc/xmon/Makefile
891 ++++ b/arch/powerpc/xmon/Makefile
892 +@@ -13,6 +13,12 @@ UBSAN_SANITIZE := n
893 + ORIG_CFLAGS := $(KBUILD_CFLAGS)
894 + KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS))
895 +
896 ++ifdef CONFIG_CC_IS_CLANG
897 ++# clang stores addresses on the stack causing the frame size to blow
898 ++# out. See https://github.com/ClangBuiltLinux/linux/issues/252
899 ++KBUILD_CFLAGS += -Wframe-larger-than=4096
900 ++endif
901 ++
902 + ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
903 +
904 + obj-y += xmon.o nonstdio.o spr_access.o
905 +diff --git a/arch/riscv/mm/ioremap.c b/arch/riscv/mm/ioremap.c
906 +index 70ef2724cdf6..bd2f2db557cc 100644
907 +--- a/arch/riscv/mm/ioremap.c
908 ++++ b/arch/riscv/mm/ioremap.c
909 +@@ -42,7 +42,7 @@ static void __iomem *__ioremap_caller(phys_addr_t addr, size_t size,
910 +
911 + /* Page-align mappings */
912 + offset = addr & (~PAGE_MASK);
913 +- addr &= PAGE_MASK;
914 ++ addr -= offset;
915 + size = PAGE_ALIGN(size + offset);
916 +
917 + area = get_vm_area_caller(size, VM_IOREMAP, caller);
918 +diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
919 +index 44404836e9d1..df92c2af99b6 100644
920 +--- a/arch/s390/kernel/perf_cpum_sf.c
921 ++++ b/arch/s390/kernel/perf_cpum_sf.c
922 +@@ -2045,14 +2045,17 @@ static int __init init_cpum_sampling_pmu(void)
923 + }
924 +
925 + sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80);
926 +- if (!sfdbg)
927 ++ if (!sfdbg) {
928 + pr_err("Registering for s390dbf failed\n");
929 ++ return -ENOMEM;
930 ++ }
931 + debug_register_view(sfdbg, &debug_sprintf_view);
932 +
933 + err = register_external_irq(EXT_IRQ_MEASURE_ALERT,
934 + cpumf_measurement_alert);
935 + if (err) {
936 + pr_cpumsf_err(RS_INIT_FAILURE_ALRT);
937 ++ debug_unregister(sfdbg);
938 + goto out;
939 + }
940 +
941 +@@ -2061,6 +2064,7 @@ static int __init init_cpum_sampling_pmu(void)
942 + pr_cpumsf_err(RS_INIT_FAILURE_PERF);
943 + unregister_external_irq(EXT_IRQ_MEASURE_ALERT,
944 + cpumf_measurement_alert);
945 ++ debug_unregister(sfdbg);
946 + goto out;
947 + }
948 +
949 +diff --git a/arch/sparc/include/asm/cmpxchg_64.h b/arch/sparc/include/asm/cmpxchg_64.h
950 +index f71ef3729888..316faa0130ba 100644
951 +--- a/arch/sparc/include/asm/cmpxchg_64.h
952 ++++ b/arch/sparc/include/asm/cmpxchg_64.h
953 +@@ -52,7 +52,12 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
954 + return val;
955 + }
956 +
957 +-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
958 ++#define xchg(ptr,x) \
959 ++({ __typeof__(*(ptr)) __ret; \
960 ++ __ret = (__typeof__(*(ptr))) \
961 ++ __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \
962 ++ __ret; \
963 ++})
964 +
965 + void __xchg_called_with_bad_pointer(void);
966 +
967 +diff --git a/arch/sparc/include/asm/parport.h b/arch/sparc/include/asm/parport.h
968 +index 05df5f043053..3c5a1c620f0f 100644
969 +--- a/arch/sparc/include/asm/parport.h
970 ++++ b/arch/sparc/include/asm/parport.h
971 +@@ -21,6 +21,7 @@
972 + */
973 + #define HAS_DMA
974 +
975 ++#ifdef CONFIG_PARPORT_PC_FIFO
976 + static DEFINE_SPINLOCK(dma_spin_lock);
977 +
978 + #define claim_dma_lock() \
979 +@@ -31,6 +32,7 @@ static DEFINE_SPINLOCK(dma_spin_lock);
980 +
981 + #define release_dma_lock(__flags) \
982 + spin_unlock_irqrestore(&dma_spin_lock, __flags);
983 ++#endif
984 +
985 + static struct sparc_ebus_info {
986 + struct ebus_dma_info info;
987 +diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
988 +index 8d80b27502e6..7e524efed584 100644
989 +--- a/arch/um/drivers/line.c
990 ++++ b/arch/um/drivers/line.c
991 +@@ -261,7 +261,7 @@ static irqreturn_t line_write_interrupt(int irq, void *data)
992 + if (err == 0) {
993 + spin_unlock(&line->lock);
994 + return IRQ_NONE;
995 +- } else if (err < 0) {
996 ++ } else if ((err < 0) && (err != -EAGAIN)) {
997 + line->head = line->buffer;
998 + line->tail = line->buffer;
999 + }
1000 +@@ -284,7 +284,7 @@ int line_setup_irq(int fd, int input, int output, struct line *line, void *data)
1001 + if (err)
1002 + return err;
1003 + if (output)
1004 +- err = um_request_irq(driver->write_irq, fd, IRQ_NONE,
1005 ++ err = um_request_irq(driver->write_irq, fd, IRQ_WRITE,
1006 + line_write_interrupt, IRQF_SHARED,
1007 + driver->write_irq_name, data);
1008 + return err;
1009 +diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
1010 +index 6de1fd3d0097..ee696efec99f 100644
1011 +--- a/arch/x86/include/asm/ptrace.h
1012 ++++ b/arch/x86/include/asm/ptrace.h
1013 +@@ -236,24 +236,52 @@ static inline int regs_within_kernel_stack(struct pt_regs *regs,
1014 + (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
1015 + }
1016 +
1017 ++/**
1018 ++ * regs_get_kernel_stack_nth_addr() - get the address of the Nth entry on stack
1019 ++ * @regs: pt_regs which contains kernel stack pointer.
1020 ++ * @n: stack entry number.
1021 ++ *
1022 ++ * regs_get_kernel_stack_nth() returns the address of the @n th entry of the
1023 ++ * kernel stack which is specified by @regs. If the @n th entry is NOT in
1024 ++ * the kernel stack, this returns NULL.
1025 ++ */
1026 ++static inline unsigned long *regs_get_kernel_stack_nth_addr(struct pt_regs *regs, unsigned int n)
1027 ++{
1028 ++ unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
1029 ++
1030 ++ addr += n;
1031 ++ if (regs_within_kernel_stack(regs, (unsigned long)addr))
1032 ++ return addr;
1033 ++ else
1034 ++ return NULL;
1035 ++}
1036 ++
1037 ++/* To avoid include hell, we can't include uaccess.h */
1038 ++extern long probe_kernel_read(void *dst, const void *src, size_t size);
1039 ++
1040 + /**
1041 + * regs_get_kernel_stack_nth() - get Nth entry of the stack
1042 + * @regs: pt_regs which contains kernel stack pointer.
1043 + * @n: stack entry number.
1044 + *
1045 + * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
1046 +- * is specified by @regs. If the @n th entry is NOT in the kernel stack,
1047 ++ * is specified by @regs. If the @n th entry is NOT in the kernel stack
1048 + * this returns 0.
1049 + */
1050 + static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
1051 + unsigned int n)
1052 + {
1053 +- unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
1054 +- addr += n;
1055 +- if (regs_within_kernel_stack(regs, (unsigned long)addr))
1056 +- return *addr;
1057 +- else
1058 +- return 0;
1059 ++ unsigned long *addr;
1060 ++ unsigned long val;
1061 ++ long ret;
1062 ++
1063 ++ addr = regs_get_kernel_stack_nth_addr(regs, n);
1064 ++ if (addr) {
1065 ++ ret = probe_kernel_read(&val, addr, sizeof(val));
1066 ++ if (!ret)
1067 ++ return val;
1068 ++ }
1069 ++ return 0;
1070 + }
1071 +
1072 + #define arch_has_single_step() (1)
1073 +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
1074 +index 4684ad7ba793..2d23a448e72d 100644
1075 +--- a/arch/x86/kernel/cpu/bugs.c
1076 ++++ b/arch/x86/kernel/cpu/bugs.c
1077 +@@ -39,6 +39,7 @@ static void __init spectre_v2_select_mitigation(void);
1078 + static void __init ssb_select_mitigation(void);
1079 + static void __init l1tf_select_mitigation(void);
1080 + static void __init mds_select_mitigation(void);
1081 ++static void __init mds_print_mitigation(void);
1082 + static void __init taa_select_mitigation(void);
1083 +
1084 + /* The base value of the SPEC_CTRL MSR that always has to be preserved. */
1085 +@@ -108,6 +109,12 @@ void __init check_bugs(void)
1086 + mds_select_mitigation();
1087 + taa_select_mitigation();
1088 +
1089 ++ /*
1090 ++ * As MDS and TAA mitigations are inter-related, print MDS
1091 ++ * mitigation until after TAA mitigation selection is done.
1092 ++ */
1093 ++ mds_print_mitigation();
1094 ++
1095 + arch_smt_update();
1096 +
1097 + #ifdef CONFIG_X86_32
1098 +@@ -245,6 +252,12 @@ static void __init mds_select_mitigation(void)
1099 + (mds_nosmt || cpu_mitigations_auto_nosmt()))
1100 + cpu_smt_disable(false);
1101 + }
1102 ++}
1103 ++
1104 ++static void __init mds_print_mitigation(void)
1105 ++{
1106 ++ if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off())
1107 ++ return;
1108 +
1109 + pr_info("%s\n", mds_strings[mds_mitigation]);
1110 + }
1111 +@@ -304,8 +317,12 @@ static void __init taa_select_mitigation(void)
1112 + return;
1113 + }
1114 +
1115 +- /* TAA mitigation is turned off on the cmdline (tsx_async_abort=off) */
1116 +- if (taa_mitigation == TAA_MITIGATION_OFF)
1117 ++ /*
1118 ++ * TAA mitigation via VERW is turned off if both
1119 ++ * tsx_async_abort=off and mds=off are specified.
1120 ++ */
1121 ++ if (taa_mitigation == TAA_MITIGATION_OFF &&
1122 ++ mds_mitigation == MDS_MITIGATION_OFF)
1123 + goto out;
1124 +
1125 + if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
1126 +@@ -339,6 +356,15 @@ static void __init taa_select_mitigation(void)
1127 + if (taa_nosmt || cpu_mitigations_auto_nosmt())
1128 + cpu_smt_disable(false);
1129 +
1130 ++ /*
1131 ++ * Update MDS mitigation, if necessary, as the mds_user_clear is
1132 ++ * now enabled for TAA mitigation.
1133 ++ */
1134 ++ if (mds_mitigation == MDS_MITIGATION_OFF &&
1135 ++ boot_cpu_has_bug(X86_BUG_MDS)) {
1136 ++ mds_mitigation = MDS_MITIGATION_FULL;
1137 ++ mds_select_mitigation();
1138 ++ }
1139 + out:
1140 + pr_info("%s\n", taa_strings[taa_mitigation]);
1141 + }
1142 +diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
1143 +index cc43c5abd187..b99a04da70f6 100644
1144 +--- a/arch/x86/kernel/cpu/intel_rdt.c
1145 ++++ b/arch/x86/kernel/cpu/intel_rdt.c
1146 +@@ -610,6 +610,13 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
1147 + cancel_delayed_work(&d->cqm_limbo);
1148 + }
1149 +
1150 ++ /*
1151 ++ * rdt_domain "d" is going to be freed below, so clear
1152 ++ * its pointer from pseudo_lock_region struct.
1153 ++ */
1154 ++ if (d->plr)
1155 ++ d->plr->d = NULL;
1156 ++
1157 + kfree(d->ctrl_val);
1158 + kfree(d->mbps_val);
1159 + kfree(d->rmid_busy_llc);
1160 +diff --git a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
1161 +index 968ace3c6d73..c8b72aff55e0 100644
1162 +--- a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
1163 ++++ b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
1164 +@@ -408,8 +408,16 @@ int rdtgroup_schemata_show(struct kernfs_open_file *of,
1165 + for_each_alloc_enabled_rdt_resource(r)
1166 + seq_printf(s, "%s:uninitialized\n", r->name);
1167 + } else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
1168 +- seq_printf(s, "%s:%d=%x\n", rdtgrp->plr->r->name,
1169 +- rdtgrp->plr->d->id, rdtgrp->plr->cbm);
1170 ++ if (!rdtgrp->plr->d) {
1171 ++ rdt_last_cmd_clear();
1172 ++ rdt_last_cmd_puts("Cache domain offline\n");
1173 ++ ret = -ENODEV;
1174 ++ } else {
1175 ++ seq_printf(s, "%s:%d=%x\n",
1176 ++ rdtgrp->plr->r->name,
1177 ++ rdtgrp->plr->d->id,
1178 ++ rdtgrp->plr->cbm);
1179 ++ }
1180 + } else {
1181 + closid = rdtgrp->closid;
1182 + for_each_alloc_enabled_rdt_resource(r) {
1183 +diff --git a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
1184 +index 912d53939f4f..a999a58ca331 100644
1185 +--- a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
1186 ++++ b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
1187 +@@ -1116,6 +1116,11 @@ static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel)
1188 + goto out;
1189 + }
1190 +
1191 ++ if (!plr->d) {
1192 ++ ret = -ENODEV;
1193 ++ goto out;
1194 ++ }
1195 ++
1196 + plr->thread_done = 0;
1197 + cpu = cpumask_first(&plr->d->cpu_mask);
1198 + if (!cpu_online(cpu)) {
1199 +@@ -1429,6 +1434,11 @@ static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma)
1200 +
1201 + plr = rdtgrp->plr;
1202 +
1203 ++ if (!plr->d) {
1204 ++ mutex_unlock(&rdtgroup_mutex);
1205 ++ return -ENODEV;
1206 ++ }
1207 ++
1208 + /*
1209 + * Task is required to run with affinity to the cpus associated
1210 + * with the pseudo-locked region. If this is not the case the task
1211 +diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
1212 +index ad64031e82dc..a2d7e6646cce 100644
1213 +--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
1214 ++++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
1215 +@@ -268,17 +268,27 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of,
1216 + struct seq_file *s, void *v)
1217 + {
1218 + struct rdtgroup *rdtgrp;
1219 ++ struct cpumask *mask;
1220 + int ret = 0;
1221 +
1222 + rdtgrp = rdtgroup_kn_lock_live(of->kn);
1223 +
1224 + if (rdtgrp) {
1225 +- if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
1226 +- seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
1227 +- cpumask_pr_args(&rdtgrp->plr->d->cpu_mask));
1228 +- else
1229 ++ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
1230 ++ if (!rdtgrp->plr->d) {
1231 ++ rdt_last_cmd_clear();
1232 ++ rdt_last_cmd_puts("Cache domain offline\n");
1233 ++ ret = -ENODEV;
1234 ++ } else {
1235 ++ mask = &rdtgrp->plr->d->cpu_mask;
1236 ++ seq_printf(s, is_cpu_list(of) ?
1237 ++ "%*pbl\n" : "%*pb\n",
1238 ++ cpumask_pr_args(mask));
1239 ++ }
1240 ++ } else {
1241 + seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
1242 + cpumask_pr_args(&rdtgrp->cpu_mask));
1243 ++ }
1244 + } else {
1245 + ret = -ENOENT;
1246 + }
1247 +@@ -1286,6 +1296,7 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
1248 + struct rdt_resource *r;
1249 + struct rdt_domain *d;
1250 + unsigned int size;
1251 ++ int ret = 0;
1252 + bool sep;
1253 + u32 ctrl;
1254 +
1255 +@@ -1296,11 +1307,18 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
1256 + }
1257 +
1258 + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
1259 +- seq_printf(s, "%*s:", max_name_width, rdtgrp->plr->r->name);
1260 +- size = rdtgroup_cbm_to_size(rdtgrp->plr->r,
1261 +- rdtgrp->plr->d,
1262 +- rdtgrp->plr->cbm);
1263 +- seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
1264 ++ if (!rdtgrp->plr->d) {
1265 ++ rdt_last_cmd_clear();
1266 ++ rdt_last_cmd_puts("Cache domain offline\n");
1267 ++ ret = -ENODEV;
1268 ++ } else {
1269 ++ seq_printf(s, "%*s:", max_name_width,
1270 ++ rdtgrp->plr->r->name);
1271 ++ size = rdtgroup_cbm_to_size(rdtgrp->plr->r,
1272 ++ rdtgrp->plr->d,
1273 ++ rdtgrp->plr->cbm);
1274 ++ seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
1275 ++ }
1276 + goto out;
1277 + }
1278 +
1279 +@@ -1330,7 +1348,7 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
1280 + out:
1281 + rdtgroup_kn_unlock(of->kn);
1282 +
1283 +- return 0;
1284 ++ return ret;
1285 + }
1286 +
1287 + /* rdtgroup information files for one cache resource. */
1288 +diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
1289 +index d7db7608de5f..eddf91a0e363 100644
1290 +--- a/arch/x86/kvm/mmu.c
1291 ++++ b/arch/x86/kvm/mmu.c
1292 +@@ -3261,7 +3261,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
1293 + * here.
1294 + */
1295 + if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
1296 +- level == PT_PAGE_TABLE_LEVEL &&
1297 ++ !kvm_is_zone_device_pfn(pfn) && level == PT_PAGE_TABLE_LEVEL &&
1298 + PageTransCompoundMap(pfn_to_page(pfn)) &&
1299 + !mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
1300 + unsigned long mask;
1301 +@@ -5709,9 +5709,9 @@ restart:
1302 + * the guest, and the guest page table is using 4K page size
1303 + * mapping if the indirect sp has level = 1.
1304 + */
1305 +- if (sp->role.direct &&
1306 +- !kvm_is_reserved_pfn(pfn) &&
1307 +- PageTransCompoundMap(pfn_to_page(pfn))) {
1308 ++ if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
1309 ++ !kvm_is_zone_device_pfn(pfn) &&
1310 ++ PageTransCompoundMap(pfn_to_page(pfn))) {
1311 + drop_spte(kvm, sptep);
1312 + need_tlb_flush = 1;
1313 + goto restart;
1314 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
1315 +index 1ab4bb3d6a04..0b7559bf15ea 100644
1316 +--- a/arch/x86/kvm/vmx.c
1317 ++++ b/arch/x86/kvm/vmx.c
1318 +@@ -2079,7 +2079,7 @@ static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
1319 + return -1;
1320 + }
1321 +
1322 +-static inline void __invvpid(int ext, u16 vpid, gva_t gva)
1323 ++static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva)
1324 + {
1325 + struct {
1326 + u64 vpid : 16;
1327 +@@ -2094,7 +2094,7 @@ static inline void __invvpid(int ext, u16 vpid, gva_t gva)
1328 + BUG_ON(error);
1329 + }
1330 +
1331 +-static inline void __invept(int ext, u64 eptp, gpa_t gpa)
1332 ++static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa)
1333 + {
1334 + struct {
1335 + u64 eptp, gpa;
1336 +@@ -11013,6 +11013,10 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
1337 + vmx->loaded_vmcs = vmcs;
1338 + vmx_vcpu_load(vcpu, cpu);
1339 + put_cpu();
1340 ++
1341 ++ vm_entry_controls_reset_shadow(vmx);
1342 ++ vm_exit_controls_reset_shadow(vmx);
1343 ++ vmx_segment_cache_clear(vmx);
1344 + }
1345 +
1346 + /*
1347 +@@ -12690,6 +12694,9 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
1348 + if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
1349 + evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
1350 +
1351 ++ if (from_vmentry && check_vmentry_postreqs(vcpu, vmcs12, exit_qual))
1352 ++ return EXIT_REASON_INVALID_STATE;
1353 ++
1354 + enter_guest_mode(vcpu);
1355 +
1356 + if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
1357 +@@ -12699,7 +12706,6 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
1358 + vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
1359 +
1360 + vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
1361 +- vmx_segment_cache_clear(vmx);
1362 +
1363 + if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
1364 + vcpu->arch.tsc_offset += vmcs12->tsc_offset;
1365 +@@ -12833,13 +12839,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
1366 + */
1367 + skip_emulated_instruction(vcpu);
1368 +
1369 +- ret = check_vmentry_postreqs(vcpu, vmcs12, &exit_qual);
1370 +- if (ret) {
1371 +- nested_vmx_entry_failure(vcpu, vmcs12,
1372 +- EXIT_REASON_INVALID_STATE, exit_qual);
1373 +- return 1;
1374 +- }
1375 +-
1376 + /*
1377 + * We're finally done with prerequisite checking, and can start with
1378 + * the nested entry.
1379 +@@ -13530,9 +13529,6 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
1380 + }
1381 +
1382 + vmx_switch_vmcs(vcpu, &vmx->vmcs01);
1383 +- vm_entry_controls_reset_shadow(vmx);
1384 +- vm_exit_controls_reset_shadow(vmx);
1385 +- vmx_segment_cache_clear(vmx);
1386 +
1387 + /* Update any VMCS fields that might have changed while L2 ran */
1388 + vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
1389 +diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk
1390 +index b02a36b2c14f..a42015b305f4 100644
1391 +--- a/arch/x86/tools/gen-insn-attr-x86.awk
1392 ++++ b/arch/x86/tools/gen-insn-attr-x86.awk
1393 +@@ -69,7 +69,7 @@ BEGIN {
1394 +
1395 + lprefix1_expr = "\\((66|!F3)\\)"
1396 + lprefix2_expr = "\\(F3\\)"
1397 +- lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)"
1398 ++ lprefix3_expr = "\\((F2|!F3|66&F2)\\)"
1399 + lprefix_expr = "\\((66|F2|F3)\\)"
1400 + max_lprefix = 4
1401 +
1402 +@@ -257,7 +257,7 @@ function convert_operands(count,opnd, i,j,imm,mod)
1403 + return add_flags(imm, mod)
1404 + }
1405 +
1406 +-/^[0-9a-f]+\:/ {
1407 ++/^[0-9a-f]+:/ {
1408 + if (NR == 1)
1409 + next
1410 + # get index
1411 +diff --git a/block/blk-core.c b/block/blk-core.c
1412 +index 074ae9376189..ea33d6abdcfc 100644
1413 +--- a/block/blk-core.c
1414 ++++ b/block/blk-core.c
1415 +@@ -784,6 +784,9 @@ void blk_cleanup_queue(struct request_queue *q)
1416 + * prevent that q->request_fn() gets invoked after draining finished.
1417 + */
1418 + blk_freeze_queue(q);
1419 ++
1420 ++ rq_qos_exit(q);
1421 ++
1422 + spin_lock_irq(lock);
1423 + queue_flag_set(QUEUE_FLAG_DEAD, q);
1424 + spin_unlock_irq(lock);
1425 +diff --git a/block/blk-merge.c b/block/blk-merge.c
1426 +index 2e042190a4f1..1dced51de1c6 100644
1427 +--- a/block/blk-merge.c
1428 ++++ b/block/blk-merge.c
1429 +@@ -669,6 +669,31 @@ static void blk_account_io_merge(struct request *req)
1430 + part_stat_unlock();
1431 + }
1432 + }
1433 ++/*
1434 ++ * Two cases of handling DISCARD merge:
1435 ++ * If max_discard_segments > 1, the driver takes every bio
1436 ++ * as a range and send them to controller together. The ranges
1437 ++ * needn't to be contiguous.
1438 ++ * Otherwise, the bios/requests will be handled as same as
1439 ++ * others which should be contiguous.
1440 ++ */
1441 ++static inline bool blk_discard_mergable(struct request *req)
1442 ++{
1443 ++ if (req_op(req) == REQ_OP_DISCARD &&
1444 ++ queue_max_discard_segments(req->q) > 1)
1445 ++ return true;
1446 ++ return false;
1447 ++}
1448 ++
1449 ++enum elv_merge blk_try_req_merge(struct request *req, struct request *next)
1450 ++{
1451 ++ if (blk_discard_mergable(req))
1452 ++ return ELEVATOR_DISCARD_MERGE;
1453 ++ else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
1454 ++ return ELEVATOR_BACK_MERGE;
1455 ++
1456 ++ return ELEVATOR_NO_MERGE;
1457 ++}
1458 +
1459 + /*
1460 + * For non-mq, this has to be called with the request spinlock acquired.
1461 +@@ -686,12 +711,6 @@ static struct request *attempt_merge(struct request_queue *q,
1462 + if (req_op(req) != req_op(next))
1463 + return NULL;
1464 +
1465 +- /*
1466 +- * not contiguous
1467 +- */
1468 +- if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
1469 +- return NULL;
1470 +-
1471 + if (rq_data_dir(req) != rq_data_dir(next)
1472 + || req->rq_disk != next->rq_disk
1473 + || req_no_special_merge(next))
1474 +@@ -715,11 +734,19 @@ static struct request *attempt_merge(struct request_queue *q,
1475 + * counts here. Handle DISCARDs separately, as they
1476 + * have separate settings.
1477 + */
1478 +- if (req_op(req) == REQ_OP_DISCARD) {
1479 ++
1480 ++ switch (blk_try_req_merge(req, next)) {
1481 ++ case ELEVATOR_DISCARD_MERGE:
1482 + if (!req_attempt_discard_merge(q, req, next))
1483 + return NULL;
1484 +- } else if (!ll_merge_requests_fn(q, req, next))
1485 ++ break;
1486 ++ case ELEVATOR_BACK_MERGE:
1487 ++ if (!ll_merge_requests_fn(q, req, next))
1488 ++ return NULL;
1489 ++ break;
1490 ++ default:
1491 + return NULL;
1492 ++ }
1493 +
1494 + /*
1495 + * If failfast settings disagree or any of the two is already
1496 +@@ -843,8 +870,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
1497 +
1498 + enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
1499 + {
1500 +- if (req_op(rq) == REQ_OP_DISCARD &&
1501 +- queue_max_discard_segments(rq->q) > 1)
1502 ++ if (blk_discard_mergable(rq))
1503 + return ELEVATOR_DISCARD_MERGE;
1504 + else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
1505 + return ELEVATOR_BACK_MERGE;
1506 +diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
1507 +index bab47a17b96f..8286640d4d66 100644
1508 +--- a/block/blk-sysfs.c
1509 ++++ b/block/blk-sysfs.c
1510 +@@ -997,8 +997,6 @@ void blk_unregister_queue(struct gendisk *disk)
1511 + kobject_del(&q->kobj);
1512 + blk_trace_remove_sysfs(disk_to_dev(disk));
1513 +
1514 +- rq_qos_exit(q);
1515 +-
1516 + mutex_lock(&q->sysfs_lock);
1517 + if (q->request_fn || (q->mq_ops && q->elevator))
1518 + elv_unregister_queue(q);
1519 +diff --git a/crypto/testmgr.c b/crypto/testmgr.c
1520 +index 3664c26f4838..13cb2ea99d6a 100644
1521 +--- a/crypto/testmgr.c
1522 ++++ b/crypto/testmgr.c
1523 +@@ -1400,8 +1400,8 @@ static int test_comp(struct crypto_comp *tfm,
1524 + int ilen;
1525 + unsigned int dlen = COMP_BUF_SIZE;
1526 +
1527 +- memset(output, 0, sizeof(COMP_BUF_SIZE));
1528 +- memset(decomp_output, 0, sizeof(COMP_BUF_SIZE));
1529 ++ memset(output, 0, COMP_BUF_SIZE);
1530 ++ memset(decomp_output, 0, COMP_BUF_SIZE);
1531 +
1532 + ilen = ctemplate[i].inlen;
1533 + ret = crypto_comp_compress(tfm, ctemplate[i].input,
1534 +@@ -1445,7 +1445,7 @@ static int test_comp(struct crypto_comp *tfm,
1535 + int ilen;
1536 + unsigned int dlen = COMP_BUF_SIZE;
1537 +
1538 +- memset(decomp_output, 0, sizeof(COMP_BUF_SIZE));
1539 ++ memset(decomp_output, 0, COMP_BUF_SIZE);
1540 +
1541 + ilen = dtemplate[i].inlen;
1542 + ret = crypto_comp_decompress(tfm, dtemplate[i].input,
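The testmgr change above fixes a classic C pitfall: COMP_BUF_SIZE is an integer constant, so sizeof(COMP_BUF_SIZE) evaluates to sizeof(int) (typically 4), and the memset() was zeroing only the first few bytes of the buffer. A minimal sketch of the bug and the fix:

    #include <stdio.h>
    #include <string.h>

    #define COMP_BUF_SIZE 1024

    int main(void)
    {
        char output[COMP_BUF_SIZE];

        /* Bug: sizeof(1024) == sizeof(int), so only ~4 bytes are cleared. */
        memset(output, 0, sizeof(COMP_BUF_SIZE));
        printf("cleared %zu of %d bytes\n",
               sizeof(COMP_BUF_SIZE), COMP_BUF_SIZE);

        /* Fix: pass the size itself (or sizeof(output) for a true array). */
        memset(output, 0, COMP_BUF_SIZE);
        return 0;
    }
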
1543 +diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
1544 +index 6b0d3ef7309c..2ccfbb61ca89 100644
1545 +--- a/drivers/acpi/acpi_memhotplug.c
1546 ++++ b/drivers/acpi/acpi_memhotplug.c
1547 +@@ -228,7 +228,7 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
1548 + if (node < 0)
1549 + node = memory_add_physaddr_to_nid(info->start_addr);
1550 +
1551 +- result = add_memory(node, info->start_addr, info->length);
1552 ++ result = __add_memory(node, info->start_addr, info->length);
1553 +
1554 + /*
1555 + * If the memory block has been used by the kernel, add_memory()
1556 +diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
1557 +index e1b6231cfa1c..1dcc48b9d33c 100644
1558 +--- a/drivers/acpi/scan.c
1559 ++++ b/drivers/acpi/scan.c
1560 +@@ -1550,6 +1550,7 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
1561 + */
1562 + static const struct acpi_device_id i2c_multi_instantiate_ids[] = {
1563 + {"BSG1160", },
1564 ++ {"INT33FE", },
1565 + {}
1566 + };
1567 +
1568 +diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
1569 +index e89146ddede6..d5c76b50d357 100644
1570 +--- a/drivers/atm/zatm.c
1571 ++++ b/drivers/atm/zatm.c
1572 +@@ -126,7 +126,7 @@ static unsigned long dummy[2] = {0,0};
1573 + #define zin_n(r) inl(zatm_dev->base+r*4)
1574 + #define zin(r) inl(zatm_dev->base+uPD98401_##r*4)
1575 + #define zout(v,r) outl(v,zatm_dev->base+uPD98401_##r*4)
1576 +-#define zwait while (zin(CMR) & uPD98401_BUSY)
1577 ++#define zwait() do {} while (zin(CMR) & uPD98401_BUSY)
1578 +
1579 + /* RX0, RX1, TX0, TX1 */
1580 + static const int mbx_entries[NR_MBX] = { 1024,1024,1024,1024 };
1581 +@@ -140,7 +140,7 @@ static const int mbx_esize[NR_MBX] = { 16,16,4,4 }; /* entry size in bytes */
1582 +
1583 + static void zpokel(struct zatm_dev *zatm_dev,u32 value,u32 addr)
1584 + {
1585 +- zwait;
1586 ++ zwait();
1587 + zout(value,CER);
1588 + zout(uPD98401_IND_ACC | uPD98401_IA_BALL |
1589 + (uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR);
1590 +@@ -149,10 +149,10 @@ static void zpokel(struct zatm_dev *zatm_dev,u32 value,u32 addr)
1591 +
1592 + static u32 zpeekl(struct zatm_dev *zatm_dev,u32 addr)
1593 + {
1594 +- zwait;
1595 ++ zwait();
1596 + zout(uPD98401_IND_ACC | uPD98401_IA_BALL | uPD98401_IA_RW |
1597 + (uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR);
1598 +- zwait;
1599 ++ zwait();
1600 + return zin(CER);
1601 + }
1602 +
1603 +@@ -241,7 +241,7 @@ static void refill_pool(struct atm_dev *dev,int pool)
1604 + }
1605 + if (first) {
1606 + spin_lock_irqsave(&zatm_dev->lock, flags);
1607 +- zwait;
1608 ++ zwait();
1609 + zout(virt_to_bus(first),CER);
1610 + zout(uPD98401_ADD_BAT | (pool << uPD98401_POOL_SHIFT) | count,
1611 + CMR);
1612 +@@ -508,9 +508,9 @@ static int open_rx_first(struct atm_vcc *vcc)
1613 + }
1614 + if (zatm_vcc->pool < 0) return -EMSGSIZE;
1615 + spin_lock_irqsave(&zatm_dev->lock, flags);
1616 +- zwait;
1617 ++ zwait();
1618 + zout(uPD98401_OPEN_CHAN,CMR);
1619 +- zwait;
1620 ++ zwait();
1621 + DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER));
1622 + chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT;
1623 + spin_unlock_irqrestore(&zatm_dev->lock, flags);
1624 +@@ -571,21 +571,21 @@ static void close_rx(struct atm_vcc *vcc)
1625 + pos = vcc->vci >> 1;
1626 + shift = (1-(vcc->vci & 1)) << 4;
1627 + zpokel(zatm_dev,zpeekl(zatm_dev,pos) & ~(0xffff << shift),pos);
1628 +- zwait;
1629 ++ zwait();
1630 + zout(uPD98401_NOP,CMR);
1631 +- zwait;
1632 ++ zwait();
1633 + zout(uPD98401_NOP,CMR);
1634 + spin_unlock_irqrestore(&zatm_dev->lock, flags);
1635 + }
1636 + spin_lock_irqsave(&zatm_dev->lock, flags);
1637 +- zwait;
1638 ++ zwait();
1639 + zout(uPD98401_DEACT_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan <<
1640 + uPD98401_CHAN_ADDR_SHIFT),CMR);
1641 +- zwait;
1642 ++ zwait();
1643 + udelay(10); /* why oh why ... ? */
1644 + zout(uPD98401_CLOSE_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan <<
1645 + uPD98401_CHAN_ADDR_SHIFT),CMR);
1646 +- zwait;
1647 ++ zwait();
1648 + if (!(zin(CMR) & uPD98401_CHAN_ADDR))
1649 + printk(KERN_CRIT DEV_LABEL "(itf %d): can't close RX channel "
1650 + "%d\n",vcc->dev->number,zatm_vcc->rx_chan);
1651 +@@ -699,7 +699,7 @@ printk("NONONONOO!!!!\n");
1652 + skb_queue_tail(&zatm_vcc->tx_queue,skb);
1653 + DPRINTK("QRP=0x%08lx\n",zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+
1654 + uPD98401_TXVC_QRP));
1655 +- zwait;
1656 ++ zwait();
1657 + zout(uPD98401_TX_READY | (zatm_vcc->tx_chan <<
1658 + uPD98401_CHAN_ADDR_SHIFT),CMR);
1659 + spin_unlock_irqrestore(&zatm_dev->lock, flags);
1660 +@@ -891,12 +891,12 @@ static void close_tx(struct atm_vcc *vcc)
1661 + }
1662 + spin_lock_irqsave(&zatm_dev->lock, flags);
1663 + #if 0
1664 +- zwait;
1665 ++ zwait();
1666 + zout(uPD98401_DEACT_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR);
1667 + #endif
1668 +- zwait;
1669 ++ zwait();
1670 + zout(uPD98401_CLOSE_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR);
1671 +- zwait;
1672 ++ zwait();
1673 + if (!(zin(CMR) & uPD98401_CHAN_ADDR))
1674 + printk(KERN_CRIT DEV_LABEL "(itf %d): can't close TX channel "
1675 + "%d\n",vcc->dev->number,chan);
1676 +@@ -926,9 +926,9 @@ static int open_tx_first(struct atm_vcc *vcc)
1677 + zatm_vcc->tx_chan = 0;
1678 + if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
1679 + spin_lock_irqsave(&zatm_dev->lock, flags);
1680 +- zwait;
1681 ++ zwait();
1682 + zout(uPD98401_OPEN_CHAN,CMR);
1683 +- zwait;
1684 ++ zwait();
1685 + DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER));
1686 + chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT;
1687 + spin_unlock_irqrestore(&zatm_dev->lock, flags);
1688 +@@ -1557,7 +1557,7 @@ static void zatm_phy_put(struct atm_dev *dev,unsigned char value,
1689 + struct zatm_dev *zatm_dev;
1690 +
1691 + zatm_dev = ZATM_DEV(dev);
1692 +- zwait;
1693 ++ zwait();
1694 + zout(value,CER);
1695 + zout(uPD98401_IND_ACC | uPD98401_IA_B0 |
1696 + (uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR);
1697 +@@ -1569,10 +1569,10 @@ static unsigned char zatm_phy_get(struct atm_dev *dev,unsigned long addr)
1698 + struct zatm_dev *zatm_dev;
1699 +
1700 + zatm_dev = ZATM_DEV(dev);
1701 +- zwait;
1702 ++ zwait();
1703 + zout(uPD98401_IND_ACC | uPD98401_IA_B0 | uPD98401_IA_RW |
1704 + (uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR);
1705 +- zwait;
1706 ++ zwait();
1707 + return zin(CER) & 0xff;
1708 + }
1709 +
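The zatm change converts the busy-wait from a bare `while` macro into a function-like do { } while () macro. This is the standard statement-like-macro idiom: if the trailing semicolon is ever omitted, the bare form silently absorbs the next statement as its loop body, whereas the do/while form is one self-contained statement. A hedged sketch with a hypothetical busy() poll standing in for the driver's register read:

    #include <stdbool.h>
    #include <stdio.h>

    static int polls;
    static bool busy(void) { return ++polls < 3; } /* stand-in for zin(CMR) & BUSY */
    static void issue(void) { printf("command issued once, polls=%d\n", polls); }

    /* Old style: a bare while with no body of its own. If the trailing ';'
     * is ever forgotten, the *next* statement silently becomes the loop body:
     *
     *     BAD_WAIT
     *     issue();     <-- would run once per poll, not once after the wait
     */
    #define BAD_WAIT while (busy())

    /* New style: a self-contained single statement; a missing ';' is a
     * compile error instead of a silent behavior change. */
    #define WAIT() do { } while (busy())

    int main(void)
    {
        WAIT();
        issue();
        return 0;
    }
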
1710 +diff --git a/drivers/base/memory.c b/drivers/base/memory.c
1711 +index 85ee64d0a44e..ac1574a69610 100644
1712 +--- a/drivers/base/memory.c
1713 ++++ b/drivers/base/memory.c
1714 +@@ -228,7 +228,6 @@ static bool pages_correctly_probed(unsigned long start_pfn)
1715 + /*
1716 + * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
1717 + * OK to have direct references to sparsemem variables in here.
1718 +- * Must already be protected by mem_hotplug_begin().
1719 + */
1720 + static int
1721 + memory_block_action(unsigned long phys_index, unsigned long action, int online_type)
1722 +@@ -294,7 +293,6 @@ static int memory_subsys_online(struct device *dev)
1723 + if (mem->online_type < 0)
1724 + mem->online_type = MMOP_ONLINE_KEEP;
1725 +
1726 +- /* Already under protection of mem_hotplug_begin() */
1727 + ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
1728 +
1729 + /* clear online_type */
1730 +@@ -341,19 +339,11 @@ store_mem_state(struct device *dev,
1731 + goto err;
1732 + }
1733 +
1734 +- /*
1735 +- * Memory hotplug needs to hold mem_hotplug_begin() for probe to find
1736 +- * the correct memory block to online before doing device_online(dev),
1737 +- * which will take dev->mutex. Take the lock early to prevent an
1738 +- * inversion, memory_subsys_online() callbacks will be implemented by
1739 +- * assuming it's already protected.
1740 +- */
1741 +- mem_hotplug_begin();
1742 +-
1743 + switch (online_type) {
1744 + case MMOP_ONLINE_KERNEL:
1745 + case MMOP_ONLINE_MOVABLE:
1746 + case MMOP_ONLINE_KEEP:
1747 ++ /* mem->online_type is protected by device_hotplug_lock */
1748 + mem->online_type = online_type;
1749 + ret = device_online(&mem->dev);
1750 + break;
1751 +@@ -364,7 +354,6 @@ store_mem_state(struct device *dev,
1752 + ret = -EINVAL; /* should never happen */
1753 + }
1754 +
1755 +- mem_hotplug_done();
1756 + err:
1757 + unlock_device_hotplug();
1758 +
1759 +@@ -519,15 +508,20 @@ memory_probe_store(struct device *dev, struct device_attribute *attr,
1760 + if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
1761 + return -EINVAL;
1762 +
1763 ++ ret = lock_device_hotplug_sysfs();
1764 ++ if (ret)
1765 ++ return ret;
1766 ++
1767 + nid = memory_add_physaddr_to_nid(phys_addr);
1768 +- ret = add_memory(nid, phys_addr,
1769 +- MIN_MEMORY_BLOCK_SIZE * sections_per_block);
1770 ++ ret = __add_memory(nid, phys_addr,
1771 ++ MIN_MEMORY_BLOCK_SIZE * sections_per_block);
1772 +
1773 + if (ret)
1774 + goto out;
1775 +
1776 + ret = count;
1777 + out:
1778 ++ unlock_device_hotplug();
1779 + return ret;
1780 + }
1781 +
1782 +diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
1783 +index bf5be0bfaf77..52c292d0908a 100644
1784 +--- a/drivers/base/power/domain.c
1785 ++++ b/drivers/base/power/domain.c
1786 +@@ -467,6 +467,10 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
1787 + return -EAGAIN;
1788 + }
1789 +
1790 ++ /* Default to shallowest state. */
1791 ++ if (!genpd->gov)
1792 ++ genpd->state_idx = 0;
1793 ++
1794 + if (genpd->power_off) {
1795 + int ret;
1796 +
1797 +@@ -1686,6 +1690,8 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
1798 + ret = genpd_set_default_power_state(genpd);
1799 + if (ret)
1800 + return ret;
1801 ++ } else if (!gov) {
1802 ++ pr_warn("%s : no governor for states\n", genpd->name);
1803 + }
1804 +
1805 + device_initialize(&genpd->dev);
1806 +diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
1807 +index 3aaf6af3ec23..2158e130744e 100644
1808 +--- a/drivers/block/amiflop.c
1809 ++++ b/drivers/block/amiflop.c
1810 +@@ -1701,11 +1701,41 @@ static const struct block_device_operations floppy_fops = {
1811 + .check_events = amiga_check_events,
1812 + };
1813 +
1814 ++static struct gendisk *fd_alloc_disk(int drive)
1815 ++{
1816 ++ struct gendisk *disk;
1817 ++
1818 ++ disk = alloc_disk(1);
1819 ++ if (!disk)
1820 ++ goto out;
1821 ++
1822 ++ disk->queue = blk_init_queue(do_fd_request, &amiflop_lock);
1823 ++ if (IS_ERR(disk->queue)) {
1824 ++ disk->queue = NULL;
1825 ++ goto out_put_disk;
1826 ++ }
1827 ++
1828 ++ unit[drive].trackbuf = kmalloc(FLOPPY_MAX_SECTORS * 512, GFP_KERNEL);
1829 ++ if (!unit[drive].trackbuf)
1830 ++ goto out_cleanup_queue;
1831 ++
1832 ++ return disk;
1833 ++
1834 ++out_cleanup_queue:
1835 ++ blk_cleanup_queue(disk->queue);
1836 ++ disk->queue = NULL;
1837 ++out_put_disk:
1838 ++ put_disk(disk);
1839 ++out:
1840 ++ unit[drive].type->code = FD_NODRIVE;
1841 ++ return NULL;
1842 ++}
1843 ++
1844 + static int __init fd_probe_drives(void)
1845 + {
1846 + int drive,drives,nomem;
1847 +
1848 +- printk(KERN_INFO "FD: probing units\nfound ");
1849 ++ pr_info("FD: probing units\nfound");
1850 + drives=0;
1851 + nomem=0;
1852 + for(drive=0;drive<FD_MAX_UNITS;drive++) {
1853 +@@ -1713,27 +1743,17 @@ static int __init fd_probe_drives(void)
1854 + fd_probe(drive);
1855 + if (unit[drive].type->code == FD_NODRIVE)
1856 + continue;
1857 +- disk = alloc_disk(1);
1858 ++
1859 ++ disk = fd_alloc_disk(drive);
1860 + if (!disk) {
1861 +- unit[drive].type->code = FD_NODRIVE;
1862 ++ pr_cont(" no mem for fd%d", drive);
1863 ++ nomem = 1;
1864 + continue;
1865 + }
1866 + unit[drive].gendisk = disk;
1867 +-
1868 +- disk->queue = blk_init_queue(do_fd_request, &amiflop_lock);
1869 +- if (!disk->queue) {
1870 +- unit[drive].type->code = FD_NODRIVE;
1871 +- continue;
1872 +- }
1873 +-
1874 + drives++;
1875 +- if ((unit[drive].trackbuf = kmalloc(FLOPPY_MAX_SECTORS * 512, GFP_KERNEL)) == NULL) {
1876 +- printk("no mem for ");
1877 +- unit[drive].type = &drive_types[num_dr_types - 1]; /* FD_NODRIVE */
1878 +- drives--;
1879 +- nomem = 1;
1880 +- }
1881 +- printk("fd%d ",drive);
1882 ++
1883 ++ pr_cont(" fd%d",drive);
1884 + disk->major = FLOPPY_MAJOR;
1885 + disk->first_minor = drive;
1886 + disk->fops = &floppy_fops;
1887 +@@ -1744,11 +1764,11 @@ static int __init fd_probe_drives(void)
1888 + }
1889 + if ((drives > 0) || (nomem == 0)) {
1890 + if (drives == 0)
1891 +- printk("no drives");
1892 +- printk("\n");
1893 ++ pr_cont(" no drives");
1894 ++ pr_cont("\n");
1895 + return drives;
1896 + }
1897 +- printk("\n");
1898 ++ pr_cont("\n");
1899 + return -ENOMEM;
1900 + }
1901 +
1902 +@@ -1831,30 +1851,6 @@ out_blkdev:
1903 + return ret;
1904 + }
1905 +
1906 +-#if 0 /* not safe to unload */
1907 +-static int __exit amiga_floppy_remove(struct platform_device *pdev)
1908 +-{
1909 +- int i;
1910 +-
1911 +- for( i = 0; i < FD_MAX_UNITS; i++) {
1912 +- if (unit[i].type->code != FD_NODRIVE) {
1913 +- struct request_queue *q = unit[i].gendisk->queue;
1914 +- del_gendisk(unit[i].gendisk);
1915 +- put_disk(unit[i].gendisk);
1916 +- kfree(unit[i].trackbuf);
1917 +- if (q)
1918 +- blk_cleanup_queue(q);
1919 +- }
1920 +- }
1921 +- blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
1922 +- free_irq(IRQ_AMIGA_CIAA_TB, NULL);
1923 +- free_irq(IRQ_AMIGA_DSKBLK, NULL);
1924 +- custom.dmacon = DMAF_DISK; /* disable DMA */
1925 +- amiga_chip_free(raw_buf);
1926 +- unregister_blkdev(FLOPPY_MAJOR, "fd");
1927 +-}
1928 +-#endif
1929 +-
1930 + static struct platform_driver amiga_floppy_driver = {
1931 + .driver = {
1932 + .name = "amiga-floppy",
1933 +diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
1934 +index 867841c56a6d..996b1ef5f076 100644
1935 +--- a/drivers/block/nbd.c
1936 ++++ b/drivers/block/nbd.c
1937 +@@ -945,6 +945,7 @@ static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
1938 + if (sock->ops->shutdown == sock_no_shutdown) {
1939 + dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
1940 + *err = -EINVAL;
1941 ++ sockfd_put(sock);
1942 + return NULL;
1943 + }
1944 +
1945 +@@ -983,14 +984,15 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
1946 + sockfd_put(sock);
1947 + return -ENOMEM;
1948 + }
1949 ++
1950 ++ config->socks = socks;
1951 ++
1952 + nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
1953 + if (!nsock) {
1954 + sockfd_put(sock);
1955 + return -ENOMEM;
1956 + }
1957 +
1958 +- config->socks = socks;
1959 +-
1960 + nsock->fallback_index = -1;
1961 + nsock->dead = false;
1962 + mutex_init(&nsock->tx_lock);
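The nbd hunk above moves config->socks = socks to before the nsock allocation that can fail. The reason is a leak on the error path: once krealloc() returns the grown array, it must be published immediately, or the -ENOMEM branch frees nothing and the enlarged array is only reachable through a local. A generic grow-array sketch of the same pattern (plain malloc/realloc stand-ins, not the nbd structures):

    #include <stdio.h>
    #include <stdlib.h>

    struct config { int **socks; int num; };   /* stand-in for nbd_config */

    static int add_socket(struct config *c, int fd)
    {
        int **socks, *nsock;

        socks = realloc(c->socks, (c->num + 1) * sizeof(*socks));
        if (!socks)
            return -1;

        c->socks = socks;   /* publish the grown array immediately... */

        nsock = malloc(sizeof(*nsock));
        if (!nsock)
            return -1;      /* ...so this failure path leaks nothing */

        *nsock = fd;
        c->socks[c->num++] = nsock;
        return 0;
    }

    int main(void)
    {
        struct config c = { NULL, 0 };

        printf("add: %d, num=%d\n", add_socket(&c, 3), c.num);
        return 0;
    }
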
1963 +diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
1964 +index 87b9e7fbf062..27323fa23997 100644
1965 +--- a/drivers/block/skd_main.c
1966 ++++ b/drivers/block/skd_main.c
1967 +@@ -1416,7 +1416,7 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
1968 +
1969 + case SKD_CHECK_STATUS_BUSY_IMMINENT:
1970 + skd_log_skreq(skdev, skreq, "retry(busy)");
1971 +- blk_requeue_request(skdev->queue, req);
1972 ++ blk_mq_requeue_request(req, true);
1973 + dev_info(&skdev->pdev->dev, "drive BUSY imminent\n");
1974 + skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
1975 + skdev->timer_countdown = SKD_TIMER_MINUTES(20);
1976 +@@ -1426,7 +1426,7 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
1977 + case SKD_CHECK_STATUS_REQUEUE_REQUEST:
1978 + if ((unsigned long) ++req->special < SKD_MAX_RETRIES) {
1979 + skd_log_skreq(skdev, skreq, "retry");
1980 +- blk_requeue_request(skdev->queue, req);
1981 ++ blk_mq_requeue_request(req, true);
1982 + break;
1983 + }
1984 + /* fall through */
1985 +diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
1986 +index 66fe1e6dc631..27829273f3c9 100644
1987 +--- a/drivers/bluetooth/hci_bcsp.c
1988 ++++ b/drivers/bluetooth/hci_bcsp.c
1989 +@@ -606,6 +606,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
1990 + if (*ptr == 0xc0) {
1991 + BT_ERR("Short BCSP packet");
1992 + kfree_skb(bcsp->rx_skb);
1993 ++ bcsp->rx_skb = NULL;
1994 + bcsp->rx_state = BCSP_W4_PKT_START;
1995 + bcsp->rx_count = 0;
1996 + } else
1997 +@@ -621,6 +622,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
1998 + bcsp->rx_skb->data[2])) != bcsp->rx_skb->data[3]) {
1999 + BT_ERR("Error in BCSP hdr checksum");
2000 + kfree_skb(bcsp->rx_skb);
2001 ++ bcsp->rx_skb = NULL;
2002 + bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
2003 + bcsp->rx_count = 0;
2004 + continue;
2005 +@@ -645,6 +647,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
2006 + bscp_get_crc(bcsp));
2007 +
2008 + kfree_skb(bcsp->rx_skb);
2009 ++ bcsp->rx_skb = NULL;
2010 + bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
2011 + bcsp->rx_count = 0;
2012 + continue;
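The three hci_bcsp hunks apply the usual free-then-NULL discipline: bcsp->rx_skb lives on in the driver state after kfree_skb(), so a later error path could free or parse the dangling packet again. A generic sketch of the pattern (plain malloc/free stand-ins, not the skb API):

    #include <stdio.h>
    #include <stdlib.h>

    struct parser {
        char *pkt;   /* stand-in for bcsp->rx_skb */
    };

    static void abort_packet(struct parser *p)
    {
        free(p->pkt);
        p->pkt = NULL;   /* the fix: later paths see "no packet in flight" */
    }

    int main(void)
    {
        struct parser p = { malloc(16) };

        abort_packet(&p);   /* first error: packet dropped */
        abort_packet(&p);   /* second error: free(NULL) is a safe no-op */
        printf("no double free: pkt=%p\n", (void *)p.pkt);
        return 0;
    }
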
2013 +diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
2014 +index 27a82a559ab9..933268b8d6a5 100644
2015 +--- a/drivers/cdrom/cdrom.c
2016 ++++ b/drivers/cdrom/cdrom.c
2017 +@@ -411,10 +411,10 @@ static int cdrom_get_disc_info(struct cdrom_device_info *cdi,
2018 + * hack to have the capability flags defined const, while we can still
2019 + * change it here without gcc complaining at every line.
2020 + */
2021 +-#define ENSURE(call, bits) \
2022 +-do { \
2023 +- if (cdo->call == NULL) \
2024 +- *change_capability &= ~(bits); \
2025 ++#define ENSURE(cdo, call, bits) \
2026 ++do { \
2027 ++ if (cdo->call == NULL) \
2028 ++ WARN_ON_ONCE((cdo)->capability & (bits)); \
2029 + } while (0)
2030 +
2031 + /*
2032 +@@ -590,7 +590,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
2033 + {
2034 + static char banner_printed;
2035 + const struct cdrom_device_ops *cdo = cdi->ops;
2036 +- int *change_capability = (int *)&cdo->capability; /* hack */
2037 +
2038 + cd_dbg(CD_OPEN, "entering register_cdrom\n");
2039 +
2040 +@@ -602,16 +601,16 @@ int register_cdrom(struct cdrom_device_info *cdi)
2041 + cdrom_sysctl_register();
2042 + }
2043 +
2044 +- ENSURE(drive_status, CDC_DRIVE_STATUS);
2045 ++ ENSURE(cdo, drive_status, CDC_DRIVE_STATUS);
2046 + if (cdo->check_events == NULL && cdo->media_changed == NULL)
2047 +- *change_capability = ~(CDC_MEDIA_CHANGED | CDC_SELECT_DISC);
2048 +- ENSURE(tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY);
2049 +- ENSURE(lock_door, CDC_LOCK);
2050 +- ENSURE(select_speed, CDC_SELECT_SPEED);
2051 +- ENSURE(get_last_session, CDC_MULTI_SESSION);
2052 +- ENSURE(get_mcn, CDC_MCN);
2053 +- ENSURE(reset, CDC_RESET);
2054 +- ENSURE(generic_packet, CDC_GENERIC_PACKET);
2055 ++ WARN_ON_ONCE(cdo->capability & (CDC_MEDIA_CHANGED | CDC_SELECT_DISC));
2056 ++ ENSURE(cdo, tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY);
2057 ++ ENSURE(cdo, lock_door, CDC_LOCK);
2058 ++ ENSURE(cdo, select_speed, CDC_SELECT_SPEED);
2059 ++ ENSURE(cdo, get_last_session, CDC_MULTI_SESSION);
2060 ++ ENSURE(cdo, get_mcn, CDC_MCN);
2061 ++ ENSURE(cdo, reset, CDC_RESET);
2062 ++ ENSURE(cdo, generic_packet, CDC_GENERIC_PACKET);
2063 + cdi->mc_flags = 0;
2064 + cdi->options = CDO_USE_FFLAGS;
2065 +
2066 +diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
2067 +index c55f6aeb4227..b353a5e5f8b1 100644
2068 +--- a/drivers/char/virtio_console.c
2069 ++++ b/drivers/char/virtio_console.c
2070 +@@ -1349,24 +1349,24 @@ static void set_console_size(struct port *port, u16 rows, u16 cols)
2071 + port->cons.ws.ws_col = cols;
2072 + }
2073 +
2074 +-static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
2075 ++static int fill_queue(struct virtqueue *vq, spinlock_t *lock)
2076 + {
2077 + struct port_buffer *buf;
2078 +- unsigned int nr_added_bufs;
2079 ++ int nr_added_bufs;
2080 + int ret;
2081 +
2082 + nr_added_bufs = 0;
2083 + do {
2084 + buf = alloc_buf(vq->vdev, PAGE_SIZE, 0);
2085 + if (!buf)
2086 +- break;
2087 ++ return -ENOMEM;
2088 +
2089 + spin_lock_irq(lock);
2090 + ret = add_inbuf(vq, buf);
2091 + if (ret < 0) {
2092 + spin_unlock_irq(lock);
2093 + free_buf(buf, true);
2094 +- break;
2095 ++ return ret;
2096 + }
2097 + nr_added_bufs++;
2098 + spin_unlock_irq(lock);
2099 +@@ -1386,7 +1386,6 @@ static int add_port(struct ports_device *portdev, u32 id)
2100 + char debugfs_name[16];
2101 + struct port *port;
2102 + dev_t devt;
2103 +- unsigned int nr_added_bufs;
2104 + int err;
2105 +
2106 + port = kmalloc(sizeof(*port), GFP_KERNEL);
2107 +@@ -1445,11 +1444,13 @@ static int add_port(struct ports_device *portdev, u32 id)
2108 + spin_lock_init(&port->outvq_lock);
2109 + init_waitqueue_head(&port->waitqueue);
2110 +
2111 +- /* Fill the in_vq with buffers so the host can send us data. */
2112 +- nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
2113 +- if (!nr_added_bufs) {
2114 ++ /* We can safely ignore ENOSPC because it means
2115 ++ * the queue already has buffers. Buffers are removed
2116 ++ * only by virtcons_remove(), not by unplug_port().
2117 ++ */
2118 ++ err = fill_queue(port->in_vq, &port->inbuf_lock);
2119 ++ if (err < 0 && err != -ENOSPC) {
2120 + dev_err(port->dev, "Error allocating inbufs\n");
2121 +- err = -ENOMEM;
2122 + goto free_device;
2123 + }
2124 +
2125 +@@ -2083,14 +2084,11 @@ static int virtcons_probe(struct virtio_device *vdev)
2126 + INIT_WORK(&portdev->control_work, &control_work_handler);
2127 +
2128 + if (multiport) {
2129 +- unsigned int nr_added_bufs;
2130 +-
2131 + spin_lock_init(&portdev->c_ivq_lock);
2132 + spin_lock_init(&portdev->c_ovq_lock);
2133 +
2134 +- nr_added_bufs = fill_queue(portdev->c_ivq,
2135 +- &portdev->c_ivq_lock);
2136 +- if (!nr_added_bufs) {
2137 ++ err = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
2138 ++ if (err < 0) {
2139 + dev_err(&vdev->dev,
2140 + "Error allocating buffers for control queue\n");
2141 + /*
2142 +@@ -2101,7 +2099,7 @@ static int virtcons_probe(struct virtio_device *vdev)
2143 + VIRTIO_CONSOLE_DEVICE_READY, 0);
2144 + /* Device was functional: we need full cleanup. */
2145 + virtcons_remove(vdev);
2146 +- return -ENOMEM;
2147 ++ return err;
2148 + }
2149 + } else {
2150 + /*
2151 +diff --git a/drivers/clk/at91/clk-audio-pll.c b/drivers/clk/at91/clk-audio-pll.c
2152 +index da7bafcfbe70..b3eaf654fac9 100644
2153 +--- a/drivers/clk/at91/clk-audio-pll.c
2154 ++++ b/drivers/clk/at91/clk-audio-pll.c
2155 +@@ -509,7 +509,7 @@ static void __init of_sama5d2_clk_audio_pll_pad_setup(struct device_node *np)
2156 +
2157 + static void __init of_sama5d2_clk_audio_pll_pmc_setup(struct device_node *np)
2158 + {
2159 +- struct clk_audio_pad *apmc_ck;
2160 ++ struct clk_audio_pmc *apmc_ck;
2161 + struct clk_init_data init = {};
2162 +
2163 + apmc_ck = kzalloc(sizeof(*apmc_ck), GFP_KERNEL);
2164 +diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
2165 +index 0fc75c395957..d083b860f083 100644
2166 +--- a/drivers/clk/mmp/clk-of-mmp2.c
2167 ++++ b/drivers/clk/mmp/clk-of-mmp2.c
2168 +@@ -227,8 +227,8 @@ static struct mmp_param_gate_clk apmu_gate_clks[] = {
2169 + /* The gate clocks has mux parent. */
2170 + {MMP2_CLK_SDH0, "sdh0_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH0, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
2171 + {MMP2_CLK_SDH1, "sdh1_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
2172 +- {MMP2_CLK_SDH1, "sdh2_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH2, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
2173 +- {MMP2_CLK_SDH1, "sdh3_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH3, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
2174 ++ {MMP2_CLK_SDH2, "sdh2_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH2, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
2175 ++ {MMP2_CLK_SDH3, "sdh3_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH3, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
2176 + {MMP2_CLK_DISP0, "disp0_clk", "disp0_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1b, 0x1b, 0x0, 0, &disp0_lock},
2177 + {MMP2_CLK_DISP0_SPHY, "disp0_sphy_clk", "disp0_sphy_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1024, 0x1024, 0x0, 0, &disp0_lock},
2178 + {MMP2_CLK_DISP1, "disp1_clk", "disp1_div", CLK_SET_RATE_PARENT, APMU_DISP1, 0x1b, 0x1b, 0x0, 0, &disp1_lock},
2179 +diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
2180 +index ee9c12cf3f08..2a6098179921 100644
2181 +--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
2182 ++++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
2183 +@@ -158,7 +158,12 @@ static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_gpu_clk, "pll-gpu",
2184 + #define SUN50I_A64_PLL_MIPI_REG 0x040
2185 +
2186 + static struct ccu_nkm pll_mipi_clk = {
2187 +- .enable = BIT(31),
2188 ++ /*
2189 ++ * Bits 23 and 22 are called "LDO{1,2}_EN" in the SoC's
2190 ++ * user manual, and experiments show the PLL doesn't work
2191 ++ * unless these bits are set.
2192 ++ */
2193 ++ .enable = BIT(31) | BIT(23) | BIT(22),
2194 + .lock = BIT(28),
2195 + .n = _SUNXI_CCU_MULT(8, 4),
2196 + .k = _SUNXI_CCU_MULT_MIN(4, 2, 2),
2197 +diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
2198 +index cc857d4d4a86..68551effb5ca 100644
2199 +--- a/drivers/clk/tegra/clk-tegra20.c
2200 ++++ b/drivers/clk/tegra/clk-tegra20.c
2201 +@@ -578,7 +578,6 @@ static struct tegra_clk tegra20_clks[tegra_clk_max] __initdata = {
2202 + [tegra_clk_afi] = { .dt_id = TEGRA20_CLK_AFI, .present = true },
2203 + [tegra_clk_fuse] = { .dt_id = TEGRA20_CLK_FUSE, .present = true },
2204 + [tegra_clk_kfuse] = { .dt_id = TEGRA20_CLK_KFUSE, .present = true },
2205 +- [tegra_clk_emc] = { .dt_id = TEGRA20_CLK_EMC, .present = true },
2206 + };
2207 +
2208 + static unsigned long tegra20_clk_measure_input_freq(void)
2209 +@@ -799,6 +798,31 @@ static struct tegra_periph_init_data tegra_periph_nodiv_clk_list[] = {
2210 + TEGRA_INIT_DATA_NODIV("disp2", mux_pllpdc_clkm, CLK_SOURCE_DISP2, 30, 2, 26, 0, TEGRA20_CLK_DISP2),
2211 + };
2212 +
2213 ++static void __init tegra20_emc_clk_init(void)
2214 ++{
2215 ++ struct clk *clk;
2216 ++
2217 ++ clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
2218 ++ ARRAY_SIZE(mux_pllmcp_clkm),
2219 ++ CLK_SET_RATE_NO_REPARENT,
2220 ++ clk_base + CLK_SOURCE_EMC,
2221 ++ 30, 2, 0, &emc_lock);
2222 ++
2223 ++ clk = tegra_clk_register_mc("mc", "emc_mux", clk_base + CLK_SOURCE_EMC,
2224 ++ &emc_lock);
2225 ++ clks[TEGRA20_CLK_MC] = clk;
2226 ++
2227 ++ /*
2228 ++ * Note that the 'emc_mux' source and the 'emc' rate shouldn't be
2229 ++ * changed at the same time due to a HW bug; this won't happen here
2230 ++ * because we're defining 'emc_mux' and 'emc' as distinct clocks.
2231 ++ */
2232 ++ clk = tegra_clk_register_divider("emc", "emc_mux",
2233 ++ clk_base + CLK_SOURCE_EMC, CLK_IS_CRITICAL,
2234 ++ TEGRA_DIVIDER_INT, 0, 8, 1, &emc_lock);
2235 ++ clks[TEGRA20_CLK_EMC] = clk;
2236 ++}
2237 ++
2238 + static void __init tegra20_periph_clk_init(void)
2239 + {
2240 + struct tegra_periph_init_data *data;
2241 +@@ -812,15 +836,7 @@ static void __init tegra20_periph_clk_init(void)
2242 + clks[TEGRA20_CLK_AC97] = clk;
2243 +
2244 + /* emc */
2245 +- clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
2246 +- ARRAY_SIZE(mux_pllmcp_clkm),
2247 +- CLK_SET_RATE_NO_REPARENT,
2248 +- clk_base + CLK_SOURCE_EMC,
2249 +- 30, 2, 0, &emc_lock);
2250 +-
2251 +- clk = tegra_clk_register_mc("mc", "emc_mux", clk_base + CLK_SOURCE_EMC,
2252 +- &emc_lock);
2253 +- clks[TEGRA20_CLK_MC] = clk;
2254 ++ tegra20_emc_clk_init();
2255 +
2256 + /* dsi */
2257 + clk = tegra_clk_register_periph_gate("dsi", "pll_d", 0, clk_base, 0,
2258 +diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
2259 +index 080bfa24863e..7264e9731034 100644
2260 +--- a/drivers/clk/tegra/clk-tegra210.c
2261 ++++ b/drivers/clk/tegra/clk-tegra210.c
2262 +@@ -2603,7 +2603,7 @@ static struct tegra210_domain_mbist_war tegra210_pg_mbist_war[] = {
2263 + [TEGRA_POWERGATE_MPE] = {
2264 + .handle_lvl2_ovr = tegra210_generic_mbist_war,
2265 + .lvl2_offset = LVL2_CLK_GATE_OVRE,
2266 +- .lvl2_mask = BIT(2),
2267 ++ .lvl2_mask = BIT(29),
2268 + },
2269 + [TEGRA_POWERGATE_SOR] = {
2270 + .handle_lvl2_ovr = tegra210_generic_mbist_war,
2271 +@@ -2654,14 +2654,14 @@ static struct tegra210_domain_mbist_war tegra210_pg_mbist_war[] = {
2272 + .num_clks = ARRAY_SIZE(nvdec_slcg_clkids),
2273 + .clk_init_data = nvdec_slcg_clkids,
2274 + .handle_lvl2_ovr = tegra210_generic_mbist_war,
2275 +- .lvl2_offset = LVL2_CLK_GATE_OVRC,
2276 ++ .lvl2_offset = LVL2_CLK_GATE_OVRE,
2277 + .lvl2_mask = BIT(9) | BIT(31),
2278 + },
2279 + [TEGRA_POWERGATE_NVJPG] = {
2280 + .num_clks = ARRAY_SIZE(nvjpg_slcg_clkids),
2281 + .clk_init_data = nvjpg_slcg_clkids,
2282 + .handle_lvl2_ovr = tegra210_generic_mbist_war,
2283 +- .lvl2_offset = LVL2_CLK_GATE_OVRC,
2284 ++ .lvl2_offset = LVL2_CLK_GATE_OVRE,
2285 + .lvl2_mask = BIT(9) | BIT(31),
2286 + },
2287 + [TEGRA_POWERGATE_AUD] = {
2288 +diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
2289 +index ace5ec65e36f..9d8d64f706e0 100644
2290 +--- a/drivers/cpufreq/cpufreq.c
2291 ++++ b/drivers/cpufreq/cpufreq.c
2292 +@@ -909,6 +909,9 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
2293 + struct freq_attr *fattr = to_attr(attr);
2294 + ssize_t ret;
2295 +
2296 ++ if (!fattr->show)
2297 ++ return -EIO;
2298 ++
2299 + down_read(&policy->rwsem);
2300 + ret = fattr->show(policy, buf);
2301 + up_read(&policy->rwsem);
2302 +@@ -923,6 +926,9 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
2303 + struct freq_attr *fattr = to_attr(attr);
2304 + ssize_t ret = -EINVAL;
2305 +
2306 ++ if (!fattr->store)
2307 ++ return -EIO;
2308 ++
2309 + /*
2310 + * cpus_read_trylock() is used here to work around a circular lock
2311 + * dependency problem with respect to the cpufreq_register_driver().
2312 +diff --git a/drivers/crypto/ccree/cc_hw_queue_defs.h b/drivers/crypto/ccree/cc_hw_queue_defs.h
2313 +index a091ae57f902..45985b955d2c 100644
2314 +--- a/drivers/crypto/ccree/cc_hw_queue_defs.h
2315 ++++ b/drivers/crypto/ccree/cc_hw_queue_defs.h
2316 +@@ -449,8 +449,7 @@ static inline void set_flow_mode(struct cc_hw_desc *pdesc,
2317 + * @pdesc: pointer HW descriptor struct
2318 + * @mode: Any one of the modes defined in [CC7x-DESC]
2319 + */
2320 +-static inline void set_cipher_mode(struct cc_hw_desc *pdesc,
2321 +- enum drv_cipher_mode mode)
2322 ++static inline void set_cipher_mode(struct cc_hw_desc *pdesc, int mode)
2323 + {
2324 + pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_MODE, mode);
2325 + }
2326 +@@ -461,8 +460,7 @@ static inline void set_cipher_mode(struct cc_hw_desc *pdesc,
2327 + * @pdesc: pointer HW descriptor struct
2328 + * @mode: Any one of the modes defined in [CC7x-DESC]
2329 + */
2330 +-static inline void set_cipher_config0(struct cc_hw_desc *pdesc,
2331 +- enum drv_crypto_direction mode)
2332 ++static inline void set_cipher_config0(struct cc_hw_desc *pdesc, int mode)
2333 + {
2334 + pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_CONF0, mode);
2335 + }
2336 +diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
2337 +index e2ab46bfa666..d350253d161a 100644
2338 +--- a/drivers/devfreq/devfreq.c
2339 ++++ b/drivers/devfreq/devfreq.c
2340 +@@ -257,7 +257,7 @@ static struct devfreq_governor *try_then_request_governor(const char *name)
2341 + /* Restore previous state before return */
2342 + mutex_lock(&devfreq_list_lock);
2343 + if (err)
2344 +- return ERR_PTR(err);
2345 ++ return (err < 0) ? ERR_PTR(err) : ERR_PTR(-EINVAL);
2346 +
2347 + governor = find_devfreq_governor(name);
2348 + }
2349 +diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c
2350 +index c009d94f40c5..34be60fe6892 100644
2351 +--- a/drivers/edac/thunderx_edac.c
2352 ++++ b/drivers/edac/thunderx_edac.c
2353 +@@ -1884,7 +1884,7 @@ static irqreturn_t thunderx_l2c_threaded_isr(int irq, void *irq_id)
2354 + default:
2355 + dev_err(&l2c->pdev->dev, "Unsupported device: %04x\n",
2356 + l2c->pdev->device);
2357 +- return IRQ_NONE;
2358 ++ goto err_free;
2359 + }
2360 +
2361 + while (CIRC_CNT(l2c->ring_head, l2c->ring_tail,
2362 +@@ -1906,7 +1906,7 @@ static irqreturn_t thunderx_l2c_threaded_isr(int irq, void *irq_id)
2363 + l2c->ring_tail++;
2364 + }
2365 +
2366 +- return IRQ_HANDLED;
2367 ++ ret = IRQ_HANDLED;
2368 +
2369 + err_free:
2370 + kfree(other);
2371 +diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
2372 +index c8f169bf2e27..62337be07afc 100644
2373 +--- a/drivers/firmware/google/gsmi.c
2374 ++++ b/drivers/firmware/google/gsmi.c
2375 +@@ -480,11 +480,10 @@ static ssize_t eventlog_write(struct file *filp, struct kobject *kobj,
2376 + if (count < sizeof(u32))
2377 + return -EINVAL;
2378 + param.type = *(u32 *)buf;
2379 +- count -= sizeof(u32);
2380 + buf += sizeof(u32);
2381 +
2382 + /* The remaining buffer is the data payload */
2383 +- if (count > gsmi_dev.data_buf->length)
2384 ++ if ((count - sizeof(u32)) > gsmi_dev.data_buf->length)
2385 + return -EINVAL;
2386 + param.data_len = count - sizeof(u32);
2387 +
2388 +@@ -504,7 +503,7 @@ static ssize_t eventlog_write(struct file *filp, struct kobject *kobj,
2389 +
2390 + spin_unlock_irqrestore(&gsmi_dev.lock, flags);
2391 +
2392 +- return rc;
2393 ++ return (rc == 0) ? count : rc;
2394 +
2395 + }
2396 +
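The gsmi hunks fix two contract bugs in the eventlog write handler: count was decremented before being checked against the buffer limit, and the success path returned rc == 0, which userspace interprets as "zero bytes written" and retries indefinitely. A sketch of the corrected convention, with a hypothetical handler standing in for the real SMI call: return bytes consumed on success, a negative errno on failure:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>

    #define DATA_BUF_LEN 64   /* stand-in for gsmi_dev.data_buf->length */

    /* Hypothetical handler following the kernel convention: return the
     * number of bytes consumed on success, a negative errno on failure. */
    static ssize_t eventlog_write(const char *buf, size_t count)
    {
        uint32_t type;
        int rc;

        if (count < sizeof(type))
            return -EINVAL;
        memcpy(&type, buf, sizeof(type));
        (void)type;   /* the event type would be handed to firmware here */

        /* Check the payload length WITHOUT mutating count first. */
        if (count - sizeof(type) > DATA_BUF_LEN)
            return -EINVAL;

        rc = 0;       /* stand-in for the firmware call's status */
        return rc == 0 ? (ssize_t)count : rc; /* never return 0 on success */
    }

    int main(void)
    {
        char msg[8] = { 0 };

        printf("consumed %zd bytes\n", eventlog_write(msg, sizeof(msg)));
        return 0;
    }
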
2397 +diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c
2398 +index ac6c1c0548b6..78254ed93206 100644
2399 +--- a/drivers/gpio/gpio-max77620.c
2400 ++++ b/drivers/gpio/gpio-max77620.c
2401 +@@ -163,13 +163,13 @@ static int max77620_gpio_set_debounce(struct max77620_gpio *mgpio,
2402 + case 0:
2403 + val = MAX77620_CNFG_GPIO_DBNC_None;
2404 + break;
2405 +- case 1000 ... 8000:
2406 ++ case 1 ... 8000:
2407 + val = MAX77620_CNFG_GPIO_DBNC_8ms;
2408 + break;
2409 +- case 9000 ... 16000:
2410 ++ case 8001 ... 16000:
2411 + val = MAX77620_CNFG_GPIO_DBNC_16ms;
2412 + break;
2413 +- case 17000 ... 32000:
2414 ++ case 16001 ... 32000:
2415 + val = MAX77620_CNFG_GPIO_DBNC_32ms;
2416 + break;
2417 + default:
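The max77620 change rewrites the switch so the debounce buckets are contiguous: previously values such as 500 or 8500 microseconds fell into the holes between the case ranges and hit the -EINVAL default. A compact sketch of the corrected bucketing, using the same GNU case-range extension (case LO ... HI:) the driver relies on; the enum values here are illustrative, not the real register bits:

    #include <errno.h>
    #include <stdio.h>

    /* Illustrative register values, not the real MAX77620 bit patterns. */
    enum { DBNC_NONE, DBNC_8MS, DBNC_16MS, DBNC_32MS };

    static int debounce_to_val(unsigned int usec)
    {
        switch (usec) {
        case 0:
            return DBNC_NONE;
        case 1 ... 8000:          /* anything up to 8ms rounds up to 8ms  */
            return DBNC_8MS;
        case 8001 ... 16000:      /* contiguous: no unreachable gaps left */
            return DBNC_16MS;
        case 16001 ... 32000:
            return DBNC_32MS;
        default:
            return -EINVAL;
        }
    }

    int main(void)
    {
        printf("%d %d %d\n", debounce_to_val(500),
               debounce_to_val(8500), debounce_to_val(40000));
        return 0;
    }
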
2418 +diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
2419 +index c7c505095402..6bf032e81e39 100644
2420 +--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
2421 ++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
2422 +@@ -3472,18 +3472,31 @@ static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
2423 +
2424 + static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
2425 + {
2426 ++ struct amdgpu_device *adev = hwmgr->adev;
2427 + int i;
2428 + u32 tmp = 0;
2429 +
2430 + if (!query)
2431 + return -EINVAL;
2432 +
2433 +- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
2434 +- tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
2435 +- *query = tmp;
2436 ++ /*
2437 ++ * PPSMC_MSG_GetCurrPkgPwr is not supported on:
2438 ++ * - Hawaii
2439 ++ * - Bonaire
2440 ++ * - Fiji
2441 ++ * - Tonga
2442 ++ */
2443 ++ if ((adev->asic_type != CHIP_HAWAII) &&
2444 ++ (adev->asic_type != CHIP_BONAIRE) &&
2445 ++ (adev->asic_type != CHIP_FIJI) &&
2446 ++ (adev->asic_type != CHIP_TONGA)) {
2447 ++ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
2448 ++ tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
2449 ++ *query = tmp;
2450 +
2451 +- if (tmp != 0)
2452 +- return 0;
2453 ++ if (tmp != 0)
2454 ++ return 0;
2455 ++ }
2456 +
2457 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
2458 + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
2459 +diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
2460 +index 2c9b284036d1..961abb6ea18e 100644
2461 +--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
2462 ++++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
2463 +@@ -691,8 +691,28 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
2464 + i915_gem_gtt_finish_pages(obj, pages);
2465 +
2466 + for_each_sgt_page(page, sgt_iter, pages) {
2467 +- if (obj->mm.dirty)
2468 ++ if (obj->mm.dirty && trylock_page(page)) {
2469 ++ /*
2470 ++ * As this may not be anonymous memory (e.g. shmem)
2471 ++ * but exist on a real mapping, we have to lock
2472 ++ * the page in order to dirty it -- holding
2473 ++ * the page reference is not sufficient to
2474 ++ * prevent the inode from being truncated.
2475 ++ * Play safe and take the lock.
2476 ++ *
2477 ++ * However...!
2478 ++ *
2479 ++ * The mmu-notifier can be invalidated for a
2480 ++ * migrate_page that is already holding the lock
2481 ++ * on the page. Such a try_to_unmap() will result
2482 ++ * in us calling put_pages() and so recursively try
2483 ++ * to lock the page. We avoid that deadlock with
2484 ++ * a trylock_page() and in exchange we risk missing
2485 ++ * some page dirtying.
2486 ++ */
2487 + set_page_dirty(page);
2488 ++ unlock_page(page);
2489 ++ }
2490 +
2491 + mark_page_accessed(page);
2492 + put_page(page);
2493 +diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
2494 +index d6c8f8fdfda5..b7fda69342fc 100644
2495 +--- a/drivers/gpu/drm/i915/i915_pmu.c
2496 ++++ b/drivers/gpu/drm/i915/i915_pmu.c
2497 +@@ -827,8 +827,8 @@ create_event_attributes(struct drm_i915_private *i915)
2498 + const char *name;
2499 + const char *unit;
2500 + } events[] = {
2501 +- __event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "MHz"),
2502 +- __event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "MHz"),
2503 ++ __event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "M"),
2504 ++ __event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "M"),
2505 + __event(I915_PMU_INTERRUPTS, "interrupts", NULL),
2506 + __event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"),
2507 + };
2508 +diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
2509 +index bc26ec822e26..dd0687e36a47 100644
2510 +--- a/drivers/i2c/busses/i2c-uniphier-f.c
2511 ++++ b/drivers/i2c/busses/i2c-uniphier-f.c
2512 +@@ -98,6 +98,7 @@ struct uniphier_fi2c_priv {
2513 + unsigned int flags;
2514 + unsigned int busy_cnt;
2515 + unsigned int clk_cycle;
2516 ++ spinlock_t lock; /* IRQ synchronization */
2517 + };
2518 +
2519 + static void uniphier_fi2c_fill_txfifo(struct uniphier_fi2c_priv *priv,
2520 +@@ -142,9 +143,10 @@ static void uniphier_fi2c_set_irqs(struct uniphier_fi2c_priv *priv)
2521 + writel(priv->enabled_irqs, priv->membase + UNIPHIER_FI2C_IE);
2522 + }
2523 +
2524 +-static void uniphier_fi2c_clear_irqs(struct uniphier_fi2c_priv *priv)
2525 ++static void uniphier_fi2c_clear_irqs(struct uniphier_fi2c_priv *priv,
2526 ++ u32 mask)
2527 + {
2528 +- writel(-1, priv->membase + UNIPHIER_FI2C_IC);
2529 ++ writel(mask, priv->membase + UNIPHIER_FI2C_IC);
2530 + }
2531 +
2532 + static void uniphier_fi2c_stop(struct uniphier_fi2c_priv *priv)
2533 +@@ -162,7 +164,10 @@ static irqreturn_t uniphier_fi2c_interrupt(int irq, void *dev_id)
2534 + struct uniphier_fi2c_priv *priv = dev_id;
2535 + u32 irq_status;
2536 +
2537 ++ spin_lock(&priv->lock);
2538 ++
2539 + irq_status = readl(priv->membase + UNIPHIER_FI2C_INT);
2540 ++ irq_status &= priv->enabled_irqs;
2541 +
2542 + dev_dbg(&priv->adap.dev,
2543 + "interrupt: enabled_irqs=%04x, irq_status=%04x\n",
2544 +@@ -207,7 +212,13 @@ static irqreturn_t uniphier_fi2c_interrupt(int irq, void *dev_id)
2545 +
2546 + if (irq_status & (UNIPHIER_FI2C_INT_RF | UNIPHIER_FI2C_INT_RB)) {
2547 + uniphier_fi2c_drain_rxfifo(priv);
2548 +- if (!priv->len)
2549 ++ /*
2550 ++ * If the number of bytes to read is a multiple of the FIFO size
2551 ++ * (msg->len == 8, 16, 24, ...), the INT_RF bit is set a little
2552 ++ * earlier than INT_RB. We wait for INT_RB to confirm the
2553 ++ * completion of the current message.
2554 ++ */
2555 ++ if (!priv->len && (irq_status & UNIPHIER_FI2C_INT_RB))
2556 + goto data_done;
2557 +
2558 + if (unlikely(priv->flags & UNIPHIER_FI2C_MANUAL_NACK)) {
2559 +@@ -230,6 +241,8 @@ static irqreturn_t uniphier_fi2c_interrupt(int irq, void *dev_id)
2560 + goto handled;
2561 + }
2562 +
2563 ++ spin_unlock(&priv->lock);
2564 ++
2565 + return IRQ_NONE;
2566 +
2567 + data_done:
2568 +@@ -244,7 +257,14 @@ complete:
2569 + }
2570 +
2571 + handled:
2572 +- uniphier_fi2c_clear_irqs(priv);
2573 ++ /*
2574 ++ * This controller pauses while any bit of the IRQ status is
2575 ++ * asserted. Clear the asserted bits to kick the controller again
2576 ++ * just before exiting the handler.
2577 ++ */
2578 ++ uniphier_fi2c_clear_irqs(priv, irq_status);
2579 ++
2580 ++ spin_unlock(&priv->lock);
2581 +
2582 + return IRQ_HANDLED;
2583 + }
2584 +@@ -252,6 +272,8 @@ handled:
2585 + static void uniphier_fi2c_tx_init(struct uniphier_fi2c_priv *priv, u16 addr)
2586 + {
2587 + priv->enabled_irqs |= UNIPHIER_FI2C_INT_TE;
2588 ++ uniphier_fi2c_set_irqs(priv);
2589 ++
2590 + /* do not use TX byte counter */
2591 + writel(0, priv->membase + UNIPHIER_FI2C_TBC);
2592 + /* set slave address */
2593 +@@ -284,6 +306,8 @@ static void uniphier_fi2c_rx_init(struct uniphier_fi2c_priv *priv, u16 addr)
2594 + priv->enabled_irqs |= UNIPHIER_FI2C_INT_RF;
2595 + }
2596 +
2597 ++ uniphier_fi2c_set_irqs(priv);
2598 ++
2599 + /* set slave address with RD bit */
2600 + writel(UNIPHIER_FI2C_DTTX_CMD | UNIPHIER_FI2C_DTTX_RD | addr << 1,
2601 + priv->membase + UNIPHIER_FI2C_DTTX);
2602 +@@ -307,14 +331,16 @@ static void uniphier_fi2c_recover(struct uniphier_fi2c_priv *priv)
2603 + }
2604 +
2605 + static int uniphier_fi2c_master_xfer_one(struct i2c_adapter *adap,
2606 +- struct i2c_msg *msg, bool stop)
2607 ++ struct i2c_msg *msg, bool repeat,
2608 ++ bool stop)
2609 + {
2610 + struct uniphier_fi2c_priv *priv = i2c_get_adapdata(adap);
2611 + bool is_read = msg->flags & I2C_M_RD;
2612 +- unsigned long time_left;
2613 ++ unsigned long time_left, flags;
2614 +
2615 +- dev_dbg(&adap->dev, "%s: addr=0x%02x, len=%d, stop=%d\n",
2616 +- is_read ? "receive" : "transmit", msg->addr, msg->len, stop);
2617 ++ dev_dbg(&adap->dev, "%s: addr=0x%02x, len=%d, repeat=%d, stop=%d\n",
2618 ++ is_read ? "receive" : "transmit", msg->addr, msg->len,
2619 ++ repeat, stop);
2620 +
2621 + priv->len = msg->len;
2622 + priv->buf = msg->buf;
2623 +@@ -326,22 +352,36 @@ static int uniphier_fi2c_master_xfer_one(struct i2c_adapter *adap,
2624 + priv->flags |= UNIPHIER_FI2C_STOP;
2625 +
2626 + reinit_completion(&priv->comp);
2627 +- uniphier_fi2c_clear_irqs(priv);
2628 ++ uniphier_fi2c_clear_irqs(priv, U32_MAX);
2629 + writel(UNIPHIER_FI2C_RST_TBRST | UNIPHIER_FI2C_RST_RBRST,
2630 + priv->membase + UNIPHIER_FI2C_RST); /* reset TX/RX FIFO */
2631 +
2632 ++ spin_lock_irqsave(&priv->lock, flags);
2633 ++
2634 + if (is_read)
2635 + uniphier_fi2c_rx_init(priv, msg->addr);
2636 + else
2637 + uniphier_fi2c_tx_init(priv, msg->addr);
2638 +
2639 +- uniphier_fi2c_set_irqs(priv);
2640 +-
2641 + dev_dbg(&adap->dev, "start condition\n");
2642 +- writel(UNIPHIER_FI2C_CR_MST | UNIPHIER_FI2C_CR_STA,
2643 +- priv->membase + UNIPHIER_FI2C_CR);
2644 ++ /*
2645 ++ * For a repeated START condition, writing a slave address to the FIFO
2646 ++ * kicks the controller. So, the UNIPHIER_FI2C_CR register should be
2647 ++ * written only for a non-repeated START condition.
2648 ++ */
2649 ++ if (!repeat)
2650 ++ writel(UNIPHIER_FI2C_CR_MST | UNIPHIER_FI2C_CR_STA,
2651 ++ priv->membase + UNIPHIER_FI2C_CR);
2652 ++
2653 ++ spin_unlock_irqrestore(&priv->lock, flags);
2654 +
2655 + time_left = wait_for_completion_timeout(&priv->comp, adap->timeout);
2656 ++
2657 ++ spin_lock_irqsave(&priv->lock, flags);
2658 ++ priv->enabled_irqs = 0;
2659 ++ uniphier_fi2c_set_irqs(priv);
2660 ++ spin_unlock_irqrestore(&priv->lock, flags);
2661 ++
2662 + if (!time_left) {
2663 + dev_err(&adap->dev, "transaction timeout.\n");
2664 + uniphier_fi2c_recover(priv);
2665 +@@ -394,6 +434,7 @@ static int uniphier_fi2c_master_xfer(struct i2c_adapter *adap,
2666 + struct i2c_msg *msgs, int num)
2667 + {
2668 + struct i2c_msg *msg, *emsg = msgs + num;
2669 ++ bool repeat = false;
2670 + int ret;
2671 +
2672 + ret = uniphier_fi2c_check_bus_busy(adap);
2673 +@@ -404,9 +445,11 @@ static int uniphier_fi2c_master_xfer(struct i2c_adapter *adap,
2674 + /* Emit STOP if it is the last message or I2C_M_STOP is set. */
2675 + bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
2676 +
2677 +- ret = uniphier_fi2c_master_xfer_one(adap, msg, stop);
2678 ++ ret = uniphier_fi2c_master_xfer_one(adap, msg, repeat, stop);
2679 + if (ret)
2680 + return ret;
2681 ++
2682 ++ repeat = !stop;
2683 + }
2684 +
2685 + return num;
2686 +@@ -546,6 +589,7 @@ static int uniphier_fi2c_probe(struct platform_device *pdev)
2687 +
2688 + priv->clk_cycle = clk_rate / bus_speed;
2689 + init_completion(&priv->comp);
2690 ++ spin_lock_init(&priv->lock);
2691 + priv->adap.owner = THIS_MODULE;
2692 + priv->adap.algo = &uniphier_fi2c_algo;
2693 + priv->adap.dev.parent = dev;
2694 +diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
2695 +index 96f76896488d..802942adea8e 100644
2696 +--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
2697 ++++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
2698 +@@ -120,6 +120,8 @@ struct bnxt_re_dev {
2699 + #define BNXT_RE_FLAG_HAVE_L2_REF 3
2700 + #define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4
2701 + #define BNXT_RE_FLAG_QOS_WORK_REG 5
2702 ++#define BNXT_RE_FLAG_RESOURCES_ALLOCATED 7
2703 ++#define BNXT_RE_FLAG_RESOURCES_INITIALIZED 8
2704 + #define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29
2705 + struct net_device *netdev;
2706 + unsigned int version, major, minor;
2707 +diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
2708 +index 22bd9784fa2e..589b0d4677d5 100644
2709 +--- a/drivers/infiniband/hw/bnxt_re/main.c
2710 ++++ b/drivers/infiniband/hw/bnxt_re/main.c
2711 +@@ -864,10 +864,8 @@ static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
2712 + {
2713 + int i;
2714 +
2715 +- if (rdev->nq[0].hwq.max_elements) {
2716 +- for (i = 1; i < rdev->num_msix; i++)
2717 +- bnxt_qplib_disable_nq(&rdev->nq[i - 1]);
2718 +- }
2719 ++ for (i = 1; i < rdev->num_msix; i++)
2720 ++ bnxt_qplib_disable_nq(&rdev->nq[i - 1]);
2721 +
2722 + if (rdev->qplib_res.rcfw)
2723 + bnxt_qplib_cleanup_res(&rdev->qplib_res);
2724 +@@ -876,6 +874,7 @@ static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
2725 + static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
2726 + {
2727 + int rc = 0, i;
2728 ++ int num_vec_enabled = 0;
2729 +
2730 + bnxt_qplib_init_res(&rdev->qplib_res);
2731 +
2732 +@@ -891,9 +890,13 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
2733 + "Failed to enable NQ with rc = 0x%x", rc);
2734 + goto fail;
2735 + }
2736 ++ num_vec_enabled++;
2737 + }
2738 + return 0;
2739 + fail:
2740 ++ for (i = num_vec_enabled; i >= 0; i--)
2741 ++ bnxt_qplib_disable_nq(&rdev->nq[i]);
2742 ++
2743 + return rc;
2744 + }
2745 +
2746 +@@ -925,6 +928,7 @@ static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
2747 + static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
2748 + {
2749 + int rc = 0, i;
2750 ++ int num_vec_created = 0;
2751 +
2752 + /* Configure and allocate resources for qplib */
2753 + rdev->qplib_res.rcfw = &rdev->rcfw;
2754 +@@ -951,7 +955,7 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
2755 + if (rc) {
2756 + dev_err(rdev_to_dev(rdev), "Alloc Failed NQ%d rc:%#x",
2757 + i, rc);
2758 +- goto dealloc_dpi;
2759 ++ goto free_nq;
2760 + }
2761 + rc = bnxt_re_net_ring_alloc
2762 + (rdev, rdev->nq[i].hwq.pbl[PBL_LVL_0].pg_map_arr,
2763 +@@ -964,14 +968,17 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
2764 + dev_err(rdev_to_dev(rdev),
2765 + "Failed to allocate NQ fw id with rc = 0x%x",
2766 + rc);
2767 ++ bnxt_qplib_free_nq(&rdev->nq[i]);
2768 + goto free_nq;
2769 + }
2770 ++ num_vec_created++;
2771 + }
2772 + return 0;
2773 + free_nq:
2774 +- for (i = 0; i < rdev->num_msix - 1; i++)
2775 ++ for (i = num_vec_created; i >= 0; i--) {
2776 ++ bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id);
2777 + bnxt_qplib_free_nq(&rdev->nq[i]);
2778 +-dealloc_dpi:
2779 ++ }
2780 + bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
2781 + &rdev->qplib_res.dpi_tbl,
2782 + &rdev->dpi_privileged);
2783 +@@ -989,12 +996,17 @@ static void bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp,
2784 + struct ib_event ib_event;
2785 +
2786 + ib_event.device = ibdev;
2787 +- if (qp)
2788 ++ if (qp) {
2789 + ib_event.element.qp = qp;
2790 +- else
2791 ++ ib_event.event = event;
2792 ++ if (qp->event_handler)
2793 ++ qp->event_handler(&ib_event, qp->qp_context);
2794 ++
2795 ++ } else {
2796 + ib_event.element.port_num = port_num;
2797 +- ib_event.event = event;
2798 +- ib_dispatch_event(&ib_event);
2799 ++ ib_event.event = event;
2800 ++ ib_dispatch_event(&ib_event);
2801 ++ }
2802 + }
2803 +
2804 + #define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN 0x02
2805 +@@ -1201,8 +1213,11 @@ static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev)
2806 + if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
2807 + cancel_delayed_work(&rdev->worker);
2808 +
2809 +- bnxt_re_cleanup_res(rdev);
2810 +- bnxt_re_free_res(rdev);
2811 ++ if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED,
2812 ++ &rdev->flags))
2813 ++ bnxt_re_cleanup_res(rdev);
2814 ++ if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags))
2815 ++ bnxt_re_free_res(rdev);
2816 +
2817 + if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
2818 + rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
2819 +@@ -1332,12 +1347,15 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
2820 + pr_err("Failed to allocate resources: %#x\n", rc);
2821 + goto fail;
2822 + }
2823 ++ set_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags);
2824 + rc = bnxt_re_init_res(rdev);
2825 + if (rc) {
2826 + pr_err("Failed to initialize resources: %#x\n", rc);
2827 + goto fail;
2828 + }
2829 +
2830 ++ set_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED, &rdev->flags);
2831 ++
2832 + if (!rdev->is_virtfn) {
2833 + rc = bnxt_re_setup_qos(rdev);
2834 + if (rc)
2835 +diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
2836 +index 6637df77d236..8b3b5fdc19bb 100644
2837 +--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
2838 ++++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
2839 +@@ -614,13 +614,8 @@ void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
2840 +
2841 + bnxt_qplib_rcfw_stop_irq(rcfw, true);
2842 +
2843 +- if (rcfw->cmdq_bar_reg_iomem)
2844 +- iounmap(rcfw->cmdq_bar_reg_iomem);
2845 +- rcfw->cmdq_bar_reg_iomem = NULL;
2846 +-
2847 +- if (rcfw->creq_bar_reg_iomem)
2848 +- iounmap(rcfw->creq_bar_reg_iomem);
2849 +- rcfw->creq_bar_reg_iomem = NULL;
2850 ++ iounmap(rcfw->cmdq_bar_reg_iomem);
2851 ++ iounmap(rcfw->creq_bar_reg_iomem);
2852 +
2853 + indx = find_first_bit(rcfw->cmdq_bitmap, rcfw->bmap_size);
2854 + if (indx != rcfw->bmap_size)
2855 +@@ -629,6 +624,8 @@ void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
2856 + kfree(rcfw->cmdq_bitmap);
2857 + rcfw->bmap_size = 0;
2858 +
2859 ++ rcfw->cmdq_bar_reg_iomem = NULL;
2860 ++ rcfw->creq_bar_reg_iomem = NULL;
2861 + rcfw->aeq_handler = NULL;
2862 + rcfw->vector = 0;
2863 + }
2864 +@@ -714,6 +711,8 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
2865 + dev_err(&rcfw->pdev->dev,
2866 + "QPLIB: CREQ BAR region %d mapping failed",
2867 + rcfw->creq_bar_reg);
2868 ++ iounmap(rcfw->cmdq_bar_reg_iomem);
2869 ++ rcfw->cmdq_bar_reg_iomem = NULL;
2870 + return -ENOMEM;
2871 + }
2872 + rcfw->creq_qp_event_processed = 0;
2873 +diff --git a/drivers/isdn/mISDN/tei.c b/drivers/isdn/mISDN/tei.c
2874 +index 12d9e5f4beb1..58635b5f296f 100644
2875 +--- a/drivers/isdn/mISDN/tei.c
2876 ++++ b/drivers/isdn/mISDN/tei.c
2877 +@@ -1180,8 +1180,7 @@ static int
2878 + ctrl_teimanager(struct manager *mgr, void *arg)
2879 + {
2880 + /* currently we only have one option */
2881 +- int *val = (int *)arg;
2882 +- int ret = 0;
2883 ++ unsigned int *val = (unsigned int *)arg;
2884 +
2885 + switch (val[0]) {
2886 + case IMCLEAR_L2:
2887 +@@ -1197,9 +1196,9 @@ ctrl_teimanager(struct manager *mgr, void *arg)
2888 + test_and_clear_bit(OPTION_L1_HOLD, &mgr->options);
2889 + break;
2890 + default:
2891 +- ret = -EINVAL;
2892 ++ return -EINVAL;
2893 + }
2894 +- return ret;
2895 ++ return 0;
2896 + }
2897 +
2898 + /* This function does create a L2 for fixed TEI in NT Mode */
2899 +diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
2900 +index da7f4fc1a51d..a0f61eb853c5 100644
2901 +--- a/drivers/macintosh/windfarm_smu_sat.c
2902 ++++ b/drivers/macintosh/windfarm_smu_sat.c
2903 +@@ -22,14 +22,6 @@
2904 +
2905 + #define VERSION "1.0"
2906 +
2907 +-#define DEBUG
2908 +-
2909 +-#ifdef DEBUG
2910 +-#define DBG(args...) printk(args)
2911 +-#else
2912 +-#define DBG(args...) do { } while(0)
2913 +-#endif
2914 +-
2915 + /* If the cache is older than 800ms we'll refetch it */
2916 + #define MAX_AGE msecs_to_jiffies(800)
2917 +
2918 +@@ -106,13 +98,10 @@ struct smu_sdbp_header *smu_sat_get_sdb_partition(unsigned int sat_id, int id,
2919 + buf[i+2] = data[3];
2920 + buf[i+3] = data[2];
2921 + }
2922 +-#ifdef DEBUG
2923 +- DBG(KERN_DEBUG "sat %d partition %x:", sat_id, id);
2924 +- for (i = 0; i < len; ++i)
2925 +- DBG(" %x", buf[i]);
2926 +- DBG("\n");
2927 +-#endif
2928 +
2929 ++ printk(KERN_DEBUG "sat %d partition %x:", sat_id, id);
2930 ++ print_hex_dump(KERN_DEBUG, " ", DUMP_PREFIX_OFFSET,
2931 ++ 16, 1, buf, len, false);
2932 + if (size)
2933 + *size = len;
2934 + return (struct smu_sdbp_header *) buf;
2935 +@@ -132,13 +121,13 @@ static int wf_sat_read_cache(struct wf_sat *sat)
2936 + if (err < 0)
2937 + return err;
2938 + sat->last_read = jiffies;
2939 ++
2940 + #ifdef LOTSA_DEBUG
2941 + {
2942 + int i;
2943 +- DBG(KERN_DEBUG "wf_sat_get: data is");
2944 +- for (i = 0; i < 16; ++i)
2945 +- DBG(" %.2x", sat->cache[i]);
2946 +- DBG("\n");
2947 ++ printk(KERN_DEBUG "wf_sat_get: data is");
2948 ++ print_hex_dump(KERN_DEBUG, " ", DUMP_PREFIX_OFFSET,
2949 ++ 16, 1, sat->cache, 16, false);
2950 + }
2951 + #endif
2952 + return 0;
2953 +diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
2954 +index b78a8a4d061c..6c9b54288261 100644
2955 +--- a/drivers/md/dm-raid.c
2956 ++++ b/drivers/md/dm-raid.c
2957 +@@ -2475,7 +2475,7 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
2958 + }
2959 +
2960 + /* Enable bitmap creation for RAID levels != 0 */
2961 +- mddev->bitmap_info.offset = rt_is_raid0(rs->raid_type) ? 0 : to_sector(4096);
2962 ++ mddev->bitmap_info.offset = (rt_is_raid0(rs->raid_type) || rs->journal_dev.dev) ? 0 : to_sector(4096);
2963 + mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;
2964 +
2965 + if (!test_and_clear_bit(FirstUse, &rdev->flags)) {
2966 +diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
2967 +index 25e97de36717..4cf3d8ad0b4a 100644
2968 +--- a/drivers/md/raid10.c
2969 ++++ b/drivers/md/raid10.c
2970 +@@ -229,7 +229,7 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
2971 +
2972 + out_free_pages:
2973 + while (--j >= 0)
2974 +- resync_free_pages(&rps[j * 2]);
2975 ++ resync_free_pages(&rps[j]);
2976 +
2977 + j = 0;
2978 + out_free_bio:
2979 +diff --git a/drivers/media/i2c/ov13858.c b/drivers/media/i2c/ov13858.c
2980 +index 0e7a85c4996c..afd66d243403 100644
2981 +--- a/drivers/media/i2c/ov13858.c
2982 ++++ b/drivers/media/i2c/ov13858.c
2983 +@@ -1612,7 +1612,8 @@ static int ov13858_init_controls(struct ov13858 *ov13858)
2984 + OV13858_NUM_OF_LINK_FREQS - 1,
2985 + 0,
2986 + link_freq_menu_items);
2987 +- ov13858->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
2988 ++ if (ov13858->link_freq)
2989 ++ ov13858->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
2990 +
2991 + pixel_rate_max = link_freq_to_pixel_rate(link_freq_menu_items[0]);
2992 + pixel_rate_min = link_freq_to_pixel_rate(link_freq_menu_items[1]);
2993 +@@ -1635,7 +1636,8 @@ static int ov13858_init_controls(struct ov13858 *ov13858)
2994 + ov13858->hblank = v4l2_ctrl_new_std(
2995 + ctrl_hdlr, &ov13858_ctrl_ops, V4L2_CID_HBLANK,
2996 + hblank, hblank, 1, hblank);
2997 +- ov13858->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
2998 ++ if (ov13858->hblank)
2999 ++ ov13858->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
3000 +
3001 + exposure_max = mode->vts_def - 8;
3002 + ov13858->exposure = v4l2_ctrl_new_std(
3003 +diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
3004 +index 2a92e5aac9ed..ac17883a054f 100644
3005 +--- a/drivers/media/platform/vivid/vivid-kthread-cap.c
3006 ++++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
3007 +@@ -765,7 +765,11 @@ static int vivid_thread_vid_cap(void *data)
3008 + if (kthread_should_stop())
3009 + break;
3010 +
3011 +- mutex_lock(&dev->mutex);
3012 ++ if (!mutex_trylock(&dev->mutex)) {
3013 ++ schedule_timeout_uninterruptible(1);
3014 ++ continue;
3015 ++ }
3016 ++
3017 + cur_jiffies = jiffies;
3018 + if (dev->cap_seq_resync) {
3019 + dev->jiffies_vid_cap = cur_jiffies;
3020 +@@ -918,8 +922,6 @@ void vivid_stop_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
3021 +
3022 + /* shutdown control thread */
3023 + vivid_grab_controls(dev, false);
3024 +- mutex_unlock(&dev->mutex);
3025 + kthread_stop(dev->kthread_vid_cap);
3026 + dev->kthread_vid_cap = NULL;
3027 +- mutex_lock(&dev->mutex);
3028 + }
3029 +diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c
3030 +index 488590594150..c5f466a73312 100644
3031 +--- a/drivers/media/platform/vivid/vivid-kthread-out.c
3032 ++++ b/drivers/media/platform/vivid/vivid-kthread-out.c
3033 +@@ -135,7 +135,11 @@ static int vivid_thread_vid_out(void *data)
3034 + if (kthread_should_stop())
3035 + break;
3036 +
3037 +- mutex_lock(&dev->mutex);
3038 ++ if (!mutex_trylock(&dev->mutex)) {
3039 ++ schedule_timeout_uninterruptible(1);
3040 ++ continue;
3041 ++ }
3042 ++
3043 + cur_jiffies = jiffies;
3044 + if (dev->out_seq_resync) {
3045 + dev->jiffies_vid_out = cur_jiffies;
3046 +@@ -289,8 +293,6 @@ void vivid_stop_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
3047 +
3048 + /* shutdown control thread */
3049 + vivid_grab_controls(dev, false);
3050 +- mutex_unlock(&dev->mutex);
3051 + kthread_stop(dev->kthread_vid_out);
3052 + dev->kthread_vid_out = NULL;
3053 +- mutex_lock(&dev->mutex);
3054 + }
3055 +diff --git a/drivers/media/platform/vivid/vivid-sdr-cap.c b/drivers/media/platform/vivid/vivid-sdr-cap.c
3056 +index cfb7cb4d37a8..e1794f8689d4 100644
3057 +--- a/drivers/media/platform/vivid/vivid-sdr-cap.c
3058 ++++ b/drivers/media/platform/vivid/vivid-sdr-cap.c
3059 +@@ -137,7 +137,11 @@ static int vivid_thread_sdr_cap(void *data)
3060 + if (kthread_should_stop())
3061 + break;
3062 +
3063 +- mutex_lock(&dev->mutex);
3064 ++ if (!mutex_trylock(&dev->mutex)) {
3065 ++ schedule_timeout_uninterruptible(1);
3066 ++ continue;
3067 ++ }
3068 ++
3069 + cur_jiffies = jiffies;
3070 + if (dev->sdr_cap_seq_resync) {
3071 + dev->jiffies_sdr_cap = cur_jiffies;
3072 +@@ -297,10 +301,8 @@ static void sdr_cap_stop_streaming(struct vb2_queue *vq)
3073 + }
3074 +
3075 + /* shutdown control thread */
3076 +- mutex_unlock(&dev->mutex);
3077 + kthread_stop(dev->kthread_sdr_cap);
3078 + dev->kthread_sdr_cap = NULL;
3079 +- mutex_lock(&dev->mutex);
3080 + }
3081 +
3082 + const struct vb2_ops vivid_sdr_cap_qops = {
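
All three vivid worker threads patched above (vid-cap, vid-out, sdr-cap) get the same treatment: the producer loop takes dev->mutex with mutex_trylock() instead of mutex_lock(), and the stop paths now keep dev->mutex held across kthread_stop() instead of dropping and retaking it. Since the stopper holds the mutex while it waits inside kthread_stop(), the worker must never block on mutex_lock() or the two would deadlock. A minimal sketch of the resulting loop shape (identifiers illustrative, not the driver's exact code):

        static int worker_thread(void *data)
        {
                struct vivid_dev *dev = data;

                for (;;) {
                        if (kthread_should_stop())
                                break;
                        /*
                         * Never block on the lock: whoever asked us to
                         * stop may be holding dev->mutex while waiting
                         * inside kthread_stop(), so back off, sleep a
                         * jiffy and re-check instead.
                         */
                        if (!mutex_trylock(&dev->mutex)) {
                                schedule_timeout_uninterruptible(1);
                                continue;
                        }
                        /* ... produce one buffer ... */
                        mutex_unlock(&dev->mutex);
                }
                return 0;
        }
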
3083 +diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
3084 +index 2e273f4dfc29..c58ae489f39c 100644
3085 +--- a/drivers/media/platform/vivid/vivid-vid-cap.c
3086 ++++ b/drivers/media/platform/vivid/vivid-vid-cap.c
3087 +@@ -222,9 +222,6 @@ static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
3088 + if (vb2_is_streaming(&dev->vb_vid_out_q))
3089 + dev->can_loop_video = vivid_vid_can_loop(dev);
3090 +
3091 +- if (dev->kthread_vid_cap)
3092 +- return 0;
3093 +-
3094 + dev->vid_cap_seq_count = 0;
3095 + dprintk(dev, 1, "%s\n", __func__);
3096 + for (i = 0; i < VIDEO_MAX_FRAME; i++)
3097 +diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
3098 +index 50248e2176a0..0f909500a0b8 100644
3099 +--- a/drivers/media/platform/vivid/vivid-vid-out.c
3100 ++++ b/drivers/media/platform/vivid/vivid-vid-out.c
3101 +@@ -146,9 +146,6 @@ static int vid_out_start_streaming(struct vb2_queue *vq, unsigned count)
3102 + if (vb2_is_streaming(&dev->vb_vid_cap_q))
3103 + dev->can_loop_video = vivid_vid_can_loop(dev);
3104 +
3105 +- if (dev->kthread_vid_out)
3106 +- return 0;
3107 +-
3108 + dev->vid_out_seq_count = 0;
3109 + dprintk(dev, 1, "%s\n", __func__);
3110 + if (dev->start_streaming_error) {
3111 +diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
3112 +index f23a220352f7..6b10363fb6f0 100644
3113 +--- a/drivers/media/rc/imon.c
3114 ++++ b/drivers/media/rc/imon.c
3115 +@@ -1607,8 +1607,7 @@ static void imon_incoming_packet(struct imon_context *ictx,
3116 + spin_unlock_irqrestore(&ictx->kc_lock, flags);
3117 +
3118 + /* send touchscreen events through input subsystem if touchpad data */
3119 +- if (ictx->display_type == IMON_DISPLAY_TYPE_VGA && len == 8 &&
3120 +- buf[7] == 0x86) {
3121 ++ if (ictx->touch && len == 8 && buf[7] == 0x86) {
3122 + imon_touch_event(ictx, buf);
3123 + return;
3124 +
3125 +diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
3126 +index a8f3169e30b3..ac4fddfd0a43 100644
3127 +--- a/drivers/media/usb/b2c2/flexcop-usb.c
3128 ++++ b/drivers/media/usb/b2c2/flexcop-usb.c
3129 +@@ -537,6 +537,9 @@ static int flexcop_usb_probe(struct usb_interface *intf,
3130 + struct flexcop_device *fc = NULL;
3131 + int ret;
3132 +
3133 ++ if (intf->cur_altsetting->desc.bNumEndpoints < 1)
3134 ++ return -ENODEV;
3135 ++
3136 + if ((fc = flexcop_device_kmalloc(sizeof(struct flexcop_usb))) == NULL) {
3137 + err("out of memory\n");
3138 + return -ENOMEM;
3139 +diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
3140 +index 5b51ed7d6243..5400ec99986f 100644
3141 +--- a/drivers/media/usb/dvb-usb/cxusb.c
3142 ++++ b/drivers/media/usb/dvb-usb/cxusb.c
3143 +@@ -457,7 +457,8 @@ static int cxusb_rc_query(struct dvb_usb_device *d)
3144 + {
3145 + u8 ircode[4];
3146 +
3147 +- cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4);
3148 ++ if (cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4) < 0)
3149 ++ return 0;
3150 +
3151 + if (ircode[2] || ircode[3])
3152 + rc_keydown(d->rc_dev, RC_PROTO_NEC,
3153 +diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
3154 +index f29d1bef0293..cce29b604f4a 100644
3155 +--- a/drivers/media/usb/usbvision/usbvision-video.c
3156 ++++ b/drivers/media/usb/usbvision/usbvision-video.c
3157 +@@ -327,6 +327,10 @@ static int usbvision_v4l2_open(struct file *file)
3158 + if (mutex_lock_interruptible(&usbvision->v4l2_lock))
3159 + return -ERESTARTSYS;
3160 +
3161 ++ if (usbvision->remove_pending) {
3162 ++ err_code = -ENODEV;
3163 ++ goto unlock;
3164 ++ }
3165 + if (usbvision->user) {
3166 + err_code = -EBUSY;
3167 + } else {
3168 +@@ -390,6 +394,7 @@ unlock:
3169 + static int usbvision_v4l2_close(struct file *file)
3170 + {
3171 + struct usb_usbvision *usbvision = video_drvdata(file);
3172 ++ int r;
3173 +
3174 + PDEBUG(DBG_IO, "close");
3175 +
3176 +@@ -404,9 +409,10 @@ static int usbvision_v4l2_close(struct file *file)
3177 + usbvision_scratch_free(usbvision);
3178 +
3179 + usbvision->user--;
3180 ++ r = usbvision->remove_pending;
3181 + mutex_unlock(&usbvision->v4l2_lock);
3182 +
3183 +- if (usbvision->remove_pending) {
3184 ++ if (r) {
3185 + printk(KERN_INFO "%s: Final disconnect\n", __func__);
3186 + usbvision_release(usbvision);
3187 + return 0;
3188 +@@ -1090,6 +1096,11 @@ static int usbvision_radio_open(struct file *file)
3189 +
3190 + if (mutex_lock_interruptible(&usbvision->v4l2_lock))
3191 + return -ERESTARTSYS;
3192 ++
3193 ++ if (usbvision->remove_pending) {
3194 ++ err_code = -ENODEV;
3195 ++ goto out;
3196 ++ }
3197 + err_code = v4l2_fh_open(file);
3198 + if (err_code)
3199 + goto out;
3200 +@@ -1122,6 +1133,7 @@ out:
3201 + static int usbvision_radio_close(struct file *file)
3202 + {
3203 + struct usb_usbvision *usbvision = video_drvdata(file);
3204 ++ int r;
3205 +
3206 + PDEBUG(DBG_IO, "");
3207 +
3208 +@@ -1134,9 +1146,10 @@ static int usbvision_radio_close(struct file *file)
3209 + usbvision_audio_off(usbvision);
3210 + usbvision->radio = 0;
3211 + usbvision->user--;
3212 ++ r = usbvision->remove_pending;
3213 + mutex_unlock(&usbvision->v4l2_lock);
3214 +
3215 +- if (usbvision->remove_pending) {
3216 ++ if (r) {
3217 + printk(KERN_INFO "%s: Final disconnect\n", __func__);
3218 + v4l2_fh_release(file);
3219 + usbvision_release(usbvision);
3220 +@@ -1562,6 +1575,7 @@ err_usb:
3221 + static void usbvision_disconnect(struct usb_interface *intf)
3222 + {
3223 + struct usb_usbvision *usbvision = to_usbvision(usb_get_intfdata(intf));
3224 ++ int u;
3225 +
3226 + PDEBUG(DBG_PROBE, "");
3227 +
3228 +@@ -1578,13 +1592,14 @@ static void usbvision_disconnect(struct usb_interface *intf)
3229 + v4l2_device_disconnect(&usbvision->v4l2_dev);
3230 + usbvision_i2c_unregister(usbvision);
3231 + usbvision->remove_pending = 1; /* Now all ISO data will be ignored */
3232 ++ u = usbvision->user;
3233 +
3234 + usb_put_dev(usbvision->dev);
3235 + usbvision->dev = NULL; /* USB device is no more */
3236 +
3237 + mutex_unlock(&usbvision->v4l2_lock);
3238 +
3239 +- if (usbvision->user) {
3240 ++ if (u) {
3241 + printk(KERN_INFO "%s: In use, disconnect pending\n",
3242 + __func__);
3243 + wake_up_interruptible(&usbvision->wait_frame);
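
The usbvision hunks above all follow a single rule: state shared with the disconnect path (remove_pending, user) is sampled into a local variable while v4l2_lock is still held, and only the local copy is tested after mutex_unlock(). Testing usbvision->remove_pending after the unlock, as the old code did, races with the release path tearing the device down. A condensed sketch of the pattern, with the surrounding code elided:

        int r;

        mutex_lock(&usbvision->v4l2_lock);
        /* ... tear down this file handle ... */
        usbvision->user--;
        r = usbvision->remove_pending;  /* snapshot while still protected */
        mutex_unlock(&usbvision->v4l2_lock);

        if (r)  /* decided from the snapshot, not the shared field */
                usbvision_release(usbvision);
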
3244 +diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
3245 +index c3ddbf6c202a..cf4feff2a48c 100644
3246 +--- a/drivers/media/usb/uvc/uvc_driver.c
3247 ++++ b/drivers/media/usb/uvc/uvc_driver.c
3248 +@@ -2124,6 +2124,20 @@ static int uvc_probe(struct usb_interface *intf,
3249 + sizeof(dev->name) - len);
3250 + }
3251 +
3252 ++ /* Initialize the media device. */
3253 ++#ifdef CONFIG_MEDIA_CONTROLLER
3254 ++ dev->mdev.dev = &intf->dev;
3255 ++ strscpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model));
3256 ++ if (udev->serial)
3257 ++ strscpy(dev->mdev.serial, udev->serial,
3258 ++ sizeof(dev->mdev.serial));
3259 ++ usb_make_path(udev, dev->mdev.bus_info, sizeof(dev->mdev.bus_info));
3260 ++ dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
3261 ++ media_device_init(&dev->mdev);
3262 ++
3263 ++ dev->vdev.mdev = &dev->mdev;
3264 ++#endif
3265 ++
3266 + /* Parse the Video Class control descriptor. */
3267 + if (uvc_parse_control(dev) < 0) {
3268 + uvc_trace(UVC_TRACE_PROBE, "Unable to parse UVC "
3269 +@@ -2144,19 +2158,7 @@ static int uvc_probe(struct usb_interface *intf,
3270 + "linux-uvc-devel mailing list.\n");
3271 + }
3272 +
3273 +- /* Initialize the media device and register the V4L2 device. */
3274 +-#ifdef CONFIG_MEDIA_CONTROLLER
3275 +- dev->mdev.dev = &intf->dev;
3276 +- strlcpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model));
3277 +- if (udev->serial)
3278 +- strlcpy(dev->mdev.serial, udev->serial,
3279 +- sizeof(dev->mdev.serial));
3280 +- strcpy(dev->mdev.bus_info, udev->devpath);
3281 +- dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
3282 +- media_device_init(&dev->mdev);
3283 +-
3284 +- dev->vdev.mdev = &dev->mdev;
3285 +-#endif
3286 ++ /* Register the V4L2 device. */
3287 + if (v4l2_device_register(&intf->dev, &dev->vdev) < 0)
3288 + goto error;
3289 +
3290 +diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
3291 +index 47d6d40f41cd..a4403a57ddc8 100644
3292 +--- a/drivers/mfd/arizona-core.c
3293 ++++ b/drivers/mfd/arizona-core.c
3294 +@@ -52,8 +52,10 @@ int arizona_clk32k_enable(struct arizona *arizona)
3295 + if (ret != 0)
3296 + goto err_ref;
3297 + ret = clk_prepare_enable(arizona->mclk[ARIZONA_MCLK1]);
3298 +- if (ret != 0)
3299 +- goto err_pm;
3300 ++ if (ret != 0) {
3301 ++ pm_runtime_put_sync(arizona->dev);
3302 ++ goto err_ref;
3303 ++ }
3304 + break;
3305 + case ARIZONA_32KZ_MCLK2:
3306 + ret = clk_prepare_enable(arizona->mclk[ARIZONA_MCLK2]);
3307 +@@ -67,8 +69,6 @@ int arizona_clk32k_enable(struct arizona *arizona)
3308 + ARIZONA_CLK_32K_ENA);
3309 + }
3310 +
3311 +-err_pm:
3312 +- pm_runtime_put_sync(arizona->dev);
3313 + err_ref:
3314 + if (ret != 0)
3315 + arizona->clk32k_ref--;
3316 +diff --git a/drivers/mfd/intel_soc_pmic_bxtwc.c b/drivers/mfd/intel_soc_pmic_bxtwc.c
3317 +index 15bc052704a6..9ca1f8c015de 100644
3318 +--- a/drivers/mfd/intel_soc_pmic_bxtwc.c
3319 ++++ b/drivers/mfd/intel_soc_pmic_bxtwc.c
3320 +@@ -31,8 +31,8 @@
3321 +
3322 + /* Interrupt Status Registers */
3323 + #define BXTWC_IRQLVL1 0x4E02
3324 +-#define BXTWC_PWRBTNIRQ 0x4E03
3325 +
3326 ++#define BXTWC_PWRBTNIRQ 0x4E03
3327 + #define BXTWC_THRM0IRQ 0x4E04
3328 + #define BXTWC_THRM1IRQ 0x4E05
3329 + #define BXTWC_THRM2IRQ 0x4E06
3330 +@@ -47,10 +47,9 @@
3331 +
3332 + /* Interrupt MASK Registers */
3333 + #define BXTWC_MIRQLVL1 0x4E0E
3334 +-#define BXTWC_MPWRTNIRQ 0x4E0F
3335 +-
3336 + #define BXTWC_MIRQLVL1_MCHGR BIT(5)
3337 +
3338 ++#define BXTWC_MPWRBTNIRQ 0x4E0F
3339 + #define BXTWC_MTHRM0IRQ 0x4E12
3340 + #define BXTWC_MTHRM1IRQ 0x4E13
3341 + #define BXTWC_MTHRM2IRQ 0x4E14
3342 +@@ -66,9 +65,7 @@
3343 + /* Whiskey Cove PMIC shares the same ACPI ID between different platforms */
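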
3344 + #define BROXTON_PMIC_WC_HRV 4
3345 +
3346 +-/* Manage in two IRQ chips since mask registers are not consecutive */
3347 + enum bxtwc_irqs {
3348 +- /* Level 1 */
3349 + BXTWC_PWRBTN_LVL1_IRQ = 0,
3350 + BXTWC_TMU_LVL1_IRQ,
3351 + BXTWC_THRM_LVL1_IRQ,
3352 +@@ -77,9 +74,11 @@ enum bxtwc_irqs {
3353 + BXTWC_CHGR_LVL1_IRQ,
3354 + BXTWC_GPIO_LVL1_IRQ,
3355 + BXTWC_CRIT_LVL1_IRQ,
3356 ++};
3357 +
3358 +- /* Level 2 */
3359 +- BXTWC_PWRBTN_IRQ,
3360 ++enum bxtwc_irqs_pwrbtn {
3361 ++ BXTWC_PWRBTN_IRQ = 0,
3362 ++ BXTWC_UIBTN_IRQ,
3363 + };
3364 +
3365 + enum bxtwc_irqs_bcu {
3366 +@@ -113,7 +112,10 @@ static const struct regmap_irq bxtwc_regmap_irqs[] = {
3367 + REGMAP_IRQ_REG(BXTWC_CHGR_LVL1_IRQ, 0, BIT(5)),
3368 + REGMAP_IRQ_REG(BXTWC_GPIO_LVL1_IRQ, 0, BIT(6)),
3369 + REGMAP_IRQ_REG(BXTWC_CRIT_LVL1_IRQ, 0, BIT(7)),
3370 +- REGMAP_IRQ_REG(BXTWC_PWRBTN_IRQ, 1, 0x03),
3371 ++};
3372 ++
3373 ++static const struct regmap_irq bxtwc_regmap_irqs_pwrbtn[] = {
3374 ++ REGMAP_IRQ_REG(BXTWC_PWRBTN_IRQ, 0, 0x01),
3375 + };
3376 +
3377 + static const struct regmap_irq bxtwc_regmap_irqs_bcu[] = {
3378 +@@ -125,7 +127,7 @@ static const struct regmap_irq bxtwc_regmap_irqs_adc[] = {
3379 + };
3380 +
3381 + static const struct regmap_irq bxtwc_regmap_irqs_chgr[] = {
3382 +- REGMAP_IRQ_REG(BXTWC_USBC_IRQ, 0, BIT(5)),
3383 ++ REGMAP_IRQ_REG(BXTWC_USBC_IRQ, 0, 0x20),
3384 + REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 0, 0x1f),
3385 + REGMAP_IRQ_REG(BXTWC_CHGR1_IRQ, 1, 0x1f),
3386 + };
3387 +@@ -144,7 +146,16 @@ static struct regmap_irq_chip bxtwc_regmap_irq_chip = {
3388 + .mask_base = BXTWC_MIRQLVL1,
3389 + .irqs = bxtwc_regmap_irqs,
3390 + .num_irqs = ARRAY_SIZE(bxtwc_regmap_irqs),
3391 +- .num_regs = 2,
3392 ++ .num_regs = 1,
3393 ++};
3394 ++
3395 ++static struct regmap_irq_chip bxtwc_regmap_irq_chip_pwrbtn = {
3396 ++ .name = "bxtwc_irq_chip_pwrbtn",
3397 ++ .status_base = BXTWC_PWRBTNIRQ,
3398 ++ .mask_base = BXTWC_MPWRBTNIRQ,
3399 ++ .irqs = bxtwc_regmap_irqs_pwrbtn,
3400 ++ .num_irqs = ARRAY_SIZE(bxtwc_regmap_irqs_pwrbtn),
3401 ++ .num_regs = 1,
3402 + };
3403 +
3404 + static struct regmap_irq_chip bxtwc_regmap_irq_chip_tmu = {
3405 +@@ -472,6 +483,16 @@ static int bxtwc_probe(struct platform_device *pdev)
3406 + return ret;
3407 + }
3408 +
3409 ++ ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
3410 ++ BXTWC_PWRBTN_LVL1_IRQ,
3411 ++ IRQF_ONESHOT,
3412 ++ &bxtwc_regmap_irq_chip_pwrbtn,
3413 ++ &pmic->irq_chip_data_pwrbtn);
3414 ++ if (ret) {
3415 ++ dev_err(&pdev->dev, "Failed to add PWRBTN IRQ chip\n");
3416 ++ return ret;
3417 ++ }
3418 ++
3419 + ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
3420 + BXTWC_TMU_LVL1_IRQ,
3421 + IRQF_ONESHOT,
3422 +diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
3423 +index 3f554c447521..d1495d76bf2c 100644
3424 +--- a/drivers/mfd/max8997.c
3425 ++++ b/drivers/mfd/max8997.c
3426 +@@ -153,12 +153,6 @@ static struct max8997_platform_data *max8997_i2c_parse_dt_pdata(
3427 +
3428 + pd->ono = irq_of_parse_and_map(dev->of_node, 1);
3429 +
3430 +- /*
3431 +- * ToDo: the 'wakeup' member in the platform data is more of a linux
3432 +- * specfic information. Hence, there is no binding for that yet and
3433 +- * not parsed here.
3434 +- */
3435 +-
3436 + return pd;
3437 + }
3438 +
3439 +@@ -246,7 +240,7 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
3440 + */
3441 +
3442 + /* MAX8997 has a power button input. */
3443 +- device_init_wakeup(max8997->dev, pdata->wakeup);
3444 ++ device_init_wakeup(max8997->dev, true);
3445 +
3446 + return ret;
3447 +
3448 +diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
3449 +index 234febfe6398..d0bf50e3568d 100644
3450 +--- a/drivers/mfd/mc13xxx-core.c
3451 ++++ b/drivers/mfd/mc13xxx-core.c
3452 +@@ -278,7 +278,8 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
3453 + if (ret)
3454 + goto out;
3455 +
3456 +- adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2;
3457 ++ adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2 |
3458 ++ MC13XXX_ADC0_CHRGRAWDIV;
3459 + adc1 = MC13XXX_ADC1_ADEN | MC13XXX_ADC1_ADTRIGIGN | MC13XXX_ADC1_ASC;
3460 +
3461 + /*
3462 +diff --git a/drivers/misc/mic/scif/scif_fence.c b/drivers/misc/mic/scif/scif_fence.c
3463 +index cac3bcc308a7..7bb929f05d85 100644
3464 +--- a/drivers/misc/mic/scif/scif_fence.c
3465 ++++ b/drivers/misc/mic/scif/scif_fence.c
3466 +@@ -272,7 +272,7 @@ static int _scif_prog_signal(scif_epd_t epd, dma_addr_t dst, u64 val)
3467 + dma_fail:
3468 + if (!x100)
3469 + dma_pool_free(ep->remote_dev->signal_pool, status,
3470 +- status->src_dma_addr);
3471 ++ src - offsetof(struct scif_status, val));
3472 + alloc_fail:
3473 + return err;
3474 + }
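
The scif fix above frees the dma_pool block by recomputing its bus handle from src instead of reading status->src_dma_addr, which is not yet initialized on this error path. Going by the allocation earlier in _scif_prog_signal() -- quoted from memory here, so treat the exact lines as illustrative -- src is advanced past the start of the block after allocation, and subtracting the same offset recovers the handle that dma_pool_alloc() handed out:

        /*
         *      status = dma_pool_alloc(ep->remote_dev->signal_pool,
         *                              GFP_KERNEL, &src); // src = block start
         *      ...
         *      src += offsetof(struct scif_status, val);  // points at ->val
         *      ...
         * dma_fail:
         *      dma_pool_free(ep->remote_dev->signal_pool, status,
         *                    src - offsetof(struct scif_status, val));
         */
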
3475 +diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
3476 +index f171cce5197d..673f6a9616cd 100644
3477 +--- a/drivers/mmc/host/mtk-sd.c
3478 ++++ b/drivers/mmc/host/mtk-sd.c
3479 +@@ -390,7 +390,6 @@ struct msdc_host {
3480 + struct clk *src_clk_cg; /* msdc source clock control gate */
3481 + u32 mclk; /* mmc subsystem clock frequency */
3482 + u32 src_clk_freq; /* source clock frequency */
3483 +- u32 sclk; /* SD/MS bus clock frequency */
3484 + unsigned char timing;
3485 + bool vqmmc_enabled;
3486 + u32 latch_ck;
3487 +@@ -635,10 +634,10 @@ static void msdc_set_timeout(struct msdc_host *host, u32 ns, u32 clks)
3488 +
3489 + host->timeout_ns = ns;
3490 + host->timeout_clks = clks;
3491 +- if (host->sclk == 0) {
3492 ++ if (host->mmc->actual_clock == 0) {
3493 + timeout = 0;
3494 + } else {
3495 +- clk_ns = 1000000000UL / host->sclk;
3496 ++ clk_ns = 1000000000UL / host->mmc->actual_clock;
3497 + timeout = (ns + clk_ns - 1) / clk_ns + clks;
3498 + /* in 1048576 sclk cycle unit */
3499 + timeout = (timeout + (0x1 << 20) - 1) >> 20;
3500 +@@ -683,6 +682,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
3501 + if (!hz) {
3502 + dev_dbg(host->dev, "set mclk to 0\n");
3503 + host->mclk = 0;
3504 ++ host->mmc->actual_clock = 0;
3505 + sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
3506 + return;
3507 + }
3508 +@@ -761,7 +761,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
3509 + while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
3510 + cpu_relax();
3511 + sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
3512 +- host->sclk = sclk;
3513 ++ host->mmc->actual_clock = sclk;
3514 + host->mclk = hz;
3515 + host->timing = timing;
3516 + /* need because clk changed. */
3517 +@@ -772,7 +772,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
3518 + * mmc_select_hs400() will drop to 50Mhz and High speed mode,
3519 + * tune result of hs200/200Mhz is not suitable for 50Mhz
3520 + */
3521 +- if (host->sclk <= 52000000) {
3522 ++ if (host->mmc->actual_clock <= 52000000) {
3523 + writel(host->def_tune_para.iocon, host->base + MSDC_IOCON);
3524 + writel(host->def_tune_para.pad_tune, host->base + tune_reg);
3525 + } else {
3526 +@@ -787,7 +787,8 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
3527 + sdr_set_field(host->base + tune_reg,
3528 + MSDC_PAD_TUNE_CMDRRDLY,
3529 + host->hs400_cmd_int_delay);
3530 +- dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->sclk, timing);
3531 ++ dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->mmc->actual_clock,
3532 ++ timing);
3533 + }
3534 +
3535 + static inline u32 msdc_cmd_find_resp(struct msdc_host *host,
3536 +@@ -1055,6 +1056,7 @@ static void msdc_start_command(struct msdc_host *host,
3537 + WARN_ON(host->cmd);
3538 + host->cmd = cmd;
3539 +
3540 ++ mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT);
3541 + if (!msdc_cmd_is_ready(host, mrq, cmd))
3542 + return;
3543 +
3544 +@@ -1066,7 +1068,6 @@ static void msdc_start_command(struct msdc_host *host,
3545 +
3546 + cmd->error = 0;
3547 + rawcmd = msdc_cmd_prepare_raw_cmd(host, mrq, cmd);
3548 +- mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT);
3549 +
3550 + sdr_set_bits(host->base + MSDC_INTEN, cmd_ints_mask);
3551 + writel(cmd->arg, host->base + SDC_ARG);
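
Two independent fixes are folded into the mtk-sd hunks above. The first drops the driver-private host->sclk in favour of the core's mmc->actual_clock, which now also gets cleared when the clock is gated. The second reorders command start so the request-timeout work is armed before msdc_cmd_is_ready(): previously, if the ready check failed after host->cmd had already been claimed, no timer was running and the request could stall with nothing left to complete it. The fixed ordering, condensed:

        host->cmd = cmd;
        mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT);
        if (!msdc_cmd_is_ready(host, mrq, cmd))
                return;         /* timer already runs, so the request
                                 * still gets timed out and completed */
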
3552 +diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
3553 +index ca3655d28e00..17cec68e56b4 100644
3554 +--- a/drivers/net/dsa/bcm_sf2.c
3555 ++++ b/drivers/net/dsa/bcm_sf2.c
3556 +@@ -1099,12 +1099,16 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
3557 + return ret;
3558 + }
3559 +
3560 ++ bcm_sf2_gphy_enable_set(priv->dev->ds, true);
3561 ++
3562 + ret = bcm_sf2_mdio_register(ds);
3563 + if (ret) {
3564 + pr_err("failed to register MDIO bus\n");
3565 + return ret;
3566 + }
3567 +
3568 ++ bcm_sf2_gphy_enable_set(priv->dev->ds, false);
3569 ++
3570 + ret = bcm_sf2_cfp_rst(priv);
3571 + if (ret) {
3572 + pr_err("failed to reset CFP\n");
3573 +diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
3574 +index d075f0f7a3de..411ae9961bf4 100644
3575 +--- a/drivers/net/dsa/mv88e6xxx/chip.c
3576 ++++ b/drivers/net/dsa/mv88e6xxx/chip.c
3577 +@@ -3028,7 +3028,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
3578 + .port_set_link = mv88e6xxx_port_set_link,
3579 + .port_set_duplex = mv88e6xxx_port_set_duplex,
3580 + .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
3581 +- .port_set_speed = mv88e6390_port_set_speed,
3582 ++ .port_set_speed = mv88e6341_port_set_speed,
3583 + .port_tag_remap = mv88e6095_port_tag_remap,
3584 + .port_set_frame_mode = mv88e6351_port_set_frame_mode,
3585 + .port_set_egress_floods = mv88e6352_port_set_egress_floods,
3586 +@@ -3649,7 +3649,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
3587 + .port_set_link = mv88e6xxx_port_set_link,
3588 + .port_set_duplex = mv88e6xxx_port_set_duplex,
3589 + .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
3590 +- .port_set_speed = mv88e6390_port_set_speed,
3591 ++ .port_set_speed = mv88e6341_port_set_speed,
3592 + .port_tag_remap = mv88e6095_port_tag_remap,
3593 + .port_set_frame_mode = mv88e6351_port_set_frame_mode,
3594 + .port_set_egress_floods = mv88e6352_port_set_egress_floods,
3595 +diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
3596 +index fdeddbfa829d..2f16a310c110 100644
3597 +--- a/drivers/net/dsa/mv88e6xxx/port.c
3598 ++++ b/drivers/net/dsa/mv88e6xxx/port.c
3599 +@@ -228,8 +228,11 @@ static int mv88e6xxx_port_set_speed(struct mv88e6xxx_chip *chip, int port,
3600 + ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_1000;
3601 + break;
3602 + case 2500:
3603 +- ctrl = MV88E6390_PORT_MAC_CTL_SPEED_10000 |
3604 +- MV88E6390_PORT_MAC_CTL_ALTSPEED;
3605 ++ if (alt_bit)
3606 ++ ctrl = MV88E6390_PORT_MAC_CTL_SPEED_10000 |
3607 ++ MV88E6390_PORT_MAC_CTL_ALTSPEED;
3608 ++ else
3609 ++ ctrl = MV88E6390_PORT_MAC_CTL_SPEED_10000;
3610 + break;
3611 + case 10000:
3612 + /* all bits set, fall through... */
3613 +@@ -291,6 +294,24 @@ int mv88e6185_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
3614 + return mv88e6xxx_port_set_speed(chip, port, speed, false, false);
3615 + }
3616 +
3617 ++/* Support 10, 100, 200, 1000, 2500 Mbps (e.g. 88E6341) */
3618 ++int mv88e6341_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
3619 ++{
3620 ++ if (speed == SPEED_MAX)
3621 ++ speed = port < 5 ? 1000 : 2500;
3622 ++
3623 ++ if (speed > 2500)
3624 ++ return -EOPNOTSUPP;
3625 ++
3626 ++ if (speed == 200 && port != 0)
3627 ++ return -EOPNOTSUPP;
3628 ++
3629 ++ if (speed == 2500 && port < 5)
3630 ++ return -EOPNOTSUPP;
3631 ++
3632 ++ return mv88e6xxx_port_set_speed(chip, port, speed, !port, true);
3633 ++}
3634 ++
3635 + /* Support 10, 100, 200, 1000 Mbps (e.g. 88E6352 family) */
3636 + int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
3637 + {
3638 +diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
3639 +index 95b59f5eb393..cbb64a7683e2 100644
3640 +--- a/drivers/net/dsa/mv88e6xxx/port.h
3641 ++++ b/drivers/net/dsa/mv88e6xxx/port.h
3642 +@@ -280,6 +280,7 @@ int mv88e6xxx_port_set_duplex(struct mv88e6xxx_chip *chip, int port, int dup);
3643 +
3644 + int mv88e6065_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
3645 + int mv88e6185_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
3646 ++int mv88e6341_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
3647 + int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
3648 + int mv88e6390_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
3649 + int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
3650 +diff --git a/drivers/net/ethernet/amazon/Kconfig b/drivers/net/ethernet/amazon/Kconfig
3651 +index 99b30353541a..9e87d7b8360f 100644
3652 +--- a/drivers/net/ethernet/amazon/Kconfig
3653 ++++ b/drivers/net/ethernet/amazon/Kconfig
3654 +@@ -17,7 +17,7 @@ if NET_VENDOR_AMAZON
3655 +
3656 + config ENA_ETHERNET
3657 + tristate "Elastic Network Adapter (ENA) support"
3658 +- depends on (PCI_MSI && X86)
3659 ++ depends on PCI_MSI && !CPU_BIG_ENDIAN
3660 + ---help---
3661 + This driver supports Elastic Network Adapter (ENA)
3662 +
3663 +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
3664 +index bb60104b4f80..338d22380434 100644
3665 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
3666 ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
3667 +@@ -1169,7 +1169,7 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv,
3668 + break;
3669 + }
3670 +
3671 +- return 0;
3672 ++ return ret;
3673 + }
3674 +
3675 + static void bcmgenet_power_up(struct bcmgenet_priv *priv,
3676 +diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
3677 +index 74eeb3a985bf..f175b20ac510 100644
3678 +--- a/drivers/net/ethernet/cadence/macb_main.c
3679 ++++ b/drivers/net/ethernet/cadence/macb_main.c
3680 +@@ -1721,7 +1721,7 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
3681 + padlen = 0;
3682 + /* No room for FCS, need to reallocate skb. */
3683 + else
3684 +- padlen = ETH_FCS_LEN - tailroom;
3685 ++ padlen = ETH_FCS_LEN;
3686 + } else {
3687 + /* Add room for FCS. */
3688 + padlen += ETH_FCS_LEN;
3689 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3690 +index e11a7de20b8f..3708f149d0a6 100644
3691 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3692 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
3693 +@@ -2547,7 +2547,7 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
3694 + chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
3695 + GFP_KERNEL);
3696 + if (!chain)
3697 +- return -ENOMEM;
3698 ++ goto err_free_chain;
3699 +
3700 + cur_chain->next = chain;
3701 + chain->tqp_index = tx_ring->tqp->tqp_index;
3702 +@@ -2577,7 +2577,7 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
3703 + while (rx_ring) {
3704 + chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
3705 + if (!chain)
3706 +- return -ENOMEM;
3707 ++ goto err_free_chain;
3708 +
3709 + cur_chain->next = chain;
3710 + chain->tqp_index = rx_ring->tqp->tqp_index;
3711 +@@ -2592,6 +2592,16 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
3712 + }
3713 +
3714 + return 0;
3715 ++
3716 ++err_free_chain:
3717 ++ cur_chain = head->next;
3718 ++ while (cur_chain) {
3719 ++ chain = cur_chain->next;
3720 ++ devm_kfree(&pdev->dev, chain);
3721 ++ cur_chain = chain;
3722 ++ }
3723 ++
3724 ++ return -ENOMEM;
3725 + }
3726 +
3727 + static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
3728 +@@ -2836,8 +2846,10 @@ static int hns3_queue_to_ring(struct hnae3_queue *tqp,
3729 + return ret;
3730 +
3731 + ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
3732 +- if (ret)
3733 ++ if (ret) {
3734 ++ devm_kfree(priv->dev, priv->ring_data[tqp->tqp_index].ring);
3735 + return ret;
3736 ++ }
3737 +
3738 + return 0;
3739 + }
3740 +@@ -2864,6 +2876,12 @@ static int hns3_get_ring_config(struct hns3_nic_priv *priv)
3741 +
3742 + return 0;
3743 + err:
3744 ++ while (i--) {
3745 ++ devm_kfree(priv->dev, priv->ring_data[i].ring);
3746 ++ devm_kfree(priv->dev,
3747 ++ priv->ring_data[i + h->kinfo.num_tqps].ring);
3748 ++ }
3749 ++
3750 + devm_kfree(&pdev->dev, priv->ring_data);
3751 + return ret;
3752 + }
3753 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
3754 +index 68026a5ad7e7..690f62ed87dc 100644
3755 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
3756 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
3757 +@@ -24,15 +24,15 @@ static int hclge_ring_space(struct hclge_cmq_ring *ring)
3758 + return ring->desc_num - used - 1;
3759 + }
3760 +
3761 +-static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int h)
3762 ++static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int head)
3763 + {
3764 +- int u = ring->next_to_use;
3765 +- int c = ring->next_to_clean;
3766 ++ int ntu = ring->next_to_use;
3767 ++ int ntc = ring->next_to_clean;
3768 +
3769 +- if (unlikely(h >= ring->desc_num))
3770 +- return 0;
3771 ++ if (ntu > ntc)
3772 ++ return head >= ntc && head <= ntu;
3773 +
3774 +- return u > c ? (h > c && h <= u) : (h > c || h <= u);
3775 ++ return head >= ntc || head <= ntu;
3776 + }
3777 +
3778 + static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
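
The rewritten is_valid_csq_clean_head() above accepts a firmware-reported head anywhere in the closed window from next_to_clean to next_to_use -- including head == next_to_clean, which the old '>' test rejected -- and handles the window wrapping past the end of the ring. A mirror of the check with made-up values, for illustration only:

        static bool head_in_window(int head, int ntc, int ntu)
        {
                if (ntu > ntc)                          /* no wraparound */
                        return head >= ntc && head <= ntu;
                return head >= ntc || head <= ntu;      /* wraps past 0 */
        }

        /* desc_num = 8, ntc = 2, ntu = 5: valid heads are 2,3,4,5   */
        /* desc_num = 8, ntc = 6, ntu = 2: valid heads are 6,7,0,1,2 */
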
3779 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
3780 +index b04df79f393f..f8cc8d1f0b20 100644
3781 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
3782 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
3783 +@@ -2574,7 +2574,7 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3784 + }
3785 +
3786 + /* clear the source of interrupt if it is not cause by reset */
3787 +- if (event_cause != HCLGE_VECTOR0_EVENT_RST) {
3788 ++ if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3789 + hclge_clear_event_cause(hdev, event_cause, clearval);
3790 + hclge_enable_vector(&hdev->misc_vector, true);
3791 + }
3792 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
3793 +index 398971a062f4..03491e8ebb73 100644
3794 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
3795 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
3796 +@@ -54,7 +54,7 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum,
3797 + struct hclge_desc desc;
3798 + int ret;
3799 +
3800 +- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3801 ++ if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state))
3802 + return 0;
3803 +
3804 + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, false);
3805 +@@ -92,7 +92,7 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
3806 + struct hclge_desc desc;
3807 + int ret;
3808 +
3809 +- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3810 ++ if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state))
3811 + return 0;
3812 +
3813 + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, true);
3814 +diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
3815 +index e707d717012f..618032612f52 100644
3816 +--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
3817 ++++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
3818 +@@ -302,6 +302,28 @@ void fm10k_iov_suspend(struct pci_dev *pdev)
3819 + }
3820 + }
3821 +
3822 ++static void fm10k_mask_aer_comp_abort(struct pci_dev *pdev)
3823 ++{
3824 ++ u32 err_mask;
3825 ++ int pos;
3826 ++
3827 ++ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
3828 ++ if (!pos)
3829 ++ return;
3830 ++
3831 ++ /* Mask the completion abort bit in the ERR_UNCOR_MASK register,
3832 ++ * preventing the device from reporting these errors to the upstream
3833 ++ * PCIe root device. This avoids bringing down platforms which upgrade
3834 ++ * non-fatal completer aborts into machine check exceptions. Completer
3835 ++ * aborts can occur whenever a VF reads a queue it doesn't own.
3836 ++ */
3837 ++ pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, &err_mask);
3838 ++ err_mask |= PCI_ERR_UNC_COMP_ABORT;
3839 ++ pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, err_mask);
3840 ++
3841 ++ mmiowb();
3842 ++}
3843 ++
3844 + int fm10k_iov_resume(struct pci_dev *pdev)
3845 + {
3846 + struct fm10k_intfc *interface = pci_get_drvdata(pdev);
3847 +@@ -317,6 +339,12 @@ int fm10k_iov_resume(struct pci_dev *pdev)
3848 + if (!iov_data)
3849 + return -ENOMEM;
3850 +
3851 ++ /* Lower severity of completer abort error reporting as
3852 ++ * the VFs can trigger this any time they read a queue
3853 ++ * that they don't own.
3854 ++ */
3855 ++ fm10k_mask_aer_comp_abort(pdev);
3856 ++
3857 + /* allocate hardware resources for the VFs */
3858 + hw->iov.ops.assign_resources(hw, num_vfs, num_vfs);
3859 +
3860 +@@ -460,20 +488,6 @@ void fm10k_iov_disable(struct pci_dev *pdev)
3861 + fm10k_iov_free_data(pdev);
3862 + }
3863 +
3864 +-static void fm10k_disable_aer_comp_abort(struct pci_dev *pdev)
3865 +-{
3866 +- u32 err_sev;
3867 +- int pos;
3868 +-
3869 +- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
3870 +- if (!pos)
3871 +- return;
3872 +-
3873 +- pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &err_sev);
3874 +- err_sev &= ~PCI_ERR_UNC_COMP_ABORT;
3875 +- pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, err_sev);
3876 +-}
3877 +-
3878 + int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
3879 + {
3880 + int current_vfs = pci_num_vf(pdev);
3881 +@@ -495,12 +509,6 @@ int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
3882 +
3883 + /* allocate VFs if not already allocated */
3884 + if (num_vfs && num_vfs != current_vfs) {
3885 +- /* Disable completer abort error reporting as
3886 +- * the VFs can trigger this any time they read a queue
3887 +- * that they don't own.
3888 +- */
3889 +- fm10k_disable_aer_comp_abort(pdev);
3890 +-
3891 + err = pci_enable_sriov(pdev, num_vfs);
3892 + if (err) {
3893 + dev_err(&pdev->dev,
3894 +diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
3895 +index 9f4d700e09df..29ced6b74d36 100644
3896 +--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
3897 ++++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
3898 +@@ -51,9 +51,15 @@
3899 + *
3900 + * The 40 bit 82580 SYSTIM overflows every
3901 + * 2^40 * 10^-9 / 60 = 18.3 minutes.
3902 ++ *
3903 ++ * SYSTIM is converted to real time using a timecounter. As
3904 ++ * timecounter_cyc2time() allows old timestamps, the timecounter
3905 ++ * needs to be updated at least once per half of the SYSTIM interval.
3906 ++ * Scheduling of delayed work is not very accurate, so we aim for 8
3907 ++ * minutes to be sure the actual interval is shorter than 9.16 minutes.
3908 + */
3909 +
3910 +-#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9)
3911 ++#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 8)
3912 + #define IGB_PTP_TX_TIMEOUT (HZ * 15)
3913 + #define INCPERIOD_82576 BIT(E1000_TIMINCA_16NS_SHIFT)
3914 + #define INCVALUE_82576_MASK GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0)
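
The figures in the comment above check out: a 40-bit nanosecond counter wraps after 2^40 ns = 1,099,511,627,776 ns, roughly 1099.5 s or 18.32 minutes, so the timecounter must be refreshed at least once per half interval, about 9.16 minutes. Lowering the period from 9 to 8 minutes leaves headroom for delayed-work scheduling jitter:

        2^40 ns ~= 1099.5 s ~= 18.32 min        (full SYSTIM wrap)
        18.32 min / 2 ~= 9.16 min               (hard refresh deadline)
        8 min < 9.16 min                        (chosen period, with slack)
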
3915 +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
3916 +index 94c59939a8cf..e639a365ac2d 100644
3917 +--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
3918 ++++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
3919 +@@ -1745,6 +1745,7 @@ static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
3920 + err = mlx4_en_get_flow(dev, cmd, cmd->fs.location);
3921 + break;
3922 + case ETHTOOL_GRXCLSRLALL:
3923 ++ cmd->data = MAX_NUM_OF_FS_RULES;
3924 + while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) {
3925 + err = mlx4_en_get_flow(dev, cmd, i);
3926 + if (!err)
3927 +@@ -1811,6 +1812,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
3928 + struct mlx4_en_dev *mdev = priv->mdev;
3929 + struct mlx4_en_port_profile new_prof;
3930 + struct mlx4_en_priv *tmp;
3931 ++ int total_tx_count;
3932 + int port_up = 0;
3933 + int xdp_count;
3934 + int err = 0;
3935 +@@ -1825,13 +1827,12 @@ static int mlx4_en_set_channels(struct net_device *dev,
3936 +
3937 + mutex_lock(&mdev->state_lock);
3938 + xdp_count = priv->tx_ring_num[TX_XDP] ? channel->rx_count : 0;
3939 +- if (channel->tx_count * priv->prof->num_up + xdp_count >
3940 +- priv->mdev->profile.max_num_tx_rings_p_up * priv->prof->num_up) {
3941 ++ total_tx_count = channel->tx_count * priv->prof->num_up + xdp_count;
3942 ++ if (total_tx_count > MAX_TX_RINGS) {
3943 + err = -EINVAL;
3944 + en_err(priv,
3945 + "Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n",
3946 +- channel->tx_count * priv->prof->num_up + xdp_count,
3947 +- MAX_TX_RINGS);
3948 ++ total_tx_count, MAX_TX_RINGS);
3949 + goto out;
3950 + }
3951 +
3952 +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
3953 +index 0d7fd3f043cf..5868ec11db1a 100644
3954 +--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
3955 ++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
3956 +@@ -92,6 +92,7 @@ int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc)
3957 + struct mlx4_en_dev *mdev = priv->mdev;
3958 + struct mlx4_en_port_profile new_prof;
3959 + struct mlx4_en_priv *tmp;
3960 ++ int total_count;
3961 + int port_up = 0;
3962 + int err = 0;
3963 +
3964 +@@ -105,6 +106,14 @@ int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc)
3965 + MLX4_EN_NUM_UP_HIGH;
3966 + new_prof.tx_ring_num[TX] = new_prof.num_tx_rings_p_up *
3967 + new_prof.num_up;
3968 ++ total_count = new_prof.tx_ring_num[TX] + new_prof.tx_ring_num[TX_XDP];
3969 ++ if (total_count > MAX_TX_RINGS) {
3970 ++ err = -EINVAL;
3971 ++ en_err(priv,
3972 ++ "Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n",
3973 ++ total_count, MAX_TX_RINGS);
3974 ++ goto out;
3975 ++ }
3976 + err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
3977 + if (err)
3978 + goto out;
3979 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
3980 +index 55ccd90beeb0..7366033cd31c 100644
3981 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
3982 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
3983 +@@ -1861,7 +1861,7 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
3984 +
3985 + unlock:
3986 + mutex_unlock(&esw->state_lock);
3987 +- return 0;
3988 ++ return err;
3989 + }
3990 +
3991 + int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
3992 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
3993 +index c079f85593d6..82a53317285d 100644
3994 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
3995 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
3996 +@@ -520,7 +520,7 @@ static void del_sw_flow_group(struct fs_node *node)
3997 +
3998 + rhashtable_destroy(&fg->ftes_hash);
3999 + ida_destroy(&fg->fte_allocator);
4000 +- if (ft->autogroup.active)
4001 ++ if (ft->autogroup.active && fg->max_ftes == ft->autogroup.group_size)
4002 + ft->autogroup.num_groups--;
4003 + err = rhltable_remove(&ft->fgs_hash,
4004 + &fg->hash,
4005 +@@ -1065,6 +1065,8 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
4006 +
4007 + ft->autogroup.active = true;
4008 + ft->autogroup.required_groups = max_num_groups;
4009 ++ /* We save place for flow groups in addition to max types */
4010 ++ ft->autogroup.group_size = ft->max_fte / (max_num_groups + 1);
4011 +
4012 + return ft;
4013 + }
4014 +@@ -1270,8 +1272,7 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft
4015 + return ERR_PTR(-ENOENT);
4016 +
4017 + if (ft->autogroup.num_groups < ft->autogroup.required_groups)
4018 +- /* We save place for flow groups in addition to max types */
4019 +- group_size = ft->max_fte / (ft->autogroup.required_groups + 1);
4020 ++ group_size = ft->autogroup.group_size;
4021 +
4022 + /* ft->max_fte == ft->autogroup.max_types */
4023 + if (group_size == 0)
4024 +@@ -1298,7 +1299,8 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft
4025 + if (IS_ERR(fg))
4026 + goto out;
4027 +
4028 +- ft->autogroup.num_groups++;
4029 ++ if (group_size == ft->autogroup.group_size)
4030 ++ ft->autogroup.num_groups++;
4031 +
4032 + out:
4033 + return fg;
4034 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
4035 +index 32070e5d993d..ba62fbce23a2 100644
4036 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
4037 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
4038 +@@ -121,6 +121,7 @@ struct mlx5_flow_table {
4039 + struct {
4040 + bool active;
4041 + unsigned int required_groups;
4042 ++ unsigned int group_size;
4043 + unsigned int num_groups;
4044 + } autogroup;
4045 + /* Protect fwd_rules */
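
The flow-table hunks above cache the autogroup size once, at table-creation time, and then count a group toward autogroup.num_groups only if it was carved out at exactly that size -- symmetrically on creation (alloc_auto_flow_group()) and deletion (del_sw_flow_group()) -- so leftover, smaller groups cannot skew the counter. With illustrative numbers: max_fte = 100 and max_num_groups = 4 give group_size = 100 / (4 + 1) = 20, and a later group created with fewer than 20 FTEs neither increments nor decrements num_groups.
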
4046 +diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
4047 +index 2cf89126fb23..d765e7a69d6b 100644
4048 +--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
4049 ++++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
4050 +@@ -86,6 +86,8 @@ retry:
4051 + return err;
4052 +
4053 + if (fsm_state_err != MLXFW_FSM_STATE_ERR_OK) {
4054 ++ fsm_state_err = min_t(enum mlxfw_fsm_state_err,
4055 ++ fsm_state_err, MLXFW_FSM_STATE_ERR_MAX);
4056 + pr_err("Firmware flash failed: %s\n",
4057 + mlxfw_fsm_state_err_str[fsm_state_err]);
4058 + return -EINVAL;
4059 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
4060 +index 2ab9cf25a08a..3f54b3ca38ba 100644
4061 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
4062 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
4063 +@@ -970,7 +970,7 @@ u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
4064 + if (d)
4065 + return l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
4066 + else
4067 +- return l3mdev_fib_table(ol_dev) ? : RT_TABLE_MAIN;
4068 ++ return RT_TABLE_MAIN;
4069 + }
4070 +
4071 + static struct mlxsw_sp_rif *
4072 +@@ -1532,27 +1532,10 @@ static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
4073 + {
4074 + struct mlxsw_sp_ipip_entry *ipip_entry =
4075 + mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
4076 +- enum mlxsw_sp_l3proto ul_proto;
4077 +- union mlxsw_sp_l3addr saddr;
4078 +- u32 ul_tb_id;
4079 +
4080 + if (!ipip_entry)
4081 + return 0;
4082 +
4083 +- /* For flat configuration cases, moving overlay to a different VRF might
4084 +- * cause local address conflict, and the conflicting tunnels need to be
4085 +- * demoted.
4086 +- */
4087 +- ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
4088 +- ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
4089 +- saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
4090 +- if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
4091 +- saddr, ul_tb_id,
4092 +- ipip_entry)) {
4093 +- mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
4094 +- return 0;
4095 +- }
4096 +-
4097 + return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
4098 + true, false, false, extack);
4099 + }
4100 +diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
4101 +index dbd00982fd2b..2134045e14c3 100644
4102 +--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
4103 ++++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
4104 +@@ -206,6 +206,11 @@ enum nfp_bpf_map_use {
4105 + NFP_MAP_USE_ATOMIC_CNT,
4106 + };
4107 +
4108 ++struct nfp_bpf_map_word {
4109 ++ unsigned char type :4;
4110 ++ unsigned char non_zero_update :1;
4111 ++};
4112 ++
4113 + /**
4114 + * struct nfp_bpf_map - private per-map data attached to BPF maps for offload
4115 + * @offmap: pointer to the offloaded BPF map
4116 +@@ -219,7 +224,7 @@ struct nfp_bpf_map {
4117 + struct nfp_app_bpf *bpf;
4118 + u32 tid;
4119 + struct list_head l;
4120 +- enum nfp_bpf_map_use use_map[];
4121 ++ struct nfp_bpf_map_word use_map[];
4122 + };
4123 +
4124 + struct nfp_bpf_neutral_map {
4125 +diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
4126 +index 1ccd6371a15b..6140e4650b71 100644
4127 +--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
4128 ++++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
4129 +@@ -299,10 +299,25 @@ static void nfp_map_bpf_byte_swap(struct nfp_bpf_map *nfp_map, void *value)
4130 + unsigned int i;
4131 +
4132 + for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++)
4133 +- if (nfp_map->use_map[i] == NFP_MAP_USE_ATOMIC_CNT)
4134 ++ if (nfp_map->use_map[i].type == NFP_MAP_USE_ATOMIC_CNT)
4135 + word[i] = (__force u32)cpu_to_be32(word[i]);
4136 + }
4137 +
4138 ++/* Mark value as unsafely initialized in case it becomes atomic later
4139 ++ * and we didn't byte swap something non-byte swap neutral.
4140 ++ */
4141 ++static void
4142 ++nfp_map_bpf_byte_swap_record(struct nfp_bpf_map *nfp_map, void *value)
4143 ++{
4144 ++ u32 *word = value;
4145 ++ unsigned int i;
4146 ++
4147 ++ for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++)
4148 ++ if (nfp_map->use_map[i].type == NFP_MAP_UNUSED &&
4149 ++ word[i] != (__force u32)cpu_to_be32(word[i]))
4150 ++ nfp_map->use_map[i].non_zero_update = 1;
4151 ++}
4152 ++
4153 + static int
4154 + nfp_bpf_map_lookup_entry(struct bpf_offloaded_map *offmap,
4155 + void *key, void *value)
4156 +@@ -322,6 +337,7 @@ nfp_bpf_map_update_entry(struct bpf_offloaded_map *offmap,
4157 + void *key, void *value, u64 flags)
4158 + {
4159 + nfp_map_bpf_byte_swap(offmap->dev_priv, value);
4160 ++ nfp_map_bpf_byte_swap_record(offmap->dev_priv, value);
4161 + return nfp_bpf_ctrl_update_entry(offmap, key, value, flags);
4162 + }
4163 +
4164 +diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
4165 +index a6e9248669e1..db7e186dae56 100644
4166 +--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
4167 ++++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
4168 +@@ -108,6 +108,46 @@ exit_set_location:
4169 + nfp_prog->adjust_head_location = location;
4170 + }
4171 +
4172 ++static bool nfp_bpf_map_update_value_ok(struct bpf_verifier_env *env)
4173 ++{
4174 ++ const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1;
4175 ++ const struct bpf_reg_state *reg3 = cur_regs(env) + BPF_REG_3;
4176 ++ struct bpf_offloaded_map *offmap;
4177 ++ struct bpf_func_state *state;
4178 ++ struct nfp_bpf_map *nfp_map;
4179 ++ int off, i;
4180 ++
4181 ++ state = env->cur_state->frame[reg3->frameno];
4182 ++
4183 ++ /* We need to record each time update happens with non-zero words,
4184 ++ * in case such word is used in atomic operations.
4185 ++ * Implicitly depend on nfp_bpf_stack_arg_ok(reg3) being run before.
4186 ++ */
4187 ++
4188 ++ offmap = map_to_offmap(reg1->map_ptr);
4189 ++ nfp_map = offmap->dev_priv;
4190 ++ off = reg3->off + reg3->var_off.value;
4191 ++
4192 ++ for (i = 0; i < offmap->map.value_size; i++) {
4193 ++ struct bpf_stack_state *stack_entry;
4194 ++ unsigned int soff;
4195 ++
4196 ++ soff = -(off + i) - 1;
4197 ++ stack_entry = &state->stack[soff / BPF_REG_SIZE];
4198 ++ if (stack_entry->slot_type[soff % BPF_REG_SIZE] == STACK_ZERO)
4199 ++ continue;
4200 ++
4201 ++ if (nfp_map->use_map[i / 4].type == NFP_MAP_USE_ATOMIC_CNT) {
4202 ++ pr_vlog(env, "value at offset %d/%d may be non-zero, bpf_map_update_elem() is required to initialize atomic counters to zero to avoid offload endian issues\n",
4203 ++ i, soff);
4204 ++ return false;
4205 ++ }
4206 ++ nfp_map->use_map[i / 4].non_zero_update = 1;
4207 ++ }
4208 ++
4209 ++ return true;
4210 ++}
4211 ++
4212 + static int
4213 + nfp_bpf_stack_arg_ok(const char *fname, struct bpf_verifier_env *env,
4214 + const struct bpf_reg_state *reg,
4215 +@@ -198,7 +238,8 @@ nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
4216 + bpf->helpers.map_update, reg1) ||
4217 + !nfp_bpf_stack_arg_ok("map_update", env, reg2,
4218 + meta->func_id ? &meta->arg2 : NULL) ||
4219 +- !nfp_bpf_stack_arg_ok("map_update", env, reg3, NULL))
4220 ++ !nfp_bpf_stack_arg_ok("map_update", env, reg3, NULL) ||
4221 ++ !nfp_bpf_map_update_value_ok(env))
4222 + return -EOPNOTSUPP;
4223 + break;
4224 +
4225 +@@ -376,15 +417,22 @@ nfp_bpf_map_mark_used_one(struct bpf_verifier_env *env,
4226 + struct nfp_bpf_map *nfp_map,
4227 + unsigned int off, enum nfp_bpf_map_use use)
4228 + {
4229 +- if (nfp_map->use_map[off / 4] != NFP_MAP_UNUSED &&
4230 +- nfp_map->use_map[off / 4] != use) {
4231 ++ if (nfp_map->use_map[off / 4].type != NFP_MAP_UNUSED &&
4232 ++ nfp_map->use_map[off / 4].type != use) {
4233 + pr_vlog(env, "map value use type conflict %s vs %s off: %u\n",
4234 +- nfp_bpf_map_use_name(nfp_map->use_map[off / 4]),
4235 ++ nfp_bpf_map_use_name(nfp_map->use_map[off / 4].type),
4236 + nfp_bpf_map_use_name(use), off);
4237 + return -EOPNOTSUPP;
4238 + }
4239 +
4240 +- nfp_map->use_map[off / 4] = use;
4241 ++ if (nfp_map->use_map[off / 4].non_zero_update &&
4242 ++ use == NFP_MAP_USE_ATOMIC_CNT) {
4243 ++ pr_vlog(env, "atomic counter in map value may already be initialized to non-zero value off: %u\n",
4244 ++ off);
4245 ++ return -EOPNOTSUPP;
4246 ++ }
4247 ++
4248 ++ nfp_map->use_map[off / 4].type = use;
4249 +
4250 + return 0;
4251 + }
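
The common thread through the nfp hunks above: atomic counters live byte-swapped on the NFP, so a map word may only be classified as NFP_MAP_USE_ATOMIC_CNT if everything ever written to it was byte-order neutral, zero being the common case. Each 32-bit value word therefore carries a non_zero_update taint bit, set both from the update path and from the verifier when a stack-built value may be non-zero, and nfp_bpf_map_mark_used_one() refuses the atomic classification for tainted words. The recording step condensed, with a note on what counts as neutral:

        /* A word is byte-order neutral iff swapping leaves it unchanged
         * (zero, or any palindromic pattern such as 0x11111111). */
        if (nfp_map->use_map[i].type == NFP_MAP_UNUSED &&
            word[i] != (__force u32)cpu_to_be32(word[i]))
                nfp_map->use_map[i].non_zero_update = 1;
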
4252 +diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
4253 +index a60e1c8d470a..32e786a3952b 100644
4254 +--- a/drivers/net/ethernet/qlogic/qed/qed.h
4255 ++++ b/drivers/net/ethernet/qlogic/qed/qed.h
4256 +@@ -914,7 +914,7 @@ u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc);
4257 + /* Prototypes */
4258 + int qed_fill_dev_info(struct qed_dev *cdev,
4259 + struct qed_dev_info *dev_info);
4260 +-void qed_link_update(struct qed_hwfn *hwfn);
4261 ++void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt);
4262 + u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
4263 + u32 input_len, u8 *input_buf,
4264 + u32 max_size, u8 *unzip_buf);
4265 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
4266 +index 637687b766ff..049a83b40e46 100644
4267 +--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
4268 ++++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
4269 +@@ -1462,6 +1462,7 @@ static int qed_get_link_data(struct qed_hwfn *hwfn,
4270 + }
4271 +
4272 + static void qed_fill_link(struct qed_hwfn *hwfn,
4273 ++ struct qed_ptt *ptt,
4274 + struct qed_link_output *if_link)
4275 + {
4276 + struct qed_mcp_link_params params;
4277 +@@ -1542,7 +1543,7 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
4278 +
4279 + /* TODO - fill duplex properly */
4280 + if_link->duplex = DUPLEX_FULL;
4281 +- qed_mcp_get_media_type(hwfn->cdev, &media_type);
4282 ++ qed_mcp_get_media_type(hwfn, ptt, &media_type);
4283 + if_link->port = qed_get_port_type(media_type);
4284 +
4285 + if_link->autoneg = params.speed.autoneg;
4286 +@@ -1598,21 +1599,34 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
4287 + static void qed_get_current_link(struct qed_dev *cdev,
4288 + struct qed_link_output *if_link)
4289 + {
4290 ++ struct qed_hwfn *hwfn;
4291 ++ struct qed_ptt *ptt;
4292 + int i;
4293 +
4294 +- qed_fill_link(&cdev->hwfns[0], if_link);
4295 ++ hwfn = &cdev->hwfns[0];
4296 ++ if (IS_PF(cdev)) {
4297 ++ ptt = qed_ptt_acquire(hwfn);
4298 ++ if (ptt) {
4299 ++ qed_fill_link(hwfn, ptt, if_link);
4300 ++ qed_ptt_release(hwfn, ptt);
4301 ++ } else {
4302 ++ DP_NOTICE(hwfn, "Failed to fill link; No PTT\n");
4303 ++ }
4304 ++ } else {
4305 ++ qed_fill_link(hwfn, NULL, if_link);
4306 ++ }
4307 +
4308 + for_each_hwfn(cdev, i)
4309 + qed_inform_vf_link_state(&cdev->hwfns[i]);
4310 + }
4311 +
4312 +-void qed_link_update(struct qed_hwfn *hwfn)
4313 ++void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
4314 + {
4315 + void *cookie = hwfn->cdev->ops_cookie;
4316 + struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
4317 + struct qed_link_output if_link;
4318 +
4319 +- qed_fill_link(hwfn, &if_link);
4320 ++ qed_fill_link(hwfn, ptt, &if_link);
4321 + qed_inform_vf_link_state(hwfn);
4322 +
4323 + if (IS_LEAD_HWFN(hwfn) && cookie)
4324 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
4325 +index 58c7eb9d8e1b..938ace333af1 100644
4326 +--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
4327 ++++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
4328 +@@ -1382,7 +1382,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
4329 + if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
4330 + qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link);
4331 +
4332 +- qed_link_update(p_hwfn);
4333 ++ qed_link_update(p_hwfn, p_ptt);
4334 + out:
4335 + spin_unlock_bh(&p_hwfn->mcp_info->link_lock);
4336 + }
4337 +@@ -1849,12 +1849,10 @@ int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
4338 + return 0;
4339 + }
4340 +
4341 +-int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type)
4342 ++int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn,
4343 ++ struct qed_ptt *p_ptt, u32 *p_media_type)
4344 + {
4345 +- struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
4346 +- struct qed_ptt *p_ptt;
4347 +-
4348 +- if (IS_VF(cdev))
4349 ++ if (IS_VF(p_hwfn->cdev))
4350 + return -EINVAL;
4351 +
4352 + if (!qed_mcp_is_init(p_hwfn)) {
4353 +@@ -1862,16 +1860,15 @@ int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type)
4354 + return -EBUSY;
4355 + }
4356 +
4357 +- *p_media_type = MEDIA_UNSPECIFIED;
4358 +-
4359 +- p_ptt = qed_ptt_acquire(p_hwfn);
4360 +- if (!p_ptt)
4361 +- return -EBUSY;
4362 +-
4363 +- *p_media_type = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
4364 +- offsetof(struct public_port, media_type));
4365 ++ if (!p_ptt) {
4366 ++ *p_media_type = MEDIA_UNSPECIFIED;
4367 ++ return -EINVAL;
4368 ++ }
4369 +
4370 +- qed_ptt_release(p_hwfn, p_ptt);
4371 ++ *p_media_type = qed_rd(p_hwfn, p_ptt,
4372 ++ p_hwfn->mcp_info->port_addr +
4373 ++ offsetof(struct public_port,
4374 ++ media_type));
4375 +
4376 + return 0;
4377 + }
4378 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
4379 +index 85e6b3989e7a..80a6b5d1ff33 100644
4380 +--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
4381 ++++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
4382 +@@ -322,14 +322,15 @@ int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
4383 + * @brief Get media type value of the port.
4384 + *
4385 + * @param cdev - qed dev pointer
4386 ++ * @param p_ptt
4387 + * @param mfw_ver - media type value
4388 + *
4389 + * @return int -
4390 + * 0 - Operation was successful.
4391 + * -EBUSY - Operation failed
4392 + */
4393 +-int qed_mcp_get_media_type(struct qed_dev *cdev,
4394 +- u32 *media_type);
4395 ++int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn,
4396 ++ struct qed_ptt *p_ptt, u32 *media_type);
4397 +
4398 + /**
4399 + * @brief General function for sending commands to the MCP
4400 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
4401 +index 6ab3fb008139..5dda547772c1 100644
4402 +--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
4403 ++++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
4404 +@@ -1698,7 +1698,7 @@ static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
4405 + ops->ports_update(cookie, vxlan_port, geneve_port);
4406 +
4407 + /* Always update link configuration according to bulletin */
4408 +- qed_link_update(hwfn);
4409 ++ qed_link_update(hwfn, NULL);
4410 + }
4411 +
4412 + void qed_iov_vf_task(struct work_struct *work)
4413 +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
4414 +index 4b76c69fe86d..834208e55f7b 100644
4415 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
4416 ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
4417 +@@ -883,7 +883,7 @@ static u8 qlcnic_dcb_get_capability(struct net_device *netdev, int capid,
4418 + struct qlcnic_adapter *adapter = netdev_priv(netdev);
4419 +
4420 + if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
4421 +- return 0;
4422 ++ return 1;
4423 +
4424 + switch (capid) {
4425 + case DCB_CAP_ATTR_PG:
4426 +diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
4427 +index f21661532ed3..cc8fbf398c0d 100644
4428 +--- a/drivers/net/ethernet/sfc/ptp.c
4429 ++++ b/drivers/net/ethernet/sfc/ptp.c
4430 +@@ -1534,7 +1534,8 @@ void efx_ptp_remove(struct efx_nic *efx)
4431 + (void)efx_ptp_disable(efx);
4432 +
4433 + cancel_work_sync(&efx->ptp_data->work);
4434 +- cancel_work_sync(&efx->ptp_data->pps_work);
4435 ++ if (efx->ptp_data->pps_workwq)
4436 ++ cancel_work_sync(&efx->ptp_data->pps_work);
4437 +
4438 + skb_queue_purge(&efx->ptp_data->rxq);
4439 + skb_queue_purge(&efx->ptp_data->txq);
4440 +diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
4441 +index d2caeb9edc04..28d582c18afb 100644
4442 +--- a/drivers/net/ethernet/socionext/netsec.c
4443 ++++ b/drivers/net/ethernet/socionext/netsec.c
4444 +@@ -274,6 +274,7 @@ struct netsec_priv {
4445 + struct clk *clk;
4446 + u32 msg_enable;
4447 + u32 freq;
4448 ++ u32 phy_addr;
4449 + bool rx_cksum_offload_flag;
4450 + };
4451 +
4452 +@@ -1346,11 +1347,11 @@ static int netsec_netdev_stop(struct net_device *ndev)
4453 + netsec_uninit_pkt_dring(priv, NETSEC_RING_TX);
4454 + netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
4455 +
4456 +- ret = netsec_reset_hardware(priv, false);
4457 +-
4458 + phy_stop(ndev->phydev);
4459 + phy_disconnect(ndev->phydev);
4460 +
4461 ++ ret = netsec_reset_hardware(priv, false);
4462 ++
4463 + pm_runtime_put_sync(priv->dev);
4464 +
4465 + return ret;
4466 +@@ -1360,6 +1361,7 @@ static int netsec_netdev_init(struct net_device *ndev)
4467 + {
4468 + struct netsec_priv *priv = netdev_priv(ndev);
4469 + int ret;
4470 ++ u16 data;
4471 +
4472 + ret = netsec_alloc_dring(priv, NETSEC_RING_TX);
4473 + if (ret)
4474 +@@ -1369,6 +1371,11 @@ static int netsec_netdev_init(struct net_device *ndev)
4475 + if (ret)
4476 + goto err1;
4477 +
4478 ++ /* set phy power down */
4479 ++ data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR) |
4480 ++ BMCR_PDOWN;
4481 ++ netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data);
4482 ++
4483 + ret = netsec_reset_hardware(priv, true);
4484 + if (ret)
4485 + goto err2;
4486 +@@ -1418,7 +1425,7 @@ static const struct net_device_ops netsec_netdev_ops = {
4487 + };
4488 +
4489 + static int netsec_of_probe(struct platform_device *pdev,
4490 +- struct netsec_priv *priv)
4491 ++ struct netsec_priv *priv, u32 *phy_addr)
4492 + {
4493 + priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
4494 + if (!priv->phy_np) {
4495 +@@ -1426,6 +1433,8 @@ static int netsec_of_probe(struct platform_device *pdev,
4496 + return -EINVAL;
4497 + }
4498 +
4499 ++ *phy_addr = of_mdio_parse_addr(&pdev->dev, priv->phy_np);
4500 ++
4501 + priv->clk = devm_clk_get(&pdev->dev, NULL); /* get by 'phy_ref_clk' */
4502 + if (IS_ERR(priv->clk)) {
4503 + dev_err(&pdev->dev, "phy_ref_clk not found\n");
4504 +@@ -1626,12 +1635,14 @@ static int netsec_probe(struct platform_device *pdev)
4505 + }
4506 +
4507 + if (dev_of_node(&pdev->dev))
4508 +- ret = netsec_of_probe(pdev, priv);
4509 ++ ret = netsec_of_probe(pdev, priv, &phy_addr);
4510 + else
4511 + ret = netsec_acpi_probe(pdev, priv, &phy_addr);
4512 + if (ret)
4513 + goto free_ndev;
4514 +
4515 ++ priv->phy_addr = phy_addr;
4516 ++
4517 + if (!priv->freq) {
4518 + dev_err(&pdev->dev, "missing PHY reference clock frequency\n");
4519 + ret = -ENODEV;
4520 +diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
4521 +index 1afed85550c0..8417d4c17844 100644
4522 +--- a/drivers/net/ethernet/ti/cpsw.c
4523 ++++ b/drivers/net/ethernet/ti/cpsw.c
4524 +@@ -642,6 +642,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
4525 +
4526 + /* Clear all mcast from ALE */
4527 + cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1);
4528 ++ __dev_mc_unsync(ndev, NULL);
4529 +
4530 + /* Flood All Unicast Packets to Host port */
4531 + cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
4532 +diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
4533 +index 05115fb0c97a..10a8ef2d025a 100644
4534 +--- a/drivers/net/macsec.c
4535 ++++ b/drivers/net/macsec.c
4536 +@@ -2813,9 +2813,6 @@ static int macsec_dev_open(struct net_device *dev)
4537 + struct net_device *real_dev = macsec->real_dev;
4538 + int err;
4539 +
4540 +- if (!(real_dev->flags & IFF_UP))
4541 +- return -ENETDOWN;
4542 +-
4543 + err = dev_uc_add(real_dev, dev->dev_addr);
4544 + if (err < 0)
4545 + return err;
4546 +@@ -3305,6 +3302,9 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
4547 + if (err < 0)
4548 + goto del_dev;
4549 +
4550 ++ netif_stacked_transfer_operstate(real_dev, dev);
4551 ++ linkwatch_fire_event(dev);
4552 ++
4553 + macsec_generation++;
4554 +
4555 + return 0;
4556 +@@ -3489,6 +3489,20 @@ static int macsec_notify(struct notifier_block *this, unsigned long event,
4557 + return NOTIFY_DONE;
4558 +
4559 + switch (event) {
4560 ++ case NETDEV_DOWN:
4561 ++ case NETDEV_UP:
4562 ++ case NETDEV_CHANGE: {
4563 ++ struct macsec_dev *m, *n;
4564 ++ struct macsec_rxh_data *rxd;
4565 ++
4566 ++ rxd = macsec_data_rtnl(real_dev);
4567 ++ list_for_each_entry_safe(m, n, &rxd->secys, secys) {
4568 ++ struct net_device *dev = m->secy.netdev;
4569 ++
4570 ++ netif_stacked_transfer_operstate(real_dev, dev);
4571 ++ }
4572 ++ break;
4573 ++ }
4574 + case NETDEV_UNREGISTER: {
4575 + struct macsec_dev *m, *n;
4576 + struct macsec_rxh_data *rxd;
4577 +diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
4578 +index b12023bc2cab..df8d49ad48c3 100644
4579 +--- a/drivers/net/ntb_netdev.c
4580 ++++ b/drivers/net/ntb_netdev.c
4581 +@@ -236,7 +236,7 @@ static void ntb_netdev_tx_timer(struct timer_list *t)
4582 + struct net_device *ndev = dev->ndev;
4583 +
4584 + if (ntb_transport_tx_free_entry(dev->qp) < tx_stop) {
4585 +- mod_timer(&dev->tx_timer, jiffies + msecs_to_jiffies(tx_time));
4586 ++ mod_timer(&dev->tx_timer, jiffies + usecs_to_jiffies(tx_time));
4587 + } else {
4588 + /* Make sure anybody stopping the queue after this sees the new
4589 + * value of ntb_transport_tx_free_entry()
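
The ntb_netdev one-liner above is a units fix: the tx_time parameter is expressed in microseconds, but the timer was re-armed with msecs_to_jiffies(), stretching the poll interval by a factor of 1000. A compilable illustration of the difference, using the usual round-up formulas in place of the kernel helpers and an assumed tick rate:

    #include <stdio.h>

    #define HZ 250                  /* assumed tick rate for this example */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static unsigned long msecs_to_jiffies(unsigned int ms)
    {
            return DIV_ROUND_UP((unsigned long)ms * HZ, 1000UL);
    }

    static unsigned long usecs_to_jiffies(unsigned int us)
    {
            return DIV_ROUND_UP((unsigned long)us * HZ, 1000000UL);
    }

    int main(void)
    {
            unsigned int tx_time = 4000;    /* parameter meant as microseconds */

            /* treating usecs as msecs stretches the interval ~1000x */
            printf("as msecs: %lu jiffies\n", msecs_to_jiffies(tx_time)); /* 1000 */
            printf("as usecs: %lu jiffies\n", usecs_to_jiffies(tx_time)); /* 1 */
            return 0;
    }
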
4590 +diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
4591 +index e4bf9e7d7583..eeadfde15940 100644
4592 +--- a/drivers/net/phy/dp83867.c
4593 ++++ b/drivers/net/phy/dp83867.c
4594 +@@ -33,10 +33,18 @@
4595 +
4596 + /* Extended Registers */
4597 + #define DP83867_CFG4 0x0031
4598 ++#define DP83867_CFG4_SGMII_ANEG_MASK (BIT(5) | BIT(6))
4599 ++#define DP83867_CFG4_SGMII_ANEG_TIMER_11MS (3 << 5)
4600 ++#define DP83867_CFG4_SGMII_ANEG_TIMER_800US (2 << 5)
4601 ++#define DP83867_CFG4_SGMII_ANEG_TIMER_2US (1 << 5)
4602 ++#define DP83867_CFG4_SGMII_ANEG_TIMER_16MS (0 << 5)
4603 ++
4604 + #define DP83867_RGMIICTL 0x0032
4605 + #define DP83867_STRAP_STS1 0x006E
4606 + #define DP83867_RGMIIDCTL 0x0086
4607 + #define DP83867_IO_MUX_CFG 0x0170
4608 ++#define DP83867_10M_SGMII_CFG 0x016F
4609 ++#define DP83867_10M_SGMII_RATE_ADAPT_MASK BIT(7)
4610 +
4611 + #define DP83867_SW_RESET BIT(15)
4612 + #define DP83867_SW_RESTART BIT(14)
4613 +@@ -294,6 +302,35 @@ static int dp83867_config_init(struct phy_device *phydev)
4614 + }
4615 + }
4616 +
4617 ++ if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
4618 ++ /* For support SPEED_10 in SGMII mode
4619 ++ * DP83867_10M_SGMII_RATE_ADAPT bit
4620 ++ * has to be cleared by software. That
4621 ++ * does not affect SPEED_100 and
4622 ++ * SPEED_1000.
4623 ++ */
4624 ++ val = phy_read_mmd(phydev, DP83867_DEVADDR,
4625 ++ DP83867_10M_SGMII_CFG);
4626 ++ val &= ~DP83867_10M_SGMII_RATE_ADAPT_MASK;
4627 ++ ret = phy_write_mmd(phydev, DP83867_DEVADDR,
4628 ++ DP83867_10M_SGMII_CFG, val);
4629 ++
4630 ++ if (ret)
4631 ++ return ret;
4632 ++
4633 ++ /* After reset SGMII Autoneg timer is set to 2us (bits 6 and 5
4634 ++ * are 01). That is not enough to finalize autoneg on some
4635 ++ * devices. Increase this timer duration to maximum 16ms.
4636 ++ */
4637 ++ val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4);
4638 ++ val &= ~DP83867_CFG4_SGMII_ANEG_MASK;
4639 ++ val |= DP83867_CFG4_SGMII_ANEG_TIMER_16MS;
4640 ++ ret = phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4, val);
4641 ++
4642 ++ if (ret)
4643 ++ return ret;
4644 ++ }
4645 ++
4646 + /* Enable Interrupt output INT_OE in CFG3 register */
4647 + if (phy_interrupt_is_valid(phydev)) {
4648 + val = phy_read(phydev, DP83867_CFG3);
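
Both dp83867 additions follow the same shape: read an extended (MMD) register, clear a field, set the new value, write it back; once to clear the 10M rate-adaptation bit so SPEED_10 works over SGMII, and once to widen the SGMII autoneg timer from the 2 us reset default to 16 ms. A condensed kernel-context sketch of that MMD read-modify-write (not the full driver; note the hunk itself does not check the read for errors):

    #include <linux/phy.h>

    static int update_mmd_field(struct phy_device *phydev, int devad, u32 reg,
                                u16 mask, u16 set)
    {
            int val = phy_read_mmd(phydev, devad, reg);

            if (val < 0)
                    return val;     /* phy_read_mmd() reports errors as negative */

            val &= ~mask;
            val |= set;
            return phy_write_mmd(phydev, devad, reg, val);
    }
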
4649 +diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
4650 +index 9f895083bc0a..7f5ee6bb4430 100644
4651 +--- a/drivers/net/vrf.c
4652 ++++ b/drivers/net/vrf.c
4653 +@@ -993,24 +993,23 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
4654 + struct sk_buff *skb)
4655 + {
4656 + int orig_iif = skb->skb_iif;
4657 +- bool need_strict;
4658 ++ bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
4659 ++ bool is_ndisc = ipv6_ndisc_frame(skb);
4660 +
4661 +- /* loopback traffic; do not push through packet taps again.
4662 +- * Reset pkt_type for upper layers to process skb
4663 ++ /* loopback, multicast & non-ND link-local traffic; do not push through
4664 ++ * packet taps again. Reset pkt_type for upper layers to process skb
4665 + */
4666 +- if (skb->pkt_type == PACKET_LOOPBACK) {
4667 ++ if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) {
4668 + skb->dev = vrf_dev;
4669 + skb->skb_iif = vrf_dev->ifindex;
4670 + IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
4671 +- skb->pkt_type = PACKET_HOST;
4672 ++ if (skb->pkt_type == PACKET_LOOPBACK)
4673 ++ skb->pkt_type = PACKET_HOST;
4674 + goto out;
4675 + }
4676 +
4677 +- /* if packet is NDISC or addressed to multicast or link-local
4678 +- * then keep the ingress interface
4679 +- */
4680 +- need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
4681 +- if (!ipv6_ndisc_frame(skb) && !need_strict) {
4682 ++ /* if packet is NDISC then keep the ingress interface */
4683 ++ if (!is_ndisc) {
4684 + vrf_rx_stats(vrf_dev, skb->len);
4685 + skb->dev = vrf_dev;
4686 + skb->skb_iif = vrf_dev->ifindex;
4687 +diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
4688 +index d3d33cc2adfd..613ca74f1b28 100644
4689 +--- a/drivers/net/wireless/ath/ath10k/mac.c
4690 ++++ b/drivers/net/wireless/ath/ath10k/mac.c
4691 +@@ -4686,6 +4686,14 @@ static int ath10k_start(struct ieee80211_hw *hw)
4692 + goto err_core_stop;
4693 + }
4694 +
4695 ++ if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) {
4696 ++ ret = ath10k_wmi_scan_prob_req_oui(ar, ar->mac_addr);
4697 ++ if (ret) {
4698 ++ ath10k_err(ar, "failed to set prob req oui: %i\n", ret);
4699 ++ goto err_core_stop;
4700 ++ }
4701 ++ }
4702 ++
4703 + if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
4704 + ret = ath10k_wmi_adaptive_qcs(ar, true);
4705 + if (ret) {
4706 +@@ -8551,12 +8559,6 @@ int ath10k_mac_register(struct ath10k *ar)
4707 + }
4708 +
4709 + if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) {
4710 +- ret = ath10k_wmi_scan_prob_req_oui(ar, ar->mac_addr);
4711 +- if (ret) {
4712 +- ath10k_err(ar, "failed to set prob req oui: %i\n", ret);
4713 +- goto err_dfs_detector_exit;
4714 +- }
4715 +-
4716 + ar->hw->wiphy->features |=
4717 + NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
4718 + }
4719 +diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
4720 +index 97fa5c74f2fe..50a801a5d4f1 100644
4721 +--- a/drivers/net/wireless/ath/ath10k/pci.c
4722 ++++ b/drivers/net/wireless/ath/ath10k/pci.c
4723 +@@ -1054,10 +1054,9 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
4724 + struct ath10k_ce *ce = ath10k_ce_priv(ar);
4725 + int ret = 0;
4726 + u32 *buf;
4727 +- unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
4728 ++ unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
4729 + struct ath10k_ce_pipe *ce_diag;
4730 + void *data_buf = NULL;
4731 +- u32 ce_data; /* Host buffer address in CE space */
4732 + dma_addr_t ce_data_base = 0;
4733 + int i;
4734 +
4735 +@@ -1071,9 +1070,10 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
4736 + * 1) 4-byte alignment
4737 + * 2) Buffer in DMA-able space
4738 + */
4739 +- orig_nbytes = nbytes;
4740 ++ alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
4741 ++
4742 + data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
4743 +- orig_nbytes,
4744 ++ alloc_nbytes,
4745 + &ce_data_base,
4746 + GFP_ATOMIC);
4747 + if (!data_buf) {
4748 +@@ -1081,9 +1081,6 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
4749 + goto done;
4750 + }
4751 +
4752 +- /* Copy caller's data to allocated DMA buf */
4753 +- memcpy(data_buf, data, orig_nbytes);
4754 +-
4755 + /*
4756 + * The address supplied by the caller is in the
4757 + * Target CPU virtual address space.
4758 +@@ -1096,12 +1093,14 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
4759 + */
4760 + address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
4761 +
4762 +- remaining_bytes = orig_nbytes;
4763 +- ce_data = ce_data_base;
4764 ++ remaining_bytes = nbytes;
4765 + while (remaining_bytes) {
4766 + /* FIXME: check cast */
4767 + nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
4768 +
4769 ++ /* Copy caller's data to allocated DMA buf */
4770 ++ memcpy(data_buf, data, nbytes);
4771 ++
4772 + /* Set up to receive directly into Target(!) address */
4773 + ret = ce_diag->ops->ce_rx_post_buf(ce_diag, &address, address);
4774 + if (ret != 0)
4775 +@@ -1111,7 +1110,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
4776 + * Request CE to send caller-supplied data that
4777 + * was copied to bounce buffer to Target(!) address.
4778 + */
4779 +- ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
4780 ++ ret = ath10k_ce_send_nolock(ce_diag, NULL, ce_data_base,
4781 + nbytes, 0, 0);
4782 + if (ret != 0)
4783 + goto done;
4784 +@@ -1152,12 +1151,12 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
4785 +
4786 + remaining_bytes -= nbytes;
4787 + address += nbytes;
4788 +- ce_data += nbytes;
4789 ++ data += nbytes;
4790 + }
4791 +
4792 + done:
4793 + if (data_buf) {
4794 +- dma_free_coherent(ar->dev, orig_nbytes, data_buf,
4795 ++ dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
4796 + ce_data_base);
4797 + }
4798 +
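
The diag-write rework above caps the coherent DMA allocation at DIAG_TRANSFER_LIMIT and copies each chunk of the caller's buffer into that one bounce buffer inside the loop, instead of allocating (and pre-filling) a buffer as large as the whole transfer. The chunking pattern, reduced to self-contained C with a fixed-size array standing in for the DMA bounce buffer:

    #include <stdio.h>
    #include <string.h>

    #define TRANSFER_LIMIT 8        /* stands in for DIAG_TRANSFER_LIMIT */

    /* Push 'len' bytes through a bounce buffer no larger than TRANSFER_LIMIT. */
    static void bounce_write(const unsigned char *data, size_t len)
    {
            unsigned char bounce[TRANSFER_LIMIT];
            size_t remaining = len;

            while (remaining) {
                    size_t n = remaining < sizeof(bounce) ? remaining : sizeof(bounce);

                    memcpy(bounce, data, n);    /* copy per chunk, as in the fix */
                    printf("send %zu bytes: %.*s\n", n, (int)n, (const char *)bounce);
                    data += n;
                    remaining -= n;
            }
    }

    int main(void)
    {
            const char msg[] = "chunked bounce-buffer copy";

            bounce_write((const unsigned char *)msg, sizeof(msg) - 1);
            return 0;
    }
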
4799 +diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
4800 +index fa1843a7e0fd..e2d78f77edb7 100644
4801 +--- a/drivers/net/wireless/ath/ath10k/snoc.c
4802 ++++ b/drivers/net/wireless/ath/ath10k/snoc.c
4803 +@@ -1190,7 +1190,7 @@ static int ath10k_wcn3990_clk_init(struct ath10k *ar)
4804 + return 0;
4805 +
4806 + err_clock_config:
4807 +- for (; i >= 0; i--) {
4808 ++ for (i = i - 1; i >= 0; i--) {
4809 + clk_info = &ar_snoc->clk[i];
4810 +
4811 + if (!clk_info->handle)
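
The snoc change fixes an off-by-one in the error unwind: when configuring clock i fails, entry i itself was never enabled, so cleanup has to start at i - 1; the old "for (; i >= 0; i--)" also disabled the clock that had just failed. The general unwind pattern, as a runnable toy:

    #include <stdio.h>

    #define N 4

    static int enable(int i)   { printf("enable %d\n", i); return i == 2 ? -1 : 0; }
    static void disable(int i) { printf("disable %d\n", i); }

    int main(void)
    {
            int i;

            for (i = 0; i < N; i++)
                    if (enable(i))
                            goto err;
            return 0;
    err:
            /* entry i failed and was never enabled: unwind from i - 1 */
            for (i = i - 1; i >= 0; i--)
                    disable(i);
            return 1;
    }
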
4812 +diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c
4813 +index f09a4ad2e9de..f9c79e21ab22 100644
4814 +--- a/drivers/net/wireless/ath/ath10k/usb.c
4815 ++++ b/drivers/net/wireless/ath/ath10k/usb.c
4816 +@@ -49,6 +49,10 @@ ath10k_usb_alloc_urb_from_pipe(struct ath10k_usb_pipe *pipe)
4817 + struct ath10k_urb_context *urb_context = NULL;
4818 + unsigned long flags;
4819 +
4820 ++ /* bail if this pipe is not initialized */
4821 ++ if (!pipe->ar_usb)
4822 ++ return NULL;
4823 ++
4824 + spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);
4825 + if (!list_empty(&pipe->urb_list_head)) {
4826 + urb_context = list_first_entry(&pipe->urb_list_head,
4827 +@@ -66,6 +70,10 @@ static void ath10k_usb_free_urb_to_pipe(struct ath10k_usb_pipe *pipe,
4828 + {
4829 + unsigned long flags;
4830 +
4831 ++ /* bail if this pipe is not initialized */
4832 ++ if (!pipe->ar_usb)
4833 ++ return;
4834 ++
4835 + spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);
4836 +
4837 + pipe->urb_cnt++;
4838 +diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
4839 +index f019a20e5a1f..983e1abbd9e4 100644
4840 +--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
4841 ++++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
4842 +@@ -4183,7 +4183,7 @@ static void ar9003_hw_thermometer_apply(struct ath_hw *ah)
4843 +
4844 + static void ar9003_hw_thermo_cal_apply(struct ath_hw *ah)
4845 + {
4846 +- u32 data, ko, kg;
4847 ++ u32 data = 0, ko, kg;
4848 +
4849 + if (!AR_SREV_9462_20_OR_LATER(ah))
4850 + return;
4851 +diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
4852 +index ceace95b1595..44296c015925 100644
4853 +--- a/drivers/net/wireless/ath/wil6210/debugfs.c
4854 ++++ b/drivers/net/wireless/ath/wil6210/debugfs.c
4855 +@@ -662,10 +662,10 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
4856 + enum { max_count = 4096 };
4857 + struct wil_blob_wrapper *wil_blob = file->private_data;
4858 + struct wil6210_priv *wil = wil_blob->wil;
4859 +- loff_t pos = *ppos;
4860 ++ loff_t aligned_pos, pos = *ppos;
4861 + size_t available = wil_blob->blob.size;
4862 + void *buf;
4863 +- size_t ret;
4864 ++ size_t unaligned_bytes, aligned_count, ret;
4865 + int rc;
4866 +
4867 + if (test_bit(wil_status_suspending, wil_blob->wil->status) ||
4868 +@@ -683,7 +683,12 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
4869 + if (count > max_count)
4870 + count = max_count;
4871 +
4872 +- buf = kmalloc(count, GFP_KERNEL);
4873 ++ /* set pos to 4 bytes aligned */
4874 ++ unaligned_bytes = pos % 4;
4875 ++ aligned_pos = pos - unaligned_bytes;
4876 ++ aligned_count = count + unaligned_bytes;
4877 ++
4878 ++ buf = kmalloc(aligned_count, GFP_KERNEL);
4879 + if (!buf)
4880 + return -ENOMEM;
4881 +
4882 +@@ -694,9 +699,9 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
4883 + }
4884 +
4885 + wil_memcpy_fromio_32(buf, (const void __iomem *)
4886 +- wil_blob->blob.data + pos, count);
4887 ++ wil_blob->blob.data + aligned_pos, aligned_count);
4888 +
4889 +- ret = copy_to_user(user_buf, buf, count);
4890 ++ ret = copy_to_user(user_buf, buf + unaligned_bytes, count);
4891 +
4892 + wil_pm_runtime_put(wil);
4893 +
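
wil_memcpy_fromio_32() accesses device memory in 4-byte units, so the debugfs read above rounds the file offset down to a 4-byte boundary, widens the copy by the same amount, and returns to the user only the bytes originally requested, starting at buf + unaligned_bytes. The offset arithmetic, extracted into a runnable example:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char io[] = "ABCDEFGHIJKLMNOP"; /* stands in for device memory */
            size_t pos = 6, count = 5;            /* caller asked for bytes 6..10 */

            size_t unaligned = pos % 4;               /* 2 */
            size_t aligned_pos = pos - unaligned;     /* 4: rounded down */
            size_t aligned_count = count + unaligned; /* 7: widened copy */

            char buf[32];
            memcpy(buf, io + aligned_pos, aligned_count); /* aligned source */

            /* hand back only the originally requested window */
            printf("%.*s\n", (int)count, buf + unaligned);    /* GHIJK */
            return 0;
    }
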
4894 +diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
4895 +index 920cb233f4db..10673fa9388e 100644
4896 +--- a/drivers/net/wireless/ath/wil6210/main.c
4897 ++++ b/drivers/net/wireless/ath/wil6210/main.c
4898 +@@ -1397,8 +1397,15 @@ static void wil_pre_fw_config(struct wil6210_priv *wil)
4899 + wil6210_clear_irq(wil);
4900 + /* CAF_ICR - clear and mask */
4901 + /* it is W1C, clear by writing back same value */
4902 +- wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
4903 +- wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
4904 ++ if (wil->hw_version < HW_VER_TALYN_MB) {
4905 ++ wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
4906 ++ wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
4907 ++ } else {
4908 ++ wil_s(wil,
4909 ++ RGF_CAF_ICR_TALYN_MB + offsetof(struct RGF_ICR, ICR), 0);
4910 ++ wil_w(wil, RGF_CAF_ICR_TALYN_MB +
4911 ++ offsetof(struct RGF_ICR, IMV), ~0);
4912 ++ }
4913 + /* clear PAL_UNIT_ICR (potential D0->D3 leftover)
4914 + * In Talyn-MB host cannot access this register due to
4915 + * access control, hence PAL_UNIT_ICR is cleared by the FW
4916 +diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
4917 +index 409a6fa8b6c8..5fa8d6ad6648 100644
4918 +--- a/drivers/net/wireless/ath/wil6210/txrx_edma.c
4919 ++++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
4920 +@@ -808,23 +808,24 @@ static int wil_rx_error_check_edma(struct wil6210_priv *wil,
4921 + wil_dbg_txrx(wil, "L2 RX error, l2_rx_status=0x%x\n",
4922 + l2_rx_status);
4923 + /* Due to HW issue, KEY error will trigger a MIC error */
4924 +- if (l2_rx_status & WIL_RX_EDMA_ERROR_MIC) {
4925 +- wil_dbg_txrx(wil,
4926 +- "L2 MIC/KEY error, dropping packet\n");
4927 ++ if (l2_rx_status == WIL_RX_EDMA_ERROR_MIC) {
4928 ++ wil_err_ratelimited(wil,
4929 ++ "L2 MIC/KEY error, dropping packet\n");
4930 + stats->rx_mic_error++;
4931 + }
4932 +- if (l2_rx_status & WIL_RX_EDMA_ERROR_KEY) {
4933 +- wil_dbg_txrx(wil, "L2 KEY error, dropping packet\n");
4934 ++ if (l2_rx_status == WIL_RX_EDMA_ERROR_KEY) {
4935 ++ wil_err_ratelimited(wil,
4936 ++ "L2 KEY error, dropping packet\n");
4937 + stats->rx_key_error++;
4938 + }
4939 +- if (l2_rx_status & WIL_RX_EDMA_ERROR_REPLAY) {
4940 +- wil_dbg_txrx(wil,
4941 +- "L2 REPLAY error, dropping packet\n");
4942 ++ if (l2_rx_status == WIL_RX_EDMA_ERROR_REPLAY) {
4943 ++ wil_err_ratelimited(wil,
4944 ++ "L2 REPLAY error, dropping packet\n");
4945 + stats->rx_replay++;
4946 + }
4947 +- if (l2_rx_status & WIL_RX_EDMA_ERROR_AMSDU) {
4948 +- wil_dbg_txrx(wil,
4949 +- "L2 AMSDU error, dropping packet\n");
4950 ++ if (l2_rx_status == WIL_RX_EDMA_ERROR_AMSDU) {
4951 ++ wil_err_ratelimited(wil,
4952 ++ "L2 AMSDU error, dropping packet\n");
4953 + stats->rx_amsdu_error++;
4954 + }
4955 + return -EFAULT;
4956 +diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
4957 +index 17c294b1ead1..75fe1a3b7046 100644
4958 +--- a/drivers/net/wireless/ath/wil6210/wil6210.h
4959 ++++ b/drivers/net/wireless/ath/wil6210/wil6210.h
4960 +@@ -319,6 +319,7 @@ struct RGF_ICR {
4961 + /* MAC timer, usec, for packet lifetime */
4962 + #define RGF_MAC_MTRL_COUNTER_0 (0x886aa8)
4963 +
4964 ++#define RGF_CAF_ICR_TALYN_MB (0x8893d4) /* struct RGF_ICR */
4965 + #define RGF_CAF_ICR (0x88946c) /* struct RGF_ICR */
4966 + #define RGF_CAF_OSC_CONTROL (0x88afa4)
4967 + #define BIT_CAF_OSC_XTAL_EN BIT(0)
4968 +diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
4969 +index 2010f771478d..8a603432f531 100644
4970 +--- a/drivers/net/wireless/ath/wil6210/wmi.c
4971 ++++ b/drivers/net/wireless/ath/wil6210/wmi.c
4972 +@@ -1639,16 +1639,17 @@ int wmi_call(struct wil6210_priv *wil, u16 cmdid, u8 mid, void *buf, u16 len,
4973 + {
4974 + int rc;
4975 + unsigned long remain;
4976 ++ ulong flags;
4977 +
4978 + mutex_lock(&wil->wmi_mutex);
4979 +
4980 +- spin_lock(&wil->wmi_ev_lock);
4981 ++ spin_lock_irqsave(&wil->wmi_ev_lock, flags);
4982 + wil->reply_id = reply_id;
4983 + wil->reply_mid = mid;
4984 + wil->reply_buf = reply;
4985 + wil->reply_size = reply_size;
4986 + reinit_completion(&wil->wmi_call);
4987 +- spin_unlock(&wil->wmi_ev_lock);
4988 ++ spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
4989 +
4990 + rc = __wmi_send(wil, cmdid, mid, buf, len);
4991 + if (rc)
4992 +@@ -1668,12 +1669,12 @@ int wmi_call(struct wil6210_priv *wil, u16 cmdid, u8 mid, void *buf, u16 len,
4993 + }
4994 +
4995 + out:
4996 +- spin_lock(&wil->wmi_ev_lock);
4997 ++ spin_lock_irqsave(&wil->wmi_ev_lock, flags);
4998 + wil->reply_id = 0;
4999 + wil->reply_mid = U8_MAX;
5000 + wil->reply_buf = NULL;
5001 + wil->reply_size = 0;
5002 +- spin_unlock(&wil->wmi_ev_lock);
5003 ++ spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
5004 +
5005 + mutex_unlock(&wil->wmi_mutex);
5006 +
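
The wmi_call() hunks swap plain spin_lock()/spin_unlock() for the irqsave variants around the reply bookkeeping. As a general rule, a lock that is also taken from interrupt context must be acquired with interrupts disabled in process context, and spin_lock_irqsave() saves and restores the interrupt flags so the acquisition is safe from any context. A kernel-context sketch of the pattern (illustrative structure, not the wil6210 one):

    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct reply_state { u16 id; };

    static DEFINE_SPINLOCK(reply_lock);

    static void set_reply_id(struct reply_state *r, u16 id)
    {
            unsigned long flags;

            spin_lock_irqsave(&reply_lock, flags);  /* safe from any context */
            r->id = id;
            spin_unlock_irqrestore(&reply_lock, flags);
    }
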
5007 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
5008 +index 6255fb6d97a7..6188275b17e5 100644
5009 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
5010 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
5011 +@@ -502,6 +502,7 @@ brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
5012 + }
5013 +
5014 + spin_lock_bh(&wl->lock);
5015 ++ wl->wlc->vif = vif;
5016 + wl->mute_tx = false;
5017 + brcms_c_mute(wl->wlc, false);
5018 + if (vif->type == NL80211_IFTYPE_STATION)
5019 +@@ -519,6 +520,11 @@ brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
5020 + static void
5021 + brcms_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
5022 + {
5023 ++ struct brcms_info *wl = hw->priv;
5024 ++
5025 ++ spin_lock_bh(&wl->lock);
5026 ++ wl->wlc->vif = NULL;
5027 ++ spin_unlock_bh(&wl->lock);
5028 + }
5029 +
5030 + static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed)
5031 +@@ -840,8 +846,8 @@ brcms_ops_ampdu_action(struct ieee80211_hw *hw,
5032 + status = brcms_c_aggregatable(wl->wlc, tid);
5033 + spin_unlock_bh(&wl->lock);
5034 + if (!status) {
5035 +- brcms_err(wl->wlc->hw->d11core,
5036 +- "START: tid %d is not agg\'able\n", tid);
5037 ++ brcms_dbg_ht(wl->wlc->hw->d11core,
5038 ++ "START: tid %d is not agg\'able\n", tid);
5039 + return -EINVAL;
5040 + }
5041 + ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
5042 +@@ -937,6 +943,25 @@ static void brcms_ops_set_tsf(struct ieee80211_hw *hw,
5043 + spin_unlock_bh(&wl->lock);
5044 + }
5045 +
5046 ++static int brcms_ops_beacon_set_tim(struct ieee80211_hw *hw,
5047 ++ struct ieee80211_sta *sta, bool set)
5048 ++{
5049 ++ struct brcms_info *wl = hw->priv;
5050 ++ struct sk_buff *beacon = NULL;
5051 ++ u16 tim_offset = 0;
5052 ++
5053 ++ spin_lock_bh(&wl->lock);
5054 ++ if (wl->wlc->vif)
5055 ++ beacon = ieee80211_beacon_get_tim(hw, wl->wlc->vif,
5056 ++ &tim_offset, NULL);
5057 ++ if (beacon)
5058 ++ brcms_c_set_new_beacon(wl->wlc, beacon, tim_offset,
5059 ++ wl->wlc->vif->bss_conf.dtim_period);
5060 ++ spin_unlock_bh(&wl->lock);
5061 ++
5062 ++ return 0;
5063 ++}
5064 ++
5065 + static const struct ieee80211_ops brcms_ops = {
5066 + .tx = brcms_ops_tx,
5067 + .start = brcms_ops_start,
5068 +@@ -955,6 +980,7 @@ static const struct ieee80211_ops brcms_ops = {
5069 + .flush = brcms_ops_flush,
5070 + .get_tsf = brcms_ops_get_tsf,
5071 + .set_tsf = brcms_ops_set_tsf,
5072 ++ .set_tim = brcms_ops_beacon_set_tim,
5073 + };
5074 +
5075 + void brcms_dpc(unsigned long data)
5076 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h
5077 +index c4d135cff04a..9f76b880814e 100644
5078 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h
5079 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h
5080 +@@ -563,6 +563,7 @@ struct brcms_c_info {
5081 +
5082 + struct wiphy *wiphy;
5083 + struct scb pri_scb;
5084 ++ struct ieee80211_vif *vif;
5085 +
5086 + struct sk_buff *beacon;
5087 + u16 beacon_tim_offset;
5088 +diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
5089 +index 04dd7a936593..5512c7f73fce 100644
5090 +--- a/drivers/net/wireless/cisco/airo.c
5091 ++++ b/drivers/net/wireless/cisco/airo.c
5092 +@@ -5462,7 +5462,7 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
5093 + we have to add a spin lock... */
5094 + rc = readBSSListRid(ai, doLoseSync, &BSSList_rid);
5095 + while(rc == 0 && BSSList_rid.index != cpu_to_le16(0xffff)) {
5096 +- ptr += sprintf(ptr, "%pM %*s rssi = %d",
5097 ++ ptr += sprintf(ptr, "%pM %.*s rssi = %d",
5098 + BSSList_rid.bssid,
5099 + (int)BSSList_rid.ssidLen,
5100 + BSSList_rid.ssid,
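
The airo fix is one character but worth spelling out: the SSID buffer is not guaranteed to be NUL-terminated, and "%*s" is only a field width, so printf still scans for a terminator; "%.*s" is a precision and prints at most ssidLen bytes. A two-line demonstration:

    #include <stdio.h>

    int main(void)
    {
            char ssid[4] = { 'n', 'e', 't', 'X' }; /* no NUL terminator */

            printf("%.*s\n", 3, ssid); /* safe: prints "net", stops after 3 bytes */
            /* printf("%*s\n", 3, ssid); would keep reading past the buffer
             * looking for a NUL terminator: undefined behavior */
            return 0;
    }
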
5101 +diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
5102 +index 47ec5293c045..7b74ef71bef1 100644
5103 +--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
5104 ++++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
5105 +@@ -376,11 +376,20 @@ mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy,
5106 + struct mwifiex_power_cfg power_cfg;
5107 + int dbm = MBM_TO_DBM(mbm);
5108 +
5109 +- if (type == NL80211_TX_POWER_FIXED) {
5110 ++ switch (type) {
5111 ++ case NL80211_TX_POWER_FIXED:
5112 + power_cfg.is_power_auto = 0;
5113 ++ power_cfg.is_power_fixed = 1;
5114 + power_cfg.power_level = dbm;
5115 +- } else {
5116 ++ break;
5117 ++ case NL80211_TX_POWER_LIMITED:
5118 ++ power_cfg.is_power_auto = 0;
5119 ++ power_cfg.is_power_fixed = 0;
5120 ++ power_cfg.power_level = dbm;
5121 ++ break;
5122 ++ case NL80211_TX_POWER_AUTOMATIC:
5123 + power_cfg.is_power_auto = 1;
5124 ++ break;
5125 + }
5126 +
5127 + priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
5128 +diff --git a/drivers/net/wireless/marvell/mwifiex/ioctl.h b/drivers/net/wireless/marvell/mwifiex/ioctl.h
5129 +index 48e154e1865d..0dd592ea6e83 100644
5130 +--- a/drivers/net/wireless/marvell/mwifiex/ioctl.h
5131 ++++ b/drivers/net/wireless/marvell/mwifiex/ioctl.h
5132 +@@ -267,6 +267,7 @@ struct mwifiex_ds_encrypt_key {
5133 +
5134 + struct mwifiex_power_cfg {
5135 + u32 is_power_auto;
5136 ++ u32 is_power_fixed;
5137 + u32 power_level;
5138 + };
5139 +
5140 +diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
5141 +index 843d65bba181..74e50566db1f 100644
5142 +--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
5143 ++++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
5144 +@@ -688,6 +688,9 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
5145 + txp_cfg = (struct host_cmd_ds_txpwr_cfg *) buf;
5146 + txp_cfg->action = cpu_to_le16(HostCmd_ACT_GEN_SET);
5147 + if (!power_cfg->is_power_auto) {
5148 ++ u16 dbm_min = power_cfg->is_power_fixed ?
5149 ++ dbm : priv->min_tx_power_level;
5150 ++
5151 + txp_cfg->mode = cpu_to_le32(1);
5152 + pg_tlv = (struct mwifiex_types_power_group *)
5153 + (buf + sizeof(struct host_cmd_ds_txpwr_cfg));
5154 +@@ -702,7 +705,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
5155 + pg->last_rate_code = 0x03;
5156 + pg->modulation_class = MOD_CLASS_HR_DSSS;
5157 + pg->power_step = 0;
5158 +- pg->power_min = (s8) dbm;
5159 ++ pg->power_min = (s8) dbm_min;
5160 + pg->power_max = (s8) dbm;
5161 + pg++;
5162 + /* Power group for modulation class OFDM */
5163 +@@ -710,7 +713,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
5164 + pg->last_rate_code = 0x07;
5165 + pg->modulation_class = MOD_CLASS_OFDM;
5166 + pg->power_step = 0;
5167 +- pg->power_min = (s8) dbm;
5168 ++ pg->power_min = (s8) dbm_min;
5169 + pg->power_max = (s8) dbm;
5170 + pg++;
5171 + /* Power group for modulation class HTBW20 */
5172 +@@ -718,7 +721,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
5173 + pg->last_rate_code = 0x20;
5174 + pg->modulation_class = MOD_CLASS_HT;
5175 + pg->power_step = 0;
5176 +- pg->power_min = (s8) dbm;
5177 ++ pg->power_min = (s8) dbm_min;
5178 + pg->power_max = (s8) dbm;
5179 + pg->ht_bandwidth = HT_BW_20;
5180 + pg++;
5181 +@@ -727,7 +730,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
5182 + pg->last_rate_code = 0x20;
5183 + pg->modulation_class = MOD_CLASS_HT;
5184 + pg->power_step = 0;
5185 +- pg->power_min = (s8) dbm;
5186 ++ pg->power_min = (s8) dbm_min;
5187 + pg->power_max = (s8) dbm;
5188 + pg->ht_bandwidth = HT_BW_40;
5189 + }
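
Taken together, the two mwifiex hunks distinguish NL80211_TX_POWER_FIXED from NL80211_TX_POWER_LIMITED: a fixed request pins each power group's minimum and maximum to the requested dBm value, while a limited request only caps the maximum and leaves the minimum at the device floor. The selection logic, reduced to a runnable example (names are illustrative):

    #include <stdio.h>

    struct power_cfg { int is_power_auto, is_power_fixed, power_level; };

    static void fill_range(const struct power_cfg *cfg, int hw_min,
                           signed char *min, signed char *max)
    {
            /* fixed: min == max == requested; limited: [device floor, requested] */
            *min = (signed char)(cfg->is_power_fixed ? cfg->power_level : hw_min);
            *max = (signed char)cfg->power_level;
    }

    int main(void)
    {
            struct power_cfg fixed = { 0, 1, 17 }, limited = { 0, 0, 17 };
            signed char lo, hi;

            fill_range(&fixed, 4, &lo, &hi);
            printf("fixed:   min=%d max=%d\n", lo, hi);   /* 17..17 */
            fill_range(&limited, 4, &lo, &hi);
            printf("limited: min=%d max=%d\n", lo, hi);   /* 4..17 */
            return 0;
    }
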
5190 +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
5191 +index 14e8c575f6c3..924c761f34fd 100644
5192 +--- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
5193 ++++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
5194 +@@ -793,9 +793,8 @@ void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev)
5195 + mt76_wr(dev, MT_TX_ALC_CFG_0, 0);
5196 + usleep_range(500, 700);
5197 +
5198 +- reg_val = mt76_rr(dev, 0x2124);
5199 +- reg_val &= 0xffffff7e;
5200 +- mt76_wr(dev, 0x2124, reg_val);
5201 ++ reg_val = mt76_rr(dev, MT_BBP(IBI, 9));
5202 ++ mt76_wr(dev, MT_BBP(IBI, 9), 0xffffff7e);
5203 +
5204 + mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 0);
5205 +
5206 +@@ -806,7 +805,7 @@ void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev)
5207 + mt76x0_mcu_calibrate(dev, MCU_CAL_RXIQ, is_5ghz);
5208 + mt76x0_mcu_calibrate(dev, MCU_CAL_RX_GROUP_DELAY, is_5ghz);
5209 +
5210 +- mt76_wr(dev, 0x2124, reg_val);
5211 ++ mt76_wr(dev, MT_BBP(IBI, 9), reg_val);
5212 + mt76_wr(dev, MT_TX_ALC_CFG_0, tx_alc);
5213 + msleep(100);
5214 +
5215 +diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
5216 +index 20447fdce4c3..227e5ebfe3dc 100644
5217 +--- a/drivers/net/wireless/mediatek/mt76/tx.c
5218 ++++ b/drivers/net/wireless/mediatek/mt76/tx.c
5219 +@@ -148,7 +148,8 @@ mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
5220 + {
5221 + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
5222 +
5223 +- if (!ieee80211_is_data_qos(hdr->frame_control))
5224 ++ if (!ieee80211_is_data_qos(hdr->frame_control) ||
5225 ++ !ieee80211_is_data_present(hdr->frame_control))
5226 + return;
5227 +
5228 + mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
5229 +diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
5230 +index 505ab1b055ff..2b4fcdf4ec5b 100644
5231 +--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
5232 ++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
5233 +@@ -5691,6 +5691,7 @@ static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
5234 + break;
5235 + case WLAN_CIPHER_SUITE_TKIP:
5236 + key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
5237 ++ break;
5238 + default:
5239 + return -EOPNOTSUPP;
5240 + }
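
The rtl8xxxu hunk adds a break that was missing after the TKIP case, which previously fell straight through into default and returned -EOPNOTSUPP even though the cipher had just been configured. A minimal reproduction of the bug class:

    #include <stdio.h>

    static int set_cipher(int cipher)
    {
            switch (cipher) {
            case 1:                 /* e.g. TKIP */
                    printf("cipher configured\n");
                    break;          /* without this, control falls into default */
            default:
                    return -95;     /* -EOPNOTSUPP */
            }
            return 0;
    }

    int main(void)
    {
            printf("ret = %d\n", set_cipher(1)); /* 0 with the break in place */
            return 0;
    }
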
5241 +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
5242 +index 85cedd083d2b..75bfa9dfef4a 100644
5243 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
5244 ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
5245 +@@ -173,7 +173,7 @@ static int _rtl92d_fw_init(struct ieee80211_hw *hw)
5246 + rtl_read_byte(rtlpriv, FW_MAC1_READY));
5247 + }
5248 + RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
5249 +- "Polling FW ready fail!! REG_MCUFWDL:0x%08ul\n",
5250 ++ "Polling FW ready fail!! REG_MCUFWDL:0x%08x\n",
5251 + rtl_read_dword(rtlpriv, REG_MCUFWDL));
5252 + return -1;
5253 + }
5254 +diff --git a/drivers/net/wireless/ti/wlcore/vendor_cmd.c b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
5255 +index dbe78d8491ef..7f34ec077ee5 100644
5256 +--- a/drivers/net/wireless/ti/wlcore/vendor_cmd.c
5257 ++++ b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
5258 +@@ -70,7 +70,7 @@ wlcore_vendor_cmd_smart_config_start(struct wiphy *wiphy,
5259 + out:
5260 + mutex_unlock(&wl->mutex);
5261 +
5262 +- return 0;
5263 ++ return ret;
5264 + }
5265 +
5266 + static int
5267 +diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c
5268 +index bb43cebda9dc..60ae382f50da 100644
5269 +--- a/drivers/nfc/port100.c
5270 ++++ b/drivers/nfc/port100.c
5271 +@@ -792,7 +792,7 @@ static int port100_send_frame_async(struct port100 *dev, struct sk_buff *out,
5272 +
5273 + rc = port100_submit_urb_for_ack(dev, GFP_KERNEL);
5274 + if (rc)
5275 +- usb_unlink_urb(dev->out_urb);
5276 ++ usb_kill_urb(dev->out_urb);
5277 +
5278 + exit:
5279 + mutex_unlock(&dev->out_urb_lock);
5280 +diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c
5281 +index 6aa573227279..2ad263f708da 100644
5282 +--- a/drivers/ntb/hw/intel/ntb_hw_gen1.c
5283 ++++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c
5284 +@@ -265,7 +265,7 @@ static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits,
5285 + return 0;
5286 + }
5287 +
5288 +-static inline int ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector)
5289 ++static inline u64 ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector)
5290 + {
5291 + u64 shift, mask;
5292 +
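
ndev_vec_mask() computes a 64-bit doorbell mask, so returning it through int silently discarded the upper 32 bits for higher vectors; the hunk only widens the return type. The failure mode, as a compilable example:

    #include <stdint.h>
    #include <stdio.h>

    /* conversion to int narrows the mask (implementation-defined; typically 0) */
    static int narrow_mask(int vec)    { return UINT64_C(1) << (vec + 32); }
    static uint64_t wide_mask(int vec) { return UINT64_C(1) << (vec + 32); }

    int main(void)
    {
            printf("narrow: %#llx\n", (unsigned long long)narrow_mask(1));
            printf("wide:   %#llx\n", (unsigned long long)wide_mask(1));
            return 0;
    }
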
5293 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
5294 +index 5d0f99bcc987..44da9fe5b27b 100644
5295 +--- a/drivers/nvme/host/core.c
5296 ++++ b/drivers/nvme/host/core.c
5297 +@@ -3647,7 +3647,7 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
5298 + down_read(&ctrl->namespaces_rwsem);
5299 +
5300 + /* Forcibly unquiesce queues to avoid blocking dispatch */
5301 +- if (ctrl->admin_q)
5302 ++ if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
5303 + blk_mq_unquiesce_queue(ctrl->admin_q);
5304 +
5305 + list_for_each_entry(ns, &ctrl->namespaces, list)
5306 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
5307 +index a64a8bca0d5b..124f41157173 100644
5308 +--- a/drivers/nvme/host/pci.c
5309 ++++ b/drivers/nvme/host/pci.c
5310 +@@ -1652,6 +1652,9 @@ static void nvme_map_cmb(struct nvme_dev *dev)
5311 + struct pci_dev *pdev = to_pci_dev(dev->dev);
5312 + int bar;
5313 +
5314 ++ if (dev->cmb_size)
5315 ++ return;
5316 ++
5317 + dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
5318 + if (!dev->cmbsz)
5319 + return;
5320 +@@ -2136,7 +2139,6 @@ static void nvme_pci_disable(struct nvme_dev *dev)
5321 + {
5322 + struct pci_dev *pdev = to_pci_dev(dev->dev);
5323 +
5324 +- nvme_release_cmb(dev);
5325 + pci_free_irq_vectors(pdev);
5326 +
5327 + if (pci_is_enabled(pdev)) {
5328 +@@ -2583,19 +2585,19 @@ static void nvme_remove(struct pci_dev *pdev)
5329 + struct nvme_dev *dev = pci_get_drvdata(pdev);
5330 +
5331 + nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
5332 +-
5333 +- cancel_work_sync(&dev->ctrl.reset_work);
5334 + pci_set_drvdata(pdev, NULL);
5335 +
5336 + if (!pci_device_is_present(pdev)) {
5337 + nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
5338 + nvme_dev_disable(dev, true);
5339 ++ nvme_dev_remove_admin(dev);
5340 + }
5341 +
5342 + flush_work(&dev->ctrl.reset_work);
5343 + nvme_stop_ctrl(&dev->ctrl);
5344 + nvme_remove_namespaces(&dev->ctrl);
5345 + nvme_dev_disable(dev, true);
5346 ++ nvme_release_cmb(dev);
5347 + nvme_free_host_mem(dev);
5348 + nvme_dev_remove_admin(dev);
5349 + nvme_free_queues(dev, 0);
5350 +diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
5351 +index 5251689a1d9a..291f4121f516 100644
5352 +--- a/drivers/nvme/target/fcloop.c
5353 ++++ b/drivers/nvme/target/fcloop.c
5354 +@@ -648,6 +648,7 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
5355 + break;
5356 +
5357 + /* Fall-Thru to RSP handling */
5358 ++ /* FALLTHRU */
5359 +
5360 + case NVMET_FCOP_RSP:
5361 + if (fcpreq) {
5362 +diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
5363 +index 81a9dc5290a8..39d972e2595f 100644
5364 +--- a/drivers/nvme/target/io-cmd-file.c
5365 ++++ b/drivers/nvme/target/io-cmd-file.c
5366 +@@ -246,7 +246,8 @@ static void nvmet_file_execute_discard(struct nvmet_req *req)
5367 + break;
5368 +
5369 + offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
5370 +- len = le32_to_cpu(range.nlb) << req->ns->blksize_shift;
5371 ++ len = le32_to_cpu(range.nlb);
5372 ++ len <<= req->ns->blksize_shift;
5373 + if (offset + len > req->ns->size) {
5374 + ret = NVME_SC_LBA_RANGE | NVME_SC_DNR;
5375 + break;
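
The discard fix above avoids a 32-bit shift overflow: range.nlb is a 32-bit block count, and shifting it before the assignment performs the shift in 32-bit arithmetic, wrapping for large ranges; assigning to the 64-bit len first and then shifting keeps all the bits. Compilable illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t nlb = 0x01000000;       /* 16M blocks */
            unsigned int blksize_shift = 12; /* 4 KiB blocks */
            uint64_t bad, good;

            bad = nlb << blksize_shift;      /* shift done in 32 bits: wraps to 0 */
            good = nlb;
            good <<= blksize_shift;          /* shift done in 64 bits */

            printf("bad  = %#llx\n", (unsigned long long)bad);  /* 0 */
            printf("good = %#llx\n", (unsigned long long)good); /* 0x1000000000 */
            return 0;
    }
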
5376 +diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
5377 +index bac4b4bbc33d..68f52966bbc0 100644
5378 +--- a/drivers/of/unittest.c
5379 ++++ b/drivers/of/unittest.c
5380 +@@ -375,6 +375,7 @@ static void __init of_unittest_parse_phandle_with_args(void)
5381 + for (i = 0; i < 8; i++) {
5382 + bool passed = true;
5383 +
5384 ++ memset(&args, 0, sizeof(args));
5385 + rc = of_parse_phandle_with_args(np, "phandle-list",
5386 + "#phandle-cells", i, &args);
5387 +
5388 +@@ -428,6 +429,7 @@ static void __init of_unittest_parse_phandle_with_args(void)
5389 + }
5390 +
5391 + /* Check for missing list property */
5392 ++ memset(&args, 0, sizeof(args));
5393 + rc = of_parse_phandle_with_args(np, "phandle-list-missing",
5394 + "#phandle-cells", 0, &args);
5395 + unittest(rc == -ENOENT, "expected:%i got:%i\n", -ENOENT, rc);
5396 +@@ -436,6 +438,7 @@ static void __init of_unittest_parse_phandle_with_args(void)
5397 + unittest(rc == -ENOENT, "expected:%i got:%i\n", -ENOENT, rc);
5398 +
5399 + /* Check for missing cells property */
5400 ++ memset(&args, 0, sizeof(args));
5401 + rc = of_parse_phandle_with_args(np, "phandle-list",
5402 + "#phandle-cells-missing", 0, &args);
5403 + unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
5404 +@@ -444,6 +447,7 @@ static void __init of_unittest_parse_phandle_with_args(void)
5405 + unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
5406 +
5407 + /* Check for bad phandle in list */
5408 ++ memset(&args, 0, sizeof(args));
5409 + rc = of_parse_phandle_with_args(np, "phandle-list-bad-phandle",
5410 + "#phandle-cells", 0, &args);
5411 + unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
5412 +@@ -452,6 +456,7 @@ static void __init of_unittest_parse_phandle_with_args(void)
5413 + unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
5414 +
5415 + /* Check for incorrectly formed argument list */
5416 ++ memset(&args, 0, sizeof(args));
5417 + rc = of_parse_phandle_with_args(np, "phandle-list-bad-args",
5418 + "#phandle-cells", 1, &args);
5419 + unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
5420 +@@ -502,6 +507,7 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
5421 + for (i = 0; i < 8; i++) {
5422 + bool passed = true;
5423 +
5424 ++ memset(&args, 0, sizeof(args));
5425 + rc = of_parse_phandle_with_args_map(np, "phandle-list",
5426 + "phandle", i, &args);
5427 +
5428 +@@ -559,21 +565,25 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
5429 + }
5430 +
5431 + /* Check for missing list property */
5432 ++ memset(&args, 0, sizeof(args));
5433 + rc = of_parse_phandle_with_args_map(np, "phandle-list-missing",
5434 + "phandle", 0, &args);
5435 + unittest(rc == -ENOENT, "expected:%i got:%i\n", -ENOENT, rc);
5436 +
5437 + /* Check for missing cells,map,mask property */
5438 ++ memset(&args, 0, sizeof(args));
5439 + rc = of_parse_phandle_with_args_map(np, "phandle-list",
5440 + "phandle-missing", 0, &args);
5441 + unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
5442 +
5443 + /* Check for bad phandle in list */
5444 ++ memset(&args, 0, sizeof(args));
5445 + rc = of_parse_phandle_with_args_map(np, "phandle-list-bad-phandle",
5446 + "phandle", 0, &args);
5447 + unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
5448 +
5449 + /* Check for incorrectly formed argument list */
5450 ++ memset(&args, 0, sizeof(args));
5451 + rc = of_parse_phandle_with_args_map(np, "phandle-list-bad-args",
5452 + "phandle", 1, &args);
5453 + unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
5454 +@@ -783,7 +793,7 @@ static void __init of_unittest_parse_interrupts(void)
5455 + for (i = 0; i < 4; i++) {
5456 + bool passed = true;
5457 +
5458 +- args.args_count = 0;
5459 ++ memset(&args, 0, sizeof(args));
5460 + rc = of_irq_parse_one(np, i, &args);
5461 +
5462 + passed &= !rc;
5463 +@@ -804,7 +814,7 @@ static void __init of_unittest_parse_interrupts(void)
5464 + for (i = 0; i < 4; i++) {
5465 + bool passed = true;
5466 +
5467 +- args.args_count = 0;
5468 ++ memset(&args, 0, sizeof(args));
5469 + rc = of_irq_parse_one(np, i, &args);
5470 +
5471 + /* Test the values from tests-phandle.dtsi */
5472 +@@ -860,6 +870,7 @@ static void __init of_unittest_parse_interrupts_extended(void)
5473 + for (i = 0; i < 7; i++) {
5474 + bool passed = true;
5475 +
5476 ++ memset(&args, 0, sizeof(args));
5477 + rc = of_irq_parse_one(np, i, &args);
5478 +
5479 + /* Test the values from tests-phandle.dtsi */
5480 +@@ -1067,20 +1078,44 @@ static void __init of_unittest_platform_populate(void)
5481 + * of np into dup node (present in live tree) and
5482 + * updates parent of children of np to dup.
5483 + *
5484 +- * @np: node already present in live tree
5485 ++ * @np: node whose properties are being added to the live tree
5486 + * @dup: node present in live tree to be updated
5487 + */
5488 + static void update_node_properties(struct device_node *np,
5489 + struct device_node *dup)
5490 + {
5491 + struct property *prop;
5492 ++ struct property *save_next;
5493 + struct device_node *child;
5494 +-
5495 +- for_each_property_of_node(np, prop)
5496 +- of_add_property(dup, prop);
5497 ++ int ret;
5498 +
5499 + for_each_child_of_node(np, child)
5500 + child->parent = dup;
5501 ++
5502 ++ /*
5503 ++ * "unittest internal error: unable to add testdata property"
5504 ++ *
5505 ++ * If this message reports a property in node '/__symbols__' then
5506 ++ * the respective unittest overlay contains a label that has the
5507 ++ * same name as a label in the live devicetree. The label will
5508 ++ * be in the live devicetree only if the devicetree source was
5509 ++ * compiled with the '-@' option. If you encounter this error,
5510 ++ * please consider renaming __all__ of the labels in the unittest
5511 ++ * overlay dts files with an odd prefix that is unlikely to be
5512 ++ * used in a real devicetree.
5513 ++ */
5514 ++
5515 ++ /*
5516 ++ * open code for_each_property_of_node() because of_add_property()
5517 ++ * sets prop->next to NULL
5518 ++ */
5519 ++ for (prop = np->properties; prop != NULL; prop = save_next) {
5520 ++ save_next = prop->next;
5521 ++ ret = of_add_property(dup, prop);
5522 ++ if (ret)
5523 ++ pr_err("unittest internal error: unable to add testdata property %pOF/%s",
5524 ++ np, prop->name);
5525 ++ }
5526 + }
5527 +
5528 + /**
5529 +@@ -1089,18 +1124,23 @@ static void update_node_properties(struct device_node *np,
5530 + *
5531 + * @np: Node to attach to live tree
5532 + */
5533 +-static int attach_node_and_children(struct device_node *np)
5534 ++static void attach_node_and_children(struct device_node *np)
5535 + {
5536 + struct device_node *next, *dup, *child;
5537 + unsigned long flags;
5538 + const char *full_name;
5539 +
5540 + full_name = kasprintf(GFP_KERNEL, "%pOF", np);
5541 ++
5542 ++ if (!strcmp(full_name, "/__local_fixups__") ||
5543 ++ !strcmp(full_name, "/__fixups__"))
5544 ++ return;
5545 ++
5546 + dup = of_find_node_by_path(full_name);
5547 + kfree(full_name);
5548 + if (dup) {
5549 + update_node_properties(np, dup);
5550 +- return 0;
5551 ++ return;
5552 + }
5553 +
5554 + child = np->child;
5555 +@@ -1121,8 +1161,6 @@ static int attach_node_and_children(struct device_node *np)
5556 + attach_node_and_children(child);
5557 + child = next;
5558 + }
5559 +-
5560 +- return 0;
5561 + }
5562 +
5563 + /**
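
Each memset() added to the unittest above zeroes the of_phandle_args out-parameter before it is reused: the parse helpers fill the structure only on success, so after a failing call the next assertions would otherwise be checking stale fields left over from the previous iteration. The general rule, shown standalone:

    #include <stdio.h>
    #include <string.h>

    struct args { int np; int count; };

    /* fills 'out' only on success, like the of_parse_* helpers */
    static int parse(int should_fail, struct args *out)
    {
            if (should_fail)
                    return -22;     /* -EINVAL: 'out' left untouched */
            out->np = 7;
            out->count = 2;
            return 0;
    }

    int main(void)
    {
            struct args a;
            int i;

            for (i = 0; i < 2; i++) {
                    memset(&a, 0, sizeof(a)); /* no stale state between calls */
                    if (parse(i == 1, &a))
                            printf("failed, count=%d (still 0)\n", a.count);
                    else
                            printf("ok, count=%d\n", a.count);
            }
            return 0;
    }
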
5564 +diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
5565 +index 5e199e7d2d4f..765357b87ff6 100644
5566 +--- a/drivers/pci/controller/dwc/pci-keystone.c
5567 ++++ b/drivers/pci/controller/dwc/pci-keystone.c
5568 +@@ -36,6 +36,7 @@
5569 + #define PCIE_RC_K2HK 0xb008
5570 + #define PCIE_RC_K2E 0xb009
5571 + #define PCIE_RC_K2L 0xb00a
5572 ++#define PCIE_RC_K2G 0xb00b
5573 +
5574 + #define to_keystone_pcie(x) dev_get_drvdata((x)->dev)
5575 +
5576 +@@ -50,6 +51,8 @@ static void quirk_limit_mrrs(struct pci_dev *dev)
5577 + .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
5578 + { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
5579 + .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
5580 ++ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2G),
5581 ++ .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
5582 + { 0, },
5583 + };
5584 +
5585 +diff --git a/drivers/pci/controller/pcie-cadence-ep.c b/drivers/pci/controller/pcie-cadence-ep.c
5586 +index 6692654798d4..c3a088910f48 100644
5587 +--- a/drivers/pci/controller/pcie-cadence-ep.c
5588 ++++ b/drivers/pci/controller/pcie-cadence-ep.c
5589 +@@ -355,7 +355,7 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
5590 + ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
5591 + ep->irq_pci_fn = fn;
5592 + }
5593 +- writew(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));
5594 ++ writel(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));
5595 +
5596 + return 0;
5597 + }
5598 +diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
5599 +index 0d100f56cb88..1bfbceb9f445 100644
5600 +--- a/drivers/pci/controller/pcie-mediatek.c
5601 ++++ b/drivers/pci/controller/pcie-mediatek.c
5602 +@@ -394,75 +394,6 @@ static struct pci_ops mtk_pcie_ops_v2 = {
5603 + .write = mtk_pcie_config_write,
5604 + };
5605 +
5606 +-static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
5607 +-{
5608 +- struct mtk_pcie *pcie = port->pcie;
5609 +- struct resource *mem = &pcie->mem;
5610 +- const struct mtk_pcie_soc *soc = port->pcie->soc;
5611 +- u32 val;
5612 +- size_t size;
5613 +- int err;
5614 +-
5615 +- /* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */
5616 +- if (pcie->base) {
5617 +- val = readl(pcie->base + PCIE_SYS_CFG_V2);
5618 +- val |= PCIE_CSR_LTSSM_EN(port->slot) |
5619 +- PCIE_CSR_ASPM_L1_EN(port->slot);
5620 +- writel(val, pcie->base + PCIE_SYS_CFG_V2);
5621 +- }
5622 +-
5623 +- /* Assert all reset signals */
5624 +- writel(0, port->base + PCIE_RST_CTRL);
5625 +-
5626 +- /*
5627 +- * Enable PCIe link down reset, if link status changed from link up to
5628 +- * link down, this will reset MAC control registers and configuration
5629 +- * space.
5630 +- */
5631 +- writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL);
5632 +-
5633 +- /* De-assert PHY, PE, PIPE, MAC and configuration reset */
5634 +- val = readl(port->base + PCIE_RST_CTRL);
5635 +- val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB |
5636 +- PCIE_MAC_SRSTB | PCIE_CRSTB;
5637 +- writel(val, port->base + PCIE_RST_CTRL);
5638 +-
5639 +- /* Set up vendor ID and class code */
5640 +- if (soc->need_fix_class_id) {
5641 +- val = PCI_VENDOR_ID_MEDIATEK;
5642 +- writew(val, port->base + PCIE_CONF_VEND_ID);
5643 +-
5644 +- val = PCI_CLASS_BRIDGE_HOST;
5645 +- writew(val, port->base + PCIE_CONF_CLASS_ID);
5646 +- }
5647 +-
5648 +- /* 100ms timeout value should be enough for Gen1/2 training */
5649 +- err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val,
5650 +- !!(val & PCIE_PORT_LINKUP_V2), 20,
5651 +- 100 * USEC_PER_MSEC);
5652 +- if (err)
5653 +- return -ETIMEDOUT;
5654 +-
5655 +- /* Set INTx mask */
5656 +- val = readl(port->base + PCIE_INT_MASK);
5657 +- val &= ~INTX_MASK;
5658 +- writel(val, port->base + PCIE_INT_MASK);
5659 +-
5660 +- /* Set AHB to PCIe translation windows */
5661 +- size = mem->end - mem->start;
5662 +- val = lower_32_bits(mem->start) | AHB2PCIE_SIZE(fls(size));
5663 +- writel(val, port->base + PCIE_AHB_TRANS_BASE0_L);
5664 +-
5665 +- val = upper_32_bits(mem->start);
5666 +- writel(val, port->base + PCIE_AHB_TRANS_BASE0_H);
5667 +-
5668 +- /* Set PCIe to AXI translation memory space.*/
5669 +- val = fls(0xffffffff) | WIN_ENABLE;
5670 +- writel(val, port->base + PCIE_AXI_WINDOW0);
5671 +-
5672 +- return 0;
5673 +-}
5674 +-
5675 + static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
5676 + {
5677 + struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
5678 +@@ -639,8 +570,6 @@ static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
5679 + ret = mtk_pcie_allocate_msi_domains(port);
5680 + if (ret)
5681 + return ret;
5682 +-
5683 +- mtk_pcie_enable_msi(port);
5684 + }
5685 +
5686 + return 0;
5687 +@@ -707,6 +636,78 @@ static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
5688 + return 0;
5689 + }
5690 +
5691 ++static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
5692 ++{
5693 ++ struct mtk_pcie *pcie = port->pcie;
5694 ++ struct resource *mem = &pcie->mem;
5695 ++ const struct mtk_pcie_soc *soc = port->pcie->soc;
5696 ++ u32 val;
5697 ++ size_t size;
5698 ++ int err;
5699 ++
5700 ++ /* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */
5701 ++ if (pcie->base) {
5702 ++ val = readl(pcie->base + PCIE_SYS_CFG_V2);
5703 ++ val |= PCIE_CSR_LTSSM_EN(port->slot) |
5704 ++ PCIE_CSR_ASPM_L1_EN(port->slot);
5705 ++ writel(val, pcie->base + PCIE_SYS_CFG_V2);
5706 ++ }
5707 ++
5708 ++ /* Assert all reset signals */
5709 ++ writel(0, port->base + PCIE_RST_CTRL);
5710 ++
5711 ++ /*
5712 ++ * Enable PCIe link down reset, if link status changed from link up to
5713 ++ * link down, this will reset MAC control registers and configuration
5714 ++ * space.
5715 ++ */
5716 ++ writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL);
5717 ++
5718 ++ /* De-assert PHY, PE, PIPE, MAC and configuration reset */
5719 ++ val = readl(port->base + PCIE_RST_CTRL);
5720 ++ val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB |
5721 ++ PCIE_MAC_SRSTB | PCIE_CRSTB;
5722 ++ writel(val, port->base + PCIE_RST_CTRL);
5723 ++
5724 ++ /* Set up vendor ID and class code */
5725 ++ if (soc->need_fix_class_id) {
5726 ++ val = PCI_VENDOR_ID_MEDIATEK;
5727 ++ writew(val, port->base + PCIE_CONF_VEND_ID);
5728 ++
5729 ++ val = PCI_CLASS_BRIDGE_PCI;
5730 ++ writew(val, port->base + PCIE_CONF_CLASS_ID);
5731 ++ }
5732 ++
5733 ++ /* 100ms timeout value should be enough for Gen1/2 training */
5734 ++ err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val,
5735 ++ !!(val & PCIE_PORT_LINKUP_V2), 20,
5736 ++ 100 * USEC_PER_MSEC);
5737 ++ if (err)
5738 ++ return -ETIMEDOUT;
5739 ++
5740 ++ /* Set INTx mask */
5741 ++ val = readl(port->base + PCIE_INT_MASK);
5742 ++ val &= ~INTX_MASK;
5743 ++ writel(val, port->base + PCIE_INT_MASK);
5744 ++
5745 ++ if (IS_ENABLED(CONFIG_PCI_MSI))
5746 ++ mtk_pcie_enable_msi(port);
5747 ++
5748 ++ /* Set AHB to PCIe translation windows */
5749 ++ size = mem->end - mem->start;
5750 ++ val = lower_32_bits(mem->start) | AHB2PCIE_SIZE(fls(size));
5751 ++ writel(val, port->base + PCIE_AHB_TRANS_BASE0_L);
5752 ++
5753 ++ val = upper_32_bits(mem->start);
5754 ++ writel(val, port->base + PCIE_AHB_TRANS_BASE0_H);
5755 ++
5756 ++ /* Set PCIe to AXI translation memory space.*/
5757 ++ val = fls(0xffffffff) | WIN_ENABLE;
5758 ++ writel(val, port->base + PCIE_AXI_WINDOW0);
5759 ++
5760 ++ return 0;
5761 ++}
5762 ++
5763 + static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus,
5764 + unsigned int devfn, int where)
5765 + {
5766 +diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
5767 +index 65eaa6b61868..ab36e5ca1aca 100644
5768 +--- a/drivers/pci/controller/vmd.c
5769 ++++ b/drivers/pci/controller/vmd.c
5770 +@@ -818,12 +818,12 @@ static void vmd_remove(struct pci_dev *dev)
5771 + {
5772 + struct vmd_dev *vmd = pci_get_drvdata(dev);
5773 +
5774 +- vmd_detach_resources(vmd);
5775 + sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
5776 + pci_stop_root_bus(vmd->bus);
5777 + pci_remove_root_bus(vmd->bus);
5778 + vmd_cleanup_srcu(vmd);
5779 + vmd_teardown_dma_ops(vmd);
5780 ++ vmd_detach_resources(vmd);
5781 + irq_domain_remove(vmd->irq_domain);
5782 + }
5783 +
5784 +diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
5785 +index 08925d24180b..1bd3c10ce189 100644
5786 +--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
5787 ++++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
5788 +@@ -72,10 +72,8 @@
5789 + #define GPIO_REG_OFFSET(p) ((p) / 32)
5790 + #define GPIO_REG_SHIFT(p) ((p) % 32)
5791 +
5792 +-enum bcm2835_pinconf_param {
5793 +- /* argument: bcm2835_pinconf_pull */
5794 +- BCM2835_PINCONF_PARAM_PULL = (PIN_CONFIG_END + 1),
5795 +-};
5796 ++/* argument: bcm2835_pinconf_pull */
5797 ++#define BCM2835_PINCONF_PARAM_PULL (PIN_CONFIG_END + 1)
5798 +
5799 + struct bcm2835_pinctrl {
5800 + struct device *dev;
5801 +diff --git a/drivers/pinctrl/cirrus/pinctrl-madera-core.c b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
5802 +index c4f4d904e4a6..618e04407ac8 100644
5803 +--- a/drivers/pinctrl/cirrus/pinctrl-madera-core.c
5804 ++++ b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
5805 +@@ -608,7 +608,7 @@ static int madera_mux_set_mux(struct pinctrl_dev *pctldev,
5806 + unsigned int n_chip_groups = priv->chip->n_pin_groups;
5807 + const char *func_name = madera_mux_funcs[selector].name;
5808 + unsigned int reg;
5809 +- int i, ret;
5810 ++ int i, ret = 0;
5811 +
5812 + dev_dbg(priv->dev, "%s selecting %u (%s) for group %u (%s)\n",
5813 + __func__, selector, func_name, group,
5814 +diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c
5815 +index 190f17e4bbda..1d3b88e6ab86 100644
5816 +--- a/drivers/pinctrl/pinctrl-lpc18xx.c
5817 ++++ b/drivers/pinctrl/pinctrl-lpc18xx.c
5818 +@@ -630,14 +630,8 @@ static const struct pinctrl_pin_desc lpc18xx_pins[] = {
5819 + LPC18XX_PIN(i2c0_sda, PIN_I2C0_SDA),
5820 + };
5821 +
5822 +-/**
5823 +- * enum lpc18xx_pin_config_param - possible pin configuration parameters
5824 +- * @PIN_CONFIG_GPIO_PIN_INT: route gpio to the gpio pin interrupt
5825 +- * controller.
5826 +- */
5827 +-enum lpc18xx_pin_config_param {
5828 +- PIN_CONFIG_GPIO_PIN_INT = PIN_CONFIG_END + 1,
5829 +-};
5830 ++/* PIN_CONFIG_GPIO_PIN_INT: route gpio to the gpio pin interrupt controller */
5831 ++#define PIN_CONFIG_GPIO_PIN_INT (PIN_CONFIG_END + 1)
5832 +
5833 + static const struct pinconf_generic_params lpc18xx_params[] = {
5834 + {"nxp,gpio-pin-interrupt", PIN_CONFIG_GPIO_PIN_INT, 0},
5835 +diff --git a/drivers/pinctrl/pinctrl-zynq.c b/drivers/pinctrl/pinctrl-zynq.c
5836 +index a0daf27042bd..90fd37e8207b 100644
5837 +--- a/drivers/pinctrl/pinctrl-zynq.c
5838 ++++ b/drivers/pinctrl/pinctrl-zynq.c
5839 +@@ -971,15 +971,12 @@ enum zynq_io_standards {
5840 + zynq_iostd_max
5841 + };
5842 +
5843 +-/**
5844 +- * enum zynq_pin_config_param - possible pin configuration parameters
5845 +- * @PIN_CONFIG_IOSTANDARD: if the pin can select an IO standard, the argument to
5846 ++/*
5847 ++ * PIN_CONFIG_IOSTANDARD: if the pin can select an IO standard, the argument to
5848 + * this parameter (in a custom format) tells the driver which alternative
5849 + * IO standard to use.
5850 + */
5851 +-enum zynq_pin_config_param {
5852 +- PIN_CONFIG_IOSTANDARD = PIN_CONFIG_END + 1,
5853 +-};
5854 ++#define PIN_CONFIG_IOSTANDARD (PIN_CONFIG_END + 1)
5855 +
5856 + static const struct pinconf_generic_params zynq_dt_params[] = {
5857 + {"io-standard", PIN_CONFIG_IOSTANDARD, zynq_iostd_lvcmos18},
5858 +diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
5859 +index cf82db78e69e..0c30f5eb4c71 100644
5860 +--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
5861 ++++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
5862 +@@ -1028,10 +1028,23 @@ static int pmic_gpio_probe(struct platform_device *pdev)
5863 + return ret;
5864 + }
5865 +
5866 +- ret = gpiochip_add_pin_range(&state->chip, dev_name(dev), 0, 0, npins);
5867 +- if (ret) {
5868 +- dev_err(dev, "failed to add pin range\n");
5869 +- goto err_range;
5870 ++ /*
5871 ++ * For DeviceTree-supported systems, the gpio core checks the
5872 ++ * pinctrl's device node for the "gpio-ranges" property.
5873 ++ * If it is present, it takes care of adding the pin ranges
5874 ++ * for the driver. In this case the driver can skip ahead.
5875 ++ *
5876 ++ * In order to remain compatible with older, existing DeviceTree
5877 ++ * files which don't set the "gpio-ranges" property or systems that
5878 ++ * utilize ACPI the driver has to call gpiochip_add_pin_range().
5879 ++ */
5880 ++ if (!of_property_read_bool(dev->of_node, "gpio-ranges")) {
5881 ++ ret = gpiochip_add_pin_range(&state->chip, dev_name(dev), 0, 0,
5882 ++ npins);
5883 ++ if (ret) {
5884 ++ dev_err(dev, "failed to add pin range\n");
5885 ++ goto err_range;
5886 ++ }
5887 + }
5888 +
5889 + return 0;
5890 +diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
5891 +index 26ebedc1f6d3..61aaaf58c599 100644
5892 +--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
5893 ++++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
5894 +@@ -1042,6 +1042,7 @@ static int sunxi_pinctrl_add_function(struct sunxi_pinctrl *pctl,
5895 + static int sunxi_pinctrl_build_state(struct platform_device *pdev)
5896 + {
5897 + struct sunxi_pinctrl *pctl = platform_get_drvdata(pdev);
5898 ++ void *ptr;
5899 + int i;
5900 +
5901 + /*
5902 +@@ -1108,13 +1109,15 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev)
5903 + }
5904 +
5905 + /* And now allocate and fill the array for real */
5906 +- pctl->functions = krealloc(pctl->functions,
5907 +- pctl->nfunctions * sizeof(*pctl->functions),
5908 +- GFP_KERNEL);
5909 +- if (!pctl->functions) {
5910 ++ ptr = krealloc(pctl->functions,
5911 ++ pctl->nfunctions * sizeof(*pctl->functions),
5912 ++ GFP_KERNEL);
5913 ++ if (!ptr) {
5914 + kfree(pctl->functions);
5915 ++ pctl->functions = NULL;
5916 + return -ENOMEM;
5917 + }
5918 ++ pctl->functions = ptr;
5919 +
5920 + for (i = 0; i < pctl->desc->npins; i++) {
5921 + const struct sunxi_desc_pin *pin = pctl->desc->pins + i;
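/*
 * A minimal sketch of the safe-krealloc idiom the sunxi hunk above
 * adopts (resize_array() and its parameters are hypothetical):
 * krealloc() leaves the original allocation untouched on failure, so
 * overwriting the only pointer with its return value would leak it.
 */
static int resize_array(void **bufp, size_t new_size)
{
	void *tmp = krealloc(*bufp, new_size, GFP_KERNEL);

	if (!tmp) {
		kfree(*bufp);	/* old buffer is still valid here */
		*bufp = NULL;
		return -ENOMEM;
	}
	*bufp = tmp;		/* commit only once the resize succeeded */
	return 0;
}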
5922 +diff --git a/drivers/platform/x86/intel_cht_int33fe.c b/drivers/platform/x86/intel_cht_int33fe.c
5923 +index a26f410800c2..f40b1c192106 100644
5924 +--- a/drivers/platform/x86/intel_cht_int33fe.c
5925 ++++ b/drivers/platform/x86/intel_cht_int33fe.c
5926 +@@ -24,6 +24,7 @@
5927 + #include <linux/i2c.h>
5928 + #include <linux/interrupt.h>
5929 + #include <linux/module.h>
5930 ++#include <linux/platform_device.h>
5931 + #include <linux/regulator/consumer.h>
5932 + #include <linux/slab.h>
5933 +
5934 +@@ -88,9 +89,9 @@ static const struct property_entry fusb302_props[] = {
5935 + { }
5936 + };
5937 +
5938 +-static int cht_int33fe_probe(struct i2c_client *client)
5939 ++static int cht_int33fe_probe(struct platform_device *pdev)
5940 + {
5941 +- struct device *dev = &client->dev;
5942 ++ struct device *dev = &pdev->dev;
5943 + struct i2c_board_info board_info;
5944 + struct cht_int33fe_data *data;
5945 + struct i2c_client *max17047;
5946 +@@ -207,7 +208,7 @@ static int cht_int33fe_probe(struct i2c_client *client)
5947 + if (!data->pi3usb30532)
5948 + goto out_unregister_fusb302;
5949 +
5950 +- i2c_set_clientdata(client, data);
5951 ++ platform_set_drvdata(pdev, data);
5952 +
5953 + return 0;
5954 +
5955 +@@ -223,9 +224,9 @@ out_unregister_max17047:
5956 + return -EPROBE_DEFER; /* Wait for the i2c-adapter to load */
5957 + }
5958 +
5959 +-static int cht_int33fe_remove(struct i2c_client *i2c)
5960 ++static int cht_int33fe_remove(struct platform_device *pdev)
5961 + {
5962 +- struct cht_int33fe_data *data = i2c_get_clientdata(i2c);
5963 ++ struct cht_int33fe_data *data = platform_get_drvdata(pdev);
5964 +
5965 + i2c_unregister_device(data->pi3usb30532);
5966 + i2c_unregister_device(data->fusb302);
5967 +@@ -237,29 +238,22 @@ static int cht_int33fe_remove(struct i2c_client *i2c)
5968 + return 0;
5969 + }
5970 +
5971 +-static const struct i2c_device_id cht_int33fe_i2c_id[] = {
5972 +- { }
5973 +-};
5974 +-MODULE_DEVICE_TABLE(i2c, cht_int33fe_i2c_id);
5975 +-
5976 + static const struct acpi_device_id cht_int33fe_acpi_ids[] = {
5977 + { "INT33FE", },
5978 + { }
5979 + };
5980 + MODULE_DEVICE_TABLE(acpi, cht_int33fe_acpi_ids);
5981 +
5982 +-static struct i2c_driver cht_int33fe_driver = {
5983 ++static struct platform_driver cht_int33fe_driver = {
5984 + .driver = {
5985 + .name = "Intel Cherry Trail ACPI INT33FE driver",
5986 + .acpi_match_table = ACPI_PTR(cht_int33fe_acpi_ids),
5987 + },
5988 +- .probe_new = cht_int33fe_probe,
5989 ++ .probe = cht_int33fe_probe,
5990 + .remove = cht_int33fe_remove,
5991 +- .id_table = cht_int33fe_i2c_id,
5992 +- .disable_i2c_core_irq_mapping = true,
5993 + };
5994 +
5995 +-module_i2c_driver(cht_int33fe_driver);
5996 ++module_platform_driver(cht_int33fe_driver);
5997 +
5998 + MODULE_DESCRIPTION("Intel Cherry Trail ACPI INT33FE pseudo device driver");
5999 + MODULE_AUTHOR("Hans de Goede <hdegoede@××××××.com>");
6000 +diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
6001 +index 4721a264bac2..1e69c1c9ec09 100644
6002 +--- a/drivers/pwm/pwm-lpss.c
6003 ++++ b/drivers/pwm/pwm-lpss.c
6004 +@@ -97,7 +97,7 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
6005 + unsigned long long on_time_div;
6006 + unsigned long c = lpwm->info->clk_rate, base_unit_range;
6007 + unsigned long long base_unit, freq = NSEC_PER_SEC;
6008 +- u32 ctrl;
6009 ++ u32 orig_ctrl, ctrl;
6010 +
6011 + do_div(freq, period_ns);
6012 +
6013 +@@ -114,13 +114,17 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
6014 + do_div(on_time_div, period_ns);
6015 + on_time_div = 255ULL - on_time_div;
6016 +
6017 +- ctrl = pwm_lpss_read(pwm);
6018 ++ orig_ctrl = ctrl = pwm_lpss_read(pwm);
6019 + ctrl &= ~PWM_ON_TIME_DIV_MASK;
6020 + ctrl &= ~(base_unit_range << PWM_BASE_UNIT_SHIFT);
6021 + base_unit &= base_unit_range;
6022 + ctrl |= (u32) base_unit << PWM_BASE_UNIT_SHIFT;
6023 + ctrl |= on_time_div;
6024 +- pwm_lpss_write(pwm, ctrl);
6025 ++
6026 ++ if (orig_ctrl != ctrl) {
6027 ++ pwm_lpss_write(pwm, ctrl);
6028 ++ pwm_lpss_write(pwm, ctrl | PWM_SW_UPDATE);
6029 ++ }
6030 + }
6031 +
6032 + static inline void pwm_lpss_cond_enable(struct pwm_device *pwm, bool cond)
6033 +@@ -144,7 +148,6 @@ static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm,
6034 + return ret;
6035 + }
6036 + pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period);
6037 +- pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
6038 + pwm_lpss_cond_enable(pwm, lpwm->info->bypass == false);
6039 + ret = pwm_lpss_wait_for_update(pwm);
6040 + if (ret) {
6041 +@@ -157,7 +160,6 @@ static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm,
6042 + if (ret)
6043 + return ret;
6044 + pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period);
6045 +- pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
6046 + return pwm_lpss_wait_for_update(pwm);
6047 + }
6048 + } else if (pwm_is_enabled(pwm)) {
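/*
 * A condensed sketch of the pattern the pwm-lpss hunks above converge
 * on (CFG_MASK and new_cfg are placeholders): read-modify-write the
 * control word once, and only touch the hardware -- including the
 * PWM_SW_UPDATE latch -- when the value actually changed.
 */
u32 orig = pwm_lpss_read(pwm);
u32 ctrl = (orig & ~CFG_MASK) | new_cfg;

if (ctrl != orig) {
	pwm_lpss_write(pwm, ctrl);
	pwm_lpss_write(pwm, ctrl | PWM_SW_UPDATE);	/* latch the change */
}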
6049 +diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c
6050 +index 77feb603cd4c..3c64dbb08109 100644
6051 +--- a/drivers/rtc/rtc-s35390a.c
6052 ++++ b/drivers/rtc/rtc-s35390a.c
6053 +@@ -108,7 +108,7 @@ static int s35390a_get_reg(struct s35390a *s35390a, int reg, char *buf, int len)
6054 +
6055 + static int s35390a_init(struct s35390a *s35390a)
6056 + {
6057 +- char buf;
6058 ++ u8 buf;
6059 + int ret;
6060 + unsigned initcount = 0;
6061 +
6062 +diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
6063 +index 3d0c96a5c873..c19c26e0e405 100644
6064 +--- a/drivers/scsi/bfa/bfa_defs_svc.h
6065 ++++ b/drivers/scsi/bfa/bfa_defs_svc.h
6066 +@@ -1453,7 +1453,7 @@ union bfa_aen_data_u {
6067 + struct bfa_aen_entry_s {
6068 + struct list_head qe;
6069 + enum bfa_aen_category aen_category;
6070 +- u32 aen_type;
6071 ++ int aen_type;
6072 + union bfa_aen_data_u aen_data;
6073 + u64 aen_tv_sec;
6074 + u64 aen_tv_usec;
6075 +diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
6076 +index e61ed8dad0b4..bd4ac187fd8e 100644
6077 +--- a/drivers/scsi/bfa/bfad_im.h
6078 ++++ b/drivers/scsi/bfa/bfad_im.h
6079 +@@ -143,7 +143,7 @@ struct bfad_im_s {
6080 + static inline void bfad_im_post_vendor_event(struct bfa_aen_entry_s *entry,
6081 + struct bfad_s *drv, int cnt,
6082 + enum bfa_aen_category cat,
6083 +- enum bfa_ioc_aen_event evt)
6084 ++ int evt)
6085 + {
6086 + struct timespec64 ts;
6087 +
6088 +diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
6089 +index 1ed2cd82129d..3943347ec3c7 100644
6090 +--- a/drivers/scsi/dc395x.c
6091 ++++ b/drivers/scsi/dc395x.c
6092 +@@ -1969,6 +1969,11 @@ static void sg_update_list(struct ScsiReqBlk *srb, u32 left)
6093 + xferred -= psge->length;
6094 + } else {
6095 + /* Partial SG entry done */
6096 ++ pci_dma_sync_single_for_cpu(srb->dcb->
6097 ++ acb->dev,
6098 ++ srb->sg_bus_addr,
6099 ++ SEGMENTX_LEN,
6100 ++ PCI_DMA_TODEVICE);
6101 + psge->length -= xferred;
6102 + psge->address += xferred;
6103 + srb->sg_index = idx;
6104 +@@ -3447,14 +3452,12 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
6105 + }
6106 + }
6107 +
6108 +- if (dir != PCI_DMA_NONE && scsi_sg_count(cmd))
6109 +- pci_dma_sync_sg_for_cpu(acb->dev, scsi_sglist(cmd),
6110 +- scsi_sg_count(cmd), dir);
6111 +-
6112 + ckc_only = 0;
6113 + /* Check Error Conditions */
6114 + ckc_e:
6115 +
6116 ++ pci_unmap_srb(acb, srb);
6117 ++
6118 + if (cmd->cmnd[0] == INQUIRY) {
6119 + unsigned char *base = NULL;
6120 + struct ScsiInqData *ptr;
6121 +@@ -3507,7 +3510,6 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
6122 + cmd, cmd->result);
6123 + srb_free_insert(acb, srb);
6124 + }
6125 +- pci_unmap_srb(acb, srb);
6126 +
6127 + cmd->scsi_done(cmd);
6128 + waiting_process_next(acb);
6129 +diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
6130 +index fd9d82c9033d..f478d1f50dfc 100644
6131 +--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
6132 ++++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
6133 +@@ -906,6 +906,9 @@ static void hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
6134 + _r.maximum_linkrate = max;
6135 + _r.minimum_linkrate = min;
6136 +
6137 ++ sas_phy->phy->maximum_linkrate = max;
6138 ++ sas_phy->phy->minimum_linkrate = min;
6139 ++
6140 + hisi_hba->hw->phy_disable(hisi_hba, phy_no);
6141 + msleep(100);
6142 + hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
6143 +@@ -952,8 +955,7 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
6144 +
6145 + static void hisi_sas_task_done(struct sas_task *task)
6146 + {
6147 +- if (!del_timer(&task->slow_task->timer))
6148 +- return;
6149 ++ del_timer(&task->slow_task->timer);
6150 + complete(&task->slow_task->completion);
6151 + }
6152 +
6153 +@@ -962,13 +964,17 @@ static void hisi_sas_tmf_timedout(struct timer_list *t)
6154 + struct sas_task_slow *slow = from_timer(slow, t, timer);
6155 + struct sas_task *task = slow->task;
6156 + unsigned long flags;
6157 ++ bool is_completed = true;
6158 +
6159 + spin_lock_irqsave(&task->task_state_lock, flags);
6160 +- if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
6161 ++ if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
6162 + task->task_state_flags |= SAS_TASK_STATE_ABORTED;
6163 ++ is_completed = false;
6164 ++ }
6165 + spin_unlock_irqrestore(&task->task_state_lock, flags);
6166 +
6167 +- complete(&task->slow_task->completion);
6168 ++ if (!is_completed)
6169 ++ complete(&task->slow_task->completion);
6170 + }
6171 +
6172 + #define TASK_TIMEOUT 20
6173 +@@ -1021,8 +1027,16 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
6174 + struct hisi_sas_slot *slot = task->lldd_task;
6175 +
6176 + dev_err(dev, "abort tmf: TMF task timeout and not done\n");
6177 +- if (slot)
6178 ++ if (slot) {
6179 ++ struct hisi_sas_cq *cq =
6180 ++ &hisi_hba->cq[slot->dlvry_queue];
6181 ++ /*
6182 ++ * flush tasklet to avoid freeing task
6183 ++ * before using task in IO completion
6184 ++ */
6185 ++ tasklet_kill(&cq->tasklet);
6186 + slot->task = NULL;
6187 ++ }
6188 +
6189 + goto ex_err;
6190 + } else
6191 +@@ -1398,6 +1412,17 @@ static int hisi_sas_abort_task(struct sas_task *task)
6192 +
6193 + spin_lock_irqsave(&task->task_state_lock, flags);
6194 + if (task->task_state_flags & SAS_TASK_STATE_DONE) {
6195 ++ struct hisi_sas_slot *slot = task->lldd_task;
6196 ++ struct hisi_sas_cq *cq;
6197 ++
6198 ++ if (slot) {
6199 ++ /*
6200 ++ * flush tasklet to avoid freeing task
6201 ++ * before using task in IO completion
6202 ++ */
6203 ++ cq = &hisi_hba->cq[slot->dlvry_queue];
6204 ++ tasklet_kill(&cq->tasklet);
6205 ++ }
6206 + spin_unlock_irqrestore(&task->task_state_lock, flags);
6207 + rc = TMF_RESP_FUNC_COMPLETE;
6208 + goto out;
6209 +@@ -1453,12 +1478,19 @@ static int hisi_sas_abort_task(struct sas_task *task)
6210 + /* SMP */
6211 + struct hisi_sas_slot *slot = task->lldd_task;
6212 + u32 tag = slot->idx;
6213 ++ struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];
6214 +
6215 + rc = hisi_sas_internal_task_abort(hisi_hba, device,
6216 + HISI_SAS_INT_ABT_CMD, tag);
6217 + if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
6218 +- task->lldd_task)
6219 +- hisi_sas_do_release_task(hisi_hba, task, slot);
6220 ++ task->lldd_task) {
6221 ++ /*
6222 ++ * flush tasklet to avoid freeing task
6223 ++ * before using task in IO completion
6224 ++ */
6225 ++ tasklet_kill(&cq->tasklet);
6226 ++ slot->task = NULL;
6227 ++ }
6228 + }
6229 +
6230 + out:
6231 +@@ -1825,8 +1857,16 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
6232 + if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
6233 + struct hisi_sas_slot *slot = task->lldd_task;
6234 +
6235 +- if (slot)
6236 ++ if (slot) {
6237 ++ struct hisi_sas_cq *cq =
6238 ++ &hisi_hba->cq[slot->dlvry_queue];
6239 ++ /*
6240 ++ * flush tasklet to avoid freeing task
6241 ++ * before using task in IO completion
6242 ++ */
6243 ++ tasklet_kill(&cq->tasklet);
6244 + slot->task = NULL;
6245 ++ }
6246 + dev_err(dev, "internal task abort: timeout and not done.\n");
6247 + res = -EIO;
6248 + goto exit;
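/*
 * The recurring hisi_sas pattern above, condensed (names taken from
 * the driver): the completion tasklet may still be referencing a
 * timed-out slot, so it must be drained before the slot is recycled.
 */
struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];

tasklet_kill(&cq->tasklet);	/* waits for a running tasklet to finish */
slot->task = NULL;		/* safe now: no completion can observe it */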
6249 +diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
6250 +index 1c4ea58da1ae..c4774d63d5d0 100644
6251 +--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
6252 ++++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
6253 +@@ -2481,7 +2481,6 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
6254 + }
6255 +
6256 + out:
6257 +- hisi_sas_slot_task_free(hisi_hba, task, slot);
6258 + sts = ts->stat;
6259 + spin_lock_irqsave(&task->task_state_lock, flags);
6260 + if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
6261 +@@ -2491,6 +2490,7 @@ out:
6262 + }
6263 + task->task_state_flags |= SAS_TASK_STATE_DONE;
6264 + spin_unlock_irqrestore(&task->task_state_lock, flags);
6265 ++ hisi_sas_slot_task_free(hisi_hba, task, slot);
6266 +
6267 + if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) {
6268 + spin_lock_irqsave(&device->done_lock, flags);
6269 +diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
6270 +index 3922b17e2ea3..fb2a5969181b 100644
6271 +--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
6272 ++++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
6273 +@@ -1749,7 +1749,6 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
6274 + }
6275 +
6276 + out:
6277 +- hisi_sas_slot_task_free(hisi_hba, task, slot);
6278 + sts = ts->stat;
6279 + spin_lock_irqsave(&task->task_state_lock, flags);
6280 + if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
6281 +@@ -1759,6 +1758,7 @@ out:
6282 + }
6283 + task->task_state_flags |= SAS_TASK_STATE_DONE;
6284 + spin_unlock_irqrestore(&task->task_state_lock, flags);
6285 ++ hisi_sas_slot_task_free(hisi_hba, task, slot);
6286 +
6287 + if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) {
6288 + spin_lock_irqsave(&device->done_lock, flags);
6289 +diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
6290 +index bd6ac6b5980a..fe587ef1741d 100644
6291 +--- a/drivers/scsi/ips.c
6292 ++++ b/drivers/scsi/ips.c
6293 +@@ -3485,6 +3485,7 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
6294 +
6295 + case START_STOP:
6296 + scb->scsi_cmd->result = DID_OK << 16;
6297 ++ break;
6298 +
6299 + case TEST_UNIT_READY:
6300 + case INQUIRY:
6301 +diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
6302 +index 1ee3868ade07..7b5deae68d33 100644
6303 +--- a/drivers/scsi/isci/host.c
6304 ++++ b/drivers/scsi/isci/host.c
6305 +@@ -2717,9 +2717,9 @@ enum sci_status sci_controller_continue_io(struct isci_request *ireq)
6306 + * the task management request.
6307 + * @task_request: the handle to the task request object to start.
6308 + */
6309 +-enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
6310 +- struct isci_remote_device *idev,
6311 +- struct isci_request *ireq)
6312 ++enum sci_status sci_controller_start_task(struct isci_host *ihost,
6313 ++ struct isci_remote_device *idev,
6314 ++ struct isci_request *ireq)
6315 + {
6316 + enum sci_status status;
6317 +
6318 +@@ -2728,7 +2728,7 @@ enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
6319 + "%s: SCIC Controller starting task from invalid "
6320 + "state\n",
6321 + __func__);
6322 +- return SCI_TASK_FAILURE_INVALID_STATE;
6323 ++ return SCI_FAILURE_INVALID_STATE;
6324 + }
6325 +
6326 + status = sci_remote_device_start_task(ihost, idev, ireq);
6327 +diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
6328 +index b3539928073c..6bc3f022630a 100644
6329 +--- a/drivers/scsi/isci/host.h
6330 ++++ b/drivers/scsi/isci/host.h
6331 +@@ -489,7 +489,7 @@ enum sci_status sci_controller_start_io(
6332 + struct isci_remote_device *idev,
6333 + struct isci_request *ireq);
6334 +
6335 +-enum sci_task_status sci_controller_start_task(
6336 ++enum sci_status sci_controller_start_task(
6337 + struct isci_host *ihost,
6338 + struct isci_remote_device *idev,
6339 + struct isci_request *ireq);
6340 +diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
6341 +index ed197bc8e801..2f151708b59a 100644
6342 +--- a/drivers/scsi/isci/request.c
6343 ++++ b/drivers/scsi/isci/request.c
6344 +@@ -1626,9 +1626,9 @@ static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq,
6345 +
6346 + if (status == SCI_SUCCESS) {
6347 + if (ireq->stp.rsp.status & ATA_ERR)
6348 +- status = SCI_IO_FAILURE_RESPONSE_VALID;
6349 ++ status = SCI_FAILURE_IO_RESPONSE_VALID;
6350 + } else {
6351 +- status = SCI_IO_FAILURE_RESPONSE_VALID;
6352 ++ status = SCI_FAILURE_IO_RESPONSE_VALID;
6353 + }
6354 +
6355 + if (status != SCI_SUCCESS) {
6356 +diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
6357 +index 6dcaed0c1fc8..fb6eba331ac6 100644
6358 +--- a/drivers/scsi/isci/task.c
6359 ++++ b/drivers/scsi/isci/task.c
6360 +@@ -258,7 +258,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
6361 + struct isci_tmf *tmf, unsigned long timeout_ms)
6362 + {
6363 + DECLARE_COMPLETION_ONSTACK(completion);
6364 +- enum sci_task_status status = SCI_TASK_FAILURE;
6365 ++ enum sci_status status = SCI_FAILURE;
6366 + struct isci_request *ireq;
6367 + int ret = TMF_RESP_FUNC_FAILED;
6368 + unsigned long flags;
6369 +@@ -301,7 +301,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
6370 + /* start the TMF io. */
6371 + status = sci_controller_start_task(ihost, idev, ireq);
6372 +
6373 +- if (status != SCI_TASK_SUCCESS) {
6374 ++ if (status != SCI_SUCCESS) {
6375 + dev_dbg(&ihost->pdev->dev,
6376 + "%s: start_io failed - status = 0x%x, request = %p\n",
6377 + __func__,
6378 +diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
6379 +index b025a0b74341..23354f206533 100644
6380 +--- a/drivers/scsi/iscsi_tcp.c
6381 ++++ b/drivers/scsi/iscsi_tcp.c
6382 +@@ -800,7 +800,8 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
6383 + return rc;
6384 +
6385 + return iscsi_conn_get_addr_param((struct sockaddr_storage *)
6386 +- &addr, param, buf);
6387 ++ &addr,
6388 ++ (enum iscsi_param)param, buf);
6389 + default:
6390 + return iscsi_host_get_param(shost, param, buf);
6391 + }
6392 +diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
6393 +index 43732e8d1347..ebcfcbb8b4cc 100644
6394 +--- a/drivers/scsi/lpfc/lpfc.h
6395 ++++ b/drivers/scsi/lpfc/lpfc.h
6396 +@@ -490,6 +490,7 @@ struct lpfc_vport {
6397 + struct nvme_fc_local_port *localport;
6398 + uint8_t nvmei_support; /* driver supports NVME Initiator */
6399 + uint32_t last_fcp_wqidx;
6400 ++ uint32_t rcv_flogi_cnt; /* How many unsol FLOGIs ACK'd. */
6401 + };
6402 +
6403 + struct hbq_s {
6404 +diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
6405 +index f3c6801c0b31..222fa9b7f478 100644
6406 +--- a/drivers/scsi/lpfc/lpfc_els.c
6407 ++++ b/drivers/scsi/lpfc/lpfc_els.c
6408 +@@ -1057,9 +1057,9 @@ stop_rr_fcf_flogi:
6409 + goto flogifail;
6410 +
6411 + lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
6412 +- "0150 FLOGI failure Status:x%x/x%x TMO:x%x\n",
6413 ++ "0150 FLOGI failure Status:x%x/x%x xri x%x TMO:x%x\n",
6414 + irsp->ulpStatus, irsp->un.ulpWord[4],
6415 +- irsp->ulpTimeout);
6416 ++ cmdiocb->sli4_xritag, irsp->ulpTimeout);
6417 +
6418 + /* FLOGI failed, so there is no fabric */
6419 + spin_lock_irq(shost->host_lock);
6420 +@@ -1113,7 +1113,8 @@ stop_rr_fcf_flogi:
6421 + /* FLOGI completes successfully */
6422 + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6423 + "0101 FLOGI completes successfully, I/O tag:x%x, "
6424 +- "Data: x%x x%x x%x x%x x%x x%x\n", cmdiocb->iotag,
6425 ++ "xri x%x Data: x%x x%x x%x x%x x%x %x\n",
6426 ++ cmdiocb->iotag, cmdiocb->sli4_xritag,
6427 + irsp->un.ulpWord[4], sp->cmn.e_d_tov,
6428 + sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
6429 + vport->port_state, vport->fc_flag);
6430 +@@ -1157,6 +1158,7 @@ stop_rr_fcf_flogi:
6431 + phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
6432 + phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
6433 + spin_unlock_irq(&phba->hbalock);
6434 ++ phba->fcf.fcf_redisc_attempted = 0; /* reset */
6435 + goto out;
6436 + }
6437 + if (!rc) {
6438 +@@ -1171,6 +1173,7 @@ stop_rr_fcf_flogi:
6439 + phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
6440 + phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
6441 + spin_unlock_irq(&phba->hbalock);
6442 ++ phba->fcf.fcf_redisc_attempted = 0; /* reset */
6443 + goto out;
6444 + }
6445 + }
6446 +@@ -1553,8 +1556,10 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
6447 + */
6448 + new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
6449 +
6450 ++ /* return immediately if the WWPN matches ndlp */
6451 + if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
6452 + return ndlp;
6453 ++
6454 + if (phba->sli_rev == LPFC_SLI_REV4) {
6455 + active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
6456 + GFP_KERNEL);
6457 +@@ -1563,9 +1568,13 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
6458 + phba->cfg_rrq_xri_bitmap_sz);
6459 + }
6460 +
6461 +- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6462 +- "3178 PLOGI confirm: ndlp %p x%x: new_ndlp %p\n",
6463 +- ndlp, ndlp->nlp_DID, new_ndlp);
6464 ++ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
6465 ++ "3178 PLOGI confirm: ndlp x%x x%x x%x: "
6466 ++ "new_ndlp x%x x%x x%x\n",
6467 ++ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type,
6468 ++ (new_ndlp ? new_ndlp->nlp_DID : 0),
6469 ++ (new_ndlp ? new_ndlp->nlp_flag : 0),
6470 ++ (new_ndlp ? new_ndlp->nlp_fc4_type : 0));
6471 +
6472 + if (!new_ndlp) {
6473 + rc = memcmp(&ndlp->nlp_portname, name,
6474 +@@ -1614,6 +1623,14 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
6475 + phba->cfg_rrq_xri_bitmap_sz);
6476 + }
6477 +
6478 ++ /* At this point in this routine, we know new_ndlp will be
6479 ++ * returned. However, any previous GID_FTs that were done
6480 ++ * would have updated nlp_fc4_type in ndlp, so we must ensure
6481 ++ * new_ndlp has the right value.
6482 ++ */
6483 ++ if (vport->fc_flag & FC_FABRIC)
6484 ++ new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type;
6485 ++
6486 + lpfc_unreg_rpi(vport, new_ndlp);
6487 + new_ndlp->nlp_DID = ndlp->nlp_DID;
6488 + new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
6489 +@@ -1663,7 +1680,6 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
6490 + if (ndlp->nrport) {
6491 + ndlp->nrport = NULL;
6492 + lpfc_nlp_put(ndlp);
6493 +- new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type;
6494 + }
6495 +
6496 + /* We shall actually free the ndlp with both nlp_DID and
6497 +@@ -1737,6 +1753,12 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
6498 + active_rrqs_xri_bitmap)
6499 + mempool_free(active_rrqs_xri_bitmap,
6500 + phba->active_rrq_pool);
6501 ++
6502 ++ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
6503 ++ "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n",
6504 ++ new_ndlp->nlp_DID, new_ndlp->nlp_flag,
6505 ++ new_ndlp->nlp_fc4_type);
6506 ++
6507 + return new_ndlp;
6508 + }
6509 +
6510 +@@ -4264,14 +4286,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
6511 + default:
6512 + return 1;
6513 + }
6514 +- /* Xmit ELS ACC response tag <ulpIoTag> */
6515 +- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6516 +- "0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
6517 +- "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x "
6518 +- "fc_flag x%x\n",
6519 +- elsiocb->iotag, elsiocb->iocb.ulpContext,
6520 +- ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
6521 +- ndlp->nlp_rpi, vport->fc_flag);
6522 + if (ndlp->nlp_flag & NLP_LOGO_ACC) {
6523 + spin_lock_irq(shost->host_lock);
6524 + if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED ||
6525 +@@ -4440,6 +4454,15 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
6526 + lpfc_els_free_iocb(phba, elsiocb);
6527 + return 1;
6528 + }
6529 ++
6530 ++ /* Xmit ELS ACC response tag <ulpIoTag> */
6531 ++ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6532 ++ "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, "
6533 ++ "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
6534 ++ "RPI: x%x, fc_flag x%x\n",
6535 ++ rc, elsiocb->iotag, elsiocb->sli4_xritag,
6536 ++ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
6537 ++ ndlp->nlp_rpi, vport->fc_flag);
6538 + return 0;
6539 + }
6540 +
6541 +@@ -6450,6 +6473,11 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6542 + port_state = vport->port_state;
6543 + vport->fc_flag |= FC_PT2PT;
6544 + vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
6545 ++
6546 ++ /* Acking an unsol FLOGI. Count 1 for link bounce
6547 ++ * work-around.
6548 ++ */
6549 ++ vport->rcv_flogi_cnt++;
6550 + spin_unlock_irq(shost->host_lock);
6551 + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6552 + "3311 Rcv Flogi PS x%x new PS x%x "
6553 +@@ -7847,8 +7875,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6554 + struct ls_rjt stat;
6555 + uint32_t *payload;
6556 + uint32_t cmd, did, newnode;
6557 +- uint8_t rjt_exp, rjt_err = 0;
6558 ++ uint8_t rjt_exp, rjt_err = 0, init_link = 0;
6559 + IOCB_t *icmd = &elsiocb->iocb;
6560 ++ LPFC_MBOXQ_t *mbox;
6561 +
6562 + if (!vport || !(elsiocb->context2))
6563 + goto dropit;
6564 +@@ -7997,6 +8026,19 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6565 + did, vport->port_state, ndlp->nlp_flag);
6566 +
6567 + phba->fc_stat.elsRcvFLOGI++;
6568 ++
6569 ++ /* If the driver believes fabric discovery is done and is ready,
6570 ++ * bounce the link. There is some discrepancy.
6571 ++ */
6572 ++ if (vport->port_state >= LPFC_LOCAL_CFG_LINK &&
6573 ++ vport->fc_flag & FC_PT2PT &&
6574 ++ vport->rcv_flogi_cnt >= 1) {
6575 ++ rjt_err = LSRJT_LOGICAL_BSY;
6576 ++ rjt_exp = LSEXP_NOTHING_MORE;
6577 ++ init_link++;
6578 ++ goto lsrjt;
6579 ++ }
6580 ++
6581 + lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
6582 + if (newnode)
6583 + lpfc_nlp_put(ndlp);
6584 +@@ -8225,6 +8267,27 @@ lsrjt:
6585 +
6586 + lpfc_nlp_put(elsiocb->context1);
6587 + elsiocb->context1 = NULL;
6588 ++
6589 ++ /* Special case. Driver received an unsolicited command that
6590 ++ * is unsupportable given the driver's current state. Reset the
6591 ++ * link and start over.
6592 ++ */
6593 ++ if (init_link) {
6594 ++ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6595 ++ if (!mbox)
6596 ++ return;
6597 ++ lpfc_linkdown(phba);
6598 ++ lpfc_init_link(phba, mbox,
6599 ++ phba->cfg_topology,
6600 ++ phba->cfg_link_speed);
6601 ++ mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
6602 ++ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
6603 ++ mbox->vport = vport;
6604 ++ if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
6605 ++ MBX_NOT_FINISHED)
6606 ++ mempool_free(mbox, phba->mbox_mem_pool);
6607 ++ }
6608 ++
6609 + return;
6610 +
6611 + dropit:
6612 +diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
6613 +index db183d1f34ab..68f223882d96 100644
6614 +--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
6615 ++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
6616 +@@ -952,6 +952,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
6617 + }
6618 + spin_lock_irq(shost->host_lock);
6619 + phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
6620 ++ phba->pport->rcv_flogi_cnt = 0;
6621 + spin_unlock_irq(shost->host_lock);
6622 + }
6623 + return 0;
6624 +@@ -1023,6 +1024,7 @@ lpfc_linkup(struct lpfc_hba *phba)
6625 + {
6626 + struct lpfc_vport **vports;
6627 + int i;
6628 ++ struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
6629 +
6630 + phba->link_state = LPFC_LINK_UP;
6631 +
6632 +@@ -1036,6 +1038,13 @@ lpfc_linkup(struct lpfc_hba *phba)
6633 + lpfc_linkup_port(vports[i]);
6634 + lpfc_destroy_vport_work_array(phba, vports);
6635 +
6636 ++ /* Clear the pport flogi counter in case the link down was
6637 ++ * absorbed without an ACQE. No lock here - in worker thread
6638 ++ * and discovery is synchronized.
6639 ++ */
6640 ++ spin_lock_irq(shost->host_lock);
6641 ++ phba->pport->rcv_flogi_cnt = 0;
6642 ++ spin_unlock_irq(shost->host_lock);
6643 + return 0;
6644 + }
6645 +
6646 +@@ -1997,6 +2006,26 @@ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
6647 + "failover and change port state:x%x/x%x\n",
6648 + phba->pport->port_state, LPFC_VPORT_UNKNOWN);
6649 + phba->pport->port_state = LPFC_VPORT_UNKNOWN;
6650 ++
6651 ++ if (!phba->fcf.fcf_redisc_attempted) {
6652 ++ lpfc_unregister_fcf(phba);
6653 ++
6654 ++ rc = lpfc_sli4_redisc_fcf_table(phba);
6655 ++ if (!rc) {
6656 ++ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
6657 ++ "3195 Rediscover FCF table\n");
6658 ++ phba->fcf.fcf_redisc_attempted = 1;
6659 ++ lpfc_sli4_clear_fcf_rr_bmask(phba);
6660 ++ } else {
6661 ++ lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
6662 ++ "3196 Rediscover FCF table "
6663 ++ "failed. Status:x%x\n", rc);
6664 ++ }
6665 ++ } else {
6666 ++ lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
6667 ++ "3197 Already rediscover FCF table "
6668 ++ "attempted. No more retry\n");
6669 ++ }
6670 + goto stop_flogi_current_fcf;
6671 + } else {
6672 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
6673 +diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
6674 +index 9acb5b44ce4c..a7d3e532e0f5 100644
6675 +--- a/drivers/scsi/lpfc/lpfc_init.c
6676 ++++ b/drivers/scsi/lpfc/lpfc_init.c
6677 +@@ -5044,7 +5044,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
6678 + break;
6679 + }
6680 + /* If fast FCF failover rescan event is pending, do nothing */
6681 +- if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
6682 ++ if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
6683 + spin_unlock_irq(&phba->hbalock);
6684 + break;
6685 + }
6686 +diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
6687 +index c15f3265eefe..bd8dc6a2243c 100644
6688 +--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
6689 ++++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
6690 +@@ -2868,8 +2868,9 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
6691 + /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
6692 + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
6693 + "0211 DSM in event x%x on NPort x%x in "
6694 +- "state %d Data: x%x\n",
6695 +- evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);
6696 ++ "state %d Data: x%x x%x\n",
6697 ++ evt, ndlp->nlp_DID, cur_state,
6698 ++ ndlp->nlp_flag, ndlp->nlp_fc4_type);
6699 +
6700 + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
6701 + "DSM in: evt:%d ste:%d did:x%x",
6702 +diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
6703 +index e704297618e0..3361ae75578f 100644
6704 +--- a/drivers/scsi/lpfc/lpfc_sli.c
6705 ++++ b/drivers/scsi/lpfc/lpfc_sli.c
6706 +@@ -18431,15 +18431,8 @@ next_priority:
6707 + goto initial_priority;
6708 + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
6709 + "2844 No roundrobin failover FCF available\n");
6710 +- if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
6711 +- return LPFC_FCOE_FCF_NEXT_NONE;
6712 +- else {
6713 +- lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
6714 +- "3063 Only FCF available idx %d, flag %x\n",
6715 +- next_fcf_index,
6716 +- phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
6717 +- return next_fcf_index;
6718 +- }
6719 ++
6720 ++ return LPFC_FCOE_FCF_NEXT_NONE;
6721 + }
6722 +
6723 + if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
6724 +diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
6725 +index 399c0015c546..3dcc6615a23b 100644
6726 +--- a/drivers/scsi/lpfc/lpfc_sli4.h
6727 ++++ b/drivers/scsi/lpfc/lpfc_sli4.h
6728 +@@ -279,6 +279,7 @@ struct lpfc_fcf {
6729 + #define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */
6730 + #define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */
6731 + #define FCF_REDISC_PROG (FCF_REDISC_PEND | FCF_REDISC_EVT)
6732 ++ uint16_t fcf_redisc_attempted;
6733 + uint32_t addr_mode;
6734 + uint32_t eligible_fcf_cnt;
6735 + struct lpfc_fcf_rec current_rec;
6736 +diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
6737 +index bc37666f998e..2f31d266339f 100644
6738 +--- a/drivers/scsi/megaraid/megaraid_sas_base.c
6739 ++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
6740 +@@ -3894,12 +3894,12 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
6741 + /*
6742 + * The cur_state should not last for more than max_wait secs
6743 + */
6744 +- for (i = 0; i < (max_wait * 1000); i++) {
6745 ++ for (i = 0; i < max_wait; i++) {
6746 + curr_abs_state = instance->instancet->
6747 + read_fw_status_reg(instance->reg_set);
6748 +
6749 + if (abs_state == curr_abs_state) {
6750 +- msleep(1);
6751 ++ msleep(1000);
6752 + } else
6753 + break;
6754 + }
6755 +@@ -5410,7 +5410,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
6756 + if (!instance->msix_vectors) {
6757 + i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
6758 + if (i < 0)
6759 +- goto fail_setup_irqs;
6760 ++ goto fail_init_adapter;
6761 + }
6762 +
6763 + megasas_setup_reply_map(instance);
6764 +@@ -5619,9 +5619,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
6765 +
6766 + fail_get_ld_pd_list:
6767 + instance->instancet->disable_intr(instance);
6768 +-fail_init_adapter:
6769 + megasas_destroy_irqs(instance);
6770 +-fail_setup_irqs:
6771 ++fail_init_adapter:
6772 + if (instance->msix_vectors)
6773 + pci_free_irq_vectors(instance->pdev);
6774 + instance->msix_vectors = 0;
6775 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
6776 +index d2ab52026014..2c556c7fcf0d 100644
6777 +--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
6778 ++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
6779 +@@ -4117,7 +4117,7 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
6780 + * flag unset in NVDATA.
6781 + */
6782 + mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
6783 +- if (ioc->manu_pg11.EEDPTagMode == 0) {
6784 ++ if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) {
6785 + pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
6786 + ioc->name);
6787 + ioc->manu_pg11.EEDPTagMode &= ~0x3;
6788 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
6789 +index d29a2dcc7d0e..9b01c5a7aebd 100644
6790 +--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
6791 ++++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
6792 +@@ -692,10 +692,6 @@ mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc,
6793 + r = _config_request(ioc, &mpi_request, mpi_reply,
6794 + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
6795 + sizeof(*config_page));
6796 +- mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
6797 +- r = _config_request(ioc, &mpi_request, mpi_reply,
6798 +- MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
6799 +- sizeof(*config_page));
6800 + out:
6801 + return r;
6802 + }
6803 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
6804 +index 73d661a0ecbb..d3c944d99703 100644
6805 +--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
6806 ++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
6807 +@@ -3791,6 +3791,40 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
6808 + return _scsih_check_for_pending_tm(ioc, smid);
6809 + }
6810 +
6811 ++/** _scsih_allow_scmd_to_device - check whether scmd needs to
6812 ++ * be issued to the IOC or not.
6813 ++ * @ioc: per adapter object
6814 ++ * @scmd: pointer to scsi command object
6815 ++ *
6816 ++ * Returns true if scmd can be issued to the IOC, otherwise returns false.
6817 ++ */
6818 ++inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
6819 ++ struct scsi_cmnd *scmd)
6820 ++{
6821 ++
6822 ++ if (ioc->pci_error_recovery)
6823 ++ return false;
6824 ++
6825 ++ if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
6826 ++ if (ioc->remove_host)
6827 ++ return false;
6828 ++
6829 ++ return true;
6830 ++ }
6831 ++
6832 ++ if (ioc->remove_host) {
6833 ++
6834 ++ switch (scmd->cmnd[0]) {
6835 ++ case SYNCHRONIZE_CACHE:
6836 ++ case START_STOP:
6837 ++ return true;
6838 ++ default:
6839 ++ return false;
6840 ++ }
6841 ++ }
6842 ++
6843 ++ return true;
6844 ++}
6845 +
6846 + /**
6847 + * _scsih_sas_control_complete - completion routine
6848 +@@ -4623,7 +4657,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
6849 + return 0;
6850 + }
6851 +
6852 +- if (ioc->pci_error_recovery || ioc->remove_host) {
6853 ++ if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
6854 + scmd->result = DID_NO_CONNECT << 16;
6855 + scmd->scsi_done(scmd);
6856 + return 0;
6857 +diff --git a/drivers/scsi/zorro_esp.c b/drivers/scsi/zorro_esp.c
6858 +index bb70882e6b56..be79127db594 100644
6859 +--- a/drivers/scsi/zorro_esp.c
6860 ++++ b/drivers/scsi/zorro_esp.c
6861 +@@ -245,7 +245,7 @@ static int fastlane_esp_irq_pending(struct esp *esp)
6862 + static u32 zorro_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
6863 + u32 dma_len)
6864 + {
6865 +- return dma_len > 0xFFFFFF ? 0xFFFFFF : dma_len;
6866 ++ return dma_len > 0xFFFF ? 0xFFFF : dma_len;
6867 + }
6868 +
6869 + static void zorro_esp_reset_dma(struct esp *esp)
6870 +@@ -484,7 +484,6 @@ static void zorro_esp_send_blz1230_dma_cmd(struct esp *esp, u32 addr,
6871 + scsi_esp_cmd(esp, ESP_CMD_DMA);
6872 + zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
6873 + zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
6874 +- zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
6875 +
6876 + scsi_esp_cmd(esp, cmd);
6877 + }
6878 +@@ -529,7 +528,6 @@ static void zorro_esp_send_blz1230II_dma_cmd(struct esp *esp, u32 addr,
6879 + scsi_esp_cmd(esp, ESP_CMD_DMA);
6880 + zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
6881 + zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
6882 +- zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
6883 +
6884 + scsi_esp_cmd(esp, cmd);
6885 + }
6886 +@@ -574,7 +572,6 @@ static void zorro_esp_send_blz2060_dma_cmd(struct esp *esp, u32 addr,
6887 + scsi_esp_cmd(esp, ESP_CMD_DMA);
6888 + zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
6889 + zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
6890 +- zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
6891 +
6892 + scsi_esp_cmd(esp, cmd);
6893 + }
6894 +@@ -599,7 +596,6 @@ static void zorro_esp_send_cyber_dma_cmd(struct esp *esp, u32 addr,
6895 +
6896 + zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
6897 + zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
6898 +- zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
6899 +
6900 + if (write) {
6901 + /* DMA receive */
6902 +@@ -649,7 +645,6 @@ static void zorro_esp_send_cyberII_dma_cmd(struct esp *esp, u32 addr,
6903 +
6904 + zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
6905 + zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
6906 +- zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
6907 +
6908 + if (write) {
6909 + /* DMA receive */
6910 +@@ -691,7 +686,6 @@ static void zorro_esp_send_fastlane_dma_cmd(struct esp *esp, u32 addr,
6911 +
6912 + zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
6913 + zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
6914 +- zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
6915 +
6916 + if (write) {
6917 + /* DMA receive */
6918 +diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
6919 +index a5577dd5eb08..8ee06347447c 100644
6920 +--- a/drivers/soc/bcm/brcmstb/pm/pm-arm.c
6921 ++++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
6922 +@@ -404,7 +404,7 @@ noinline int brcmstb_pm_s3_finish(void)
6923 + {
6924 + struct brcmstb_s3_params *params = ctrl.s3_params;
6925 + dma_addr_t params_pa = ctrl.s3_params_pa;
6926 +- phys_addr_t reentry = virt_to_phys(&cpu_resume);
6927 ++ phys_addr_t reentry = virt_to_phys(&cpu_resume_arm);
6928 + enum bsp_initiate_command cmd;
6929 + u32 flags;
6930 +
6931 +diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
6932 +index e2be7da74343..eb2d2de172af 100644
6933 +--- a/drivers/spi/spi-omap2-mcspi.c
6934 ++++ b/drivers/spi/spi-omap2-mcspi.c
6935 +@@ -299,7 +299,7 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
6936 + struct omap2_mcspi_cs *cs = spi->controller_state;
6937 + struct omap2_mcspi *mcspi;
6938 + unsigned int wcnt;
6939 +- int max_fifo_depth, fifo_depth, bytes_per_word;
6940 ++ int max_fifo_depth, bytes_per_word;
6941 + u32 chconf, xferlevel;
6942 +
6943 + mcspi = spi_master_get_devdata(master);
6944 +@@ -315,10 +315,6 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
6945 + else
6946 + max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH;
6947 +
6948 +- fifo_depth = gcd(t->len, max_fifo_depth);
6949 +- if (fifo_depth < 2 || fifo_depth % bytes_per_word != 0)
6950 +- goto disable_fifo;
6951 +-
6952 + wcnt = t->len / bytes_per_word;
6953 + if (wcnt > OMAP2_MCSPI_MAX_FIFOWCNT)
6954 + goto disable_fifo;
6955 +@@ -326,16 +322,17 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
6956 + xferlevel = wcnt << 16;
6957 + if (t->rx_buf != NULL) {
6958 + chconf |= OMAP2_MCSPI_CHCONF_FFER;
6959 +- xferlevel |= (fifo_depth - 1) << 8;
6960 ++ xferlevel |= (bytes_per_word - 1) << 8;
6961 + }
6962 ++
6963 + if (t->tx_buf != NULL) {
6964 + chconf |= OMAP2_MCSPI_CHCONF_FFET;
6965 +- xferlevel |= fifo_depth - 1;
6966 ++ xferlevel |= bytes_per_word - 1;
6967 + }
6968 +
6969 + mcspi_write_reg(master, OMAP2_MCSPI_XFERLEVEL, xferlevel);
6970 + mcspi_write_chconf0(spi, chconf);
6971 +- mcspi->fifo_depth = fifo_depth;
6972 ++ mcspi->fifo_depth = max_fifo_depth;
6973 +
6974 + return;
6975 + }
6976 +@@ -585,7 +582,6 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
6977 + struct dma_slave_config cfg;
6978 + enum dma_slave_buswidth width;
6979 + unsigned es;
6980 +- u32 burst;
6981 + void __iomem *chstat_reg;
6982 + void __iomem *irqstat_reg;
6983 + int wait_res;
6984 +@@ -605,22 +601,14 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
6985 + }
6986 +
6987 + count = xfer->len;
6988 +- burst = 1;
6989 +-
6990 +- if (mcspi->fifo_depth > 0) {
6991 +- if (count > mcspi->fifo_depth)
6992 +- burst = mcspi->fifo_depth / es;
6993 +- else
6994 +- burst = count / es;
6995 +- }
6996 +
6997 + memset(&cfg, 0, sizeof(cfg));
6998 + cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0;
6999 + cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0;
7000 + cfg.src_addr_width = width;
7001 + cfg.dst_addr_width = width;
7002 +- cfg.src_maxburst = burst;
7003 +- cfg.dst_maxburst = burst;
7004 ++ cfg.src_maxburst = 1;
7005 ++ cfg.dst_maxburst = 1;
7006 +
7007 + rx = xfer->rx_buf;
7008 + tx = xfer->tx_buf;
7009 +diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
7010 +index 101cd6aae2ea..30ea0a2068e0 100644
7011 +--- a/drivers/spi/spi-sh-msiof.c
7012 ++++ b/drivers/spi/spi-sh-msiof.c
7013 +@@ -1343,8 +1343,8 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
7014 +
7015 + i = platform_get_irq(pdev, 0);
7016 + if (i < 0) {
7017 +- dev_err(&pdev->dev, "cannot get platform IRQ\n");
7018 +- ret = -ENOENT;
7019 ++ dev_err(&pdev->dev, "cannot get IRQ\n");
7020 ++ ret = i;
7021 + goto err1;
7022 + }
7023 +
7024 +diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c
7025 +index e18c0723b760..0d38589c2600 100644
7026 +--- a/drivers/staging/comedi/drivers/usbduxfast.c
7027 ++++ b/drivers/staging/comedi/drivers/usbduxfast.c
7028 +@@ -1,6 +1,6 @@
7029 + // SPDX-License-Identifier: GPL-2.0+
7030 + /*
7031 +- * Copyright (C) 2004-2014 Bernd Porr, mail@××××××××××××.uk
7032 ++ * Copyright (C) 2004-2019 Bernd Porr, mail@××××××××××××.uk
7033 + */
7034 +
7035 + /*
7036 +@@ -8,7 +8,7 @@
7037 + * Description: University of Stirling USB DAQ & INCITE Technology Limited
7038 + * Devices: [ITL] USB-DUX-FAST (usbduxfast)
7039 + * Author: Bernd Porr <mail@××××××××××××.uk>
7040 +- * Updated: 10 Oct 2014
7041 ++ * Updated: 16 Nov 2019
7042 + * Status: stable
7043 + */
7044 +
7045 +@@ -22,6 +22,7 @@
7046 + *
7047 + *
7048 + * Revision history:
7049 ++ * 1.0: Fixed a rounding error in usbduxfast_ai_cmdtest
7050 + * 0.9: Dropping the first data packet which seems to be from the last transfer.
7051 + * Buffer overflows in the FX2 are handed over to comedi.
7052 + * 0.92: Dropping now 4 packets. The quad buffer has to be emptied.
7053 +@@ -350,6 +351,7 @@ static int usbduxfast_ai_cmdtest(struct comedi_device *dev,
7054 + struct comedi_cmd *cmd)
7055 + {
7056 + int err = 0;
7057 ++ int err2 = 0;
7058 + unsigned int steps;
7059 + unsigned int arg;
7060 +
7061 +@@ -399,11 +401,16 @@ static int usbduxfast_ai_cmdtest(struct comedi_device *dev,
7062 + */
7063 + steps = (cmd->convert_arg * 30) / 1000;
7064 + if (cmd->chanlist_len != 1)
7065 +- err |= comedi_check_trigger_arg_min(&steps,
7066 +- MIN_SAMPLING_PERIOD);
7067 +- err |= comedi_check_trigger_arg_max(&steps, MAX_SAMPLING_PERIOD);
7068 +- arg = (steps * 1000) / 30;
7069 +- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
7070 ++ err2 |= comedi_check_trigger_arg_min(&steps,
7071 ++ MIN_SAMPLING_PERIOD);
7072 ++ else
7073 ++ err2 |= comedi_check_trigger_arg_min(&steps, 1);
7074 ++ err2 |= comedi_check_trigger_arg_max(&steps, MAX_SAMPLING_PERIOD);
7075 ++ if (err2) {
7076 ++ err |= err2;
7077 ++ arg = (steps * 1000) / 30;
7078 ++ err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
7079 ++ }
7080 +
7081 + if (cmd->stop_src == TRIG_COUNT)
7082 + err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
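/*
 * A worked example of the truncation the usbduxfast hunk above guards
 * against (value hypothetical; the driver's 30/1000 factor corresponds
 * to a 30 MHz timebase with periods given in ns):
 *
 *	steps = 34333 * 30 / 1000;	-> 1029  (truncated from 1029.99)
 *	arg   = 1029 * 1000 / 30;	-> 34300, which != 34333
 *
 * Unconditionally writing the recomputed value back into
 * cmd->convert_arg therefore reported a spurious "argument changed"
 * error; after the fix the argument is only rewritten when one of the
 * range checks actually failed.
 */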
7083 +diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
7084 +index e16b3cb1808c..1c9830b2c84d 100644
7085 +--- a/drivers/thermal/armada_thermal.c
7086 ++++ b/drivers/thermal/armada_thermal.c
7087 +@@ -526,8 +526,8 @@ static int armada_thermal_probe_legacy(struct platform_device *pdev,
7088 +
7089 + /* First memory region points towards the status register */
7090 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
7091 +- if (IS_ERR(res))
7092 +- return PTR_ERR(res);
7093 ++ if (!res)
7094 ++ return -EIO;
7095 +
7096 + /*
7097 + * Edit the resource start address and length to map over all the
7098 +diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
7099 +index 8df2ce94c28d..4dc30e7890f6 100644
7100 +--- a/drivers/thermal/rcar_thermal.c
7101 ++++ b/drivers/thermal/rcar_thermal.c
7102 +@@ -434,8 +434,8 @@ static irqreturn_t rcar_thermal_irq(int irq, void *data)
7103 + rcar_thermal_for_each_priv(priv, common) {
7104 + if (rcar_thermal_had_changed(priv, status)) {
7105 + rcar_thermal_irq_disable(priv);
7106 +- schedule_delayed_work(&priv->work,
7107 +- msecs_to_jiffies(300));
7108 ++ queue_delayed_work(system_freezable_wq, &priv->work,
7109 ++ msecs_to_jiffies(300));
7110 + }
7111 + }
7112 +
7113 +@@ -493,7 +493,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
7114 + pm_runtime_get_sync(dev);
7115 +
7116 + for (i = 0; i < chip->nirqs; i++) {
7117 +- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
7118 ++ irq = platform_get_resource(pdev, IORESOURCE_IRQ, i);
7119 + if (!irq)
7120 + continue;
7121 + if (!common->base) {
7122 +diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
7123 +index 678406e0948b..00099a8439d2 100644
7124 +--- a/drivers/tty/pty.c
7125 ++++ b/drivers/tty/pty.c
7126 +@@ -28,6 +28,7 @@
7127 + #include <linux/mount.h>
7128 + #include <linux/file.h>
7129 + #include <linux/ioctl.h>
7130 ++#include <linux/compat.h>
7131 +
7132 + #undef TTY_DEBUG_HANGUP
7133 + #ifdef TTY_DEBUG_HANGUP
7134 +@@ -488,6 +489,7 @@ static int pty_bsd_ioctl(struct tty_struct *tty,
7135 + return -ENOIOCTLCMD;
7136 + }
7137 +
7138 ++#ifdef CONFIG_COMPAT
7139 + static long pty_bsd_compat_ioctl(struct tty_struct *tty,
7140 + unsigned int cmd, unsigned long arg)
7141 + {
7142 +@@ -495,8 +497,11 @@ static long pty_bsd_compat_ioctl(struct tty_struct *tty,
7143 + * PTY ioctls don't require any special translation between 32-bit and
7144 + * 64-bit userspace, they are already compatible.
7145 + */
7146 +- return pty_bsd_ioctl(tty, cmd, arg);
7147 ++ return pty_bsd_ioctl(tty, cmd, (unsigned long)compat_ptr(arg));
7148 + }
7149 ++#else
7150 ++#define pty_bsd_compat_ioctl NULL
7151 ++#endif
7152 +
7153 + static int legacy_count = CONFIG_LEGACY_PTY_COUNT;
7154 + /*
7155 +@@ -676,6 +681,7 @@ static int pty_unix98_ioctl(struct tty_struct *tty,
7156 + return -ENOIOCTLCMD;
7157 + }
7158 +
7159 ++#ifdef CONFIG_COMPAT
7160 + static long pty_unix98_compat_ioctl(struct tty_struct *tty,
7161 + unsigned int cmd, unsigned long arg)
7162 + {
7163 +@@ -683,8 +689,12 @@ static long pty_unix98_compat_ioctl(struct tty_struct *tty,
7164 + * PTY ioctls don't require any special translation between 32-bit and
7165 + * 64-bit userspace, they are already compatible.
7166 + */
7167 +- return pty_unix98_ioctl(tty, cmd, arg);
7168 ++ return pty_unix98_ioctl(tty, cmd,
7169 ++ cmd == TIOCSIG ? arg : (unsigned long)compat_ptr(arg));
7170 + }
7171 ++#else
7172 ++#define pty_unix98_compat_ioctl NULL
7173 ++#endif
7174 +
7175 + /**
7176 + * ptm_unix98_lookup - find a pty master
7177 +diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
7178 +index a94086597ebd..b88ecf102764 100644
7179 +--- a/drivers/tty/synclink_gt.c
7180 ++++ b/drivers/tty/synclink_gt.c
7181 +@@ -1186,14 +1186,13 @@ static long slgt_compat_ioctl(struct tty_struct *tty,
7182 + unsigned int cmd, unsigned long arg)
7183 + {
7184 + struct slgt_info *info = tty->driver_data;
7185 +- int rc = -ENOIOCTLCMD;
7186 ++ int rc;
7187 +
7188 + if (sanity_check(info, tty->name, "compat_ioctl"))
7189 + return -ENODEV;
7190 + DBGINFO(("%s compat_ioctl() cmd=%08X\n", info->device_name, cmd));
7191 +
7192 + switch (cmd) {
7193 +-
7194 + case MGSL_IOCSPARAMS32:
7195 + rc = set_params32(info, compat_ptr(arg));
7196 + break;
7197 +@@ -1213,18 +1212,11 @@ static long slgt_compat_ioctl(struct tty_struct *tty,
7198 + case MGSL_IOCWAITGPIO:
7199 + case MGSL_IOCGXSYNC:
7200 + case MGSL_IOCGXCTRL:
7201 +- case MGSL_IOCSTXIDLE:
7202 +- case MGSL_IOCTXENABLE:
7203 +- case MGSL_IOCRXENABLE:
7204 +- case MGSL_IOCTXABORT:
7205 +- case TIOCMIWAIT:
7206 +- case MGSL_IOCSIF:
7207 +- case MGSL_IOCSXSYNC:
7208 +- case MGSL_IOCSXCTRL:
7209 +- rc = ioctl(tty, cmd, arg);
7210 ++ rc = ioctl(tty, cmd, (unsigned long)compat_ptr(arg));
7211 + break;
7212 ++ default:
7213 ++ rc = ioctl(tty, cmd, arg);
7214 + }
7215 +-
7216 + DBGINFO(("%s compat_ioctl() cmd=%08X rc=%d\n", info->device_name, cmd, rc));
7217 + return rc;
7218 + }
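/*
 * Both tty hunks above apply the same compat-ioctl rule, sketched here
 * with a hypothetical foo_ioctl(): pointer arguments from 32-bit
 * userspace must be widened with compat_ptr(), while plain scalar
 * arguments (e.g. TIOCSIG's signal number) pass through unchanged.
 */
#ifdef CONFIG_COMPAT
static long foo_compat_ioctl(struct tty_struct *tty,
			     unsigned int cmd, unsigned long arg)
{
	return foo_ioctl(tty, cmd, (unsigned long)compat_ptr(arg));
}
#else
#define foo_compat_ioctl NULL	/* no compat entry point needed */
#endif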
7219 +diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
7220 +index 1c6da8d6cccf..718d692b07ac 100644
7221 +--- a/drivers/usb/misc/appledisplay.c
7222 ++++ b/drivers/usb/misc/appledisplay.c
7223 +@@ -148,8 +148,11 @@ static int appledisplay_bl_update_status(struct backlight_device *bd)
7224 + pdata->msgdata, 2,
7225 + ACD_USB_TIMEOUT);
7226 + mutex_unlock(&pdata->sysfslock);
7227 +-
7228 +- return retval;
7229 ++
7230 ++ if (retval < 0)
7231 ++ return retval;
7232 ++ else
7233 ++ return 0;
7234 + }
7235 +
7236 + static int appledisplay_bl_get_brightness(struct backlight_device *bd)
7237 +@@ -167,7 +170,12 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd)
7238 + 0,
7239 + pdata->msgdata, 2,
7240 + ACD_USB_TIMEOUT);
7241 +- brightness = pdata->msgdata[1];
7242 ++ if (retval < 2) {
7243 ++ if (retval >= 0)
7244 ++ retval = -EMSGSIZE;
7245 ++ } else {
7246 ++ brightness = pdata->msgdata[1];
7247 ++ }
7248 + mutex_unlock(&pdata->sysfslock);
7249 +
7250 + if (retval < 0)
7251 +@@ -302,6 +310,7 @@ error:
7252 + if (pdata) {
7253 + if (pdata->urb) {
7254 + usb_kill_urb(pdata->urb);
7255 ++ cancel_delayed_work_sync(&pdata->work);
7256 + if (pdata->urbdata)
7257 + usb_free_coherent(pdata->udev, ACD_URB_BUFFER_LEN,
7258 + pdata->urbdata, pdata->urb->transfer_dma);
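/*
 * The appledisplay return-value fixes above hinge on one convention:
 * usb_control_msg() returns the number of bytes transferred (>= 0) on
 * success, while backlight callbacks must return 0 or a negative
 * errno. A minimal normalization helper (illustrative only):
 */
static int normalize_usb_result(int retval)
{
	return retval < 0 ? retval : 0;	/* collapse byte counts to 0 */
}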
7259 +diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
7260 +index 34e6cd6f40d3..87067c3d6109 100644
7261 +--- a/drivers/usb/misc/chaoskey.c
7262 ++++ b/drivers/usb/misc/chaoskey.c
7263 +@@ -384,13 +384,17 @@ static int _chaoskey_fill(struct chaoskey *dev)
7264 + !dev->reading,
7265 + (started ? NAK_TIMEOUT : ALEA_FIRST_TIMEOUT) );
7266 +
7267 +- if (result < 0)
7268 ++ if (result < 0) {
7269 ++ usb_kill_urb(dev->urb);
7270 + goto out;
7271 ++ }
7272 +
7273 +- if (result == 0)
7274 ++ if (result == 0) {
7275 + result = -ETIMEDOUT;
7276 +- else
7277 ++ usb_kill_urb(dev->urb);
7278 ++ } else {
7279 + result = dev->valid;
7280 ++ }
7281 + out:
7282 + /* Let the device go back to sleep eventually */
7283 + usb_autopm_put_interface(dev->interface);
7284 +@@ -526,7 +530,21 @@ static int chaoskey_suspend(struct usb_interface *interface,
7285 +
7286 + static int chaoskey_resume(struct usb_interface *interface)
7287 + {
7288 ++ struct chaoskey *dev;
7289 ++ struct usb_device *udev = interface_to_usbdev(interface);
7290 ++
7291 + usb_dbg(interface, "resume");
7292 ++ dev = usb_get_intfdata(interface);
7293 ++
7294 ++ /*
7295 ++ * We may have lost power.
7296 ++ * In that case a device that needs a long time
7297 ++ * for its first requests needs the extended
7298 ++ * timeout again.
7299 ++ */
7300 ++ if (le16_to_cpu(udev->descriptor.idVendor) == ALEA_VENDOR_ID)
7301 ++ dev->reads_started = false;
7302 ++
7303 + return 0;
7304 + }
7305 + #else
7306 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
7307 +index e732949f6567..7ae121567098 100644
7308 +--- a/drivers/usb/serial/cp210x.c
7309 ++++ b/drivers/usb/serial/cp210x.c
7310 +@@ -125,6 +125,7 @@ static const struct usb_device_id id_table[] = {
7311 + { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
7312 + { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
7313 + { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
7314 ++ { USB_DEVICE(0x10C4, 0x83AA) }, /* Mark-10 Digital Force Gauge */
7315 + { USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */
7316 + { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
7317 + { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
7318 +diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
7319 +index e8f275a0326d..c0232b67a40f 100644
7320 +--- a/drivers/usb/serial/mos7720.c
7321 ++++ b/drivers/usb/serial/mos7720.c
7322 +@@ -1894,10 +1894,6 @@ static int mos7720_startup(struct usb_serial *serial)
7323 + product = le16_to_cpu(serial->dev->descriptor.idProduct);
7324 + dev = serial->dev;
7325 +
7326 +- /* setting configuration feature to one */
7327 +- usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
7328 +- (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000);
7329 +-
7330 + if (product == MOSCHIP_DEVICE_ID_7715) {
7331 + struct urb *urb = serial->port[0]->interrupt_in_urb;
7332 +
7333 +diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
7334 +index b42bad85097a..4a7bd26841af 100644
7335 +--- a/drivers/usb/serial/mos7840.c
7336 ++++ b/drivers/usb/serial/mos7840.c
7337 +@@ -118,11 +118,15 @@
7338 + /* This driver also supports
7339 + * ATEN UC2324 device using Moschip MCS7840
7340 + * ATEN UC2322 device using Moschip MCS7820
7341 ++ * MOXA UPort 2210 device using Moschip MCS7820
7342 + */
7343 + #define USB_VENDOR_ID_ATENINTL 0x0557
7344 + #define ATENINTL_DEVICE_ID_UC2324 0x2011
7345 + #define ATENINTL_DEVICE_ID_UC2322 0x7820
7346 +
7347 ++#define USB_VENDOR_ID_MOXA 0x110a
7348 ++#define MOXA_DEVICE_ID_2210 0x2210
7349 ++
7350 + /* Interrupt Routine Defines */
7351 +
7352 + #define SERIAL_IIR_RLS 0x06
7353 +@@ -193,6 +197,7 @@ static const struct usb_device_id id_table[] = {
7354 + {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
7355 + {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
7356 + {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
7357 ++ {USB_DEVICE(USB_VENDOR_ID_MOXA, MOXA_DEVICE_ID_2210)},
7358 + {} /* terminating entry */
7359 + };
7360 + MODULE_DEVICE_TABLE(usb, id_table);
7361 +@@ -2053,6 +2058,7 @@ static int mos7840_probe(struct usb_serial *serial,
7362 + const struct usb_device_id *id)
7363 + {
7364 + u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
7365 ++ u16 vid = le16_to_cpu(serial->dev->descriptor.idVendor);
7366 + u8 *buf;
7367 + int device_type;
7368 +
7369 +@@ -2062,6 +2068,11 @@ static int mos7840_probe(struct usb_serial *serial,
7370 + goto out;
7371 + }
7372 +
7373 ++ if (vid == USB_VENDOR_ID_MOXA && product == MOXA_DEVICE_ID_2210) {
7374 ++ device_type = MOSCHIP_DEVICE_ID_7820;
7375 ++ goto out;
7376 ++ }
7377 ++
7378 + buf = kzalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
7379 + if (!buf)
7380 + return -ENOMEM;
7381 +@@ -2314,11 +2325,6 @@ out:
7382 + goto error;
7383 + } else
7384 + dev_dbg(&port->dev, "ZLP_REG5 Writing success status%d\n", status);
7385 +-
7386 +- /* setting configuration feature to one */
7387 +- usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
7388 +- 0x03, 0x00, 0x01, 0x00, NULL, 0x00,
7389 +- MOS_WDR_TIMEOUT);
7390 + }
7391 + return 0;
7392 + error:
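
The mos7840 change hard-codes the chip type for the MOXA UPort 2210 instead of probing for it, since the VID/PID pair alone identifies an MCS7820. A hedged sketch of that early-out; the constant values match the patch, but the helper itself is illustrative:

#include <stdint.h>
#include <stdio.h>

#define VID_MOXA	0x110a
#define PID_UPORT_2210	0x2210
#define CHIP_MCS7820	0x7820
#define CHIP_UNKNOWN	0

static int chip_for(uint16_t vid, uint16_t pid)
{
	if (vid == VID_MOXA && pid == PID_UPORT_2210)
		return CHIP_MCS7820;	/* two-port chip, no probe read */
	return CHIP_UNKNOWN;		/* fall back to reading the device */
}

int main(void)
{
	printf("chip: 0x%04x\n", chip_for(VID_MOXA, PID_UPORT_2210));
	return 0;
}
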
7393 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
7394 +index 3cc659a62782..2905274e3626 100644
7395 +--- a/drivers/usb/serial/option.c
7396 ++++ b/drivers/usb/serial/option.c
7397 +@@ -197,6 +197,7 @@ static void option_instat_callback(struct urb *urb);
7398 + #define DELL_PRODUCT_5804_MINICARD_ATT 0x819b /* Novatel E371 */
7399 +
7400 + #define DELL_PRODUCT_5821E 0x81d7
7401 ++#define DELL_PRODUCT_5821E_ESIM 0x81e0
7402 +
7403 + #define KYOCERA_VENDOR_ID 0x0c88
7404 + #define KYOCERA_PRODUCT_KPC650 0x17da
7405 +@@ -1044,6 +1045,8 @@ static const struct usb_device_id option_ids[] = {
7406 + { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) },
7407 + { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E),
7408 + .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
7409 ++ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E_ESIM),
7410 ++ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
7411 + { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
7412 + { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
7413 + { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
7414 +@@ -1990,6 +1993,10 @@ static const struct usb_device_id option_ids[] = {
7415 + { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x13) },
7416 + { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x14) },
7417 + { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x1b) },
7418 ++ { USB_DEVICE(0x0489, 0xe0b4), /* Foxconn T77W968 */
7419 ++ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
7420 ++ { USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */
7421 ++ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
7422 + { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */
7423 + .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
7424 + { USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
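
The new option.c entries reuse the driver's RSVD(n) convention: driver_info carries a mask of interface numbers the serial driver must not claim because they belong to another function of the modem (e.g. its network interface). The sketch below models the check as a plain bitmask; the kernel packs this into driver_info, so treat the encoding here as illustrative:

#include <stdio.h>

/* A reserved-interface mask in the spirit of option.c's RSVD(): bit n
 * set means interface n must not be claimed by the serial driver. */
#define RSVD(ifnum)	(1UL << (ifnum))

static int interface_is_reserved(unsigned long mask, int ifnum)
{
	return (mask & RSVD(ifnum)) != 0;
}

int main(void)
{
	/* same layout the new Dell 5821e/Foxconn entries use */
	unsigned long mask = RSVD(0) | RSVD(1) | RSVD(6);

	printf("if1 reserved: %d\n", interface_is_reserved(mask, 1)); /* 1 */
	printf("if2 reserved: %d\n", interface_is_reserved(mask, 2)); /* 0 */
	return 0;
}
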
7425 +diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c
7426 +index 819ae3b2bd7e..39cf19001239 100644
7427 +--- a/drivers/usb/typec/tcpm.c
7428 ++++ b/drivers/usb/typec/tcpm.c
7429 +@@ -3322,7 +3322,8 @@ static void run_state_machine(struct tcpm_port *port)
7430 + case SNK_HARD_RESET_SINK_OFF:
7431 + memset(&port->pps_data, 0, sizeof(port->pps_data));
7432 + tcpm_set_vconn(port, false);
7433 +- tcpm_set_charge(port, false);
7434 ++ if (port->pd_capable)
7435 ++ tcpm_set_charge(port, false);
7436 + tcpm_set_roles(port, port->self_powered, TYPEC_SINK,
7437 + TYPEC_DEVICE);
7438 + /*
7439 +@@ -3354,6 +3355,12 @@ static void run_state_machine(struct tcpm_port *port)
7440 + * Similar, dual-mode ports in source mode should transition
7441 + * to PE_SNK_Transition_to_default.
7442 + */
7443 ++ if (port->pd_capable) {
7444 ++ tcpm_set_current_limit(port,
7445 ++ tcpm_get_current_limit(port),
7446 ++ 5000);
7447 ++ tcpm_set_charge(port, true);
7448 ++ }
7449 + tcpm_set_attached_state(port, true);
7450 + tcpm_set_state(port, SNK_STARTUP, 0);
7451 + break;
7452 +diff --git a/drivers/usb/usbip/Kconfig b/drivers/usb/usbip/Kconfig
7453 +index a20b65cb6678..8276a20ecea7 100644
7454 +--- a/drivers/usb/usbip/Kconfig
7455 ++++ b/drivers/usb/usbip/Kconfig
7456 +@@ -2,6 +2,7 @@ config USBIP_CORE
7457 + tristate "USB/IP support"
7458 + depends on NET
7459 + select USB_COMMON
7460 ++ select SGL_ALLOC
7461 + ---help---
7462 + This enables pushing USB packets over IP to allow remote
7463 + machines direct access to USB devices. It provides the
7464 +diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
7465 +index 75d8756c6d27..8c55cd833098 100644
7466 +--- a/drivers/usb/usbip/stub_rx.c
7467 ++++ b/drivers/usb/usbip/stub_rx.c
7468 +@@ -470,18 +470,50 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
7469 + if (pipe == -1)
7470 + return;
7471 +
7472 ++ /*
7473 ++ * Smatch reported the error case where use_sg is true and buf_len is 0.
7474 ++	 * In this case, it adds SDEV_EVENT_ERROR_MALLOC and stub_priv will be
7475 ++	 * released by the stub event handler and the connection will be shut down.
7476 ++ */
7477 + priv = stub_priv_alloc(sdev, pdu);
7478 + if (!priv)
7479 + return;
7480 +
7481 + buf_len = (unsigned long long)pdu->u.cmd_submit.transfer_buffer_length;
7482 +
7483 ++ if (use_sg && !buf_len) {
7484 ++ dev_err(&udev->dev, "sg buffer with zero length\n");
7485 ++ goto err_malloc;
7486 ++ }
7487 ++
7488 + /* allocate urb transfer buffer, if needed */
7489 + if (buf_len) {
7490 + if (use_sg) {
7491 + sgl = sgl_alloc(buf_len, GFP_KERNEL, &nents);
7492 + if (!sgl)
7493 + goto err_malloc;
7494 ++
7495 ++ /* Check if the server's HCD supports SG */
7496 ++ if (!udev->bus->sg_tablesize) {
7497 ++ /*
7498 ++ * If the server's HCD doesn't support SG, break
7499 ++ * a single SG request into several URBs and map
7500 ++ * each SG list entry to corresponding URB
7501 ++			 * each SG list entry to the corresponding URB
7502 ++			 * buffer. The previously allocated SG list is
7503 ++			 * stored in priv->sgl (if the server's HCD
7504 ++			 * supports SG, the SG list is stored only in
7505 ++			 * urb->sg) and it is used as an indicator that
7506 ++			 * the server split a single SG request into
7507 ++			 * several URBs. Later, priv->sgl is used by
7508 ++			 * stub_complete() and stub_send_ret_submit() to
7509 ++			 * reassemble the divided URBs.
7510 ++ support_sg = 0;
7511 ++ num_urbs = nents;
7512 ++ priv->completed_urbs = 0;
7513 ++ pdu->u.cmd_submit.transfer_flags &=
7514 ++ ~URB_DMA_MAP_SG;
7515 ++ }
7516 + } else {
7517 + buffer = kzalloc(buf_len, GFP_KERNEL);
7518 + if (!buffer)
7519 +@@ -489,24 +521,6 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
7520 + }
7521 + }
7522 +
7523 +- /* Check if the server's HCD supports SG */
7524 +- if (use_sg && !udev->bus->sg_tablesize) {
7525 +- /*
7526 +- * If the server's HCD doesn't support SG, break a single SG
7527 +- * request into several URBs and map each SG list entry to
7528 +- * corresponding URB buffer. The previously allocated SG
7529 +- * list is stored in priv->sgl (If the server's HCD support SG,
7530 +- * SG list is stored only in urb->sg) and it is used as an
7531 +- * indicator that the server split single SG request into
7532 +- * several URBs. Later, priv->sgl is used by stub_complete() and
7533 +- * stub_send_ret_submit() to reassemble the divied URBs.
7534 +- */
7535 +- support_sg = 0;
7536 +- num_urbs = nents;
7537 +- priv->completed_urbs = 0;
7538 +- pdu->u.cmd_submit.transfer_flags &= ~URB_DMA_MAP_SG;
7539 +- }
7540 +-
7541 + /* allocate urb array */
7542 + priv->num_urbs = num_urbs;
7543 + priv->urbs = kmalloc_array(num_urbs, sizeof(*priv->urbs), GFP_KERNEL);
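
The relocated comment describes the usbip fallback: when the server's HCD cannot take a scatter-gather list, one SG request becomes one URB per SG entry, and priv->sgl doubles as the marker that the split happened. A standalone model of the splitting step (struct sg_entry and submit_urb() are hypothetical stand-ins for the kernel's scatterlist and URB types):

#include <stdio.h>

struct sg_entry { void *buf; unsigned long len; };

static void submit_urb(void *buf, unsigned long len, int idx)
{
	printf("urb %d: %lu bytes\n", idx, len);
}

/* One URB per SG entry, exactly nents of them (num_urbs = nents). */
static void split_sg_request(struct sg_entry *sgl, int nents)
{
	int i;

	for (i = 0; i < nents; i++)
		submit_urb(sgl[i].buf, sgl[i].len, i);
}

int main(void)
{
	static char a[512], b[256];
	struct sg_entry sgl[] = { { a, sizeof(a) }, { b, sizeof(b) } };

	split_sg_request(sgl, 2);
	return 0;
}
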
7544 +diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
7545 +index bab495d73195..8dcee4faf701 100644
7546 +--- a/drivers/vhost/vsock.c
7547 ++++ b/drivers/vhost/vsock.c
7548 +@@ -103,7 +103,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
7549 + struct iov_iter iov_iter;
7550 + unsigned out, in;
7551 + size_t nbytes;
7552 +- size_t len;
7553 ++ size_t iov_len, payload_len;
7554 + int head;
7555 +
7556 + spin_lock_bh(&vsock->send_pkt_list_lock);
7557 +@@ -148,8 +148,24 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
7558 + break;
7559 + }
7560 +
7561 +- len = iov_length(&vq->iov[out], in);
7562 +- iov_iter_init(&iov_iter, READ, &vq->iov[out], in, len);
7563 ++ iov_len = iov_length(&vq->iov[out], in);
7564 ++ if (iov_len < sizeof(pkt->hdr)) {
7565 ++ virtio_transport_free_pkt(pkt);
7566 ++ vq_err(vq, "Buffer len [%zu] too small\n", iov_len);
7567 ++ break;
7568 ++ }
7569 ++
7570 ++ iov_iter_init(&iov_iter, READ, &vq->iov[out], in, iov_len);
7571 ++ payload_len = pkt->len - pkt->off;
7572 ++
7573 ++ /* If the packet is greater than the space available in the
7574 ++ * buffer, we split it using multiple buffers.
7575 ++ */
7576 ++ if (payload_len > iov_len - sizeof(pkt->hdr))
7577 ++ payload_len = iov_len - sizeof(pkt->hdr);
7578 ++
7579 ++ /* Set the correct length in the header */
7580 ++ pkt->hdr.len = cpu_to_le32(payload_len);
7581 +
7582 + nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
7583 + if (nbytes != sizeof(pkt->hdr)) {
7584 +@@ -158,33 +174,47 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
7585 + break;
7586 + }
7587 +
7588 +- nbytes = copy_to_iter(pkt->buf, pkt->len, &iov_iter);
7589 +- if (nbytes != pkt->len) {
7590 ++ nbytes = copy_to_iter(pkt->buf + pkt->off, payload_len,
7591 ++ &iov_iter);
7592 ++ if (nbytes != payload_len) {
7593 + virtio_transport_free_pkt(pkt);
7594 + vq_err(vq, "Faulted on copying pkt buf\n");
7595 + break;
7596 + }
7597 +
7598 +- vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len);
7599 ++ vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
7600 + added = true;
7601 +
7602 +- if (pkt->reply) {
7603 +- int val;
7604 +-
7605 +- val = atomic_dec_return(&vsock->queued_replies);
7606 +-
7607 +- /* Do we have resources to resume tx processing? */
7608 +- if (val + 1 == tx_vq->num)
7609 +- restart_tx = true;
7610 +- }
7611 +-
7612 + /* Deliver to monitoring devices all correctly transmitted
7613 + * packets.
7614 + */
7615 + virtio_transport_deliver_tap_pkt(pkt);
7616 +
7617 +- total_len += pkt->len;
7618 +- virtio_transport_free_pkt(pkt);
7619 ++ pkt->off += payload_len;
7620 ++ total_len += payload_len;
7621 ++
7622 ++ /* If we didn't send all the payload we can requeue the packet
7623 ++ * to send it with the next available buffer.
7624 ++ */
7625 ++ if (pkt->off < pkt->len) {
7626 ++ spin_lock_bh(&vsock->send_pkt_list_lock);
7627 ++ list_add(&pkt->list, &vsock->send_pkt_list);
7628 ++ spin_unlock_bh(&vsock->send_pkt_list_lock);
7629 ++ } else {
7630 ++ if (pkt->reply) {
7631 ++ int val;
7632 ++
7633 ++ val = atomic_dec_return(&vsock->queued_replies);
7634 ++
7635 ++ /* Do we have resources to resume tx
7636 ++ * processing?
7637 ++ */
7638 ++ if (val + 1 == tx_vq->num)
7639 ++ restart_tx = true;
7640 ++ }
7641 ++
7642 ++ virtio_transport_free_pkt(pkt);
7643 ++ }
7644 + } while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
7645 + if (added)
7646 + vhost_signal(&vsock->dev, vq);
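
The vhost-vsock rework sends an oversized packet in slices: each pass clamps the payload to the space left after the header, rewrites hdr.len for that slice, advances pkt->off, and requeues the packet until off reaches len. A compilable model of the slicing arithmetic (HDR_LEN is illustrative, standing in for sizeof(struct virtio_vsock_hdr)):

#include <stdio.h>

#define HDR_LEN 44u	/* stands in for sizeof(struct virtio_vsock_hdr) */

struct pkt { unsigned int len, off; };

/* Clamp the slice to the buffer space after the header, advance the
 * packet offset, and return the value to put in this slice's hdr.len. */
static unsigned int next_slice(struct pkt *p, unsigned int iov_len)
{
	unsigned int payload = p->len - p->off;

	if (payload > iov_len - HDR_LEN)
		payload = iov_len - HDR_LEN;
	p->off += payload;
	return payload;
}

int main(void)
{
	struct pkt p = { .len = 100, .off = 0 };

	/* the packet is requeued until off catches up with len */
	while (p.off < p.len)
		printf("slice of %u bytes\n", next_slice(&p, HDR_LEN + 40));
	return 0;
}
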
7647 +diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
7648 +index 9529e28e1822..6228b48d1e12 100644
7649 +--- a/drivers/virtio/virtio_ring.c
7650 ++++ b/drivers/virtio/virtio_ring.c
7651 +@@ -431,7 +431,7 @@ unmap_release:
7652 + kfree(desc);
7653 +
7654 + END_USE(vq);
7655 +- return -EIO;
7656 ++ return -ENOMEM;
7657 + }
7658 +
7659 + /**
7660 +diff --git a/drivers/w1/slaves/w1_ds2438.c b/drivers/w1/slaves/w1_ds2438.c
7661 +index bf641a191d07..7c4e33dbee4d 100644
7662 +--- a/drivers/w1/slaves/w1_ds2438.c
7663 ++++ b/drivers/w1/slaves/w1_ds2438.c
7664 +@@ -186,8 +186,8 @@ static int w1_ds2438_change_config_bit(struct w1_slave *sl, u8 mask, u8 value)
7665 + return -1;
7666 + }
7667 +
7668 +-static uint16_t w1_ds2438_get_voltage(struct w1_slave *sl,
7669 +- int adc_input, uint16_t *voltage)
7670 ++static int w1_ds2438_get_voltage(struct w1_slave *sl,
7671 ++ int adc_input, uint16_t *voltage)
7672 + {
7673 + unsigned int retries = W1_DS2438_RETRIES;
7674 + u8 w1_buf[DS2438_PAGE_SIZE + 1 /*for CRC*/];
7675 +@@ -235,6 +235,25 @@ post_unlock:
7676 + return ret;
7677 + }
7678 +
7679 ++static int w1_ds2438_get_current(struct w1_slave *sl, int16_t *voltage)
7680 ++{
7681 ++ u8 w1_buf[DS2438_PAGE_SIZE + 1 /*for CRC*/];
7682 ++ int ret;
7683 ++
7684 ++ mutex_lock(&sl->master->bus_mutex);
7685 ++
7686 ++ if (w1_ds2438_get_page(sl, 0, w1_buf) == 0) {
7687 ++ /* The voltage measured across current sense resistor RSENS. */
7688 ++ *voltage = (((int16_t) w1_buf[DS2438_CURRENT_MSB]) << 8) | ((int16_t) w1_buf[DS2438_CURRENT_LSB]);
7689 ++ ret = 0;
7690 ++ } else
7691 ++ ret = -1;
7692 ++
7693 ++ mutex_unlock(&sl->master->bus_mutex);
7694 ++
7695 ++ return ret;
7696 ++}
7697 ++
7698 + static ssize_t iad_write(struct file *filp, struct kobject *kobj,
7699 + struct bin_attribute *bin_attr, char *buf,
7700 + loff_t off, size_t count)
7701 +@@ -257,6 +276,27 @@ static ssize_t iad_write(struct file *filp, struct kobject *kobj,
7702 + return ret;
7703 + }
7704 +
7705 ++static ssize_t iad_read(struct file *filp, struct kobject *kobj,
7706 ++ struct bin_attribute *bin_attr, char *buf,
7707 ++ loff_t off, size_t count)
7708 ++{
7709 ++ struct w1_slave *sl = kobj_to_w1_slave(kobj);
7710 ++ int ret;
7711 ++ int16_t voltage;
7712 ++
7713 ++ if (off != 0)
7714 ++ return 0;
7715 ++ if (!buf)
7716 ++ return -EINVAL;
7717 ++
7718 ++ if (w1_ds2438_get_current(sl, &voltage) == 0) {
7719 ++ ret = snprintf(buf, count, "%i\n", voltage);
7720 ++ } else
7721 ++ ret = -EIO;
7722 ++
7723 ++ return ret;
7724 ++}
7725 ++
7726 + static ssize_t page0_read(struct file *filp, struct kobject *kobj,
7727 + struct bin_attribute *bin_attr, char *buf,
7728 + loff_t off, size_t count)
7729 +@@ -272,9 +312,13 @@ static ssize_t page0_read(struct file *filp, struct kobject *kobj,
7730 +
7731 + mutex_lock(&sl->master->bus_mutex);
7732 +
7733 ++ /* Read no more than page0 size */
7734 ++ if (count > DS2438_PAGE_SIZE)
7735 ++ count = DS2438_PAGE_SIZE;
7736 ++
7737 + if (w1_ds2438_get_page(sl, 0, w1_buf) == 0) {
7738 +- memcpy(buf, &w1_buf, DS2438_PAGE_SIZE);
7739 +- ret = DS2438_PAGE_SIZE;
7740 ++ memcpy(buf, &w1_buf, count);
7741 ++ ret = count;
7742 + } else
7743 + ret = -EIO;
7744 +
7745 +@@ -289,7 +333,6 @@ static ssize_t temperature_read(struct file *filp, struct kobject *kobj,
7746 + {
7747 + struct w1_slave *sl = kobj_to_w1_slave(kobj);
7748 + int ret;
7749 +- ssize_t c = PAGE_SIZE;
7750 + int16_t temp;
7751 +
7752 + if (off != 0)
7753 +@@ -298,8 +341,7 @@ static ssize_t temperature_read(struct file *filp, struct kobject *kobj,
7754 + return -EINVAL;
7755 +
7756 + if (w1_ds2438_get_temperature(sl, &temp) == 0) {
7757 +- c -= snprintf(buf + PAGE_SIZE - c, c, "%d\n", temp);
7758 +- ret = PAGE_SIZE - c;
7759 ++ ret = snprintf(buf, count, "%i\n", temp);
7760 + } else
7761 + ret = -EIO;
7762 +
7763 +@@ -312,7 +354,6 @@ static ssize_t vad_read(struct file *filp, struct kobject *kobj,
7764 + {
7765 + struct w1_slave *sl = kobj_to_w1_slave(kobj);
7766 + int ret;
7767 +- ssize_t c = PAGE_SIZE;
7768 + uint16_t voltage;
7769 +
7770 + if (off != 0)
7771 +@@ -321,8 +362,7 @@ static ssize_t vad_read(struct file *filp, struct kobject *kobj,
7772 + return -EINVAL;
7773 +
7774 + if (w1_ds2438_get_voltage(sl, DS2438_ADC_INPUT_VAD, &voltage) == 0) {
7775 +- c -= snprintf(buf + PAGE_SIZE - c, c, "%d\n", voltage);
7776 +- ret = PAGE_SIZE - c;
7777 ++ ret = snprintf(buf, count, "%u\n", voltage);
7778 + } else
7779 + ret = -EIO;
7780 +
7781 +@@ -335,7 +375,6 @@ static ssize_t vdd_read(struct file *filp, struct kobject *kobj,
7782 + {
7783 + struct w1_slave *sl = kobj_to_w1_slave(kobj);
7784 + int ret;
7785 +- ssize_t c = PAGE_SIZE;
7786 + uint16_t voltage;
7787 +
7788 + if (off != 0)
7789 +@@ -344,15 +383,14 @@ static ssize_t vdd_read(struct file *filp, struct kobject *kobj,
7790 + return -EINVAL;
7791 +
7792 + if (w1_ds2438_get_voltage(sl, DS2438_ADC_INPUT_VDD, &voltage) == 0) {
7793 +- c -= snprintf(buf + PAGE_SIZE - c, c, "%d\n", voltage);
7794 +- ret = PAGE_SIZE - c;
7795 ++ ret = snprintf(buf, count, "%u\n", voltage);
7796 + } else
7797 + ret = -EIO;
7798 +
7799 + return ret;
7800 + }
7801 +
7802 +-static BIN_ATTR(iad, S_IRUGO | S_IWUSR | S_IWGRP, NULL, iad_write, 1);
7803 ++static BIN_ATTR(iad, S_IRUGO | S_IWUSR | S_IWGRP, iad_read, iad_write, 0);
7804 + static BIN_ATTR_RO(page0, DS2438_PAGE_SIZE);
7805 + static BIN_ATTR_RO(temperature, 0/* real length varies */);
7806 + static BIN_ATTR_RO(vad, 0/* real length varies */);
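
w1_ds2438_get_current() assembles the two current-register bytes into a signed 16-bit value, so a discharging battery reads back negative. The same assembly in standalone form:

#include <stdint.h>
#include <stdio.h>

/* MSB/LSB of the DS2438 current register -> signed 16-bit value. */
static int16_t ds2438_current(uint8_t msb, uint8_t lsb)
{
	return (int16_t)(((uint16_t)msb << 8) | lsb);
}

int main(void)
{
	printf("%d\n", ds2438_current(0x00, 0x7f));	/* 127: charging */
	printf("%d\n", ds2438_current(0xff, 0x81));	/* -127: discharging */
	return 0;
}
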
7807 +diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
7808 +index d4e8b717ce2b..747a15acbce3 100644
7809 +--- a/drivers/xen/balloon.c
7810 ++++ b/drivers/xen/balloon.c
7811 +@@ -350,7 +350,10 @@ static enum bp_state reserve_additional_memory(void)
7812 + * callers drop the mutex before trying again.
7813 + */
7814 + mutex_unlock(&balloon_mutex);
7815 ++ /* add_memory_resource() requires the device_hotplug lock */
7816 ++ lock_device_hotplug();
7817 + rc = add_memory_resource(nid, resource, memhp_auto_online);
7818 ++ unlock_device_hotplug();
7819 + mutex_lock(&balloon_mutex);
7820 +
7821 + if (rc) {
7822 +diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
7823 +index 9fd383285f0e..fc764f350f05 100644
7824 +--- a/fs/btrfs/ctree.c
7825 ++++ b/fs/btrfs/ctree.c
7826 +@@ -3031,6 +3031,10 @@ int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
7827 +
7828 + again:
7829 + b = get_old_root(root, time_seq);
7830 ++ if (!b) {
7831 ++ ret = -EIO;
7832 ++ goto done;
7833 ++ }
7834 + level = btrfs_header_level(b);
7835 + p->locks[level] = BTRFS_READ_LOCK;
7836 +
7837 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
7838 +index 7592beb53fc4..00ff4349b457 100644
7839 +--- a/fs/btrfs/ioctl.c
7840 ++++ b/fs/btrfs/ioctl.c
7841 +@@ -1337,7 +1337,7 @@ again:
7842 +
7843 + if (i_done != page_cnt) {
7844 + spin_lock(&BTRFS_I(inode)->lock);
7845 +- BTRFS_I(inode)->outstanding_extents++;
7846 ++ btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
7847 + spin_unlock(&BTRFS_I(inode)->lock);
7848 + btrfs_delalloc_release_space(inode, data_reserved,
7849 + start_index << PAGE_SHIFT,
7850 +diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
7851 +index 8888337a95b6..ddbad8d50949 100644
7852 +--- a/fs/btrfs/super.c
7853 ++++ b/fs/btrfs/super.c
7854 +@@ -1919,7 +1919,7 @@ restore:
7855 + }
7856 +
7857 + /* Used to sort the devices by max_avail(descending sort) */
7858 +-static int btrfs_cmp_device_free_bytes(const void *dev_info1,
7859 ++static inline int btrfs_cmp_device_free_bytes(const void *dev_info1,
7860 + const void *dev_info2)
7861 + {
7862 + if (((struct btrfs_device_info *)dev_info1)->max_avail >
7863 +@@ -1948,8 +1948,8 @@ static inline void btrfs_descending_sort_devices(
7864 + * The helper to calc the free space on the devices that can be used to store
7865 + * file data.
7866 + */
7867 +-static int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
7868 +- u64 *free_bytes)
7869 ++static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
7870 ++ u64 *free_bytes)
7871 + {
7872 + struct btrfs_device_info *devices_info;
7873 + struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7874 +diff --git a/fs/ceph/file.c b/fs/ceph/file.c
7875 +index 92ab20433682..91a7ad259bcf 100644
7876 +--- a/fs/ceph/file.c
7877 ++++ b/fs/ceph/file.c
7878 +@@ -1735,7 +1735,6 @@ static long ceph_fallocate(struct file *file, int mode,
7879 + struct ceph_file_info *fi = file->private_data;
7880 + struct inode *inode = file_inode(file);
7881 + struct ceph_inode_info *ci = ceph_inode(inode);
7882 +- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
7883 + struct ceph_cap_flush *prealloc_cf;
7884 + int want, got = 0;
7885 + int dirty;
7886 +@@ -1743,10 +1742,7 @@ static long ceph_fallocate(struct file *file, int mode,
7887 + loff_t endoff = 0;
7888 + loff_t size;
7889 +
7890 +- if ((offset + length) > max(i_size_read(inode), fsc->max_file_size))
7891 +- return -EFBIG;
7892 +-
7893 +- if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
7894 ++ if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
7895 + return -EOPNOTSUPP;
7896 +
7897 + if (!S_ISREG(inode->i_mode))
7898 +@@ -1763,18 +1759,6 @@ static long ceph_fallocate(struct file *file, int mode,
7899 + goto unlock;
7900 + }
7901 +
7902 +- if (!(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE)) &&
7903 +- ceph_quota_is_max_bytes_exceeded(inode, offset + length)) {
7904 +- ret = -EDQUOT;
7905 +- goto unlock;
7906 +- }
7907 +-
7908 +- if (ceph_osdmap_flag(&fsc->client->osdc, CEPH_OSDMAP_FULL) &&
7909 +- !(mode & FALLOC_FL_PUNCH_HOLE)) {
7910 +- ret = -ENOSPC;
7911 +- goto unlock;
7912 +- }
7913 +-
7914 + if (ci->i_inline_version != CEPH_INLINE_NONE) {
7915 + ret = ceph_uninline_data(file, NULL);
7916 + if (ret < 0)
7917 +@@ -1782,12 +1766,12 @@ static long ceph_fallocate(struct file *file, int mode,
7918 + }
7919 +
7920 + size = i_size_read(inode);
7921 +- if (!(mode & FALLOC_FL_KEEP_SIZE)) {
7922 +- endoff = offset + length;
7923 +- ret = inode_newsize_ok(inode, endoff);
7924 +- if (ret)
7925 +- goto unlock;
7926 +- }
7927 ++
7928 ++ /* Are we punching a hole beyond EOF? */
7929 ++ if (offset >= size)
7930 ++ goto unlock;
7931 ++ if ((offset + length) > size)
7932 ++ length = size - offset;
7933 +
7934 + if (fi->fmode & CEPH_FILE_MODE_LAZY)
7935 + want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
7936 +@@ -1798,16 +1782,8 @@ static long ceph_fallocate(struct file *file, int mode,
7937 + if (ret < 0)
7938 + goto unlock;
7939 +
7940 +- if (mode & FALLOC_FL_PUNCH_HOLE) {
7941 +- if (offset < size)
7942 +- ceph_zero_pagecache_range(inode, offset, length);
7943 +- ret = ceph_zero_objects(inode, offset, length);
7944 +- } else if (endoff > size) {
7945 +- truncate_pagecache_range(inode, size, -1);
7946 +- if (ceph_inode_set_size(inode, endoff))
7947 +- ceph_check_caps(ceph_inode(inode),
7948 +- CHECK_CAPS_AUTHONLY, NULL);
7949 +- }
7950 ++ ceph_zero_pagecache_range(inode, offset, length);
7951 ++ ret = ceph_zero_objects(inode, offset, length);
7952 +
7953 + if (!ret) {
7954 + spin_lock(&ci->i_ceph_lock);
7955 +@@ -1817,9 +1793,6 @@ static long ceph_fallocate(struct file *file, int mode,
7956 + spin_unlock(&ci->i_ceph_lock);
7957 + if (dirty)
7958 + __mark_inode_dirty(inode, dirty);
7959 +- if ((endoff > size) &&
7960 +- ceph_quota_is_max_bytes_approaching(inode, endoff))
7961 +- ceph_check_caps(ci, CHECK_CAPS_NODELAY, NULL);
7962 + }
7963 +
7964 + ceph_put_cap_refs(ci, got);
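
After this ceph change, fallocate() on CephFS accepts exactly FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, treats a punch entirely past EOF as a no-op, and clamps one that crosses EOF. A compilable model of the new argument handling (the flag values are illustrative placeholders, not the uapi constants):

#include <errno.h>
#include <stdio.h>

#define FL_KEEP_SIZE	0x01
#define FL_PUNCH_HOLE	0x02

static int check_punch(int mode, long long off, long long *len,
		       long long size)
{
	if (mode != (FL_KEEP_SIZE | FL_PUNCH_HOLE))
		return -EOPNOTSUPP;	/* nothing else is supported now */
	if (off >= size)
		return 0;		/* punching past EOF: nothing to do */
	if (off + *len > size)
		*len = size - off;	/* clamp the punch to EOF */
	return 1;			/* zero the (possibly shortened) range */
}

int main(void)
{
	long long len = 100;
	int rc = check_punch(FL_KEEP_SIZE | FL_PUNCH_HOLE, 50, &len, 80);

	printf("rc=%d len=%lld\n", rc, len);	/* rc=1 len=30 */
	return 0;
}
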
7965 +diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
7966 +index acb70a6a82f0..1e438e0faf77 100644
7967 +--- a/fs/ceph/inode.c
7968 ++++ b/fs/ceph/inode.c
7969 +@@ -1694,7 +1694,6 @@ retry_lookup:
7970 + if (IS_ERR(realdn)) {
7971 + err = PTR_ERR(realdn);
7972 + d_drop(dn);
7973 +- dn = NULL;
7974 + goto next_item;
7975 + }
7976 + dn = realdn;
7977 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
7978 +index b1f5d0d28335..9194f17675c8 100644
7979 +--- a/fs/cifs/smb2pdu.c
7980 ++++ b/fs/cifs/smb2pdu.c
7981 +@@ -2283,7 +2283,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
7982 + struct cifs_ses *ses = tcon->ses;
7983 + struct kvec iov[SMB2_CREATE_IOV_SIZE];
7984 + struct kvec rsp_iov = {NULL, 0};
7985 +- int resp_buftype;
7986 ++ int resp_buftype = CIFS_NO_BUFFER;
7987 + int rc = 0;
7988 + int flags = 0;
7989 +
7990 +@@ -2570,7 +2570,7 @@ SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
7991 + struct cifs_ses *ses = tcon->ses;
7992 + struct kvec iov[1];
7993 + struct kvec rsp_iov;
7994 +- int resp_buftype;
7995 ++ int resp_buftype = CIFS_NO_BUFFER;
7996 + int rc = 0;
7997 +
7998 + cifs_dbg(FYI, "Close\n");
7999 +@@ -2723,7 +2723,7 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
8000 + struct kvec iov[1];
8001 + struct kvec rsp_iov;
8002 + int rc = 0;
8003 +- int resp_buftype;
8004 ++ int resp_buftype = CIFS_NO_BUFFER;
8005 + struct cifs_ses *ses = tcon->ses;
8006 + int flags = 0;
8007 +
8008 +diff --git a/fs/dlm/member.c b/fs/dlm/member.c
8009 +index 3fda3832cf6a..cad6d85911a8 100644
8010 +--- a/fs/dlm/member.c
8011 ++++ b/fs/dlm/member.c
8012 +@@ -680,7 +680,7 @@ int dlm_ls_start(struct dlm_ls *ls)
8013 +
8014 + error = dlm_config_nodes(ls->ls_name, &nodes, &count);
8015 + if (error < 0)
8016 +- goto fail;
8017 ++ goto fail_rv;
8018 +
8019 + spin_lock(&ls->ls_recover_lock);
8020 +
8021 +@@ -712,8 +712,9 @@ int dlm_ls_start(struct dlm_ls *ls)
8022 + return 0;
8023 +
8024 + fail:
8025 +- kfree(rv);
8026 + kfree(nodes);
8027 ++ fail_rv:
8028 ++ kfree(rv);
8029 + return error;
8030 + }
8031 +
8032 +diff --git a/fs/dlm/user.c b/fs/dlm/user.c
8033 +index 2a669390cd7f..13f29409600b 100644
8034 +--- a/fs/dlm/user.c
8035 ++++ b/fs/dlm/user.c
8036 +@@ -702,7 +702,7 @@ static int copy_result_to_user(struct dlm_user_args *ua, int compat,
8037 + result.version[0] = DLM_DEVICE_VERSION_MAJOR;
8038 + result.version[1] = DLM_DEVICE_VERSION_MINOR;
8039 + result.version[2] = DLM_DEVICE_VERSION_PATCH;
8040 +- memcpy(&result.lksb, &ua->lksb, sizeof(struct dlm_lksb));
8041 ++ memcpy(&result.lksb, &ua->lksb, offsetof(struct dlm_lksb, sb_lvbptr));
8042 + result.user_lksb = ua->user_lksb;
8043 +
8044 + /* FIXME: dlm1 provides for the user's bastparam/addr to not be updated
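
The dlm fix stops the copy at offsetof(struct dlm_lksb, sb_lvbptr) so the kernel pointer at the end of the struct never reaches user space. The standalone sketch below uses a simplified stand-in for struct dlm_lksb to show the pattern:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for struct dlm_lksb: everything before the
 * pointer member may be copied to user space, the pointer may not. */
struct lksb {
	int	sb_status;
	int	sb_flags;
	char	*sb_lvbptr;	/* kernel pointer: must not leak out */
};

int main(void)
{
	char lvb[8];
	struct lksb in = { -11, 1, lvb };
	struct lksb out = { 0, 0, NULL };

	/* copy only the members that precede sb_lvbptr */
	memcpy(&out, &in, offsetof(struct lksb, sb_lvbptr));
	printf("status=%d flags=%d lvbptr=%p\n",
	       out.sb_status, out.sb_flags, (void *)out.sb_lvbptr);
	return 0;
}
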
8045 +diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
8046 +index 3a2fd6676966..a7436ad19458 100644
8047 +--- a/fs/f2fs/data.c
8048 ++++ b/fs/f2fs/data.c
8049 +@@ -1782,6 +1782,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
8050 + /* This page is already truncated */
8051 + if (fio->old_blkaddr == NULL_ADDR) {
8052 + ClearPageUptodate(page);
8053 ++ clear_cold_data(page);
8054 + goto out_writepage;
8055 + }
8056 + got_it:
8057 +@@ -1957,8 +1958,10 @@ done:
8058 +
8059 + out:
8060 + inode_dec_dirty_pages(inode);
8061 +- if (err)
8062 ++ if (err) {
8063 + ClearPageUptodate(page);
8064 ++ clear_cold_data(page);
8065 ++ }
8066 +
8067 + if (wbc->for_reclaim) {
8068 + f2fs_submit_merged_write_cond(sbi, inode, 0, page->index, DATA);
8069 +@@ -2573,6 +2576,8 @@ void f2fs_invalidate_page(struct page *page, unsigned int offset,
8070 + }
8071 + }
8072 +
8073 ++ clear_cold_data(page);
8074 ++
8075 + /* This is atomic written page, keep Private */
8076 + if (IS_ATOMIC_WRITTEN_PAGE(page))
8077 + return f2fs_drop_inmem_page(inode, page);
8078 +@@ -2591,6 +2596,7 @@ int f2fs_release_page(struct page *page, gfp_t wait)
8079 + if (IS_ATOMIC_WRITTEN_PAGE(page))
8080 + return 0;
8081 +
8082 ++ clear_cold_data(page);
8083 + set_page_private(page, 0);
8084 + ClearPagePrivate(page);
8085 + return 1;
8086 +diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
8087 +index ecc3a4e2be96..cd611a57d04d 100644
8088 +--- a/fs/f2fs/dir.c
8089 ++++ b/fs/f2fs/dir.c
8090 +@@ -733,6 +733,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
8091 + clear_page_dirty_for_io(page);
8092 + ClearPagePrivate(page);
8093 + ClearPageUptodate(page);
8094 ++ clear_cold_data(page);
8095 + inode_dec_dirty_pages(dir);
8096 + f2fs_remove_dirty_inode(dir);
8097 + }
8098 +diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
8099 +index 2dc49a541907..34e48bcf5087 100644
8100 +--- a/fs/f2fs/f2fs.h
8101 ++++ b/fs/f2fs/f2fs.h
8102 +@@ -3388,7 +3388,7 @@ static inline void f2fs_set_encrypted_inode(struct inode *inode)
8103 + {
8104 + #ifdef CONFIG_F2FS_FS_ENCRYPTION
8105 + file_set_encrypt(inode);
8106 +- inode->i_flags |= S_ENCRYPTED;
8107 ++ f2fs_set_inode_flags(inode);
8108 + #endif
8109 + }
8110 +
8111 +diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
8112 +index 1f67e389169f..6b23dcbf52f4 100644
8113 +--- a/fs/f2fs/namei.c
8114 ++++ b/fs/f2fs/namei.c
8115 +@@ -124,6 +124,8 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
8116 + if (F2FS_I(inode)->i_flags & F2FS_PROJINHERIT_FL)
8117 + set_inode_flag(inode, FI_PROJ_INHERIT);
8118 +
8119 ++ f2fs_set_inode_flags(inode);
8120 ++
8121 + trace_f2fs_new_inode(inode, 0);
8122 + return inode;
8123 +
8124 +diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
8125 +index d78009694f3f..43a07514c357 100644
8126 +--- a/fs/f2fs/segment.c
8127 ++++ b/fs/f2fs/segment.c
8128 +@@ -277,8 +277,10 @@ retry:
8129 + }
8130 + next:
8131 +	/* we don't need to invalidate this in the successful status */
8132 +- if (drop || recover)
8133 ++ if (drop || recover) {
8134 + ClearPageUptodate(page);
8135 ++ clear_cold_data(page);
8136 ++ }
8137 + set_page_private(page, 0);
8138 + ClearPagePrivate(page);
8139 + f2fs_put_page(page, 1);
8140 +diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
8141 +index 15779123d089..7a9cc64f5ca3 100644
8142 +--- a/fs/f2fs/super.c
8143 ++++ b/fs/f2fs/super.c
8144 +@@ -1837,8 +1837,7 @@ static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
8145 +
8146 + inode_lock(inode);
8147 + F2FS_I(inode)->i_flags |= F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL;
8148 +- inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
8149 +- S_NOATIME | S_IMMUTABLE);
8150 ++ f2fs_set_inode_flags(inode);
8151 + inode_unlock(inode);
8152 + f2fs_mark_inode_dirty_sync(inode, false);
8153 +
8154 +@@ -1863,7 +1862,7 @@ static int f2fs_quota_off(struct super_block *sb, int type)
8155 +
8156 + inode_lock(inode);
8157 + F2FS_I(inode)->i_flags &= ~(F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL);
8158 +- inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
8159 ++ f2fs_set_inode_flags(inode);
8160 + inode_unlock(inode);
8161 + f2fs_mark_inode_dirty_sync(inode, false);
8162 + out_put:
8163 +diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
8164 +index 63e5387c84d2..c94c4ac1ae78 100644
8165 +--- a/fs/gfs2/rgrp.c
8166 ++++ b/fs/gfs2/rgrp.c
8167 +@@ -642,7 +642,10 @@ static void __rs_deltree(struct gfs2_blkreserv *rs)
8168 + RB_CLEAR_NODE(&rs->rs_node);
8169 +
8170 + if (rs->rs_free) {
8171 +- struct gfs2_bitmap *bi = rbm_bi(&rs->rs_rbm);
8172 ++ u64 last_block = gfs2_rbm_to_block(&rs->rs_rbm) +
8173 ++ rs->rs_free - 1;
8174 ++ struct gfs2_rbm last_rbm = { .rgd = rs->rs_rbm.rgd, };
8175 ++ struct gfs2_bitmap *start, *last;
8176 +
8177 + /* return reserved blocks to the rgrp */
8178 + BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
8179 +@@ -653,7 +656,13 @@ static void __rs_deltree(struct gfs2_blkreserv *rs)
8180 + it will force the number to be recalculated later. */
8181 + rgd->rd_extfail_pt += rs->rs_free;
8182 + rs->rs_free = 0;
8183 +- clear_bit(GBF_FULL, &bi->bi_flags);
8184 ++ if (gfs2_rbm_from_block(&last_rbm, last_block))
8185 ++ return;
8186 ++ start = rbm_bi(&rs->rs_rbm);
8187 ++ last = rbm_bi(&last_rbm);
8188 ++ do
8189 ++ clear_bit(GBF_FULL, &start->bi_flags);
8190 ++ while (start++ != last);
8191 + }
8192 + }
8193 +
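
The gfs2 fix recognizes that a reservation can straddle several bitmap blocks, so GBF_FULL has to be cleared on every bitmap from the reservation's first block through its last, not just the first one. A simplified model with plain flags instead of atomic bitops:

#include <stdio.h>

#define GBF_FULL 1UL

struct bitmap { unsigned long flags; };

/* Clear "full" on every bitmap from the reservation's first block
 * through its last, mirroring the patch's do/while walk. */
static void clear_full_range(struct bitmap *start, struct bitmap *last)
{
	do
		start->flags &= ~GBF_FULL;
	while (start++ != last);
}

int main(void)
{
	struct bitmap bi[3] = { { GBF_FULL }, { GBF_FULL }, { GBF_FULL } };
	int i;

	clear_full_range(&bi[0], &bi[2]);	/* spans all three bitmaps */
	for (i = 0; i < 3; i++)
		printf("bi[%d].flags = %lu\n", i, bi[i].flags);
	return 0;
}
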
8194 +diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c
8195 +index da25c49203cc..896396554bcc 100644
8196 +--- a/fs/hfs/brec.c
8197 ++++ b/fs/hfs/brec.c
8198 +@@ -445,6 +445,7 @@ skip:
8199 + /* restore search_key */
8200 + hfs_bnode_read_key(node, fd->search_key, 14);
8201 + }
8202 ++ new_node = NULL;
8203 + }
8204 +
8205 + if (!rec && node->parent)
8206 +diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
8207 +index 9bdff5e40626..19017d296173 100644
8208 +--- a/fs/hfs/btree.c
8209 ++++ b/fs/hfs/btree.c
8210 +@@ -220,25 +220,17 @@ static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
8211 + return node;
8212 + }
8213 +
8214 +-struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
8215 ++/* Make sure @tree has enough space for the @rsvd_nodes */
8216 ++int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes)
8217 + {
8218 +- struct hfs_bnode *node, *next_node;
8219 +- struct page **pagep;
8220 +- u32 nidx, idx;
8221 +- unsigned off;
8222 +- u16 off16;
8223 +- u16 len;
8224 +- u8 *data, byte, m;
8225 +- int i;
8226 +-
8227 +- while (!tree->free_nodes) {
8228 +- struct inode *inode = tree->inode;
8229 +- u32 count;
8230 +- int res;
8231 ++ struct inode *inode = tree->inode;
8232 ++ u32 count;
8233 ++ int res;
8234 +
8235 ++ while (tree->free_nodes < rsvd_nodes) {
8236 + res = hfs_extend_file(inode);
8237 + if (res)
8238 +- return ERR_PTR(res);
8239 ++ return res;
8240 + HFS_I(inode)->phys_size = inode->i_size =
8241 + (loff_t)HFS_I(inode)->alloc_blocks *
8242 + HFS_SB(tree->sb)->alloc_blksz;
8243 +@@ -246,9 +238,26 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
8244 + tree->sb->s_blocksize_bits;
8245 + inode_set_bytes(inode, inode->i_size);
8246 + count = inode->i_size >> tree->node_size_shift;
8247 +- tree->free_nodes = count - tree->node_count;
8248 ++ tree->free_nodes += count - tree->node_count;
8249 + tree->node_count = count;
8250 + }
8251 ++ return 0;
8252 ++}
8253 ++
8254 ++struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
8255 ++{
8256 ++ struct hfs_bnode *node, *next_node;
8257 ++ struct page **pagep;
8258 ++ u32 nidx, idx;
8259 ++ unsigned off;
8260 ++ u16 off16;
8261 ++ u16 len;
8262 ++ u8 *data, byte, m;
8263 ++ int i, res;
8264 ++
8265 ++ res = hfs_bmap_reserve(tree, 1);
8266 ++ if (res)
8267 ++ return ERR_PTR(res);
8268 +
8269 + nidx = 0;
8270 + node = hfs_bnode_find(tree, nidx);
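
hfs_bmap_reserve() factors the file-extension loop out of hfs_bmap_alloc() so callers can pre-reserve all the nodes a multi-step b-tree update might need (the catalog paths reserve 2 * depth for a possible root split) and fail with ENOSPC before touching any records. The shape of that pattern, with extend_tree_file() as a hypothetical stand-in for hfs_extend_file():

#include <stdio.h>

struct btree { int free_nodes; int node_count; };

/* Hypothetical stand-in for hfs_extend_file(): one extension adds a
 * handful of nodes, or would fail with -ENOSPC from the allocator. */
static int extend_tree_file(struct btree *t)
{
	t->free_nodes += 8;
	t->node_count += 8;
	return 0;
}

/* Grow the tree file until enough free nodes exist, *before* any
 * record moves, so ENOSPC cannot strike halfway through an update. */
static int bmap_reserve(struct btree *t, int rsvd_nodes)
{
	while (t->free_nodes < rsvd_nodes) {
		int res = extend_tree_file(t);

		if (res)
			return res;
	}
	return 0;
}

int main(void)
{
	struct btree t = { 1, 16 };

	/* a create may split the root once: reserve 2 * depth (depth 3) */
	printf("rc=%d free=%d\n", bmap_reserve(&t, 6), t.free_nodes);
	return 0;
}
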
8271 +diff --git a/fs/hfs/btree.h b/fs/hfs/btree.h
8272 +index c8b252dbb26c..dcc2aab1b2c4 100644
8273 +--- a/fs/hfs/btree.h
8274 ++++ b/fs/hfs/btree.h
8275 +@@ -82,6 +82,7 @@ struct hfs_find_data {
8276 + extern struct hfs_btree *hfs_btree_open(struct super_block *, u32, btree_keycmp);
8277 + extern void hfs_btree_close(struct hfs_btree *);
8278 + extern void hfs_btree_write(struct hfs_btree *);
8279 ++extern int hfs_bmap_reserve(struct hfs_btree *, int);
8280 + extern struct hfs_bnode * hfs_bmap_alloc(struct hfs_btree *);
8281 + extern void hfs_bmap_free(struct hfs_bnode *node);
8282 +
8283 +diff --git a/fs/hfs/catalog.c b/fs/hfs/catalog.c
8284 +index 8a66405b0f8b..d365bf0b8c77 100644
8285 +--- a/fs/hfs/catalog.c
8286 ++++ b/fs/hfs/catalog.c
8287 +@@ -97,6 +97,14 @@ int hfs_cat_create(u32 cnid, struct inode *dir, const struct qstr *str, struct i
8288 + if (err)
8289 + return err;
8290 +
8291 ++ /*
8292 ++ * Fail early and avoid ENOSPC during the btree operations. We may
8293 ++ * have to split the root node at most once.
8294 ++ */
8295 ++ err = hfs_bmap_reserve(fd.tree, 2 * fd.tree->depth);
8296 ++ if (err)
8297 ++ goto err2;
8298 ++
8299 + hfs_cat_build_key(sb, fd.search_key, cnid, NULL);
8300 + entry_size = hfs_cat_build_thread(sb, &entry, S_ISDIR(inode->i_mode) ?
8301 + HFS_CDR_THD : HFS_CDR_FTH,
8302 +@@ -295,6 +303,14 @@ int hfs_cat_move(u32 cnid, struct inode *src_dir, const struct qstr *src_name,
8303 + return err;
8304 + dst_fd = src_fd;
8305 +
8306 ++ /*
8307 ++ * Fail early and avoid ENOSPC during the btree operations. We may
8308 ++ * have to split the root node at most once.
8309 ++ */
8310 ++ err = hfs_bmap_reserve(src_fd.tree, 2 * src_fd.tree->depth);
8311 ++ if (err)
8312 ++ goto out;
8313 ++
8314 + /* find the old dir entry and read the data */
8315 + hfs_cat_build_key(sb, src_fd.search_key, src_dir->i_ino, src_name);
8316 + err = hfs_brec_find(&src_fd);
8317 +diff --git a/fs/hfs/extent.c b/fs/hfs/extent.c
8318 +index 5d0182654580..263d5028d9d1 100644
8319 +--- a/fs/hfs/extent.c
8320 ++++ b/fs/hfs/extent.c
8321 +@@ -117,6 +117,10 @@ static int __hfs_ext_write_extent(struct inode *inode, struct hfs_find_data *fd)
8322 + if (HFS_I(inode)->flags & HFS_FLG_EXT_NEW) {
8323 + if (res != -ENOENT)
8324 + return res;
8325 ++ /* Fail early and avoid ENOSPC during the btree operation */
8326 ++ res = hfs_bmap_reserve(fd->tree, fd->tree->depth + 1);
8327 ++ if (res)
8328 ++ return res;
8329 + hfs_brec_insert(fd, HFS_I(inode)->cached_extents, sizeof(hfs_extent_rec));
8330 + HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
8331 + } else {
8332 +@@ -300,7 +304,7 @@ int hfs_free_fork(struct super_block *sb, struct hfs_cat_file *file, int type)
8333 + return 0;
8334 +
8335 + blocks = 0;
8336 +- for (i = 0; i < 3; extent++, i++)
8337 ++ for (i = 0; i < 3; i++)
8338 + blocks += be16_to_cpu(extent[i].count);
8339 +
8340 + res = hfs_free_extents(sb, extent, blocks, blocks);
8341 +@@ -341,7 +345,9 @@ int hfs_get_block(struct inode *inode, sector_t block,
8342 + ablock = (u32)block / HFS_SB(sb)->fs_div;
8343 +
8344 + if (block >= HFS_I(inode)->fs_blocks) {
8345 +- if (block > HFS_I(inode)->fs_blocks || !create)
8346 ++ if (!create)
8347 ++ return 0;
8348 ++ if (block > HFS_I(inode)->fs_blocks)
8349 + return -EIO;
8350 + if (ablock >= HFS_I(inode)->alloc_blocks) {
8351 + res = hfs_extend_file(inode);
8352 +diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
8353 +index a2dfa1b2a89c..da243c84e93b 100644
8354 +--- a/fs/hfs/inode.c
8355 ++++ b/fs/hfs/inode.c
8356 +@@ -642,6 +642,8 @@ int hfs_inode_setattr(struct dentry *dentry, struct iattr * attr)
8357 +
8358 + truncate_setsize(inode, attr->ia_size);
8359 + hfs_file_truncate(inode);
8360 ++ inode->i_atime = inode->i_mtime = inode->i_ctime =
8361 ++ current_time(inode);
8362 + }
8363 +
8364 + setattr_copy(inode, attr);
8365 +diff --git a/fs/hfsplus/attributes.c b/fs/hfsplus/attributes.c
8366 +index 2bab6b3cdba4..e6d554476db4 100644
8367 +--- a/fs/hfsplus/attributes.c
8368 ++++ b/fs/hfsplus/attributes.c
8369 +@@ -217,6 +217,11 @@ int hfsplus_create_attr(struct inode *inode,
8370 + if (err)
8371 + goto failed_init_create_attr;
8372 +
8373 ++ /* Fail early and avoid ENOSPC during the btree operation */
8374 ++ err = hfs_bmap_reserve(fd.tree, fd.tree->depth + 1);
8375 ++ if (err)
8376 ++ goto failed_create_attr;
8377 ++
8378 + if (name) {
8379 + err = hfsplus_attr_build_key(sb, fd.search_key,
8380 + inode->i_ino, name);
8381 +@@ -313,6 +318,11 @@ int hfsplus_delete_attr(struct inode *inode, const char *name)
8382 + if (err)
8383 + return err;
8384 +
8385 ++ /* Fail early and avoid ENOSPC during the btree operation */
8386 ++ err = hfs_bmap_reserve(fd.tree, fd.tree->depth);
8387 ++ if (err)
8388 ++ goto out;
8389 ++
8390 + if (name) {
8391 + err = hfsplus_attr_build_key(sb, fd.search_key,
8392 + inode->i_ino, name);
8393 +diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c
8394 +index aa17a392b414..1918544a7871 100644
8395 +--- a/fs/hfsplus/brec.c
8396 ++++ b/fs/hfsplus/brec.c
8397 +@@ -449,6 +449,7 @@ skip:
8398 + /* restore search_key */
8399 + hfs_bnode_read_key(node, fd->search_key, 14);
8400 + }
8401 ++ new_node = NULL;
8402 + }
8403 +
8404 + if (!rec && node->parent)
8405 +diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
8406 +index 3de3bc4918b5..66774f4cb4fd 100644
8407 +--- a/fs/hfsplus/btree.c
8408 ++++ b/fs/hfsplus/btree.c
8409 +@@ -342,26 +342,21 @@ static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
8410 + return node;
8411 + }
8412 +
8413 +-struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
8414 ++/* Make sure @tree has enough space for the @rsvd_nodes */
8415 ++int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes)
8416 + {
8417 +- struct hfs_bnode *node, *next_node;
8418 +- struct page **pagep;
8419 +- u32 nidx, idx;
8420 +- unsigned off;
8421 +- u16 off16;
8422 +- u16 len;
8423 +- u8 *data, byte, m;
8424 +- int i;
8425 ++ struct inode *inode = tree->inode;
8426 ++ struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
8427 ++ u32 count;
8428 ++ int res;
8429 +
8430 +- while (!tree->free_nodes) {
8431 +- struct inode *inode = tree->inode;
8432 +- struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
8433 +- u32 count;
8434 +- int res;
8435 ++ if (rsvd_nodes <= 0)
8436 ++ return 0;
8437 +
8438 ++ while (tree->free_nodes < rsvd_nodes) {
8439 + res = hfsplus_file_extend(inode, hfs_bnode_need_zeroout(tree));
8440 + if (res)
8441 +- return ERR_PTR(res);
8442 ++ return res;
8443 + hip->phys_size = inode->i_size =
8444 + (loff_t)hip->alloc_blocks <<
8445 + HFSPLUS_SB(tree->sb)->alloc_blksz_shift;
8446 +@@ -369,9 +364,26 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
8447 + hip->alloc_blocks << HFSPLUS_SB(tree->sb)->fs_shift;
8448 + inode_set_bytes(inode, inode->i_size);
8449 + count = inode->i_size >> tree->node_size_shift;
8450 +- tree->free_nodes = count - tree->node_count;
8451 ++ tree->free_nodes += count - tree->node_count;
8452 + tree->node_count = count;
8453 + }
8454 ++ return 0;
8455 ++}
8456 ++
8457 ++struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
8458 ++{
8459 ++ struct hfs_bnode *node, *next_node;
8460 ++ struct page **pagep;
8461 ++ u32 nidx, idx;
8462 ++ unsigned off;
8463 ++ u16 off16;
8464 ++ u16 len;
8465 ++ u8 *data, byte, m;
8466 ++ int i, res;
8467 ++
8468 ++ res = hfs_bmap_reserve(tree, 1);
8469 ++ if (res)
8470 ++ return ERR_PTR(res);
8471 +
8472 + nidx = 0;
8473 + node = hfs_bnode_find(tree, nidx);
8474 +diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
8475 +index a196369ba779..35472cba750e 100644
8476 +--- a/fs/hfsplus/catalog.c
8477 ++++ b/fs/hfsplus/catalog.c
8478 +@@ -265,6 +265,14 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
8479 + if (err)
8480 + return err;
8481 +
8482 ++ /*
8483 ++ * Fail early and avoid ENOSPC during the btree operations. We may
8484 ++ * have to split the root node at most once.
8485 ++ */
8486 ++ err = hfs_bmap_reserve(fd.tree, 2 * fd.tree->depth);
8487 ++ if (err)
8488 ++ goto err2;
8489 ++
8490 + hfsplus_cat_build_key_with_cnid(sb, fd.search_key, cnid);
8491 + entry_size = hfsplus_fill_cat_thread(sb, &entry,
8492 + S_ISDIR(inode->i_mode) ?
8493 +@@ -333,6 +341,14 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, const struct qstr *str)
8494 + if (err)
8495 + return err;
8496 +
8497 ++ /*
8498 ++ * Fail early and avoid ENOSPC during the btree operations. We may
8499 ++ * have to split the root node at most once.
8500 ++ */
8501 ++ err = hfs_bmap_reserve(fd.tree, 2 * (int)fd.tree->depth - 2);
8502 ++ if (err)
8503 ++ goto out;
8504 ++
8505 + if (!str) {
8506 + int len;
8507 +
8508 +@@ -433,6 +449,14 @@ int hfsplus_rename_cat(u32 cnid,
8509 + return err;
8510 + dst_fd = src_fd;
8511 +
8512 ++ /*
8513 ++ * Fail early and avoid ENOSPC during the btree operations. We may
8514 ++ * have to split the root node at most twice.
8515 ++ */
8516 ++ err = hfs_bmap_reserve(src_fd.tree, 4 * (int)src_fd.tree->depth - 1);
8517 ++ if (err)
8518 ++ goto out;
8519 ++
8520 + /* find the old dir entry and read the data */
8521 + err = hfsplus_cat_build_key(sb, src_fd.search_key,
8522 + src_dir->i_ino, src_name);
8523 +diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
8524 +index 8e0f59767694..a930ddd15681 100644
8525 +--- a/fs/hfsplus/extents.c
8526 ++++ b/fs/hfsplus/extents.c
8527 +@@ -100,6 +100,10 @@ static int __hfsplus_ext_write_extent(struct inode *inode,
8528 + if (hip->extent_state & HFSPLUS_EXT_NEW) {
8529 + if (res != -ENOENT)
8530 + return res;
8531 ++ /* Fail early and avoid ENOSPC during the btree operation */
8532 ++ res = hfs_bmap_reserve(fd->tree, fd->tree->depth + 1);
8533 ++ if (res)
8534 ++ return res;
8535 + hfs_brec_insert(fd, hip->cached_extents,
8536 + sizeof(hfsplus_extent_rec));
8537 + hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
8538 +@@ -233,7 +237,9 @@ int hfsplus_get_block(struct inode *inode, sector_t iblock,
8539 + ablock = iblock >> sbi->fs_shift;
8540 +
8541 + if (iblock >= hip->fs_blocks) {
8542 +- if (iblock > hip->fs_blocks || !create)
8543 ++ if (!create)
8544 ++ return 0;
8545 ++ if (iblock > hip->fs_blocks)
8546 + return -EIO;
8547 + if (ablock >= hip->alloc_blocks) {
8548 + res = hfsplus_file_extend(inode, false);
8549 +diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
8550 +index 8e039435958a..dd7ad9f13e3a 100644
8551 +--- a/fs/hfsplus/hfsplus_fs.h
8552 ++++ b/fs/hfsplus/hfsplus_fs.h
8553 +@@ -311,6 +311,7 @@ static inline unsigned short hfsplus_min_io_size(struct super_block *sb)
8554 + #define hfs_btree_open hfsplus_btree_open
8555 + #define hfs_btree_close hfsplus_btree_close
8556 + #define hfs_btree_write hfsplus_btree_write
8557 ++#define hfs_bmap_reserve hfsplus_bmap_reserve
8558 + #define hfs_bmap_alloc hfsplus_bmap_alloc
8559 + #define hfs_bmap_free hfsplus_bmap_free
8560 + #define hfs_bnode_read hfsplus_bnode_read
8561 +@@ -395,6 +396,7 @@ u32 hfsplus_calc_btree_clump_size(u32 block_size, u32 node_size, u64 sectors,
8562 + struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id);
8563 + void hfs_btree_close(struct hfs_btree *tree);
8564 + int hfs_btree_write(struct hfs_btree *tree);
8565 ++int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes);
8566 + struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree);
8567 + void hfs_bmap_free(struct hfs_bnode *node);
8568 +
8569 +diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
8570 +index 8e9427a42b81..d7ab9d8c4b67 100644
8571 +--- a/fs/hfsplus/inode.c
8572 ++++ b/fs/hfsplus/inode.c
8573 +@@ -261,6 +261,7 @@ static int hfsplus_setattr(struct dentry *dentry, struct iattr *attr)
8574 + }
8575 + truncate_setsize(inode, attr->ia_size);
8576 + hfsplus_file_truncate(inode);
8577 ++ inode->i_mtime = inode->i_ctime = current_time(inode);
8578 + }
8579 +
8580 + setattr_copy(inode, attr);
8581 +diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
8582 +index 9f8250df99f1..f9b84f7a3e4b 100644
8583 +--- a/fs/ocfs2/buffer_head_io.c
8584 ++++ b/fs/ocfs2/buffer_head_io.c
8585 +@@ -99,25 +99,34 @@ out:
8586 + return ret;
8587 + }
8588 +
8589 ++/* Caller must provide a bhs[] whose entries are either all NULL or
8590 ++ * all non-NULL, so read failures are easier to handle.
8591 ++ */
8592 + int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
8593 + unsigned int nr, struct buffer_head *bhs[])
8594 + {
8595 + int status = 0;
8596 + unsigned int i;
8597 + struct buffer_head *bh;
8598 ++ int new_bh = 0;
8599 +
8600 + trace_ocfs2_read_blocks_sync((unsigned long long)block, nr);
8601 +
8602 + if (!nr)
8603 + goto bail;
8604 +
8605 ++ /* Don't put buffer head and re-assign it to NULL if it is allocated
8606 ++	 * outside since the caller can't be aware of this alteration!
8607 ++ */
8608 ++ new_bh = (bhs[0] == NULL);
8609 ++
8610 + for (i = 0 ; i < nr ; i++) {
8611 + if (bhs[i] == NULL) {
8612 + bhs[i] = sb_getblk(osb->sb, block++);
8613 + if (bhs[i] == NULL) {
8614 + status = -ENOMEM;
8615 + mlog_errno(status);
8616 +- goto bail;
8617 ++ break;
8618 + }
8619 + }
8620 + bh = bhs[i];
8621 +@@ -157,9 +166,26 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
8622 + submit_bh(REQ_OP_READ, 0, bh);
8623 + }
8624 +
8625 ++read_failure:
8626 + for (i = nr; i > 0; i--) {
8627 + bh = bhs[i - 1];
8628 +
8629 ++ if (unlikely(status)) {
8630 ++ if (new_bh && bh) {
8631 ++ /* If middle bh fails, let previous bh
8632 ++ * finish its read and then put it to
8633 ++ * aovoid bh leak
8634 ++				 * avoid a bh leak
8635 ++ if (!buffer_jbd(bh))
8636 ++ wait_on_buffer(bh);
8637 ++ put_bh(bh);
8638 ++ bhs[i - 1] = NULL;
8639 ++ } else if (bh && buffer_uptodate(bh)) {
8640 ++ clear_buffer_uptodate(bh);
8641 ++ }
8642 ++ continue;
8643 ++ }
8644 ++
8645 + /* No need to wait on the buffer if it's managed by JBD. */
8646 + if (!buffer_jbd(bh))
8647 + wait_on_buffer(bh);
8648 +@@ -169,8 +195,7 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
8649 + * so we can safely record this and loop back
8650 + * to cleanup the other buffers. */
8651 + status = -EIO;
8652 +- put_bh(bh);
8653 +- bhs[i - 1] = NULL;
8654 ++ goto read_failure;
8655 + }
8656 + }
8657 +
8658 +@@ -178,6 +203,9 @@ bail:
8659 + return status;
8660 + }
8661 +
8662 ++/* Caller must provide a bhs[] whose entries are either all NULL or
8663 ++ * all non-NULL, so read failures are easier to handle.
8664 ++ */
8665 + int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
8666 + struct buffer_head *bhs[], int flags,
8667 + int (*validate)(struct super_block *sb,
8668 +@@ -187,6 +215,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
8669 + int i, ignore_cache = 0;
8670 + struct buffer_head *bh;
8671 + struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
8672 ++ int new_bh = 0;
8673 +
8674 + trace_ocfs2_read_blocks_begin(ci, (unsigned long long)block, nr, flags);
8675 +
8676 +@@ -212,6 +241,11 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
8677 + goto bail;
8678 + }
8679 +
8680 ++ /* Don't put buffer head and re-assign it to NULL if it is allocated
8681 ++	 * outside since the caller can't be aware of this alteration!
8682 ++ */
8683 ++ new_bh = (bhs[0] == NULL);
8684 ++
8685 + ocfs2_metadata_cache_io_lock(ci);
8686 + for (i = 0 ; i < nr ; i++) {
8687 + if (bhs[i] == NULL) {
8688 +@@ -220,7 +254,8 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
8689 + ocfs2_metadata_cache_io_unlock(ci);
8690 + status = -ENOMEM;
8691 + mlog_errno(status);
8692 +- goto bail;
8693 ++ /* Don't forget to put previous bh! */
8694 ++ break;
8695 + }
8696 + }
8697 + bh = bhs[i];
8698 +@@ -314,16 +349,27 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
8699 + }
8700 + }
8701 +
8702 +- status = 0;
8703 +-
8704 ++read_failure:
8705 + for (i = (nr - 1); i >= 0; i--) {
8706 + bh = bhs[i];
8707 +
8708 + if (!(flags & OCFS2_BH_READAHEAD)) {
8709 +- if (status) {
8710 +- /* Clear the rest of the buffers on error */
8711 +- put_bh(bh);
8712 +- bhs[i] = NULL;
8713 ++ if (unlikely(status)) {
8714 ++ /* Clear the buffers on error including those
8715 ++ * ever succeeded in reading
8716 ++ */
8717 ++ if (new_bh && bh) {
8718 ++ /* If middle bh fails, let previous bh
8719 ++ * finish its read and then put it to
8720 ++ * aovoid bh leak
8721 ++					 * avoid a bh leak
8722 ++ if (!buffer_jbd(bh))
8723 ++ wait_on_buffer(bh);
8724 ++ put_bh(bh);
8725 ++ bhs[i] = NULL;
8726 ++ } else if (bh && buffer_uptodate(bh)) {
8727 ++ clear_buffer_uptodate(bh);
8728 ++ }
8729 + continue;
8730 + }
8731 + /* We know this can't have changed as we hold the
8732 +@@ -341,9 +387,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
8733 + * uptodate. */
8734 + status = -EIO;
8735 + clear_buffer_needs_validate(bh);
8736 +- put_bh(bh);
8737 +- bhs[i] = NULL;
8738 +- continue;
8739 ++ goto read_failure;
8740 + }
8741 +
8742 + if (buffer_needs_validate(bh)) {
8743 +@@ -353,11 +397,8 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
8744 + BUG_ON(buffer_jbd(bh));
8745 + clear_buffer_needs_validate(bh);
8746 + status = validate(sb, bh);
8747 +- if (status) {
8748 +- put_bh(bh);
8749 +- bhs[i] = NULL;
8750 +- continue;
8751 +- }
8752 ++ if (status)
8753 ++ goto read_failure;
8754 + }
8755 + }
8756 +
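
The read_failure label centralizes the cleanup rule spelled out in the new comments: on error, buffers this function allocated itself are waited on and released (so none leak mid-array), while caller-provided buffers are only marked not-uptodate, since the caller still owns references to them. A pure-userspace model of that split:

#include <stdio.h>
#include <stdlib.h>

struct buf { int uptodate; };

/* On a failed multi-buffer read: release buffers we allocated here
 * (after letting their I/O finish, in the kernel), but only clear
 * the uptodate flag on buffers the caller passed in and still owns. */
static void read_cleanup(struct buf **bhs, int nr, int we_allocated)
{
	int i;

	for (i = nr - 1; i >= 0; i--) {
		if (!bhs[i])
			continue;
		if (we_allocated) {
			free(bhs[i]);
			bhs[i] = NULL;
		} else {
			bhs[i]->uptodate = 0;
		}
	}
}

int main(void)
{
	struct buf *bhs[2] = { calloc(1, sizeof(struct buf)),
			       calloc(1, sizeof(struct buf)) };

	read_cleanup(bhs, 2, 1);
	printf("bhs[0]=%p bhs[1]=%p\n", (void *)bhs[0], (void *)bhs[1]);
	return 0;
}
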
8757 +diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
8758 +index 9b984cae4c4e..1d6dc8422899 100644
8759 +--- a/fs/ocfs2/dlm/dlmdebug.c
8760 ++++ b/fs/ocfs2/dlm/dlmdebug.c
8761 +@@ -329,7 +329,7 @@ void dlm_print_one_mle(struct dlm_master_list_entry *mle)
8762 + {
8763 + char *buf;
8764 +
8765 +- buf = (char *) get_zeroed_page(GFP_NOFS);
8766 ++ buf = (char *) get_zeroed_page(GFP_ATOMIC);
8767 + if (buf) {
8768 + dump_mle(mle, buf, PAGE_SIZE - 1);
8769 + free_page((unsigned long)buf);
8770 +diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
8771 +index 933aac5da193..178cb9e6772a 100644
8772 +--- a/fs/ocfs2/dlmglue.c
8773 ++++ b/fs/ocfs2/dlmglue.c
8774 +@@ -3603,7 +3603,7 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
8775 + * we can recover correctly from node failure. Otherwise, we may get
8776 + * invalid LVB in LKB, but without DLM_SBF_VALNOTVALID being set.
8777 + */
8778 +- if (!ocfs2_is_o2cb_active() &&
8779 ++ if (ocfs2_userspace_stack(osb) &&
8780 + lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
8781 + lvb = 1;
8782 +
8783 +diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
8784 +index a847fe52c56e..a3e077fcfeb9 100644
8785 +--- a/fs/ocfs2/file.c
8786 ++++ b/fs/ocfs2/file.c
8787 +@@ -2389,7 +2389,7 @@ static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
8788 +
8789 + written = __generic_file_write_iter(iocb, from);
8790 + /* buffered aio wouldn't have proper lock coverage today */
8791 +- BUG_ON(written == -EIOCBQUEUED && !(iocb->ki_flags & IOCB_DIRECT));
8792 ++ BUG_ON(written == -EIOCBQUEUED && !direct_io);
8793 +
8794 + /*
8795 + * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io
8796 +@@ -2509,7 +2509,7 @@ static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
8797 + trace_generic_file_read_iter_ret(ret);
8798 +
8799 + /* buffered aio wouldn't have proper lock coverage today */
8800 +- BUG_ON(ret == -EIOCBQUEUED && !(iocb->ki_flags & IOCB_DIRECT));
8801 ++ BUG_ON(ret == -EIOCBQUEUED && !direct_io);
8802 +
8803 + /* see ocfs2_file_write_iter */
8804 + if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
8805 +diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
8806 +index c492cbb2410f..babb0ec76d67 100644
8807 +--- a/fs/ocfs2/journal.c
8808 ++++ b/fs/ocfs2/journal.c
8809 +@@ -1379,15 +1379,23 @@ static int __ocfs2_recovery_thread(void *arg)
8810 + int rm_quota_used = 0, i;
8811 + struct ocfs2_quota_recovery *qrec;
8812 +
8813 ++	/* Whether quota is supported. */
8814 ++ int quota_enabled = OCFS2_HAS_RO_COMPAT_FEATURE(osb->sb,
8815 ++ OCFS2_FEATURE_RO_COMPAT_USRQUOTA)
8816 ++ || OCFS2_HAS_RO_COMPAT_FEATURE(osb->sb,
8817 ++ OCFS2_FEATURE_RO_COMPAT_GRPQUOTA);
8818 ++
8819 + status = ocfs2_wait_on_mount(osb);
8820 + if (status < 0) {
8821 + goto bail;
8822 + }
8823 +
8824 +- rm_quota = kcalloc(osb->max_slots, sizeof(int), GFP_NOFS);
8825 +- if (!rm_quota) {
8826 +- status = -ENOMEM;
8827 +- goto bail;
8828 ++ if (quota_enabled) {
8829 ++ rm_quota = kcalloc(osb->max_slots, sizeof(int), GFP_NOFS);
8830 ++ if (!rm_quota) {
8831 ++ status = -ENOMEM;
8832 ++ goto bail;
8833 ++ }
8834 + }
8835 + restart:
8836 + status = ocfs2_super_lock(osb, 1);
8837 +@@ -1423,9 +1431,14 @@ restart:
8838 + * then quota usage would be out of sync until some node takes
8839 + * the slot. So we remember which nodes need quota recovery
8840 + * and when everything else is done, we recover quotas. */
8841 +- for (i = 0; i < rm_quota_used && rm_quota[i] != slot_num; i++);
8842 +- if (i == rm_quota_used)
8843 +- rm_quota[rm_quota_used++] = slot_num;
8844 ++ if (quota_enabled) {
8845 ++ for (i = 0; i < rm_quota_used
8846 ++ && rm_quota[i] != slot_num; i++)
8847 ++ ;
8848 ++
8849 ++ if (i == rm_quota_used)
8850 ++ rm_quota[rm_quota_used++] = slot_num;
8851 ++ }
8852 +
8853 + status = ocfs2_recover_node(osb, node_num, slot_num);
8854 + skip_recovery:
8855 +@@ -1453,16 +1466,19 @@ skip_recovery:
8856 + /* Now it is right time to recover quotas... We have to do this under
8857 + * superblock lock so that no one can start using the slot (and crash)
8858 + * before we recover it */
8859 +- for (i = 0; i < rm_quota_used; i++) {
8860 +- qrec = ocfs2_begin_quota_recovery(osb, rm_quota[i]);
8861 +- if (IS_ERR(qrec)) {
8862 +- status = PTR_ERR(qrec);
8863 +- mlog_errno(status);
8864 +- continue;
8865 ++ if (quota_enabled) {
8866 ++ for (i = 0; i < rm_quota_used; i++) {
8867 ++ qrec = ocfs2_begin_quota_recovery(osb, rm_quota[i]);
8868 ++ if (IS_ERR(qrec)) {
8869 ++ status = PTR_ERR(qrec);
8870 ++ mlog_errno(status);
8871 ++ continue;
8872 ++ }
8873 ++ ocfs2_queue_recovery_completion(osb->journal,
8874 ++ rm_quota[i],
8875 ++ NULL, NULL, qrec,
8876 ++ ORPHAN_NEED_TRUNCATE);
8877 + }
8878 +- ocfs2_queue_recovery_completion(osb->journal, rm_quota[i],
8879 +- NULL, NULL, qrec,
8880 +- ORPHAN_NEED_TRUNCATE);
8881 + }
8882 +
8883 + ocfs2_super_unlock(osb, 1);
8884 +@@ -1484,7 +1500,8 @@ bail:
8885 +
8886 + mutex_unlock(&osb->recovery_lock);
8887 +
8888 +- kfree(rm_quota);
8889 ++ if (quota_enabled)
8890 ++ kfree(rm_quota);
8891 +
8892 + /* no one is calling kthread_stop() for us so the kthread() api
8893 + * requires that we call do_exit(). And it isn't exported, but
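
The journal.c hunk above computes quota_enabled once from the RO_COMPAT feature bits and then gates both the rm_quota allocation and every later use on it, so quota-less filesystems skip the allocation entirely. A minimal userspace sketch of the pattern (the names below are invented for illustration, and plain calloc/free stand in for kcalloc/kfree):

#include <stdlib.h>

static int recover(int max_slots, int quota_enabled)
{
    int *rm_quota = NULL;
    int rm_quota_used = 0;

    if (quota_enabled) {
        rm_quota = calloc(max_slots, sizeof(int));
        if (!rm_quota)
            return -1;                      /* -ENOMEM in the kernel */
    }

    if (quota_enabled) {
        int slot_num = 3, i;                /* example slot needing recovery */

        /* remember the slot once, as in the recovery loop above */
        for (i = 0; i < rm_quota_used && rm_quota[i] != slot_num; i++)
            ;
        if (i == rm_quota_used)
            rm_quota[rm_quota_used++] = slot_num;
    }

    free(rm_quota);                         /* free(NULL) is a no-op */
    return 0;
}

int main(void)
{
    return recover(8, 1);
}

As an aside, kfree(NULL) is a no-op just like free(NULL), so the final "if (quota_enabled)" guard around kfree(rm_quota) in the hunk is defensive rather than strictly necessary.
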
8894 +diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
8895 +index f55f82ca3425..1565dd8e8856 100644
8896 +--- a/fs/ocfs2/move_extents.c
8897 ++++ b/fs/ocfs2/move_extents.c
8898 +@@ -25,6 +25,7 @@
8899 + #include "ocfs2_ioctl.h"
8900 +
8901 + #include "alloc.h"
8902 ++#include "localalloc.h"
8903 + #include "aops.h"
8904 + #include "dlmglue.h"
8905 + #include "extent_map.h"
8906 +@@ -222,6 +223,7 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
8907 + struct ocfs2_refcount_tree *ref_tree = NULL;
8908 + u32 new_phys_cpos, new_len;
8909 + u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
8910 ++ int need_free = 0;
8911 +
8912 + if ((ext_flags & OCFS2_EXT_REFCOUNTED) && *len) {
8913 + BUG_ON(!ocfs2_is_refcount_inode(inode));
8914 +@@ -312,6 +314,7 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
8915 + if (!partial) {
8916 + context->range->me_flags &= ~OCFS2_MOVE_EXT_FL_COMPLETE;
8917 + ret = -ENOSPC;
8918 ++ need_free = 1;
8919 + goto out_commit;
8920 + }
8921 + }
8922 +@@ -336,6 +339,20 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
8923 + mlog_errno(ret);
8924 +
8925 + out_commit:
8926 ++ if (need_free && context->data_ac) {
8927 ++ struct ocfs2_alloc_context *data_ac = context->data_ac;
8928 ++
8929 ++ if (context->data_ac->ac_which == OCFS2_AC_USE_LOCAL)
8930 ++ ocfs2_free_local_alloc_bits(osb, handle, data_ac,
8931 ++ new_phys_cpos, new_len);
8932 ++ else
8933 ++ ocfs2_free_clusters(handle,
8934 ++ data_ac->ac_inode,
8935 ++ data_ac->ac_bh,
8936 ++ ocfs2_clusters_to_blocks(osb->sb, new_phys_cpos),
8937 ++ new_len);
8938 ++ }
8939 ++
8940 + ocfs2_commit_trans(osb, handle);
8941 +
8942 + out_unlock_mutex:
8943 +diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
8944 +index d6c350ba25b9..c4b029c43464 100644
8945 +--- a/fs/ocfs2/stackglue.c
8946 ++++ b/fs/ocfs2/stackglue.c
8947 +@@ -48,12 +48,6 @@ static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl";
8948 + */
8949 + static struct ocfs2_stack_plugin *active_stack;
8950 +
8951 +-inline int ocfs2_is_o2cb_active(void)
8952 +-{
8953 +- return !strcmp(active_stack->sp_name, OCFS2_STACK_PLUGIN_O2CB);
8954 +-}
8955 +-EXPORT_SYMBOL_GPL(ocfs2_is_o2cb_active);
8956 +-
8957 + static struct ocfs2_stack_plugin *ocfs2_stack_lookup(const char *name)
8958 + {
8959 + struct ocfs2_stack_plugin *p;
8960 +diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h
8961 +index e3036e1790e8..f2dce10fae54 100644
8962 +--- a/fs/ocfs2/stackglue.h
8963 ++++ b/fs/ocfs2/stackglue.h
8964 +@@ -298,9 +298,6 @@ void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_p
8965 + int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin);
8966 + void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin);
8967 +
8968 +-/* In ocfs2_downconvert_lock(), we need to know which stack we are using */
8969 +-int ocfs2_is_o2cb_active(void);
8970 +-
8971 + extern struct kset *ocfs2_kset;
8972 +
8973 + #endif /* STACKGLUE_H */
8974 +diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
8975 +index 0d80e0df6c24..c146e12a8601 100644
8976 +--- a/fs/ocfs2/xattr.c
8977 ++++ b/fs/ocfs2/xattr.c
8978 +@@ -1498,6 +1498,18 @@ static int ocfs2_xa_check_space(struct ocfs2_xa_loc *loc,
8979 + return loc->xl_ops->xlo_check_space(loc, xi);
8980 + }
8981 +
8982 ++static void ocfs2_xa_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
8983 ++{
8984 ++ loc->xl_ops->xlo_add_entry(loc, name_hash);
8985 ++ loc->xl_entry->xe_name_hash = cpu_to_le32(name_hash);
8986 ++ /*
8987 ++ * We can't leave the new entry's xe_name_offset at zero or
8988 ++ * add_namevalue() will go nuts. We set it to the size of our
8989 ++ * storage so that it can never be less than any other entry.
8990 ++ */
8991 ++ loc->xl_entry->xe_name_offset = cpu_to_le16(loc->xl_size);
8992 ++}
8993 ++
8994 + static void ocfs2_xa_add_namevalue(struct ocfs2_xa_loc *loc,
8995 + struct ocfs2_xattr_info *xi)
8996 + {
8997 +@@ -2129,31 +2141,29 @@ static int ocfs2_xa_prepare_entry(struct ocfs2_xa_loc *loc,
8998 + if (rc)
8999 + goto out;
9000 +
9001 +- if (!loc->xl_entry) {
9002 +- rc = -EINVAL;
9003 +- goto out;
9004 +- }
9005 +-
9006 +- if (ocfs2_xa_can_reuse_entry(loc, xi)) {
9007 +- orig_value_size = loc->xl_entry->xe_value_size;
9008 +- rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
9009 +- if (rc)
9010 +- goto out;
9011 +- goto alloc_value;
9012 +- }
9013 ++ if (loc->xl_entry) {
9014 ++ if (ocfs2_xa_can_reuse_entry(loc, xi)) {
9015 ++ orig_value_size = loc->xl_entry->xe_value_size;
9016 ++ rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
9017 ++ if (rc)
9018 ++ goto out;
9019 ++ goto alloc_value;
9020 ++ }
9021 +
9022 +- if (!ocfs2_xattr_is_local(loc->xl_entry)) {
9023 +- orig_clusters = ocfs2_xa_value_clusters(loc);
9024 +- rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
9025 +- if (rc) {
9026 +- mlog_errno(rc);
9027 +- ocfs2_xa_cleanup_value_truncate(loc,
9028 +- "overwriting",
9029 +- orig_clusters);
9030 +- goto out;
9031 ++ if (!ocfs2_xattr_is_local(loc->xl_entry)) {
9032 ++ orig_clusters = ocfs2_xa_value_clusters(loc);
9033 ++ rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
9034 ++ if (rc) {
9035 ++ mlog_errno(rc);
9036 ++ ocfs2_xa_cleanup_value_truncate(loc,
9037 ++ "overwriting",
9038 ++ orig_clusters);
9039 ++ goto out;
9040 ++ }
9041 + }
9042 +- }
9043 +- ocfs2_xa_wipe_namevalue(loc);
9044 ++ ocfs2_xa_wipe_namevalue(loc);
9045 ++ } else
9046 ++ ocfs2_xa_add_entry(loc, name_hash);
9047 +
9048 + /*
9049 + * If we get here, we have a blank entry. Fill it. We grow our
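
The xattr.c hunk above restores ocfs2_xa_add_entry() and folds the reuse/truncate path under "if (loc->xl_entry)", so a lookup that found no existing entry now creates one instead of failing with -EINVAL. A self-contained control-flow sketch of the restructured logic (simplified flags instead of the real ocfs2 types; all helper behaviour is reduced to printfs):

#include <stdbool.h>
#include <stdio.h>

struct loc {
    bool has_entry;    /* loc->xl_entry != NULL */
    bool reusable;     /* ocfs2_xa_can_reuse_entry() */
    bool value_local;  /* ocfs2_xattr_is_local() */
};

static int prepare_entry(struct loc *loc, unsigned int name_hash)
{
    if (loc->has_entry) {
        if (loc->reusable) {
            puts("reuse the existing entry in place");
            return 0;
        }
        if (!loc->value_local)
            puts("truncate the external value to zero clusters");
        puts("wipe name/value, leaving a blank entry");
    } else {
        /* previously an error; now a fresh entry is added */
        printf("add a new entry, name_hash=%#x\n", name_hash);
    }
    puts("fill the blank entry");
    return 0;
}

int main(void)
{
    struct loc missing = { .has_entry = false };

    return prepare_entry(&missing, 0x1234);  /* no longer -EINVAL */
}
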
9050 +diff --git a/fs/read_write.c b/fs/read_write.c
9051 +index 5fb5ee5b8cd7..2195380620d0 100644
9052 +--- a/fs/read_write.c
9053 ++++ b/fs/read_write.c
9054 +@@ -1715,6 +1715,34 @@ static int clone_verify_area(struct file *file, loff_t pos, u64 len, bool write)
9055 +
9056 + return security_file_permission(file, write ? MAY_WRITE : MAY_READ);
9057 + }
9058 ++/*
9059 ++ * Ensure that we don't remap a partial EOF block in the middle of something
9060 ++ * else. Assume that the offsets have already been checked for block
9061 ++ * alignment.
9062 ++ *
9063 ++ * For deduplication we always scale down to the previous block because we
9064 ++ * can't meaningfully compare post-EOF contents.
9065 ++ *
9066 ++ * For clone we only link a partial EOF block above the destination file's EOF.
9067 ++ */
9068 ++static int generic_remap_check_len(struct inode *inode_in,
9069 ++ struct inode *inode_out,
9070 ++ loff_t pos_out,
9071 ++ u64 *len,
9072 ++ bool is_dedupe)
9073 ++{
9074 ++ u64 blkmask = i_blocksize(inode_in) - 1;
9075 ++
9076 ++ if ((*len & blkmask) == 0)
9077 ++ return 0;
9078 ++
9079 ++ if (is_dedupe)
9080 ++ *len &= ~blkmask;
9081 ++ else if (pos_out + *len < i_size_read(inode_out))
9082 ++ return -EINVAL;
9083 ++
9084 ++ return 0;
9085 ++}
9086 +
9087 + /*
9088 + * Check that the two inodes are eligible for cloning, the ranges make
9089 +@@ -1821,6 +1849,11 @@ int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
9090 + return -EBADE;
9091 + }
9092 +
9093 ++ ret = generic_remap_check_len(inode_in, inode_out, pos_out, len,
9094 ++ is_dedupe);
9095 ++ if (ret)
9096 ++ return ret;
9097 ++
9098 + return 1;
9099 + }
9100 + EXPORT_SYMBOL(vfs_clone_file_prep_inodes);
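
generic_remap_check_len() above is self-contained enough to model directly: with 4 KiB blocks, a dedupe request covering a partial EOF block is rounded down to the block boundary, while a clone of a partial block is rejected unless it lands at or beyond the destination's EOF. A compilable restatement (plain integers instead of inodes; the block size is an assumption, taken from i_blocksize() in the kernel):

#include <stdint.h>
#include <stdio.h>

#define BLKSIZE 4096u  /* assumed block size */

static int remap_check_len(uint64_t pos_out, uint64_t *len,
                           uint64_t out_size, int is_dedupe)
{
    uint64_t blkmask = BLKSIZE - 1;

    if ((*len & blkmask) == 0)
        return 0;                   /* block aligned: nothing to do */
    if (is_dedupe)
        *len &= ~blkmask;           /* drop the partial tail block */
    else if (pos_out + *len < out_size)
        return -22;                 /* -EINVAL: partial block mid-file */
    return 0;
}

int main(void)
{
    uint64_t len = 10000;           /* two 4 KiB blocks plus 1808 bytes */

    remap_check_len(0, &len, 1 << 20, 1);
    printf("dedupe: len rounded down to %llu\n",   /* 8192 */
           (unsigned long long)len);

    len = 10000;
    printf("clone into the middle of a file: %d\n", /* -22 */
           remap_check_len(0, &len, 1 << 20, 0));
    return 0;
}
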
9101 +diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
9102 +index e839907e8492..e36124546d0d 100644
9103 +--- a/fs/xfs/xfs_buf.c
9104 ++++ b/fs/xfs/xfs_buf.c
9105 +@@ -37,6 +37,32 @@ static kmem_zone_t *xfs_buf_zone;
9106 + #define xb_to_gfp(flags) \
9107 + ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
9108 +
9109 ++/*
9110 ++ * Locking orders
9111 ++ *
9112 ++ * xfs_buf_ioacct_inc:
9113 ++ * xfs_buf_ioacct_dec:
9114 ++ * b_sema (caller holds)
9115 ++ * b_lock
9116 ++ *
9117 ++ * xfs_buf_stale:
9118 ++ * b_sema (caller holds)
9119 ++ * b_lock
9120 ++ * lru_lock
9121 ++ *
9122 ++ * xfs_buf_rele:
9123 ++ * b_lock
9124 ++ * pag_buf_lock
9125 ++ * lru_lock
9126 ++ *
9127 ++ * xfs_buftarg_wait_rele
9128 ++ * lru_lock
9129 ++ * b_lock (trylock due to inversion)
9130 ++ *
9131 ++ * xfs_buftarg_isolate
9132 ++ * lru_lock
9133 ++ * b_lock (trylock due to inversion)
9134 ++ */
9135 +
9136 + static inline int
9137 + xfs_buf_is_vmapped(
9138 +@@ -1006,8 +1032,18 @@ xfs_buf_rele(
9139 +
9140 + ASSERT(atomic_read(&bp->b_hold) > 0);
9141 +
9142 +- release = atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock);
9143 ++ /*
9144 ++ * We grab the b_lock here first to serialise racing xfs_buf_rele()
9145 ++ * calls. The pag_buf_lock being taken on the last reference only
9146 ++ * serialises against racing lookups in xfs_buf_find(). IOWs, the second
9147 ++ * to last reference we drop here is not serialised against the last
9148 ++ * reference until we take bp->b_lock. Hence if we don't grab b_lock
9149 ++ * first, the last "release" reference can win the race to the lock and
9150 ++ * free the buffer before the second-to-last reference is processed,
9151 ++ * leading to a use-after-free scenario.
9152 ++ */
9153 + spin_lock(&bp->b_lock);
9154 ++ release = atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock);
9155 + if (!release) {
9156 + /*
9157 + * Drop the in-flight state if the buffer is already on the LRU
9158 +@@ -1989,6 +2025,13 @@ xfs_buf_delwri_submit_buffers(
9159 + * is only safely useable for callers that can track I/O completion by higher
9160 + * level means, e.g. AIL pushing as the @buffer_list is consumed in this
9161 + * function.
9162 ++ *
9163 ++ * Note: this function will skip buffers it would block on, and in doing so
9164 ++ * leaves them on @buffer_list so they can be retried on a later pass. As such,
9165 ++ * it is up to the caller to ensure that the buffer list is fully submitted or
9166 ++ * cancelled appropriately when they are finished with the list. Failure to
9167 ++ * cancel or resubmit the list until it is empty will result in leaked buffers
9168 ++ * at unmount time.
9169 + */
9170 + int
9171 + xfs_buf_delwri_submit_nowait(
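
The xfs_buf.c change above hoists the spin_lock(&bp->b_lock) in front of the hold-count decrement so racing releases are serialised against each other, not just against lookups. A rough userspace analogue of that ordering, with a pthread mutex and C11 atomics standing in for b_lock and b_hold (a sketch of the idea only, not the kernel's buffer life cycle):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct buf {
    atomic_int      hold;   /* stand-in for b_hold */
    pthread_mutex_t lock;   /* stand-in for b_lock */
};

static void buf_rele(struct buf *bp)
{
    /* Lock *before* the decrement: a racing releaser dropping the last
     * hold must wait here, so it cannot free bp while this thread is
     * still in the not-the-last-reference path below. */
    pthread_mutex_lock(&bp->lock);
    int release = (atomic_fetch_sub(&bp->hold, 1) == 1);
    if (!release) {
        /* second-to-last (or earlier) reference: bp is still safe */
        pthread_mutex_unlock(&bp->lock);
        return;
    }
    pthread_mutex_unlock(&bp->lock);
    pthread_mutex_destroy(&bp->lock);
    free(bp);                           /* last reference: tear down */
}

int main(void)
{
    struct buf *bp = malloc(sizeof(*bp));

    if (!bp)
        return 1;
    atomic_init(&bp->hold, 2);
    pthread_mutex_init(&bp->lock, NULL);
    buf_rele(bp);   /* second-to-last */
    buf_rele(bp);   /* last: frees bp */
    return 0;
}
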
9172 +diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
9173 +index 55326f971cb3..d3a4e89bf4a0 100644
9174 +--- a/fs/xfs/xfs_trans_ail.c
9175 ++++ b/fs/xfs/xfs_trans_ail.c
9176 +@@ -531,17 +531,33 @@ xfsaild(
9177 + set_current_state(TASK_INTERRUPTIBLE);
9178 +
9179 + /*
9180 +- * Check kthread_should_stop() after we set the task state
9181 +- * to guarantee that we either see the stop bit and exit or
9182 +- * the task state is reset to runnable such that it's not
9183 +- * scheduled out indefinitely and detects the stop bit at
9184 +- * next iteration.
9185 +- *
9186 ++ * Check kthread_should_stop() after we set the task state to
9187 ++ * guarantee that we either see the stop bit and exit or the
9188 ++ * task state is reset to runnable such that it's not scheduled
9189 ++ * out indefinitely and detects the stop bit at next iteration.
9190 + * A memory barrier is included in above task state set to
9191 + * serialize against kthread_stop().
9192 + */
9193 + if (kthread_should_stop()) {
9194 + __set_current_state(TASK_RUNNING);
9195 ++
9196 ++ /*
9197 ++ * The caller forces out the AIL before stopping the
9198 ++ * thread in the common case, which means the delwri
9199 ++ * queue is drained. In the shutdown case, the queue may
9200 ++ * still hold relogged buffers that haven't been
9201 ++ * submitted because they were pinned since added to the
9202 ++ * queue.
9203 ++ *
9204 ++ * Log I/O error processing stales the underlying buffer
9205 ++ * and clears the delwri state, expecting the buf to be
9206 ++ * removed on the next submission attempt. That won't
9207 ++ * happen if we're shutting down, so this is the last
9208 ++ * opportunity to release such buffers from the queue.
9209 ++ */
9210 ++ ASSERT(list_empty(&ailp->ail_buf_list) ||
9211 ++ XFS_FORCED_SHUTDOWN(ailp->ail_mount));
9212 ++ xfs_buf_delwri_cancel(&ailp->ail_buf_list);
9213 + break;
9214 + }
9215 +
9216 +diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
9217 +index acf5e8df3504..b71a033c781e 100644
9218 +--- a/include/linux/bitmap.h
9219 ++++ b/include/linux/bitmap.h
9220 +@@ -204,8 +204,13 @@ extern int bitmap_print_to_pagebuf(bool list, char *buf,
9221 + #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
9222 + #define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
9223 +
9224 ++/*
9225 ++ * The static inlines below do not handle constant nbits==0 correctly,
9226 ++ * so make such users (should any ever turn up) call the out-of-line
9227 ++ * versions.
9228 ++ */
9229 + #define small_const_nbits(nbits) \
9230 +- (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
9231 ++ (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG && (nbits) > 0)
9232 +
9233 + static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
9234 + {
9235 +@@ -398,7 +403,7 @@ static __always_inline void bitmap_clear(unsigned long *map, unsigned int start,
9236 + }
9237 +
9238 + static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
9239 +- unsigned int shift, int nbits)
9240 ++ unsigned int shift, unsigned int nbits)
9241 + {
9242 + if (small_const_nbits(nbits))
9243 + *dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift;
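
The bitmap.h guard above matters because BITMAP_LAST_WORD_MASK() is wrong for a constant nbits of zero: a zero-bit mask comes out as all ones, which the inline single-word paths would then feed straight into loads and stores. A two-line demonstration that compiles as-is:

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define BITMAP_LAST_WORD_MASK(nbits) \
    (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))

int main(void)
{
    printf("mask for nbits=1: %#lx\n", BITMAP_LAST_WORD_MASK(1)); /* 0x1 */
    printf("mask for nbits=0: %#lx\n", BITMAP_LAST_WORD_MASK(0)); /* all ones */
    return 0;
}
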
9244 +diff --git a/include/linux/futex.h b/include/linux/futex.h
9245 +index 821ae502d3d8..ccaef0097785 100644
9246 +--- a/include/linux/futex.h
9247 ++++ b/include/linux/futex.h
9248 +@@ -9,9 +9,6 @@ struct inode;
9249 + struct mm_struct;
9250 + struct task_struct;
9251 +
9252 +-extern int
9253 +-handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi);
9254 +-
9255 + /*
9256 + * Futexes are matched on equal values of this key.
9257 + * The key type depends on whether it's a shared or private mapping.
9258 +@@ -55,11 +52,6 @@ extern void exit_robust_list(struct task_struct *curr);
9259 +
9260 + long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
9261 + u32 __user *uaddr2, u32 val2, u32 val3);
9262 +-#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
9263 +-#define futex_cmpxchg_enabled 1
9264 +-#else
9265 +-extern int futex_cmpxchg_enabled;
9266 +-#endif
9267 + #else
9268 + static inline void exit_robust_list(struct task_struct *curr)
9269 + {
9270 +diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
9271 +index c759d1cbcedd..a64f21a97369 100644
9272 +--- a/include/linux/inetdevice.h
9273 ++++ b/include/linux/inetdevice.h
9274 +@@ -37,7 +37,9 @@ struct in_device {
9275 + unsigned long mr_v1_seen;
9276 + unsigned long mr_v2_seen;
9277 + unsigned long mr_maxdelay;
9278 +- unsigned char mr_qrv;
9279 ++ unsigned long mr_qi; /* Query Interval */
9280 ++ unsigned long mr_qri; /* Query Response Interval */
9281 ++ unsigned char mr_qrv; /* Query Robustness Variable */
9282 + unsigned char mr_gq_running;
9283 + unsigned char mr_ifc_count;
9284 + struct timer_list mr_gq_timer; /* general query timer */
9285 +diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
9286 +index 96207939d862..748016ae01e3 100644
9287 +--- a/include/linux/kvm_host.h
9288 ++++ b/include/linux/kvm_host.h
9289 +@@ -911,6 +911,7 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
9290 + void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
9291 +
9292 + bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
9293 ++bool kvm_is_zone_device_pfn(kvm_pfn_t pfn);
9294 +
9295 + struct kvm_irq_ack_notifier {
9296 + struct hlist_node link;
9297 +diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
9298 +index 34a28227068d..16487052017d 100644
9299 +--- a/include/linux/memory_hotplug.h
9300 ++++ b/include/linux/memory_hotplug.h
9301 +@@ -322,6 +322,7 @@ static inline void remove_memory(int nid, u64 start, u64 size) {}
9302 + extern void __ref free_area_init_core_hotplug(int nid);
9303 + extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
9304 + void *arg, int (*func)(struct memory_block *, void *));
9305 ++extern int __add_memory(int nid, u64 start, u64 size);
9306 + extern int add_memory(int nid, u64 start, u64 size);
9307 + extern int add_memory_resource(int nid, struct resource *resource, bool online);
9308 + extern int arch_add_memory(int nid, u64 start, u64 size,
9309 +diff --git a/include/linux/mfd/intel_soc_pmic.h b/include/linux/mfd/intel_soc_pmic.h
9310 +index 5aacdb017a9f..806a4f095312 100644
9311 +--- a/include/linux/mfd/intel_soc_pmic.h
9312 ++++ b/include/linux/mfd/intel_soc_pmic.h
9313 +@@ -25,6 +25,7 @@ struct intel_soc_pmic {
9314 + int irq;
9315 + struct regmap *regmap;
9316 + struct regmap_irq_chip_data *irq_chip_data;
9317 ++ struct regmap_irq_chip_data *irq_chip_data_pwrbtn;
9318 + struct regmap_irq_chip_data *irq_chip_data_tmu;
9319 + struct regmap_irq_chip_data *irq_chip_data_bcu;
9320 + struct regmap_irq_chip_data *irq_chip_data_adc;
9321 +diff --git a/include/linux/mfd/max8997.h b/include/linux/mfd/max8997.h
9322 +index cf815577bd68..3ae1fe743bc3 100644
9323 +--- a/include/linux/mfd/max8997.h
9324 ++++ b/include/linux/mfd/max8997.h
9325 +@@ -178,7 +178,6 @@ struct max8997_led_platform_data {
9326 + struct max8997_platform_data {
9327 + /* IRQ */
9328 + int ono;
9329 +- int wakeup;
9330 +
9331 + /* ---- PMIC ---- */
9332 + struct max8997_regulator_data *regulators;
9333 +diff --git a/include/linux/mfd/mc13xxx.h b/include/linux/mfd/mc13xxx.h
9334 +index 54a3cd808f9e..2ad9bdc0a5ec 100644
9335 +--- a/include/linux/mfd/mc13xxx.h
9336 ++++ b/include/linux/mfd/mc13xxx.h
9337 +@@ -249,6 +249,7 @@ struct mc13xxx_platform_data {
9338 + #define MC13XXX_ADC0_TSMOD0 (1 << 12)
9339 + #define MC13XXX_ADC0_TSMOD1 (1 << 13)
9340 + #define MC13XXX_ADC0_TSMOD2 (1 << 14)
9341 ++#define MC13XXX_ADC0_CHRGRAWDIV (1 << 15)
9342 + #define MC13XXX_ADC0_ADINC1 (1 << 16)
9343 + #define MC13XXX_ADC0_ADINC2 (1 << 17)
9344 +
9345 +diff --git a/kernel/Makefile b/kernel/Makefile
9346 +index df5e3ca30acd..ad4b324d8906 100644
9347 +--- a/kernel/Makefile
9348 ++++ b/kernel/Makefile
9349 +@@ -50,9 +50,6 @@ obj-$(CONFIG_PROFILING) += profile.o
9350 + obj-$(CONFIG_STACKTRACE) += stacktrace.o
9351 + obj-y += time/
9352 + obj-$(CONFIG_FUTEX) += futex.o
9353 +-ifeq ($(CONFIG_COMPAT),y)
9354 +-obj-$(CONFIG_FUTEX) += futex_compat.o
9355 +-endif
9356 + obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
9357 + obj-$(CONFIG_SMP) += smp.o
9358 + ifneq ($(CONFIG_SMP),y)
9359 +diff --git a/kernel/auditsc.c b/kernel/auditsc.c
9360 +index b2d1f043f17f..1513873e23bd 100644
9361 +--- a/kernel/auditsc.c
9362 ++++ b/kernel/auditsc.c
9363 +@@ -1107,7 +1107,7 @@ static void audit_log_execve_info(struct audit_context *context,
9364 + }
9365 +
9366 + /* write as much as we can to the audit log */
9367 +- if (len_buf > 0) {
9368 ++ if (len_buf >= 0) {
9369 + /* NOTE: some magic numbers here - basically if we
9370 + * can't fit a reasonable amount of data into the
9371 + * existing audit buffer, flush it and start with
9372 +diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
9373 +index 378cef70341c..cfa27b7d1168 100644
9374 +--- a/kernel/bpf/btf.c
9375 ++++ b/kernel/bpf/btf.c
9376 +@@ -2067,50 +2067,44 @@ static int btf_check_sec_info(struct btf_verifier_env *env,
9377 + return 0;
9378 + }
9379 +
9380 +-static int btf_parse_hdr(struct btf_verifier_env *env, void __user *btf_data,
9381 +- u32 btf_data_size)
9382 ++static int btf_parse_hdr(struct btf_verifier_env *env)
9383 + {
9384 ++ u32 hdr_len, hdr_copy, btf_data_size;
9385 + const struct btf_header *hdr;
9386 +- u32 hdr_len, hdr_copy;
9387 +- /*
9388 +- * Minimal part of the "struct btf_header" that
9389 +- * contains the hdr_len.
9390 +- */
9391 +- struct btf_min_header {
9392 +- u16 magic;
9393 +- u8 version;
9394 +- u8 flags;
9395 +- u32 hdr_len;
9396 +- } __user *min_hdr;
9397 + struct btf *btf;
9398 + int err;
9399 +
9400 + btf = env->btf;
9401 +- min_hdr = btf_data;
9402 ++ btf_data_size = btf->data_size;
9403 +
9404 +- if (btf_data_size < sizeof(*min_hdr)) {
9405 ++ if (btf_data_size <
9406 ++ offsetof(struct btf_header, hdr_len) + sizeof(hdr->hdr_len)) {
9407 + btf_verifier_log(env, "hdr_len not found");
9408 + return -EINVAL;
9409 + }
9410 +
9411 +- if (get_user(hdr_len, &min_hdr->hdr_len))
9412 +- return -EFAULT;
9413 +-
9414 ++ hdr = btf->data;
9415 ++ hdr_len = hdr->hdr_len;
9416 + if (btf_data_size < hdr_len) {
9417 + btf_verifier_log(env, "btf_header not found");
9418 + return -EINVAL;
9419 + }
9420 +
9421 +- err = bpf_check_uarg_tail_zero(btf_data, sizeof(btf->hdr), hdr_len);
9422 +- if (err) {
9423 +- if (err == -E2BIG)
9424 +- btf_verifier_log(env, "Unsupported btf_header");
9425 +- return err;
9426 ++ /* Ensure the unsupported header fields are zero */
9427 ++ if (hdr_len > sizeof(btf->hdr)) {
9428 ++ u8 *expected_zero = btf->data + sizeof(btf->hdr);
9429 ++ u8 *end = btf->data + hdr_len;
9430 ++
9431 ++ for (; expected_zero < end; expected_zero++) {
9432 ++ if (*expected_zero) {
9433 ++ btf_verifier_log(env, "Unsupported btf_header");
9434 ++ return -E2BIG;
9435 ++ }
9436 ++ }
9437 + }
9438 +
9439 + hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr));
9440 +- if (copy_from_user(&btf->hdr, btf_data, hdr_copy))
9441 +- return -EFAULT;
9442 ++ memcpy(&btf->hdr, btf->data, hdr_copy);
9443 +
9444 + hdr = &btf->hdr;
9445 +
9446 +@@ -2186,10 +2180,6 @@ static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size,
9447 + }
9448 + env->btf = btf;
9449 +
9450 +- err = btf_parse_hdr(env, btf_data, btf_data_size);
9451 +- if (err)
9452 +- goto errout;
9453 +-
9454 + data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN);
9455 + if (!data) {
9456 + err = -ENOMEM;
9457 +@@ -2198,13 +2188,18 @@ static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size,
9458 +
9459 + btf->data = data;
9460 + btf->data_size = btf_data_size;
9461 +- btf->nohdr_data = btf->data + btf->hdr.hdr_len;
9462 +
9463 + if (copy_from_user(data, btf_data, btf_data_size)) {
9464 + err = -EFAULT;
9465 + goto errout;
9466 + }
9467 +
9468 ++ err = btf_parse_hdr(env);
9469 ++ if (err)
9470 ++ goto errout;
9471 ++
9472 ++ btf->nohdr_data = btf->data + btf->hdr.hdr_len;
9473 ++
9474 + err = btf_parse_str_sec(env);
9475 + if (err)
9476 + goto errout;
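
The btf.c rework above parses the header out of the kernel-side copy of the data and open-codes the forward-compatibility rule: a header longer than the struct this kernel knows about is accepted only if every extra byte is zero. A userspace model of that check (a simplified header with the same leading fields; the byte-array literal assumes a little-endian host):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct hdr {
    uint16_t magic;
    uint8_t  version;
    uint8_t  flags;
    uint32_t hdr_len;
};

static int parse_hdr(const uint8_t *data, uint32_t data_size, struct hdr *out)
{
    uint32_t hdr_len;

    /* need at least the fields up to and including hdr_len */
    if (data_size < offsetof(struct hdr, hdr_len) + sizeof(hdr_len))
        return -1;
    memcpy(&hdr_len, data + offsetof(struct hdr, hdr_len), sizeof(hdr_len));
    if (data_size < hdr_len)
        return -1;

    /* forward compatibility: unknown trailing header bytes must be zero */
    for (uint32_t i = sizeof(*out); i < hdr_len; i++)
        if (data[i])
            return -2;                  /* -E2BIG in the kernel */

    memcpy(out, data, hdr_len < sizeof(*out) ? hdr_len : sizeof(*out));
    return 0;
}

int main(void)
{
    uint8_t buf[16] = { 0x9f, 0xeb, 1, 0, 16, 0, 0, 0 }; /* hdr_len = 16 */
    struct hdr h;

    printf("zero tail:     %d\n", parse_hdr(buf, sizeof(buf), &h)); /* 0 */
    buf[12] = 0xff;
    printf("non-zero tail: %d\n", parse_hdr(buf, sizeof(buf), &h)); /* -2 */
    return 0;
}
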
9477 +diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
9478 +index fc500ca464d0..1defea4b2755 100644
9479 +--- a/kernel/bpf/devmap.c
9480 ++++ b/kernel/bpf/devmap.c
9481 +@@ -520,8 +520,7 @@ static int dev_map_notification(struct notifier_block *notifier,
9482 + struct bpf_dtab_netdev *dev, *odev;
9483 +
9484 + dev = READ_ONCE(dtab->netdev_map[i]);
9485 +- if (!dev ||
9486 +- dev->dev->ifindex != netdev->ifindex)
9487 ++ if (!dev || netdev != dev->dev)
9488 + continue;
9489 + odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
9490 + if (dev == odev)
9491 +diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
9492 +index 4f8a6dbf0b60..2a8c41f12d45 100644
9493 +--- a/kernel/dma/swiotlb.c
9494 ++++ b/kernel/dma/swiotlb.c
9495 +@@ -761,34 +761,6 @@ static bool swiotlb_free_buffer(struct device *dev, size_t size,
9496 + return true;
9497 + }
9498 +
9499 +-static void
9500 +-swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
9501 +- int do_panic)
9502 +-{
9503 +- if (swiotlb_force == SWIOTLB_NO_FORCE)
9504 +- return;
9505 +-
9506 +- /*
9507 +- * Ran out of IOMMU space for this operation. This is very bad.
9508 +- * Unfortunately the drivers cannot handle this operation properly.
9509 +- * unless they check for dma_mapping_error (most don't)
9510 +- * When the mapping is small enough return a static buffer to limit
9511 +- * the damage, or panic when the transfer is too big.
9512 +- */
9513 +- dev_err_ratelimited(dev, "DMA: Out of SW-IOMMU space for %zu bytes\n",
9514 +- size);
9515 +-
9516 +- if (size <= io_tlb_overflow || !do_panic)
9517 +- return;
9518 +-
9519 +- if (dir == DMA_BIDIRECTIONAL)
9520 +- panic("DMA: Random memory could be DMA accessed\n");
9521 +- if (dir == DMA_FROM_DEVICE)
9522 +- panic("DMA: Random memory could be DMA written\n");
9523 +- if (dir == DMA_TO_DEVICE)
9524 +- panic("DMA: Random memory could be DMA read\n");
9525 +-}
9526 +-
9527 + /*
9528 + * Map a single buffer of the indicated size for DMA in streaming mode. The
9529 + * physical address to use is returned.
9530 +@@ -817,10 +789,8 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
9531 +
9532 + /* Oh well, have to allocate and map a bounce buffer. */
9533 + map = map_single(dev, phys, size, dir, attrs);
9534 +- if (map == SWIOTLB_MAP_ERROR) {
9535 +- swiotlb_full(dev, size, dir, 1);
9536 ++ if (map == SWIOTLB_MAP_ERROR)
9537 + return __phys_to_dma(dev, io_tlb_overflow_buffer);
9538 +- }
9539 +
9540 + dev_addr = __phys_to_dma(dev, map);
9541 +
9542 +@@ -954,7 +924,6 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
9543 + if (map == SWIOTLB_MAP_ERROR) {
9544 + /* Don't panic here, we expect map_sg users
9545 + to do proper error handling. */
9546 +- swiotlb_full(hwdev, sg->length, dir, 0);
9547 + attrs |= DMA_ATTR_SKIP_CPU_SYNC;
9548 + swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
9549 + attrs);
9550 +diff --git a/kernel/futex.c b/kernel/futex.c
9551 +index afdc5eadce6e..e75ad30aa7bc 100644
9552 +--- a/kernel/futex.c
9553 ++++ b/kernel/futex.c
9554 +@@ -44,6 +44,7 @@
9555 + * along with this program; if not, write to the Free Software
9556 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
9557 + */
9558 ++#include <linux/compat.h>
9559 + #include <linux/slab.h>
9560 + #include <linux/poll.h>
9561 + #include <linux/fs.h>
9562 +@@ -173,8 +174,10 @@
9563 + * double_lock_hb() and double_unlock_hb(), respectively.
9564 + */
9565 +
9566 +-#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
9567 +-int __read_mostly futex_cmpxchg_enabled;
9568 ++#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
9569 ++#define futex_cmpxchg_enabled 1
9570 ++#else
9571 ++static int __read_mostly futex_cmpxchg_enabled;
9572 + #endif
9573 +
9574 + /*
9575 +@@ -3454,11 +3457,16 @@ err_unlock:
9576 + return ret;
9577 + }
9578 +
9579 ++/* Constants for the pending_op argument of handle_futex_death */
9580 ++#define HANDLE_DEATH_PENDING true
9581 ++#define HANDLE_DEATH_LIST false
9582 ++
9583 + /*
9584 + * Process a futex-list entry, check whether it's owned by the
9585 + * dying task, and do notification if so:
9586 + */
9587 +-int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
9588 ++static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
9589 ++ bool pi, bool pending_op)
9590 + {
9591 + u32 uval, uninitialized_var(nval), mval;
9592 + int err;
9593 +@@ -3471,6 +3479,42 @@ retry:
9594 + if (get_user(uval, uaddr))
9595 + return -1;
9596 +
9597 ++ /*
9598 ++ * Special case for regular (non PI) futexes. The unlock path in
9599 ++ * user space has two race scenarios:
9600 ++ *
9601 ++ * 1. The unlock path releases the user space futex value and
9602 ++ * before it can execute the futex() syscall to wake up
9603 ++ * waiters it is killed.
9604 ++ *
9605 ++ * 2. A woken up waiter is killed before it can acquire the
9606 ++ * futex in user space.
9607 ++ *
9608 ++ * In both cases the TID validation below prevents a wakeup of
9609 ++ * potential waiters which can cause these waiters to block
9610 ++ * forever.
9611 ++ *
9612 ++ * In both cases the following conditions are met:
9613 ++ *
9614 ++ * 1) task->robust_list->list_op_pending != NULL
9615 ++ * @pending_op == true
9616 ++ * 2) User space futex value == 0
9617 ++ * 3) Regular futex: @pi == false
9618 ++ *
9619 ++ * If these conditions are met, it is safe to attempt waking up a
9620 ++ * potential waiter without touching the user space futex value and
9621 ++ * trying to set the OWNER_DIED bit. The user space futex value is
9622 ++ * uncontended and the rest of the user space mutex state is
9623 ++ * consistent, so a woken waiter will just take over the
9624 ++ * uncontended futex. Setting the OWNER_DIED bit would create
9625 ++ * inconsistent state and malfunction of the user space owner died
9626 ++ * handling.
9627 ++ */
9628 ++ if (pending_op && !pi && !uval) {
9629 ++ futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
9630 ++ return 0;
9631 ++ }
9632 ++
9633 + if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr))
9634 + return 0;
9635 +
9636 +@@ -3590,10 +3634,11 @@ void exit_robust_list(struct task_struct *curr)
9637 + * A pending lock might already be on the list, so
9638 + * don't process it twice:
9639 + */
9640 +- if (entry != pending)
9641 ++ if (entry != pending) {
9642 + if (handle_futex_death((void __user *)entry + futex_offset,
9643 +- curr, pi))
9644 ++ curr, pi, HANDLE_DEATH_LIST))
9645 + return;
9646 ++ }
9647 + if (rc)
9648 + return;
9649 + entry = next_entry;
9650 +@@ -3607,9 +3652,10 @@ void exit_robust_list(struct task_struct *curr)
9651 + cond_resched();
9652 + }
9653 +
9654 +- if (pending)
9655 ++ if (pending) {
9656 + handle_futex_death((void __user *)pending + futex_offset,
9657 +- curr, pip);
9658 ++ curr, pip, HANDLE_DEATH_PENDING);
9659 ++ }
9660 + }
9661 +
9662 + long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
9663 +@@ -3707,6 +3753,193 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
9664 + return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
9665 + }
9666 +
9667 ++#ifdef CONFIG_COMPAT
9668 ++/*
9669 ++ * Fetch a robust-list pointer. Bit 0 signals PI futexes:
9670 ++ */
9671 ++static inline int
9672 ++compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
9673 ++ compat_uptr_t __user *head, unsigned int *pi)
9674 ++{
9675 ++ if (get_user(*uentry, head))
9676 ++ return -EFAULT;
9677 ++
9678 ++ *entry = compat_ptr((*uentry) & ~1);
9679 ++ *pi = (unsigned int)(*uentry) & 1;
9680 ++
9681 ++ return 0;
9682 ++}
9683 ++
9684 ++static void __user *futex_uaddr(struct robust_list __user *entry,
9685 ++ compat_long_t futex_offset)
9686 ++{
9687 ++ compat_uptr_t base = ptr_to_compat(entry);
9688 ++ void __user *uaddr = compat_ptr(base + futex_offset);
9689 ++
9690 ++ return uaddr;
9691 ++}
9692 ++
9693 ++/*
9694 ++ * Walk curr->robust_list (very carefully, it's a userspace list!)
9695 ++ * and mark any locks found there dead, and notify any waiters.
9696 ++ *
9697 ++ * We silently return on any sign of list-walking problem.
9698 ++ */
9699 ++void compat_exit_robust_list(struct task_struct *curr)
9700 ++{
9701 ++ struct compat_robust_list_head __user *head = curr->compat_robust_list;
9702 ++ struct robust_list __user *entry, *next_entry, *pending;
9703 ++ unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
9704 ++ unsigned int uninitialized_var(next_pi);
9705 ++ compat_uptr_t uentry, next_uentry, upending;
9706 ++ compat_long_t futex_offset;
9707 ++ int rc;
9708 ++
9709 ++ if (!futex_cmpxchg_enabled)
9710 ++ return;
9711 ++
9712 ++ /*
9713 ++ * Fetch the list head (which was registered earlier, via
9714 ++ * sys_set_robust_list()):
9715 ++ */
9716 ++ if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
9717 ++ return;
9718 ++ /*
9719 ++ * Fetch the relative futex offset:
9720 ++ */
9721 ++ if (get_user(futex_offset, &head->futex_offset))
9722 ++ return;
9723 ++ /*
9724 ++ * Fetch any possibly pending lock-add first, and handle it
9725 ++ * if it exists:
9726 ++ */
9727 ++ if (compat_fetch_robust_entry(&upending, &pending,
9728 ++ &head->list_op_pending, &pip))
9729 ++ return;
9730 ++
9731 ++ next_entry = NULL; /* avoid warning with gcc */
9732 ++ while (entry != (struct robust_list __user *) &head->list) {
9733 ++ /*
9734 ++ * Fetch the next entry in the list before calling
9735 ++ * handle_futex_death:
9736 ++ */
9737 ++ rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
9738 ++ (compat_uptr_t __user *)&entry->next, &next_pi);
9739 ++ /*
9740 ++ * A pending lock might already be on the list, so
9741 ++ * don't process it twice:
9742 ++ */
9743 ++ if (entry != pending) {
9744 ++ void __user *uaddr = futex_uaddr(entry, futex_offset);
9745 ++
9746 ++ if (handle_futex_death(uaddr, curr, pi,
9747 ++ HANDLE_DEATH_LIST))
9748 ++ return;
9749 ++ }
9750 ++ if (rc)
9751 ++ return;
9752 ++ uentry = next_uentry;
9753 ++ entry = next_entry;
9754 ++ pi = next_pi;
9755 ++ /*
9756 ++ * Avoid excessively long or circular lists:
9757 ++ */
9758 ++ if (!--limit)
9759 ++ break;
9760 ++
9761 ++ cond_resched();
9762 ++ }
9763 ++ if (pending) {
9764 ++ void __user *uaddr = futex_uaddr(pending, futex_offset);
9765 ++
9766 ++ handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING);
9767 ++ }
9768 ++}
9769 ++
9770 ++COMPAT_SYSCALL_DEFINE2(set_robust_list,
9771 ++ struct compat_robust_list_head __user *, head,
9772 ++ compat_size_t, len)
9773 ++{
9774 ++ if (!futex_cmpxchg_enabled)
9775 ++ return -ENOSYS;
9776 ++
9777 ++ if (unlikely(len != sizeof(*head)))
9778 ++ return -EINVAL;
9779 ++
9780 ++ current->compat_robust_list = head;
9781 ++
9782 ++ return 0;
9783 ++}
9784 ++
9785 ++COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
9786 ++ compat_uptr_t __user *, head_ptr,
9787 ++ compat_size_t __user *, len_ptr)
9788 ++{
9789 ++ struct compat_robust_list_head __user *head;
9790 ++ unsigned long ret;
9791 ++ struct task_struct *p;
9792 ++
9793 ++ if (!futex_cmpxchg_enabled)
9794 ++ return -ENOSYS;
9795 ++
9796 ++ rcu_read_lock();
9797 ++
9798 ++ ret = -ESRCH;
9799 ++ if (!pid)
9800 ++ p = current;
9801 ++ else {
9802 ++ p = find_task_by_vpid(pid);
9803 ++ if (!p)
9804 ++ goto err_unlock;
9805 ++ }
9806 ++
9807 ++ ret = -EPERM;
9808 ++ if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
9809 ++ goto err_unlock;
9810 ++
9811 ++ head = p->compat_robust_list;
9812 ++ rcu_read_unlock();
9813 ++
9814 ++ if (put_user(sizeof(*head), len_ptr))
9815 ++ return -EFAULT;
9816 ++ return put_user(ptr_to_compat(head), head_ptr);
9817 ++
9818 ++err_unlock:
9819 ++ rcu_read_unlock();
9820 ++
9821 ++ return ret;
9822 ++}
9823 ++
9824 ++COMPAT_SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
9825 ++ struct old_timespec32 __user *, utime, u32 __user *, uaddr2,
9826 ++ u32, val3)
9827 ++{
9828 ++ struct timespec ts;
9829 ++ ktime_t t, *tp = NULL;
9830 ++ int val2 = 0;
9831 ++ int cmd = op & FUTEX_CMD_MASK;
9832 ++
9833 ++ if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
9834 ++ cmd == FUTEX_WAIT_BITSET ||
9835 ++ cmd == FUTEX_WAIT_REQUEUE_PI)) {
9836 ++ if (compat_get_timespec(&ts, utime))
9837 ++ return -EFAULT;
9838 ++ if (!timespec_valid(&ts))
9839 ++ return -EINVAL;
9840 ++
9841 ++ t = timespec_to_ktime(ts);
9842 ++ if (cmd == FUTEX_WAIT)
9843 ++ t = ktime_add_safe(ktime_get(), t);
9844 ++ tp = &t;
9845 ++ }
9846 ++ if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
9847 ++ cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
9848 ++ val2 = (int) (unsigned long) utime;
9849 ++
9850 ++ return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
9851 ++}
9852 ++#endif /* CONFIG_COMPAT */
9853 ++
9854 + static void __init futex_detect_cmpxchg(void)
9855 + {
9856 + #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
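
The heart of the futex change above is the new pending_op case in handle_futex_death(): when an exiting task's list_op_pending points at an uncontended (zero) non-PI futex, the kernel now wakes one waiter instead of doing nothing, closing the lost-wakeup races described in the comment. The decision logic, reduced to a compilable userspace model (side effects become printfs; FUTEX_TID_MASK is the real uapi value):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FUTEX_TID_MASK 0x3fffffff  /* as in the futex uapi */

static int futex_death(uint32_t uval, uint32_t dying_tid, bool pi,
                       bool pending_op)
{
    /* New case: list_op_pending names an uncontended non-PI futex.
     * Wake a potential waiter but leave the value alone; setting
     * OWNER_DIED on an unowned futex would corrupt the mutex state. */
    if (pending_op && !pi && !uval) {
        puts("wake one waiter, futex value untouched");
        return 0;
    }
    if ((uval & FUTEX_TID_MASK) != dying_tid)
        return 0;                       /* not owned by the dying task */
    puts("set OWNER_DIED and wake a waiter");
    return 0;
}

int main(void)
{
    futex_death(0, 1234, false, true);      /* released-but-unwoken race */
    futex_death(1234, 1234, false, false);  /* ordinary robust-list entry */
    return 0;
}
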
9857 +diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
9858 +deleted file mode 100644
9859 +index 83f830acbb5f..000000000000
9860 +--- a/kernel/futex_compat.c
9861 ++++ /dev/null
9862 +@@ -1,202 +0,0 @@
9863 +-// SPDX-License-Identifier: GPL-2.0
9864 +-/*
9865 +- * linux/kernel/futex_compat.c
9866 +- *
9867 +- * Futex compatibililty routines.
9868 +- *
9869 +- * Copyright 2006, Red Hat, Inc., Ingo Molnar
9870 +- */
9871 +-
9872 +-#include <linux/linkage.h>
9873 +-#include <linux/compat.h>
9874 +-#include <linux/nsproxy.h>
9875 +-#include <linux/futex.h>
9876 +-#include <linux/ptrace.h>
9877 +-#include <linux/syscalls.h>
9878 +-
9879 +-#include <linux/uaccess.h>
9880 +-
9881 +-
9882 +-/*
9883 +- * Fetch a robust-list pointer. Bit 0 signals PI futexes:
9884 +- */
9885 +-static inline int
9886 +-fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
9887 +- compat_uptr_t __user *head, unsigned int *pi)
9888 +-{
9889 +- if (get_user(*uentry, head))
9890 +- return -EFAULT;
9891 +-
9892 +- *entry = compat_ptr((*uentry) & ~1);
9893 +- *pi = (unsigned int)(*uentry) & 1;
9894 +-
9895 +- return 0;
9896 +-}
9897 +-
9898 +-static void __user *futex_uaddr(struct robust_list __user *entry,
9899 +- compat_long_t futex_offset)
9900 +-{
9901 +- compat_uptr_t base = ptr_to_compat(entry);
9902 +- void __user *uaddr = compat_ptr(base + futex_offset);
9903 +-
9904 +- return uaddr;
9905 +-}
9906 +-
9907 +-/*
9908 +- * Walk curr->robust_list (very carefully, it's a userspace list!)
9909 +- * and mark any locks found there dead, and notify any waiters.
9910 +- *
9911 +- * We silently return on any sign of list-walking problem.
9912 +- */
9913 +-void compat_exit_robust_list(struct task_struct *curr)
9914 +-{
9915 +- struct compat_robust_list_head __user *head = curr->compat_robust_list;
9916 +- struct robust_list __user *entry, *next_entry, *pending;
9917 +- unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
9918 +- unsigned int uninitialized_var(next_pi);
9919 +- compat_uptr_t uentry, next_uentry, upending;
9920 +- compat_long_t futex_offset;
9921 +- int rc;
9922 +-
9923 +- if (!futex_cmpxchg_enabled)
9924 +- return;
9925 +-
9926 +- /*
9927 +- * Fetch the list head (which was registered earlier, via
9928 +- * sys_set_robust_list()):
9929 +- */
9930 +- if (fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
9931 +- return;
9932 +- /*
9933 +- * Fetch the relative futex offset:
9934 +- */
9935 +- if (get_user(futex_offset, &head->futex_offset))
9936 +- return;
9937 +- /*
9938 +- * Fetch any possibly pending lock-add first, and handle it
9939 +- * if it exists:
9940 +- */
9941 +- if (fetch_robust_entry(&upending, &pending,
9942 +- &head->list_op_pending, &pip))
9943 +- return;
9944 +-
9945 +- next_entry = NULL; /* avoid warning with gcc */
9946 +- while (entry != (struct robust_list __user *) &head->list) {
9947 +- /*
9948 +- * Fetch the next entry in the list before calling
9949 +- * handle_futex_death:
9950 +- */
9951 +- rc = fetch_robust_entry(&next_uentry, &next_entry,
9952 +- (compat_uptr_t __user *)&entry->next, &next_pi);
9953 +- /*
9954 +- * A pending lock might already be on the list, so
9955 +- * dont process it twice:
9956 +- */
9957 +- if (entry != pending) {
9958 +- void __user *uaddr = futex_uaddr(entry, futex_offset);
9959 +-
9960 +- if (handle_futex_death(uaddr, curr, pi))
9961 +- return;
9962 +- }
9963 +- if (rc)
9964 +- return;
9965 +- uentry = next_uentry;
9966 +- entry = next_entry;
9967 +- pi = next_pi;
9968 +- /*
9969 +- * Avoid excessively long or circular lists:
9970 +- */
9971 +- if (!--limit)
9972 +- break;
9973 +-
9974 +- cond_resched();
9975 +- }
9976 +- if (pending) {
9977 +- void __user *uaddr = futex_uaddr(pending, futex_offset);
9978 +-
9979 +- handle_futex_death(uaddr, curr, pip);
9980 +- }
9981 +-}
9982 +-
9983 +-COMPAT_SYSCALL_DEFINE2(set_robust_list,
9984 +- struct compat_robust_list_head __user *, head,
9985 +- compat_size_t, len)
9986 +-{
9987 +- if (!futex_cmpxchg_enabled)
9988 +- return -ENOSYS;
9989 +-
9990 +- if (unlikely(len != sizeof(*head)))
9991 +- return -EINVAL;
9992 +-
9993 +- current->compat_robust_list = head;
9994 +-
9995 +- return 0;
9996 +-}
9997 +-
9998 +-COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
9999 +- compat_uptr_t __user *, head_ptr,
10000 +- compat_size_t __user *, len_ptr)
10001 +-{
10002 +- struct compat_robust_list_head __user *head;
10003 +- unsigned long ret;
10004 +- struct task_struct *p;
10005 +-
10006 +- if (!futex_cmpxchg_enabled)
10007 +- return -ENOSYS;
10008 +-
10009 +- rcu_read_lock();
10010 +-
10011 +- ret = -ESRCH;
10012 +- if (!pid)
10013 +- p = current;
10014 +- else {
10015 +- p = find_task_by_vpid(pid);
10016 +- if (!p)
10017 +- goto err_unlock;
10018 +- }
10019 +-
10020 +- ret = -EPERM;
10021 +- if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
10022 +- goto err_unlock;
10023 +-
10024 +- head = p->compat_robust_list;
10025 +- rcu_read_unlock();
10026 +-
10027 +- if (put_user(sizeof(*head), len_ptr))
10028 +- return -EFAULT;
10029 +- return put_user(ptr_to_compat(head), head_ptr);
10030 +-
10031 +-err_unlock:
10032 +- rcu_read_unlock();
10033 +-
10034 +- return ret;
10035 +-}
10036 +-
10037 +-COMPAT_SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
10038 +- struct compat_timespec __user *, utime, u32 __user *, uaddr2,
10039 +- u32, val3)
10040 +-{
10041 +- struct timespec ts;
10042 +- ktime_t t, *tp = NULL;
10043 +- int val2 = 0;
10044 +- int cmd = op & FUTEX_CMD_MASK;
10045 +-
10046 +- if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
10047 +- cmd == FUTEX_WAIT_BITSET ||
10048 +- cmd == FUTEX_WAIT_REQUEUE_PI)) {
10049 +- if (compat_get_timespec(&ts, utime))
10050 +- return -EFAULT;
10051 +- if (!timespec_valid(&ts))
10052 +- return -EINVAL;
10053 +-
10054 +- t = timespec_to_ktime(ts);
10055 +- if (cmd == FUTEX_WAIT)
10056 +- t = ktime_add_safe(ktime_get(), t);
10057 +- tp = &t;
10058 +- }
10059 +- if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
10060 +- cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
10061 +- val2 = (int) (unsigned long) utime;
10062 +-
10063 +- return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
10064 +-}
10065 +diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
10066 +index 92337703ca9f..30cc217b8631 100644
10067 +--- a/kernel/irq/matrix.c
10068 ++++ b/kernel/irq/matrix.c
10069 +@@ -8,7 +8,7 @@
10070 + #include <linux/cpu.h>
10071 + #include <linux/irq.h>
10072 +
10073 +-#define IRQ_MATRIX_SIZE (BITS_TO_LONGS(IRQ_MATRIX_BITS) * sizeof(unsigned long))
10074 ++#define IRQ_MATRIX_SIZE (BITS_TO_LONGS(IRQ_MATRIX_BITS))
10075 +
10076 + struct cpumap {
10077 + unsigned int available;
10078 +diff --git a/kernel/panic.c b/kernel/panic.c
10079 +index 72e001e3753e..8138a676fb7d 100644
10080 +--- a/kernel/panic.c
10081 ++++ b/kernel/panic.c
10082 +@@ -636,7 +636,7 @@ device_initcall(register_warn_debugfs);
10083 + */
10084 + __visible void __stack_chk_fail(void)
10085 + {
10086 +- panic("stack-protector: Kernel stack is corrupted in: %pB\n",
10087 ++ panic("stack-protector: Kernel stack is corrupted in: %pB",
10088 + __builtin_return_address(0));
10089 + }
10090 + EXPORT_SYMBOL(__stack_chk_fail);
10091 +diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
10092 +index c7b3d5489937..845efadaf7ec 100644
10093 +--- a/kernel/printk/printk.c
10094 ++++ b/kernel/printk/printk.c
10095 +@@ -1105,7 +1105,7 @@ void __init setup_log_buf(int early)
10096 + {
10097 + unsigned long flags;
10098 + char *new_log_buf;
10099 +- int free;
10100 ++ unsigned int free;
10101 +
10102 + if (log_buf != __log_buf)
10103 + return;
10104 +@@ -1901,8 +1901,9 @@ asmlinkage int vprintk_emit(int facility, int level,
10105 + const char *fmt, va_list args)
10106 + {
10107 + int printed_len;
10108 +- bool in_sched = false;
10109 ++ bool in_sched = false, pending_output;
10110 + unsigned long flags;
10111 ++ u64 curr_log_seq;
10112 +
10113 + if (level == LOGLEVEL_SCHED) {
10114 + level = LOGLEVEL_DEFAULT;
10115 +@@ -1914,11 +1915,13 @@ asmlinkage int vprintk_emit(int facility, int level,
10116 +
10117 + /* This stops the holder of console_sem just where we want him */
10118 + logbuf_lock_irqsave(flags);
10119 ++ curr_log_seq = log_next_seq;
10120 + printed_len = vprintk_store(facility, level, dict, dictlen, fmt, args);
10121 ++ pending_output = (curr_log_seq != log_next_seq);
10122 + logbuf_unlock_irqrestore(flags);
10123 +
10124 + /* If called from the scheduler, we can not call up(). */
10125 +- if (!in_sched) {
10126 ++ if (!in_sched && pending_output) {
10127 + /*
10128 + * Disable preemption to avoid being preempted while holding
10129 + * console_sem which would prevent anyone from printing to
10130 +@@ -1935,7 +1938,8 @@ asmlinkage int vprintk_emit(int facility, int level,
10131 + preempt_enable();
10132 + }
10133 +
10134 +- wake_up_klogd();
10135 ++ if (pending_output)
10136 ++ wake_up_klogd();
10137 + return printed_len;
10138 + }
10139 + EXPORT_SYMBOL(vprintk_emit);
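
The vprintk_emit() hunks above sample log_next_seq around vprintk_store() and only trigger the console flush and klogd wakeup when a record was actually appended. The shape of the change, as a small standalone model (store_message() is an invented stand-in; the real function appends to the printk ring buffer):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t log_next_seq;  /* sequence number of the next record */

/* only bumps the sequence when a record is really appended; an empty
 * message models a zero-length or merely-buffered write */
static int store_message(const char *msg)
{
    if (!msg || !*msg)
        return 0;
    log_next_seq++;
    return 1;
}

static void emit(const char *msg)
{
    uint64_t curr_log_seq = log_next_seq;

    store_message(msg);
    bool pending_output = (curr_log_seq != log_next_seq);

    if (pending_output)
        puts("flush consoles and wake klogd");
    else
        puts("nothing new: skip console_sem and klogd");
}

int main(void)
{
    emit("hello");
    emit("");
    return 0;
}
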
10140 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
10141 +index e5e8f6721872..f77fcd37b226 100644
10142 +--- a/kernel/sched/fair.c
10143 ++++ b/kernel/sched/fair.c
10144 +@@ -8819,13 +8819,22 @@ out_all_pinned:
10145 + sd->nr_balance_failed = 0;
10146 +
10147 + out_one_pinned:
10148 ++ ld_moved = 0;
10149 ++
10150 ++ /*
10151 ++ * idle_balance() disregards balance intervals, so we could repeatedly
10152 ++ * reach this code, which would lead to balance_interval skyrocketing
10153 ++ * in a short amount of time. Skip the balance_interval increase logic
10154 ++ * to avoid that.
10155 ++ */
10156 ++ if (env.idle == CPU_NEWLY_IDLE)
10157 ++ goto out;
10158 ++
10159 + /* tune up the balancing interval */
10160 + if (((env.flags & LBF_ALL_PINNED) &&
10161 + sd->balance_interval < MAX_PINNED_INTERVAL) ||
10162 + (sd->balance_interval < sd->max_interval))
10163 + sd->balance_interval *= 2;
10164 +-
10165 +- ld_moved = 0;
10166 + out:
10167 + return ld_moved;
10168 + }
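
The fair.c fix above moves the ld_moved reset up and skips the interval-doubling for CPU_NEWLY_IDLE balancing, since idle balancing ignores intervals and would otherwise double balance_interval on every pass. A toy model of the before/after growth (the numbers are made up; CPU_NEWLY_IDLE is reduced to a flag):

#include <stdio.h>

#define MAX_INTERVAL 1024  /* stand-in for sd->max_interval */

static int one_pinned(int balance_interval, int newly_idle)
{
    if (newly_idle)
        return balance_interval;    /* new behaviour: leave it alone */
    if (balance_interval < MAX_INTERVAL)
        balance_interval *= 2;      /* periodic balance may back off */
    return balance_interval;
}

int main(void)
{
    int interval = 8;

    for (int i = 0; i < 5; i++)
        interval = one_pinned(interval, 1);
    printf("after 5 newly-idle passes: %d\n", interval);  /* still 8 */

    for (int i = 0; i < 5; i++)
        interval = one_pinned(interval, 0);
    printf("after 5 periodic passes:   %d\n", interval);  /* 256 */
    return 0;
}
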
10169 +diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
10170 +index c0a751464971..74b694392f2f 100644
10171 +--- a/kernel/sched/topology.c
10172 ++++ b/kernel/sched/topology.c
10173 +@@ -1329,7 +1329,7 @@ void sched_init_numa(void)
10174 + int level = 0;
10175 + int i, j, k;
10176 +
10177 +- sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
10178 ++ sched_domains_numa_distance = kzalloc(sizeof(int) * (nr_node_ids + 1), GFP_KERNEL);
10179 + if (!sched_domains_numa_distance)
10180 + return;
10181 +
10182 +diff --git a/lib/bitmap.c b/lib/bitmap.c
10183 +index 2fd07f6df0b8..c4ca9ceb09fe 100644
10184 +--- a/lib/bitmap.c
10185 ++++ b/lib/bitmap.c
10186 +@@ -13,6 +13,7 @@
10187 + #include <linux/bitops.h>
10188 + #include <linux/bug.h>
10189 + #include <linux/kernel.h>
10190 ++#include <linux/mm.h>
10191 + #include <linux/slab.h>
10192 + #include <linux/string.h>
10193 + #include <linux/uaccess.h>
10194 +@@ -466,14 +467,15 @@ EXPORT_SYMBOL(bitmap_parse_user);
10195 + * ranges if list is specified or hex digits grouped into comma-separated
10196 + * sets of 8 digits/set. Returns the number of characters written to buf.
10197 + *
10198 +- * It is assumed that @buf is a pointer into a PAGE_SIZE area and that
10199 +- * sufficient storage remains at @buf to accommodate the
10200 +- * bitmap_print_to_pagebuf() output.
10201 ++ * It is assumed that @buf is a pointer into a PAGE_SIZE, page-aligned
10202 ++ * area and that sufficient storage remains at @buf to accommodate the
10203 ++ * bitmap_print_to_pagebuf() output. Returns the number of characters
10204 ++ * actually printed to @buf, excluding terminating '\0'.
10205 + */
10206 + int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp,
10207 + int nmaskbits)
10208 + {
10209 +- ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
10210 ++ ptrdiff_t len = PAGE_SIZE - offset_in_page(buf);
10211 + int n = 0;
10212 +
10213 + if (len > 1)
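
The lib/bitmap.c one-liner above replaces a mis-built PTR_ALIGN() expression with PAGE_SIZE - offset_in_page(buf). The old form added PAGE_SIZE - 1 before rounding up, so it over-counted the remaining space by a whole page for most unaligned buffers; the two only agree when buf is at most one byte into its page. A standalone comparison:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ul

/* the removed expression: PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf */
static unsigned long len_old(uintptr_t buf)
{
    uintptr_t p = buf + PAGE_SIZE - 1;
    uintptr_t aligned = (p + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
    return aligned - buf;
}

/* the replacement: PAGE_SIZE - offset_in_page(buf) */
static unsigned long len_new(uintptr_t buf)
{
    return PAGE_SIZE - (buf & (PAGE_SIZE - 1));
}

int main(void)
{
    printf("mid-page buf:     old=%lu new=%lu\n",
           len_old(0x1100), len_new(0x1100));   /* 7936 vs 3840 */
    printf("page-aligned buf: old=%lu new=%lu\n",
           len_old(0x1000), len_new(0x1000));   /* 4096 vs 4096 */
    return 0;
}
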
10214 +diff --git a/mm/gup_benchmark.c b/mm/gup_benchmark.c
10215 +index 7405c9d89d65..7e6f2d2dafb5 100644
10216 +--- a/mm/gup_benchmark.c
10217 ++++ b/mm/gup_benchmark.c
10218 +@@ -23,6 +23,9 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
10219 + int nr;
10220 + struct page **pages;
10221 +
10222 ++ if (gup->size > ULONG_MAX)
10223 ++ return -EINVAL;
10224 ++
10225 + nr_pages = gup->size / PAGE_SIZE;
10226 + pages = kvcalloc(nr_pages, sizeof(void *), GFP_KERNEL);
10227 + if (!pages)
10228 +diff --git a/mm/ksm.c b/mm/ksm.c
10229 +index 5b0894b45ee5..b3ea0f0316eb 100644
10230 +--- a/mm/ksm.c
10231 ++++ b/mm/ksm.c
10232 +@@ -870,13 +870,13 @@ static int remove_stable_node(struct stable_node *stable_node)
10233 + return 0;
10234 + }
10235 +
10236 +- if (WARN_ON_ONCE(page_mapped(page))) {
10237 +- /*
10238 +- * This should not happen: but if it does, just refuse to let
10239 +- * merge_across_nodes be switched - there is no need to panic.
10240 +- */
10241 +- err = -EBUSY;
10242 +- } else {
10243 ++ /*
10244 ++ * Page could still be mapped if this races with __mmput() running in
10245 ++ * between ksm_exit() and exit_mmap(). Just refuse to let
10246 ++ * merge_across_nodes/max_page_sharing be switched.
10247 ++ */
10248 ++ err = -EBUSY;
10249 ++ if (!page_mapped(page)) {
10250 + /*
10251 + * The stable node did not yet appear stale to get_ksm_page(),
10252 + * since that allows for an unmapped ksm page to be recognized
10253 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
10254 +index 5af38d8a9afd..3a3d109dce21 100644
10255 +--- a/mm/memcontrol.c
10256 ++++ b/mm/memcontrol.c
10257 +@@ -2678,7 +2678,7 @@ int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
10258 + struct mem_cgroup *memcg;
10259 + int ret = 0;
10260 +
10261 +- if (memcg_kmem_bypass())
10262 ++ if (mem_cgroup_disabled() || memcg_kmem_bypass())
10263 + return 0;
10264 +
10265 + memcg = get_mem_cgroup_from_current();
10266 +diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
10267 +index 7965112eb063..413f6709039a 100644
10268 +--- a/mm/memory_hotplug.c
10269 ++++ b/mm/memory_hotplug.c
10270 +@@ -321,12 +321,8 @@ static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
10271 + unsigned long start_pfn,
10272 + unsigned long end_pfn)
10273 + {
10274 +- struct mem_section *ms;
10275 +-
10276 + for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) {
10277 +- ms = __pfn_to_section(start_pfn);
10278 +-
10279 +- if (unlikely(!valid_section(ms)))
10280 ++ if (unlikely(!pfn_to_online_page(start_pfn)))
10281 + continue;
10282 +
10283 + if (unlikely(pfn_to_nid(start_pfn) != nid))
10284 +@@ -346,15 +342,12 @@ static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
10285 + unsigned long start_pfn,
10286 + unsigned long end_pfn)
10287 + {
10288 +- struct mem_section *ms;
10289 + unsigned long pfn;
10290 +
10291 + /* pfn is the end pfn of a memory section. */
10292 + pfn = end_pfn - 1;
10293 + for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) {
10294 +- ms = __pfn_to_section(pfn);
10295 +-
10296 +- if (unlikely(!valid_section(ms)))
10297 ++ if (unlikely(!pfn_to_online_page(pfn)))
10298 + continue;
10299 +
10300 + if (unlikely(pfn_to_nid(pfn) != nid))
10301 +@@ -376,7 +369,6 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
10302 + unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
10303 + unsigned long zone_end_pfn = z;
10304 + unsigned long pfn;
10305 +- struct mem_section *ms;
10306 + int nid = zone_to_nid(zone);
10307 +
10308 + zone_span_writelock(zone);
10309 +@@ -414,9 +406,7 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
10310 + */
10311 + pfn = zone_start_pfn;
10312 + for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) {
10313 +- ms = __pfn_to_section(pfn);
10314 +-
10315 +- if (unlikely(!valid_section(ms)))
10316 ++ if (unlikely(!pfn_to_online_page(pfn)))
10317 + continue;
10318 +
10319 + if (page_zone(pfn_to_page(pfn)) != zone)
10320 +@@ -472,6 +462,16 @@ static void __remove_zone(struct zone *zone, unsigned long start_pfn)
10321 + int nr_pages = PAGES_PER_SECTION;
10322 + unsigned long flags;
10323 +
10324 ++#ifdef CONFIG_ZONE_DEVICE
10325 ++ /*
10326 ++ * Zone shrinking code cannot properly deal with ZONE_DEVICE. So
10327 ++ * we will not try to shrink the zones - which is okay as
10328 ++ * set_zone_contiguous() cannot deal with ZONE_DEVICE either way.
10329 ++ */
10330 ++ if (zone_idx(zone) == ZONE_DEVICE)
10331 ++ return;
10332 ++#endif
10333 ++
10334 + pgdat_resize_lock(zone->zone_pgdat, &flags);
10335 + shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
10336 + update_pgdat_span(pgdat);
10337 +@@ -846,7 +846,6 @@ static struct zone * __meminit move_pfn_range(int online_type, int nid,
10338 + return zone;
10339 + }
10340 +
10341 +-/* Must be protected by mem_hotplug_begin() or a device_lock */
10342 + int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
10343 + {
10344 + unsigned long flags;
10345 +@@ -858,6 +857,8 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
10346 + struct memory_notify arg;
10347 + struct memory_block *mem;
10348 +
10349 ++ mem_hotplug_begin();
10350 ++
10351 + /*
10352 + * We can't use pfn_to_nid() because nid might be stored in struct page
10353 + * which is not yet initialized. Instead, we find nid from memory block.
10354 +@@ -923,6 +924,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
10355 +
10356 + if (onlined_pages)
10357 + memory_notify(MEM_ONLINE, &arg);
10358 ++ mem_hotplug_done();
10359 + return 0;
10360 +
10361 + failed_addition:
10362 +@@ -930,6 +932,7 @@ failed_addition:
10363 + (unsigned long long) pfn << PAGE_SHIFT,
10364 + (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
10365 + memory_notify(MEM_CANCEL_ONLINE, &arg);
10366 ++ mem_hotplug_done();
10367 + return ret;
10368 + }
10369 + #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
10370 +@@ -1077,7 +1080,12 @@ static int online_memory_block(struct memory_block *mem, void *arg)
10371 + return device_online(&mem->dev);
10372 + }
10373 +
10374 +-/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
10375 ++/*
10376 ++ * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
10377 ++ * and online/offline operations (triggered e.g. by sysfs).
10378 ++ *
10379 ++ * we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG
10380 ++ */
10381 + int __ref add_memory_resource(int nid, struct resource *res, bool online)
10382 + {
10383 + u64 start, size;
10384 +@@ -1129,26 +1137,26 @@ int __ref add_memory_resource(int nid, struct resource *res, bool online)
10385 + /* create new memmap entry */
10386 + firmware_map_add_hotplug(start, start + size, "System RAM");
10387 +
10388 ++ /* device_online() will take the lock when calling online_pages() */
10389 ++ mem_hotplug_done();
10390 ++
10391 + /* online pages if requested */
10392 + if (online)
10393 + walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1),
10394 + NULL, online_memory_block);
10395 +
10396 +- goto out;
10397 +-
10398 ++ return ret;
10399 + error:
10400 + /* rollback pgdat allocation and others */
10401 + if (new_node)
10402 + rollback_node_hotadd(nid);
10403 + memblock_remove(start, size);
10404 +-
10405 +-out:
10406 + mem_hotplug_done();
10407 + return ret;
10408 + }
10409 +-EXPORT_SYMBOL_GPL(add_memory_resource);
10410 +
10411 +-int __ref add_memory(int nid, u64 start, u64 size)
10412 ++/* requires device_hotplug_lock, see add_memory_resource() */
10413 ++int __ref __add_memory(int nid, u64 start, u64 size)
10414 + {
10415 + struct resource *res;
10416 + int ret;
10417 +@@ -1162,6 +1170,17 @@ int __ref add_memory(int nid, u64 start, u64 size)
10418 + release_memory_resource(res);
10419 + return ret;
10420 + }
10421 ++
10422 ++int add_memory(int nid, u64 start, u64 size)
10423 ++{
10424 ++ int rc;
10425 ++
10426 ++ lock_device_hotplug();
10427 ++ rc = __add_memory(nid, start, size);
10428 ++ unlock_device_hotplug();
10429 ++
10430 ++ return rc;
10431 ++}
10432 + EXPORT_SYMBOL_GPL(add_memory);
10433 +
10434 + #ifdef CONFIG_MEMORY_HOTREMOVE
10435 +@@ -1598,10 +1617,16 @@ static int __ref __offline_pages(unsigned long start_pfn,
10436 + return -EINVAL;
10437 + if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
10438 + return -EINVAL;
10439 ++
10440 ++ mem_hotplug_begin();
10441 ++
10442 + /* This makes hotplug much easier...and readable.
10443 + We assume this for now. */
10444 +- if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
10445 ++ if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start,
10446 ++ &valid_end)) {
10447 ++ mem_hotplug_done();
10448 + return -EINVAL;
10449 ++ }
10450 +
10451 + zone = page_zone(pfn_to_page(valid_start));
10452 + node = zone_to_nid(zone);
10453 +@@ -1610,8 +1635,10 @@ static int __ref __offline_pages(unsigned long start_pfn,
10454 + /* set above range as isolated */
10455 + ret = start_isolate_page_range(start_pfn, end_pfn,
10456 + MIGRATE_MOVABLE, true);
10457 +- if (ret)
10458 ++ if (ret) {
10459 ++ mem_hotplug_done();
10460 + return ret;
10461 ++ }
10462 +
10463 + arg.start_pfn = start_pfn;
10464 + arg.nr_pages = nr_pages;
10465 +@@ -1682,6 +1709,7 @@ repeat:
10466 + writeback_set_ratelimit();
10467 +
10468 + memory_notify(MEM_OFFLINE, &arg);
10469 ++ mem_hotplug_done();
10470 + return 0;
10471 +
10472 + failed_removal:
10473 +@@ -1691,10 +1719,10 @@ failed_removal:
10474 + memory_notify(MEM_CANCEL_OFFLINE, &arg);
10475 + /* pushback to free area */
10476 + undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
10477 ++ mem_hotplug_done();
10478 + return ret;
10479 + }
10480 +
10481 +-/* Must be protected by mem_hotplug_begin() or a device_lock */
10482 + int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
10483 + {
10484 + return __offline_pages(start_pfn, start_pfn + nr_pages);
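The mm/memory_hotplug.c hunks above all serve one locking rework: online_pages() and __offline_pages() now manage mem_hotplug_begin()/mem_hotplug_done() themselves and release the lock on every exit path, while the caller-held device_hotplug_lock becomes the outer serialization, with add_memory() turned into a self-locking wrapper around the new __add_memory(). A minimal userspace sketch of that locked-wrapper convention, with a pthread mutex standing in for device_hotplug_lock (only the function names come from the patch; everything else is illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t device_hotplug_lock = PTHREAD_MUTEX_INITIALIZER;

/* Double-underscore variant: the caller must already hold the lock. */
static int __add_memory(int nid, unsigned long start, unsigned long size)
{
        printf("node %d: add [%#lx, %#lx)\n", nid, start, start + size);
        return 0;
}

/* Convenience wrapper that takes the lock itself, matching the shape
 * the patch gives the exported add_memory(). */
static int add_memory(int nid, unsigned long start, unsigned long size)
{
        int rc;

        pthread_mutex_lock(&device_hotplug_lock);
        rc = __add_memory(nid, start, size);
        pthread_mutex_unlock(&device_hotplug_lock);
        return rc;
}

int main(void)
{
        return add_memory(1, 0x100000000UL, 1UL << 30);
}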
10485 +diff --git a/mm/migrate.c b/mm/migrate.c
10486 +index 0c48191a9036..4d3588c01203 100644
10487 +--- a/mm/migrate.c
10488 ++++ b/mm/migrate.c
10489 +@@ -2048,15 +2048,26 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
10490 + entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
10491 +
10492 + /*
10493 +- * Clear the old entry under pagetable lock and establish the new PTE.
10494 +- * Any parallel GUP will either observe the old page blocking on the
10495 +- * page lock, block on the page table lock or observe the new page.
10496 +- * The SetPageUptodate on the new page and page_add_new_anon_rmap
10497 +- * guarantee the copy is visible before the pagetable update.
10498 ++ * Overwrite the old entry under pagetable lock and establish
10499 ++ * the new PTE. Any parallel GUP will either observe the old
10500 ++ * page blocking on the page lock, block on the page table
10501 ++ * lock or observe the new page. The SetPageUptodate on the
10502 ++ * new page and page_add_new_anon_rmap guarantee the copy is
10503 ++ * visible before the pagetable update.
10504 + */
10505 + flush_cache_range(vma, mmun_start, mmun_end);
10506 + page_add_anon_rmap(new_page, vma, mmun_start, true);
10507 +- pmdp_huge_clear_flush_notify(vma, mmun_start, pmd);
10508 ++ /*
10509 ++ * At this point the pmd is numa/protnone (i.e. non present) and the TLB
10510 ++ * has already been flushed globally. So no TLB can be currently
10511 ++ * caching this non present pmd mapping. There's no need to clear the
10512 ++ * pmd before doing set_pmd_at(), nor to flush the TLB after
10513 ++ * set_pmd_at(). Clearing the pmd here would introduce a race
10514 ++ * condition against MADV_DONTNEED, because MADV_DONTNEED only holds the
10515 ++ * mmap_sem for reading. If the pmd is set to NULL at any given time,
10516 ++ * MADV_DONTNEED won't wait on the pmd lock and it'll skip clearing this
10517 ++ * pmd.
10518 ++ */
10519 + set_pmd_at(mm, mmun_start, pmd, entry);
10520 + update_mmu_cache_pmd(vma, address, &entry);
10521 +
10522 +@@ -2070,7 +2081,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
10523 + * No need to double call mmu_notifier->invalidate_range() callback as
10524 + * the above pmdp_huge_clear_flush_notify() did already call it.
10525 + */
10526 +- mmu_notifier_invalidate_range_only_end(mm, mmun_start, mmun_end);
10527 ++ mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
10528 +
10529 + /* Take an "isolate" reference and put new page on the LRU. */
10530 + get_page(new_page);
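The comment block added to mm/migrate.c above is easier to follow as a timeline. A schematic of the race that clearing the pmd would reopen (plain illustration, not kernel code):

/*
 *   migration path                      MADV_DONTNEED (zap path)
 *   --------------                      ------------------------
 *   pmdp_huge_clear_flush(pmd);
 *                                       if (pmd_none(*pmd))
 *                                               continue;   <- skips the
 *   set_pmd_at(mm, addr, pmd, entry);      range, because the pmd was
 *                                          transiently NULL here
 *
 * MADV_DONTNEED runs with mmap_sem held only for reading, so it can slip
 * into this window and leave the newly installed entry behind.  Writing
 * the new entry directly with set_pmd_at() is safe because the old pmd
 * was NUMA/protnone (non-present) and already flushed from all TLBs.
 */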
10531 +diff --git a/mm/page-writeback.c b/mm/page-writeback.c
10532 +index ea4fd3af3b4b..43df0c52e1cc 100644
10533 +--- a/mm/page-writeback.c
10534 ++++ b/mm/page-writeback.c
10535 +@@ -2149,6 +2149,13 @@ EXPORT_SYMBOL(tag_pages_for_writeback);
10536 + * not miss some pages (e.g., because some other process has cleared TOWRITE
10537 + * tag we set). The rule we follow is that TOWRITE tag can be cleared only
10538 + * by the process clearing the DIRTY tag (and submitting the page for IO).
10539 ++ *
10540 ++ * To avoid deadlocks between range_cyclic writeback and callers that hold
10541 ++ * pages in PageWriteback to aggregate IO until write_cache_pages() returns,
10542 ++ * we do not loop back to the start of the file. Doing so causes a page
10543 ++ * lock/page writeback access order inversion - we should only ever lock
10544 ++ * multiple pages in ascending page->index order, and looping back to the start
10545 ++ * of the file violates that rule and causes deadlocks.
10546 + */
10547 + int write_cache_pages(struct address_space *mapping,
10548 + struct writeback_control *wbc, writepage_t writepage,
10549 +@@ -2163,7 +2170,6 @@ int write_cache_pages(struct address_space *mapping,
10550 + pgoff_t index;
10551 + pgoff_t end; /* Inclusive */
10552 + pgoff_t done_index;
10553 +- int cycled;
10554 + int range_whole = 0;
10555 + int tag;
10556 +
10557 +@@ -2171,23 +2177,17 @@ int write_cache_pages(struct address_space *mapping,
10558 + if (wbc->range_cyclic) {
10559 + writeback_index = mapping->writeback_index; /* prev offset */
10560 + index = writeback_index;
10561 +- if (index == 0)
10562 +- cycled = 1;
10563 +- else
10564 +- cycled = 0;
10565 + end = -1;
10566 + } else {
10567 + index = wbc->range_start >> PAGE_SHIFT;
10568 + end = wbc->range_end >> PAGE_SHIFT;
10569 + if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
10570 + range_whole = 1;
10571 +- cycled = 1; /* ignore range_cyclic tests */
10572 + }
10573 + if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
10574 + tag = PAGECACHE_TAG_TOWRITE;
10575 + else
10576 + tag = PAGECACHE_TAG_DIRTY;
10577 +-retry:
10578 + if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
10579 + tag_pages_for_writeback(mapping, index, end);
10580 + done_index = index;
10581 +@@ -2279,17 +2279,14 @@ continue_unlock:
10582 + pagevec_release(&pvec);
10583 + cond_resched();
10584 + }
10585 +- if (!cycled && !done) {
10586 +- /*
10587 +- * range_cyclic:
10588 +- * We hit the last page and there is more work to be done: wrap
10589 +- * back to the start of the file
10590 +- */
10591 +- cycled = 1;
10592 +- index = 0;
10593 +- end = writeback_index - 1;
10594 +- goto retry;
10595 +- }
10596 ++
10597 ++ /*
10598 ++ * If we hit the last page and there is more work to be done: wrap
10599 ++ * back the index back to the start of the file for the next
10600 ++ * time we are called.
10601 ++ */
10602 ++ if (wbc->range_cyclic && !done)
10603 ++ done_index = 0;
10604 + if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
10605 + mapping->writeback_index = done_index;
10606 +
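The net effect of the mm/page-writeback.c hunks: write_cache_pages() no longer wraps back to the start of the file within a single call (the removed cycled/retry machinery); when a range_cyclic pass reaches the end with work still pending, it records index 0 so the next invocation starts from the beginning, keeping page locks strictly in ascending page->index order. A toy model of the new control flow (illustrative names, userspace C; the real code sets "done" when nr_to_write is exhausted):

#include <stdbool.h>
#include <stdio.h>

struct mapping { unsigned long writeback_index, nr_pages; };

static void write_cache_pages_model(struct mapping *m)
{
        unsigned long index = m->writeback_index;
        unsigned long done_index = index;
        bool done = false;

        while (index < m->nr_pages) {
                printf("writing page %lu\n", index);
                done_index = ++index;
        }
        if (!done)              /* hit the end with more work pending */
                done_index = 0; /* wrap for the next call, not this one */
        m->writeback_index = done_index;
}

int main(void)
{
        struct mapping m = { .writeback_index = 2, .nr_pages = 4 };

        write_cache_pages_model(&m);    /* writes pages 2 and 3 */
        printf("next call starts at page %lu\n", m.writeback_index);
        return 0;
}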
10607 +diff --git a/mm/page_io.c b/mm/page_io.c
10608 +index aafd19ec1db4..08d2eae58fce 100644
10609 +--- a/mm/page_io.c
10610 ++++ b/mm/page_io.c
10611 +@@ -76,6 +76,7 @@ static void swap_slot_free_notify(struct page *page)
10612 + {
10613 + struct swap_info_struct *sis;
10614 + struct gendisk *disk;
10615 ++ swp_entry_t entry;
10616 +
10617 + /*
10618 + * There is no guarantee that the page is in swap cache - the software
10619 +@@ -107,11 +108,11 @@ static void swap_slot_free_notify(struct page *page)
10620 + * we again wish to reclaim it.
10621 + */
10622 + disk = sis->bdev->bd_disk;
10623 +- if (disk->fops->swap_slot_free_notify) {
10624 +- swp_entry_t entry;
10625 ++ entry.val = page_private(page);
10626 ++ if (disk->fops->swap_slot_free_notify &&
10627 ++ __swap_count(sis, entry) == 1) {
10628 + unsigned long offset;
10629 +
10630 +- entry.val = page_private(page);
10631 + offset = swp_offset(entry);
10632 +
10633 + SetPageDirty(page);
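The mm/page_io.c change gates the notification on __swap_count(sis, entry) == 1: the block driver (zram is the typical consumer of swap_slot_free_notify) may only be told a slot is discardable when the page being freed holds the last reference to it, otherwise the backend could throw away data that another user of the swap entry, e.g. a forked child, still needs. The gist, as a toy (illustrative only):

#include <stdio.h>

static void slot_free(int swap_count)
{
        if (swap_count == 1)
                printf("count 1: notify driver, slot can be discarded\n");
        else
                printf("count %d: keep data, other references remain\n",
                       swap_count);
}

int main(void)
{
        slot_free(2);   /* e.g. after fork duplicated the swap entry */
        slot_free(1);
        return 0;
}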
10634 +diff --git a/net/core/dev.c b/net/core/dev.c
10635 +index e96c88b1465d..91179febdeee 100644
10636 +--- a/net/core/dev.c
10637 ++++ b/net/core/dev.c
10638 +@@ -3277,7 +3277,7 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *de
10639 + }
10640 +
10641 + skb = next;
10642 +- if (netif_xmit_stopped(txq) && skb) {
10643 ++ if (netif_tx_queue_stopped(txq) && skb) {
10644 + rc = NETDEV_TX_BUSY;
10645 + break;
10646 + }
10647 +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
10648 +index c0de73b12580..dbb3c0c7c132 100644
10649 +--- a/net/core/rtnetlink.c
10650 ++++ b/net/core/rtnetlink.c
10651 +@@ -2126,6 +2126,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
10652 + if (tb[IFLA_VF_MAC]) {
10653 + struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
10654 +
10655 ++ if (ivm->vf >= INT_MAX)
10656 ++ return -EINVAL;
10657 + err = -EOPNOTSUPP;
10658 + if (ops->ndo_set_vf_mac)
10659 + err = ops->ndo_set_vf_mac(dev, ivm->vf,
10660 +@@ -2137,6 +2139,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
10661 + if (tb[IFLA_VF_VLAN]) {
10662 + struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
10663 +
10664 ++ if (ivv->vf >= INT_MAX)
10665 ++ return -EINVAL;
10666 + err = -EOPNOTSUPP;
10667 + if (ops->ndo_set_vf_vlan)
10668 + err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
10669 +@@ -2169,6 +2173,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
10670 + if (len == 0)
10671 + return -EINVAL;
10672 +
10673 ++ if (ivvl[0]->vf >= INT_MAX)
10674 ++ return -EINVAL;
10675 + err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
10676 + ivvl[0]->qos, ivvl[0]->vlan_proto);
10677 + if (err < 0)
10678 +@@ -2179,6 +2185,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
10679 + struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
10680 + struct ifla_vf_info ivf;
10681 +
10682 ++ if (ivt->vf >= INT_MAX)
10683 ++ return -EINVAL;
10684 + err = -EOPNOTSUPP;
10685 + if (ops->ndo_get_vf_config)
10686 + err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
10687 +@@ -2197,6 +2205,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
10688 + if (tb[IFLA_VF_RATE]) {
10689 + struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);
10690 +
10691 ++ if (ivt->vf >= INT_MAX)
10692 ++ return -EINVAL;
10693 + err = -EOPNOTSUPP;
10694 + if (ops->ndo_set_vf_rate)
10695 + err = ops->ndo_set_vf_rate(dev, ivt->vf,
10696 +@@ -2209,6 +2219,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
10697 + if (tb[IFLA_VF_SPOOFCHK]) {
10698 + struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
10699 +
10700 ++ if (ivs->vf >= INT_MAX)
10701 ++ return -EINVAL;
10702 + err = -EOPNOTSUPP;
10703 + if (ops->ndo_set_vf_spoofchk)
10704 + err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
10705 +@@ -2220,6 +2232,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
10706 + if (tb[IFLA_VF_LINK_STATE]) {
10707 + struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
10708 +
10709 ++ if (ivl->vf >= INT_MAX)
10710 ++ return -EINVAL;
10711 + err = -EOPNOTSUPP;
10712 + if (ops->ndo_set_vf_link_state)
10713 + err = ops->ndo_set_vf_link_state(dev, ivl->vf,
10714 +@@ -2233,6 +2247,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
10715 +
10716 + err = -EOPNOTSUPP;
10717 + ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
10718 ++ if (ivrssq_en->vf >= INT_MAX)
10719 ++ return -EINVAL;
10720 + if (ops->ndo_set_vf_rss_query_en)
10721 + err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
10722 + ivrssq_en->setting);
10723 +@@ -2243,6 +2259,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
10724 + if (tb[IFLA_VF_TRUST]) {
10725 + struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);
10726 +
10727 ++ if (ivt->vf >= INT_MAX)
10728 ++ return -EINVAL;
10729 + err = -EOPNOTSUPP;
10730 + if (ops->ndo_set_vf_trust)
10731 + err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
10732 +@@ -2253,15 +2271,18 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
10733 + if (tb[IFLA_VF_IB_NODE_GUID]) {
10734 + struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);
10735 +
10736 ++ if (ivt->vf >= INT_MAX)
10737 ++ return -EINVAL;
10738 + if (!ops->ndo_set_vf_guid)
10739 + return -EOPNOTSUPP;
10740 +-
10741 + return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
10742 + }
10743 +
10744 + if (tb[IFLA_VF_IB_PORT_GUID]) {
10745 + struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);
10746 +
10747 ++ if (ivt->vf >= INT_MAX)
10748 ++ return -EINVAL;
10749 + if (!ops->ndo_set_vf_guid)
10750 + return -EOPNOTSUPP;
10751 +
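Every net/core/rtnetlink.c hunk above adds the same guard: the VF index arrives from userspace as a 32-bit netlink field, but the ndo_set_vf_* driver callbacks all take an int, so values of INT_MAX and above must be rejected before the implicit conversion turns them negative. A minimal demonstration of the hazard (userspace C; the callback is a hypothetical stand-in):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

static int ndo_set_vf_mac(int vf)      /* stand-in driver callback */
{
        return vf < 0 ? -1 : 0;        /* a negative index misbehaves */
}

int main(void)
{
        uint32_t vf_from_netlink = 0x80000000u;   /* > INT_MAX */

        if (vf_from_netlink >= INT_MAX) {         /* the patch's guard */
                fprintf(stderr, "EINVAL: VF index too large\n");
                return 1;
        }
        return ndo_set_vf_mac((int)vf_from_netlink);
}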
10752 +diff --git a/net/core/sock.c b/net/core/sock.c
10753 +index 6c1107821776..ba4f843cdd1d 100644
10754 +--- a/net/core/sock.c
10755 ++++ b/net/core/sock.c
10756 +@@ -3347,6 +3347,7 @@ int sock_load_diag_module(int family, int protocol)
10757 +
10758 + #ifdef CONFIG_INET
10759 + if (family == AF_INET &&
10760 ++ protocol != IPPROTO_RAW &&
10761 + !rcu_access_pointer(inet_protos[protocol]))
10762 + return -ENOENT;
10763 + #endif
10764 +diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
10765 +index b2240b7f225d..523d26f5e22e 100644
10766 +--- a/net/ipv4/igmp.c
10767 ++++ b/net/ipv4/igmp.c
10768 +@@ -111,13 +111,10 @@
10769 + #ifdef CONFIG_IP_MULTICAST
10770 + /* Parameter names and values are taken from igmp-v2-06 draft */
10771 +
10772 +-#define IGMP_V1_ROUTER_PRESENT_TIMEOUT (400*HZ)
10773 +-#define IGMP_V2_ROUTER_PRESENT_TIMEOUT (400*HZ)
10774 + #define IGMP_V2_UNSOLICITED_REPORT_INTERVAL (10*HZ)
10775 + #define IGMP_V3_UNSOLICITED_REPORT_INTERVAL (1*HZ)
10776 ++#define IGMP_QUERY_INTERVAL (125*HZ)
10777 + #define IGMP_QUERY_RESPONSE_INTERVAL (10*HZ)
10778 +-#define IGMP_QUERY_ROBUSTNESS_VARIABLE 2
10779 +-
10780 +
10781 + #define IGMP_INITIAL_REPORT_DELAY (1)
10782 +
10783 +@@ -953,13 +950,15 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
10784 +
10785 + max_delay = IGMP_QUERY_RESPONSE_INTERVAL;
10786 + in_dev->mr_v1_seen = jiffies +
10787 +- IGMP_V1_ROUTER_PRESENT_TIMEOUT;
10788 ++ (in_dev->mr_qrv * in_dev->mr_qi) +
10789 ++ in_dev->mr_qri;
10790 + group = 0;
10791 + } else {
10792 + /* v2 router present */
10793 + max_delay = ih->code*(HZ/IGMP_TIMER_SCALE);
10794 + in_dev->mr_v2_seen = jiffies +
10795 +- IGMP_V2_ROUTER_PRESENT_TIMEOUT;
10796 ++ (in_dev->mr_qrv * in_dev->mr_qi) +
10797 ++ in_dev->mr_qri;
10798 + }
10799 + /* cancel the interface change timer */
10800 + in_dev->mr_ifc_count = 0;
10801 +@@ -999,8 +998,21 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
10802 + if (!max_delay)
10803 + max_delay = 1; /* can't mod w/ 0 */
10804 + in_dev->mr_maxdelay = max_delay;
10805 +- if (ih3->qrv)
10806 +- in_dev->mr_qrv = ih3->qrv;
10807 ++
10808 ++ /* RFC3376, 4.1.6. QRV and 4.1.7. QQIC, when the most recently
10809 ++ * received value was zero, use the default or statically
10810 ++ * configured value.
10811 ++ */
10812 ++ in_dev->mr_qrv = ih3->qrv ?: net->ipv4.sysctl_igmp_qrv;
10813 ++ in_dev->mr_qi = IGMPV3_QQIC(ih3->qqic)*HZ ?: IGMP_QUERY_INTERVAL;
10814 ++
10815 ++ /* RFC3376, 8.3. Query Response Interval:
10816 ++ * The number of seconds represented by the [Query Response
10817 ++ * Interval] must be less than the [Query Interval].
10818 ++ */
10819 ++ if (in_dev->mr_qri >= in_dev->mr_qi)
10820 ++ in_dev->mr_qri = (in_dev->mr_qi/HZ - 1)*HZ;
10821 ++
10822 + if (!group) { /* general query */
10823 + if (ih3->nsrcs)
10824 + return true; /* no sources allowed */
10825 +@@ -1738,18 +1750,30 @@ void ip_mc_down(struct in_device *in_dev)
10826 + ip_mc_dec_group(in_dev, IGMP_ALL_HOSTS);
10827 + }
10828 +
10829 +-void ip_mc_init_dev(struct in_device *in_dev)
10830 +-{
10831 + #ifdef CONFIG_IP_MULTICAST
10832 ++static void ip_mc_reset(struct in_device *in_dev)
10833 ++{
10834 + struct net *net = dev_net(in_dev->dev);
10835 ++
10836 ++ in_dev->mr_qi = IGMP_QUERY_INTERVAL;
10837 ++ in_dev->mr_qri = IGMP_QUERY_RESPONSE_INTERVAL;
10838 ++ in_dev->mr_qrv = net->ipv4.sysctl_igmp_qrv;
10839 ++}
10840 ++#else
10841 ++static void ip_mc_reset(struct in_device *in_dev)
10842 ++{
10843 ++}
10844 + #endif
10845 ++
10846 ++void ip_mc_init_dev(struct in_device *in_dev)
10847 ++{
10848 + ASSERT_RTNL();
10849 +
10850 + #ifdef CONFIG_IP_MULTICAST
10851 + timer_setup(&in_dev->mr_gq_timer, igmp_gq_timer_expire, 0);
10852 + timer_setup(&in_dev->mr_ifc_timer, igmp_ifc_timer_expire, 0);
10853 +- in_dev->mr_qrv = net->ipv4.sysctl_igmp_qrv;
10854 + #endif
10855 ++ ip_mc_reset(in_dev);
10856 +
10857 + spin_lock_init(&in_dev->mc_tomb_lock);
10858 + }
10859 +@@ -1759,15 +1783,10 @@ void ip_mc_init_dev(struct in_device *in_dev)
10860 + void ip_mc_up(struct in_device *in_dev)
10861 + {
10862 + struct ip_mc_list *pmc;
10863 +-#ifdef CONFIG_IP_MULTICAST
10864 +- struct net *net = dev_net(in_dev->dev);
10865 +-#endif
10866 +
10867 + ASSERT_RTNL();
10868 +
10869 +-#ifdef CONFIG_IP_MULTICAST
10870 +- in_dev->mr_qrv = net->ipv4.sysctl_igmp_qrv;
10871 +-#endif
10872 ++ ip_mc_reset(in_dev);
10873 + ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
10874 +
10875 + for_each_pmc_rtnl(in_dev, pmc) {
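The net/ipv4/igmp.c hunks drop the hard-coded 400-second "v1/v2 router present" timeouts in favour of the RFC 3376 Older Version Querier Present Timeout, (Robustness Variable x Query Interval) + (Query Response Interval), with the per-device mr_qrv/mr_qi/mr_qri values falling back to defaults whenever the received QRV or QQIC field is zero, and mr_qri clamped below mr_qi as section 8.3 requires. With the RFC defaults the new timeout works out like this (illustrative C; HZ value assumed):

#include <stdio.h>

#define HZ 100                          /* assumed tick rate */

int main(void)
{
        unsigned long qrv = 2;          /* robustness variable (default) */
        unsigned long qi  = 125 * HZ;   /* query interval, 125s default */
        unsigned long qri = 10 * HZ;    /* query response interval, 10s */
        unsigned long timeout = qrv * qi + qri;

        /* 2 * 125s + 10s = 260s, instead of the old fixed 400s */
        printf("querier present timeout: %lus\n", timeout / HZ);
        return 0;
}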
10876 +diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
10877 +index b7a26120d552..82f341e84fae 100644
10878 +--- a/net/ipv4/ip_sockglue.c
10879 ++++ b/net/ipv4/ip_sockglue.c
10880 +@@ -1244,7 +1244,7 @@ int ip_setsockopt(struct sock *sk, int level,
10881 + return -ENOPROTOOPT;
10882 +
10883 + err = do_ip_setsockopt(sk, level, optname, optval, optlen);
10884 +-#ifdef CONFIG_BPFILTER
10885 ++#if IS_ENABLED(CONFIG_BPFILTER_UMH)
10886 + if (optname >= BPFILTER_IPT_SO_SET_REPLACE &&
10887 + optname < BPFILTER_IPT_SET_MAX)
10888 + err = bpfilter_ip_set_sockopt(sk, optname, optval, optlen);
10889 +@@ -1557,7 +1557,7 @@ int ip_getsockopt(struct sock *sk, int level,
10890 + int err;
10891 +
10892 + err = do_ip_getsockopt(sk, level, optname, optval, optlen, 0);
10893 +-#ifdef CONFIG_BPFILTER
10894 ++#if IS_ENABLED(CONFIG_BPFILTER_UMH)
10895 + if (optname >= BPFILTER_IPT_SO_GET_INFO &&
10896 + optname < BPFILTER_IPT_GET_MAX)
10897 + err = bpfilter_ip_get_sockopt(sk, optname, optval, optlen);
10898 +@@ -1594,7 +1594,7 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname,
10899 + err = do_ip_getsockopt(sk, level, optname, optval, optlen,
10900 + MSG_CMSG_COMPAT);
10901 +
10902 +-#ifdef CONFIG_BPFILTER
10903 ++#if IS_ENABLED(CONFIG_BPFILTER_UMH)
10904 + if (optname >= BPFILTER_IPT_SO_GET_INFO &&
10905 + optname < BPFILTER_IPT_GET_MAX)
10906 + err = bpfilter_ip_get_sockopt(sk, optname, optval, optlen);
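The net/ipv4/ip_sockglue.c hunks replace "#ifdef CONFIG_BPFILTER" with IS_ENABLED(CONFIG_BPFILTER_UMH): a plain #ifdef only sees options compiled built-in, whereas IS_ENABLED() is true for tristate options built either =y or =m, so the bpfilter get/setsockopt hooks are compiled in whenever the usermode helper actually exists. A reduced, self-contained sketch of the preprocessor trick behind IS_ENABLED() (condensed from the kernel's include/linux/kconfig.h):

#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x)              ___is_defined(x)
#define ___is_defined(val)           ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define IS_ENABLED(option) \
        (__is_defined(option) || __is_defined(option##_MODULE))

#define CONFIG_BPFILTER_UMH_MODULE 1    /* pretend bpfilter was built =m */

int main(void)
{
        /* "#ifdef CONFIG_BPFILTER_UMH" would be false here, but: */
        printf("IS_ENABLED(CONFIG_BPFILTER_UMH) = %d\n",
               IS_ENABLED(CONFIG_BPFILTER_UMH));        /* prints 1 */
        return 0;
}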
10907 +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
10908 +index e7cdfa92c382..9a117a79af65 100644
10909 +--- a/net/ipv6/tcp_ipv6.c
10910 ++++ b/net/ipv6/tcp_ipv6.c
10911 +@@ -734,6 +734,7 @@ static void tcp_v6_init_req(struct request_sock *req,
10912 + const struct sock *sk_listener,
10913 + struct sk_buff *skb)
10914 + {
10915 ++ bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
10916 + struct inet_request_sock *ireq = inet_rsk(req);
10917 + const struct ipv6_pinfo *np = inet6_sk(sk_listener);
10918 +
10919 +@@ -741,7 +742,7 @@ static void tcp_v6_init_req(struct request_sock *req,
10920 + ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
10921 +
10922 + /* So that link locals have meaning */
10923 +- if (!sk_listener->sk_bound_dev_if &&
10924 ++ if ((!sk_listener->sk_bound_dev_if || l3_slave) &&
10925 + ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
10926 + ireq->ir_iif = tcp_v6_iif(skb);
10927 +
10928 +diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
10929 +index 35ae64cbef33..46aa1aa51db4 100644
10930 +--- a/net/openvswitch/conntrack.c
10931 ++++ b/net/openvswitch/conntrack.c
10932 +@@ -1199,7 +1199,8 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
10933 + &info->labels.mask);
10934 + if (err)
10935 + return err;
10936 +- } else if (labels_nonzero(&info->labels.mask)) {
10937 ++ } else if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
10938 ++ labels_nonzero(&info->labels.mask)) {
10939 + err = ovs_ct_set_labels(ct, key, &info->labels.value,
10940 + &info->labels.mask);
10941 + if (err)
10942 +diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
10943 +index 33c0cc5ef229..ce14fafb36a1 100644
10944 +--- a/net/sched/act_pedit.c
10945 ++++ b/net/sched/act_pedit.c
10946 +@@ -46,7 +46,7 @@ static struct tcf_pedit_key_ex *tcf_pedit_keys_ex_parse(struct nlattr *nla,
10947 + int err = -EINVAL;
10948 + int rem;
10949 +
10950 +- if (!nla || !n)
10951 ++ if (!nla)
10952 + return NULL;
10953 +
10954 + keys_ex = kcalloc(n, sizeof(*k), GFP_KERNEL);
10955 +@@ -169,6 +169,10 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
10956 + }
10957 +
10958 + parm = nla_data(pattr);
10959 ++ if (!parm->nkeys) {
10960 ++ NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed");
10961 ++ return -EINVAL;
10962 ++ }
10963 + ksize = parm->nkeys * sizeof(struct tc_pedit_key);
10964 + if (nla_len(pattr) < sizeof(*parm) + ksize) {
10965 + NL_SET_ERR_MSG_ATTR(extack, pattr, "Length of TCA_PEDIT_PARMS or TCA_PEDIT_PARMS_EX pedit attribute is invalid");
10966 +@@ -182,12 +186,6 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
10967 + index = parm->index;
10968 + err = tcf_idr_check_alloc(tn, &index, a, bind);
10969 + if (!err) {
10970 +- if (!parm->nkeys) {
10971 +- tcf_idr_cleanup(tn, index);
10972 +- NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed");
10973 +- ret = -EINVAL;
10974 +- goto out_free;
10975 +- }
10976 + ret = tcf_idr_create(tn, index, est, a,
10977 + &act_pedit_ops, bind, false);
10978 + if (ret) {
10979 +diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
10980 +index 43309ff2b5dc..e4fc6b2bc29d 100644
10981 +--- a/net/sched/act_tunnel_key.c
10982 ++++ b/net/sched/act_tunnel_key.c
10983 +@@ -137,6 +137,10 @@ static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst,
10984 + if (opt_len < 0)
10985 + return opt_len;
10986 + opts_len += opt_len;
10987 ++ if (opts_len > IP_TUNNEL_OPTS_MAX) {
10988 ++ NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
10989 ++ return -EINVAL;
10990 ++ }
10991 + if (dst) {
10992 + dst_len -= opt_len;
10993 + dst += opt_len;
10994 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
10995 +index c76631552722..e7a11cd7633f 100644
10996 +--- a/net/sctp/socket.c
10997 ++++ b/net/sctp/socket.c
10998 +@@ -83,7 +83,7 @@
10999 + #include <net/sctp/stream_sched.h>
11000 +
11001 + /* Forward declarations for internal helper functions. */
11002 +-static int sctp_writeable(struct sock *sk);
11003 ++static bool sctp_writeable(struct sock *sk);
11004 + static void sctp_wfree(struct sk_buff *skb);
11005 + static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
11006 + size_t msg_len);
11007 +@@ -119,25 +119,10 @@ static void sctp_enter_memory_pressure(struct sock *sk)
11008 + /* Get the sndbuf space available at the time on the association. */
11009 + static inline int sctp_wspace(struct sctp_association *asoc)
11010 + {
11011 +- int amt;
11012 ++ struct sock *sk = asoc->base.sk;
11013 +
11014 +- if (asoc->ep->sndbuf_policy)
11015 +- amt = asoc->sndbuf_used;
11016 +- else
11017 +- amt = sk_wmem_alloc_get(asoc->base.sk);
11018 +-
11019 +- if (amt >= asoc->base.sk->sk_sndbuf) {
11020 +- if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK)
11021 +- amt = 0;
11022 +- else {
11023 +- amt = sk_stream_wspace(asoc->base.sk);
11024 +- if (amt < 0)
11025 +- amt = 0;
11026 +- }
11027 +- } else {
11028 +- amt = asoc->base.sk->sk_sndbuf - amt;
11029 +- }
11030 +- return amt;
11031 ++ return asoc->ep->sndbuf_policy ? sk->sk_sndbuf - asoc->sndbuf_used
11032 ++ : sk_stream_wspace(sk);
11033 + }
11034 +
11035 + /* Increment the used sndbuf space count of the corresponding association by
11036 +@@ -1928,10 +1913,10 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
11037 + asoc->pmtu_pending = 0;
11038 + }
11039 +
11040 +- if (sctp_wspace(asoc) < msg_len)
11041 ++ if (sctp_wspace(asoc) < (int)msg_len)
11042 + sctp_prsctp_prune(asoc, sinfo, msg_len - sctp_wspace(asoc));
11043 +
11044 +- if (!sctp_wspace(asoc)) {
11045 ++ if (sctp_wspace(asoc) <= 0) {
11046 + timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
11047 + err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
11048 + if (err)
11049 +@@ -8516,7 +8501,7 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
11050 + goto do_error;
11051 + if (signal_pending(current))
11052 + goto do_interrupted;
11053 +- if (msg_len <= sctp_wspace(asoc))
11054 ++ if ((int)msg_len <= sctp_wspace(asoc))
11055 + break;
11056 +
11057 + /* Let another process have a go. Since we are going
11058 +@@ -8591,14 +8576,9 @@ void sctp_write_space(struct sock *sk)
11059 + * UDP-style sockets or TCP-style sockets, this code should work.
11060 + * - Daisy
11061 + */
11062 +-static int sctp_writeable(struct sock *sk)
11063 ++static bool sctp_writeable(struct sock *sk)
11064 + {
11065 +- int amt = 0;
11066 +-
11067 +- amt = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
11068 +- if (amt < 0)
11069 +- amt = 0;
11070 +- return amt;
11071 ++ return sk->sk_sndbuf > sk->sk_wmem_queued;
11072 + }
11073 +
11074 + /* Wait for an association to go into ESTABLISHED state. If timeout is 0,
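The sctp changes make sctp_wspace() genuinely signed: with sndbuf_policy set it is now simply sk_sndbuf - sndbuf_used, which can go negative once the buffer is over-committed, and sctp_writeable() collapses to a single signed comparison. That is also why both call sites cast msg_len to int: comparing a negative int against an unsigned size_t would promote the int and make the test silently false. Demonstration:

#include <stdio.h>

int main(void)
{
        int wspace = -200;      /* sctp_wspace() can now return this */
        size_t msg_len = 100;

        /* Unsigned comparison: wspace converts to size_t, -200 becomes
         * a huge value, and the sender wrongly proceeds (prints 0). */
        printf("wspace < msg_len      -> %d\n", wspace < msg_len);

        /* The patched comparison: both operands signed (prints 1). */
        printf("wspace < (int)msg_len -> %d\n", wspace < (int)msg_len);
        return 0;
}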
11075 +diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c
11076 +index eaad9bc7a0bd..e1f0571843c8 100644
11077 +--- a/net/sunrpc/auth_gss/gss_krb5_seal.c
11078 ++++ b/net/sunrpc/auth_gss/gss_krb5_seal.c
11079 +@@ -63,6 +63,7 @@
11080 + #include <linux/sunrpc/gss_krb5.h>
11081 + #include <linux/random.h>
11082 + #include <linux/crypto.h>
11083 ++#include <linux/atomic.h>
11084 +
11085 + #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
11086 + # define RPCDBG_FACILITY RPCDBG_AUTH
11087 +diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
11088 +index c0d7875a64ff..9dc059dea689 100644
11089 +--- a/net/sunrpc/xprtsock.c
11090 ++++ b/net/sunrpc/xprtsock.c
11091 +@@ -129,7 +129,7 @@ static struct ctl_table xs_tunables_table[] = {
11092 + .mode = 0644,
11093 + .proc_handler = proc_dointvec_minmax,
11094 + .extra1 = &xprt_min_resvport_limit,
11095 +- .extra2 = &xprt_max_resvport
11096 ++ .extra2 = &xprt_max_resvport_limit
11097 + },
11098 + {
11099 + .procname = "max_resvport",
11100 +@@ -137,7 +137,7 @@ static struct ctl_table xs_tunables_table[] = {
11101 + .maxlen = sizeof(unsigned int),
11102 + .mode = 0644,
11103 + .proc_handler = proc_dointvec_minmax,
11104 +- .extra1 = &xprt_min_resvport,
11105 ++ .extra1 = &xprt_min_resvport_limit,
11106 + .extra2 = &xprt_max_resvport_limit
11107 + },
11108 + {
11109 +@@ -1776,11 +1776,17 @@ static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task)
11110 + spin_unlock_bh(&xprt->transport_lock);
11111 + }
11112 +
11113 +-static unsigned short xs_get_random_port(void)
11114 ++static int xs_get_random_port(void)
11115 + {
11116 +- unsigned short range = xprt_max_resvport - xprt_min_resvport + 1;
11117 +- unsigned short rand = (unsigned short) prandom_u32() % range;
11118 +- return rand + xprt_min_resvport;
11119 ++ unsigned short min = xprt_min_resvport, max = xprt_max_resvport;
11120 ++ unsigned short range;
11121 ++ unsigned short rand;
11122 ++
11123 ++ if (max < min)
11124 ++ return -EADDRINUSE;
11125 ++ range = max - min + 1;
11126 ++ rand = (unsigned short) prandom_u32() % range;
11127 ++ return rand + min;
11128 + }
11129 +
11130 + /**
11131 +@@ -1836,9 +1842,9 @@ static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock)
11132 + transport->srcport = xs_sock_getport(sock);
11133 + }
11134 +
11135 +-static unsigned short xs_get_srcport(struct sock_xprt *transport)
11136 ++static int xs_get_srcport(struct sock_xprt *transport)
11137 + {
11138 +- unsigned short port = transport->srcport;
11139 ++ int port = transport->srcport;
11140 +
11141 + if (port == 0 && transport->xprt.resvport)
11142 + port = xs_get_random_port();
11143 +@@ -1859,7 +1865,7 @@ static int xs_bind(struct sock_xprt *transport, struct socket *sock)
11144 + {
11145 + struct sockaddr_storage myaddr;
11146 + int err, nloop = 0;
11147 +- unsigned short port = xs_get_srcport(transport);
11148 ++ int port = xs_get_srcport(transport);
11149 + unsigned short last;
11150 +
11151 + /*
11152 +@@ -1877,8 +1883,8 @@ static int xs_bind(struct sock_xprt *transport, struct socket *sock)
11153 + * transport->xprt.resvport == 1) xs_get_srcport above will
11154 + * ensure that port is non-zero and we will bind as needed.
11155 + */
11156 +- if (port == 0)
11157 +- return 0;
11158 ++ if (port <= 0)
11159 ++ return port;
11160 +
11161 + memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen);
11162 + do {
11163 +@@ -3319,12 +3325,8 @@ static int param_set_uint_minmax(const char *val,
11164 +
11165 + static int param_set_portnr(const char *val, const struct kernel_param *kp)
11166 + {
11167 +- if (kp->arg == &xprt_min_resvport)
11168 +- return param_set_uint_minmax(val, kp,
11169 +- RPC_MIN_RESVPORT,
11170 +- xprt_max_resvport);
11171 + return param_set_uint_minmax(val, kp,
11172 +- xprt_min_resvport,
11173 ++ RPC_MIN_RESVPORT,
11174 + RPC_MAX_RESVPORT);
11175 + }
11176 +
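The sunrpc/xprtsock.c hunks widen the source-port helpers from unsigned short to int so they can report failure: min_resvport and max_resvport may now each range over the full RPC_MIN_RESVPORT..RPC_MAX_RESVPORT interval, which makes "min > max" a configuration the code has to survive, and xs_get_random_port() reports it as -EADDRINUSE instead of taking a modulus over a bogus range (xs_bind() then bails out on port <= 0). A toy version, with rand() standing in for prandom_u32():

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int get_random_port(unsigned short min, unsigned short max)
{
        unsigned short range, off;

        if (max < min)
                return -EADDRINUSE;     /* nothing bindable in range */
        range = max - min + 1;
        off = (unsigned short)(rand() % range);
        return min + off;
}

int main(void)
{
        printf("%d\n", get_random_port(665, 1023));  /* some valid port */
        printf("%d\n", get_random_port(1023, 665));  /* -EADDRINUSE */
        return 0;
}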
11177 +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
11178 +index 231b6c032d2c..d2d6ff0c6265 100644
11179 +--- a/net/unix/af_unix.c
11180 ++++ b/net/unix/af_unix.c
11181 +@@ -225,6 +225,8 @@ static inline void unix_release_addr(struct unix_address *addr)
11182 +
11183 + static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
11184 + {
11185 ++ *hashp = 0;
11186 ++
11187 + if (len <= sizeof(short) || len > sizeof(*sunaddr))
11188 + return -EINVAL;
11189 + if (!sunaddr || sunaddr->sun_family != AF_UNIX)
11190 +diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
11191 +index 2a8651aa90c8..52242a148c70 100644
11192 +--- a/net/vmw_vsock/virtio_transport_common.c
11193 ++++ b/net/vmw_vsock/virtio_transport_common.c
11194 +@@ -92,8 +92,17 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque)
11195 + struct virtio_vsock_pkt *pkt = opaque;
11196 + struct af_vsockmon_hdr *hdr;
11197 + struct sk_buff *skb;
11198 ++ size_t payload_len;
11199 ++ void *payload_buf;
11200 +
11201 +- skb = alloc_skb(sizeof(*hdr) + sizeof(pkt->hdr) + pkt->len,
11202 ++ /* A packet could be split to fit the RX buffer, so we can retrieve
11203 ++ * the payload length from the header and the buffer pointer taking
11204 ++ * care of the offset in the original packet.
11205 ++ */
11206 ++ payload_len = le32_to_cpu(pkt->hdr.len);
11207 ++ payload_buf = pkt->buf + pkt->off;
11208 ++
11209 ++ skb = alloc_skb(sizeof(*hdr) + sizeof(pkt->hdr) + payload_len,
11210 + GFP_ATOMIC);
11211 + if (!skb)
11212 + return NULL;
11213 +@@ -133,8 +142,8 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque)
11214 +
11215 + skb_put_data(skb, &pkt->hdr, sizeof(pkt->hdr));
11216 +
11217 +- if (pkt->len) {
11218 +- skb_put_data(skb, pkt->buf, pkt->len);
11219 ++ if (payload_len) {
11220 ++ skb_put_data(skb, payload_buf, payload_len);
11221 + }
11222 +
11223 + return skb;
11224 +diff --git a/net/wireless/ap.c b/net/wireless/ap.c
11225 +index 882d97bdc6bf..550ac9d827fe 100644
11226 +--- a/net/wireless/ap.c
11227 ++++ b/net/wireless/ap.c
11228 +@@ -41,6 +41,8 @@ int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
11229 + cfg80211_sched_dfs_chan_update(rdev);
11230 + }
11231 +
11232 ++ schedule_work(&cfg80211_disconnect_work);
11233 ++
11234 + return err;
11235 + }
11236 +
11237 +diff --git a/net/wireless/core.h b/net/wireless/core.h
11238 +index 7f52ef569320..f5d58652108d 100644
11239 +--- a/net/wireless/core.h
11240 ++++ b/net/wireless/core.h
11241 +@@ -430,6 +430,8 @@ void cfg80211_process_wdev_events(struct wireless_dev *wdev);
11242 + bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range,
11243 + u32 center_freq_khz, u32 bw_khz);
11244 +
11245 ++extern struct work_struct cfg80211_disconnect_work;
11246 ++
11247 + /**
11248 + * cfg80211_chandef_dfs_usable - checks if chandef is DFS usable
11249 + * @wiphy: the wiphy to validate against
11250 +diff --git a/net/wireless/sme.c b/net/wireless/sme.c
11251 +index d536b07582f8..07c2196e9d57 100644
11252 +--- a/net/wireless/sme.c
11253 ++++ b/net/wireless/sme.c
11254 +@@ -642,11 +642,15 @@ static bool cfg80211_is_all_idle(void)
11255 + * All devices must be idle as otherwise if you are actively
11256 + * scanning some new beacon hints could be learned and would
11257 + * count as new regulatory hints.
11258 ++ * Also if there is any other active beaconing interface we
11259 ++ * need not issue a disconnect hint and reset any info such
11260 ++ * as chan dfs state, etc.
11261 + */
11262 + list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
11263 + list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
11264 + wdev_lock(wdev);
11265 +- if (wdev->conn || wdev->current_bss)
11266 ++ if (wdev->conn || wdev->current_bss ||
11267 ++ cfg80211_beaconing_iface_active(wdev))
11268 + is_all_idle = false;
11269 + wdev_unlock(wdev);
11270 + }
11271 +@@ -663,7 +667,7 @@ static void disconnect_work(struct work_struct *work)
11272 + rtnl_unlock();
11273 + }
11274 +
11275 +-static DECLARE_WORK(cfg80211_disconnect_work, disconnect_work);
11276 ++DECLARE_WORK(cfg80211_disconnect_work, disconnect_work);
11277 +
11278 +
11279 + /*
11280 +diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
11281 +index 30957477e005..0717ab9e48e3 100644
11282 +--- a/sound/firewire/isight.c
11283 ++++ b/sound/firewire/isight.c
11284 +@@ -640,7 +640,7 @@ static int isight_probe(struct fw_unit *unit,
11285 + if (!isight->audio_base) {
11286 + dev_err(&unit->device, "audio unit base not found\n");
11287 + err = -ENXIO;
11288 +- goto err_unit;
11289 ++ goto error;
11290 + }
11291 + fw_iso_resources_init(&isight->resources, unit);
11292 +
11293 +@@ -669,12 +669,12 @@ static int isight_probe(struct fw_unit *unit,
11294 + dev_set_drvdata(&unit->device, isight);
11295 +
11296 + return 0;
11297 +-
11298 +-err_unit:
11299 +- fw_unit_put(isight->unit);
11300 +- mutex_destroy(&isight->mutex);
11301 + error:
11302 + snd_card_free(card);
11303 ++
11304 ++ mutex_destroy(&isight->mutex);
11305 ++ fw_unit_put(isight->unit);
11306 ++
11307 + return err;
11308 + }
11309 +
11310 +diff --git a/sound/i2c/cs8427.c b/sound/i2c/cs8427.c
11311 +index 2647309bc675..8afa2f888466 100644
11312 +--- a/sound/i2c/cs8427.c
11313 ++++ b/sound/i2c/cs8427.c
11314 +@@ -118,7 +118,7 @@ static int snd_cs8427_send_corudata(struct snd_i2c_device *device,
11315 + struct cs8427 *chip = device->private_data;
11316 + char *hw_data = udata ?
11317 + chip->playback.hw_udata : chip->playback.hw_status;
11318 +- char data[32];
11319 ++ unsigned char data[32];
11320 + int err, idx;
11321 +
11322 + if (!memcmp(hw_data, ndata, count))
11323 +diff --git a/sound/soc/tegra/tegra_sgtl5000.c b/sound/soc/tegra/tegra_sgtl5000.c
11324 +index 45a4aa9d2a47..901457da25ec 100644
11325 +--- a/sound/soc/tegra/tegra_sgtl5000.c
11326 ++++ b/sound/soc/tegra/tegra_sgtl5000.c
11327 +@@ -149,14 +149,14 @@ static int tegra_sgtl5000_driver_probe(struct platform_device *pdev)
11328 + dev_err(&pdev->dev,
11329 + "Property 'nvidia,i2s-controller' missing/invalid\n");
11330 + ret = -EINVAL;
11331 +- goto err;
11332 ++ goto err_put_codec_of_node;
11333 + }
11334 +
11335 + tegra_sgtl5000_dai.platform_of_node = tegra_sgtl5000_dai.cpu_of_node;
11336 +
11337 + ret = tegra_asoc_utils_init(&machine->util_data, &pdev->dev);
11338 + if (ret)
11339 +- goto err;
11340 ++ goto err_put_cpu_of_node;
11341 +
11342 + ret = snd_soc_register_card(card);
11343 + if (ret) {
11344 +@@ -169,6 +169,13 @@ static int tegra_sgtl5000_driver_probe(struct platform_device *pdev)
11345 +
11346 + err_fini_utils:
11347 + tegra_asoc_utils_fini(&machine->util_data);
11348 ++err_put_cpu_of_node:
11349 ++ of_node_put(tegra_sgtl5000_dai.cpu_of_node);
11350 ++ tegra_sgtl5000_dai.cpu_of_node = NULL;
11351 ++ tegra_sgtl5000_dai.platform_of_node = NULL;
11352 ++err_put_codec_of_node:
11353 ++ of_node_put(tegra_sgtl5000_dai.codec_of_node);
11354 ++ tegra_sgtl5000_dai.codec_of_node = NULL;
11355 + err:
11356 + return ret;
11357 + }
11358 +@@ -183,6 +190,12 @@ static int tegra_sgtl5000_driver_remove(struct platform_device *pdev)
11359 +
11360 + tegra_asoc_utils_fini(&machine->util_data);
11361 +
11362 ++ of_node_put(tegra_sgtl5000_dai.cpu_of_node);
11363 ++ tegra_sgtl5000_dai.cpu_of_node = NULL;
11364 ++ tegra_sgtl5000_dai.platform_of_node = NULL;
11365 ++ of_node_put(tegra_sgtl5000_dai.codec_of_node);
11366 ++ tegra_sgtl5000_dai.codec_of_node = NULL;
11367 ++
11368 + return ret;
11369 + }
11370 +
11371 +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
11372 +index 726cbd63a0c7..d7778f2bcbf8 100644
11373 +--- a/sound/usb/mixer.c
11374 ++++ b/sound/usb/mixer.c
11375 +@@ -2949,6 +2949,9 @@ static int snd_usb_mixer_controls_badd(struct usb_mixer_interface *mixer,
11376 + continue;
11377 +
11378 + iface = usb_ifnum_to_if(dev, intf);
11379 ++ if (!iface)
11380 ++ continue;
11381 ++
11382 + num = iface->num_altsetting;
11383 +
11384 + if (num < 2)
11385 +diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
11386 +index 598066c40191..c2b6b2176f3b 100644
11387 +--- a/tools/bpf/bpftool/bash-completion/bpftool
11388 ++++ b/tools/bpf/bpftool/bash-completion/bpftool
11389 +@@ -143,7 +143,7 @@ _bpftool_map_update_map_type()
11390 + local type
11391 + type=$(bpftool -jp map show $keyword $ref | \
11392 + command sed -n 's/.*"type": "\(.*\)",$/\1/p')
11393 +- printf $type
11394 ++ [[ -n $type ]] && printf $type
11395 + }
11396 +
11397 + _bpftool_map_update_get_id()
11398 +diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
11399 +index be7aebff0c1e..158469f57461 100644
11400 +--- a/tools/bpf/bpftool/common.c
11401 ++++ b/tools/bpf/bpftool/common.c
11402 +@@ -130,16 +130,17 @@ static int mnt_bpffs(const char *target, char *buff, size_t bufflen)
11403 + return 0;
11404 + }
11405 +
11406 +-int open_obj_pinned(char *path)
11407 ++int open_obj_pinned(char *path, bool quiet)
11408 + {
11409 + int fd;
11410 +
11411 + fd = bpf_obj_get(path);
11412 + if (fd < 0) {
11413 +- p_err("bpf obj get (%s): %s", path,
11414 +- errno == EACCES && !is_bpffs(dirname(path)) ?
11415 +- "directory not in bpf file system (bpffs)" :
11416 +- strerror(errno));
11417 ++ if (!quiet)
11418 ++ p_err("bpf obj get (%s): %s", path,
11419 ++ errno == EACCES && !is_bpffs(dirname(path)) ?
11420 ++ "directory not in bpf file system (bpffs)" :
11421 ++ strerror(errno));
11422 + return -1;
11423 + }
11424 +
11425 +@@ -151,7 +152,7 @@ int open_obj_pinned_any(char *path, enum bpf_obj_type exp_type)
11426 + enum bpf_obj_type type;
11427 + int fd;
11428 +
11429 +- fd = open_obj_pinned(path);
11430 ++ fd = open_obj_pinned(path, false);
11431 + if (fd < 0)
11432 + return -1;
11433 +
11434 +@@ -384,7 +385,7 @@ int build_pinned_obj_table(struct pinned_obj_table *tab,
11435 + while ((ftse = fts_read(fts))) {
11436 + if (!(ftse->fts_info & FTS_F))
11437 + continue;
11438 +- fd = open_obj_pinned(ftse->fts_path);
11439 ++ fd = open_obj_pinned(ftse->fts_path, true);
11440 + if (fd < 0)
11441 + continue;
11442 +
11443 +diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
11444 +index 238e734d75b3..057a227bdb9f 100644
11445 +--- a/tools/bpf/bpftool/main.h
11446 ++++ b/tools/bpf/bpftool/main.h
11447 +@@ -126,7 +126,7 @@ int cmd_select(const struct cmd *cmds, int argc, char **argv,
11448 + int get_fd_type(int fd);
11449 + const char *get_fd_type_name(enum bpf_obj_type type);
11450 + char *get_fdinfo(int fd, const char *key);
11451 +-int open_obj_pinned(char *path);
11452 ++int open_obj_pinned(char *path, bool quiet);
11453 + int open_obj_pinned_any(char *path, enum bpf_obj_type exp_type);
11454 + int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32));
11455 + int do_pin_fd(int fd, const char *name);
11456 +diff --git a/tools/gpio/Build b/tools/gpio/Build
11457 +index 620c1937d957..4141f35837db 100644
11458 +--- a/tools/gpio/Build
11459 ++++ b/tools/gpio/Build
11460 +@@ -1,3 +1,4 @@
11461 ++gpio-utils-y += gpio-utils.o
11462 + lsgpio-y += lsgpio.o gpio-utils.o
11463 + gpio-hammer-y += gpio-hammer.o gpio-utils.o
11464 + gpio-event-mon-y += gpio-event-mon.o gpio-utils.o
11465 +diff --git a/tools/gpio/Makefile b/tools/gpio/Makefile
11466 +index f8bc8656a544..6a73c06e069c 100644
11467 +--- a/tools/gpio/Makefile
11468 ++++ b/tools/gpio/Makefile
11469 +@@ -35,11 +35,15 @@ $(OUTPUT)include/linux/gpio.h: ../../include/uapi/linux/gpio.h
11470 +
11471 + prepare: $(OUTPUT)include/linux/gpio.h
11472 +
11473 ++GPIO_UTILS_IN := $(output)gpio-utils-in.o
11474 ++$(GPIO_UTILS_IN): prepare FORCE
11475 ++ $(Q)$(MAKE) $(build)=gpio-utils
11476 ++
11477 + #
11478 + # lsgpio
11479 + #
11480 + LSGPIO_IN := $(OUTPUT)lsgpio-in.o
11481 +-$(LSGPIO_IN): prepare FORCE
11482 ++$(LSGPIO_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o
11483 + $(Q)$(MAKE) $(build)=lsgpio
11484 + $(OUTPUT)lsgpio: $(LSGPIO_IN)
11485 + $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
11486 +@@ -48,7 +52,7 @@ $(OUTPUT)lsgpio: $(LSGPIO_IN)
11487 + # gpio-hammer
11488 + #
11489 + GPIO_HAMMER_IN := $(OUTPUT)gpio-hammer-in.o
11490 +-$(GPIO_HAMMER_IN): prepare FORCE
11491 ++$(GPIO_HAMMER_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o
11492 + $(Q)$(MAKE) $(build)=gpio-hammer
11493 + $(OUTPUT)gpio-hammer: $(GPIO_HAMMER_IN)
11494 + $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
11495 +@@ -57,7 +61,7 @@ $(OUTPUT)gpio-hammer: $(GPIO_HAMMER_IN)
11496 + # gpio-event-mon
11497 + #
11498 + GPIO_EVENT_MON_IN := $(OUTPUT)gpio-event-mon-in.o
11499 +-$(GPIO_EVENT_MON_IN): prepare FORCE
11500 ++$(GPIO_EVENT_MON_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o
11501 + $(Q)$(MAKE) $(build)=gpio-event-mon
11502 + $(OUTPUT)gpio-event-mon: $(GPIO_EVENT_MON_IN)
11503 + $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
11504 +diff --git a/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk b/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk
11505 +index b02a36b2c14f..a42015b305f4 100644
11506 +--- a/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk
11507 ++++ b/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk
11508 +@@ -69,7 +69,7 @@ BEGIN {
11509 +
11510 + lprefix1_expr = "\\((66|!F3)\\)"
11511 + lprefix2_expr = "\\(F3\\)"
11512 +- lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)"
11513 ++ lprefix3_expr = "\\((F2|!F3|66&F2)\\)"
11514 + lprefix_expr = "\\((66|F2|F3)\\)"
11515 + max_lprefix = 4
11516 +
11517 +@@ -257,7 +257,7 @@ function convert_operands(count,opnd, i,j,imm,mod)
11518 + return add_flags(imm, mod)
11519 + }
11520 +
11521 +-/^[0-9a-f]+\:/ {
11522 ++/^[0-9a-f]+:/ {
11523 + if (NR == 1)
11524 + next
11525 + # get index
11526 +diff --git a/tools/power/acpi/tools/acpidump/apmain.c b/tools/power/acpi/tools/acpidump/apmain.c
11527 +index db213171f8d9..2d9b94b631cb 100644
11528 +--- a/tools/power/acpi/tools/acpidump/apmain.c
11529 ++++ b/tools/power/acpi/tools/acpidump/apmain.c
11530 +@@ -106,7 +106,7 @@ static int ap_insert_action(char *argument, u32 to_be_done)
11531 +
11532 + current_action++;
11533 + if (current_action > AP_MAX_ACTIONS) {
11534 +- fprintf(stderr, "Too many table options (max %u)\n",
11535 ++ fprintf(stderr, "Too many table options (max %d)\n",
11536 + AP_MAX_ACTIONS);
11537 + return (-1);
11538 + }
11539 +diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
11540 +index 823bbc741ad7..02d123871ef9 100644
11541 +--- a/tools/power/x86/turbostat/turbostat.c
11542 ++++ b/tools/power/x86/turbostat/turbostat.c
11543 +@@ -1,6 +1,6 @@
11544 + /*
11545 + * turbostat -- show CPU frequency and C-state residency
11546 +- * on modern Intel turbo-capable processors.
11547 ++ * on modern Intel and AMD processors.
11548 + *
11549 + * Copyright (c) 2013 Intel Corporation.
11550 + * Len Brown <len.brown@intel.com>
11551 +@@ -71,6 +71,8 @@ unsigned int do_irtl_snb;
11552 + unsigned int do_irtl_hsw;
11553 + unsigned int units = 1000000; /* MHz etc */
11554 + unsigned int genuine_intel;
11555 ++unsigned int authentic_amd;
11556 ++unsigned int max_level, max_extended_level;
11557 + unsigned int has_invariant_tsc;
11558 + unsigned int do_nhm_platform_info;
11559 + unsigned int no_MSR_MISC_PWR_MGMT;
11560 +@@ -1667,30 +1669,51 @@ int get_mp(int cpu, struct msr_counter *mp, unsigned long long *counterp)
11561 +
11562 + void get_apic_id(struct thread_data *t)
11563 + {
11564 +- unsigned int eax, ebx, ecx, edx, max_level;
11565 ++ unsigned int eax, ebx, ecx, edx;
11566 +
11567 +- eax = ebx = ecx = edx = 0;
11568 ++ if (DO_BIC(BIC_APIC)) {
11569 ++ eax = ebx = ecx = edx = 0;
11570 ++ __cpuid(1, eax, ebx, ecx, edx);
11571 +
11572 +- if (!genuine_intel)
11573 ++ t->apic_id = (ebx >> 24) & 0xff;
11574 ++ }
11575 ++
11576 ++ if (!DO_BIC(BIC_X2APIC))
11577 + return;
11578 +
11579 +- __cpuid(0, max_level, ebx, ecx, edx);
11580 ++ if (authentic_amd) {
11581 ++ unsigned int topology_extensions;
11582 +
11583 +- __cpuid(1, eax, ebx, ecx, edx);
11584 +- t->apic_id = (ebx >> 24) & 0xf;
11585 ++ if (max_extended_level < 0x8000001e)
11586 ++ return;
11587 +
11588 +- if (max_level < 0xb)
11589 ++ eax = ebx = ecx = edx = 0;
11590 ++ __cpuid(0x80000001, eax, ebx, ecx, edx);
11591 ++ topology_extensions = ecx & (1 << 22);
11592 ++
11593 ++ if (topology_extensions == 0)
11594 ++ return;
11595 ++
11596 ++ eax = ebx = ecx = edx = 0;
11597 ++ __cpuid(0x8000001e, eax, ebx, ecx, edx);
11598 ++
11599 ++ t->x2apic_id = eax;
11600 + return;
11601 ++ }
11602 +
11603 +- if (!DO_BIC(BIC_X2APIC))
11604 ++ if (!genuine_intel)
11605 ++ return;
11606 ++
11607 ++ if (max_level < 0xb)
11608 + return;
11609 +
11610 + ecx = 0;
11611 + __cpuid(0xb, eax, ebx, ecx, edx);
11612 + t->x2apic_id = edx;
11613 +
11614 +- if (debug && (t->apic_id != t->x2apic_id))
11615 +- fprintf(outf, "cpu%d: apic 0x%x x2apic 0x%x\n", t->cpu_id, t->apic_id, t->x2apic_id);
11616 ++ if (debug && (t->apic_id != (t->x2apic_id & 0xff)))
11617 ++ fprintf(outf, "cpu%d: BIOS BUG: apic 0x%x x2apic 0x%x\n",
11618 ++ t->cpu_id, t->apic_id, t->x2apic_id);
11619 + }
11620 +
11621 + /*
11622 +@@ -4439,16 +4462,18 @@ void decode_c6_demotion_policy_msr(void)
11623 +
11624 + void process_cpuid()
11625 + {
11626 +- unsigned int eax, ebx, ecx, edx, max_level, max_extended_level;
11627 +- unsigned int fms, family, model, stepping;
11628 ++ unsigned int eax, ebx, ecx, edx;
11629 ++ unsigned int fms, family, model, stepping, ecx_flags, edx_flags;
11630 + unsigned int has_turbo;
11631 +
11632 + eax = ebx = ecx = edx = 0;
11633 +
11634 + __cpuid(0, max_level, ebx, ecx, edx);
11635 +
11636 +- if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
11637 ++ if (ebx == 0x756e6547 && ecx == 0x6c65746e && edx == 0x49656e69)
11638 + genuine_intel = 1;
11639 ++ else if (ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65)
11640 ++ authentic_amd = 1;
11641 +
11642 + if (!quiet)
11643 + fprintf(outf, "CPUID(0): %.4s%.4s%.4s ",
11644 +@@ -4462,25 +4487,8 @@ void process_cpuid()
11645 + family += (fms >> 20) & 0xff;
11646 + if (family >= 6)
11647 + model += ((fms >> 16) & 0xf) << 4;
11648 +-
11649 +- if (!quiet) {
11650 +- fprintf(outf, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
11651 +- max_level, family, model, stepping, family, model, stepping);
11652 +- fprintf(outf, "CPUID(1): %s %s %s %s %s %s %s %s %s %s\n",
11653 +- ecx & (1 << 0) ? "SSE3" : "-",
11654 +- ecx & (1 << 3) ? "MONITOR" : "-",
11655 +- ecx & (1 << 6) ? "SMX" : "-",
11656 +- ecx & (1 << 7) ? "EIST" : "-",
11657 +- ecx & (1 << 8) ? "TM2" : "-",
11658 +- edx & (1 << 4) ? "TSC" : "-",
11659 +- edx & (1 << 5) ? "MSR" : "-",
11660 +- edx & (1 << 22) ? "ACPI-TM" : "-",
11661 +- edx & (1 << 28) ? "HT" : "-",
11662 +- edx & (1 << 29) ? "TM" : "-");
11663 +- }
11664 +-
11665 +- if (!(edx & (1 << 5)))
11666 +- errx(1, "CPUID: no MSR");
11667 ++ ecx_flags = ecx;
11668 ++ edx_flags = edx;
11669 +
11670 + /*
11671 + * check max extended function levels of CPUID.
11672 +@@ -4490,6 +4498,25 @@ void process_cpuid()
11673 + ebx = ecx = edx = 0;
11674 + __cpuid(0x80000000, max_extended_level, ebx, ecx, edx);
11675 +
11676 ++ if (!quiet) {
11677 ++ fprintf(outf, "0x%x CPUID levels; 0x%x xlevels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
11678 ++ max_level, max_extended_level, family, model, stepping, family, model, stepping);
11679 ++ fprintf(outf, "CPUID(1): %s %s %s %s %s %s %s %s %s %s\n",
11680 ++ ecx_flags & (1 << 0) ? "SSE3" : "-",
11681 ++ ecx_flags & (1 << 3) ? "MONITOR" : "-",
11682 ++ ecx_flags & (1 << 6) ? "SMX" : "-",
11683 ++ ecx_flags & (1 << 7) ? "EIST" : "-",
11684 ++ ecx_flags & (1 << 8) ? "TM2" : "-",
11685 ++ edx_flags & (1 << 4) ? "TSC" : "-",
11686 ++ edx_flags & (1 << 5) ? "MSR" : "-",
11687 ++ edx_flags & (1 << 22) ? "ACPI-TM" : "-",
11688 ++ edx_flags & (1 << 28) ? "HT" : "-",
11689 ++ edx_flags & (1 << 29) ? "TM" : "-");
11690 ++ }
11691 ++
11692 ++ if (!(edx_flags & (1 << 5)))
11693 ++ errx(1, "CPUID: no MSR");
11694 ++
11695 + if (max_extended_level >= 0x80000007) {
11696 +
11697 + /*
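The turbostat hunks add AMD awareness: CPUID leaf 0 identifies the vendor ("GenuineIntel"/"AuthenticAMD" spread across ebx, edx and ecx), and on AMD the extended APIC id is read from leaf 0x8000001e, which is only meaningful when the topology-extensions bit (ECX bit 22 of leaf 0x80000001) is set and the maximum extended level is high enough. A standalone probe of the same leaves (GCC's <cpuid.h>, x86 only):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        __cpuid(0, eax, ebx, ecx, edx);
        if (ebx == 0x756e6547 && ecx == 0x6c65746e && edx == 0x49656e69)
                puts("GenuineIntel");
        else if (ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65)
                puts("AuthenticAMD");

        __cpuid(0x80000000, eax, ebx, ecx, edx);        /* max ext level */
        if (eax >= 0x8000001e) {
                __cpuid(0x80000001, eax, ebx, ecx, edx);
                if (ecx & (1u << 22)) {                 /* TOPOEXT */
                        __cpuid(0x8000001e, eax, ebx, ecx, edx);
                        printf("extended APIC id: 0x%x\n", eax);
                }
        }
        return 0;
}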
11698 +diff --git a/tools/testing/selftests/bpf/test_libbpf.sh b/tools/testing/selftests/bpf/test_libbpf.sh
11699 +index 8b1bc96d8e0c..2989b2e2d856 100755
11700 +--- a/tools/testing/selftests/bpf/test_libbpf.sh
11701 ++++ b/tools/testing/selftests/bpf/test_libbpf.sh
11702 +@@ -6,7 +6,7 @@ export TESTNAME=test_libbpf
11703 + # Determine selftest success via shell exit code
11704 + exit_handler()
11705 + {
11706 +- if (( $? == 0 )); then
11707 ++ if [ $? -eq 0 ]; then
11708 + echo "selftests: $TESTNAME [PASS]";
11709 + else
11710 + echo "$TESTNAME: failed at file $LAST_LOADED" 1>&2
11711 +diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
11712 +index cf156b353679..82922f13dcd3 100644
11713 +--- a/tools/testing/selftests/bpf/trace_helpers.c
11714 ++++ b/tools/testing/selftests/bpf/trace_helpers.c
11715 +@@ -41,6 +41,7 @@ int load_kallsyms(void)
11716 + syms[i].name = strdup(func);
11717 + i++;
11718 + }
11719 ++ fclose(f);
11720 + sym_cnt = i;
11721 + qsort(syms, sym_cnt, sizeof(struct ksym), ksym_cmp);
11722 + return 0;
11723 +diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc
11724 +index d026ff4e562f..92ffb3bd33d8 100644
11725 +--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc
11726 ++++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc
11727 +@@ -78,8 +78,11 @@ test_badarg "\$stackp" "\$stack0+10" "\$stack1-10"
11728 + echo "r ${PROBEFUNC} \$retval" > kprobe_events
11729 + ! echo "p ${PROBEFUNC} \$retval" > kprobe_events
11730 +
11731 ++# $comm was introduced in 4.8, older kernels reject it.
11732 ++if grep -A1 "fetcharg:" README | grep -q '\$comm' ; then
11733 + : "Comm access"
11734 + test_goodarg "\$comm"
11735 ++fi
11736 +
11737 + : "Indirect memory access"
11738 + test_goodarg "+0(${GOODREG})" "-0(${GOODREG})" "+10(\$stack)" \
11739 +diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
11740 +index 0c2cdc105f96..a9c4b5e21d7e 100644
11741 +--- a/tools/testing/selftests/kvm/dirty_log_test.c
11742 ++++ b/tools/testing/selftests/kvm/dirty_log_test.c
11743 +@@ -31,9 +31,9 @@
11744 + /* How many pages to dirty for each guest loop */
11745 + #define TEST_PAGES_PER_LOOP 1024
11746 + /* How many host loops to run (one KVM_GET_DIRTY_LOG for each loop) */
11747 +-#define TEST_HOST_LOOP_N 32
11748 ++#define TEST_HOST_LOOP_N 32UL
11749 + /* Interval for each host loop (ms) */
11750 +-#define TEST_HOST_LOOP_INTERVAL 10
11751 ++#define TEST_HOST_LOOP_INTERVAL 10UL
11752 +
11753 + /*
11754 + * Guest variables. We use these variables to share data between host
11755 +diff --git a/tools/testing/selftests/powerpc/cache_shape/Makefile b/tools/testing/selftests/powerpc/cache_shape/Makefile
11756 +index ede4d3dae750..689f6c8ebcd8 100644
11757 +--- a/tools/testing/selftests/powerpc/cache_shape/Makefile
11758 ++++ b/tools/testing/selftests/powerpc/cache_shape/Makefile
11759 +@@ -1,12 +1,7 @@
11760 + # SPDX-License-Identifier: GPL-2.0
11761 +-TEST_PROGS := cache_shape
11762 +-
11763 +-all: $(TEST_PROGS)
11764 +-
11765 +-$(TEST_PROGS): ../harness.c ../utils.c
11766 ++TEST_GEN_PROGS := cache_shape
11767 +
11768 + top_srcdir = ../../../../..
11769 + include ../../lib.mk
11770 +
11771 +-clean:
11772 +- rm -f $(TEST_PROGS) *.o
11773 ++$(TEST_GEN_PROGS): ../harness.c ../utils.c
11774 +diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile
11775 +index 923d531265f8..9f9423430059 100644
11776 +--- a/tools/testing/selftests/powerpc/ptrace/Makefile
11777 ++++ b/tools/testing/selftests/powerpc/ptrace/Makefile
11778 +@@ -1,5 +1,5 @@
11779 + # SPDX-License-Identifier: GPL-2.0
11780 +-TEST_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \
11781 ++TEST_GEN_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \
11782 + ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar ptrace-vsx ptrace-tm-vsx \
11783 + ptrace-tm-spd-vsx ptrace-tm-spr ptrace-hwbreak ptrace-pkey core-pkey \
11784 + perf-hwbreak
11785 +@@ -7,14 +7,9 @@ TEST_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \
11786 + top_srcdir = ../../../../..
11787 + include ../../lib.mk
11788 +
11789 +-all: $(TEST_PROGS)
11790 +-
11791 + CFLAGS += -m64 -I../../../../../usr/include -I../tm -mhtm -fno-pie
11792 +
11793 +-ptrace-pkey core-pkey: child.h
11794 +-ptrace-pkey core-pkey: LDLIBS += -pthread
11795 +-
11796 +-$(TEST_PROGS): ../harness.c ../utils.c ../lib/reg.S ptrace.h
11797 ++$(OUTPUT)/ptrace-pkey $(OUTPUT)/core-pkey: child.h
11798 ++$(OUTPUT)/ptrace-pkey $(OUTPUT)/core-pkey: LDLIBS += -pthread
11799 +
11800 +-clean:
11801 +- rm -f $(TEST_PROGS) *.o
11802 ++$(TEST_GEN_PROGS): ../harness.c ../utils.c ../lib/reg.S ptrace.h
11803 +diff --git a/tools/testing/selftests/powerpc/signal/Makefile b/tools/testing/selftests/powerpc/signal/Makefile
11804 +index 1fca25c6ace0..209a958dca12 100644
11805 +--- a/tools/testing/selftests/powerpc/signal/Makefile
11806 ++++ b/tools/testing/selftests/powerpc/signal/Makefile
11807 +@@ -1,15 +1,10 @@
11808 + # SPDX-License-Identifier: GPL-2.0
11809 +-TEST_PROGS := signal signal_tm
11810 +-
11811 +-all: $(TEST_PROGS)
11812 +-
11813 +-$(TEST_PROGS): ../harness.c ../utils.c signal.S
11814 ++TEST_GEN_PROGS := signal signal_tm
11815 +
11816 + CFLAGS += -maltivec
11817 +-signal_tm: CFLAGS += -mhtm
11818 ++$(OUTPUT)/signal_tm: CFLAGS += -mhtm
11819 +
11820 + top_srcdir = ../../../../..
11821 + include ../../lib.mk
11822 +
11823 +-clean:
11824 +- rm -f $(TEST_PROGS) *.o
11825 ++$(TEST_GEN_PROGS): ../harness.c ../utils.c signal.S
11826 +diff --git a/tools/testing/selftests/powerpc/switch_endian/Makefile b/tools/testing/selftests/powerpc/switch_endian/Makefile
11827 +index fcd2dcb8972b..bdc081afedb0 100644
11828 +--- a/tools/testing/selftests/powerpc/switch_endian/Makefile
11829 ++++ b/tools/testing/selftests/powerpc/switch_endian/Makefile
11830 +@@ -8,6 +8,7 @@ EXTRA_CLEAN = $(OUTPUT)/*.o $(OUTPUT)/check-reversed.S
11831 + top_srcdir = ../../../../..
11832 + include ../../lib.mk
11833 +
11834 ++$(OUTPUT)/switch_endian_test: ASFLAGS += -I $(OUTPUT)
11835 + $(OUTPUT)/switch_endian_test: $(OUTPUT)/check-reversed.S
11836 +
11837 + $(OUTPUT)/check-reversed.o: $(OUTPUT)/check.o
11838 +diff --git a/tools/testing/selftests/proc/fd-001-lookup.c b/tools/testing/selftests/proc/fd-001-lookup.c
11839 +index a2010dfb2110..60d7948e7124 100644
11840 +--- a/tools/testing/selftests/proc/fd-001-lookup.c
11841 ++++ b/tools/testing/selftests/proc/fd-001-lookup.c
11842 +@@ -14,7 +14,7 @@
11843 + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
11844 + */
11845 + // Test /proc/*/fd lookup.
11846 +-#define _GNU_SOURCE
11847 ++
11848 + #undef NDEBUG
11849 + #include <assert.h>
11850 + #include <dirent.h>
11851 +diff --git a/tools/testing/selftests/proc/fd-003-kthread.c b/tools/testing/selftests/proc/fd-003-kthread.c
11852 +index 1d659d55368c..dc591f97b63d 100644
11853 +--- a/tools/testing/selftests/proc/fd-003-kthread.c
11854 ++++ b/tools/testing/selftests/proc/fd-003-kthread.c
11855 +@@ -14,7 +14,7 @@
11856 + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
11857 + */
11858 + // Test that /proc/$KERNEL_THREAD/fd/ is empty.
11859 +-#define _GNU_SOURCE
11860 ++
11861 + #undef NDEBUG
11862 + #include <sys/syscall.h>
11863 + #include <assert.h>
11864 +diff --git a/tools/testing/selftests/vm/gup_benchmark.c b/tools/testing/selftests/vm/gup_benchmark.c
11865 +index 9601bc24454d..17da711f26af 100644
11866 +--- a/tools/testing/selftests/vm/gup_benchmark.c
11867 ++++ b/tools/testing/selftests/vm/gup_benchmark.c
11868 +@@ -51,6 +51,7 @@ int main(int argc, char **argv)
11869 + break;
11870 + case 'w':
11871 + write = 1;
11872 ++ break;
11873 + default:
11874 + return -1;
11875 + }
11876 +diff --git a/tools/testing/selftests/watchdog/watchdog-test.c b/tools/testing/selftests/watchdog/watchdog-test.c
11877 +index 6e290874b70e..f1c6e025cbe5 100644
11878 +--- a/tools/testing/selftests/watchdog/watchdog-test.c
11879 ++++ b/tools/testing/selftests/watchdog/watchdog-test.c
11880 +@@ -89,7 +89,13 @@ int main(int argc, char *argv[])
11881 + fd = open("/dev/watchdog", O_WRONLY);
11882 +
11883 + if (fd == -1) {
11884 +- printf("Watchdog device not enabled.\n");
11885 ++ if (errno == ENOENT)
11886 ++ printf("Watchdog device not enabled.\n");
11887 ++ else if (errno == EACCES)
11888 ++ printf("Run watchdog as root.\n");
11889 ++ else
11890 ++ printf("Watchdog device open failed %s\n",
11891 ++ strerror(errno));
11892 + exit(-1);
11893 + }
11894 +
11895 +@@ -103,7 +109,7 @@ int main(int argc, char *argv[])
11896 + printf("Last boot is caused by: %s.\n", (flags != 0) ?
11897 + "Watchdog" : "Power-On-Reset");
11898 + else
11899 +- printf("WDIOC_GETBOOTSTATUS errno '%s'\n", strerror(errno));
11900 ++ printf("WDIOC_GETBOOTSTATUS error '%s'\n", strerror(errno));
11901 + break;
11902 + case 'd':
11903 + flags = WDIOS_DISABLECARD;
11904 +@@ -111,7 +117,7 @@ int main(int argc, char *argv[])
11905 + if (!ret)
11906 + printf("Watchdog card disabled.\n");
11907 + else
11908 +- printf("WDIOS_DISABLECARD errno '%s'\n", strerror(errno));
11909 ++ printf("WDIOS_DISABLECARD error '%s'\n", strerror(errno));
11910 + break;
11911 + case 'e':
11912 + flags = WDIOS_ENABLECARD;
11913 +@@ -119,7 +125,7 @@ int main(int argc, char *argv[])
11914 + if (!ret)
11915 + printf("Watchdog card enabled.\n");
11916 + else
11917 +- printf("WDIOS_ENABLECARD errno '%s'\n", strerror(errno));
11918 ++ printf("WDIOS_ENABLECARD error '%s'\n", strerror(errno));
11919 + break;
11920 + case 'p':
11921 + ping_rate = strtoul(optarg, NULL, 0);
11922 +@@ -133,7 +139,7 @@ int main(int argc, char *argv[])
11923 + if (!ret)
11924 + printf("Watchdog timeout set to %u seconds.\n", flags);
11925 + else
11926 +- printf("WDIOC_SETTIMEOUT errno '%s'\n", strerror(errno));
11927 ++ printf("WDIOC_SETTIMEOUT error '%s'\n", strerror(errno));
11928 + break;
11929 + default:
11930 + usage(argv[0]);
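[The watchdog-test hunks above replace one generic open-failure message with errno-specific ones and correct the "errno"/"error" wording in the ioctl reports. A self-contained sketch of the same open-and-report pattern, illustrative only; a real client must keep pinging the device, and the 'V' magic-close write assumes CONFIG_WATCHDOG_NOWAYOUT is unset:

	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/dev/watchdog", O_WRONLY);

		if (fd == -1) {
			if (errno == ENOENT)
				printf("Watchdog device not enabled.\n");
			else if (errno == EACCES)
				printf("Run watchdog as root.\n");
			else
				printf("Watchdog device open failed %s\n",
					strerror(errno));
			return 1;
		}
		/* Disarm via magic close so the machine is not reset. */
		if (write(fd, "V", 1) < 0)
			printf("magic close failed '%s'\n", strerror(errno));
		close(fd);
		return 0;
	}
]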
11931 +diff --git a/tools/usb/usbip/libsrc/usbip_host_common.c b/tools/usb/usbip/libsrc/usbip_host_common.c
11932 +index dc93fadbee96..b0f7489d069d 100644
11933 +--- a/tools/usb/usbip/libsrc/usbip_host_common.c
11934 ++++ b/tools/usb/usbip/libsrc/usbip_host_common.c
11935 +@@ -43,7 +43,7 @@ static int32_t read_attr_usbip_status(struct usbip_usb_device *udev)
11936 + int size;
11937 + int fd;
11938 + int length;
11939 +- char status;
11940 ++ char status[2] = { 0 };
11941 + int value = 0;
11942 +
11943 + size = snprintf(status_attr_path, sizeof(status_attr_path),
11944 +@@ -61,15 +61,15 @@ static int32_t read_attr_usbip_status(struct usbip_usb_device *udev)
11945 + return -1;
11946 + }
11947 +
11948 +- length = read(fd, &status, 1);
11949 ++ length = read(fd, status, 1);
11950 + if (length < 0) {
11951 + err("error reading attribute %s", status_attr_path);
11952 + close(fd);
11953 + return -1;
11954 + }
11955 +
11956 +- value = atoi(&status);
11957 +-
11958 ++ value = atoi(status);
11959 ++ close(fd);
11960 + return value;
11961 + }
11962 +
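[The usbip hunk above fixes two bugs at once: atoi() was handed the address of a single char that read() never NUL-terminates (undefined behaviour), and the file descriptor leaked on the success path. A minimal sketch of the string-termination half; the '3' assignment stands in for the one-byte read from sysfs:

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		char status[2] = { 0 };	/* status[1] stays '\0' */

		status[0] = '3';	/* as if: read(fd, status, 1) */
		printf("parsed status: %d\n", atoi(status));	/* 3 */
		return 0;
	}

With the old lone "char status", atoi(&status) would scan past that byte into whatever happens to follow on the stack.]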
11963 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
11964 +index 7a0d86d52230..df3fc0f214ec 100644
11965 +--- a/virt/kvm/kvm_main.c
11966 ++++ b/virt/kvm/kvm_main.c
11967 +@@ -147,10 +147,30 @@ __weak int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
11968 + return 0;
11969 + }
11970 +
11971 ++bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
11972 ++{
11973 ++ /*
11974 ++ * The metadata used by is_zone_device_page() to determine whether or
11975 ++ * not a page is ZONE_DEVICE is guaranteed to be valid if and only if
11976 ++ * the device has been pinned, e.g. by get_user_pages(). WARN if the
11977 ++ * page_count() is zero to help detect bad usage of this helper.
11978 ++ */
11979 ++ if (!pfn_valid(pfn) || WARN_ON_ONCE(!page_count(pfn_to_page(pfn))))
11980 ++ return false;
11981 ++
11982 ++ return is_zone_device_page(pfn_to_page(pfn));
11983 ++}
11984 ++
11985 + bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
11986 + {
11987 ++ /*
11988 ++ * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
11989 ++ * perspective they are "normal" pages, albeit with slightly different
11990 ++ * usage rules.
11991 ++ */
11992 + if (pfn_valid(pfn))
11993 +- return PageReserved(pfn_to_page(pfn));
11994 ++ return PageReserved(pfn_to_page(pfn)) &&
11995 ++ !kvm_is_zone_device_pfn(pfn);
11996 +
11997 + return true;
11998 + }
11999 +@@ -1727,7 +1747,7 @@ EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
12000 +
12001 + void kvm_set_pfn_dirty(kvm_pfn_t pfn)
12002 + {
12003 +- if (!kvm_is_reserved_pfn(pfn)) {
12004 ++ if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) {
12005 + struct page *page = pfn_to_page(pfn);
12006 +
12007 + if (!PageReserved(page))
12008 +@@ -1738,7 +1758,7 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
12009 +
12010 + void kvm_set_pfn_accessed(kvm_pfn_t pfn)
12011 + {
12012 +- if (!kvm_is_reserved_pfn(pfn))
12013 ++ if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn))
12014 + mark_page_accessed(pfn_to_page(pfn));
12015 + }
12016 + EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
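[The kvm_main.c hunk above stops treating ZONE_DEVICE pages as "reserved": they set PG_reserved but are refcounted like normal pages, so dirty/accessed tracking must still apply, while the page_count() sanity check guards against consulting metadata of unpinned pages. A self-contained userspace model of the new decision logic; struct page_model and both helpers are invented stand-ins, not kernel API:

	#include <stdbool.h>
	#include <stdio.h>

	struct page_model {
		bool valid;		/* pfn_valid() */
		bool reserved;		/* PageReserved() */
		bool zone_device;	/* is_zone_device_page() */
		int count;		/* page_count() */
	};

	static bool is_zone_device(const struct page_model *p)
	{
		/* Metadata is only trustworthy for pinned pages. */
		if (!p->valid || p->count == 0)
			return false;
		return p->zone_device;
	}

	static bool is_reserved(const struct page_model *p)
	{
		if (p->valid)
			return p->reserved && !is_zone_device(p);
		return true;	/* invalid pfns stay "reserved" */
	}

	int main(void)
	{
		struct page_model dax = { true, true, true, 1 };

		/* Prints "no": such a page now gets normal dirty/accessed
		 * handling, where the old code would have skipped it. */
		printf("DAX page reserved? %s\n",
			is_reserved(&dax) ? "yes" : "no");
		return 0;
	}
]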