From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Thu, 10 Jun 2021 11:59:10
Message-Id: 1623326333.ec4d12b78bf27d396119c1a6bad2d74b1fe151c1.mpagano@gentoo
1 commit: ec4d12b78bf27d396119c1a6bad2d74b1fe151c1
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Thu Jun 10 11:58:53 2021 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Thu Jun 10 11:58:53 2021 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ec4d12b7
7
8 Linux patch 5.4.125
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1124_linux-5.4.125.patch | 3019 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 3023 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index fbcce52..ebe64c3 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -539,6 +539,10 @@ Patch: 1123_linux-5.4.124.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.4.124
23
24 +Patch: 1124_linux-5.4.125.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.4.125
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1124_linux-5.4.125.patch b/1124_linux-5.4.125.patch
33 new file mode 100644
34 index 0000000..21a9802
35 --- /dev/null
36 +++ b/1124_linux-5.4.125.patch
37 @@ -0,0 +1,3019 @@
38 +diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst
39 +index fcedc5349ace4..2ad3c1fce5795 100644
40 +--- a/Documentation/core-api/xarray.rst
41 ++++ b/Documentation/core-api/xarray.rst
42 +@@ -461,13 +461,15 @@ or iterations will move the index to the first index in the range.
43 + Each entry will only be returned once, no matter how many indices it
44 + occupies.
45 +
46 +-Using xas_next() or xas_prev() with a multi-index xa_state
47 +-is not supported. Using either of these functions on a multi-index entry
48 +-will reveal sibling entries; these should be skipped over by the caller.
49 +-
50 +-Storing ``NULL`` into any index of a multi-index entry will set the entry
51 +-at every index to ``NULL`` and dissolve the tie. Splitting a multi-index
52 +-entry into entries occupying smaller ranges is not yet supported.
53 ++Using xas_next() or xas_prev() with a multi-index xa_state is not
54 ++supported. Using either of these functions on a multi-index entry will
55 ++reveal sibling entries; these should be skipped over by the caller.
56 ++
57 ++Storing ``NULL`` into any index of a multi-index entry will set the
58 ++entry at every index to ``NULL`` and dissolve the tie. A multi-index
59 ++entry can be split into entries occupying smaller ranges by calling
60 ++xas_split_alloc() without the xa_lock held, followed by taking the lock
61 ++and calling xas_split().
62 +
63 + Functions and structures
64 + ========================
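
As a minimal illustration of the split sequence the documentation hunk
above describes — assuming the xas_split_alloc()/xas_split() signatures
from current mainline; the helper name below is hypothetical and the
snippet is not part of the patch:

	#include <linux/xarray.h>

	/*
	 * Illustrative sketch only: split a multi-index entry of old_order
	 * stored at @index into entries of new_order. Error handling via
	 * xas_error() is omitted for brevity.
	 */
	static void split_multi_index(struct xarray *xa, unsigned long index,
				      void *entry, unsigned int old_order,
				      unsigned int new_order)
	{
		XA_STATE_ORDER(xas, xa, index, new_order);

		/* Allocate the nodes the split will need, without xa_lock held. */
		xas_split_alloc(&xas, entry, old_order, GFP_KERNEL);

		/* Then take the lock and perform the split. */
		xas_lock(&xas);
		xas_split(&xas, entry, old_order);
		xas_unlock(&xas);
	}

The two-step shape exists because xas_split_alloc() may sleep when
allocating with GFP_KERNEL, which is why the documentation has callers
do the allocation before taking the xa_lock.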
65 +diff --git a/Makefile b/Makefile
66 +index 22668742d3d04..43e7b07eea80e 100644
67 +--- a/Makefile
68 ++++ b/Makefile
69 +@@ -1,7 +1,7 @@
70 + # SPDX-License-Identifier: GPL-2.0
71 + VERSION = 5
72 + PATCHLEVEL = 4
73 +-SUBLEVEL = 124
74 ++SUBLEVEL = 125
75 + EXTRAVERSION =
76 + NAME = Kleptomaniac Octopus
77 +
78 +diff --git a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
79 +index e8d800fec6379..ce4a5a8074422 100644
80 +--- a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
81 ++++ b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
82 +@@ -99,9 +99,13 @@
83 + phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
84 + phy-reset-duration = <20>;
85 + phy-supply = <&sw2_reg>;
86 +- phy-handle = <&ethphy0>;
87 + status = "okay";
88 +
89 ++ fixed-link {
90 ++ speed = <1000>;
91 ++ full-duplex;
92 ++ };
93 ++
94 + mdio {
95 + #address-cells = <1>;
96 + #size-cells = <0>;
97 +diff --git a/arch/arm/boot/dts/imx6q-dhcom-som.dtsi b/arch/arm/boot/dts/imx6q-dhcom-som.dtsi
98 +index 08a2e17e0539b..621894d13dcbc 100644
99 +--- a/arch/arm/boot/dts/imx6q-dhcom-som.dtsi
100 ++++ b/arch/arm/boot/dts/imx6q-dhcom-som.dtsi
101 +@@ -408,6 +408,18 @@
102 + vin-supply = <&sw1_reg>;
103 + };
104 +
105 ++&reg_pu {
106 ++ vin-supply = <&sw1_reg>;
107 ++};
108 ++
109 ++&reg_vdd1p1 {
110 ++ vin-supply = <&sw2_reg>;
111 ++};
112 ++
113 ++&reg_vdd2p5 {
114 ++ vin-supply = <&sw2_reg>;
115 ++};
116 ++
117 + &uart1 {
118 + pinctrl-names = "default";
119 + pinctrl-0 = <&pinctrl_uart1>;
120 +diff --git a/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi b/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi
121 +index 828cf3e39784a..c4e146f3341bb 100644
122 +--- a/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi
123 ++++ b/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi
124 +@@ -126,7 +126,7 @@
125 + compatible = "nxp,pca8574";
126 + reg = <0x3a>;
127 + gpio-controller;
128 +- #gpio-cells = <1>;
129 ++ #gpio-cells = <2>;
130 + };
131 + };
132 +
133 +diff --git a/arch/arm/boot/dts/imx7d-meerkat96.dts b/arch/arm/boot/dts/imx7d-meerkat96.dts
134 +index 5339210b63d0f..dd8003bd1fc09 100644
135 +--- a/arch/arm/boot/dts/imx7d-meerkat96.dts
136 ++++ b/arch/arm/boot/dts/imx7d-meerkat96.dts
137 +@@ -193,7 +193,7 @@
138 + pinctrl-names = "default";
139 + pinctrl-0 = <&pinctrl_usdhc1>;
140 + keep-power-in-suspend;
141 +- tuning-step = <2>;
142 ++ fsl,tuning-step = <2>;
143 + vmmc-supply = <&reg_3p3v>;
144 + no-1-8-v;
145 + broken-cd;
146 +diff --git a/arch/arm/boot/dts/imx7d-pico.dtsi b/arch/arm/boot/dts/imx7d-pico.dtsi
147 +index 6f50ebf31a0ab..8a8df54ff5639 100644
148 +--- a/arch/arm/boot/dts/imx7d-pico.dtsi
149 ++++ b/arch/arm/boot/dts/imx7d-pico.dtsi
150 +@@ -307,7 +307,7 @@
151 + pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
152 + cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
153 + bus-width = <4>;
154 +- tuning-step = <2>;
155 ++ fsl,tuning-step = <2>;
156 + vmmc-supply = <&reg_3p3v>;
157 + wakeup-source;
158 + no-1-8-v;
159 +diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
160 +index bd99fa68b7630..5a2a188debd1d 100644
161 +--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
162 ++++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
163 +@@ -151,8 +151,8 @@
164 + ddr: memory-controller@1080000 {
165 + compatible = "fsl,qoriq-memory-controller";
166 + reg = <0x0 0x1080000 0x0 0x1000>;
167 +- interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
168 +- big-endian;
169 ++ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
170 ++ little-endian;
171 + };
172 +
173 + dcfg: syscon@1e00000 {
174 +diff --git a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
175 +index 32ce14936b013..f385b143b3086 100644
176 +--- a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
177 ++++ b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
178 +@@ -45,8 +45,8 @@
179 + reg_12p0_main: regulator-12p0-main {
180 + compatible = "regulator-fixed";
181 + regulator-name = "12V_MAIN";
182 +- regulator-min-microvolt = <5000000>;
183 +- regulator-max-microvolt = <5000000>;
184 ++ regulator-min-microvolt = <12000000>;
185 ++ regulator-max-microvolt = <12000000>;
186 + regulator-always-on;
187 + };
188 +
189 +diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
190 +index 98a177dd1f89f..da649e90240c8 100644
191 +--- a/arch/arm64/kvm/sys_regs.c
192 ++++ b/arch/arm64/kvm/sys_regs.c
193 +@@ -432,14 +432,14 @@ static bool trap_bvr(struct kvm_vcpu *vcpu,
194 + struct sys_reg_params *p,
195 + const struct sys_reg_desc *rd)
196 + {
197 +- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
198 ++ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
199 +
200 + if (p->is_write)
201 + reg_to_dbg(vcpu, p, dbg_reg);
202 + else
203 + dbg_to_reg(vcpu, p, dbg_reg);
204 +
205 +- trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
206 ++ trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
207 +
208 + return true;
209 + }
210 +@@ -447,7 +447,7 @@ static bool trap_bvr(struct kvm_vcpu *vcpu,
211 + static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
212 + const struct kvm_one_reg *reg, void __user *uaddr)
213 + {
214 +- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
215 ++ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
216 +
217 + if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
218 + return -EFAULT;
219 +@@ -457,7 +457,7 @@ static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
220 + static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
221 + const struct kvm_one_reg *reg, void __user *uaddr)
222 + {
223 +- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
224 ++ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
225 +
226 + if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
227 + return -EFAULT;
228 +@@ -467,21 +467,21 @@ static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
229 + static void reset_bvr(struct kvm_vcpu *vcpu,
230 + const struct sys_reg_desc *rd)
231 + {
232 +- vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
233 ++ vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
234 + }
235 +
236 + static bool trap_bcr(struct kvm_vcpu *vcpu,
237 + struct sys_reg_params *p,
238 + const struct sys_reg_desc *rd)
239 + {
240 +- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
241 ++ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
242 +
243 + if (p->is_write)
244 + reg_to_dbg(vcpu, p, dbg_reg);
245 + else
246 + dbg_to_reg(vcpu, p, dbg_reg);
247 +
248 +- trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
249 ++ trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
250 +
251 + return true;
252 + }
253 +@@ -489,7 +489,7 @@ static bool trap_bcr(struct kvm_vcpu *vcpu,
254 + static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
255 + const struct kvm_one_reg *reg, void __user *uaddr)
256 + {
257 +- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
258 ++ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
259 +
260 + if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
261 + return -EFAULT;
262 +@@ -500,7 +500,7 @@ static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
263 + static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
264 + const struct kvm_one_reg *reg, void __user *uaddr)
265 + {
266 +- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
267 ++ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
268 +
269 + if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
270 + return -EFAULT;
271 +@@ -510,22 +510,22 @@ static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
272 + static void reset_bcr(struct kvm_vcpu *vcpu,
273 + const struct sys_reg_desc *rd)
274 + {
275 +- vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
276 ++ vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
277 + }
278 +
279 + static bool trap_wvr(struct kvm_vcpu *vcpu,
280 + struct sys_reg_params *p,
281 + const struct sys_reg_desc *rd)
282 + {
283 +- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
284 ++ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
285 +
286 + if (p->is_write)
287 + reg_to_dbg(vcpu, p, dbg_reg);
288 + else
289 + dbg_to_reg(vcpu, p, dbg_reg);
290 +
291 +- trace_trap_reg(__func__, rd->reg, p->is_write,
292 +- vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);
293 ++ trace_trap_reg(__func__, rd->CRm, p->is_write,
294 ++ vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);
295 +
296 + return true;
297 + }
298 +@@ -533,7 +533,7 @@ static bool trap_wvr(struct kvm_vcpu *vcpu,
299 + static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
300 + const struct kvm_one_reg *reg, void __user *uaddr)
301 + {
302 +- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
303 ++ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
304 +
305 + if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
306 + return -EFAULT;
307 +@@ -543,7 +543,7 @@ static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
308 + static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
309 + const struct kvm_one_reg *reg, void __user *uaddr)
310 + {
311 +- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
312 ++ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
313 +
314 + if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
315 + return -EFAULT;
316 +@@ -553,21 +553,21 @@ static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
317 + static void reset_wvr(struct kvm_vcpu *vcpu,
318 + const struct sys_reg_desc *rd)
319 + {
320 +- vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
321 ++ vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
322 + }
323 +
324 + static bool trap_wcr(struct kvm_vcpu *vcpu,
325 + struct sys_reg_params *p,
326 + const struct sys_reg_desc *rd)
327 + {
328 +- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
329 ++ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
330 +
331 + if (p->is_write)
332 + reg_to_dbg(vcpu, p, dbg_reg);
333 + else
334 + dbg_to_reg(vcpu, p, dbg_reg);
335 +
336 +- trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
337 ++ trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
338 +
339 + return true;
340 + }
341 +@@ -575,7 +575,7 @@ static bool trap_wcr(struct kvm_vcpu *vcpu,
342 + static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
343 + const struct kvm_one_reg *reg, void __user *uaddr)
344 + {
345 +- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
346 ++ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
347 +
348 + if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
349 + return -EFAULT;
350 +@@ -585,7 +585,7 @@ static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
351 + static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
352 + const struct kvm_one_reg *reg, void __user *uaddr)
353 + {
354 +- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
355 ++ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
356 +
357 + if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
358 + return -EFAULT;
359 +@@ -595,7 +595,7 @@ static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
360 + static void reset_wcr(struct kvm_vcpu *vcpu,
361 + const struct sys_reg_desc *rd)
362 + {
363 +- vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
364 ++ vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
365 + }
366 +
367 + static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
368 +diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
369 +index 5bef1575708dc..a49b1aeb2147b 100644
370 +--- a/arch/x86/include/asm/apic.h
371 ++++ b/arch/x86/include/asm/apic.h
372 +@@ -174,6 +174,7 @@ static inline int apic_is_clustered_box(void)
373 + extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask);
374 + extern void lapic_assign_system_vectors(void);
375 + extern void lapic_assign_legacy_vector(unsigned int isairq, bool replace);
376 ++extern void lapic_update_legacy_vectors(void);
377 + extern void lapic_online(void);
378 + extern void lapic_offline(void);
379 + extern bool apic_needs_pit(void);
380 +diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
381 +index 9b4df6eaa11a6..f913f62eb6c35 100644
382 +--- a/arch/x86/include/asm/kvm_para.h
383 ++++ b/arch/x86/include/asm/kvm_para.h
384 +@@ -6,8 +6,6 @@
385 + #include <asm/alternative.h>
386 + #include <uapi/asm/kvm_para.h>
387 +
388 +-extern void kvmclock_init(void);
389 +-
390 + #ifdef CONFIG_KVM_GUEST
391 + bool kvm_check_and_clear_guest_paused(void);
392 + #else
393 +@@ -85,13 +83,14 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
394 + }
395 +
396 + #ifdef CONFIG_KVM_GUEST
397 ++void kvmclock_init(void);
398 ++void kvmclock_disable(void);
399 + bool kvm_para_available(void);
400 + unsigned int kvm_arch_para_features(void);
401 + unsigned int kvm_arch_para_hints(void);
402 + void kvm_async_pf_task_wait(u32 token, int interrupt_kernel);
403 + void kvm_async_pf_task_wake(u32 token);
404 + u32 kvm_read_and_reset_pf_reason(void);
405 +-extern void kvm_disable_steal_time(void);
406 + void do_async_page_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
407 +
408 + #ifdef CONFIG_PARAVIRT_SPINLOCKS
409 +@@ -125,11 +124,6 @@ static inline u32 kvm_read_and_reset_pf_reason(void)
410 + {
411 + return 0;
412 + }
413 +-
414 +-static inline void kvm_disable_steal_time(void)
415 +-{
416 +- return;
417 +-}
418 + #endif
419 +
420 + #endif /* _ASM_X86_KVM_PARA_H */
421 +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
422 +index 7fafa859e9f25..4e4476b832be2 100644
423 +--- a/arch/x86/kernel/apic/apic.c
424 ++++ b/arch/x86/kernel/apic/apic.c
425 +@@ -2579,6 +2579,7 @@ static void __init apic_bsp_setup(bool upmode)
426 + end_local_APIC_setup();
427 + irq_remap_enable_fault_handling();
428 + setup_IO_APIC();
429 ++ lapic_update_legacy_vectors();
430 + }
431 +
432 + #ifdef CONFIG_UP_LATE_INIT
433 +diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
434 +index bf6662d37a334..6b8b6bf6c5d1c 100644
435 +--- a/arch/x86/kernel/apic/vector.c
436 ++++ b/arch/x86/kernel/apic/vector.c
437 +@@ -680,6 +680,26 @@ void lapic_assign_legacy_vector(unsigned int irq, bool replace)
438 + irq_matrix_assign_system(vector_matrix, ISA_IRQ_VECTOR(irq), replace);
439 + }
440 +
441 ++void __init lapic_update_legacy_vectors(void)
442 ++{
443 ++ unsigned int i;
444 ++
445 ++ if (IS_ENABLED(CONFIG_X86_IO_APIC) && nr_ioapics > 0)
446 ++ return;
447 ++
448 ++ /*
449 ++ * If the IO/APIC is disabled via config, kernel command line or
450 ++ * lack of enumeration then all legacy interrupts are routed
451 ++ * through the PIC. Make sure that they are marked as legacy
452 ++ * vectors. PIC_CASCADE_IRQ has already been marked in
453 ++ * lapic_assign_system_vectors().
454 ++ */
455 ++ for (i = 0; i < nr_legacy_irqs(); i++) {
456 ++ if (i != PIC_CASCADE_IR)
457 ++ lapic_assign_legacy_vector(i, true);
458 ++ }
459 ++}
460 ++
461 + void __init lapic_assign_system_vectors(void)
462 + {
463 + unsigned int i, vector = 0;
464 +diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
465 +index e820568ed4d5c..6ff2c7cac4c46 100644
466 +--- a/arch/x86/kernel/kvm.c
467 ++++ b/arch/x86/kernel/kvm.c
468 +@@ -24,6 +24,7 @@
469 + #include <linux/debugfs.h>
470 + #include <linux/nmi.h>
471 + #include <linux/swait.h>
472 ++#include <linux/syscore_ops.h>
473 + #include <asm/timer.h>
474 + #include <asm/cpu.h>
475 + #include <asm/traps.h>
476 +@@ -33,6 +34,7 @@
477 + #include <asm/apicdef.h>
478 + #include <asm/hypervisor.h>
479 + #include <asm/tlb.h>
480 ++#include <asm/reboot.h>
481 +
482 + static int kvmapf = 1;
483 +
484 +@@ -351,6 +353,14 @@ static void kvm_pv_disable_apf(void)
485 + smp_processor_id());
486 + }
487 +
488 ++static void kvm_disable_steal_time(void)
489 ++{
490 ++ if (!has_steal_clock)
491 ++ return;
492 ++
493 ++ wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
494 ++}
495 ++
496 + static void kvm_pv_guest_cpu_reboot(void *unused)
497 + {
498 + /*
499 +@@ -393,14 +403,6 @@ static u64 kvm_steal_clock(int cpu)
500 + return steal;
501 + }
502 +
503 +-void kvm_disable_steal_time(void)
504 +-{
505 +- if (!has_steal_clock)
506 +- return;
507 +-
508 +- wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
509 +-}
510 +-
511 + static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
512 + {
513 + early_set_memory_decrypted((unsigned long) ptr, size);
514 +@@ -428,6 +430,27 @@ static void __init sev_map_percpu_data(void)
515 + }
516 + }
517 +
518 ++static void kvm_guest_cpu_offline(bool shutdown)
519 ++{
520 ++ kvm_disable_steal_time();
521 ++ if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
522 ++ wrmsrl(MSR_KVM_PV_EOI_EN, 0);
523 ++ kvm_pv_disable_apf();
524 ++ if (!shutdown)
525 ++ apf_task_wake_all();
526 ++ kvmclock_disable();
527 ++}
528 ++
529 ++static int kvm_cpu_online(unsigned int cpu)
530 ++{
531 ++ unsigned long flags;
532 ++
533 ++ local_irq_save(flags);
534 ++ kvm_guest_cpu_init();
535 ++ local_irq_restore(flags);
536 ++ return 0;
537 ++}
538 ++
539 + #ifdef CONFIG_SMP
540 + #define KVM_IPI_CLUSTER_SIZE (2 * BITS_PER_LONG)
541 +
542 +@@ -547,29 +570,46 @@ static void __init kvm_smp_prepare_boot_cpu(void)
543 + kvm_spinlock_init();
544 + }
545 +
546 +-static void kvm_guest_cpu_offline(void)
547 ++static int kvm_cpu_down_prepare(unsigned int cpu)
548 + {
549 +- kvm_disable_steal_time();
550 +- if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
551 +- wrmsrl(MSR_KVM_PV_EOI_EN, 0);
552 +- kvm_pv_disable_apf();
553 +- apf_task_wake_all();
554 ++ unsigned long flags;
555 ++
556 ++ local_irq_save(flags);
557 ++ kvm_guest_cpu_offline(false);
558 ++ local_irq_restore(flags);
559 ++ return 0;
560 + }
561 +
562 +-static int kvm_cpu_online(unsigned int cpu)
563 ++#endif
564 ++
565 ++static int kvm_suspend(void)
566 + {
567 +- local_irq_disable();
568 +- kvm_guest_cpu_init();
569 +- local_irq_enable();
570 ++ kvm_guest_cpu_offline(false);
571 ++
572 + return 0;
573 + }
574 +
575 +-static int kvm_cpu_down_prepare(unsigned int cpu)
576 ++static void kvm_resume(void)
577 + {
578 +- local_irq_disable();
579 +- kvm_guest_cpu_offline();
580 +- local_irq_enable();
581 +- return 0;
582 ++ kvm_cpu_online(raw_smp_processor_id());
583 ++}
584 ++
585 ++static struct syscore_ops kvm_syscore_ops = {
586 ++ .suspend = kvm_suspend,
587 ++ .resume = kvm_resume,
588 ++};
589 ++
590 ++/*
591 ++ * After a PV feature is registered, the host will keep writing to the
592 ++ * registered memory location. If the guest happens to shutdown, this memory
593 ++ * won't be valid. In cases like kexec, in which you install a new kernel, this
594 ++ * means a random memory location will be kept being written.
595 ++ */
596 ++#ifdef CONFIG_KEXEC_CORE
597 ++static void kvm_crash_shutdown(struct pt_regs *regs)
598 ++{
599 ++ kvm_guest_cpu_offline(true);
600 ++ native_machine_crash_shutdown(regs);
601 + }
602 + #endif
603 +
604 +@@ -649,6 +689,12 @@ static void __init kvm_guest_init(void)
605 + kvm_guest_cpu_init();
606 + #endif
607 +
608 ++#ifdef CONFIG_KEXEC_CORE
609 ++ machine_ops.crash_shutdown = kvm_crash_shutdown;
610 ++#endif
611 ++
612 ++ register_syscore_ops(&kvm_syscore_ops);
613 ++
614 + /*
615 + * Hard lockup detection is enabled by default. Disable it, as guests
616 + * can get false positives too easily, for example if the host is
617 +diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
618 +index 904494b924c13..4a0802af2e3e0 100644
619 +--- a/arch/x86/kernel/kvmclock.c
620 ++++ b/arch/x86/kernel/kvmclock.c
621 +@@ -20,7 +20,6 @@
622 + #include <asm/hypervisor.h>
623 + #include <asm/mem_encrypt.h>
624 + #include <asm/x86_init.h>
625 +-#include <asm/reboot.h>
626 + #include <asm/kvmclock.h>
627 +
628 + static int kvmclock __initdata = 1;
629 +@@ -197,28 +196,9 @@ static void kvm_setup_secondary_clock(void)
630 + }
631 + #endif
632 +
633 +-/*
634 +- * After the clock is registered, the host will keep writing to the
635 +- * registered memory location. If the guest happens to shutdown, this memory
636 +- * won't be valid. In cases like kexec, in which you install a new kernel, this
637 +- * means a random memory location will be kept being written. So before any
638 +- * kind of shutdown from our side, we unregister the clock by writing anything
639 +- * that does not have the 'enable' bit set in the msr
640 +- */
641 +-#ifdef CONFIG_KEXEC_CORE
642 +-static void kvm_crash_shutdown(struct pt_regs *regs)
643 +-{
644 +- native_write_msr(msr_kvm_system_time, 0, 0);
645 +- kvm_disable_steal_time();
646 +- native_machine_crash_shutdown(regs);
647 +-}
648 +-#endif
649 +-
650 +-static void kvm_shutdown(void)
651 ++void kvmclock_disable(void)
652 + {
653 + native_write_msr(msr_kvm_system_time, 0, 0);
654 +- kvm_disable_steal_time();
655 +- native_machine_shutdown();
656 + }
657 +
658 + static void __init kvmclock_init_mem(void)
659 +@@ -346,10 +326,6 @@ void __init kvmclock_init(void)
660 + #endif
661 + x86_platform.save_sched_clock_state = kvm_save_sched_clock_state;
662 + x86_platform.restore_sched_clock_state = kvm_restore_sched_clock_state;
663 +- machine_ops.shutdown = kvm_shutdown;
664 +-#ifdef CONFIG_KEXEC_CORE
665 +- machine_ops.crash_shutdown = kvm_crash_shutdown;
666 +-#endif
667 + kvm_get_preset_lpj();
668 +
669 + /*
670 +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
671 +index b9d14fdbd2d81..074cd170912aa 100644
672 +--- a/arch/x86/kvm/svm.c
673 ++++ b/arch/x86/kvm/svm.c
674 +@@ -4057,7 +4057,7 @@ static int cr_interception(struct vcpu_svm *svm)
675 + err = 0;
676 + if (cr >= 16) { /* mov to cr */
677 + cr -= 16;
678 +- val = kvm_register_read(&svm->vcpu, reg);
679 ++ val = kvm_register_readl(&svm->vcpu, reg);
680 + switch (cr) {
681 + case 0:
682 + if (!check_selective_cr0_intercepted(svm, val))
683 +@@ -4102,7 +4102,7 @@ static int cr_interception(struct vcpu_svm *svm)
684 + kvm_queue_exception(&svm->vcpu, UD_VECTOR);
685 + return 1;
686 + }
687 +- kvm_register_write(&svm->vcpu, reg, val);
688 ++ kvm_register_writel(&svm->vcpu, reg, val);
689 + }
690 + return kvm_complete_insn_gp(&svm->vcpu, err);
691 + }
692 +@@ -4132,13 +4132,13 @@ static int dr_interception(struct vcpu_svm *svm)
693 + if (dr >= 16) { /* mov to DRn */
694 + if (!kvm_require_dr(&svm->vcpu, dr - 16))
695 + return 1;
696 +- val = kvm_register_read(&svm->vcpu, reg);
697 ++ val = kvm_register_readl(&svm->vcpu, reg);
698 + kvm_set_dr(&svm->vcpu, dr - 16, val);
699 + } else {
700 + if (!kvm_require_dr(&svm->vcpu, dr))
701 + return 1;
702 + kvm_get_dr(&svm->vcpu, dr, &val);
703 +- kvm_register_write(&svm->vcpu, reg, val);
704 ++ kvm_register_writel(&svm->vcpu, reg, val);
705 + }
706 +
707 + return kvm_skip_emulated_instruction(&svm->vcpu);
708 +diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
709 +index 4c0d4e4341961..72d2c0b656339 100644
710 +--- a/drivers/acpi/acpica/utdelete.c
711 ++++ b/drivers/acpi/acpica/utdelete.c
712 +@@ -285,6 +285,14 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
713 + }
714 + break;
715 +
716 ++ case ACPI_TYPE_LOCAL_ADDRESS_HANDLER:
717 ++
718 ++ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
719 ++ "***** Address handler %p\n", object));
720 ++
721 ++ acpi_os_delete_mutex(object->address_space.context_mutex);
722 ++ break;
723 ++
724 + default:
725 +
726 + break;
727 +diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
728 +index d59e1ca9990b6..90053c4a8290d 100644
729 +--- a/drivers/bus/ti-sysc.c
730 ++++ b/drivers/bus/ti-sysc.c
731 +@@ -1376,9 +1376,9 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
732 + SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
733 + /* Uarts on omap4 and later */
734 + SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
735 +- SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
736 ++ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
737 + SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
738 +- SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
739 ++ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
740 +
741 + /* Quirks that need to be set based on the module address */
742 + SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff,
743 +diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
744 +index b1af0de2e1008..e48298687b76d 100644
745 +--- a/drivers/firmware/efi/cper.c
746 ++++ b/drivers/firmware/efi/cper.c
747 +@@ -263,8 +263,7 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg)
748 + if (!msg || !(mem->validation_bits & CPER_MEM_VALID_MODULE_HANDLE))
749 + return 0;
750 +
751 +- n = 0;
752 +- len = CPER_REC_LEN - 1;
753 ++ len = CPER_REC_LEN;
754 + dmi_memdev_name(mem->mem_dev_handle, &bank, &device);
755 + if (bank && device)
756 + n = snprintf(msg, len, "DIMM location: %s %s ", bank, device);
757 +@@ -273,7 +272,6 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg)
758 + "DIMM location: not present. DMI handle: 0x%.4x ",
759 + mem->mem_dev_handle);
760 +
761 +- msg[n] = '\0';
762 + return n;
763 + }
764 +
765 +diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
766 +index 58452fde92cc0..5d343dc8e5354 100644
767 +--- a/drivers/firmware/efi/memattr.c
768 ++++ b/drivers/firmware/efi/memattr.c
769 +@@ -66,11 +66,6 @@ static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out)
770 + return false;
771 + }
772 +
773 +- if (!(in->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))) {
774 +- pr_warn("Entry attributes invalid: RO and XP bits both cleared\n");
775 +- return false;
776 +- }
777 +-
778 + if (PAGE_SIZE > EFI_PAGE_SIZE &&
779 + (!PAGE_ALIGNED(in->phys_addr) ||
780 + !PAGE_ALIGNED(in->num_pages << EFI_PAGE_SHIFT))) {
781 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
782 +index 2cdaf3b2a7217..39ca0718ced0c 100644
783 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
784 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
785 +@@ -351,7 +351,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
786 + {
787 + struct amdgpu_ctx *ctx;
788 + struct amdgpu_ctx_mgr *mgr;
789 +- unsigned long ras_counter;
790 +
791 + if (!fpriv)
792 + return -EINVAL;
793 +@@ -376,21 +375,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
794 + if (atomic_read(&ctx->guilty))
795 + out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;
796 +
797 +- /*query ue count*/
798 +- ras_counter = amdgpu_ras_query_error_count(adev, false);
799 +- /*ras counter is monotonic increasing*/
800 +- if (ras_counter != ctx->ras_counter_ue) {
801 +- out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
802 +- ctx->ras_counter_ue = ras_counter;
803 +- }
804 +-
805 +- /*query ce count*/
806 +- ras_counter = amdgpu_ras_query_error_count(adev, true);
807 +- if (ras_counter != ctx->ras_counter_ce) {
808 +- out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
809 +- ctx->ras_counter_ce = ras_counter;
810 +- }
811 +-
812 + mutex_unlock(&mgr->lock);
813 + return 0;
814 + }
815 +diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
816 +index 217084d56ab8c..9deef20a02699 100644
817 +--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
818 ++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
819 +@@ -354,6 +354,7 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
820 +
821 + error:
822 + dma_fence_put(fence);
823 ++ amdgpu_bo_unpin(bo);
824 + amdgpu_bo_unreserve(bo);
825 + amdgpu_bo_unref(&bo);
826 + return r;
827 +diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
828 +index abd86903875f0..fc4c074597539 100644
829 +--- a/drivers/hid/hid-magicmouse.c
830 ++++ b/drivers/hid/hid-magicmouse.c
831 +@@ -597,7 +597,7 @@ static int magicmouse_probe(struct hid_device *hdev,
832 + if (id->vendor == USB_VENDOR_ID_APPLE &&
833 + id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
834 + hdev->type != HID_TYPE_USBMOUSE)
835 +- return 0;
836 ++ return -ENODEV;
837 +
838 + msc = devm_kzalloc(&hdev->dev, sizeof(*msc), GFP_KERNEL);
839 + if (msc == NULL) {
840 +diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
841 +index d91e6679afb18..f290ba856323a 100644
842 +--- a/drivers/hid/hid-multitouch.c
843 ++++ b/drivers/hid/hid-multitouch.c
844 +@@ -611,9 +611,13 @@ static struct mt_report_data *mt_allocate_report_data(struct mt_device *td,
845 + if (!(HID_MAIN_ITEM_VARIABLE & field->flags))
846 + continue;
847 +
848 +- for (n = 0; n < field->report_count; n++) {
849 +- if (field->usage[n].hid == HID_DG_CONTACTID)
850 +- rdata->is_mt_collection = true;
851 ++ if (field->logical == HID_DG_FINGER || td->hdev->group != HID_GROUP_MULTITOUCH_WIN_8) {
852 ++ for (n = 0; n < field->report_count; n++) {
853 ++ if (field->usage[n].hid == HID_DG_CONTACTID) {
854 ++ rdata->is_mt_collection = true;
855 ++ break;
856 ++ }
857 ++ }
858 + }
859 + }
860 +
861 +diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
862 +index 96898983db990..6f7a3702b5fba 100644
863 +--- a/drivers/hid/i2c-hid/i2c-hid-core.c
864 ++++ b/drivers/hid/i2c-hid/i2c-hid-core.c
865 +@@ -50,6 +50,7 @@
866 + #define I2C_HID_QUIRK_BOGUS_IRQ BIT(4)
867 + #define I2C_HID_QUIRK_RESET_ON_RESUME BIT(5)
868 + #define I2C_HID_QUIRK_BAD_INPUT_SIZE BIT(6)
869 ++#define I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET BIT(7)
870 +
871 +
872 + /* flags */
873 +@@ -185,6 +186,11 @@ static const struct i2c_hid_quirks {
874 + I2C_HID_QUIRK_RESET_ON_RESUME },
875 + { USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720,
876 + I2C_HID_QUIRK_BAD_INPUT_SIZE },
877 ++ /*
878 ++ * Sending the wakeup after reset actually break ELAN touchscreen controller
879 ++ */
880 ++ { USB_VENDOR_ID_ELAN, HID_ANY_ID,
881 ++ I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET },
882 + { 0, 0 }
883 + };
884 +
885 +@@ -468,7 +474,8 @@ static int i2c_hid_hwreset(struct i2c_client *client)
886 + }
887 +
888 + /* At least some SIS devices need this after reset */
889 +- ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
890 ++ if (!(ihid->quirks & I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET))
891 ++ ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
892 +
893 + out_unlock:
894 + mutex_unlock(&ihid->reset_lock);
895 +@@ -1114,8 +1121,8 @@ static int i2c_hid_probe(struct i2c_client *client,
896 + hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID);
897 + hid->product = le16_to_cpu(ihid->hdesc.wProductID);
898 +
899 +- snprintf(hid->name, sizeof(hid->name), "%s %04hX:%04hX",
900 +- client->name, hid->vendor, hid->product);
901 ++ snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X",
902 ++ client->name, (u16)hid->vendor, (u16)hid->product);
903 + strlcpy(hid->phys, dev_name(&client->dev), sizeof(hid->phys));
904 +
905 + ihid->quirks = i2c_hid_lookup_quirk(hid->vendor, hid->product);
906 +diff --git a/drivers/hid/usbhid/hid-pidff.c b/drivers/hid/usbhid/hid-pidff.c
907 +index fddac7c72f645..07a9fe97d2e05 100644
908 +--- a/drivers/hid/usbhid/hid-pidff.c
909 ++++ b/drivers/hid/usbhid/hid-pidff.c
910 +@@ -1292,6 +1292,7 @@ int hid_pidff_init(struct hid_device *hid)
911 +
912 + if (pidff->pool[PID_DEVICE_MANAGED_POOL].value &&
913 + pidff->pool[PID_DEVICE_MANAGED_POOL].value[0] == 0) {
914 ++ error = -EPERM;
915 + hid_notice(hid,
916 + "device does not support device managed pool\n");
917 + goto fail;
918 +diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
919 +index 4212d022d2534..35c00420d855b 100644
920 +--- a/drivers/hwmon/dell-smm-hwmon.c
921 ++++ b/drivers/hwmon/dell-smm-hwmon.c
922 +@@ -792,10 +792,10 @@ static struct attribute *i8k_attrs[] = {
923 + static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
924 + int index)
925 + {
926 +- if (disallow_fan_support && index >= 8)
927 ++ if (disallow_fan_support && index >= 20)
928 + return 0;
929 + if (disallow_fan_type_call &&
930 +- (index == 9 || index == 12 || index == 15))
931 ++ (index == 21 || index == 25 || index == 28))
932 + return 0;
933 + if (index >= 0 && index <= 1 &&
934 + !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))
935 +diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
936 +index b56a427fb928f..c73b997899af8 100644
937 +--- a/drivers/i2c/busses/i2c-qcom-geni.c
938 ++++ b/drivers/i2c/busses/i2c-qcom-geni.c
939 +@@ -641,6 +641,14 @@ static int geni_i2c_remove(struct platform_device *pdev)
940 + return 0;
941 + }
942 +
943 ++static void geni_i2c_shutdown(struct platform_device *pdev)
944 ++{
945 ++ struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);
946 ++
947 ++ /* Make client i2c transfers start failing */
948 ++ i2c_mark_adapter_suspended(&gi2c->adap);
949 ++}
950 ++
951 + static int __maybe_unused geni_i2c_runtime_suspend(struct device *dev)
952 + {
953 + int ret;
954 +@@ -677,6 +685,8 @@ static int __maybe_unused geni_i2c_suspend_noirq(struct device *dev)
955 + {
956 + struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
957 +
958 ++ i2c_mark_adapter_suspended(&gi2c->adap);
959 ++
960 + if (!gi2c->suspended) {
961 + geni_i2c_runtime_suspend(dev);
962 + pm_runtime_disable(dev);
963 +@@ -686,8 +696,16 @@ static int __maybe_unused geni_i2c_suspend_noirq(struct device *dev)
964 + return 0;
965 + }
966 +
967 ++static int __maybe_unused geni_i2c_resume_noirq(struct device *dev)
968 ++{
969 ++ struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
970 ++
971 ++ i2c_mark_adapter_resumed(&gi2c->adap);
972 ++ return 0;
973 ++}
974 ++
975 + static const struct dev_pm_ops geni_i2c_pm_ops = {
976 +- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(geni_i2c_suspend_noirq, NULL)
977 ++ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(geni_i2c_suspend_noirq, geni_i2c_resume_noirq)
978 + SET_RUNTIME_PM_OPS(geni_i2c_runtime_suspend, geni_i2c_runtime_resume,
979 + NULL)
980 + };
981 +@@ -701,6 +719,7 @@ MODULE_DEVICE_TABLE(of, geni_i2c_dt_match);
982 + static struct platform_driver geni_i2c_driver = {
983 + .probe = geni_i2c_probe,
984 + .remove = geni_i2c_remove,
985 ++ .shutdown = geni_i2c_shutdown,
986 + .driver = {
987 + .name = "geni_i2c",
988 + .pm = &geni_i2c_pm_ops,
989 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
990 +index 0dba28bb309a2..00ae7a9a42bfe 100644
991 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
992 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
993 +@@ -7003,7 +7003,6 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
994 +
995 + pf->fw_fid = le16_to_cpu(resp->fid);
996 + pf->port_id = le16_to_cpu(resp->port_id);
997 +- bp->dev->dev_port = pf->port_id;
998 + memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
999 + pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
1000 + pf->max_vfs = le16_to_cpu(resp->max_vfs);
1001 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
1002 +index 218aada8949d9..68a2fcf4c0bf5 100644
1003 +--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
1004 ++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
1005 +@@ -2233,15 +2233,20 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
1006 + case XDP_TX:
1007 + xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
1008 + result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
1009 ++ if (result == I40E_XDP_CONSUMED)
1010 ++ goto out_failure;
1011 + break;
1012 + case XDP_REDIRECT:
1013 + err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
1014 +- result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
1015 ++ if (err)
1016 ++ goto out_failure;
1017 ++ result = I40E_XDP_REDIR;
1018 + break;
1019 + default:
1020 + bpf_warn_invalid_xdp_action(act);
1021 + /* fall through */
1022 + case XDP_ABORTED:
1023 ++out_failure:
1024 + trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
1025 + /* fall through -- handle aborts by dropping packet */
1026 + case XDP_DROP:
1027 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
1028 +index c9d4534fbdf02..a9ad788c4913d 100644
1029 +--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
1030 ++++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
1031 +@@ -212,21 +212,28 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
1032 +
1033 + xdp->handle = xsk_umem_adjust_offset(umem, xdp->handle, offset);
1034 +
1035 ++ if (likely(act == XDP_REDIRECT)) {
1036 ++ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
1037 ++ if (err)
1038 ++ goto out_failure;
1039 ++ rcu_read_unlock();
1040 ++ return I40E_XDP_REDIR;
1041 ++ }
1042 ++
1043 + switch (act) {
1044 + case XDP_PASS:
1045 + break;
1046 + case XDP_TX:
1047 + xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
1048 + result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
1049 +- break;
1050 +- case XDP_REDIRECT:
1051 +- err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
1052 +- result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
1053 ++ if (result == I40E_XDP_CONSUMED)
1054 ++ goto out_failure;
1055 + break;
1056 + default:
1057 + bpf_warn_invalid_xdp_action(act);
1058 + /* fall through */
1059 + case XDP_ABORTED:
1060 ++out_failure:
1061 + trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
1062 + /* fallthrough -- handle aborts by dropping packet */
1063 + case XDP_DROP:
1064 +diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
1065 +index 9138b19de87e0..f2bb83af4d9e8 100644
1066 +--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
1067 ++++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
1068 +@@ -34,6 +34,7 @@
1069 + #define PF_FW_ATQLEN_ATQOVFL_M BIT(29)
1070 + #define PF_FW_ATQLEN_ATQCRIT_M BIT(30)
1071 + #define VF_MBX_ARQLEN(_VF) (0x0022BC00 + ((_VF) * 4))
1072 ++#define VF_MBX_ATQLEN(_VF) (0x0022A800 + ((_VF) * 4))
1073 + #define PF_FW_ATQLEN_ATQENABLE_M BIT(31)
1074 + #define PF_FW_ATQT 0x00080400
1075 + #define PF_MBX_ARQBAH 0x0022E400
1076 +diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
1077 +index 33dd103035dcd..2b55efe5ed963 100644
1078 +--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
1079 ++++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
1080 +@@ -2109,6 +2109,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
1081 + struct ice_tx_offload_params offload = { 0 };
1082 + struct ice_vsi *vsi = tx_ring->vsi;
1083 + struct ice_tx_buf *first;
1084 ++ struct ethhdr *eth;
1085 + unsigned int count;
1086 + int tso, csum;
1087 +
1088 +@@ -2156,7 +2157,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
1089 + goto out_drop;
1090 +
1091 + /* allow CONTROL frames egress from main VSI if FW LLDP disabled */
1092 +- if (unlikely(skb->priority == TC_PRIO_CONTROL &&
1093 ++ eth = (struct ethhdr *)skb_mac_header(skb);
1094 ++ if (unlikely((skb->priority == TC_PRIO_CONTROL ||
1095 ++ eth->h_proto == htons(ETH_P_LLDP)) &&
1096 + vsi->type == ICE_VSI_PF &&
1097 + vsi->port_info->is_sw_lldp))
1098 + offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
1099 +diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
1100 +index e92a00a617556..5e97fdca5fab2 100644
1101 +--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
1102 ++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
1103 +@@ -384,13 +384,15 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
1104 + */
1105 + clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
1106 +
1107 +- /* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it
1108 +- * in the case of VFR. If this is done for PFR, it can mess up VF
1109 +- * resets because the VF driver may already have started cleanup
1110 +- * by the time we get here.
1111 ++ /* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
1112 ++ * needs to clear them in the case of VFR/VFLR. If this is done for
1113 ++ * PFR, it can mess up VF resets because the VF driver may already
1114 ++ * have started cleanup by the time we get here.
1115 + */
1116 +- if (!is_pfr)
1117 +- wr32(hw, VF_MBX_ARQLEN(vf_abs_id), 0);
1118 ++ if (!is_pfr) {
1119 ++ wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);
1120 ++ wr32(hw, VF_MBX_ATQLEN(vf->vf_id), 0);
1121 ++ }
1122 +
1123 + /* In the case of a VFLR, the HW has already reset the VF and we
1124 + * just need to clean up, so don't hit the VFRTRIG register.
1125 +diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
1126 +index 64ec0e7c64b49..be8e6d4e376ec 100644
1127 +--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
1128 ++++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
1129 +@@ -1079,11 +1079,14 @@ static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
1130 + case XDP_TX:
1131 + xdp_ring = adapter->xdp_ring[rx_ring->queue_index];
1132 + result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp);
1133 ++ if (result == IXGBEVF_XDP_CONSUMED)
1134 ++ goto out_failure;
1135 + break;
1136 + default:
1137 + bpf_warn_invalid_xdp_action(act);
1138 + /* fallthrough */
1139 + case XDP_ABORTED:
1140 ++out_failure:
1141 + trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
1142 + /* fallthrough -- handle aborts by dropping packet */
1143 + case XDP_DROP:
1144 +diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
1145 +index cbe7f35eac982..0646bcd269682 100644
1146 +--- a/drivers/net/usb/cdc_ncm.c
1147 ++++ b/drivers/net/usb/cdc_ncm.c
1148 +@@ -1589,6 +1589,15 @@ cdc_ncm_speed_change(struct usbnet *dev,
1149 + uint32_t rx_speed = le32_to_cpu(data->DLBitRRate);
1150 + uint32_t tx_speed = le32_to_cpu(data->ULBitRate);
1151 +
1152 ++ /* if the speed hasn't changed, don't report it.
1153 ++ * RTL8156 shipped before 2021 sends notification about every 32ms.
1154 ++ */
1155 ++ if (dev->rx_speed == rx_speed && dev->tx_speed == tx_speed)
1156 ++ return;
1157 ++
1158 ++ dev->rx_speed = rx_speed;
1159 ++ dev->tx_speed = tx_speed;
1160 ++
1161 + /*
1162 + * Currently the USB-NET API does not support reporting the actual
1163 + * device speed. Do print it instead.
1164 +@@ -1629,7 +1638,8 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
1165 + * USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
1166 + * sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
1167 + */
1168 +- usbnet_link_change(dev, !!event->wValue, 0);
1169 ++ if (netif_carrier_ok(dev->net) != !!event->wValue)
1170 ++ usbnet_link_change(dev, !!event->wValue, 0);
1171 + break;
1172 +
1173 + case USB_CDC_NOTIFY_SPEED_CHANGE:
1174 +diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
1175 +index 9da27ec22d588..44d74584c7275 100644
1176 +--- a/drivers/usb/dwc2/core_intr.c
1177 ++++ b/drivers/usb/dwc2/core_intr.c
1178 +@@ -712,7 +712,11 @@ static inline void dwc_handle_gpwrdn_disc_det(struct dwc2_hsotg *hsotg,
1179 + dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
1180 +
1181 + hsotg->hibernated = 0;
1182 ++
1183 ++#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || \
1184 ++ IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
1185 + hsotg->bus_suspended = 0;
1186 ++#endif
1187 +
1188 + if (gpwrdn & GPWRDN_IDSTS) {
1189 + hsotg->op_state = OTG_STATE_B_PERIPHERAL;
1190 +diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
1191 +index 4abddbebd4b23..c691127bc805a 100644
1192 +--- a/drivers/vfio/pci/Kconfig
1193 ++++ b/drivers/vfio/pci/Kconfig
1194 +@@ -2,6 +2,7 @@
1195 + config VFIO_PCI
1196 + tristate "VFIO support for PCI devices"
1197 + depends on VFIO && PCI && EVENTFD
1198 ++ depends on MMU
1199 + select VFIO_VIRQFD
1200 + select IRQ_BYPASS_MANAGER
1201 + help
1202 +diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
1203 +index bf32997c557ff..50cd17fcf7541 100644
1204 +--- a/drivers/vfio/pci/vfio_pci_config.c
1205 ++++ b/drivers/vfio/pci/vfio_pci_config.c
1206 +@@ -1576,7 +1576,7 @@ static int vfio_ecap_init(struct vfio_pci_device *vdev)
1207 + if (len == 0xFF) {
1208 + len = vfio_ext_cap_len(vdev, ecap, epos);
1209 + if (len < 0)
1210 +- return ret;
1211 ++ return len;
1212 + }
1213 + }
1214 +
1215 +diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
1216 +index 152e5188183ce..6f727034679f1 100644
1217 +--- a/drivers/vfio/platform/vfio_platform_common.c
1218 ++++ b/drivers/vfio/platform/vfio_platform_common.c
1219 +@@ -289,7 +289,7 @@ err_irq:
1220 + vfio_platform_regions_cleanup(vdev);
1221 + err_reg:
1222 + mutex_unlock(&driver_lock);
1223 +- module_put(THIS_MODULE);
1224 ++ module_put(vdev->parent_module);
1225 + return ret;
1226 + }
1227 +
1228 +diff --git a/drivers/xen/xen-pciback/vpci.c b/drivers/xen/xen-pciback/vpci.c
1229 +index f6ba18191c0f9..30313084f06c1 100644
1230 +--- a/drivers/xen/xen-pciback/vpci.c
1231 ++++ b/drivers/xen/xen-pciback/vpci.c
1232 +@@ -69,7 +69,7 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
1233 + struct pci_dev *dev, int devid,
1234 + publish_pci_dev_cb publish_cb)
1235 + {
1236 +- int err = 0, slot, func = -1;
1237 ++ int err = 0, slot, func = PCI_FUNC(dev->devfn);
1238 + struct pci_dev_entry *t, *dev_entry;
1239 + struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
1240 +
1241 +@@ -94,23 +94,26 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
1242 +
1243 + /*
1244 + * Keep multi-function devices together on the virtual PCI bus, except
1245 +- * virtual functions.
1246 ++ * that we want to keep virtual functions at func 0 on their own. They
1247 ++ * aren't multi-function devices and hence their presence at func 0
1248 ++ * may cause guests to not scan the other functions.
1249 + */
1250 +- if (!dev->is_virtfn) {
1251 ++ if (!dev->is_virtfn || func) {
1252 + for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
1253 + if (list_empty(&vpci_dev->dev_list[slot]))
1254 + continue;
1255 +
1256 + t = list_entry(list_first(&vpci_dev->dev_list[slot]),
1257 + struct pci_dev_entry, list);
1258 ++ if (t->dev->is_virtfn && !PCI_FUNC(t->dev->devfn))
1259 ++ continue;
1260 +
1261 + if (match_slot(dev, t->dev)) {
1262 + pr_info("vpci: %s: assign to virtual slot %d func %d\n",
1263 + pci_name(dev), slot,
1264 +- PCI_FUNC(dev->devfn));
1265 ++ func);
1266 + list_add_tail(&dev_entry->list,
1267 + &vpci_dev->dev_list[slot]);
1268 +- func = PCI_FUNC(dev->devfn);
1269 + goto unlock;
1270 + }
1271 + }
1272 +@@ -123,7 +126,6 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
1273 + pci_name(dev), slot);
1274 + list_add_tail(&dev_entry->list,
1275 + &vpci_dev->dev_list[slot]);
1276 +- func = dev->is_virtfn ? 0 : PCI_FUNC(dev->devfn);
1277 + goto unlock;
1278 + }
1279 + }
1280 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
1281 +index 6317394f02b86..832b40293907f 100644
1282 +--- a/fs/btrfs/extent-tree.c
1283 ++++ b/fs/btrfs/extent-tree.c
1284 +@@ -1338,16 +1338,20 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
1285 + for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1286 + u64 bytes;
1287 + struct request_queue *req_q;
1288 ++ struct btrfs_device *device = stripe->dev;
1289 +
1290 +- if (!stripe->dev->bdev) {
1291 ++ if (!device->bdev) {
1292 + ASSERT(btrfs_test_opt(fs_info, DEGRADED));
1293 + continue;
1294 + }
1295 +- req_q = bdev_get_queue(stripe->dev->bdev);
1296 ++ req_q = bdev_get_queue(device->bdev);
1297 + if (!blk_queue_discard(req_q))
1298 + continue;
1299 +
1300 +- ret = btrfs_issue_discard(stripe->dev->bdev,
1301 ++ if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
1302 ++ continue;
1303 ++
1304 ++ ret = btrfs_issue_discard(device->bdev,
1305 + stripe->physical,
1306 + stripe->length,
1307 + &bytes);
1308 +@@ -1879,7 +1883,7 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
1309 + trace_run_delayed_ref_head(fs_info, head, 0);
1310 + btrfs_delayed_ref_unlock(head);
1311 + btrfs_put_delayed_ref_head(head);
1312 +- return 0;
1313 ++ return ret;
1314 + }
1315 +
1316 + static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head(
1317 +diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
1318 +index 2b8f29c07668b..61b82c69eed50 100644
1319 +--- a/fs/btrfs/file-item.c
1320 ++++ b/fs/btrfs/file-item.c
1321 +@@ -599,7 +599,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
1322 + u64 end_byte = bytenr + len;
1323 + u64 csum_end;
1324 + struct extent_buffer *leaf;
1325 +- int ret;
1326 ++ int ret = 0;
1327 + u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
1328 + int blocksize_bits = fs_info->sb->s_blocksize_bits;
1329 +
1330 +@@ -618,6 +618,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
1331 + path->leave_spinning = 1;
1332 + ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1333 + if (ret > 0) {
1334 ++ ret = 0;
1335 + if (path->slots[0] == 0)
1336 + break;
1337 + path->slots[0]--;
1338 +@@ -674,7 +675,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
1339 + ret = btrfs_del_items(trans, root, path,
1340 + path->slots[0], del_nr);
1341 + if (ret)
1342 +- goto out;
1343 ++ break;
1344 + if (key.offset == bytenr)
1345 + break;
1346 + } else if (key.offset < bytenr && csum_end > end_byte) {
1347 +@@ -718,8 +719,9 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
1348 + ret = btrfs_split_item(trans, root, path, &key, offset);
1349 + if (ret && ret != -EAGAIN) {
1350 + btrfs_abort_transaction(trans, ret);
1351 +- goto out;
1352 ++ break;
1353 + }
1354 ++ ret = 0;
1355 +
1356 + key.offset = end_byte - 1;
1357 + } else {
1358 +@@ -729,8 +731,6 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
1359 + }
1360 + btrfs_release_path(path);
1361 + }
1362 +- ret = 0;
1363 +-out:
1364 + btrfs_free_path(path);
1365 + return ret;
1366 + }
1367 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
1368 +index 8ea9559c1919a..64dd702a5448c 100644
1369 +--- a/fs/btrfs/inode.c
1370 ++++ b/fs/btrfs/inode.c
1371 +@@ -3359,6 +3359,18 @@ out:
1372 + if (ret || truncated) {
1373 + u64 start, end;
1374 +
1375 ++ /*
1376 ++ * If we failed to finish this ordered extent for any reason we
1377 ++ * need to make sure BTRFS_ORDERED_IOERR is set on the ordered
1378 ++ * extent, and mark the inode with the error if it wasn't
1379 ++ * already set. Any error during writeback would have already
1380 ++ * set the mapping error, so we need to set it if we're the ones
1381 ++ * marking this ordered extent as failed.
1382 ++ */
1383 ++ if (ret && !test_and_set_bit(BTRFS_ORDERED_IOERR,
1384 ++ &ordered_extent->flags))
1385 ++ mapping_set_error(ordered_extent->inode->i_mapping, -EIO);
1386 ++
1387 + if (truncated)
1388 + start = ordered_extent->file_offset + logical_len;
1389 + else
1390 +diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
1391 +index 7d06842a3d747..368c43c6cbd08 100644
1392 +--- a/fs/btrfs/tree-checker.c
1393 ++++ b/fs/btrfs/tree-checker.c
1394 +@@ -1285,22 +1285,14 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
1395 + return -EUCLEAN;
1396 + }
1397 + for (; ptr < end; ptr += sizeof(*dref)) {
1398 +- u64 root_objectid;
1399 +- u64 owner;
1400 + u64 offset;
1401 +- u64 hash;
1402 +
1403 ++ /*
1404 ++ * We cannot check the extent_data_ref hash due to possible
1405 ++ * overflow from the leaf due to hash collisions.
1406 ++ */
1407 + dref = (struct btrfs_extent_data_ref *)ptr;
1408 +- root_objectid = btrfs_extent_data_ref_root(leaf, dref);
1409 +- owner = btrfs_extent_data_ref_objectid(leaf, dref);
1410 + offset = btrfs_extent_data_ref_offset(leaf, dref);
1411 +- hash = hash_extent_data_ref(root_objectid, owner, offset);
1412 +- if (hash != key->offset) {
1413 +- extent_err(leaf, slot,
1414 +- "invalid extent data ref hash, item has 0x%016llx key has 0x%016llx",
1415 +- hash, key->offset);
1416 +- return -EUCLEAN;
1417 +- }
1418 + if (!IS_ALIGNED(offset, leaf->fs_info->sectorsize)) {
1419 + extent_err(leaf, slot,
1420 + "invalid extent data backref offset, have %llu expect aligned to %u",
1421 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
1422 +index 54647eb9c6ed2..4ff381c23cefc 100644
1423 +--- a/fs/btrfs/tree-log.c
1424 ++++ b/fs/btrfs/tree-log.c
1425 +@@ -1775,6 +1775,7 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
1426 + break;
1427 +
1428 + if (ret == 1) {
1429 ++ ret = 0;
1430 + if (path->slots[0] == 0)
1431 + break;
1432 + path->slots[0]--;
1433 +@@ -1787,17 +1788,19 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
1434 +
1435 + ret = btrfs_del_item(trans, root, path);
1436 + if (ret)
1437 +- goto out;
1438 ++ break;
1439 +
1440 + btrfs_release_path(path);
1441 + inode = read_one_inode(root, key.offset);
1442 +- if (!inode)
1443 +- return -EIO;
1444 ++ if (!inode) {
1445 ++ ret = -EIO;
1446 ++ break;
1447 ++ }
1448 +
1449 + ret = fixup_inode_link_count(trans, root, inode);
1450 + iput(inode);
1451 + if (ret)
1452 +- goto out;
1453 ++ break;
1454 +
1455 + /*
1456 + * fixup on a directory may create new entries,
1457 +@@ -1806,8 +1809,6 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
1458 + */
1459 + key.offset = (u64)-1;
1460 + }
1461 +- ret = 0;
1462 +-out:
1463 + btrfs_release_path(path);
1464 + return ret;
1465 + }
1466 +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
1467 +index 3193f0b4a02d6..dbd0d7a101541 100644
1468 +--- a/fs/ext4/extents.c
1469 ++++ b/fs/ext4/extents.c
1470 +@@ -3378,7 +3378,10 @@ static int ext4_split_extent_at(handle_t *handle,
1471 + ext4_ext_mark_unwritten(ex2);
1472 +
1473 + err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
1474 +- if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
1475 ++ if (err != -ENOSPC && err != -EDQUOT)
1476 ++ goto out;
1477 ++
1478 ++ if (EXT4_EXT_MAY_ZEROOUT & split_flag) {
1479 + if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
1480 + if (split_flag & EXT4_EXT_DATA_VALID1) {
1481 + err = ext4_ext_zeroout(inode, ex2);
1482 +@@ -3404,30 +3407,30 @@ static int ext4_split_extent_at(handle_t *handle,
1483 + ext4_ext_pblock(&orig_ex));
1484 + }
1485 +
1486 +- if (err)
1487 +- goto fix_extent_len;
1488 +- /* update the extent length and mark as initialized */
1489 +- ex->ee_len = cpu_to_le16(ee_len);
1490 +- ext4_ext_try_to_merge(handle, inode, path, ex);
1491 +- err = ext4_ext_dirty(handle, inode, path + path->p_depth);
1492 +- if (err)
1493 +- goto fix_extent_len;
1494 +-
1495 +- /* update extent status tree */
1496 +- err = ext4_zeroout_es(inode, &zero_ex);
1497 +-
1498 +- goto out;
1499 +- } else if (err)
1500 +- goto fix_extent_len;
1501 +-
1502 +-out:
1503 +- ext4_ext_show_leaf(inode, path);
1504 +- return err;
1505 ++ if (!err) {
1506 ++ /* update the extent length and mark as initialized */
1507 ++ ex->ee_len = cpu_to_le16(ee_len);
1508 ++ ext4_ext_try_to_merge(handle, inode, path, ex);
1509 ++ err = ext4_ext_dirty(handle, inode, path + path->p_depth);
1510 ++ if (!err)
1511 ++ /* update extent status tree */
1512 ++ err = ext4_zeroout_es(inode, &zero_ex);
1513 ++		/* If we failed at this point, we don't know exactly what
1514 ++		 * state the extent tree is in, so don't try to fix the
1515 ++		 * length of the original extent; that may do even more
1516 ++		 * damage.
1517 ++ */
1518 ++ goto out;
1519 ++ }
1520 ++ }
1521 +
1522 + fix_extent_len:
1523 + ex->ee_len = orig_ex.ee_len;
1524 + ext4_ext_dirty(handle, inode, path + path->p_depth);
1525 + return err;
1526 ++out:
1527 ++ ext4_ext_show_leaf(inode, path);
1528 ++ return err;
1529 + }
1530 +
1531 + /*
1532 +diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
1533 +index ab2b0d74ad03e..c2852d7cc14d4 100644
1534 +--- a/fs/ocfs2/file.c
1535 ++++ b/fs/ocfs2/file.c
1536 +@@ -1855,6 +1855,45 @@ out:
1537 + return ret;
1538 + }
1539 +
1540 ++/*
1541 ++ * Zero out the partial blocks of one cluster.
1542 ++ *
1543 ++ * start: file offset where zeroing starts; it is rounded up to a block
1544 ++ * boundary. len: trimmed to the end of the current cluster if "start + len"
1545 ++ * extends past it.
1546 ++ */
1547 ++static int ocfs2_zeroout_partial_cluster(struct inode *inode,
1548 ++ u64 start, u64 len)
1549 ++{
1550 ++ int ret;
1551 ++ u64 start_block, end_block, nr_blocks;
1552 ++ u64 p_block, offset;
1553 ++ u32 cluster, p_cluster, nr_clusters;
1554 ++ struct super_block *sb = inode->i_sb;
1555 ++ u64 end = ocfs2_align_bytes_to_clusters(sb, start);
1556 ++
1557 ++ if (start + len < end)
1558 ++ end = start + len;
1559 ++
1560 ++ start_block = ocfs2_blocks_for_bytes(sb, start);
1561 ++ end_block = ocfs2_blocks_for_bytes(sb, end);
1562 ++ nr_blocks = end_block - start_block;
1563 ++ if (!nr_blocks)
1564 ++ return 0;
1565 ++
1566 ++ cluster = ocfs2_bytes_to_clusters(sb, start);
1567 ++ ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
1568 ++ &nr_clusters, NULL);
1569 ++ if (ret)
1570 ++ return ret;
1571 ++ if (!p_cluster)
1572 ++ return 0;
1573 ++
1574 ++ offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
1575 ++ p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
1576 ++ return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
1577 ++}
1578 ++
1579 + /*
1580 + * Parts of this function taken from xfs_change_file_space()
1581 + */
1582 +@@ -1865,7 +1904,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
1583 + {
1584 + int ret;
1585 + s64 llen;
1586 +- loff_t size;
1587 ++ loff_t size, orig_isize;
1588 + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1589 + struct buffer_head *di_bh = NULL;
1590 + handle_t *handle;
1591 +@@ -1896,6 +1935,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
1592 + goto out_inode_unlock;
1593 + }
1594 +
1595 ++ orig_isize = i_size_read(inode);
1596 + switch (sr->l_whence) {
1597 + case 0: /*SEEK_SET*/
1598 + break;
1599 +@@ -1903,7 +1943,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
1600 + sr->l_start += f_pos;
1601 + break;
1602 + case 2: /*SEEK_END*/
1603 +- sr->l_start += i_size_read(inode);
1604 ++ sr->l_start += orig_isize;
1605 + break;
1606 + default:
1607 + ret = -EINVAL;
1608 +@@ -1957,6 +1997,14 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
1609 + default:
1610 + ret = -EINVAL;
1611 + }
1612 ++
1613 ++ /* zeroout eof blocks in the cluster. */
1614 ++ if (!ret && change_size && orig_isize < size) {
1615 ++ ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,
1616 ++ size - orig_isize);
1617 ++ if (!ret)
1618 ++ i_size_write(inode, size);
1619 ++ }
1620 + up_write(&OCFS2_I(inode)->ip_alloc_sem);
1621 + if (ret) {
1622 + mlog_errno(ret);
1623 +@@ -1973,9 +2021,6 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
1624 + goto out_inode_unlock;
1625 + }
1626 +
1627 +- if (change_size && i_size_read(inode) < size)
1628 +- i_size_write(inode, size);
1629 +-
1630 + inode->i_ctime = inode->i_mtime = current_time(inode);
1631 + ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
1632 + if (ret < 0)
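To make the rounding in ocfs2_zeroout_partial_cluster() concrete, here is a worked example with assumed geometry (4 KiB blocks, 32 KiB clusters; the numbers are illustrative, not taken from the patch):

    /* Assumed: block = 4096 bytes, cluster = 32768 bytes.
     * start = 70000, len = 100000:
     *   end         = round_up(70000, 32768)    = 98304  (cluster end)
     *   start + len = 170000 > 98304, so end stays at 98304
     *   start_block = DIV_ROUND_UP(70000, 4096) = 18     (byte 73728)
     *   end_block   = DIV_ROUND_UP(98304, 4096) = 24
     *   nr_blocks   = 24 - 18                   = 6
     * Bytes 73728..98303, the partial blocks up to the cluster end, are
     * zeroed via sb_issue_zeroout(). */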
1633 +diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
1634 +index 93d5cf0bc7168..d8b86fd391134 100644
1635 +--- a/include/linux/huge_mm.h
1636 ++++ b/include/linux/huge_mm.h
1637 +@@ -231,6 +231,19 @@ static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
1638 + else
1639 + return NULL;
1640 + }
1641 ++
1642 ++/**
1643 ++ * thp_order - Order of a transparent huge page.
1644 ++ * @page: Head page of a transparent huge page.
1645 ++ */
1646 ++static inline unsigned int thp_order(struct page *page)
1647 ++{
1648 ++ VM_BUG_ON_PGFLAGS(PageTail(page), page);
1649 ++ if (PageHead(page))
1650 ++ return HPAGE_PMD_ORDER;
1651 ++ return 0;
1652 ++}
1653 ++
1654 + static inline int hpage_nr_pages(struct page *page)
1655 + {
1656 + if (unlikely(PageTransHuge(page)))
1657 +@@ -290,6 +303,12 @@ static inline struct list_head *page_deferred_list(struct page *page)
1658 + #define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
1659 + #define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })
1660 +
1661 ++static inline unsigned int thp_order(struct page *page)
1662 ++{
1663 ++ VM_BUG_ON_PGFLAGS(PageTail(page), page);
1664 ++ return 0;
1665 ++}
1666 ++
1667 + #define hpage_nr_pages(x) 1
1668 +
1669 + static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
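A hedged usage sketch for the new thp_order() helper (the caller below is hypothetical): the order plugs directly into size calculations, and the !CONFIG_TRANSPARENT_HUGEPAGE stub above keeps the same caller compiling with order 0.

    /* Hypothetical caller: bytes covered by a (possibly huge) page.
     * thp_order() is HPAGE_PMD_ORDER for a THP head page, 0 otherwise. */
    static inline unsigned long page_bytes(struct page *page)
    {
            return PAGE_SIZE << thp_order(page);
    }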
1670 +diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
1671 +index d8860f2d0976d..fc6ed1311589c 100644
1672 +--- a/include/linux/usb/usbnet.h
1673 ++++ b/include/linux/usb/usbnet.h
1674 +@@ -83,6 +83,8 @@ struct usbnet {
1675 + # define EVENT_LINK_CHANGE 11
1676 + # define EVENT_SET_RX_MODE 12
1677 + # define EVENT_NO_IP_ALIGN 13
1678 ++ u32 rx_speed; /* in bps - NOT Mbps */
1679 ++ u32 tx_speed; /* in bps - NOT Mbps */
1680 + };
1681 +
1682 + static inline struct usb_driver *driver_of(struct usb_interface *intf)
1683 +diff --git a/include/linux/xarray.h b/include/linux/xarray.h
1684 +index 3b257c97837db..2903f25bff5e4 100644
1685 +--- a/include/linux/xarray.h
1686 ++++ b/include/linux/xarray.h
1687 +@@ -1470,6 +1470,28 @@ void xas_pause(struct xa_state *);
1688 +
1689 + void xas_create_range(struct xa_state *);
1690 +
1691 ++#ifdef CONFIG_XARRAY_MULTI
1692 ++int xa_get_order(struct xarray *, unsigned long index);
1693 ++void xas_split(struct xa_state *, void *entry, unsigned int order);
1694 ++void xas_split_alloc(struct xa_state *, void *entry, unsigned int order, gfp_t);
1695 ++#else
1696 ++static inline int xa_get_order(struct xarray *xa, unsigned long index)
1697 ++{
1698 ++ return 0;
1699 ++}
1700 ++
1701 ++static inline void xas_split(struct xa_state *xas, void *entry,
1702 ++ unsigned int order)
1703 ++{
1704 ++ xas_store(xas, entry);
1705 ++}
1706 ++
1707 ++static inline void xas_split_alloc(struct xa_state *xas, void *entry,
1708 ++ unsigned int order, gfp_t gfp)
1709 ++{
1710 ++}
1711 ++#endif
1712 ++
1713 + /**
1714 + * xas_reload() - Refetch an entry from the xarray.
1715 + * @xas: XArray operation state.
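The declarations above imply a two-step calling sequence: xas_split_alloc() may sleep, so it runs before taking the lock, and xas_split() then runs under it, as the kernel-doc later in this patch spells out and as check_split_1() in lib/test_xarray.c exercises. A minimal sketch (my_xa, index, old_order and new_entry are assumed names, inside some function):

    XA_STATE(xas, &my_xa, index);

    /* May sleep: preallocate the nodes the split will need. */
    xas_split_alloc(&xas, xa_load(&my_xa, index), old_order, GFP_KERNEL);
    xas_lock(&xas);
    /* Under the lock: replace the multi-index entry with new_entry
     * at each of its former indices. */
    xas_split(&xas, new_entry, old_order);
    xas_unlock(&xas);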
1716 +diff --git a/include/net/caif/caif_dev.h b/include/net/caif/caif_dev.h
1717 +index 48ecca8530ffa..b655d8666f555 100644
1718 +--- a/include/net/caif/caif_dev.h
1719 ++++ b/include/net/caif/caif_dev.h
1720 +@@ -119,7 +119,7 @@ void caif_free_client(struct cflayer *adap_layer);
1721 + * The link_support layer is used to add any Link Layer specific
1722 + * framing.
1723 + */
1724 +-void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
1725 ++int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
1726 + struct cflayer *link_support, int head_room,
1727 + struct cflayer **layer, int (**rcv_func)(
1728 + struct sk_buff *, struct net_device *,
1729 +diff --git a/include/net/caif/cfcnfg.h b/include/net/caif/cfcnfg.h
1730 +index 2aa5e91d84576..8819ff4db35a6 100644
1731 +--- a/include/net/caif/cfcnfg.h
1732 ++++ b/include/net/caif/cfcnfg.h
1733 +@@ -62,7 +62,7 @@ void cfcnfg_remove(struct cfcnfg *cfg);
1734 + * @fcs: Specify if checksum is used in CAIF Framing Layer.
1735 + * @head_room: Head space needed by link specific protocol.
1736 + */
1737 +-void
1738 ++int
1739 + cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
1740 + struct net_device *dev, struct cflayer *phy_layer,
1741 + enum cfcnfg_phy_preference pref,
1742 +diff --git a/include/net/caif/cfserl.h b/include/net/caif/cfserl.h
1743 +index 14a55e03bb3ce..67cce8757175a 100644
1744 +--- a/include/net/caif/cfserl.h
1745 ++++ b/include/net/caif/cfserl.h
1746 +@@ -9,4 +9,5 @@
1747 + #include <net/caif/caif_layer.h>
1748 +
1749 + struct cflayer *cfserl_create(int instance, bool use_stx);
1750 ++void cfserl_release(struct cflayer *layer);
1751 + #endif
1752 +diff --git a/init/main.c b/init/main.c
1753 +index fef9e610b74b7..e6a1fb14f3085 100644
1754 +--- a/init/main.c
1755 ++++ b/init/main.c
1756 +@@ -1174,7 +1174,7 @@ static noinline void __init kernel_init_freeable(void)
1757 + */
1758 + set_mems_allowed(node_states[N_MEMORY]);
1759 +
1760 +- cad_pid = task_pid(current);
1761 ++ cad_pid = get_pid(task_pid(current));
1762 +
1763 + smp_prepare_cpus(setup_max_cpus);
1764 +
1765 +diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
1766 +index 0c9d3ad17e0fc..4d0b59fa5550f 100644
1767 +--- a/lib/lz4/lz4_decompress.c
1768 ++++ b/lib/lz4/lz4_decompress.c
1769 +@@ -260,7 +260,11 @@ static FORCE_INLINE int LZ4_decompress_generic(
1770 + }
1771 + }
1772 +
1773 +- memcpy(op, ip, length);
1774 ++ /*
1775 ++ * supports overlapping memory regions; only matters
1776 ++ * for in-place decompression scenarios
1777 ++ */
1778 ++ LZ4_memmove(op, ip, length);
1779 + ip += length;
1780 + op += length;
1781 +
1782 +diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
1783 +index 1a7fa9d9170fb..369eb181d730c 100644
1784 +--- a/lib/lz4/lz4defs.h
1785 ++++ b/lib/lz4/lz4defs.h
1786 +@@ -137,6 +137,8 @@ static FORCE_INLINE void LZ4_writeLE16(void *memPtr, U16 value)
1787 + return put_unaligned_le16(value, memPtr);
1788 + }
1789 +
1790 ++#define LZ4_memmove(dst, src, size) __builtin_memmove(dst, src, size)
1791 ++
1792 + static FORCE_INLINE void LZ4_copy8(void *dst, const void *src)
1793 + {
1794 + #if LZ4_ARCH64
1795 +diff --git a/lib/test_xarray.c b/lib/test_xarray.c
1796 +index d4f97925dbd8d..8262c3f05a5d3 100644
1797 +--- a/lib/test_xarray.c
1798 ++++ b/lib/test_xarray.c
1799 +@@ -1503,6 +1503,49 @@ static noinline void check_store_range(struct xarray *xa)
1800 + }
1801 + }
1802 +
1803 ++#ifdef CONFIG_XARRAY_MULTI
1804 ++static void check_split_1(struct xarray *xa, unsigned long index,
1805 ++ unsigned int order)
1806 ++{
1807 ++ XA_STATE(xas, xa, index);
1808 ++ void *entry;
1809 ++ unsigned int i = 0;
1810 ++
1811 ++ xa_store_order(xa, index, order, xa, GFP_KERNEL);
1812 ++
1813 ++ xas_split_alloc(&xas, xa, order, GFP_KERNEL);
1814 ++ xas_lock(&xas);
1815 ++ xas_split(&xas, xa, order);
1816 ++ xas_unlock(&xas);
1817 ++
1818 ++ xa_for_each(xa, index, entry) {
1819 ++ XA_BUG_ON(xa, entry != xa);
1820 ++ i++;
1821 ++ }
1822 ++ XA_BUG_ON(xa, i != 1 << order);
1823 ++
1824 ++ xa_set_mark(xa, index, XA_MARK_0);
1825 ++ XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));
1826 ++
1827 ++ xa_destroy(xa);
1828 ++}
1829 ++
1830 ++static noinline void check_split(struct xarray *xa)
1831 ++{
1832 ++ unsigned int order;
1833 ++
1834 ++ XA_BUG_ON(xa, !xa_empty(xa));
1835 ++
1836 ++ for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) {
1837 ++ check_split_1(xa, 0, order);
1838 ++ check_split_1(xa, 1UL << order, order);
1839 ++ check_split_1(xa, 3UL << order, order);
1840 ++ }
1841 ++}
1842 ++#else
1843 ++static void check_split(struct xarray *xa) { }
1844 ++#endif
1845 ++
1846 + static void check_align_1(struct xarray *xa, char *name)
1847 + {
1848 + int i;
1849 +@@ -1649,6 +1692,26 @@ static noinline void check_account(struct xarray *xa)
1850 + #endif
1851 + }
1852 +
1853 ++static noinline void check_get_order(struct xarray *xa)
1854 ++{
1855 ++ unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
1856 ++ unsigned int order;
1857 ++ unsigned long i, j;
1858 ++
1859 ++ for (i = 0; i < 3; i++)
1860 ++ XA_BUG_ON(xa, xa_get_order(xa, i) != 0);
1861 ++
1862 ++ for (order = 0; order < max_order; order++) {
1863 ++ for (i = 0; i < 10; i++) {
1864 ++ xa_store_order(xa, i << order, order,
1865 ++ xa_mk_index(i << order), GFP_KERNEL);
1866 ++ for (j = i << order; j < (i + 1) << order; j++)
1867 ++ XA_BUG_ON(xa, xa_get_order(xa, j) != order);
1868 ++ xa_erase(xa, i << order);
1869 ++ }
1870 ++ }
1871 ++}
1872 ++
1873 + static noinline void check_destroy(struct xarray *xa)
1874 + {
1875 + unsigned long index;
1876 +@@ -1697,6 +1760,7 @@ static int xarray_checks(void)
1877 + check_reserve(&array);
1878 + check_reserve(&xa0);
1879 + check_multi_store(&array);
1880 ++ check_get_order(&array);
1881 + check_xa_alloc();
1882 + check_find(&array);
1883 + check_find_entry(&array);
1884 +@@ -1708,6 +1772,7 @@ static int xarray_checks(void)
1885 + check_store_range(&array);
1886 + check_store_iter(&array);
1887 + check_align(&xa0);
1888 ++ check_split(&array);
1889 +
1890 + check_workingset(&array, 0);
1891 + check_workingset(&array, 64);
1892 +diff --git a/lib/xarray.c b/lib/xarray.c
1893 +index 08d71c7b75990..7d22b30591275 100644
1894 +--- a/lib/xarray.c
1895 ++++ b/lib/xarray.c
1896 +@@ -266,13 +266,14 @@ static void xa_node_free(struct xa_node *node)
1897 + */
1898 + static void xas_destroy(struct xa_state *xas)
1899 + {
1900 +- struct xa_node *node = xas->xa_alloc;
1901 ++ struct xa_node *next, *node = xas->xa_alloc;
1902 +
1903 +- if (!node)
1904 +- return;
1905 +- XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
1906 +- kmem_cache_free(radix_tree_node_cachep, node);
1907 +- xas->xa_alloc = NULL;
1908 ++ while (node) {
1909 ++ XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
1910 ++ next = rcu_dereference_raw(node->parent);
1911 ++ radix_tree_node_rcu_free(&node->rcu_head);
1912 ++ xas->xa_alloc = node = next;
1913 ++ }
1914 + }
1915 +
1916 + /**
1917 +@@ -304,6 +305,7 @@ bool xas_nomem(struct xa_state *xas, gfp_t gfp)
1918 + xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
1919 + if (!xas->xa_alloc)
1920 + return false;
1921 ++ xas->xa_alloc->parent = NULL;
1922 + XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
1923 + xas->xa_node = XAS_RESTART;
1924 + return true;
1925 +@@ -339,6 +341,7 @@ static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
1926 + }
1927 + if (!xas->xa_alloc)
1928 + return false;
1929 ++ xas->xa_alloc->parent = NULL;
1930 + XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
1931 + xas->xa_node = XAS_RESTART;
1932 + return true;
1933 +@@ -403,7 +406,7 @@ static unsigned long xas_size(const struct xa_state *xas)
1934 + /*
1935 + * Use this to calculate the maximum index that will need to be created
1936 + * in order to add the entry described by @xas. Because we cannot store a
1937 +- * multiple-index entry at index 0, the calculation is a little more complex
1938 ++ * multi-index entry at index 0, the calculation is a little more complex
1939 + * than you might expect.
1940 + */
1941 + static unsigned long xas_max(struct xa_state *xas)
1942 +@@ -946,6 +949,153 @@ void xas_init_marks(const struct xa_state *xas)
1943 + }
1944 + EXPORT_SYMBOL_GPL(xas_init_marks);
1945 +
1946 ++#ifdef CONFIG_XARRAY_MULTI
1947 ++static unsigned int node_get_marks(struct xa_node *node, unsigned int offset)
1948 ++{
1949 ++ unsigned int marks = 0;
1950 ++ xa_mark_t mark = XA_MARK_0;
1951 ++
1952 ++ for (;;) {
1953 ++ if (node_get_mark(node, offset, mark))
1954 ++ marks |= 1 << (__force unsigned int)mark;
1955 ++ if (mark == XA_MARK_MAX)
1956 ++ break;
1957 ++ mark_inc(mark);
1958 ++ }
1959 ++
1960 ++ return marks;
1961 ++}
1962 ++
1963 ++static void node_set_marks(struct xa_node *node, unsigned int offset,
1964 ++ struct xa_node *child, unsigned int marks)
1965 ++{
1966 ++ xa_mark_t mark = XA_MARK_0;
1967 ++
1968 ++ for (;;) {
1969 ++ if (marks & (1 << (__force unsigned int)mark)) {
1970 ++ node_set_mark(node, offset, mark);
1971 ++ if (child)
1972 ++ node_mark_all(child, mark);
1973 ++ }
1974 ++ if (mark == XA_MARK_MAX)
1975 ++ break;
1976 ++ mark_inc(mark);
1977 ++ }
1978 ++}
1979 ++
1980 ++/**
1981 ++ * xas_split_alloc() - Allocate memory for splitting an entry.
1982 ++ * @xas: XArray operation state.
1983 ++ * @entry: New entry which will be stored in the array.
1984 ++ * @order: New entry order.
1985 ++ * @gfp: Memory allocation flags.
1986 ++ *
1987 ++ * This function should be called before calling xas_split().
1988 ++ * If necessary, it will allocate new nodes (and fill them with @entry)
1989 ++ * to prepare for the upcoming split of an entry of @order size into
1990 ++ * entries of the order stored in the @xas.
1991 ++ *
1992 ++ * Context: May sleep if @gfp flags permit.
1993 ++ */
1994 ++void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
1995 ++ gfp_t gfp)
1996 ++{
1997 ++ unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
1998 ++ unsigned int mask = xas->xa_sibs;
1999 ++
2000 ++ /* XXX: no support for splitting really large entries yet */
2001 ++ if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT < order))
2002 ++ goto nomem;
2003 ++ if (xas->xa_shift + XA_CHUNK_SHIFT > order)
2004 ++ return;
2005 ++
2006 ++ do {
2007 ++ unsigned int i;
2008 ++ void *sibling;
2009 ++ struct xa_node *node;
2010 ++
2011 ++ node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
2012 ++ if (!node)
2013 ++ goto nomem;
2014 ++ node->array = xas->xa;
2015 ++ for (i = 0; i < XA_CHUNK_SIZE; i++) {
2016 ++ if ((i & mask) == 0) {
2017 ++ RCU_INIT_POINTER(node->slots[i], entry);
2018 ++ sibling = xa_mk_sibling(0);
2019 ++ } else {
2020 ++ RCU_INIT_POINTER(node->slots[i], sibling);
2021 ++ }
2022 ++ }
2023 ++ RCU_INIT_POINTER(node->parent, xas->xa_alloc);
2024 ++ xas->xa_alloc = node;
2025 ++ } while (sibs-- > 0);
2026 ++
2027 ++ return;
2028 ++nomem:
2029 ++ xas_destroy(xas);
2030 ++ xas_set_err(xas, -ENOMEM);
2031 ++}
2032 ++EXPORT_SYMBOL_GPL(xas_split_alloc);
2033 ++
2034 ++/**
2035 ++ * xas_split() - Split a multi-index entry into smaller entries.
2036 ++ * @xas: XArray operation state.
2037 ++ * @entry: New entry to store in the array.
2038 ++ * @order: New entry order.
2039 ++ *
2040 ++ * The value in the entry is copied to all the replacement entries.
2041 ++ *
2042 ++ * Context: Any context. The caller should hold the xa_lock.
2043 ++ */
2044 ++void xas_split(struct xa_state *xas, void *entry, unsigned int order)
2045 ++{
2046 ++ unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
2047 ++ unsigned int offset, marks;
2048 ++ struct xa_node *node;
2049 ++ void *curr = xas_load(xas);
2050 ++ int values = 0;
2051 ++
2052 ++ node = xas->xa_node;
2053 ++ if (xas_top(node))
2054 ++ return;
2055 ++
2056 ++ marks = node_get_marks(node, xas->xa_offset);
2057 ++
2058 ++ offset = xas->xa_offset + sibs;
2059 ++ do {
2060 ++ if (xas->xa_shift < node->shift) {
2061 ++ struct xa_node *child = xas->xa_alloc;
2062 ++
2063 ++ xas->xa_alloc = rcu_dereference_raw(child->parent);
2064 ++ child->shift = node->shift - XA_CHUNK_SHIFT;
2065 ++ child->offset = offset;
2066 ++ child->count = XA_CHUNK_SIZE;
2067 ++ child->nr_values = xa_is_value(entry) ?
2068 ++ XA_CHUNK_SIZE : 0;
2069 ++ RCU_INIT_POINTER(child->parent, node);
2070 ++ node_set_marks(node, offset, child, marks);
2071 ++ rcu_assign_pointer(node->slots[offset],
2072 ++ xa_mk_node(child));
2073 ++ if (xa_is_value(curr))
2074 ++ values--;
2075 ++ } else {
2076 ++ unsigned int canon = offset - xas->xa_sibs;
2077 ++
2078 ++ node_set_marks(node, canon, NULL, marks);
2079 ++ rcu_assign_pointer(node->slots[canon], entry);
2080 ++ while (offset > canon)
2081 ++ rcu_assign_pointer(node->slots[offset--],
2082 ++ xa_mk_sibling(canon));
2083 ++ values += (xa_is_value(entry) - xa_is_value(curr)) *
2084 ++ (xas->xa_sibs + 1);
2085 ++ }
2086 ++ } while (offset-- > xas->xa_offset);
2087 ++
2088 ++ node->nr_values += values;
2089 ++}
2090 ++EXPORT_SYMBOL_GPL(xas_split);
2091 ++#endif
2092 ++
2093 + /**
2094 + * xas_pause() - Pause a walk to drop a lock.
2095 + * @xas: XArray operation state.
2096 +@@ -1407,7 +1557,7 @@ EXPORT_SYMBOL(__xa_store);
2097 + * @gfp: Memory allocation flags.
2098 + *
2099 + * After this function returns, loads from this index will return @entry.
2100 +- * Storing into an existing multislot entry updates the entry of every index.
2101 ++ * Storing into an existing multi-index entry updates the entry of every index.
2102 + * The marks associated with @index are unaffected unless @entry is %NULL.
2103 + *
2104 + * Context: Any context. Takes and releases the xa_lock.
2105 +@@ -1549,7 +1699,7 @@ static void xas_set_range(struct xa_state *xas, unsigned long first,
2106 + *
2107 + * After this function returns, loads from any index between @first and @last,
2108 + * inclusive will return @entry.
2109 +- * Storing into an existing multislot entry updates the entry of every index.
2110 ++ * Storing into an existing multi-index entry updates the entry of every index.
2111 + * The marks associated with @index are unaffected unless @entry is %NULL.
2112 + *
2113 + * Context: Process context. Takes and releases the xa_lock. May sleep
2114 +@@ -1592,6 +1742,46 @@ unlock:
2115 + return xas_result(&xas, NULL);
2116 + }
2117 + EXPORT_SYMBOL(xa_store_range);
2118 ++
2119 ++/**
2120 ++ * xa_get_order() - Get the order of an entry.
2121 ++ * @xa: XArray.
2122 ++ * @index: Index of the entry.
2123 ++ *
2124 ++ * Return: A number between 0 and 63 indicating the order of the entry.
2125 ++ */
2126 ++int xa_get_order(struct xarray *xa, unsigned long index)
2127 ++{
2128 ++ XA_STATE(xas, xa, index);
2129 ++ void *entry;
2130 ++ int order = 0;
2131 ++
2132 ++ rcu_read_lock();
2133 ++ entry = xas_load(&xas);
2134 ++
2135 ++ if (!entry)
2136 ++ goto unlock;
2137 ++
2138 ++ if (!xas.xa_node)
2139 ++ goto unlock;
2140 ++
2141 ++ for (;;) {
2142 ++ unsigned int slot = xas.xa_offset + (1 << order);
2143 ++
2144 ++ if (slot >= XA_CHUNK_SIZE)
2145 ++ break;
2146 ++ if (!xa_is_sibling(xas.xa_node->slots[slot]))
2147 ++ break;
2148 ++ order++;
2149 ++ }
2150 ++
2151 ++ order += xas.xa_node->shift;
2152 ++unlock:
2153 ++ rcu_read_unlock();
2154 ++
2155 ++ return order;
2156 ++}
2157 ++EXPORT_SYMBOL(xa_get_order);
2158 + #endif /* CONFIG_XARRAY_MULTI */
2159 +
2160 + /**
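A hedged usage sketch for xa_get_order() (my_xa and index are assumed names): an order-n entry covers 2^n consecutive indices, which is exactly what __add_to_page_cache_locked() below compares against thp_order() to decide whether a split is needed.

    /* Order 0 means a single-index entry. */
    int order = xa_get_order(&my_xa, index);

    if (order > 0)
            pr_debug("entry at %lu covers %lu indices\n",
                     index, 1UL << order);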
2161 +diff --git a/mm/filemap.c b/mm/filemap.c
2162 +index db542b4948838..c10e237cc2c6e 100644
2163 +--- a/mm/filemap.c
2164 ++++ b/mm/filemap.c
2165 +@@ -856,7 +856,6 @@ noinline int __add_to_page_cache_locked(struct page *page,
2166 + int huge = PageHuge(page);
2167 + struct mem_cgroup *memcg;
2168 + int error;
2169 +- void *old;
2170 +
2171 + VM_BUG_ON_PAGE(!PageLocked(page), page);
2172 + VM_BUG_ON_PAGE(PageSwapBacked(page), page);
2173 +@@ -872,21 +871,41 @@ noinline int __add_to_page_cache_locked(struct page *page,
2174 + get_page(page);
2175 + page->mapping = mapping;
2176 + page->index = offset;
2177 ++ gfp_mask &= GFP_RECLAIM_MASK;
2178 +
2179 + do {
2180 ++ unsigned int order = xa_get_order(xas.xa, xas.xa_index);
2181 ++ void *entry, *old = NULL;
2182 ++
2183 ++ if (order > thp_order(page))
2184 ++ xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index),
2185 ++ order, gfp_mask);
2186 + xas_lock_irq(&xas);
2187 +- old = xas_load(&xas);
2188 +- if (old && !xa_is_value(old))
2189 +- xas_set_err(&xas, -EEXIST);
2190 ++ xas_for_each_conflict(&xas, entry) {
2191 ++ old = entry;
2192 ++ if (!xa_is_value(entry)) {
2193 ++ xas_set_err(&xas, -EEXIST);
2194 ++ goto unlock;
2195 ++ }
2196 ++ }
2197 ++
2198 ++ if (old) {
2199 ++ if (shadowp)
2200 ++ *shadowp = old;
2201 ++ /* entry may have been split before we acquired lock */
2202 ++ order = xa_get_order(xas.xa, xas.xa_index);
2203 ++ if (order > thp_order(page)) {
2204 ++ xas_split(&xas, old, order);
2205 ++ xas_reset(&xas);
2206 ++ }
2207 ++ }
2208 ++
2209 + xas_store(&xas, page);
2210 + if (xas_error(&xas))
2211 + goto unlock;
2212 +
2213 +- if (xa_is_value(old)) {
2214 ++ if (old)
2215 + mapping->nrexceptional--;
2216 +- if (shadowp)
2217 +- *shadowp = old;
2218 +- }
2219 + mapping->nrpages++;
2220 +
2221 + /* hugetlb pages do not participate in page cache accounting */
2222 +@@ -894,7 +913,7 @@ noinline int __add_to_page_cache_locked(struct page *page,
2223 + __inc_node_page_state(page, NR_FILE_PAGES);
2224 + unlock:
2225 + xas_unlock_irq(&xas);
2226 +- } while (xas_nomem(&xas, gfp_mask & GFP_RECLAIM_MASK));
2227 ++ } while (xas_nomem(&xas, gfp_mask));
2228 +
2229 + if (xas_error(&xas))
2230 + goto error;
2231 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
2232 +index 3b08e34a775df..fe15e7d8220ab 100644
2233 +--- a/mm/hugetlb.c
2234 ++++ b/mm/hugetlb.c
2235 +@@ -4338,10 +4338,20 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
2236 + struct page *page;
2237 +
2238 + if (!*pagep) {
2239 +- ret = -ENOMEM;
2240 ++		/* If a page already exists, then this is UFFDIO_COPY for
2241 ++		 * a non-missing case; return -EEXIST.
2242 ++ */
2243 ++ if (vm_shared &&
2244 ++ hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
2245 ++ ret = -EEXIST;
2246 ++ goto out;
2247 ++ }
2248 ++
2249 + page = alloc_huge_page(dst_vma, dst_addr, 0);
2250 +- if (IS_ERR(page))
2251 ++ if (IS_ERR(page)) {
2252 ++ ret = -ENOMEM;
2253 + goto out;
2254 ++ }
2255 +
2256 + ret = copy_huge_page_from_user(page,
2257 + (const void __user *) src_addr,
2258 +diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
2259 +index 83b324419ad3d..21a7ea9b70c8a 100644
2260 +--- a/net/bluetooth/hci_core.c
2261 ++++ b/net/bluetooth/hci_core.c
2262 +@@ -1561,8 +1561,13 @@ setup_failed:
2263 + } else {
2264 + /* Init failed, cleanup */
2265 + flush_work(&hdev->tx_work);
2266 +- flush_work(&hdev->cmd_work);
2267 ++
2268 ++		/* Since hci_rx_work() can queue new cmd_work, it should
2269 ++		 * be flushed first to avoid an unexpected call of
2270 ++		 * hci_cmd_work()
2271 ++ */
2272 + flush_work(&hdev->rx_work);
2273 ++ flush_work(&hdev->cmd_work);
2274 +
2275 + skb_queue_purge(&hdev->cmd_q);
2276 + skb_queue_purge(&hdev->rx_q);
2277 +diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
2278 +index 8159b344deef5..8d2c26c4b6d3d 100644
2279 +--- a/net/bluetooth/hci_sock.c
2280 ++++ b/net/bluetooth/hci_sock.c
2281 +@@ -755,7 +755,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
2282 + /* Detach sockets from device */
2283 + read_lock(&hci_sk_list.lock);
2284 + sk_for_each(sk, &hci_sk_list.head) {
2285 +- bh_lock_sock_nested(sk);
2286 ++ lock_sock(sk);
2287 + if (hci_pi(sk)->hdev == hdev) {
2288 + hci_pi(sk)->hdev = NULL;
2289 + sk->sk_err = EPIPE;
2290 +@@ -764,7 +764,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
2291 +
2292 + hci_dev_put(hdev);
2293 + }
2294 +- bh_unlock_sock(sk);
2295 ++ release_sock(sk);
2296 + }
2297 + read_unlock(&hci_sk_list.lock);
2298 + }
2299 +diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
2300 +index 03c7cdd8e4cbf..8a3c19b9a9958 100644
2301 +--- a/net/caif/caif_dev.c
2302 ++++ b/net/caif/caif_dev.c
2303 +@@ -307,7 +307,7 @@ static void dev_flowctrl(struct net_device *dev, int on)
2304 + caifd_put(caifd);
2305 + }
2306 +
2307 +-void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
2308 ++int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
2309 + struct cflayer *link_support, int head_room,
2310 + struct cflayer **layer,
2311 + int (**rcv_func)(struct sk_buff *, struct net_device *,
2312 +@@ -318,11 +318,12 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
2313 + enum cfcnfg_phy_preference pref;
2314 + struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
2315 + struct caif_device_entry_list *caifdevs;
2316 ++ int res;
2317 +
2318 + caifdevs = caif_device_list(dev_net(dev));
2319 + caifd = caif_device_alloc(dev);
2320 + if (!caifd)
2321 +- return;
2322 ++ return -ENOMEM;
2323 + *layer = &caifd->layer;
2324 + spin_lock_init(&caifd->flow_lock);
2325 +
2326 +@@ -343,7 +344,7 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
2327 + strlcpy(caifd->layer.name, dev->name,
2328 + sizeof(caifd->layer.name));
2329 + caifd->layer.transmit = transmit;
2330 +- cfcnfg_add_phy_layer(cfg,
2331 ++ res = cfcnfg_add_phy_layer(cfg,
2332 + dev,
2333 + &caifd->layer,
2334 + pref,
2335 +@@ -353,6 +354,7 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
2336 + mutex_unlock(&caifdevs->lock);
2337 + if (rcv_func)
2338 + *rcv_func = receive;
2339 ++ return res;
2340 + }
2341 + EXPORT_SYMBOL(caif_enroll_dev);
2342 +
2343 +@@ -367,6 +369,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
2344 + struct cflayer *layer, *link_support;
2345 + int head_room = 0;
2346 + struct caif_device_entry_list *caifdevs;
2347 ++ int res;
2348 +
2349 + cfg = get_cfcnfg(dev_net(dev));
2350 + caifdevs = caif_device_list(dev_net(dev));
2351 +@@ -392,8 +395,10 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
2352 + break;
2353 + }
2354 + }
2355 +- caif_enroll_dev(dev, caifdev, link_support, head_room,
2356 ++ res = caif_enroll_dev(dev, caifdev, link_support, head_room,
2357 + &layer, NULL);
2358 ++ if (res)
2359 ++ cfserl_release(link_support);
2360 + caifdev->flowctrl = dev_flowctrl;
2361 + break;
2362 +
2363 +diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c
2364 +index 76bd67891fb39..46c62dd1479b8 100644
2365 +--- a/net/caif/caif_usb.c
2366 ++++ b/net/caif/caif_usb.c
2367 +@@ -115,6 +115,11 @@ static struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
2368 + return (struct cflayer *) this;
2369 + }
2370 +
2371 ++static void cfusbl_release(struct cflayer *layer)
2372 ++{
2373 ++ kfree(layer);
2374 ++}
2375 ++
2376 + static struct packet_type caif_usb_type __read_mostly = {
2377 + .type = cpu_to_be16(ETH_P_802_EX1),
2378 + };
2379 +@@ -127,6 +132,7 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
2380 + struct cflayer *layer, *link_support;
2381 + struct usbnet *usbnet;
2382 + struct usb_device *usbdev;
2383 ++ int res;
2384 +
2385 + /* Check whether we have a NCM device, and find its VID/PID. */
2386 + if (!(dev->dev.parent && dev->dev.parent->driver &&
2387 +@@ -169,8 +175,11 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
2388 + if (dev->num_tx_queues > 1)
2389 + pr_warn("USB device uses more than one tx queue\n");
2390 +
2391 +- caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN,
2392 ++ res = caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN,
2393 + &layer, &caif_usb_type.func);
2394 ++ if (res)
2395 ++ goto err;
2396 ++
2397 + if (!pack_added)
2398 + dev_add_pack(&caif_usb_type);
2399 + pack_added = true;
2400 +@@ -178,6 +187,9 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
2401 + strlcpy(layer->name, dev->name, sizeof(layer->name));
2402 +
2403 + return 0;
2404 ++err:
2405 ++ cfusbl_release(link_support);
2406 ++ return res;
2407 + }
2408 +
2409 + static struct notifier_block caif_device_notifier = {
2410 +diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
2411 +index 399239a14420f..cac30e676ac94 100644
2412 +--- a/net/caif/cfcnfg.c
2413 ++++ b/net/caif/cfcnfg.c
2414 +@@ -450,7 +450,7 @@ unlock:
2415 + rcu_read_unlock();
2416 + }
2417 +
2418 +-void
2419 ++int
2420 + cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
2421 + struct net_device *dev, struct cflayer *phy_layer,
2422 + enum cfcnfg_phy_preference pref,
2423 +@@ -459,7 +459,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
2424 + {
2425 + struct cflayer *frml;
2426 + struct cfcnfg_phyinfo *phyinfo = NULL;
2427 +- int i;
2428 ++ int i, res = 0;
2429 + u8 phyid;
2430 +
2431 + mutex_lock(&cnfg->lock);
2432 +@@ -473,12 +473,15 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
2433 + goto got_phyid;
2434 + }
2435 + pr_warn("Too many CAIF Link Layers (max 6)\n");
2436 ++ res = -EEXIST;
2437 + goto out;
2438 +
2439 + got_phyid:
2440 + phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC);
2441 +- if (!phyinfo)
2442 ++ if (!phyinfo) {
2443 ++ res = -ENOMEM;
2444 + goto out_err;
2445 ++ }
2446 +
2447 + phy_layer->id = phyid;
2448 + phyinfo->pref = pref;
2449 +@@ -492,8 +495,10 @@ got_phyid:
2450 +
2451 + frml = cffrml_create(phyid, fcs);
2452 +
2453 +- if (!frml)
2454 ++ if (!frml) {
2455 ++ res = -ENOMEM;
2456 + goto out_err;
2457 ++ }
2458 + phyinfo->frm_layer = frml;
2459 + layer_set_up(frml, cnfg->mux);
2460 +
2461 +@@ -511,11 +516,12 @@ got_phyid:
2462 + list_add_rcu(&phyinfo->node, &cnfg->phys);
2463 + out:
2464 + mutex_unlock(&cnfg->lock);
2465 +- return;
2466 ++ return res;
2467 +
2468 + out_err:
2469 + kfree(phyinfo);
2470 + mutex_unlock(&cnfg->lock);
2471 ++ return res;
2472 + }
2473 + EXPORT_SYMBOL(cfcnfg_add_phy_layer);
2474 +
2475 +diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
2476 +index e11725a4bb0ed..40cd57ad0a0f4 100644
2477 +--- a/net/caif/cfserl.c
2478 ++++ b/net/caif/cfserl.c
2479 +@@ -31,6 +31,11 @@ static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
2480 + static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
2481 + int phyid);
2482 +
2483 ++void cfserl_release(struct cflayer *layer)
2484 ++{
2485 ++ kfree(layer);
2486 ++}
2487 ++
2488 + struct cflayer *cfserl_create(int instance, bool use_stx)
2489 + {
2490 + struct cfserl *this = kzalloc(sizeof(struct cfserl), GFP_ATOMIC);
2491 +diff --git a/net/core/neighbour.c b/net/core/neighbour.c
2492 +index 472a615775f32..f94d405358a21 100644
2493 +--- a/net/core/neighbour.c
2494 ++++ b/net/core/neighbour.c
2495 +@@ -239,6 +239,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
2496 +
2497 + write_lock(&n->lock);
2498 + if ((n->nud_state == NUD_FAILED) ||
2499 ++ (n->nud_state == NUD_NOARP) ||
2500 + (tbl->is_multicast &&
2501 + tbl->is_multicast(n->primary_key)) ||
2502 + time_after(tref, n->updated))
2503 +diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
2504 +index d19c40c684e80..71be751123210 100644
2505 +--- a/net/ieee802154/nl-mac.c
2506 ++++ b/net/ieee802154/nl-mac.c
2507 +@@ -680,8 +680,10 @@ int ieee802154_llsec_getparams(struct sk_buff *skb, struct genl_info *info)
2508 + nla_put_u8(msg, IEEE802154_ATTR_LLSEC_SECLEVEL, params.out_level) ||
2509 + nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
2510 + be32_to_cpu(params.frame_counter)) ||
2511 +- ieee802154_llsec_fill_key_id(msg, &params.out_key))
2512 ++ ieee802154_llsec_fill_key_id(msg, &params.out_key)) {
2513 ++ rc = -ENOBUFS;
2514 + goto out_free;
2515 ++ }
2516 +
2517 + dev_put(dev);
2518 +
2519 +diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
2520 +index 2cdc7e63fe172..88215b5c93aa4 100644
2521 +--- a/net/ieee802154/nl-phy.c
2522 ++++ b/net/ieee802154/nl-phy.c
2523 +@@ -241,8 +241,10 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
2524 + }
2525 +
2526 + if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
2527 +- nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name))
2528 ++ nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name)) {
2529 ++ rc = -EMSGSIZE;
2530 + goto nla_put_failure;
2531 ++ }
2532 + dev_put(dev);
2533 +
2534 + wpan_phy_put(phy);
2535 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
2536 +index 3a9bd9687e7d1..b903fe28ce507 100644
2537 +--- a/net/ipv6/route.c
2538 ++++ b/net/ipv6/route.c
2539 +@@ -3688,11 +3688,11 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
2540 + if (nh) {
2541 + if (rt->fib6_src.plen) {
2542 + NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing");
2543 +- goto out;
2544 ++ goto out_free;
2545 + }
2546 + if (!nexthop_get(nh)) {
2547 + NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
2548 +- goto out;
2549 ++ goto out_free;
2550 + }
2551 + rt->nh = nh;
2552 + fib6_nh = nexthop_fib6_nh(rt->nh);
2553 +@@ -3729,6 +3729,10 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
2554 + out:
2555 + fib6_info_release(rt);
2556 + return ERR_PTR(err);
2557 ++out_free:
2558 ++ ip_fib_metrics_put(rt->fib6_metrics);
2559 ++ kfree(rt);
2560 ++ return ERR_PTR(err);
2561 + }
2562 +
2563 + int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
2564 +diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
2565 +index 99168af0c28d9..f93fa0e210979 100644
2566 +--- a/net/netfilter/ipvs/ip_vs_ctl.c
2567 ++++ b/net/netfilter/ipvs/ip_vs_ctl.c
2568 +@@ -1340,7 +1340,7 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
2569 + ip_vs_addr_copy(svc->af, &svc->addr, &u->addr);
2570 + svc->port = u->port;
2571 + svc->fwmark = u->fwmark;
2572 +- svc->flags = u->flags;
2573 ++ svc->flags = u->flags & ~IP_VS_SVC_F_HASHED;
2574 + svc->timeout = u->timeout * HZ;
2575 + svc->netmask = u->netmask;
2576 + svc->ipvs = ipvs;
2577 +diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
2578 +index aaf4293ddd459..75e6b429635da 100644
2579 +--- a/net/netfilter/nf_conntrack_proto.c
2580 ++++ b/net/netfilter/nf_conntrack_proto.c
2581 +@@ -660,7 +660,7 @@ int nf_conntrack_proto_init(void)
2582 +
2583 + #if IS_ENABLED(CONFIG_IPV6)
2584 + cleanup_sockopt:
2585 +- nf_unregister_sockopt(&so_getorigdst6);
2586 ++ nf_unregister_sockopt(&so_getorigdst);
2587 + #endif
2588 + return ret;
2589 + }
2590 +diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
2591 +index 81406b93f126d..3d5fc07b2530b 100644
2592 +--- a/net/netfilter/nfnetlink_cthelper.c
2593 ++++ b/net/netfilter/nfnetlink_cthelper.c
2594 +@@ -380,10 +380,14 @@ static int
2595 + nfnl_cthelper_update(const struct nlattr * const tb[],
2596 + struct nf_conntrack_helper *helper)
2597 + {
2598 ++ u32 size;
2599 + int ret;
2600 +
2601 +- if (tb[NFCTH_PRIV_DATA_LEN])
2602 +- return -EBUSY;
2603 ++ if (tb[NFCTH_PRIV_DATA_LEN]) {
2604 ++ size = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
2605 ++ if (size != helper->data_len)
2606 ++ return -EBUSY;
2607 ++ }
2608 +
2609 + if (tb[NFCTH_POLICY]) {
2610 + ret = nfnl_cthelper_update_policy(helper, tb[NFCTH_POLICY]);
2611 +diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
2612 +index 2042c6f4629cc..28991730728b9 100644
2613 +--- a/net/netfilter/nft_ct.c
2614 ++++ b/net/netfilter/nft_ct.c
2615 +@@ -1218,7 +1218,7 @@ static void nft_ct_expect_obj_eval(struct nft_object *obj,
2616 + struct nf_conn *ct;
2617 +
2618 + ct = nf_ct_get(pkt->skb, &ctinfo);
2619 +- if (!ct || ctinfo == IP_CT_UNTRACKED) {
2620 ++ if (!ct || nf_ct_is_confirmed(ct) || nf_ct_is_template(ct)) {
2621 + regs->verdict.code = NFT_BREAK;
2622 + return;
2623 + }
2624 +diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
2625 +index 0d4246af6c02b..a7e861eede2d9 100644
2626 +--- a/net/nfc/llcp_sock.c
2627 ++++ b/net/nfc/llcp_sock.c
2628 +@@ -110,6 +110,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
2629 + if (!llcp_sock->service_name) {
2630 + nfc_llcp_local_put(llcp_sock->local);
2631 + llcp_sock->local = NULL;
2632 ++ llcp_sock->dev = NULL;
2633 + ret = -ENOMEM;
2634 + goto put_dev;
2635 + }
2636 +@@ -119,6 +120,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
2637 + llcp_sock->local = NULL;
2638 + kfree(llcp_sock->service_name);
2639 + llcp_sock->service_name = NULL;
2640 ++ llcp_sock->dev = NULL;
2641 + ret = -EADDRINUSE;
2642 + goto put_dev;
2643 + }
2644 +diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
2645 +index 6119c31dcd072..31eb8eefc8681 100644
2646 +--- a/net/sched/act_ct.c
2647 ++++ b/net/sched/act_ct.c
2648 +@@ -648,9 +648,6 @@ static int tcf_ct_fill_params(struct net *net,
2649 + sizeof(p->zone));
2650 + }
2651 +
2652 +- if (p->zone == NF_CT_DEFAULT_ZONE_ID)
2653 +- return 0;
2654 +-
2655 + nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
2656 + tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
2657 + if (!tmpl) {
2658 +diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
2659 +index 0214aa1c44278..8bd2454cc89dc 100644
2660 +--- a/net/tipc/bearer.c
2661 ++++ b/net/tipc/bearer.c
2662 +@@ -233,7 +233,8 @@ void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest)
2663 + */
2664 + static int tipc_enable_bearer(struct net *net, const char *name,
2665 + u32 disc_domain, u32 prio,
2666 +- struct nlattr *attr[])
2667 ++ struct nlattr *attr[],
2668 ++ struct netlink_ext_ack *extack)
2669 + {
2670 + struct tipc_net *tn = tipc_net(net);
2671 + struct tipc_bearer_names b_names;
2672 +@@ -244,20 +245,24 @@ static int tipc_enable_bearer(struct net *net, const char *name,
2673 + int bearer_id = 0;
2674 + int res = -EINVAL;
2675 + char *errstr = "";
2676 ++ u32 i;
2677 +
2678 + if (!bearer_name_validate(name, &b_names)) {
2679 + errstr = "illegal name";
2680 ++ NL_SET_ERR_MSG(extack, "Illegal name");
2681 + goto rejected;
2682 + }
2683 +
2684 + if (prio > TIPC_MAX_LINK_PRI && prio != TIPC_MEDIA_LINK_PRI) {
2685 + errstr = "illegal priority";
2686 ++ NL_SET_ERR_MSG(extack, "Illegal priority");
2687 + goto rejected;
2688 + }
2689 +
2690 + m = tipc_media_find(b_names.media_name);
2691 + if (!m) {
2692 + errstr = "media not registered";
2693 ++ NL_SET_ERR_MSG(extack, "Media not registered");
2694 + goto rejected;
2695 + }
2696 +
2697 +@@ -265,33 +270,43 @@ static int tipc_enable_bearer(struct net *net, const char *name,
2698 + prio = m->priority;
2699 +
2700 + /* Check new bearer vs existing ones and find free bearer id if any */
2701 +- while (bearer_id < MAX_BEARERS) {
2702 +- b = rtnl_dereference(tn->bearer_list[bearer_id]);
2703 +- if (!b)
2704 +- break;
2705 ++ bearer_id = MAX_BEARERS;
2706 ++ i = MAX_BEARERS;
2707 ++ while (i-- != 0) {
2708 ++ b = rtnl_dereference(tn->bearer_list[i]);
2709 ++ if (!b) {
2710 ++ bearer_id = i;
2711 ++ continue;
2712 ++ }
2713 + if (!strcmp(name, b->name)) {
2714 + errstr = "already enabled";
2715 ++ NL_SET_ERR_MSG(extack, "Already enabled");
2716 + goto rejected;
2717 + }
2718 +- bearer_id++;
2719 +- if (b->priority != prio)
2720 +- continue;
2721 +- if (++with_this_prio <= 2)
2722 +- continue;
2723 +- pr_warn("Bearer <%s>: already 2 bearers with priority %u\n",
2724 +- name, prio);
2725 +- if (prio == TIPC_MIN_LINK_PRI) {
2726 +- errstr = "cannot adjust to lower";
2727 +- goto rejected;
2728 ++
2729 ++ if (b->priority == prio &&
2730 ++ (++with_this_prio > 2)) {
2731 ++ pr_warn("Bearer <%s>: already 2 bearers with priority %u\n",
2732 ++ name, prio);
2733 ++
2734 ++ if (prio == TIPC_MIN_LINK_PRI) {
2735 ++ errstr = "cannot adjust to lower";
2736 ++ NL_SET_ERR_MSG(extack, "Cannot adjust to lower");
2737 ++ goto rejected;
2738 ++ }
2739 ++
2740 ++ pr_warn("Bearer <%s>: trying with adjusted priority\n",
2741 ++ name);
2742 ++ prio--;
2743 ++ bearer_id = MAX_BEARERS;
2744 ++ i = MAX_BEARERS;
2745 ++ with_this_prio = 1;
2746 + }
2747 +- pr_warn("Bearer <%s>: trying with adjusted priority\n", name);
2748 +- prio--;
2749 +- bearer_id = 0;
2750 +- with_this_prio = 1;
2751 + }
2752 +
2753 + if (bearer_id >= MAX_BEARERS) {
2754 + errstr = "max 3 bearers permitted";
2755 ++ NL_SET_ERR_MSG(extack, "Max 3 bearers permitted");
2756 + goto rejected;
2757 + }
2758 +
2759 +@@ -305,6 +320,7 @@ static int tipc_enable_bearer(struct net *net, const char *name,
2760 + if (res) {
2761 + kfree(b);
2762 + errstr = "failed to enable media";
2763 ++ NL_SET_ERR_MSG(extack, "Failed to enable media");
2764 + goto rejected;
2765 + }
2766 +
2767 +@@ -320,6 +336,7 @@ static int tipc_enable_bearer(struct net *net, const char *name,
2768 + if (res) {
2769 + bearer_disable(net, b);
2770 + errstr = "failed to create discoverer";
2771 ++ NL_SET_ERR_MSG(extack, "Failed to create discoverer");
2772 + goto rejected;
2773 + }
2774 +
2775 +@@ -862,6 +879,7 @@ int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info)
2776 + bearer = tipc_bearer_find(net, name);
2777 + if (!bearer) {
2778 + err = -EINVAL;
2779 ++ NL_SET_ERR_MSG(info->extack, "Bearer not found");
2780 + goto err_out;
2781 + }
2782 +
2783 +@@ -901,8 +919,10 @@ int __tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
2784 + name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
2785 +
2786 + bearer = tipc_bearer_find(net, name);
2787 +- if (!bearer)
2788 ++ if (!bearer) {
2789 ++ NL_SET_ERR_MSG(info->extack, "Bearer not found");
2790 + return -EINVAL;
2791 ++ }
2792 +
2793 + bearer_disable(net, bearer);
2794 +
2795 +@@ -960,7 +980,8 @@ int __tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
2796 + prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
2797 + }
2798 +
2799 +- return tipc_enable_bearer(net, bearer, domain, prio, attrs);
2800 ++ return tipc_enable_bearer(net, bearer, domain, prio, attrs,
2801 ++ info->extack);
2802 + }
2803 +
2804 + int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
2805 +@@ -999,6 +1020,7 @@ int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info)
2806 + b = tipc_bearer_find(net, name);
2807 + if (!b) {
2808 + rtnl_unlock();
2809 ++ NL_SET_ERR_MSG(info->extack, "Bearer not found");
2810 + return -EINVAL;
2811 + }
2812 +
2813 +@@ -1039,8 +1061,10 @@ int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
2814 + name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
2815 +
2816 + b = tipc_bearer_find(net, name);
2817 +- if (!b)
2818 ++ if (!b) {
2819 ++ NL_SET_ERR_MSG(info->extack, "Bearer not found");
2820 + return -EINVAL;
2821 ++ }
2822 +
2823 + if (attrs[TIPC_NLA_BEARER_PROP]) {
2824 + struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
2825 +@@ -1059,12 +1083,18 @@ int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
2826 + if (props[TIPC_NLA_PROP_WIN])
2827 + b->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
2828 + if (props[TIPC_NLA_PROP_MTU]) {
2829 +- if (b->media->type_id != TIPC_MEDIA_TYPE_UDP)
2830 ++ if (b->media->type_id != TIPC_MEDIA_TYPE_UDP) {
2831 ++ NL_SET_ERR_MSG(info->extack,
2832 ++ "MTU property is unsupported");
2833 + return -EINVAL;
2834 ++ }
2835 + #ifdef CONFIG_TIPC_MEDIA_UDP
2836 + if (tipc_udp_mtu_bad(nla_get_u32
2837 +- (props[TIPC_NLA_PROP_MTU])))
2838 ++ (props[TIPC_NLA_PROP_MTU]))) {
2839 ++ NL_SET_ERR_MSG(info->extack,
2840 ++ "MTU value is out-of-range");
2841 + return -EINVAL;
2842 ++ }
2843 + b->mtu = nla_get_u32(props[TIPC_NLA_PROP_MTU]);
2844 + tipc_node_apply_property(net, b, TIPC_NLA_PROP_MTU);
2845 + #endif
2846 +@@ -1192,6 +1222,7 @@ int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info)
2847 + rtnl_lock();
2848 + media = tipc_media_find(name);
2849 + if (!media) {
2850 ++ NL_SET_ERR_MSG(info->extack, "Media not found");
2851 + err = -EINVAL;
2852 + goto err_out;
2853 + }
2854 +@@ -1228,9 +1259,10 @@ int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
2855 + name = nla_data(attrs[TIPC_NLA_MEDIA_NAME]);
2856 +
2857 + m = tipc_media_find(name);
2858 +- if (!m)
2859 ++ if (!m) {
2860 ++ NL_SET_ERR_MSG(info->extack, "Media not found");
2861 + return -EINVAL;
2862 +-
2863 ++ }
2864 + if (attrs[TIPC_NLA_MEDIA_PROP]) {
2865 + struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
2866 +
2867 +@@ -1246,12 +1278,18 @@ int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
2868 + if (props[TIPC_NLA_PROP_WIN])
2869 + m->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
2870 + if (props[TIPC_NLA_PROP_MTU]) {
2871 +- if (m->type_id != TIPC_MEDIA_TYPE_UDP)
2872 ++ if (m->type_id != TIPC_MEDIA_TYPE_UDP) {
2873 ++ NL_SET_ERR_MSG(info->extack,
2874 ++ "MTU property is unsupported");
2875 + return -EINVAL;
2876 ++ }
2877 + #ifdef CONFIG_TIPC_MEDIA_UDP
2878 + if (tipc_udp_mtu_bad(nla_get_u32
2879 +- (props[TIPC_NLA_PROP_MTU])))
2880 ++ (props[TIPC_NLA_PROP_MTU]))) {
2881 ++ NL_SET_ERR_MSG(info->extack,
2882 ++ "MTU value is out-of-range");
2883 + return -EINVAL;
2884 ++ }
2885 + m->mtu = nla_get_u32(props[TIPC_NLA_PROP_MTU]);
2886 + #endif
2887 + }
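A note on the rewritten bearer scan above: iterating i from MAX_BEARERS - 1 down to 0 and recording every free slot leaves bearer_id at the lowest free index, while every occupied slot is still checked for a duplicate name. Condensed sketch (slot_used() is hypothetical):

    bearer_id = MAX_BEARERS;            /* "no free slot" sentinel */
    for (i = MAX_BEARERS; i-- != 0; )
            if (!slot_used(i))
                    bearer_id = i;      /* smaller i overwrites larger */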
2888 +diff --git a/net/wireless/core.h b/net/wireless/core.h
2889 +index d83c8e009448a..17621d22fb175 100644
2890 +--- a/net/wireless/core.h
2891 ++++ b/net/wireless/core.h
2892 +@@ -433,6 +433,8 @@ void cfg80211_sme_abandon_assoc(struct wireless_dev *wdev);
2893 +
2894 + /* internal helpers */
2895 + bool cfg80211_supported_cipher_suite(struct wiphy *wiphy, u32 cipher);
2896 ++bool cfg80211_valid_key_idx(struct cfg80211_registered_device *rdev,
2897 ++ int key_idx, bool pairwise);
2898 + int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
2899 + struct key_params *params, int key_idx,
2900 + bool pairwise, const u8 *mac_addr);
2901 +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
2902 +index 5bb2316befb98..7b170ed6923e7 100644
2903 +--- a/net/wireless/nl80211.c
2904 ++++ b/net/wireless/nl80211.c
2905 +@@ -3979,9 +3979,6 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
2906 + if (err)
2907 + return err;
2908 +
2909 +- if (key.idx < 0)
2910 +- return -EINVAL;
2911 +-
2912 + if (info->attrs[NL80211_ATTR_MAC])
2913 + mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
2914 +
2915 +@@ -3997,6 +3994,10 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
2916 + key.type != NL80211_KEYTYPE_GROUP)
2917 + return -EINVAL;
2918 +
2919 ++ if (!cfg80211_valid_key_idx(rdev, key.idx,
2920 ++ key.type == NL80211_KEYTYPE_PAIRWISE))
2921 ++ return -EINVAL;
2922 ++
2923 + if (!rdev->ops->del_key)
2924 + return -EOPNOTSUPP;
2925 +
2926 +diff --git a/net/wireless/util.c b/net/wireless/util.c
2927 +index 82244e2fc1f54..4eae6ad328514 100644
2928 +--- a/net/wireless/util.c
2929 ++++ b/net/wireless/util.c
2930 +@@ -229,11 +229,48 @@ bool cfg80211_supported_cipher_suite(struct wiphy *wiphy, u32 cipher)
2931 + return false;
2932 + }
2933 +
2934 ++static bool
2935 ++cfg80211_igtk_cipher_supported(struct cfg80211_registered_device *rdev)
2936 ++{
2937 ++ struct wiphy *wiphy = &rdev->wiphy;
2938 ++ int i;
2939 ++
2940 ++ for (i = 0; i < wiphy->n_cipher_suites; i++) {
2941 ++ switch (wiphy->cipher_suites[i]) {
2942 ++ case WLAN_CIPHER_SUITE_AES_CMAC:
2943 ++ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
2944 ++ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
2945 ++ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
2946 ++ return true;
2947 ++ }
2948 ++ }
2949 ++
2950 ++ return false;
2951 ++}
2952 ++
2953 ++bool cfg80211_valid_key_idx(struct cfg80211_registered_device *rdev,
2954 ++ int key_idx, bool pairwise)
2955 ++{
2956 ++ int max_key_idx;
2957 ++
2958 ++ if (pairwise)
2959 ++ max_key_idx = 3;
2960 ++ else if (cfg80211_igtk_cipher_supported(rdev))
2961 ++ max_key_idx = 5;
2962 ++ else
2963 ++ max_key_idx = 3;
2964 ++
2965 ++ if (key_idx < 0 || key_idx > max_key_idx)
2966 ++ return false;
2967 ++
2968 ++ return true;
2969 ++}
2970 ++
2971 + int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
2972 + struct key_params *params, int key_idx,
2973 + bool pairwise, const u8 *mac_addr)
2974 + {
2975 +- if (key_idx < 0 || key_idx > 5)
2976 ++ if (!cfg80211_valid_key_idx(rdev, key_idx, pairwise))
2977 + return -EINVAL;
2978 +
2979 + if (!pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
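Summary of the index ranges cfg80211_valid_key_idx() now enforces (restating the code above, no new behaviour):

    /* pairwise keys:                        idx 0..3
     * group keys, no BIP cipher supported:  idx 0..3
     * group keys, BIP/IGTK supported:       idx 0..5 (4-5 are IGTK slots)
     */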
2980 +diff --git a/samples/vfio-mdev/mdpy-fb.c b/samples/vfio-mdev/mdpy-fb.c
2981 +index 2719bb2596530..a760e130bd0d6 100644
2982 +--- a/samples/vfio-mdev/mdpy-fb.c
2983 ++++ b/samples/vfio-mdev/mdpy-fb.c
2984 +@@ -117,22 +117,27 @@ static int mdpy_fb_probe(struct pci_dev *pdev,
2985 + if (format != DRM_FORMAT_XRGB8888) {
2986 + pci_err(pdev, "format mismatch (0x%x != 0x%x)\n",
2987 + format, DRM_FORMAT_XRGB8888);
2988 +- return -EINVAL;
2989 ++ ret = -EINVAL;
2990 ++ goto err_release_regions;
2991 + }
2992 + if (width < 100 || width > 10000) {
2993 + pci_err(pdev, "width (%d) out of range\n", width);
2994 +- return -EINVAL;
2995 ++ ret = -EINVAL;
2996 ++ goto err_release_regions;
2997 + }
2998 + if (height < 100 || height > 10000) {
2999 + pci_err(pdev, "height (%d) out of range\n", height);
3000 +- return -EINVAL;
3001 ++ ret = -EINVAL;
3002 ++ goto err_release_regions;
3003 + }
3004 + pci_info(pdev, "mdpy found: %dx%d framebuffer\n",
3005 + width, height);
3006 +
3007 + info = framebuffer_alloc(sizeof(struct mdpy_fb_par), &pdev->dev);
3008 +- if (!info)
3009 ++ if (!info) {
3010 ++ ret = -ENOMEM;
3011 + goto err_release_regions;
3012 ++ }
3013 + pci_set_drvdata(pdev, info);
3014 + par = info->par;
3015 +
3016 +diff --git a/sound/core/timer.c b/sound/core/timer.c
3017 +index 013f0e69ff0f7..b5a0ba79bf746 100644
3018 +--- a/sound/core/timer.c
3019 ++++ b/sound/core/timer.c
3020 +@@ -491,9 +491,10 @@ static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
3021 + return;
3022 + if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
3023 + return;
3024 ++ event += 10; /* convert to SNDRV_TIMER_EVENT_MXXX */
3025 + list_for_each_entry(ts, &ti->slave_active_head, active_list)
3026 + if (ts->ccallback)
3027 +- ts->ccallback(ts, event + 100, &tstamp, resolution);
3028 ++ ts->ccallback(ts, event, &tstamp, resolution);
3029 + }
3030 +
3031 + /* start/continue a master timer */
3032 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3033 +index b9fa2ee0a40cb..de40bb99b6793 100644
3034 +--- a/sound/pci/hda/patch_realtek.c
3035 ++++ b/sound/pci/hda/patch_realtek.c
3036 +@@ -8062,6 +8062,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3037 + SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
3038 + SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
3039 + SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
3040 ++ SND_PCI_QUIRK(0x103c, 0x841c, "HP Pavilion 15-CK0xx", ALC269_FIXUP_HP_MUTE_LED_MIC3),
3041 + SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
3042 + SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN),
3043 + SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
3044 +diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
3045 +index 5251818e10d33..d926869c031b1 100644
3046 +--- a/sound/usb/mixer_quirks.c
3047 ++++ b/sound/usb/mixer_quirks.c
3048 +@@ -1697,7 +1697,7 @@ static struct snd_kcontrol_new snd_microii_mixer_spdif[] = {
3049 + static int snd_microii_controls_create(struct usb_mixer_interface *mixer)
3050 + {
3051 + int err, i;
3052 +- const static usb_mixer_elem_resume_func_t resume_funcs[] = {
3053 ++ static const usb_mixer_elem_resume_func_t resume_funcs[] = {
3054 + snd_microii_spdif_default_update,
3055 + NULL,
3056 + snd_microii_spdif_switch_update