From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.0 commit in: /
Date: Sat, 04 May 2019 18:29:56
Message-Id: 1556994578.dc81aa26ea1bd832413eabc76bcff4c1421e0b2c.mpagano@gentoo
commit:     dc81aa26ea1bd832413eabc76bcff4c1421e0b2c
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat May 4 18:29:38 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat May 4 18:29:38 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=dc81aa26

Linux patch 5.0.12

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1011_linux-5.0.12.patch | 3398 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 3402 insertions(+)

diff --git a/0000_README b/0000_README
index 4dfa486..3b63726 100644
--- a/0000_README
+++ b/0000_README
@@ -87,6 +87,10 @@ Patch: 1010_linux-5.0.11.patch
From: http://www.kernel.org
Desc: Linux 5.0.11

+Patch: 1011_linux-5.0.12.patch
+From: http://www.kernel.org
+Desc: Linux 5.0.12
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1011_linux-5.0.12.patch b/1011_linux-5.0.12.patch
new file mode 100644
index 0000000..f1fc8ab
--- /dev/null
+++ b/1011_linux-5.0.12.patch
@@ -0,0 +1,3398 @@
+diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
+index d1ee484a787d..ee9984f35868 100644
+--- a/Documentation/i2c/busses/i2c-i801
++++ b/Documentation/i2c/busses/i2c-i801
+@@ -36,6 +36,7 @@ Supported adapters:
+ * Intel Cannon Lake (PCH)
+ * Intel Cedar Fork (PCH)
+ * Intel Ice Lake (PCH)
++ * Intel Comet Lake (PCH)
+ Datasheets: Publicly available at the Intel website
+
+ On Intel Patsburg and later chipsets, both the normal host SMBus controller
+diff --git a/Makefile b/Makefile
+index c3daaefa979c..fd044f594bbf 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 0
+-SUBLEVEL = 11
++SUBLEVEL = 12
+ EXTRAVERSION =
+ NAME = Shy Crocodile
+
+@@ -31,7 +31,7 @@ _all:
+ # descending is started. They are now explicitly listed as the
+ # prepare rule.
+
+-ifneq ($(sub-make-done),1)
++ifneq ($(sub_make_done),1)
+
+ # Do not use make's built-in rules and variables
+ # (this increases performance and avoids hard-to-debug behaviour)
+@@ -159,6 +159,8 @@ need-sub-make := 1
+ $(lastword $(MAKEFILE_LIST)): ;
+ endif
+
++export sub_make_done := 1
++
+ ifeq ($(need-sub-make),1)
+
+ PHONY += $(MAKECMDGOALS) sub-make
+@@ -168,12 +170,12 @@ $(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make
+
+ # Invoke a second make in the output directory, passing relevant variables
+ sub-make:
+- $(Q)$(MAKE) sub-make-done=1 \
++ $(Q)$(MAKE) \
+ $(if $(KBUILD_OUTPUT),-C $(KBUILD_OUTPUT) KBUILD_SRC=$(CURDIR)) \
+ -f $(CURDIR)/Makefile $(filter-out _all sub-make,$(MAKECMDGOALS))
+
+ endif # need-sub-make
+-endif # sub-make-done
++endif # sub_make_done
+
+ # We process the rest of the Makefile if this is the final invocation of make
+ ifeq ($(need-sub-make),)
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 26524b75970a..e5d56d9b712c 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -593,6 +593,7 @@ config ARCH_DAVINCI
+ select HAVE_IDE
+ select PM_GENERIC_DOMAINS if PM
+ select PM_GENERIC_DOMAINS_OF if PM && OF
++ select REGMAP_MMIO
+ select RESET_CONTROLLER
+ select USE_OF
+ select ZONE_DMA
+diff --git a/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts b/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
+index 5641d162dfdb..28e7513ce617 100644
+--- a/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
++++ b/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
+@@ -93,7 +93,7 @@
+ };
+
+ &hdmi {
+- hpd-gpios = <&gpio 46 GPIO_ACTIVE_LOW>;
++ hpd-gpios = <&gpio 46 GPIO_ACTIVE_HIGH>;
+ };
+
+ &pwm {
+diff --git a/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi b/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi
+index 1d1b4bd0670f..a4217f564a53 100644
+--- a/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi
+@@ -264,7 +264,7 @@
+ pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+ vmcc-supply = <&reg_sd3_vmmc>;
+ cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
+- bus-witdh = <4>;
++ bus-width = <4>;
+ no-1-8-v;
+ status = "okay";
+ };
+@@ -275,7 +275,7 @@
+ pinctrl-1 = <&pinctrl_usdhc4_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc4_200mhz>;
+ vmcc-supply = <&reg_sd4_vmmc>;
+- bus-witdh = <8>;
++ bus-width = <8>;
+ no-1-8-v;
+ non-removable;
+ status = "okay";
+diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+index 1b50b01e9bac..65d03c5d409b 100644
+--- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+@@ -90,6 +90,7 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet>;
+ phy-mode = "rgmii";
++ phy-reset-duration = <10>; /* in msecs */
+ phy-reset-gpios = <&gpio3 23 GPIO_ACTIVE_LOW>;
+ phy-supply = <&vdd_eth_io_reg>;
+ status = "disabled";
+diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
+index 3a875fc1b63c..cee06509f00a 100644
+--- a/arch/arm/include/asm/kvm_mmu.h
++++ b/arch/arm/include/asm/kvm_mmu.h
+@@ -381,6 +381,17 @@ static inline int kvm_read_guest_lock(struct kvm *kvm,
+ return ret;
+ }
+
++static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
++ const void *data, unsigned long len)
++{
++ int srcu_idx = srcu_read_lock(&kvm->srcu);
++ int ret = kvm_write_guest(kvm, gpa, data, len);
++
++ srcu_read_unlock(&kvm->srcu, srcu_idx);
++
++ return ret;
++}
++
+ static inline void *kvm_get_hyp_vector(void)
+ {
+ switch(read_cpuid_part()) {
+diff --git a/arch/arm/include/asm/stage2_pgtable.h b/arch/arm/include/asm/stage2_pgtable.h
+index de2089501b8b..9e11dce55e06 100644
+--- a/arch/arm/include/asm/stage2_pgtable.h
++++ b/arch/arm/include/asm/stage2_pgtable.h
+@@ -75,6 +75,8 @@ static inline bool kvm_stage2_has_pud(struct kvm *kvm)
+
+ #define S2_PMD_MASK PMD_MASK
+ #define S2_PMD_SIZE PMD_SIZE
++#define S2_PUD_MASK PUD_MASK
++#define S2_PUD_SIZE PUD_SIZE
+
+ static inline bool kvm_stage2_has_pmd(struct kvm *kvm)
+ {
+diff --git a/arch/arm/mach-imx/mach-imx51.c b/arch/arm/mach-imx/mach-imx51.c
+index c7169c2f94c4..08c7892866c2 100644
+--- a/arch/arm/mach-imx/mach-imx51.c
++++ b/arch/arm/mach-imx/mach-imx51.c
+@@ -59,6 +59,7 @@ static void __init imx51_m4if_setup(void)
+ return;
+
+ m4if_base = of_iomap(np, 0);
++ of_node_put(np);
+ if (!m4if_base) {
+ pr_err("Unable to map M4IF registers\n");
+ return;
+diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
+index b2f606e286ce..327d12097643 100644
+--- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
+@@ -2,7 +2,7 @@
+ /*
+ * Device Tree Source for the R-Car E3 (R8A77990) SoC
+ *
+- * Copyright (C) 2018 Renesas Electronics Corp.
++ * Copyright (C) 2018-2019 Renesas Electronics Corp.
+ */
+
+ #include <dt-bindings/clock/r8a77990-cpg-mssr.h>
+@@ -1040,9 +1040,8 @@
+ <&cpg CPG_CORE R8A77990_CLK_S3D1C>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+- dmas = <&dmac1 0x5b>, <&dmac1 0x5a>,
+- <&dmac2 0x5b>, <&dmac2 0x5a>;
+- dma-names = "tx", "rx", "tx", "rx";
++ dmas = <&dmac0 0x5b>, <&dmac0 0x5a>;
++ dma-names = "tx", "rx";
+ power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
+ resets = <&cpg 202>;
+ status = "disabled";
+diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
+index 8af4b1befa42..c246effd1b67 100644
+--- a/arch/arm64/include/asm/kvm_mmu.h
++++ b/arch/arm64/include/asm/kvm_mmu.h
+@@ -444,6 +444,17 @@ static inline int kvm_read_guest_lock(struct kvm *kvm,
+ return ret;
+ }
+
++static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
++ const void *data, unsigned long len)
++{
++ int srcu_idx = srcu_read_lock(&kvm->srcu);
++ int ret = kvm_write_guest(kvm, gpa, data, len);
++
++ srcu_read_unlock(&kvm->srcu, srcu_idx);
++
++ return ret;
++}
++
+ #ifdef CONFIG_KVM_INDIRECT_VECTORS
+ /*
+ * EL2 vectors can be mapped and rerouted in a number of ways,
+diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
+index f16a5f8ff2b4..e2a0500cd7a2 100644
+--- a/arch/arm64/kvm/reset.c
++++ b/arch/arm64/kvm/reset.c
+@@ -123,6 +123,9 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
+ int ret = -EINVAL;
+ bool loaded;
+
++ /* Reset PMU outside of the non-preemptible section */
++ kvm_pmu_vcpu_reset(vcpu);
++
+ preempt_disable();
+ loaded = (vcpu->cpu != -1);
+ if (loaded)
+@@ -170,9 +173,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
+ vcpu->arch.reset_state.reset = false;
+ }
+
+- /* Reset PMU */
+- kvm_pmu_vcpu_reset(vcpu);
+-
+ /* Default workaround setup is enabled (if supported) */
+ if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
+ vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
+diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
+index 7d22a474a040..f74639a05f0f 100644
+--- a/arch/s390/include/asm/elf.h
++++ b/arch/s390/include/asm/elf.h
+@@ -252,11 +252,14 @@ do { \
+
+ /*
+ * Cache aliasing on the latest machines calls for a mapping granularity
+- * of 512KB. For 64-bit processes use a 512KB alignment and a randomization
+- * of up to 1GB. For 31-bit processes the virtual address space is limited,
+- * use no alignment and limit the randomization to 8MB.
++ * of 512KB for the anonymous mapping base. For 64-bit processes use a
++ * 512KB alignment and a randomization of up to 1GB. For 31-bit processes
++ * the virtual address space is limited, use no alignment and limit the
++ * randomization to 8MB.
++ * For the additional randomization of the program break use 32MB for
++ * 64-bit and 8MB for 31-bit.
+ */
+-#define BRK_RND_MASK (is_compat_task() ? 0x7ffUL : 0x3ffffUL)
++#define BRK_RND_MASK (is_compat_task() ? 0x7ffUL : 0x1fffUL)
+ #define MMAP_RND_MASK (is_compat_task() ? 0x7ffUL : 0x3ff80UL)
+ #define MMAP_ALIGN_MASK (is_compat_task() ? 0 : 0x7fUL)
+ #define STACK_RND_MASK MMAP_RND_MASK
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 71d763ad2637..9f2d890733a9 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1198,6 +1198,8 @@ struct kvm_x86_ops {
+ int (*nested_enable_evmcs)(struct kvm_vcpu *vcpu,
+ uint16_t *vmcs_version);
+ uint16_t (*nested_get_evmcs_version)(struct kvm_vcpu *vcpu);
++
++ bool (*need_emulation_on_page_fault)(struct kvm_vcpu *vcpu);
+ };
+
+ struct kvm_arch_async_pf {
+diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
+index 89d20ed1d2e8..371c669696d7 100644
+--- a/arch/x86/kvm/hyperv.c
++++ b/arch/x86/kvm/hyperv.c
+@@ -526,7 +526,9 @@ static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
+ new_config.enable = 0;
+ stimer->config.as_uint64 = new_config.as_uint64;
+
+- stimer_mark_pending(stimer, false);
++ if (stimer->config.enable)
++ stimer_mark_pending(stimer, false);
++
+ return 0;
+ }
+
+@@ -542,7 +544,10 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
+ stimer->config.enable = 0;
+ else if (stimer->config.auto_enable)
+ stimer->config.enable = 1;
+- stimer_mark_pending(stimer, false);
++
++ if (stimer->config.enable)
++ stimer_mark_pending(stimer, false);
++
+ return 0;
+ }
+
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index 9ab33cab9486..77dbb57412cc 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -4915,11 +4915,15 @@ static union kvm_mmu_role
+ kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
+ bool execonly)
+ {
+- union kvm_mmu_role role;
++ union kvm_mmu_role role = {0};
++ union kvm_mmu_page_role root_base = vcpu->arch.root_mmu.mmu_role.base;
+
+- /* Base role is inherited from root_mmu */
+- role.base.word = vcpu->arch.root_mmu.mmu_role.base.word;
+- role.ext = kvm_calc_mmu_role_ext(vcpu);
++ /* Legacy paging and SMM flags are inherited from root_mmu */
++ role.base.smm = root_base.smm;
++ role.base.nxe = root_base.nxe;
++ role.base.cr0_wp = root_base.cr0_wp;
++ role.base.smep_andnot_wp = root_base.smep_andnot_wp;
++ role.base.smap_andnot_wp = root_base.smap_andnot_wp;
+
+ role.base.level = PT64_ROOT_4LEVEL;
+ role.base.direct = false;
+@@ -4927,6 +4931,7 @@ kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
+ role.base.guest_mode = true;
+ role.base.access = ACC_ALL;
+
++ role.ext = kvm_calc_mmu_role_ext(vcpu);
+ role.ext.execonly = execonly;
+
+ return role;
+@@ -5390,10 +5395,12 @@ emulate:
+ * This can happen if a guest gets a page-fault on data access but the HW
+ * table walker is not able to read the instruction page (e.g instruction
+ * page is not present in memory). In those cases we simply restart the
+- * guest.
++ * guest, with the exception of AMD Erratum 1096 which is unrecoverable.
+ */
+- if (unlikely(insn && !insn_len))
+- return 1;
++ if (unlikely(insn && !insn_len)) {
++ if (!kvm_x86_ops->need_emulation_on_page_fault(vcpu))
++ return 1;
++ }
+
+ er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len);
+
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 516c1de03d47..e544cec812f9 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -7114,6 +7114,36 @@ static int nested_enable_evmcs(struct kvm_vcpu *vcpu,
+ return -ENODEV;
+ }
+
++static bool svm_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
++{
++ bool is_user, smap;
++
++ is_user = svm_get_cpl(vcpu) == 3;
++ smap = !kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
++
++ /*
++ * Detect and workaround Errata 1096 Fam_17h_00_0Fh
++ *
++ * In non SEV guest, hypervisor will be able to read the guest
++ * memory to decode the instruction pointer when insn_len is zero
++ * so we return true to indicate that decoding is possible.
++ *
++ * But in the SEV guest, the guest memory is encrypted with the
++ * guest specific key and hypervisor will not be able to decode the
++ * instruction pointer so we will not able to workaround it. Lets
++ * print the error and request to kill the guest.
++ */
++ if (is_user && smap) {
++ if (!sev_guest(vcpu->kvm))
++ return true;
++
++ pr_err_ratelimited("KVM: Guest triggered AMD Erratum 1096\n");
++ kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
++ }
++
++ return false;
++}
++
+ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
+ .cpu_has_kvm_support = has_svm,
+ .disabled_by_bios = is_disabled,
+@@ -7247,6 +7277,8 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
+
+ .nested_enable_evmcs = nested_enable_evmcs,
+ .nested_get_evmcs_version = nested_get_evmcs_version,
++
++ .need_emulation_on_page_fault = svm_need_emulation_on_page_fault,
+ };
+
+ static int __init svm_init(void)
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 34499081022c..e7fe8c692362 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -7526,6 +7526,11 @@ static int enable_smi_window(struct kvm_vcpu *vcpu)
+ return 0;
+ }
+
++static bool vmx_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
++{
++ return 0;
++}
++
+ static __init int hardware_setup(void)
+ {
+ unsigned long host_bndcfgs;
+@@ -7828,6 +7833,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
+ .set_nested_state = NULL,
+ .get_vmcs12_pages = NULL,
+ .nested_enable_evmcs = NULL,
++ .need_emulation_on_page_fault = vmx_need_emulation_on_page_fault,
+ };
+
+ static void vmx_cleanup_l1d_flush(void)
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 2db58067bb59..8c9fb6453b2f 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1127,7 +1127,7 @@ static u32 msrs_to_save[] = {
+ #endif
+ MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
+ MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
+- MSR_IA32_SPEC_CTRL, MSR_IA32_ARCH_CAPABILITIES,
++ MSR_IA32_SPEC_CTRL,
+ MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH,
+ MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK,
+ MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B,
+@@ -1160,6 +1160,7 @@ static u32 emulated_msrs[] = {
+
+ MSR_IA32_TSC_ADJUST,
+ MSR_IA32_TSCDEADLINE,
++ MSR_IA32_ARCH_CAPABILITIES,
+ MSR_IA32_MISC_ENABLE,
+ MSR_IA32_MCG_STATUS,
+ MSR_IA32_MCG_CTL,
+diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
+index db3165714521..dc726e07d8ba 100644
+--- a/arch/x86/mm/mmap.c
++++ b/arch/x86/mm/mmap.c
+@@ -230,7 +230,7 @@ bool mmap_address_hint_valid(unsigned long addr, unsigned long len)
+ /* Can we access it for direct reading/writing? Must be RAM: */
+ int valid_phys_addr_range(phys_addr_t addr, size_t count)
+ {
+- return addr + count <= __pa(high_memory);
++ return addr + count - 1 <= __pa(high_memory - 1);
+ }
+
+ /* Can we access it through mmap? Must be a valid physical address: */
+diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
+index d10105825d57..47d097946872 100644
+--- a/arch/x86/realmode/init.c
++++ b/arch/x86/realmode/init.c
+@@ -20,8 +20,6 @@ void __init set_real_mode_mem(phys_addr_t mem, size_t size)
+ void *base = __va(mem);
+
+ real_mode_header = (struct real_mode_header *) base;
+- printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
+- base, (unsigned long long)mem, size);
+ }
+
+ void __init reserve_real_mode(void)
+diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
+index 4424997ecf30..e10fec99a182 100644
+--- a/drivers/acpi/acpica/evgpe.c
++++ b/drivers/acpi/acpica/evgpe.c
+@@ -81,12 +81,8 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
+
+ ACPI_FUNCTION_TRACE(ev_enable_gpe);
+
+- /* Clear the GPE status */
+- status = acpi_hw_clear_gpe(gpe_event_info);
+- if (ACPI_FAILURE(status))
+- return_ACPI_STATUS(status);
+-
+ /* Enable the requested GPE */
++
+ status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
+ return_ACPI_STATUS(status);
+ }
+diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
+index b3ed8f9953a8..173e6f2dd9af 100644
+--- a/drivers/ata/libata-zpodd.c
++++ b/drivers/ata/libata-zpodd.c
+@@ -52,38 +52,52 @@ static int eject_tray(struct ata_device *dev)
+ /* Per the spec, only slot type and drawer type ODD can be supported */
+ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
+ {
+- char buf[16];
++ char *buf;
+ unsigned int ret;
+- struct rm_feature_desc *desc = (void *)(buf + 8);
++ struct rm_feature_desc *desc;
+ struct ata_taskfile tf;
+ static const char cdb[] = { GPCMD_GET_CONFIGURATION,
+ 2, /* only 1 feature descriptor requested */
+ 0, 3, /* 3, removable medium feature */
+ 0, 0, 0,/* reserved */
+- 0, sizeof(buf),
++ 0, 16,
+ 0, 0, 0,
+ };
+
++ buf = kzalloc(16, GFP_KERNEL);
++ if (!buf)
++ return ODD_MECH_TYPE_UNSUPPORTED;
++ desc = (void *)(buf + 8);
++
+ ata_tf_init(dev, &tf);
+ tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+ tf.command = ATA_CMD_PACKET;
+ tf.protocol = ATAPI_PROT_PIO;
+- tf.lbam = sizeof(buf);
++ tf.lbam = 16;
+
+ ret = ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
+- buf, sizeof(buf), 0);
+- if (ret)
++ buf, 16, 0);
++ if (ret) {
++ kfree(buf);
+ return ODD_MECH_TYPE_UNSUPPORTED;
++ }
+
+- if (be16_to_cpu(desc->feature_code) != 3)
++ if (be16_to_cpu(desc->feature_code) != 3) {
++ kfree(buf);
+ return ODD_MECH_TYPE_UNSUPPORTED;
++ }
+
+- if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1)
++ if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1) {
++ kfree(buf);
+ return ODD_MECH_TYPE_SLOT;
+- else if (desc->mech_type == 1 && desc->load == 0 && desc->eject == 1)
++ } else if (desc->mech_type == 1 && desc->load == 0 &&
++ desc->eject == 1) {
++ kfree(buf);
+ return ODD_MECH_TYPE_DRAWER;
+- else
++ } else {
++ kfree(buf);
+ return ODD_MECH_TYPE_UNSUPPORTED;
++ }
+ }
+
+ /* Test if ODD is zero power ready by sense code */
+diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
+index 854bce4fb9e7..217507002dbc 100644
+--- a/drivers/gpio/gpio-aspeed.c
++++ b/drivers/gpio/gpio-aspeed.c
+@@ -1224,6 +1224,8 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
+
+ gpio->offset_timer =
+ devm_kzalloc(&pdev->dev, gpio->chip.ngpio, GFP_KERNEL);
++ if (!gpio->offset_timer)
++ return -ENOMEM;
+
+ return aspeed_gpio_setup_irqs(gpio, pdev);
+ }
+diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
+index a1dd2f1c0d02..13a402ede07a 100644
+--- a/drivers/gpio/gpiolib-of.c
++++ b/drivers/gpio/gpiolib-of.c
+@@ -119,7 +119,8 @@ static void of_gpio_flags_quirks(struct device_node *np,
+ * to determine if the flags should have inverted semantics.
+ */
+ if (IS_ENABLED(CONFIG_SPI_MASTER) &&
+- of_property_read_bool(np, "cs-gpios")) {
++ of_property_read_bool(np, "cs-gpios") &&
++ !strcmp(propname, "cs-gpios")) {
+ struct device_node *child;
+ u32 cs;
+ int ret;
+@@ -141,16 +142,16 @@ static void of_gpio_flags_quirks(struct device_node *np,
+ * conflict and the "spi-cs-high" flag will
+ * take precedence.
+ */
+- if (of_property_read_bool(np, "spi-cs-high")) {
++ if (of_property_read_bool(child, "spi-cs-high")) {
+ if (*flags & OF_GPIO_ACTIVE_LOW) {
+ pr_warn("%s GPIO handle specifies active low - ignored\n",
+- of_node_full_name(np));
++ of_node_full_name(child));
+ *flags &= ~OF_GPIO_ACTIVE_LOW;
+ }
+ } else {
+ if (!(*flags & OF_GPIO_ACTIVE_LOW))
+ pr_info("%s enforce active low on chipselect handle\n",
+- of_node_full_name(np));
++ of_node_full_name(child));
+ *flags |= OF_GPIO_ACTIVE_LOW;
+ }
+ break;
+@@ -711,7 +712,13 @@ int of_gpiochip_add(struct gpio_chip *chip)
+
+ of_node_get(chip->of_node);
+
+- return of_gpiochip_scan_gpios(chip);
++ status = of_gpiochip_scan_gpios(chip);
++ if (status) {
++ of_node_put(chip->of_node);
++ gpiochip_remove_pin_ranges(chip);
++ }
++
++ return status;
+ }
+
+ void of_gpiochip_remove(struct gpio_chip *chip)
+diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
+index 12e5e2be7890..7a59b8b3ed5a 100644
+--- a/drivers/gpu/drm/drm_drv.c
++++ b/drivers/gpu/drm/drm_drv.c
+@@ -381,11 +381,7 @@ void drm_dev_unplug(struct drm_device *dev)
+ synchronize_srcu(&drm_unplug_srcu);
+
+ drm_dev_unregister(dev);
+-
+- mutex_lock(&drm_global_mutex);
+- if (dev->open_count == 0)
+- drm_dev_put(dev);
+- mutex_unlock(&drm_global_mutex);
++ drm_dev_put(dev);
+ }
+ EXPORT_SYMBOL(drm_dev_unplug);
+
+diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
+index 46f48f245eb5..3f20f598cd7c 100644
+--- a/drivers/gpu/drm/drm_file.c
++++ b/drivers/gpu/drm/drm_file.c
+@@ -479,11 +479,9 @@ int drm_release(struct inode *inode, struct file *filp)
+
+ drm_file_free(file_priv);
+
+- if (!--dev->open_count) {
++ if (!--dev->open_count)
+ drm_lastclose(dev);
+- if (drm_dev_is_unplugged(dev))
+- drm_put_dev(dev);
+- }
++
+ mutex_unlock(&drm_global_mutex);
+
+ drm_minor_release(minor);
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index dcd1df5322e8..21c6016ccba5 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -1871,6 +1871,9 @@ static bool intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
+ u8 dsc_max_bpc;
+ int pipe_bpp;
+
++ pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
++ intel_dp_supports_fec(intel_dp, pipe_config);
++
+ if (!intel_dp_supports_dsc(intel_dp, pipe_config))
+ return false;
+
+@@ -2097,9 +2100,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
+ return false;
+
+- pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
+- intel_dp_supports_fec(intel_dp, pipe_config);
+-
+ if (!intel_dp_compute_link_config(encoder, pipe_config, conn_state))
+ return false;
+
+diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
+index 12ff47b13668..a13704ab5d11 100644
+--- a/drivers/gpu/drm/meson/meson_drv.c
++++ b/drivers/gpu/drm/meson/meson_drv.c
+@@ -317,12 +317,14 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+- goto free_drm;
++ goto uninstall_irq;
+
+ drm_fbdev_generic_setup(drm, 32);
+
+ return 0;
+
++uninstall_irq:
++ drm_irq_uninstall(drm);
+ free_drm:
+ drm_dev_put(drm);
+
+@@ -336,8 +338,8 @@ static int meson_drv_bind(struct device *dev)
+
+ static void meson_drv_unbind(struct device *dev)
+ {
+- struct drm_device *drm = dev_get_drvdata(dev);
+- struct meson_drm *priv = drm->dev_private;
++ struct meson_drm *priv = dev_get_drvdata(dev);
++ struct drm_device *drm = priv->drm;
+
+ if (priv->canvas) {
+ meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
+@@ -347,6 +349,7 @@ static void meson_drv_unbind(struct device *dev)
+ }
+
+ drm_dev_unregister(drm);
++ drm_irq_uninstall(drm);
+ drm_kms_helper_poll_fini(drm);
+ drm_mode_config_cleanup(drm);
+ drm_dev_put(drm);
+diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
+index 922a48d5a483..c7c612579270 100644
+--- a/drivers/gpu/drm/tegra/hub.c
++++ b/drivers/gpu/drm/tegra/hub.c
+@@ -378,14 +378,16 @@ static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
+ static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+ {
+- struct tegra_dc *dc = to_tegra_dc(old_state->crtc);
+ struct tegra_plane *p = to_tegra_plane(plane);
++ struct tegra_dc *dc;
+ u32 value;
+
+ /* rien ne va plus */
+ if (!old_state || !old_state->crtc)
+ return;
+
++ dc = to_tegra_dc(old_state->crtc);
++
+ /*
+ * XXX Legacy helpers seem to sometimes call ->atomic_disable() even
+ * on planes that are already disabled. Make sure we fallback to the
+diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
+index f2c681971201..f8979abb9a19 100644
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -131,6 +131,7 @@ config I2C_I801
+ Cannon Lake (PCH)
+ Cedar Fork (PCH)
+ Ice Lake (PCH)
++ Comet Lake (PCH)
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-i801.
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index c91e145ef5a5..679c6c41f64b 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -71,6 +71,7 @@
+ * Cannon Lake-LP (PCH) 0x9da3 32 hard yes yes yes
+ * Cedar Fork (PCH) 0x18df 32 hard yes yes yes
+ * Ice Lake-LP (PCH) 0x34a3 32 hard yes yes yes
++ * Comet Lake (PCH) 0x02a3 32 hard yes yes yes
+ *
+ * Features supported by this driver:
+ * Software PEC no
+@@ -240,6 +241,7 @@
+ #define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS 0xa223
+ #define PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS 0xa2a3
+ #define PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS 0xa323
++#define PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS 0x02a3
+
+ struct i801_mux_config {
+ char *gpio_chip;
+@@ -1038,6 +1040,7 @@ static const struct pci_device_id i801_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS) },
++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS) },
+ { 0, }
+ };
+
+@@ -1534,6 +1537,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
+ case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS:
+ case PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS:
++ case PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS:
+ priv->features |= FEATURE_I2C_BLOCK_READ;
+ priv->features |= FEATURE_IRQ;
+ priv->features |= FEATURE_SMBUS_PEC;
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index e628ef23418f..55b3e4b9d5dc 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -3166,21 +3166,24 @@ static void amd_iommu_get_resv_regions(struct device *dev,
+ return;
+
+ list_for_each_entry(entry, &amd_iommu_unity_map, list) {
++ int type, prot = 0;
+ size_t length;
+- int prot = 0;
+
+ if (devid < entry->devid_start || devid > entry->devid_end)
+ continue;
+
++ type = IOMMU_RESV_DIRECT;
+ length = entry->address_end - entry->address_start;
+ if (entry->prot & IOMMU_PROT_IR)
+ prot |= IOMMU_READ;
+ if (entry->prot & IOMMU_PROT_IW)
+ prot |= IOMMU_WRITE;
++ if (entry->prot & IOMMU_UNITY_MAP_FLAG_EXCL_RANGE)
++ /* Exclusion range */
++ type = IOMMU_RESV_RESERVED;
+
+ region = iommu_alloc_resv_region(entry->address_start,
+- length, prot,
+- IOMMU_RESV_DIRECT);
++ length, prot, type);
+ if (!region) {
+ pr_err("Out of memory allocating dm-regions for %s\n",
+ dev_name(dev));
+diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
+index 66123b911ec8..84fa5b22371e 100644
+--- a/drivers/iommu/amd_iommu_init.c
++++ b/drivers/iommu/amd_iommu_init.c
+@@ -2013,6 +2013,9 @@ static int __init init_unity_map_range(struct ivmd_header *m)
+ if (e == NULL)
+ return -ENOMEM;
+
++ if (m->flags & IVMD_FLAG_EXCL_RANGE)
++ init_exclusion_range(m);
++
+ switch (m->type) {
+ default:
+ kfree(e);
+@@ -2059,9 +2062,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
+
+ while (p < end) {
+ m = (struct ivmd_header *)p;
+- if (m->flags & IVMD_FLAG_EXCL_RANGE)
+- init_exclusion_range(m);
+- else if (m->flags & IVMD_FLAG_UNITY_MAP)
++ if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
+ init_unity_map_range(m);
+
+ p += m->length;
+diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
+index eae0741f72dc..87965e4d9647 100644
+--- a/drivers/iommu/amd_iommu_types.h
++++ b/drivers/iommu/amd_iommu_types.h
+@@ -374,6 +374,8 @@
+ #define IOMMU_PROT_IR 0x01
+ #define IOMMU_PROT_IW 0x02
+
++#define IOMMU_UNITY_MAP_FLAG_EXCL_RANGE (1 << 2)
++
+ /* IOMMU capabilities */
+ #define IOMMU_CAP_IOTLB 24
+ #define IOMMU_CAP_NPCACHE 26
+diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
+index 7fea18b0c15d..7cb4d685a1f1 100644
+--- a/drivers/leds/leds-pca9532.c
++++ b/drivers/leds/leds-pca9532.c
+@@ -513,6 +513,7 @@ static int pca9532_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+ {
+ int devid;
++ const struct of_device_id *of_id;
+ struct pca9532_data *data = i2c_get_clientdata(client);
+ struct pca9532_platform_data *pca9532_pdata =
+ dev_get_platdata(&client->dev);
+@@ -528,8 +529,11 @@ static int pca9532_probe(struct i2c_client *client,
+ dev_err(&client->dev, "no platform data\n");
+ return -EINVAL;
+ }
+- devid = (int)(uintptr_t)of_match_device(
+- of_pca9532_leds_match, &client->dev)->data;
++ of_id = of_match_device(of_pca9532_leds_match,
++ &client->dev);
++ if (unlikely(!of_id))
++ return -EINVAL;
++ devid = (int)(uintptr_t) of_id->data;
+ } else {
+ devid = id->driver_data;
+ }
+diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
+index 3dd3ed46d473..136f86a1627d 100644
+--- a/drivers/leds/trigger/ledtrig-netdev.c
++++ b/drivers/leds/trigger/ledtrig-netdev.c
+@@ -122,7 +122,8 @@ static ssize_t device_name_store(struct device *dev,
+ trigger_data->net_dev = NULL;
+ }
+
+- strncpy(trigger_data->device_name, buf, size);
++ memcpy(trigger_data->device_name, buf, size);
++ trigger_data->device_name[size] = 0;
+ if (size > 0 && trigger_data->device_name[size - 1] == '\n')
+ trigger_data->device_name[size - 1] = 0;
+
+@@ -301,11 +302,11 @@ static int netdev_trig_notify(struct notifier_block *nb,
+ container_of(nb, struct led_netdev_data, notifier);
+
+ if (evt != NETDEV_UP && evt != NETDEV_DOWN && evt != NETDEV_CHANGE
+- && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER
+- && evt != NETDEV_CHANGENAME)
++ && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER)
+ return NOTIFY_DONE;
+
+- if (strcmp(dev->name, trigger_data->device_name))
++ if (!(dev == trigger_data->net_dev ||
++ (evt == NETDEV_REGISTER && !strcmp(dev->name, trigger_data->device_name))))
+ return NOTIFY_DONE;
+
+ cancel_delayed_work_sync(&trigger_data->work);
+@@ -320,12 +321,9 @@ static int netdev_trig_notify(struct notifier_block *nb,
+ dev_hold(dev);
+ trigger_data->net_dev = dev;
+ break;
+- case NETDEV_CHANGENAME:
+ case NETDEV_UNREGISTER:
+- if (trigger_data->net_dev) {
+- dev_put(trigger_data->net_dev);
+- trigger_data->net_dev = NULL;
+- }
++ dev_put(trigger_data->net_dev);
++ trigger_data->net_dev = NULL;
+ break;
+ case NETDEV_UP:
+ case NETDEV_CHANGE:
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index 2b2882615e8b..6cbe515bfdeb 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -3318,14 +3318,20 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
+ *hclk = devm_clk_get(&pdev->dev, "hclk");
+ }
+
+- if (IS_ERR(*pclk)) {
++ if (IS_ERR_OR_NULL(*pclk)) {
+ err = PTR_ERR(*pclk);
++ if (!err)
++ err = -ENODEV;
++
+ dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
+ return err;
+ }
+
+- if (IS_ERR(*hclk)) {
++ if (IS_ERR_OR_NULL(*hclk)) {
+ err = PTR_ERR(*hclk);
++ if (!err)
++ err = -ENODEV;
++
+ dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
+ return err;
+ }
+diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
+index 3baabdc89726..90b62c1412c8 100644
+--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
++++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
+@@ -3160,6 +3160,7 @@ static ssize_t ehea_probe_port(struct device *dev,
+
+ if (ehea_add_adapter_mr(adapter)) {
+ pr_err("creating MR failed\n");
++ of_node_put(eth_dn);
+ return -EIO;
+ }
+
+diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
+index bd6e9014bc74..b83b070a9eec 100644
+--- a/drivers/net/ethernet/micrel/ks8851.c
++++ b/drivers/net/ethernet/micrel/ks8851.c
+@@ -535,9 +535,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
+ /* set dma read address */
+ ks8851_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI | 0x00);
+
+- /* start the packet dma process, and set auto-dequeue rx */
+- ks8851_wrreg16(ks, KS_RXQCR,
+- ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE);
++ /* start DMA access */
++ ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
+
+ if (rxlen > 4) {
+ unsigned int rxalign;
+@@ -568,7 +567,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
+ }
+ }
+
+- ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
++ /* end DMA access and dequeue packet */
++ ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_RRXEF);
+ }
+ }
+
+@@ -785,6 +785,15 @@ static void ks8851_tx_work(struct work_struct *work)
+ static int ks8851_net_open(struct net_device *dev)
+ {
+ struct ks8851_net *ks = netdev_priv(dev);
++ int ret;
++
++ ret = request_threaded_irq(dev->irq, NULL, ks8851_irq,
++ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
++ dev->name, ks);
++ if (ret < 0) {
++ netdev_err(dev, "failed to get irq\n");
++ return ret;
++ }
+
+ /* lock the card, even if we may not actually be doing anything
+ * else at the moment */
+@@ -849,6 +858,7 @@ static int ks8851_net_open(struct net_device *dev)
+ netif_dbg(ks, ifup, ks->netdev, "network device up\n");
+
+ mutex_unlock(&ks->lock);
++ mii_check_link(&ks->mii);
+ return 0;
+ }
+
+@@ -899,6 +909,8 @@ static int ks8851_net_stop(struct net_device *dev)
+ dev_kfree_skb(txb);
+ }
+
++ free_irq(dev->irq, ks);
++
+ return 0;
+ }
+
+@@ -1508,6 +1520,7 @@ static int ks8851_probe(struct spi_device *spi)
+
+ spi_set_drvdata(spi, ks);
+
++ netif_carrier_off(ks->netdev);
+ ndev->if_port = IF_PORT_100BASET;
+ ndev->netdev_ops = &ks8851_netdev_ops;
+ ndev->irq = spi->irq;
+@@ -1529,14 +1542,6 @@ static int ks8851_probe(struct spi_device *spi)
+ ks8851_read_selftest(ks);
+ ks8851_init_mac(ks);
+
+- ret = request_threaded_irq(spi->irq, NULL, ks8851_irq,
+- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+- ndev->name, ks);
+- if (ret < 0) {
+- dev_err(&spi->dev, "failed to get irq\n");
+- goto err_irq;
+- }
+-
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(&spi->dev, "failed to register network device\n");
+@@ -1549,14 +1554,10 @@ static int ks8851_probe(struct spi_device *spi)
+
+ return 0;
+
+-
+ err_netdev:
+- free_irq(ndev->irq, ks);
+-
+-err_irq:
++err_id:
+ if (gpio_is_valid(gpio))
+ gpio_set_value(gpio, 0);
+-err_id:
+ regulator_disable(ks->vdd_reg);
+ err_reg:
+ regulator_disable(ks->vdd_io);
+@@ -1574,7 +1575,6 @@ static int ks8851_remove(struct spi_device *spi)
+ dev_info(&spi->dev, "remove\n");
+
+ unregister_netdev(priv->netdev);
+- free_irq(spi->irq, priv);
+ if (gpio_is_valid(priv->gpio))
+ gpio_set_value(priv->gpio, 0);
+ regulator_disable(priv->vdd_reg);
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+index 3b0adda7cc9c..a4cd6f2cfb86 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+@@ -1048,6 +1048,8 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
+
+ for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
+ skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
++ if (!skb)
++ break;
+ qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
+ skb_put(skb, QLCNIC_ILB_PKT_SIZE);
+ adapter->ahw->diag_cnt = 0;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+index c0c75c111abb..4d9bcb4d0378 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
++++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+@@ -59,7 +59,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
+
+ desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
+ stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
+- STMMAC_RING_MODE, 1, false, skb->len);
++ STMMAC_RING_MODE, 0, false, skb->len);
+ tx_q->tx_skbuff[entry] = NULL;
+ entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+
+@@ -79,7 +79,8 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
+
+ desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
+ stmmac_prepare_tx_desc(priv, desc, 0, len, csum,
+- STMMAC_RING_MODE, 1, true, skb->len);
++ STMMAC_RING_MODE, 1, !skb_is_nonlinear(skb),
++ skb->len);
+ } else {
+ des2 = dma_map_single(priv->device, skb->data,
+ nopaged_len, DMA_TO_DEVICE);
+@@ -91,7 +92,8 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
+ tx_q->tx_skbuff_dma[entry].is_jumbo = true;
+ desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
+ stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum,
+- STMMAC_RING_MODE, 1, true, skb->len);
++ STMMAC_RING_MODE, 0, !skb_is_nonlinear(skb),
++ skb->len);
+ }
+
+ tx_q->cur_tx = entry;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 1d8d6f2ddfd6..0bc3632880b5 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -3190,14 +3190,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
+ stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
+ csum_insertion, priv->mode, 1, last_segment,
+ skb->len);
+-
+- /* The own bit must be the latest setting done when prepare the
+- * descriptor and then barrier is needed to make sure that
+- * all is coherent before granting the DMA engine.
+- */
+- wmb();
++ } else {
++ stmmac_set_tx_owner(priv, first);
+ }
+
++ /* The own bit must be the latest setting done when prepare the
++ * descriptor and then barrier is needed to make sure that
++ * all is coherent before granting the DMA engine.
++ */
++ wmb();
++
+ netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
+
+ stmmac_enable_dma_transmission(priv, priv->ioaddr);
+diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
+index 5174d318901e..0a920c5936b2 100644
+--- a/drivers/net/ethernet/ti/netcp_ethss.c
++++ b/drivers/net/ethernet/ti/netcp_ethss.c
+@@ -3657,12 +3657,16 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
+
+ ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
+ gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
+- if (ret)
++ if (ret) {
++ of_node_put(interfaces);
+ return ret;
++ }
+
+ ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
+- if (ret)
++ if (ret) {
++ of_node_put(interfaces);
+ return ret;
++ }
+
+ /* Create network interfaces */
+ INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index 0789d8af7d72..1ef56edb3918 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -1575,12 +1575,14 @@ static int axienet_probe(struct platform_device *pdev)
+ ret = of_address_to_resource(np, 0, &dmares);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get DMA resource\n");
++ of_node_put(np);
+ goto free_netdev;
+ }
+ lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
+ if (IS_ERR(lp->dma_regs)) {
+ dev_err(&pdev->dev, "could not map DMA regs\n");
+ ret = PTR_ERR(lp->dma_regs);
++ of_node_put(np);
+ goto free_netdev;
+ }
+ lp->rx_irq = irq_of_parse_and_map(np, 1);
+diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
+index cd1d8faccca5..cd6b95e673a5 100644
+--- a/drivers/net/ieee802154/adf7242.c
++++ b/drivers/net/ieee802154/adf7242.c
+@@ -1268,6 +1268,10 @@ static int adf7242_probe(struct spi_device *spi)
+ INIT_DELAYED_WORK(&lp->work, adf7242_rx_cal_work);
+ lp->wqueue = alloc_ordered_workqueue(dev_name(&spi->dev),
+ WQ_MEM_RECLAIM);
++ if (unlikely(!lp->wqueue)) {
++ ret = -ENOMEM;
++ goto err_hw_init;
++ }
+
+ ret = adf7242_hw_init(lp);
+ if (ret)
+diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
+index b6743f03dce0..3b88846de31b 100644
+--- a/drivers/net/ieee802154/mac802154_hwsim.c
++++ b/drivers/net/ieee802154/mac802154_hwsim.c
+@@ -324,7 +324,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
+ goto out_err;
+ }
+
+- genlmsg_reply(skb, info);
++ res = genlmsg_reply(skb, info);
+ break;
+ }
+
+diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
+index 24c7f149f3e6..e11057892f07 100644
+--- a/drivers/net/phy/dp83822.c
++++ b/drivers/net/phy/dp83822.c
+@@ -23,6 +23,8 @@
+ #include <linux/netdevice.h>
+
+ #define DP83822_PHY_ID 0x2000a240
++#define DP83825I_PHY_ID 0x2000a150
++
+ #define DP83822_DEVADDR 0x1f
+
+ #define MII_DP83822_PHYSCR 0x11
+@@ -312,26 +314,30 @@ static int dp83822_resume(struct phy_device *phydev)
+ return 0;
+ }
+
++#define DP83822_PHY_DRIVER(_id, _name) \
++ { \
++ PHY_ID_MATCH_MODEL(_id), \
++ .name = (_name), \
++ .features = PHY_BASIC_FEATURES, \
++ .soft_reset = dp83822_phy_reset, \
++ .config_init = dp83822_config_init, \
++ .get_wol = dp83822_get_wol, \
++ .set_wol = dp83822_set_wol, \
++ .ack_interrupt = dp83822_ack_interrupt, \
++ .config_intr = dp83822_config_intr, \
++ .suspend = dp83822_suspend, \
++ .resume = dp83822_resume, \
++ }
++
+ static struct phy_driver dp83822_driver[] = {
+- {
+- .phy_id = DP83822_PHY_ID,
+- .phy_id_mask = 0xfffffff0,
+- .name = "TI DP83822",
+- .features = PHY_BASIC_FEATURES,
+- .config_init = dp83822_config_init,
+- .soft_reset = dp83822_phy_reset,
+- .get_wol = dp83822_get_wol,
+- .set_wol = dp83822_set_wol,
+- .ack_interrupt = dp83822_ack_interrupt,
+- .config_intr = dp83822_config_intr,
+- .suspend = dp83822_suspend,
+- .resume = dp83822_resume,
+- },
++ DP83822_PHY_DRIVER(DP83822_PHY_ID, "TI DP83822"),
++ DP83822_PHY_DRIVER(DP83825I_PHY_ID, "TI DP83825I"),
+ };
+ module_phy_driver(dp83822_driver);
+
+ static struct mdio_device_id __maybe_unused dp83822_tbl[] = {
+ { DP83822_PHY_ID, 0xfffffff0 },
++ { DP83825I_PHY_ID, 0xfffffff0 },
+ { },
+ };
+ MODULE_DEVICE_TABLE(mdio, dp83822_tbl);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+index 81970cf777c0..8cafa5a749ca 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+@@ -81,8 +81,9 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
+
+ mt76x02_insert_hdr_pad(skb);
+
+- txwi = skb_push(skb, sizeof(struct mt76x02_txwi));
++ txwi = (struct mt76x02_txwi *)(skb->data - sizeof(struct mt76x02_txwi));
+ mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, len);
++ skb_push(skb, sizeof(struct mt76x02_txwi));
+
+ pid = mt76_tx_status_skb_add(mdev, wcid, skb);
+ txwi->pktid = pid;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
+index c9634a774705..2f618536ef2a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
+@@ -260,10 +260,15 @@ mt76x2_phy_set_gain_val(struct mt76x02_dev *dev)
+ gain_val[0] = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust;
+ gain_val[1] = dev->cal.agc_gain_cur[1] - dev->cal.agc_gain_adjust;
+
+- if (dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40)
++ val = 0x1836 << 16;
++ if (!mt76x2_has_ext_lna(dev) &&
++ dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40)
+ val = 0x1e42 << 16;
+- else
+- val = 0x1836 << 16;
++
++ if (mt76x2_has_ext_lna(dev) &&
++ dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ &&
++ dev->mt76.chandef.width < NL80211_CHAN_WIDTH_40)
++ val = 0x0f36 << 16;
+
+ val |= 0xf8;
+
+@@ -280,6 +285,7 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
+ {
+ u8 *gain = dev->cal.agc_gain_init;
+ u8 low_gain_delta, gain_delta;
++ u32 agc_35, agc_37;
+ bool gain_change;
+ int low_gain;
+ u32 val;
+@@ -316,6 +322,16 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
+ else
+ low_gain_delta = 14;
+
++ agc_37 = 0x2121262c;
++ if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ)
++ agc_35 = 0x11111516;
++ else if (low_gain == 2)
++ agc_35 = agc_37 = 0x08080808;
++ else if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
++ agc_35 = 0x10101014;
++ else
++ agc_35 = 0x11111116;
++
+ if (low_gain == 2) {
+ mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990);
+ mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808);
+@@ -324,15 +340,13 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
+ dev->cal.agc_gain_adjust = 0;
+ } else {
+ mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991);
+- if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
+- mt76_wr(dev, MT_BBP(AGC, 35), 0x10101014);
+- else
+- mt76_wr(dev, MT_BBP(AGC, 35), 0x11111116);
+- mt76_wr(dev, MT_BBP(AGC, 37), 0x2121262C);
+ gain_delta = 0;
+ dev->cal.agc_gain_adjust = low_gain_delta;
+ }
+
++ mt76_wr(dev, MT_BBP(AGC, 35), agc_35);
++ mt76_wr(dev, MT_BBP(AGC, 37), agc_37);
++
+ dev->cal.agc_gain_cur[0] = gain[0] - gain_delta;
+ dev->cal.agc_gain_cur[1] = gain[1] - gain_delta;
+ mt76x2_phy_set_gain_val(dev);
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index b9fff3b8ed1b..23da7beadd62 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -366,15 +366,12 @@ static inline bool nvme_state_is_live(enum nvme_ana_state state)
+ static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
+ struct nvme_ns *ns)
+ {
+- enum nvme_ana_state old;
+-
+ mutex_lock(&ns->head->lock);
+- old = ns->ana_state;
+ ns->ana_grpid = le32_to_cpu(desc->grpid);
+ ns->ana_state = desc->state;
+ clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
+
+- if (nvme_state_is_live(ns->ana_state) && !nvme_state_is_live(old))
++ if (nvme_state_is_live(ns->ana_state))
+ nvme_mpath_set_live(ns);
+ mutex_unlock(&ns->head->lock);
+ }
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index 02c63c463222..7bad21a2283f 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -517,7 +517,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
+
+ ret = nvmet_p2pmem_ns_enable(ns);
+ if (ret)
+- goto out_unlock;
++ goto out_dev_disable;
+
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+ nvmet_p2pmem_ns_add_p2p(ctrl, ns);
+@@ -558,7 +558,7 @@ out_unlock:
+ out_dev_put:
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+ pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
+-
++out_dev_disable:
+ nvmet_ns_dev_disable(ns);
+ goto out_unlock;
+ }
+diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
+index 517522305e5c..9a0fa3943ca7 100644
+--- a/drivers/nvme/target/io-cmd-file.c
++++ b/drivers/nvme/target/io-cmd-file.c
+@@ -75,11 +75,11 @@ err:
+ return ret;
+ }
+
+-static void nvmet_file_init_bvec(struct bio_vec *bv, struct sg_page_iter *iter)
++static void nvmet_file_init_bvec(struct bio_vec *bv, struct scatterlist *sg)
+ {
+- bv->bv_page = sg_page_iter_page(iter);
+- bv->bv_offset = iter->sg->offset;
+- bv->bv_len = PAGE_SIZE - iter->sg->offset;
++ bv->bv_page = sg_page(sg);
++ bv->bv_offset = sg->offset;
++ bv->bv_len = sg->length;
+ }
+
+ static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
+@@ -128,14 +128,14 @@ static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
+
+ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
+ {
+- ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE);
+- struct sg_page_iter sg_pg_iter;
++ ssize_t nr_bvec = req->sg_cnt;
+ unsigned long bv_cnt = 0;
+ bool is_sync = false;
+ size_t len = 0, total_len = 0;
+ ssize_t ret = 0;
+ loff_t pos;
+-
++ int i;
++ struct scatterlist *sg;
+
+ if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC)
+ is_sync = true;
+@@ -147,8 +147,8 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
+ }
+
+ memset(&req->f.iocb, 0, sizeof(struct kiocb));
+- for_each_sg_page(req->sg, &sg_pg_iter, req->sg_cnt, 0) {
+- nvmet_file_init_bvec(&req->f.bvec[bv_cnt], &sg_pg_iter);
++ for_each_sg(req->sg, sg, req->sg_cnt, i) {
++ nvmet_file_init_bvec(&req->f.bvec[bv_cnt], sg);
+ len += req->f.bvec[bv_cnt].bv_len;
+ total_len += req->f.bvec[bv_cnt].bv_len;
+ bv_cnt++;
+@@ -225,7 +225,7 @@ static void nvmet_file_submit_buffered_io(struct nvmet_req *req)
+
+ static void nvmet_file_execute_rw(struct nvmet_req *req)
+ {
+- ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE);
++ ssize_t nr_bvec = req->sg_cnt;
+
+ if (!req->sg_cnt || !nr_bvec) {
+ nvmet_req_complete(req, 0);
+diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
+index df34bff4ac31..f73ce96e9603 100644
+--- a/drivers/s390/net/qeth_l3_main.c
++++ b/drivers/s390/net/qeth_l3_main.c
+@@ -2316,12 +2316,14 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
+ struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ int rc;
+
++ hash_init(card->ip_htable);
++
+ if (gdev->dev.type == &qeth_generic_devtype) {
+ rc = qeth_l3_create_device_attributes(&gdev->dev);
+ if (rc)
+ return rc;
+ }
+- hash_init(card->ip_htable);
++
+ hash_init(card->ip_mc_htable);
+ card->info.hwtrap = 0;
+ return 0;
+diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
+index db00b5e3abbe..33eddb02ee30 100644
+--- a/drivers/s390/scsi/zfcp_fc.c
++++ b/drivers/s390/scsi/zfcp_fc.c
+@@ -239,10 +239,6 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
+ list_for_each_entry(port, &adapter->port_list, list) {
+ if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
+ zfcp_fc_test_link(port);
+- if (!port->d_id)
+- zfcp_erp_port_reopen(port,
+- ZFCP_STATUS_COMMON_ERP_FAILED,
+- "fcrscn1");
+ }
+ read_unlock_irqrestore(&adapter->port_list_lock, flags);
+ }
+@@ -250,6 +246,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
+ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
+ {
+ struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
++ struct zfcp_adapter *adapter = fsf_req->adapter;
+ struct fc_els_rscn *head;
+ struct fc_els_rscn_page *page;
+ u16 i;
+@@ -263,6 +260,22 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
+ no_entries = be16_to_cpu(head->rscn_plen) /
+ sizeof(struct fc_els_rscn_page);
+
++ if (no_entries > 1) {
++ /* handle failed ports */
++ unsigned long flags;
++ struct zfcp_port *port;
++
++ read_lock_irqsave(&adapter->port_list_lock, flags);
++ list_for_each_entry(port, &adapter->port_list, list) {
++ if (port->d_id)
++ continue;
++ zfcp_erp_port_reopen(port,
++ ZFCP_STATUS_COMMON_ERP_FAILED,
++ "fcrscn1");
++ }
++ read_unlock_irqrestore(&adapter->port_list_lock, flags);
++ }
++
+ for (i = 1; i < no_entries; i++) {
+ /* skip head and start with 1st element */
+ page++;
+diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
+index 3291d1c16864..8bd09b96ea18 100644
+--- a/drivers/scsi/aacraid/aacraid.h
++++ b/drivers/scsi/aacraid/aacraid.h
+@@ -2640,9 +2640,14 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
+ return capacity;
+ }
+
++static inline int aac_pci_offline(struct aac_dev *dev)
++{
++ return pci_channel_offline(dev->pdev) || dev->handle_pci_error;
++}
++
+ static inline int aac_adapter_check_health(struct aac_dev *dev)
+ {
+- if (unlikely(pci_channel_offline(dev->pdev)))
++ if (unlikely(aac_pci_offline(dev)))
+ return -1;
+
+ return (dev)->a_ops.adapter_check_health(dev);
+diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
+index a3adc954f40f..09367b8a3885 100644
+--- a/drivers/scsi/aacraid/commsup.c
++++ b/drivers/scsi/aacraid/commsup.c
+@@ -672,7 +672,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
+ return -ETIMEDOUT;
+ }
+
+- if (unlikely(pci_channel_offline(dev->pdev)))
++ if (unlikely(aac_pci_offline(dev)))
+ return -EFAULT;
+
+ if ((blink = aac_adapter_check_health(dev)) > 0) {
+@@ -772,7 +772,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
+
+ spin_unlock_irqrestore(&fibptr->event_lock, flags);
+
+- if (unlikely(pci_channel_offline(dev->pdev)))
1593 ++ if (unlikely(aac_pci_offline(dev)))
1594 + return -EFAULT;
1595 +
1596 + fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
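
All three aacraid call sites now go through one predicate instead of open-coding pci_channel_offline(), which also picks up the driver's own handle_pci_error flag. A small sketch of the same consolidation, with illustrative field names:

    #include <stdbool.h>
    #include <stdio.h>

    struct dev_state {
        bool channel_offline;    /* stand-in for pci_channel_offline() */
        bool handle_pci_error;
    };

    /* One helper instead of repeating the compound test at every call
     * site, mirroring the aac_pci_offline() wrapper added above. */
    static inline bool dev_pci_offline(const struct dev_state *d)
    {
        return d->channel_offline || d->handle_pci_error;
    }

    int main(void)
    {
        struct dev_state d = { .channel_offline = false,
                               .handle_pci_error = true };
        printf("offline: %d\n", dev_pci_offline(&d));
        return 0;
    }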
1597 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
1598 +index 0a6cb8f0680c..c39f88100f31 100644
1599 +--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
1600 ++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
1601 +@@ -3281,12 +3281,18 @@ mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1602 +
1603 + if (smid < ioc->hi_priority_smid) {
1604 + struct scsiio_tracker *st;
1605 ++ void *request;
1606 +
1607 + st = _get_st_from_smid(ioc, smid);
1608 + if (!st) {
1609 + _base_recovery_check(ioc);
1610 + return;
1611 + }
1612 ++
1613 ++ /* Clear MPI request frame */
1614 ++ request = mpt3sas_base_get_msg_frame(ioc, smid);
1615 ++ memset(request, 0, ioc->request_sz);
1616 ++
1617 + mpt3sas_base_clear_st(ioc, st);
1618 + _base_recovery_check(ioc);
1619 + return;
1620 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
1621 +index 6be39dc27103..6173c211a5e5 100644
1622 +--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
1623 ++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
1624 +@@ -1462,11 +1462,23 @@ mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1625 + {
1626 + struct scsi_cmnd *scmd = NULL;
1627 + struct scsiio_tracker *st;
1628 ++ Mpi25SCSIIORequest_t *mpi_request;
1629 +
1630 + if (smid > 0 &&
1631 + smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
1632 + u32 unique_tag = smid - 1;
1633 +
1634 ++ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1635 ++
1636 ++ /*
1636 ++	 * If a SCSI IO request is outstanding at the driver level, then
1637 ++	 * the DevHandle field must be non-zero. If DevHandle is zero
1639 ++ * then it means that this smid is free at driver level,
1640 ++ * so return NULL.
1641 ++ */
1642 ++ if (!mpi_request->DevHandle)
1643 ++ return scmd;
1644 ++
1645 + scmd = scsi_host_find_tag(ioc->shost, unique_tag);
1646 + if (scmd) {
1647 + st = scsi_cmd_priv(scmd);
1648 +diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
1649 +index a77bfb224248..80289c885c07 100644
1650 +--- a/drivers/scsi/qla4xxx/ql4_os.c
1651 ++++ b/drivers/scsi/qla4xxx/ql4_os.c
1652 +@@ -3203,6 +3203,8 @@ static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
1653 + if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
1654 + return -EINVAL;
1655 + ep = iscsi_lookup_endpoint(transport_fd);
1656 ++ if (!ep)
1657 ++ return -EINVAL;
1658 + conn = cls_conn->dd_data;
1659 + qla_conn = conn->dd_data;
1660 + qla_conn->qla_ep = ep->dd_data;
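
iscsi_lookup_endpoint() can return NULL for a stale or bogus handle, and the old qla4xxx code dereferenced the result unconditionally. A standalone sketch of the guard, with a stub lookup standing in for the iSCSI helper:

    #include <stdio.h>
    #include <stddef.h>

    struct endpoint { int id; };

    /* Stand-in for iscsi_lookup_endpoint(): may legitimately return NULL. */
    static struct endpoint *lookup_endpoint(long handle)
    {
        (void)handle;
        return NULL;            /* simulate a stale handle */
    }

    static int conn_bind(long handle)
    {
        struct endpoint *ep = lookup_endpoint(handle);

        if (!ep)                /* the added check: bail before ep->... */
            return -22;         /* -EINVAL */
        return ep->id;
    }

    int main(void)
    {
        printf("bind: %d\n", conn_bind(42));
        return 0;
    }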
1661 +diff --git a/drivers/staging/axis-fifo/Kconfig b/drivers/staging/axis-fifo/Kconfig
1662 +index 687537203d9c..d9725888af6f 100644
1663 +--- a/drivers/staging/axis-fifo/Kconfig
1664 ++++ b/drivers/staging/axis-fifo/Kconfig
1665 +@@ -3,6 +3,7 @@
1666 + #
1667 + config XIL_AXIS_FIFO
1668 + tristate "Xilinx AXI-Stream FIFO IP core driver"
1669 ++ depends on OF
1670 + default n
1671 + help
1672 + This adds support for the Xilinx AXI-Stream
1673 +diff --git a/drivers/staging/mt7621-pci/Kconfig b/drivers/staging/mt7621-pci/Kconfig
1674 +index d33533872a16..c8fa17cfa807 100644
1675 +--- a/drivers/staging/mt7621-pci/Kconfig
1676 ++++ b/drivers/staging/mt7621-pci/Kconfig
1677 +@@ -1,6 +1,7 @@
1678 + config PCI_MT7621
1679 + tristate "MediaTek MT7621 PCI Controller"
1680 + depends on RALINK
1681 ++ depends on PCI
1682 + select PCI_DRIVERS_GENERIC
1683 + help
1684 + This selects a driver for the MediaTek MT7621 PCI Controller.
1685 +diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
1686 +index 3b1ccd138c3f..6fb6ea29a8b6 100644
1687 +--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
1688 ++++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
1689 +@@ -174,7 +174,9 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
1690 +
1691 + pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf;
1692 +
1693 +- rtw_alloc_hwxmits(padapter);
1694 ++ res = rtw_alloc_hwxmits(padapter);
1695 ++ if (res == _FAIL)
1696 ++ goto exit;
1697 + rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
1698 +
1699 + for (i = 0; i < 4; i++)
1700 +@@ -1503,7 +1505,7 @@ exit:
1701 + return res;
1702 + }
1703 +
1704 +-void rtw_alloc_hwxmits(struct adapter *padapter)
1705 ++s32 rtw_alloc_hwxmits(struct adapter *padapter)
1706 + {
1707 + struct hw_xmit *hwxmits;
1708 + struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
1709 +@@ -1512,6 +1514,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
1710 +
1711 + pxmitpriv->hwxmits = kcalloc(pxmitpriv->hwxmit_entry,
1712 + sizeof(struct hw_xmit), GFP_KERNEL);
1713 ++ if (!pxmitpriv->hwxmits)
1714 ++ return _FAIL;
1715 +
1716 + hwxmits = pxmitpriv->hwxmits;
1717 +
1718 +@@ -1519,6 +1523,7 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
1719 + hwxmits[1] .sta_queue = &pxmitpriv->vi_pending;
1720 + hwxmits[2] .sta_queue = &pxmitpriv->be_pending;
1721 + hwxmits[3] .sta_queue = &pxmitpriv->bk_pending;
1722 ++ return _SUCCESS;
1723 + }
1724 +
1725 + void rtw_free_hwxmits(struct adapter *padapter)
1726 +diff --git a/drivers/staging/rtl8188eu/include/rtw_xmit.h b/drivers/staging/rtl8188eu/include/rtw_xmit.h
1727 +index 788f59c74ea1..ba7e15fbde72 100644
1728 +--- a/drivers/staging/rtl8188eu/include/rtw_xmit.h
1729 ++++ b/drivers/staging/rtl8188eu/include/rtw_xmit.h
1730 +@@ -336,7 +336,7 @@ s32 rtw_txframes_sta_ac_pending(struct adapter *padapter,
1731 + void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry);
1732 + s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
1733 + void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv);
1734 +-void rtw_alloc_hwxmits(struct adapter *padapter);
1735 ++s32 rtw_alloc_hwxmits(struct adapter *padapter);
1736 + void rtw_free_hwxmits(struct adapter *padapter);
1737 + s32 rtw_xmit(struct adapter *padapter, struct sk_buff **pkt);
1738 +
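
rtw_alloc_hwxmits() used to return void, so a failed kcalloc() left pxmitpriv->hwxmits NULL while callers carried on. Converting it to return _FAIL/_SUCCESS lets _rtw_init_xmit_priv() unwind instead. A compact userspace sketch of the same conversion (the _FAIL/_SUCCESS values mirror the driver's convention):

    #include <stdio.h>
    #include <stdlib.h>

    #define _SUCCESS 1
    #define _FAIL    0

    struct hw_xmit { int queue; };

    struct xmit_priv {
        struct hw_xmit *hwxmits;
        int hwxmit_entry;
    };

    /* Formerly a void function; returning _FAIL lets the caller bail
     * instead of continuing with a NULL array (the bug fixed above). */
    static int alloc_hwxmits(struct xmit_priv *p)
    {
        p->hwxmits = calloc(p->hwxmit_entry, sizeof(*p->hwxmits));
        if (!p->hwxmits)
            return _FAIL;
        return _SUCCESS;
    }

    int main(void)
    {
        struct xmit_priv p = { .hwxmit_entry = 4 };

        if (alloc_hwxmits(&p) == _FAIL)
            return 1;
        printf("allocated %d entries\n", p.hwxmit_entry);
        free(p.hwxmits);
        return 0;
    }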
1739 +diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
1740 +index 1920d02f7c9f..8c36acedf507 100644
1741 +--- a/drivers/staging/rtl8712/rtl8712_cmd.c
1742 ++++ b/drivers/staging/rtl8712/rtl8712_cmd.c
1743 +@@ -147,17 +147,9 @@ static u8 write_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
1744 +
1745 + static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
1746 + {
1747 +- u32 val;
1748 +- void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
1749 + struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
1750 +
1751 +- if (pcmd->rsp && pcmd->rspsz > 0)
1752 +- memcpy(pcmd->rsp, (u8 *)&val, pcmd->rspsz);
1753 +- pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
1754 +- if (!pcmd_callback)
1755 +- r8712_free_cmd_obj(pcmd);
1756 +- else
1757 +- pcmd_callback(padapter, pcmd);
1758 ++ r8712_free_cmd_obj(pcmd);
1759 + return H2C_SUCCESS;
1760 + }
1761 +
1762 +diff --git a/drivers/staging/rtl8712/rtl8712_cmd.h b/drivers/staging/rtl8712/rtl8712_cmd.h
1763 +index 92fb77666d44..1ef86b8c592f 100644
1764 +--- a/drivers/staging/rtl8712/rtl8712_cmd.h
1765 ++++ b/drivers/staging/rtl8712/rtl8712_cmd.h
1766 +@@ -140,7 +140,7 @@ enum rtl8712_h2c_cmd {
1767 + static struct _cmd_callback cmd_callback[] = {
1768 + {GEN_CMD_CODE(_Read_MACREG), NULL}, /*0*/
1769 + {GEN_CMD_CODE(_Write_MACREG), NULL},
1770 +- {GEN_CMD_CODE(_Read_BBREG), &r8712_getbbrfreg_cmdrsp_callback},
1771 ++ {GEN_CMD_CODE(_Read_BBREG), NULL},
1772 + {GEN_CMD_CODE(_Write_BBREG), NULL},
1773 + {GEN_CMD_CODE(_Read_RFREG), &r8712_getbbrfreg_cmdrsp_callback},
1774 + {GEN_CMD_CODE(_Write_RFREG), NULL}, /*5*/
1775 +diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c
1776 +index 625e67f39889..a36b2213d8ee 100644
1777 +--- a/drivers/staging/rtl8723bs/core/rtw_xmit.c
1778 ++++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c
1779 +@@ -260,7 +260,9 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
1780 + }
1781 + }
1782 +
1783 +- rtw_alloc_hwxmits(padapter);
1784 ++ res = rtw_alloc_hwxmits(padapter);
1785 ++ if (res == _FAIL)
1786 ++ goto exit;
1787 + rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
1788 +
1789 + for (i = 0; i < 4; i++) {
1790 +@@ -2144,7 +2146,7 @@ exit:
1791 + return res;
1792 + }
1793 +
1794 +-void rtw_alloc_hwxmits(struct adapter *padapter)
1795 ++s32 rtw_alloc_hwxmits(struct adapter *padapter)
1796 + {
1797 + struct hw_xmit *hwxmits;
1798 + struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
1799 +@@ -2155,10 +2157,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
1800 +
1801 + pxmitpriv->hwxmits = rtw_zmalloc(sizeof(struct hw_xmit) * pxmitpriv->hwxmit_entry);
1802 +
1803 +- if (pxmitpriv->hwxmits == NULL) {
1804 +- DBG_871X("alloc hwxmits fail!...\n");
1805 +- return;
1806 +- }
1807 ++ if (!pxmitpriv->hwxmits)
1808 ++ return _FAIL;
1809 +
1810 + hwxmits = pxmitpriv->hwxmits;
1811 +
1812 +@@ -2204,7 +2204,7 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
1813 +
1814 + }
1815 +
1816 +-
1817 ++ return _SUCCESS;
1818 + }
1819 +
1820 + void rtw_free_hwxmits(struct adapter *padapter)
1821 +diff --git a/drivers/staging/rtl8723bs/include/rtw_xmit.h b/drivers/staging/rtl8723bs/include/rtw_xmit.h
1822 +index 1b38b9182b31..37f42b2f22f1 100644
1823 +--- a/drivers/staging/rtl8723bs/include/rtw_xmit.h
1824 ++++ b/drivers/staging/rtl8723bs/include/rtw_xmit.h
1825 +@@ -487,7 +487,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
1826 + void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv);
1827 +
1828 +
1829 +-void rtw_alloc_hwxmits(struct adapter *padapter);
1830 ++s32 rtw_alloc_hwxmits(struct adapter *padapter);
1831 + void rtw_free_hwxmits(struct adapter *padapter);
1832 +
1833 +
1834 +diff --git a/drivers/staging/rtlwifi/phydm/rtl_phydm.c b/drivers/staging/rtlwifi/phydm/rtl_phydm.c
1835 +index 9930ed954abb..4cc77b2016e1 100644
1836 +--- a/drivers/staging/rtlwifi/phydm/rtl_phydm.c
1837 ++++ b/drivers/staging/rtlwifi/phydm/rtl_phydm.c
1838 +@@ -180,6 +180,8 @@ static int rtl_phydm_init_priv(struct rtl_priv *rtlpriv,
1839 +
1840 + rtlpriv->phydm.internal =
1841 + kzalloc(sizeof(struct phy_dm_struct), GFP_KERNEL);
1842 ++ if (!rtlpriv->phydm.internal)
1843 ++ return 0;
1844 +
1845 + _rtl_phydm_init_com_info(rtlpriv, ic, params);
1846 +
1847 +diff --git a/drivers/staging/rtlwifi/rtl8822be/fw.c b/drivers/staging/rtlwifi/rtl8822be/fw.c
1848 +index a40396614814..c1ed52df05f0 100644
1849 +--- a/drivers/staging/rtlwifi/rtl8822be/fw.c
1850 ++++ b/drivers/staging/rtlwifi/rtl8822be/fw.c
1851 +@@ -741,6 +741,8 @@ void rtl8822be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
1852 + u1_rsvd_page_loc, 3);
1853 +
1854 + skb = dev_alloc_skb(totalpacketlen);
1855 ++ if (!skb)
1856 ++ return;
1857 + memcpy((u8 *)skb_put(skb, totalpacketlen), &reserved_page_packet,
1858 + totalpacketlen);
1859 +
1860 +diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
1861 +index 804daf83be35..064d0db4c51e 100644
1862 +--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
1863 ++++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
1864 +@@ -3513,6 +3513,7 @@ static int vchiq_probe(struct platform_device *pdev)
1865 + struct device_node *fw_node;
1866 + const struct of_device_id *of_id;
1867 + struct vchiq_drvdata *drvdata;
1868 ++ struct device *vchiq_dev;
1869 + int err;
1870 +
1871 + of_id = of_match_node(vchiq_of_match, pdev->dev.of_node);
1872 +@@ -3547,9 +3548,12 @@ static int vchiq_probe(struct platform_device *pdev)
1873 + goto failed_platform_init;
1874 + }
1875 +
1876 +- if (IS_ERR(device_create(vchiq_class, &pdev->dev, vchiq_devid,
1877 +- NULL, "vchiq")))
1878 ++ vchiq_dev = device_create(vchiq_class, &pdev->dev, vchiq_devid, NULL,
1879 ++ "vchiq");
1880 ++ if (IS_ERR(vchiq_dev)) {
1881 ++ err = PTR_ERR(vchiq_dev);
1882 + goto failed_device_create;
1883 ++ }
1884 +
1885 + vchiq_debugfs_init();
1886 +
1887 +diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
1888 +index db5df3d54818..3bdd56a1021b 100644
1889 +--- a/drivers/tty/serial/ar933x_uart.c
1890 ++++ b/drivers/tty/serial/ar933x_uart.c
1891 +@@ -49,11 +49,6 @@ struct ar933x_uart_port {
1892 + struct clk *clk;
1893 + };
1894 +
1895 +-static inline bool ar933x_uart_console_enabled(void)
1896 +-{
1897 +- return IS_ENABLED(CONFIG_SERIAL_AR933X_CONSOLE);
1898 +-}
1899 +-
1900 + static inline unsigned int ar933x_uart_read(struct ar933x_uart_port *up,
1901 + int offset)
1902 + {
1903 +@@ -508,6 +503,7 @@ static const struct uart_ops ar933x_uart_ops = {
1904 + .verify_port = ar933x_uart_verify_port,
1905 + };
1906 +
1907 ++#ifdef CONFIG_SERIAL_AR933X_CONSOLE
1908 + static struct ar933x_uart_port *
1909 + ar933x_console_ports[CONFIG_SERIAL_AR933X_NR_UARTS];
1910 +
1911 +@@ -604,14 +600,7 @@ static struct console ar933x_uart_console = {
1912 + .index = -1,
1913 + .data = &ar933x_uart_driver,
1914 + };
1915 +-
1916 +-static void ar933x_uart_add_console_port(struct ar933x_uart_port *up)
1917 +-{
1918 +- if (!ar933x_uart_console_enabled())
1919 +- return;
1920 +-
1921 +- ar933x_console_ports[up->port.line] = up;
1922 +-}
1923 ++#endif /* CONFIG_SERIAL_AR933X_CONSOLE */
1924 +
1925 + static struct uart_driver ar933x_uart_driver = {
1926 + .owner = THIS_MODULE,
1927 +@@ -700,7 +689,9 @@ static int ar933x_uart_probe(struct platform_device *pdev)
1928 + baud = ar933x_uart_get_baud(port->uartclk, 0, AR933X_UART_MAX_STEP);
1929 + up->max_baud = min_t(unsigned int, baud, AR933X_UART_MAX_BAUD);
1930 +
1931 +- ar933x_uart_add_console_port(up);
1932 ++#ifdef CONFIG_SERIAL_AR933X_CONSOLE
1933 ++ ar933x_console_ports[up->port.line] = up;
1934 ++#endif
1935 +
1936 + ret = uart_add_one_port(&ar933x_uart_driver, &up->port);
1937 + if (ret)
1938 +@@ -749,8 +740,9 @@ static int __init ar933x_uart_init(void)
1939 + {
1940 + int ret;
1941 +
1942 +- if (ar933x_uart_console_enabled())
1943 +- ar933x_uart_driver.cons = &ar933x_uart_console;
1944 ++#ifdef CONFIG_SERIAL_AR933X_CONSOLE
1945 ++ ar933x_uart_driver.cons = &ar933x_uart_console;
1946 ++#endif
1947 +
1948 + ret = uart_register_driver(&ar933x_uart_driver);
1949 + if (ret)
1950 +diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
1951 +index 268098681856..114e94f476c6 100644
1952 +--- a/drivers/tty/serial/sc16is7xx.c
1953 ++++ b/drivers/tty/serial/sc16is7xx.c
1954 +@@ -1509,7 +1509,7 @@ static int __init sc16is7xx_init(void)
1955 + ret = i2c_add_driver(&sc16is7xx_i2c_uart_driver);
1956 + if (ret < 0) {
1957 + pr_err("failed to init sc16is7xx i2c --> %d\n", ret);
1958 +- return ret;
1959 ++ goto err_i2c;
1960 + }
1961 + #endif
1962 +
1963 +@@ -1517,10 +1517,18 @@ static int __init sc16is7xx_init(void)
1964 + ret = spi_register_driver(&sc16is7xx_spi_uart_driver);
1965 + if (ret < 0) {
1966 + pr_err("failed to init sc16is7xx spi --> %d\n", ret);
1967 +- return ret;
1968 ++ goto err_spi;
1969 + }
1970 + #endif
1971 + return ret;
1972 ++
1973 ++err_spi:
1974 ++#ifdef CONFIG_SERIAL_SC16IS7XX_I2C
1975 ++ i2c_del_driver(&sc16is7xx_i2c_uart_driver);
1976 ++#endif
1977 ++err_i2c:
1978 ++ uart_unregister_driver(&sc16is7xx_uart);
1979 ++ return ret;
1980 + }
1981 + module_init(sc16is7xx_init);
1982 +
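
The sc16is7xx fix adds the missing unwind: if SPI registration fails after the UART and I2C registrations succeeded, those earlier registrations are now rolled back in reverse order. A standalone sketch with illustrative backend names (register_core stands in for uart_register_driver):

    #include <stdio.h>

    static int register_core(void) { puts("core registered"); return 0; }
    static int register_i2c(void)  { puts("i2c registered");  return 0; }
    static int register_spi(void)  { return -1; }  /* fails for the demo */

    static void unregister_i2c(void)  { puts("i2c unregistered"); }
    static void unregister_core(void) { puts("core unregistered"); }

    /* A failure in the last backend unwinds every earlier registration
     * instead of returning with them still live. */
    static int demo_init(void)
    {
        int ret = register_core();
        if (ret)
            return ret;

        ret = register_i2c();
        if (ret)
            goto err_i2c;

        ret = register_spi();
        if (ret)
            goto err_spi;

        return 0;

    err_spi:
        unregister_i2c();
    err_i2c:
        unregister_core();
        return ret;
    }

    int main(void)
    {
        printf("init = %d\n", demo_init());
        return 0;
    }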
1983 +diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
1984 +index fdc6e4e403e8..8cced3609e24 100644
1985 +--- a/drivers/usb/dwc3/dwc3-pci.c
1986 ++++ b/drivers/usb/dwc3/dwc3-pci.c
1987 +@@ -29,6 +29,7 @@
1988 + #define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa
1989 + #define PCI_DEVICE_ID_INTEL_APL 0x5aaa
1990 + #define PCI_DEVICE_ID_INTEL_KBP 0xa2b0
1991 ++#define PCI_DEVICE_ID_INTEL_CMLH 0x02ee
1992 + #define PCI_DEVICE_ID_INTEL_GLK 0x31aa
1993 + #define PCI_DEVICE_ID_INTEL_CNPLP 0x9dee
1994 + #define PCI_DEVICE_ID_INTEL_CNPH 0xa36e
1995 +@@ -305,6 +306,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
1996 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD),
1997 + (kernel_ulong_t) &dwc3_pci_mrfld_properties, },
1998 +
1999 ++ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CMLH),
2000 ++ (kernel_ulong_t) &dwc3_pci_intel_properties, },
2001 ++
2002 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SPTLP),
2003 + (kernel_ulong_t) &dwc3_pci_intel_properties, },
2004 +
2005 +diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
2006 +index b77f3126580e..c2011cd7df8c 100644
2007 +--- a/drivers/usb/gadget/udc/net2272.c
2008 ++++ b/drivers/usb/gadget/udc/net2272.c
2009 +@@ -945,6 +945,7 @@ net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
2010 + break;
2011 + }
2012 + if (&req->req != _req) {
2013 ++ ep->stopped = stopped;
2014 + spin_unlock_irqrestore(&ep->dev->lock, flags);
2015 + return -EINVAL;
2016 + }
2017 +diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
2018 +index e7dae5379e04..d93cf4171953 100644
2019 +--- a/drivers/usb/gadget/udc/net2280.c
2020 ++++ b/drivers/usb/gadget/udc/net2280.c
2021 +@@ -866,9 +866,6 @@ static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma)
2022 + (void) readl(&ep->dev->pci->pcimstctl);
2023 +
2024 + writel(BIT(DMA_START), &dma->dmastat);
2025 +-
2026 +- if (!ep->is_in)
2027 +- stop_out_naking(ep);
2028 + }
2029 +
2030 + static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
2031 +@@ -907,6 +904,7 @@ static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
2032 + writel(BIT(DMA_START), &dma->dmastat);
2033 + return;
2034 + }
2035 ++ stop_out_naking(ep);
2036 + }
2037 +
2038 + tmp = dmactl_default;
2039 +@@ -1275,9 +1273,9 @@ static int net2280_dequeue(struct usb_ep *_ep, struct usb_request *_req)
2040 + break;
2041 + }
2042 + if (&req->req != _req) {
2043 ++ ep->stopped = stopped;
2044 + spin_unlock_irqrestore(&ep->dev->lock, flags);
2045 +- dev_err(&ep->dev->pdev->dev, "%s: Request mismatch\n",
2046 +- __func__);
2047 ++ ep_dbg(ep->dev, "%s: Request mismatch\n", __func__);
2048 + return -EINVAL;
2049 + }
2050 +
2051 +diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
2052 +index 5b8a3d9530c4..5cac83aaeac3 100644
2053 +--- a/drivers/usb/host/u132-hcd.c
2054 ++++ b/drivers/usb/host/u132-hcd.c
2055 +@@ -3202,6 +3202,9 @@ static int __init u132_hcd_init(void)
2056 + printk(KERN_INFO "driver %s\n", hcd_name);
2057 + workqueue = create_singlethread_workqueue("u132");
2058 + retval = platform_driver_register(&u132_platform_driver);
2059 ++ if (retval)
2060 ++ destroy_workqueue(workqueue);
2061 ++
2062 + return retval;
2063 + }
2064 +
2065 +diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
2066 +index a6efb9a72939..5f7734c729b1 100644
2067 +--- a/drivers/usb/misc/usb251xb.c
2068 ++++ b/drivers/usb/misc/usb251xb.c
2069 +@@ -601,7 +601,7 @@ static int usb251xb_probe(struct usb251xb *hub)
2070 + dev);
2071 + int err;
2072 +
2073 +- if (np) {
2074 ++ if (np && of_id) {
2075 + err = usb251xb_get_ofdata(hub,
2076 + (struct usb251xb_data *)of_id->data);
2077 + if (err) {
2078 +diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
2079 +index ca08c83168f5..0b37867b5c20 100644
2080 +--- a/fs/afs/fsclient.c
2081 ++++ b/fs/afs/fsclient.c
2082 +@@ -1515,8 +1515,8 @@ static int afs_fs_setattr_size64(struct afs_fs_cursor *fc, struct iattr *attr)
2083 +
2084 + xdr_encode_AFS_StoreStatus(&bp, attr);
2085 +
2086 +- *bp++ = 0; /* position of start of write */
2087 +- *bp++ = 0;
2088 ++ *bp++ = htonl(attr->ia_size >> 32); /* position of start of write */
2089 ++ *bp++ = htonl((u32) attr->ia_size);
2090 + *bp++ = 0; /* size of write */
2091 + *bp++ = 0;
2092 + *bp++ = htonl(attr->ia_size >> 32); /* new file length */
2093 +@@ -1564,7 +1564,7 @@ static int afs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr)
2094 +
2095 + xdr_encode_AFS_StoreStatus(&bp, attr);
2096 +
2097 +- *bp++ = 0; /* position of start of write */
2098 ++ *bp++ = htonl(attr->ia_size); /* position of start of write */
2099 + *bp++ = 0; /* size of write */
2100 + *bp++ = htonl(attr->ia_size); /* new file length */
2101 +
2102 +diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
2103 +index 5aa57929e8c2..6e97a42d24d1 100644
2104 +--- a/fs/afs/yfsclient.c
2105 ++++ b/fs/afs/yfsclient.c
2106 +@@ -1514,7 +1514,7 @@ static int yfs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr)
2107 + bp = xdr_encode_u32(bp, 0); /* RPC flags */
2108 + bp = xdr_encode_YFSFid(bp, &vnode->fid);
2109 + bp = xdr_encode_YFS_StoreStatus(bp, attr);
2110 +- bp = xdr_encode_u64(bp, 0); /* position of start of write */
2111 ++ bp = xdr_encode_u64(bp, attr->ia_size); /* position of start of write */
2112 + bp = xdr_encode_u64(bp, 0); /* size of write */
2113 + bp = xdr_encode_u64(bp, attr->ia_size); /* new file length */
2114 + yfs_check_req(call, bp);
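
In both the AFS and YFS encoders the write position was hard-coded to 0; the fix encodes attr->ia_size so the zero-length store is positioned at the new file length. On the wire a 64-bit value travels as two big-endian 32-bit words, high word first, exactly as the hunk above spells out. A runnable sketch of that split:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Encode a u64 as two network-order u32 words, high word first. */
    static uint32_t *encode_u64(uint32_t *bp, uint64_t v)
    {
        *bp++ = htonl((uint32_t)(v >> 32));
        *bp++ = htonl((uint32_t)v);
        return bp;
    }

    int main(void)
    {
        uint32_t buf[2];
        uint64_t ia_size = 0x123456789aULL;   /* illustrative file size */

        encode_u64(buf, ia_size);
        printf("%08x %08x\n", ntohl(buf[0]), ntohl(buf[1]));
        return 0;
    }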
2115 +diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
2116 +index 4ec2b660d014..7f3ece91a4d0 100644
2117 +--- a/fs/btrfs/transaction.c
2118 ++++ b/fs/btrfs/transaction.c
2119 +@@ -1886,8 +1886,10 @@ static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
2120 + }
2121 + }
2122 +
2123 +-static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
2124 ++static inline int btrfs_start_delalloc_flush(struct btrfs_trans_handle *trans)
2125 + {
2126 ++ struct btrfs_fs_info *fs_info = trans->fs_info;
2127 ++
2128 + /*
2129 + * We use writeback_inodes_sb here because if we used
2130 + * btrfs_start_delalloc_roots we would deadlock with fs freeze.
2131 +@@ -1897,15 +1899,50 @@ static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
2132 + * from already being in a transaction and our join_transaction doesn't
2133 + * have to re-take the fs freeze lock.
2134 + */
2135 +- if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
2136 ++ if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) {
2137 + writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
2138 ++ } else {
2139 ++ struct btrfs_pending_snapshot *pending;
2140 ++ struct list_head *head = &trans->transaction->pending_snapshots;
2141 ++
2142 ++ /*
2143 ++		 * Flush delalloc for any root that is going to be snapshotted.
2144 ++		 * This is done to avoid a corrupted version of files in the
2145 ++		 * snapshots that had both buffered and direct IO writes (even
2146 ++ * if they were done sequentially) due to an unordered update of
2147 ++ * the inode's size on disk.
2148 ++ */
2149 ++ list_for_each_entry(pending, head, list) {
2150 ++ int ret;
2151 ++
2152 ++ ret = btrfs_start_delalloc_snapshot(pending->root);
2153 ++ if (ret)
2154 ++ return ret;
2155 ++ }
2156 ++ }
2157 + return 0;
2158 + }
2159 +
2160 +-static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
2161 ++static inline void btrfs_wait_delalloc_flush(struct btrfs_trans_handle *trans)
2162 + {
2163 +- if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
2164 ++ struct btrfs_fs_info *fs_info = trans->fs_info;
2165 ++
2166 ++ if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) {
2167 + btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
2168 ++ } else {
2169 ++ struct btrfs_pending_snapshot *pending;
2170 ++ struct list_head *head = &trans->transaction->pending_snapshots;
2171 ++
2172 ++ /*
2173 ++		 * Wait for any delalloc that we started previously for the roots
2174 ++ * that are going to be snapshotted. This is to avoid a corrupted
2175 ++ * version of files in the snapshots that had both buffered and
2176 ++ * direct IO writes (even if they were done sequentially).
2177 ++ */
2178 ++ list_for_each_entry(pending, head, list)
2179 ++ btrfs_wait_ordered_extents(pending->root,
2180 ++ U64_MAX, 0, U64_MAX);
2181 ++ }
2182 + }
2183 +
2184 + int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
2185 +@@ -2024,7 +2061,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
2186 +
2187 + extwriter_counter_dec(cur_trans, trans->type);
2188 +
2189 +- ret = btrfs_start_delalloc_flush(fs_info);
2190 ++ ret = btrfs_start_delalloc_flush(trans);
2191 + if (ret)
2192 + goto cleanup_transaction;
2193 +
2194 +@@ -2040,7 +2077,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
2195 + if (ret)
2196 + goto cleanup_transaction;
2197 +
2198 +- btrfs_wait_delalloc_flush(fs_info);
2199 ++ btrfs_wait_delalloc_flush(trans);
2200 +
2201 + btrfs_scrub_pause(fs_info);
2202 + /*
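
The btrfs change turns both helpers into a two-phase start/wait pattern: first kick off delalloc flushing for every root about to be snapshotted, then, in the second helper, wait for the ordered extents. Starting everything before waiting lets the flushes proceed in parallel. A minimal sketch of the shape (stub functions, not the btrfs API):

    #include <stdio.h>

    struct root { int id; };

    /* Phase 1: kick off writeback for every pending snapshot root. */
    static int start_flush(struct root *r)
    {
        printf("start flush for root %d\n", r->id);
        return 0;
    }

    /* Phase 2: only after every flush was started, wait for each one.
     * Splitting start and wait is why the patch adds matching loops to
     * both helpers instead of flushing synchronously in one place. */
    static void wait_flush(struct root *r)
    {
        printf("wait on root %d\n", r->id);
    }

    int main(void)
    {
        struct root pending[2] = { { 1 }, { 2 } };

        for (int i = 0; i < 2; i++)
            if (start_flush(&pending[i]))
                return 1;
        for (int i = 0; i < 2; i++)
            wait_flush(&pending[i]);
        return 0;
    }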
2203 +diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
2204 +index 9d1f34d46627..f7f9e305aaf8 100644
2205 +--- a/fs/ceph/inode.c
2206 ++++ b/fs/ceph/inode.c
2207 +@@ -524,6 +524,7 @@ static void ceph_i_callback(struct rcu_head *head)
2208 + struct inode *inode = container_of(head, struct inode, i_rcu);
2209 + struct ceph_inode_info *ci = ceph_inode(inode);
2210 +
2211 ++ kfree(ci->i_symlink);
2212 + kmem_cache_free(ceph_inode_cachep, ci);
2213 + }
2214 +
2215 +@@ -561,7 +562,6 @@ void ceph_destroy_inode(struct inode *inode)
2216 + ceph_put_snap_realm(mdsc, realm);
2217 + }
2218 +
2219 +- kfree(ci->i_symlink);
2220 + while ((n = rb_first(&ci->i_fragtree)) != NULL) {
2221 + frag = rb_entry(n, struct ceph_inode_frag, node);
2222 + rb_erase(n, &ci->i_fragtree);
2223 +diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
2224 +index 809c0f2f9942..64f4de983468 100644
2225 +--- a/fs/fuse/dev.c
2226 ++++ b/fs/fuse/dev.c
2227 +@@ -2034,10 +2034,8 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
2228 + rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
2229 +
2230 + ret = -EINVAL;
2231 +- if (rem < len) {
2232 +- pipe_unlock(pipe);
2233 +- goto out;
2234 +- }
2235 ++ if (rem < len)
2236 ++ goto out_free;
2237 +
2238 + rem = len;
2239 + while (rem) {
2240 +@@ -2055,7 +2053,9 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
2241 + pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
2242 + pipe->nrbufs--;
2243 + } else {
2244 +- pipe_buf_get(pipe, ibuf);
2245 ++ if (!pipe_buf_get(pipe, ibuf))
2246 ++ goto out_free;
2247 ++
2248 + *obuf = *ibuf;
2249 + obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
2250 + obuf->len = rem;
2251 +@@ -2078,11 +2078,11 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
2252 + ret = fuse_dev_do_write(fud, &cs, len);
2253 +
2254 + pipe_lock(pipe);
2255 ++out_free:
2256 + for (idx = 0; idx < nbuf; idx++)
2257 + pipe_buf_release(pipe, &bufs[idx]);
2258 + pipe_unlock(pipe);
2259 +
2260 +-out:
2261 + kvfree(bufs);
2262 + return ret;
2263 + }
2264 +diff --git a/fs/nfs/client.c b/fs/nfs/client.c
2265 +index fb1cf1a4bda2..90d71fda65ce 100644
2266 +--- a/fs/nfs/client.c
2267 ++++ b/fs/nfs/client.c
2268 +@@ -453,7 +453,7 @@ void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
2269 + case XPRT_TRANSPORT_RDMA:
2270 + if (retrans == NFS_UNSPEC_RETRANS)
2271 + to->to_retries = NFS_DEF_TCP_RETRANS;
2272 +- if (timeo == NFS_UNSPEC_TIMEO || to->to_retries == 0)
2273 ++ if (timeo == NFS_UNSPEC_TIMEO || to->to_initval == 0)
2274 + to->to_initval = NFS_DEF_TCP_TIMEO * HZ / 10;
2275 + if (to->to_initval > NFS_MAX_TCP_TIMEOUT)
2276 + to->to_initval = NFS_MAX_TCP_TIMEOUT;
2277 +diff --git a/fs/pipe.c b/fs/pipe.c
2278 +index c51750ed4011..2a297bce381f 100644
2279 +--- a/fs/pipe.c
2280 ++++ b/fs/pipe.c
2281 +@@ -189,9 +189,9 @@ EXPORT_SYMBOL(generic_pipe_buf_steal);
2282 + * in the tee() system call, when we duplicate the buffers in one
2283 + * pipe into another.
2284 + */
2285 +-void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
2286 ++bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
2287 + {
2288 +- get_page(buf->page);
2289 ++ return try_get_page(buf->page);
2290 + }
2291 + EXPORT_SYMBOL(generic_pipe_buf_get);
2292 +
2293 +diff --git a/fs/splice.c b/fs/splice.c
2294 +index 7da7d5437472..c38c7e7a49c9 100644
2295 +--- a/fs/splice.c
2296 ++++ b/fs/splice.c
2297 +@@ -1588,7 +1588,11 @@ retry:
2298 + * Get a reference to this pipe buffer,
2299 + * so we can copy the contents over.
2300 + */
2301 +- pipe_buf_get(ipipe, ibuf);
2302 ++ if (!pipe_buf_get(ipipe, ibuf)) {
2303 ++ if (ret == 0)
2304 ++ ret = -EFAULT;
2305 ++ break;
2306 ++ }
2307 + *obuf = *ibuf;
2308 +
2309 + /*
2310 +@@ -1662,7 +1666,11 @@ static int link_pipe(struct pipe_inode_info *ipipe,
2311 + * Get a reference to this pipe buffer,
2312 + * so we can copy the contents over.
2313 + */
2314 +- pipe_buf_get(ipipe, ibuf);
2315 ++ if (!pipe_buf_get(ipipe, ibuf)) {
2316 ++ if (ret == 0)
2317 ++ ret = -EFAULT;
2318 ++ break;
2319 ++ }
2320 +
2321 + obuf = opipe->bufs + nbuf;
2322 + *obuf = *ibuf;
2323 +diff --git a/include/linux/mm.h b/include/linux/mm.h
2324 +index 80bb6408fe73..7000ddd807e0 100644
2325 +--- a/include/linux/mm.h
2326 ++++ b/include/linux/mm.h
2327 +@@ -965,6 +965,10 @@ static inline bool is_pci_p2pdma_page(const struct page *page)
2328 + }
2329 + #endif /* CONFIG_DEV_PAGEMAP_OPS */
2330 +
2331 ++/* 127: arbitrary random number, small enough to assemble well */
2332 ++#define page_ref_zero_or_close_to_overflow(page) \
2333 ++ ((unsigned int) page_ref_count(page) + 127u <= 127u)
2334 ++
2335 + static inline void get_page(struct page *page)
2336 + {
2337 + page = compound_head(page);
2338 +@@ -972,8 +976,17 @@ static inline void get_page(struct page *page)
2339 + * Getting a normal page or the head of a compound page
2340 + * requires to already have an elevated page->_refcount.
2341 + */
2342 +- VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
2343 ++ VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page);
2344 ++ page_ref_inc(page);
2345 ++}
2346 ++
2347 ++static inline __must_check bool try_get_page(struct page *page)
2348 ++{
2349 ++ page = compound_head(page);
2350 ++ if (WARN_ON_ONCE(page_ref_count(page) <= 0))
2351 ++ return false;
2352 + page_ref_inc(page);
2353 ++ return true;
2354 + }
2355 +
2356 + static inline void put_page(struct page *page)
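
page_ref_zero_or_close_to_overflow() folds two tests into one compare: casting the signed refcount to unsigned and adding 127 maps exactly the range [-127, 0] onto [0, 127], so a single comparison catches both "already zero" and "wrapped negative". The demo below prints which counts trip the check:

    #include <stdio.h>

    /* True iff refcount is in [-127, 0] after the unsigned wraparound. */
    static int zero_or_close_to_overflow(int refcount)
    {
        return (unsigned int)refcount + 127u <= 127u;
    }

    int main(void)
    {
        int samples[] = { 1, 0, -1, -127, -128, 0x7fffffff };

        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
            printf("%11d -> %d\n", samples[i],
                   zero_or_close_to_overflow(samples[i]));
        return 0;
    }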
2357 +diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
2358 +index 66ee63cd5968..7897a3cc05b9 100644
2359 +--- a/include/linux/pipe_fs_i.h
2360 ++++ b/include/linux/pipe_fs_i.h
2361 +@@ -108,18 +108,20 @@ struct pipe_buf_operations {
2362 + /*
2363 + * Get a reference to the pipe buffer.
2364 + */
2365 +- void (*get)(struct pipe_inode_info *, struct pipe_buffer *);
2366 ++ bool (*get)(struct pipe_inode_info *, struct pipe_buffer *);
2367 + };
2368 +
2369 + /**
2370 + * pipe_buf_get - get a reference to a pipe_buffer
2371 + * @pipe: the pipe that the buffer belongs to
2372 + * @buf: the buffer to get a reference to
2373 ++ *
2374 ++ * Return: %true if the reference was successfully obtained.
2375 + */
2376 +-static inline void pipe_buf_get(struct pipe_inode_info *pipe,
2377 ++static inline __must_check bool pipe_buf_get(struct pipe_inode_info *pipe,
2378 + struct pipe_buffer *buf)
2379 + {
2380 +- buf->ops->get(pipe, buf);
2381 ++ return buf->ops->get(pipe, buf);
2382 + }
2383 +
2384 + /**
2385 +@@ -178,7 +180,7 @@ struct pipe_inode_info *alloc_pipe_info(void);
2386 + void free_pipe_info(struct pipe_inode_info *);
2387 +
2388 + /* Generic pipe buffer ops functions */
2389 +-void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
2390 ++bool generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
2391 + int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
2392 + int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
2393 + int generic_pipe_buf_nosteal(struct pipe_inode_info *, struct pipe_buffer *);
2394 +diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
2395 +index 13789d10a50e..76b8399b17f6 100644
2396 +--- a/include/linux/sched/signal.h
2397 ++++ b/include/linux/sched/signal.h
2398 +@@ -417,10 +417,20 @@ static inline void set_restore_sigmask(void)
2399 + set_thread_flag(TIF_RESTORE_SIGMASK);
2400 + WARN_ON(!test_thread_flag(TIF_SIGPENDING));
2401 + }
2402 ++
2403 ++static inline void clear_tsk_restore_sigmask(struct task_struct *tsk)
2404 ++{
2405 ++ clear_tsk_thread_flag(tsk, TIF_RESTORE_SIGMASK);
2406 ++}
2407 ++
2408 + static inline void clear_restore_sigmask(void)
2409 + {
2410 + clear_thread_flag(TIF_RESTORE_SIGMASK);
2411 + }
2412 ++static inline bool test_tsk_restore_sigmask(struct task_struct *tsk)
2413 ++{
2414 ++ return test_tsk_thread_flag(tsk, TIF_RESTORE_SIGMASK);
2415 ++}
2416 + static inline bool test_restore_sigmask(void)
2417 + {
2418 + return test_thread_flag(TIF_RESTORE_SIGMASK);
2419 +@@ -438,6 +448,10 @@ static inline void set_restore_sigmask(void)
2420 + current->restore_sigmask = true;
2421 + WARN_ON(!test_thread_flag(TIF_SIGPENDING));
2422 + }
2423 ++static inline void clear_tsk_restore_sigmask(struct task_struct *tsk)
2424 ++{
2425 ++ tsk->restore_sigmask = false;
2426 ++}
2427 + static inline void clear_restore_sigmask(void)
2428 + {
2429 + current->restore_sigmask = false;
2430 +@@ -446,6 +460,10 @@ static inline bool test_restore_sigmask(void)
2431 + {
2432 + return current->restore_sigmask;
2433 + }
2434 ++static inline bool test_tsk_restore_sigmask(struct task_struct *tsk)
2435 ++{
2436 ++ return tsk->restore_sigmask;
2437 ++}
2438 + static inline bool test_and_clear_restore_sigmask(void)
2439 + {
2440 + if (!current->restore_sigmask)
2441 +diff --git a/include/net/tc_act/tc_gact.h b/include/net/tc_act/tc_gact.h
2442 +index ef8dd0db70ce..56935bf027a7 100644
2443 +--- a/include/net/tc_act/tc_gact.h
2444 ++++ b/include/net/tc_act/tc_gact.h
2445 +@@ -56,7 +56,7 @@ static inline bool is_tcf_gact_goto_chain(const struct tc_action *a)
2446 +
2447 + static inline u32 tcf_gact_goto_chain_index(const struct tc_action *a)
2448 + {
2449 +- return a->goto_chain->index;
2450 ++ return READ_ONCE(a->tcfa_action) & TC_ACT_EXT_VAL_MASK;
2451 + }
2452 +
2453 + #endif /* __NET_TC_GACT_H */
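
The gact fix stops dereferencing a->goto_chain, which a concurrent action update can replace, and instead derives the chain index from one snapshot of tcfa_action. A userspace analogue using a C11 relaxed atomic load in place of READ_ONCE() (the mask value here is illustrative, not taken from the uapi header):

    #include <stdatomic.h>
    #include <stdio.h>

    #define DEMO_EXT_VAL_MASK 0x0fffffff   /* illustrative mask value */

    static _Atomic int tcfa_action;

    /* One atomic snapshot of the action word, then derive the chain
     * index from it, instead of chasing a pointer that a concurrent
     * update may be swapping out underneath us. */
    static unsigned int goto_chain_index(void)
    {
        int act = atomic_load_explicit(&tcfa_action, memory_order_relaxed);

        return (unsigned int)act & DEMO_EXT_VAL_MASK;
    }

    int main(void)
    {
        atomic_store(&tcfa_action, 0x20000007);  /* "goto chain 7", say */
        printf("chain index = %u\n", goto_chain_index());
        return 0;
    }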
2454 +diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
2455 +index 13acb9803a6d..05d39e579953 100644
2456 +--- a/include/net/xdp_sock.h
2457 ++++ b/include/net/xdp_sock.h
2458 +@@ -36,7 +36,6 @@ struct xdp_umem {
2459 + u32 headroom;
2460 + u32 chunk_size_nohr;
2461 + struct user_struct *user;
2462 +- struct pid *pid;
2463 + unsigned long address;
2464 + refcount_t users;
2465 + struct work_struct work;
2466 +diff --git a/kernel/ptrace.c b/kernel/ptrace.c
2467 +index 771e93f9c43f..6f357f4fc859 100644
2468 +--- a/kernel/ptrace.c
2469 ++++ b/kernel/ptrace.c
2470 +@@ -29,6 +29,7 @@
2471 + #include <linux/hw_breakpoint.h>
2472 + #include <linux/cn_proc.h>
2473 + #include <linux/compat.h>
2474 ++#include <linux/sched/signal.h>
2475 +
2476 + /*
2477 + * Access another process' address space via ptrace.
2478 +@@ -924,18 +925,26 @@ int ptrace_request(struct task_struct *child, long request,
2479 + ret = ptrace_setsiginfo(child, &siginfo);
2480 + break;
2481 +
2482 +- case PTRACE_GETSIGMASK:
2483 ++ case PTRACE_GETSIGMASK: {
2484 ++ sigset_t *mask;
2485 ++
2486 + if (addr != sizeof(sigset_t)) {
2487 + ret = -EINVAL;
2488 + break;
2489 + }
2490 +
2491 +- if (copy_to_user(datavp, &child->blocked, sizeof(sigset_t)))
2492 ++ if (test_tsk_restore_sigmask(child))
2493 ++ mask = &child->saved_sigmask;
2494 ++ else
2495 ++ mask = &child->blocked;
2496 ++
2497 ++ if (copy_to_user(datavp, mask, sizeof(sigset_t)))
2498 + ret = -EFAULT;
2499 + else
2500 + ret = 0;
2501 +
2502 + break;
2503 ++ }
2504 +
2505 + case PTRACE_SETSIGMASK: {
2506 + sigset_t new_set;
2507 +@@ -961,6 +970,8 @@ int ptrace_request(struct task_struct *child, long request,
2508 + child->blocked = new_set;
2509 + spin_unlock_irq(&child->sighand->siglock);
2510 +
2511 ++ clear_tsk_restore_sigmask(child);
2512 ++
2513 + ret = 0;
2514 + break;
2515 + }
2516 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
2517 +index d07fc2836786..3842773b8aee 100644
2518 +--- a/kernel/trace/trace.c
2519 ++++ b/kernel/trace/trace.c
2520 +@@ -6843,12 +6843,16 @@ static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
2521 + buf->private = 0;
2522 + }
2523 +
2524 +-static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
2525 ++static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
2526 + struct pipe_buffer *buf)
2527 + {
2528 + struct buffer_ref *ref = (struct buffer_ref *)buf->private;
2529 +
2530 ++ if (refcount_read(&ref->refcount) > INT_MAX/2)
2531 ++ return false;
2532 ++
2533 + refcount_inc(&ref->refcount);
2534 ++ return true;
2535 + }
2536 +
2537 + /* Pipe buffer operations for a buffer. */
2538 +diff --git a/lib/sbitmap.c b/lib/sbitmap.c
2539 +index 5b382c1244ed..155fe38756ec 100644
2540 +--- a/lib/sbitmap.c
2541 ++++ b/lib/sbitmap.c
2542 +@@ -591,6 +591,17 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
2543 + void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
2544 + unsigned int cpu)
2545 + {
2546 ++ /*
2547 ++ * Once the clear bit is set, the bit may be allocated out.
2548 ++ *
2549 ++	 * Orders READ/WRITE on the associated instance (such as a request
2550 ++	 * of blk_mq) by this bit to avoid a race with re-allocation,
2551 ++ * and its pair is the memory barrier implied in __sbitmap_get_word.
2552 ++ *
2553 ++ * One invariant is that the clear bit has to be zero when the bit
2554 ++ * is in use.
2555 ++ */
2556 ++ smp_mb__before_atomic();
2557 + sbitmap_deferred_clear_bit(&sbq->sb, nr);
2558 +
2559 + /*
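
The sbitmap comment states the ordering requirement: all writes to the associated request must be visible before the clear bit is, or a re-allocator could see stale request state. smp_mb__before_atomic() is kernel-only; a userspace analogue with C11 atomics puts a release fence ahead of a relaxed read-modify-write:

    #include <stdatomic.h>
    #include <stdio.h>

    static int payload;                    /* data guarded by the bit */
    static atomic_uint cleared_bits;

    /* Publish all prior writes to the "request" before the clear bit
     * becomes visible, so whoever re-allocates the bit also observes
     * the completed writes. */
    static void queue_clear(unsigned int nr)
    {
        atomic_thread_fence(memory_order_release);
        atomic_fetch_or_explicit(&cleared_bits, 1u << nr,
                                 memory_order_relaxed);
    }

    int main(void)
    {
        payload = 42;                      /* last write before the clear */
        queue_clear(3);
        printf("bits=%#x payload=%d\n",
               atomic_load(&cleared_bits), payload);
        return 0;
    }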
2560 +diff --git a/mm/gup.c b/mm/gup.c
2561 +index 75029649baca..81e0bdefa2cc 100644
2562 +--- a/mm/gup.c
2563 ++++ b/mm/gup.c
2564 +@@ -157,8 +157,12 @@ retry:
2565 + goto retry;
2566 + }
2567 +
2568 +- if (flags & FOLL_GET)
2569 +- get_page(page);
2570 ++ if (flags & FOLL_GET) {
2571 ++ if (unlikely(!try_get_page(page))) {
2572 ++ page = ERR_PTR(-ENOMEM);
2573 ++ goto out;
2574 ++ }
2575 ++ }
2576 + if (flags & FOLL_TOUCH) {
2577 + if ((flags & FOLL_WRITE) &&
2578 + !pte_dirty(pte) && !PageDirty(page))
2579 +@@ -295,7 +299,10 @@ retry_locked:
2580 + if (pmd_trans_unstable(pmd))
2581 + ret = -EBUSY;
2582 + } else {
2583 +- get_page(page);
2584 ++ if (unlikely(!try_get_page(page))) {
2585 ++ spin_unlock(ptl);
2586 ++ return ERR_PTR(-ENOMEM);
2587 ++ }
2588 + spin_unlock(ptl);
2589 + lock_page(page);
2590 + ret = split_huge_page(page);
2591 +@@ -497,7 +504,10 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
2592 + if (is_device_public_page(*page))
2593 + goto unmap;
2594 + }
2595 +- get_page(*page);
2596 ++ if (unlikely(!try_get_page(*page))) {
2597 ++ ret = -ENOMEM;
2598 ++ goto unmap;
2599 ++ }
2600 + out:
2601 + ret = 0;
2602 + unmap:
2603 +@@ -1393,6 +1403,20 @@ static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
2604 + }
2605 + }
2606 +
2607 ++/*
2608 ++ * Return the compound head page with ref appropriately incremented,
2609 ++ * or NULL if that failed.
2610 ++ */
2611 ++static inline struct page *try_get_compound_head(struct page *page, int refs)
2612 ++{
2613 ++ struct page *head = compound_head(page);
2614 ++ if (WARN_ON_ONCE(page_ref_count(head) < 0))
2615 ++ return NULL;
2616 ++ if (unlikely(!page_cache_add_speculative(head, refs)))
2617 ++ return NULL;
2618 ++ return head;
2619 ++}
2620 ++
2621 + #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
2622 + static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
2623 + int write, struct page **pages, int *nr)
2624 +@@ -1427,9 +1451,9 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
2625 +
2626 + VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2627 + page = pte_page(pte);
2628 +- head = compound_head(page);
2629 +
2630 +- if (!page_cache_get_speculative(head))
2631 ++ head = try_get_compound_head(page, 1);
2632 ++ if (!head)
2633 + goto pte_unmap;
2634 +
2635 + if (unlikely(pte_val(pte) != pte_val(*ptep))) {
2636 +@@ -1568,8 +1592,8 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2637 + refs++;
2638 + } while (addr += PAGE_SIZE, addr != end);
2639 +
2640 +- head = compound_head(pmd_page(orig));
2641 +- if (!page_cache_add_speculative(head, refs)) {
2642 ++ head = try_get_compound_head(pmd_page(orig), refs);
2643 ++ if (!head) {
2644 + *nr -= refs;
2645 + return 0;
2646 + }
2647 +@@ -1606,8 +1630,8 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
2648 + refs++;
2649 + } while (addr += PAGE_SIZE, addr != end);
2650 +
2651 +- head = compound_head(pud_page(orig));
2652 +- if (!page_cache_add_speculative(head, refs)) {
2653 ++ head = try_get_compound_head(pud_page(orig), refs);
2654 ++ if (!head) {
2655 + *nr -= refs;
2656 + return 0;
2657 + }
2658 +@@ -1643,8 +1667,8 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
2659 + refs++;
2660 + } while (addr += PAGE_SIZE, addr != end);
2661 +
2662 +- head = compound_head(pgd_page(orig));
2663 +- if (!page_cache_add_speculative(head, refs)) {
2664 ++ head = try_get_compound_head(pgd_page(orig), refs);
2665 ++ if (!head) {
2666 + *nr -= refs;
2667 + return 0;
2668 + }
2669 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
2670 +index 8dfdffc34a99..c220315dc533 100644
2671 +--- a/mm/hugetlb.c
2672 ++++ b/mm/hugetlb.c
2673 +@@ -4298,6 +4298,19 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
2674 +
2675 + pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
2676 + page = pte_page(huge_ptep_get(pte));
2677 ++
2678 ++ /*
2679 ++ * Instead of doing 'try_get_page()' below in the same_page
2680 ++ * loop, just check the count once here.
2681 ++ */
2682 ++ if (unlikely(page_count(page) <= 0)) {
2683 ++ if (pages) {
2684 ++ spin_unlock(ptl);
2685 ++ remainder = 0;
2686 ++ err = -ENOMEM;
2687 ++ break;
2688 ++ }
2689 ++ }
2690 + same_page:
2691 + if (pages) {
2692 + pages[i] = mem_map_offset(page, pfn_offset);
2693 +diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
2694 +index ea51b2d898ec..c980ce43e3ba 100644
2695 +--- a/mm/kasan/kasan.h
2696 ++++ b/mm/kasan/kasan.h
2697 +@@ -164,7 +164,10 @@ static inline u8 random_tag(void)
2698 + #endif
2699 +
2700 + #ifndef arch_kasan_set_tag
2701 +-#define arch_kasan_set_tag(addr, tag) ((void *)(addr))
2702 ++static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
2703 ++{
2704 ++ return addr;
2705 ++}
2706 + #endif
2707 + #ifndef arch_kasan_reset_tag
2708 + #define arch_kasan_reset_tag(addr) ((void *)(addr))
2709 +diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
2710 +index 40d058378b52..fc605758323b 100644
2711 +--- a/net/bridge/br_netfilter_hooks.c
2712 ++++ b/net/bridge/br_netfilter_hooks.c
2713 +@@ -502,6 +502,7 @@ static unsigned int br_nf_pre_routing(void *priv,
2714 + nf_bridge->ipv4_daddr = ip_hdr(skb)->daddr;
2715 +
2716 + skb->protocol = htons(ETH_P_IP);
2717 ++ skb->transport_header = skb->network_header + ip_hdr(skb)->ihl * 4;
2718 +
2719 + NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, state->net, state->sk, skb,
2720 + skb->dev, NULL,
2721 +diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
2722 +index 564710f88f93..e88d6641647b 100644
2723 +--- a/net/bridge/br_netfilter_ipv6.c
2724 ++++ b/net/bridge/br_netfilter_ipv6.c
2725 +@@ -235,6 +235,8 @@ unsigned int br_nf_pre_routing_ipv6(void *priv,
2726 + nf_bridge->ipv6_daddr = ipv6_hdr(skb)->daddr;
2727 +
2728 + skb->protocol = htons(ETH_P_IPV6);
2729 ++ skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
2730 ++
2731 + NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, state->net, state->sk, skb,
2732 + skb->dev, NULL,
2733 + br_nf_pre_routing_finish_ipv6);
2734 +diff --git a/net/ipv6/netfilter/ip6t_srh.c b/net/ipv6/netfilter/ip6t_srh.c
2735 +index 1059894a6f4c..4cb83fb69844 100644
2736 +--- a/net/ipv6/netfilter/ip6t_srh.c
2737 ++++ b/net/ipv6/netfilter/ip6t_srh.c
2738 +@@ -210,6 +210,8 @@ static bool srh1_mt6(const struct sk_buff *skb, struct xt_action_param *par)
2739 + psidoff = srhoff + sizeof(struct ipv6_sr_hdr) +
2740 + ((srh->segments_left + 1) * sizeof(struct in6_addr));
2741 + psid = skb_header_pointer(skb, psidoff, sizeof(_psid), &_psid);
2742 ++ if (!psid)
2743 ++ return false;
2744 + if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_PSID,
2745 + ipv6_masked_addr_cmp(psid, &srhinfo->psid_msk,
2746 + &srhinfo->psid_addr)))
2747 +@@ -223,6 +225,8 @@ static bool srh1_mt6(const struct sk_buff *skb, struct xt_action_param *par)
2748 + nsidoff = srhoff + sizeof(struct ipv6_sr_hdr) +
2749 + ((srh->segments_left - 1) * sizeof(struct in6_addr));
2750 + nsid = skb_header_pointer(skb, nsidoff, sizeof(_nsid), &_nsid);
2751 ++ if (!nsid)
2752 ++ return false;
2753 + if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_NSID,
2754 + ipv6_masked_addr_cmp(nsid, &srhinfo->nsid_msk,
2755 + &srhinfo->nsid_addr)))
2756 +@@ -233,6 +237,8 @@ static bool srh1_mt6(const struct sk_buff *skb, struct xt_action_param *par)
2757 + if (srhinfo->mt_flags & IP6T_SRH_LSID) {
2758 + lsidoff = srhoff + sizeof(struct ipv6_sr_hdr);
2759 + lsid = skb_header_pointer(skb, lsidoff, sizeof(_lsid), &_lsid);
2760 ++ if (!lsid)
2761 ++ return false;
2762 + if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_LSID,
2763 + ipv6_masked_addr_cmp(lsid, &srhinfo->lsid_msk,
2764 + &srhinfo->lsid_addr)))
2765 +diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
2766 +index beb3a69ce1d4..0f0e5806bf77 100644
2767 +--- a/net/netfilter/Kconfig
2768 ++++ b/net/netfilter/Kconfig
2769 +@@ -995,6 +995,7 @@ config NETFILTER_XT_TARGET_TEE
2770 + depends on NETFILTER_ADVANCED
2771 + depends on IPV6 || IPV6=n
2772 + depends on !NF_CONNTRACK || NF_CONNTRACK
2773 ++ depends on IP6_NF_IPTABLES || !IP6_NF_IPTABLES
2774 + select NF_DUP_IPV4
2775 + select NF_DUP_IPV6 if IP6_NF_IPTABLES
2776 + ---help---
2777 +diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
2778 +index fa61208371f8..321a0036fdf5 100644
2779 +--- a/net/netfilter/nft_set_rbtree.c
2780 ++++ b/net/netfilter/nft_set_rbtree.c
2781 +@@ -308,10 +308,6 @@ static void *nft_rbtree_deactivate(const struct net *net,
2782 + else if (d > 0)
2783 + parent = parent->rb_right;
2784 + else {
2785 +- if (!nft_set_elem_active(&rbe->ext, genmask)) {
2786 +- parent = parent->rb_left;
2787 +- continue;
2788 +- }
2789 + if (nft_rbtree_interval_end(rbe) &&
2790 + !nft_rbtree_interval_end(this)) {
2791 + parent = parent->rb_left;
2792 +@@ -320,6 +316,9 @@ static void *nft_rbtree_deactivate(const struct net *net,
2793 + nft_rbtree_interval_end(this)) {
2794 + parent = parent->rb_right;
2795 + continue;
2796 ++ } else if (!nft_set_elem_active(&rbe->ext, genmask)) {
2797 ++ parent = parent->rb_left;
2798 ++ continue;
2799 + }
2800 + nft_rbtree_flush(net, set, rbe);
2801 + return rbe;
2802 +diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
2803 +index 7754aa3e434f..f88c2bd1335a 100644
2804 +--- a/net/sunrpc/xprtsock.c
2805 ++++ b/net/sunrpc/xprtsock.c
2806 +@@ -486,8 +486,8 @@ xs_read_stream_request(struct sock_xprt *transport, struct msghdr *msg,
2807 + int flags, struct rpc_rqst *req)
2808 + {
2809 + struct xdr_buf *buf = &req->rq_private_buf;
2810 +- size_t want, read;
2811 +- ssize_t ret;
2812 ++ size_t want, uninitialized_var(read);
2813 ++ ssize_t uninitialized_var(ret);
2814 +
2815 + xs_read_header(transport, buf);
2816 +
2817 +diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
2818 +index 37e1fe180769..9c767c68ed3a 100644
2819 +--- a/net/xdp/xdp_umem.c
2820 ++++ b/net/xdp/xdp_umem.c
2821 +@@ -189,9 +189,6 @@ static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
2822 +
2823 + static void xdp_umem_release(struct xdp_umem *umem)
2824 + {
2825 +- struct task_struct *task;
2826 +- struct mm_struct *mm;
2827 +-
2828 + xdp_umem_clear_dev(umem);
2829 +
2830 + if (umem->fq) {
2831 +@@ -208,21 +205,10 @@ static void xdp_umem_release(struct xdp_umem *umem)
2832 +
2833 + xdp_umem_unpin_pages(umem);
2834 +
2835 +- task = get_pid_task(umem->pid, PIDTYPE_PID);
2836 +- put_pid(umem->pid);
2837 +- if (!task)
2838 +- goto out;
2839 +- mm = get_task_mm(task);
2840 +- put_task_struct(task);
2841 +- if (!mm)
2842 +- goto out;
2843 +-
2844 +- mmput(mm);
2845 + kfree(umem->pages);
2846 + umem->pages = NULL;
2847 +
2848 + xdp_umem_unaccount_pages(umem);
2849 +-out:
2850 + kfree(umem);
2851 + }
2852 +
2853 +@@ -351,7 +337,6 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
2854 + if (size_chk < 0)
2855 + return -EINVAL;
2856 +
2857 +- umem->pid = get_task_pid(current, PIDTYPE_PID);
2858 + umem->address = (unsigned long)addr;
2859 + umem->chunk_mask = ~((u64)chunk_size - 1);
2860 + umem->size = size;
2861 +@@ -367,7 +352,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
2862 +
2863 + err = xdp_umem_account_pages(umem);
2864 + if (err)
2865 +- goto out;
2866 ++ return err;
2867 +
2868 + err = xdp_umem_pin_pages(umem);
2869 + if (err)
2870 +@@ -386,8 +371,6 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
2871 +
2872 + out_account:
2873 + xdp_umem_unaccount_pages(umem);
2874 +-out:
2875 +- put_pid(umem->pid);
2876 + return err;
2877 + }
2878 +
2879 +diff --git a/scripts/kconfig/lxdialog/inputbox.c b/scripts/kconfig/lxdialog/inputbox.c
2880 +index 611945611bf8..1dcfb288ee63 100644
2881 +--- a/scripts/kconfig/lxdialog/inputbox.c
2882 ++++ b/scripts/kconfig/lxdialog/inputbox.c
2883 +@@ -113,7 +113,8 @@ do_resize:
2884 + case KEY_DOWN:
2885 + break;
2886 + case KEY_BACKSPACE:
2887 +- case 127:
2888 ++ case 8: /* ^H */
2889 ++ case 127: /* ^? */
2890 + if (pos) {
2891 + wattrset(dialog, dlg.inputbox.atr);
2892 + if (input_x == 0) {
2893 +diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
2894 +index a4670f4e825a..ac92c0ded6c5 100644
2895 +--- a/scripts/kconfig/nconf.c
2896 ++++ b/scripts/kconfig/nconf.c
2897 +@@ -1048,7 +1048,7 @@ static int do_match(int key, struct match_state *state, int *ans)
2898 + state->match_direction = FIND_NEXT_MATCH_UP;
2899 + *ans = get_mext_match(state->pattern,
2900 + state->match_direction);
2901 +- } else if (key == KEY_BACKSPACE || key == 127) {
2902 ++ } else if (key == KEY_BACKSPACE || key == 8 || key == 127) {
2903 + state->pattern[strlen(state->pattern)-1] = '\0';
2904 + adj_match_dir(&state->match_direction);
2905 + } else
2906 +diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c
2907 +index 7be620a1fcdb..77f525a8617c 100644
2908 +--- a/scripts/kconfig/nconf.gui.c
2909 ++++ b/scripts/kconfig/nconf.gui.c
2910 +@@ -439,7 +439,8 @@ int dialog_inputbox(WINDOW *main_window,
2911 + case KEY_F(F_EXIT):
2912 + case KEY_F(F_BACK):
2913 + break;
2914 +- case 127:
2915 ++ case 8: /* ^H */
2916 ++ case 127: /* ^? */
2917 + case KEY_BACKSPACE:
2918 + if (cursor_position > 0) {
2919 + memmove(&result[cursor_position-1],
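
Both lxdialog and nconf now accept 8 (^H) as backspace alongside 127 (^?) and the curses KEY_BACKSPACE, since terminals disagree about which byte the key sends. A tiny standalone check (0407 is the usual curses KEY_BACKSPACE value, hard-coded here only for the demo):

    #include <stdio.h>

    #define DEMO_KEY_BACKSPACE 0407    /* curses value, for illustration */

    /* Terminals send BS (8, ^H) or DEL (127, ^?) for the backspace key,
     * depending on termios/terminfo; curses may deliver its own code. */
    static int is_backspace(int key)
    {
        return key == DEMO_KEY_BACKSPACE || key == 8 || key == 127;
    }

    int main(void)
    {
        int keys[] = { 8, 127, 0407, 'a' };

        for (unsigned i = 0; i < sizeof(keys) / sizeof(keys[0]); i++)
            printf("%4d -> %d\n", keys[i], is_backspace(keys[i]));
        return 0;
    }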
2920 +diff --git a/scripts/selinux/genheaders/genheaders.c b/scripts/selinux/genheaders/genheaders.c
2921 +index 1ceedea847dd..544ca126a8a8 100644
2922 +--- a/scripts/selinux/genheaders/genheaders.c
2923 ++++ b/scripts/selinux/genheaders/genheaders.c
2924 +@@ -9,7 +9,6 @@
2925 + #include <string.h>
2926 + #include <errno.h>
2927 + #include <ctype.h>
2928 +-#include <sys/socket.h>
2929 +
2930 + struct security_class_mapping {
2931 + const char *name;
2932 +diff --git a/scripts/selinux/mdp/mdp.c b/scripts/selinux/mdp/mdp.c
2933 +index 073fe7537f6c..6d51b74bc679 100644
2934 +--- a/scripts/selinux/mdp/mdp.c
2935 ++++ b/scripts/selinux/mdp/mdp.c
2936 +@@ -32,7 +32,6 @@
2937 + #include <stdlib.h>
2938 + #include <unistd.h>
2939 + #include <string.h>
2940 +-#include <sys/socket.h>
2941 +
2942 + static void usage(char *name)
2943 + {
2944 +diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
2945 +index bd5fe0d3204a..201f7e588a29 100644
2946 +--- a/security/selinux/include/classmap.h
2947 ++++ b/security/selinux/include/classmap.h
2948 +@@ -1,5 +1,6 @@
2949 + /* SPDX-License-Identifier: GPL-2.0 */
2950 + #include <linux/capability.h>
2951 ++#include <linux/socket.h>
2952 +
2953 + #define COMMON_FILE_SOCK_PERMS "ioctl", "read", "write", "create", \
2954 + "getattr", "setattr", "lock", "relabelfrom", "relabelto", "append", "map"
2955 +diff --git a/tools/build/feature/test-libopencsd.c b/tools/build/feature/test-libopencsd.c
2956 +index d68eb4fb40cc..2b0e02c38870 100644
2957 +--- a/tools/build/feature/test-libopencsd.c
2958 ++++ b/tools/build/feature/test-libopencsd.c
2959 +@@ -4,9 +4,9 @@
2960 + /*
2961 + * Check OpenCSD library version is sufficient to provide required features
2962 + */
2963 +-#define OCSD_MIN_VER ((0 << 16) | (10 << 8) | (0))
2964 ++#define OCSD_MIN_VER ((0 << 16) | (11 << 8) | (0))
2965 + #if !defined(OCSD_VER_NUM) || (OCSD_VER_NUM < OCSD_MIN_VER)
2966 +-#error "OpenCSD >= 0.10.0 is required"
2967 ++#error "OpenCSD >= 0.11.0 is required"
2968 + #endif
2969 +
2970 + int main(void)
2971 +diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
2972 +index 8c155575c6c5..2a8bf6b45a30 100644
2973 +--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
2974 ++++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
2975 +@@ -374,6 +374,7 @@ cs_etm_decoder__buffer_range(struct cs_etm_decoder *decoder,
2976 + break;
2977 + case OCSD_INSTR_ISB:
2978 + case OCSD_INSTR_DSB_DMB:
2979 ++ case OCSD_INSTR_WFI_WFE:
2980 + case OCSD_INSTR_OTHER:
2981 + default:
2982 + packet->last_instr_taken_branch = false;
2983 +diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
2984 +index 143f7057d581..596db1daee35 100644
2985 +--- a/tools/perf/util/machine.c
2986 ++++ b/tools/perf/util/machine.c
2987 +@@ -1358,6 +1358,20 @@ static void machine__set_kernel_mmap(struct machine *machine,
2988 + machine->vmlinux_map->end = ~0ULL;
2989 + }
2990 +
2991 ++static void machine__update_kernel_mmap(struct machine *machine,
2992 ++ u64 start, u64 end)
2993 ++{
2994 ++ struct map *map = machine__kernel_map(machine);
2995 ++
2996 ++ map__get(map);
2997 ++ map_groups__remove(&machine->kmaps, map);
2998 ++
2999 ++ machine__set_kernel_mmap(machine, start, end);
3000 ++
3001 ++ map_groups__insert(&machine->kmaps, map);
3002 ++ map__put(map);
3003 ++}
3004 ++
3005 + int machine__create_kernel_maps(struct machine *machine)
3006 + {
3007 + struct dso *kernel = machine__get_kernel(machine);
3008 +@@ -1390,17 +1404,11 @@ int machine__create_kernel_maps(struct machine *machine)
3009 + goto out_put;
3010 + }
3011 +
3012 +- /* we have a real start address now, so re-order the kmaps */
3013 +- map = machine__kernel_map(machine);
3014 +-
3015 +- map__get(map);
3016 +- map_groups__remove(&machine->kmaps, map);
3017 +-
3018 +- /* assume it's the last in the kmaps */
3019 +- machine__set_kernel_mmap(machine, addr, ~0ULL);
3020 +-
3021 +- map_groups__insert(&machine->kmaps, map);
3022 +- map__put(map);
3023 ++ /*
3024 ++ * we have a real start address now, so re-order the kmaps
3025 ++ * assume it's the last in the kmaps
3026 ++ */
3027 ++ machine__update_kernel_mmap(machine, addr, ~0ULL);
3028 + }
3029 +
3030 + if (machine__create_extra_kernel_maps(machine, kernel))
3031 +@@ -1536,7 +1544,7 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
3032 + if (strstr(kernel->long_name, "vmlinux"))
3033 + dso__set_short_name(kernel, "[kernel.vmlinux]", false);
3034 +
3035 +- machine__set_kernel_mmap(machine, event->mmap.start,
3036 ++ machine__update_kernel_mmap(machine, event->mmap.start,
3037 + event->mmap.start + event->mmap.len);
3038 +
3039 + /*
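
Both machine.c hunks above funnel through the new machine__update_kernel_mmap() helper, and the point of the helper is its reference-count discipline: the kmaps container owns a reference to the kernel map, so the map has to be pinned with map__get() before it is removed, or removal could drop the last reference and free the map while its address range is being rewritten. A standalone toy model of that pin/remove/update/re-insert pattern (build with: gcc refpin.c):

    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
        int refcnt;
        unsigned long long start, end;
    };

    static struct obj *obj_get(struct obj *o) { o->refcnt++; return o; }
    static void obj_put(struct obj *o)
    {
        if (--o->refcnt == 0) {     /* last reference gone: object dies */
            puts("freed");
            free(o);
        }
    }

    static struct obj *slot;        /* single-slot stand-in for kmaps */

    static void container_insert(struct obj *o) { slot = obj_get(o); }
    static void container_remove(void)
    {
        struct obj *o = slot;
        slot = NULL;
        obj_put(o);                 /* container drops its reference */
    }

    int main(void)
    {
        struct obj *o = calloc(1, sizeof(*o));
        o->refcnt = 1;              /* creator's reference */
        container_insert(o);
        obj_put(o);                 /* now only the container holds it */

        obj_get(o);                 /* pin, as map__get() does */
        container_remove();         /* without the pin, o would be freed here */
        o->start = 0xffff000008080000ULL;   /* update the sort key safely */
        o->end   = ~0ULL;
        container_insert(o);        /* re-insert at the new position */
        obj_put(o);                 /* drop the pin, as map__put() does */
        return 0;
    }
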
3040 +diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
3041 +index f9a0e9938480..cb4a992d6dd3 100644
3042 +--- a/tools/testing/selftests/kvm/Makefile
3043 ++++ b/tools/testing/selftests/kvm/Makefile
3044 +@@ -28,8 +28,8 @@ LIBKVM += $(LIBKVM_$(UNAME_M))
3045 + INSTALL_HDR_PATH = $(top_srcdir)/usr
3046 + LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
3047 + LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include
3048 +-CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I..
3049 +-LDFLAGS += -pthread
3050 ++CFLAGS += -O2 -g -std=gnu99 -fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I..
3051 ++LDFLAGS += -pthread -no-pie
3052 +
3053 + # After inclusion, $(OUTPUT) is defined and
3054 + # $(TEST_GEN_PROGS) starts with $(OUTPUT)/
3055 +diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
3056 +index a84785b02557..07b71ad9734a 100644
3057 +--- a/tools/testing/selftests/kvm/include/kvm_util.h
3058 ++++ b/tools/testing/selftests/kvm/include/kvm_util.h
3059 +@@ -102,6 +102,7 @@ vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);
3060 + struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid);
3061 + void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
3062 + int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
3063 ++void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid);
3064 + void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
3065 + struct kvm_mp_state *mp_state);
3066 + void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs);
3067 +diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
3068 +index b52cfdefecbf..efa0aad8b3c6 100644
3069 +--- a/tools/testing/selftests/kvm/lib/kvm_util.c
3070 ++++ b/tools/testing/selftests/kvm/lib/kvm_util.c
3071 +@@ -1121,6 +1121,22 @@ int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
3072 + return rc;
3073 + }
3074 +
3075 ++void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid)
3076 ++{
3077 ++ struct vcpu *vcpu = vcpu_find(vm, vcpuid);
3078 ++ int ret;
3079 ++
3080 ++ TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
3081 ++
3082 ++ vcpu->state->immediate_exit = 1;
3083 ++ ret = ioctl(vcpu->fd, KVM_RUN, NULL);
3084 ++ vcpu->state->immediate_exit = 0;
3085 ++
3086 ++ TEST_ASSERT(ret == -1 && errno == EINTR,
3087 ++ "KVM_RUN IOCTL didn't exit immediately, rc: %i, errno: %i",
3088 ++ ret, errno);
3089 ++}
3090 ++
3091 + /*
3092 + * VM VCPU Set MP State
3093 + *
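
The new vcpu_run_complete_io() helper leans on the immediate_exit field of struct kvm_run: while it is set, KVM first lets any pending in-kernel completion of the previous exit run, then returns -1/EINTR before ever entering the guest. A minimal standalone demonstration of that contract (assumes access to /dev/kvm on a kernel with KVM_CAP_IMMEDIATE_EXIT; error handling trimmed for brevity; build with: gcc immexit.c):

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include <linux/kvm.h>

    int main(void)
    {
        int kvm = open("/dev/kvm", O_RDWR);
        if (kvm < 0 || !ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_IMMEDIATE_EXIT)) {
            fprintf(stderr, "KVM or KVM_CAP_IMMEDIATE_EXIT unavailable\n");
            return 1;
        }

        int vm   = ioctl(kvm, KVM_CREATE_VM, 0);
        int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
        long sz  = ioctl(kvm, KVM_GET_VCPU_MMAP_SIZE, 0);
        struct kvm_run *run = mmap(NULL, sz, PROT_READ | PROT_WRITE,
                                   MAP_SHARED, vcpu, 0);

        run->immediate_exit = 1;
        int ret = ioctl(vcpu, KVM_RUN, NULL);
        printf("KVM_RUN -> %d, errno=%d (%s)\n", ret, errno, strerror(errno));
        /* Expected: KVM_RUN -> -1, errno=4 (Interrupted system call),
         * without the guest ever having been entered. */
        return 0;
    }
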
3094 +diff --git a/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
3095 +index d503a51fad30..7c2c4d4055a8 100644
3096 +--- a/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
3097 ++++ b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
3098 +@@ -87,22 +87,25 @@ int main(int argc, char *argv[])
3099 + while (1) {
3100 + rc = _vcpu_run(vm, VCPU_ID);
3101 +
3102 +- if (run->exit_reason == KVM_EXIT_IO) {
3103 +- switch (get_ucall(vm, VCPU_ID, &uc)) {
3104 +- case UCALL_SYNC:
3105 +- /* emulate hypervisor clearing CR4.OSXSAVE */
3106 +- vcpu_sregs_get(vm, VCPU_ID, &sregs);
3107 +- sregs.cr4 &= ~X86_CR4_OSXSAVE;
3108 +- vcpu_sregs_set(vm, VCPU_ID, &sregs);
3109 +- break;
3110 +- case UCALL_ABORT:
3111 +- TEST_ASSERT(false, "Guest CR4 bit (OSXSAVE) unsynchronized with CPUID bit.");
3112 +- break;
3113 +- case UCALL_DONE:
3114 +- goto done;
3115 +- default:
3116 +- TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
3117 +- }
3118 ++ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
3119 ++ "Unexpected exit reason: %u (%s),\n",
3120 ++ run->exit_reason,
3121 ++ exit_reason_str(run->exit_reason));
3122 ++
3123 ++ switch (get_ucall(vm, VCPU_ID, &uc)) {
3124 ++ case UCALL_SYNC:
3125 ++ /* emulate hypervisor clearing CR4.OSXSAVE */
3126 ++ vcpu_sregs_get(vm, VCPU_ID, &sregs);
3127 ++ sregs.cr4 &= ~X86_CR4_OSXSAVE;
3128 ++ vcpu_sregs_set(vm, VCPU_ID, &sregs);
3129 ++ break;
3130 ++ case UCALL_ABORT:
3131 ++ TEST_ASSERT(false, "Guest CR4 bit (OSXSAVE) unsynchronized with CPUID bit.");
3132 ++ break;
3133 ++ case UCALL_DONE:
3134 ++ goto done;
3135 ++ default:
3136 ++ TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
3137 + }
3138 + }
3139 +
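
The rewrite above changes failure behaviour rather than logic: an exit reason other than KVM_EXIT_IO used to fall through and loop silently, whereas TEST_ASSERT now aborts the test with a diagnostic the moment something unexpected happens. A tiny sketch of that assert-and-abort pattern (hypothetical minimal macro; the selftests' real TEST_ASSERT in test_util.h reports much more context):

    #include <stdio.h>
    #include <stdlib.h>

    #define ASSERT_SKETCH(cond, fmt, ...)                                   \
        do {                                                                \
            if (!(cond)) {                                                  \
                fprintf(stderr, "assert failed: " fmt "\n", ##__VA_ARGS__); \
                exit(1);                                                    \
            }                                                               \
        } while (0)

    int main(void)
    {
        int exit_reason = 2;    /* KVM_EXIT_IO */
        ASSERT_SKETCH(exit_reason == 2, "unexpected exit reason: %d",
                      exit_reason);
        return 0;
    }
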
3140 +diff --git a/tools/testing/selftests/kvm/x86_64/state_test.c b/tools/testing/selftests/kvm/x86_64/state_test.c
3141 +index 4b3f556265f1..30f75856cf39 100644
3142 +--- a/tools/testing/selftests/kvm/x86_64/state_test.c
3143 ++++ b/tools/testing/selftests/kvm/x86_64/state_test.c
3144 +@@ -134,6 +134,11 @@ int main(int argc, char *argv[])
3145 +
3146 + struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
3147 +
3148 ++ if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) {
3149 ++ fprintf(stderr, "immediate_exit not available, skipping test\n");
3150 ++ exit(KSFT_SKIP);
3151 ++ }
3152 ++
3153 + /* Create VM */
3154 + vm = vm_create_default(VCPU_ID, 0, guest_code);
3155 + vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
3156 +@@ -156,8 +161,6 @@ int main(int argc, char *argv[])
3157 + stage, run->exit_reason,
3158 + exit_reason_str(run->exit_reason));
3159 +
3160 +- memset(&regs1, 0, sizeof(regs1));
3161 +- vcpu_regs_get(vm, VCPU_ID, &regs1);
3162 + switch (get_ucall(vm, VCPU_ID, &uc)) {
3163 + case UCALL_ABORT:
3164 + TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0],
3165 +@@ -176,6 +179,17 @@ int main(int argc, char *argv[])
3166 + uc.args[1] == stage, "Unexpected register values vmexit #%lx, got %lx",
3167 + stage, (ulong)uc.args[1]);
3168 +
3169 ++ /*
3170 ++ * When KVM exits to userspace with KVM_EXIT_IO, KVM guarantees
3171 ++ * guest state is consistent only after userspace re-enters the
3172 ++ * kernel with KVM_RUN. Complete IO prior to migrating state
3173 ++ * to a new VM.
3174 ++ */
3175 ++ vcpu_run_complete_io(vm, VCPU_ID);
3176 ++
3177 ++ memset(&regs1, 0, sizeof(regs1));
3178 ++ vcpu_regs_get(vm, VCPU_ID, &regs1);
3179 ++
3180 + state = vcpu_save_state(vm, VCPU_ID);
3181 + kvm_vm_release(vm);
3182 +
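
The reordering above is the core of this fix, and the resulting save path is worth spelling out (descriptive sketch using the helpers named in the diff):

    /*
     * Ordering required when saving vCPU state after a ucall, which the
     * guest reports via a port write, i.e. a KVM_EXIT_IO:
     *
     *   _vcpu_run(vm, VCPU_ID);             guest exits with KVM_EXIT_IO
     *   vcpu_run_complete_io(vm, VCPU_ID);  re-enter with immediate_exit set
     *                                       so in-kernel emulation finishes
     *   vcpu_regs_get(vm, VCPU_ID, &regs1); only now is register state stable
     *   vcpu_save_state(vm, VCPU_ID);       safe to save and migrate
     */
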
3183 +diff --git a/virt/kvm/arm/hyp/vgic-v3-sr.c b/virt/kvm/arm/hyp/vgic-v3-sr.c
3184 +index 9652c453480f..3c3f7cda95c7 100644
3185 +--- a/virt/kvm/arm/hyp/vgic-v3-sr.c
3186 ++++ b/virt/kvm/arm/hyp/vgic-v3-sr.c
3187 +@@ -222,7 +222,7 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
3188 + }
3189 + }
3190 +
3191 +- if (used_lrs) {
3192 ++ if (used_lrs || cpu_if->its_vpe.its_vm) {
3193 + int i;
3194 + u32 elrsr;
3195 +
3196 +@@ -247,7 +247,7 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
3197 + u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
3198 + int i;
3199 +
3200 +- if (used_lrs) {
3201 ++ if (used_lrs || cpu_if->its_vpe.its_vm) {
3202 + write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
3203 +
3204 + for (i = 0; i < used_lrs; i++)
3205 +diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
3206 +index 5cc22cdaa5ba..31e22b615d99 100644
3207 +--- a/virt/kvm/arm/mmu.c
3208 ++++ b/virt/kvm/arm/mmu.c
3209 +@@ -1060,25 +1060,43 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
3210 + {
3211 + pmd_t *pmd, old_pmd;
3212 +
3213 ++retry:
3214 + pmd = stage2_get_pmd(kvm, cache, addr);
3215 + VM_BUG_ON(!pmd);
3216 +
3217 + old_pmd = *pmd;
3218 ++ /*
3219 ++ * Multiple vcpus faulting on the same PMD entry, can
3220 ++ * lead to them sequentially updating the PMD with the
3221 ++ * same value. Following the break-before-make
3222 ++ * (pmd_clear() followed by tlb_flush()) process can
3223 ++ * hinder forward progress due to refaults generated
3224 ++ * on missing translations.
3225 ++ *
3226 ++ * Skip updating the page table if the entry is
3227 ++ * unchanged.
3228 ++ */
3229 ++ if (pmd_val(old_pmd) == pmd_val(*new_pmd))
3230 ++ return 0;
3231 ++
3232 + if (pmd_present(old_pmd)) {
3233 + /*
3234 +- * Multiple vcpus faulting on the same PMD entry, can
3235 +- * lead to them sequentially updating the PMD with the
3236 +- * same value. Following the break-before-make
3237 +- * (pmd_clear() followed by tlb_flush()) process can
3238 +- * hinder forward progress due to refaults generated
3239 +- * on missing translations.
3240 ++ * If we already have PTE level mapping for this block,
3241 ++ * we must unmap it to avoid inconsistent TLB state and
3242 ++ * leaking the table page. We could end up in this situation
3243 ++ * if the memory slot was marked for dirty logging and was
3244 ++ * reverted, leaving PTE level mappings for the pages accessed
3245 ++ * during the period. So, unmap the PTE level mapping for this
3246 ++ * block and retry, as we could have released the upper level
3247 ++ * table in the process.
3248 + *
3249 +- * Skip updating the page table if the entry is
3250 +- * unchanged.
3251 ++ * Normal THP split/merge follows mmu_notifier callbacks and do
3252 ++ * get handled accordingly.
3253 + */
3254 +- if (pmd_val(old_pmd) == pmd_val(*new_pmd))
3255 +- return 0;
3256 +-
3257 ++ if (!pmd_thp_or_huge(old_pmd)) {
3258 ++ unmap_stage2_range(kvm, addr & S2_PMD_MASK, S2_PMD_SIZE);
3259 ++ goto retry;
3260 ++ }
3261 + /*
3262 + * Mapping in huge pages should only happen through a
3263 + * fault. If a page is merged into a transparent huge
3264 +@@ -1090,8 +1108,7 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
3265 + * should become splitting first, unmapped, merged,
3266 + * and mapped back in on-demand.
3267 + */
3268 +- VM_BUG_ON(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
3269 +-
3270 ++ WARN_ON_ONCE(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
3271 + pmd_clear(pmd);
3272 + kvm_tlb_flush_vmid_ipa(kvm, addr);
3273 + } else {
3274 +@@ -1107,6 +1124,7 @@ static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cac
3275 + {
3276 + pud_t *pudp, old_pud;
3277 +
3278 ++retry:
3279 + pudp = stage2_get_pud(kvm, cache, addr);
3280 + VM_BUG_ON(!pudp);
3281 +
3282 +@@ -1114,14 +1132,23 @@ static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cac
3283 +
3284 + /*
3285 + * A large number of vcpus faulting on the same stage 2 entry,
3286 +- * can lead to a refault due to the
3287 +- * stage2_pud_clear()/tlb_flush(). Skip updating the page
3288 +- * tables if there is no change.
3289 ++ * can lead to a refault due to the stage2_pud_clear()/tlb_flush().
3290 ++ * Skip updating the page tables if there is no change.
3291 + */
3292 + if (pud_val(old_pud) == pud_val(*new_pudp))
3293 + return 0;
3294 +
3295 + if (stage2_pud_present(kvm, old_pud)) {
3296 ++ /*
3297 ++ * If we already have table level mapping for this block, unmap
3298 ++ * the range for this block and retry.
3299 ++ */
3300 ++ if (!stage2_pud_huge(kvm, old_pud)) {
3301 ++ unmap_stage2_range(kvm, addr & S2_PUD_MASK, S2_PUD_SIZE);
3302 ++ goto retry;
3303 ++ }
3304 ++
3305 ++ WARN_ON_ONCE(kvm_pud_pfn(old_pud) != kvm_pud_pfn(*new_pudp));
3306 + stage2_pud_clear(kvm, pudp);
3307 + kvm_tlb_flush_vmid_ipa(kvm, addr);
3308 + } else {
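
The two stage-2 hunks above share one idea: when a huge (block) mapping is about to replace a stale table-level entry, for example PTE mappings left behind by a reverted dirty-logging memslot, the fault handler must unmap the whole block range and retry the walk from the top, because tearing the table down can free intermediate levels that the cached walk still points at. A standalone toy model of that control flow, with a single simulated PMD slot (names are illustrative; build with: gcc bbm.c):

    #include <stdio.h>

    enum ent { EMPTY, TABLE, HUGE };   /* what the simulated PMD slot holds */
    static enum ent pmd = TABLE;       /* stale PTE table from dirty logging */

    static void unmap_range(void) { pmd = EMPTY; puts("unmapped stale table"); }
    static void tlb_flush(void)   { puts("tlb flushed"); }

    static int set_huge(enum ent new_ent)
    {
    retry:
        if (pmd == new_ent)
            return 0;                  /* unchanged: skip break-before-make */

        if (pmd != EMPTY) {
            if (pmd != HUGE) {         /* table entry where a block belongs */
                unmap_range();
                goto retry;            /* re-walk: levels may have been freed */
            }
            pmd = EMPTY;               /* break... */
            tlb_flush();
        }
        pmd = new_ent;                 /* ...before make */
        return 0;
    }

    int main(void) { return set_huge(HUGE); }
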
3309 +diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
3310 +index ab3f47745d9c..fcb2fceaa4a5 100644
3311 +--- a/virt/kvm/arm/vgic/vgic-its.c
3312 ++++ b/virt/kvm/arm/vgic/vgic-its.c
3313 +@@ -754,8 +754,9 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
3314 + u64 indirect_ptr, type = GITS_BASER_TYPE(baser);
3315 + phys_addr_t base = GITS_BASER_ADDR_48_to_52(baser);
3316 + int esz = GITS_BASER_ENTRY_SIZE(baser);
3317 +- int index;
3318 ++ int index, idx;
3319 + gfn_t gfn;
3320 ++ bool ret;
3321 +
3322 + switch (type) {
3323 + case GITS_BASER_TYPE_DEVICE:
3324 +@@ -782,7 +783,8 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
3325 +
3326 + if (eaddr)
3327 + *eaddr = addr;
3328 +- return kvm_is_visible_gfn(its->dev->kvm, gfn);
3329 ++
3330 ++ goto out;
3331 + }
3332 +
3333 + /* calculate and check the index into the 1st level */
3334 +@@ -812,7 +814,12 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
3335 +
3336 + if (eaddr)
3337 + *eaddr = indirect_ptr;
3338 +- return kvm_is_visible_gfn(its->dev->kvm, gfn);
3339 ++
3340 ++out:
3341 ++ idx = srcu_read_lock(&its->dev->kvm->srcu);
3342 ++ ret = kvm_is_visible_gfn(its->dev->kvm, gfn);
3343 ++ srcu_read_unlock(&its->dev->kvm->srcu, idx);
3344 ++ return ret;
3345 + }
3346 +
3347 + static int vgic_its_alloc_collection(struct vgic_its *its,
3348 +@@ -1919,7 +1926,7 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
3349 + ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
3350 + ite->collection->collection_id;
3351 + val = cpu_to_le64(val);
3352 +- return kvm_write_guest(kvm, gpa, &val, ite_esz);
3353 ++ return kvm_write_guest_lock(kvm, gpa, &val, ite_esz);
3354 + }
3355 +
3356 + /**
3357 +@@ -2066,7 +2073,7 @@ static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
3358 + (itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) |
3359 + (dev->num_eventid_bits - 1));
3360 + val = cpu_to_le64(val);
3361 +- return kvm_write_guest(kvm, ptr, &val, dte_esz);
3362 ++ return kvm_write_guest_lock(kvm, ptr, &val, dte_esz);
3363 + }
3364 +
3365 + /**
3366 +@@ -2246,7 +2253,7 @@ static int vgic_its_save_cte(struct vgic_its *its,
3367 + ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) |
3368 + collection->collection_id);
3369 + val = cpu_to_le64(val);
3370 +- return kvm_write_guest(its->dev->kvm, gpa, &val, esz);
3371 ++ return kvm_write_guest_lock(its->dev->kvm, gpa, &val, esz);
3372 + }
3373 +
3374 + static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
3375 +@@ -2317,7 +2324,7 @@ static int vgic_its_save_collection_table(struct vgic_its *its)
3376 + */
3377 + val = 0;
3378 + BUG_ON(cte_esz > sizeof(val));
3379 +- ret = kvm_write_guest(its->dev->kvm, gpa, &val, cte_esz);
3380 ++ ret = kvm_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
3381 + return ret;
3382 + }
3383 +
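
Two distinct fixes share the vgic-its.c diff above: vgic_its_check_id() now routes both of its exits through a common tail that wraps kvm_is_visible_gfn() in an SRCU read-side critical section, since memslot lookups are only safe under kvm->srcu, and the table-save paths switch from kvm_write_guest() to kvm_write_guest_lock(), the variant that takes that lock itself (the same substitution the vgic-v3.c hunks below make). For the read-side discipline itself, here is a rough userspace analogue built on liburcu's classic API, an assumption on my part rather than code from the patch (build with: gcc rcu.c -lurcu; the kernel's SRCU flavour additionally hands back an index that must be passed to the unlock):

    #include <stdio.h>
    #include <urcu.h>

    static int table_a = 1, table_b = 2;
    static int *table = &table_a;        /* RCU-protected pointer */

    int main(void)
    {
        rcu_register_thread();

        rcu_read_lock();                 /* like srcu_read_lock(&kvm->srcu) */
        int v = *rcu_dereference(table); /* table cannot be freed while held */
        rcu_read_unlock();               /* like srcu_read_unlock() */

        rcu_assign_pointer(table, &table_b);
        synchronize_rcu();               /* waits out all current readers */

        printf("read %d\n", v);
        rcu_unregister_thread();
        return 0;
    }
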
3384 +diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
3385 +index 4ee0aeb9a905..89260964be73 100644
3386 +--- a/virt/kvm/arm/vgic/vgic-v3.c
3387 ++++ b/virt/kvm/arm/vgic/vgic-v3.c
3388 +@@ -358,7 +358,7 @@ retry:
3389 + if (status) {
3390 + /* clear consumed data */
3391 + val &= ~(1 << bit_nr);
3392 +- ret = kvm_write_guest(kvm, ptr, &val, 1);
3393 ++ ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
3394 + if (ret)
3395 + return ret;
3396 + }
3397 +@@ -409,7 +409,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
3398 + else
3399 + val &= ~(1 << bit_nr);
3400 +
3401 +- ret = kvm_write_guest(kvm, ptr, &val, 1);
3402 ++ ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
3403 + if (ret)
3404 + return ret;
3405 + }
3406 +diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
3407 +index abd9c7352677..3af69f2a3866 100644
3408 +--- a/virt/kvm/arm/vgic/vgic.c
3409 ++++ b/virt/kvm/arm/vgic/vgic.c
3410 +@@ -867,15 +867,21 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
3411 + * either observe the new interrupt before or after doing this check,
3412 + * and introducing additional synchronization mechanism doesn't change
3413 + * this.
3414 ++ *
3415 ++ * Note that we still need to go through the whole thing if anything
3416 ++ * can be directly injected (GICv4).
3417 + */
3418 +- if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
3419 ++ if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head) &&
3420 ++ !vgic_supports_direct_msis(vcpu->kvm))
3421 + return;
3422 +
3423 + DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
3424 +
3425 +- raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
3426 +- vgic_flush_lr_state(vcpu);
3427 +- raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
3428 ++ if (!list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) {
3429 ++ raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
3430 ++ vgic_flush_lr_state(vcpu);
3431 ++ raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
3432 ++ }
3433 +
3434 + if (can_access_vgic_from_kernel())
3435 + vgic_restore_state(vcpu);