Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.5 commit in: /
Date: Thu, 05 Mar 2020 16:27:32
Message-Id: 1583425630.b32327fabc409799ced22b412852f0b9871edd51.mpagano@gentoo
1 commit: b32327fabc409799ced22b412852f0b9871edd51
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Thu Mar 5 16:27:10 2020 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Thu Mar 5 16:27:10 2020 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b32327fa
7
8 Linux patch 5.5.8
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1007_linux-5.5.8.patch | 8013 ++++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 8017 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 7611ed2..e58ee4a 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -71,6 +71,10 @@ Patch: 1006_linux-5.5.7.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.5.7
23
24 +Patch: 1007_linux-5.5.8.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.5.8
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1007_linux-5.5.8.patch b/1007_linux-5.5.8.patch
33 new file mode 100644
34 index 0000000..e0458f6
35 --- /dev/null
36 +++ b/1007_linux-5.5.8.patch
37 @@ -0,0 +1,8013 @@
38 +diff --git a/Documentation/networking/nf_flowtable.txt b/Documentation/networking/nf_flowtable.txt
39 +index ca2136c76042..0bf32d1121be 100644
40 +--- a/Documentation/networking/nf_flowtable.txt
41 ++++ b/Documentation/networking/nf_flowtable.txt
42 +@@ -76,7 +76,7 @@ flowtable and add one rule to your forward chain.
43 +
44 + table inet x {
45 + flowtable f {
46 +- hook ingress priority 0 devices = { eth0, eth1 };
47 ++ hook ingress priority 0; devices = { eth0, eth1 };
48 + }
49 + chain y {
50 + type filter hook forward priority 0; policy accept;
51 +diff --git a/Documentation/sphinx/parallel-wrapper.sh b/Documentation/sphinx/parallel-wrapper.sh
52 +index 7daf5133bdd3..e54c44ce117d 100644
53 +--- a/Documentation/sphinx/parallel-wrapper.sh
54 ++++ b/Documentation/sphinx/parallel-wrapper.sh
55 +@@ -30,4 +30,4 @@ if [ -n "$parallel" ] ; then
56 + parallel="-j$parallel"
57 + fi
58 +
59 +-exec "$sphinx" "$parallel" "$@"
60 ++exec "$sphinx" $parallel "$@"
61 +diff --git a/Makefile b/Makefile
62 +index 0f64b92fa39a..a1e5190e4721 100644
63 +--- a/Makefile
64 ++++ b/Makefile
65 +@@ -1,7 +1,7 @@
66 + # SPDX-License-Identifier: GPL-2.0
67 + VERSION = 5
68 + PATCHLEVEL = 5
69 +-SUBLEVEL = 7
70 ++SUBLEVEL = 8
71 + EXTRAVERSION =
72 + NAME = Kleptomaniac Octopus
73 +
74 +diff --git a/arch/arm/boot/dts/stihxxx-b2120.dtsi b/arch/arm/boot/dts/stihxxx-b2120.dtsi
75 +index 60e11045ad76..d051f080e52e 100644
76 +--- a/arch/arm/boot/dts/stihxxx-b2120.dtsi
77 ++++ b/arch/arm/boot/dts/stihxxx-b2120.dtsi
78 +@@ -46,7 +46,7 @@
79 + /* DAC */
80 + format = "i2s";
81 + mclk-fs = <256>;
82 +- frame-inversion = <1>;
83 ++ frame-inversion;
84 + cpu {
85 + sound-dai = <&sti_uni_player2>;
86 + };
87 +diff --git a/arch/arm/include/asm/vdso/vsyscall.h b/arch/arm/include/asm/vdso/vsyscall.h
88 +index c4166f317071..cff87d8d30da 100644
89 +--- a/arch/arm/include/asm/vdso/vsyscall.h
90 ++++ b/arch/arm/include/asm/vdso/vsyscall.h
91 +@@ -34,9 +34,9 @@ struct vdso_data *__arm_get_k_vdso_data(void)
92 + #define __arch_get_k_vdso_data __arm_get_k_vdso_data
93 +
94 + static __always_inline
95 +-int __arm_update_vdso_data(void)
96 ++bool __arm_update_vdso_data(void)
97 + {
98 +- return !cntvct_ok;
99 ++ return cntvct_ok;
100 + }
101 + #define __arch_update_vdso_data __arm_update_vdso_data
102 +
103 +diff --git a/arch/mips/include/asm/sync.h b/arch/mips/include/asm/sync.h
104 +index 7c6a1095f556..aabd097933fe 100644
105 +--- a/arch/mips/include/asm/sync.h
106 ++++ b/arch/mips/include/asm/sync.h
107 +@@ -155,9 +155,11 @@
108 + * effective barrier as noted by commit 6b07d38aaa52 ("MIPS: Octeon: Use
109 + * optimized memory barrier primitives."). Here we specify that the affected
110 + * sync instructions should be emitted twice.
111 ++ * Note that this expression is evaluated by the assembler (not the compiler),
112 ++ * and that the assembler evaluates '==' as 0 or -1, not 0 or 1.
113 + */
114 + #ifdef CONFIG_CPU_CAVIUM_OCTEON
115 +-# define __SYNC_rpt(type) (1 + (type == __SYNC_wmb))
116 ++# define __SYNC_rpt(type) (1 - (type == __SYNC_wmb))
117 + #else
118 + # define __SYNC_rpt(type) 1
119 + #endif
120 +diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
121 +index 6176b9acba95..d0d832ab3d3b 100644
122 +--- a/arch/mips/kernel/vpe.c
123 ++++ b/arch/mips/kernel/vpe.c
124 +@@ -134,7 +134,7 @@ void release_vpe(struct vpe *v)
125 + {
126 + list_del(&v->list);
127 + if (v->load_addr)
128 +- release_progmem(v);
129 ++ release_progmem(v->load_addr);
130 + kfree(v);
131 + }
132 +
133 +diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
134 +index f4cad5163bf2..ffb3d94bf0cc 100644
135 +--- a/arch/riscv/kernel/traps.c
136 ++++ b/arch/riscv/kernel/traps.c
137 +@@ -156,6 +156,6 @@ void __init trap_init(void)
138 + csr_write(CSR_SCRATCH, 0);
139 + /* Set the exception vector address */
140 + csr_write(CSR_TVEC, &handle_exception);
141 +- /* Enable all interrupts */
142 +- csr_write(CSR_IE, -1);
143 ++ /* Enable interrupts */
144 ++ csr_write(CSR_IE, IE_SIE | IE_EIE);
145 + }
146 +diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
147 +index 3be51aa06e67..dff6623804c2 100644
148 +--- a/arch/x86/events/intel/core.c
149 ++++ b/arch/x86/events/intel/core.c
150 +@@ -4765,6 +4765,7 @@ __init int intel_pmu_init(void)
151 + break;
152 +
153 + case INTEL_FAM6_ATOM_TREMONT_D:
154 ++ case INTEL_FAM6_ATOM_TREMONT:
155 + x86_pmu.late_ack = true;
156 + memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
157 + sizeof(hw_cache_event_ids));
158 +diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
159 +index e1daf4151e11..4814c964692c 100644
160 +--- a/arch/x86/events/intel/cstate.c
161 ++++ b/arch/x86/events/intel/cstate.c
162 +@@ -40,17 +40,18 @@
163 + * Model specific counters:
164 + * MSR_CORE_C1_RES: CORE C1 Residency Counter
165 + * perf code: 0x00
166 +- * Available model: SLM,AMT,GLM,CNL
167 ++ * Available model: SLM,AMT,GLM,CNL,TNT
168 + * Scope: Core (each processor core has a MSR)
169 + * MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
170 + * perf code: 0x01
171 + * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,GLM,
172 +- * CNL,KBL,CML
173 ++ * CNL,KBL,CML,TNT
174 + * Scope: Core
175 + * MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
176 + * perf code: 0x02
177 + * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
178 +- * SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL
179 ++ * SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL,
180 ++ * TNT
181 + * Scope: Core
182 + * MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
183 + * perf code: 0x03
184 +@@ -60,17 +61,18 @@
185 + * MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter.
186 + * perf code: 0x00
187 + * Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
188 +- * KBL,CML,ICL,TGL
189 ++ * KBL,CML,ICL,TGL,TNT
190 + * Scope: Package (physical package)
191 + * MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
192 + * perf code: 0x01
193 + * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL,
194 +- * GLM,CNL,KBL,CML,ICL,TGL
195 ++ * GLM,CNL,KBL,CML,ICL,TGL,TNT
196 + * Scope: Package (physical package)
197 + * MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter.
198 + * perf code: 0x02
199 +- * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
200 +- * SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL
201 ++ * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
202 ++ * SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL,
203 ++ * TNT
204 + * Scope: Package (physical package)
205 + * MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
206 + * perf code: 0x03
207 +@@ -87,7 +89,8 @@
208 + * Scope: Package (physical package)
209 + * MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
210 + * perf code: 0x06
211 +- * Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL
212 ++ * Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL,
213 ++ * TNT
214 + * Scope: Package (physical package)
215 + *
216 + */
217 +@@ -640,8 +643,9 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
218 +
219 + X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT, glm_cstates),
220 + X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_D, glm_cstates),
221 +-
222 + X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_PLUS, glm_cstates),
223 ++ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_TREMONT_D, glm_cstates),
224 ++ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_TREMONT, glm_cstates),
225 +
226 + X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_L, icl_cstates),
227 + X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE, icl_cstates),
228 +diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
229 +index 6f86650b3f77..a949f6f55991 100644
230 +--- a/arch/x86/events/msr.c
231 ++++ b/arch/x86/events/msr.c
232 +@@ -75,8 +75,9 @@ static bool test_intel(int idx, void *data)
233 +
234 + case INTEL_FAM6_ATOM_GOLDMONT:
235 + case INTEL_FAM6_ATOM_GOLDMONT_D:
236 +-
237 + case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
238 ++ case INTEL_FAM6_ATOM_TREMONT_D:
239 ++ case INTEL_FAM6_ATOM_TREMONT:
240 +
241 + case INTEL_FAM6_XEON_PHI_KNL:
242 + case INTEL_FAM6_XEON_PHI_KNM:
243 +diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
244 +index e49b77283924..181c992f448c 100644
245 +--- a/arch/x86/kernel/cpu/resctrl/internal.h
246 ++++ b/arch/x86/kernel/cpu/resctrl/internal.h
247 +@@ -57,6 +57,7 @@ static inline struct rdt_fs_context *rdt_fc2context(struct fs_context *fc)
248 + }
249 +
250 + DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
251 ++DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
252 +
253 + /**
254 + * struct mon_evt - Entry in the event list of a resource
255 +diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
256 +index 397206f23d14..773124b0e18a 100644
257 +--- a/arch/x86/kernel/cpu/resctrl/monitor.c
258 ++++ b/arch/x86/kernel/cpu/resctrl/monitor.c
259 +@@ -514,7 +514,7 @@ void mbm_handle_overflow(struct work_struct *work)
260 +
261 + mutex_lock(&rdtgroup_mutex);
262 +
263 +- if (!static_branch_likely(&rdt_enable_key))
264 ++ if (!static_branch_likely(&rdt_mon_enable_key))
265 + goto out_unlock;
266 +
267 + d = get_domain_from_cpu(cpu, &rdt_resources_all[RDT_RESOURCE_L3]);
268 +@@ -543,7 +543,7 @@ void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms)
269 + unsigned long delay = msecs_to_jiffies(delay_ms);
270 + int cpu;
271 +
272 +- if (!static_branch_likely(&rdt_enable_key))
273 ++ if (!static_branch_likely(&rdt_mon_enable_key))
274 + return;
275 + cpu = cpumask_any(&dom->cpu_mask);
276 + dom->mbm_work_cpu = cpu;
277 +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
278 +index f05123acaa64..a1daebe2a60f 100644
279 +--- a/arch/x86/kvm/lapic.c
280 ++++ b/arch/x86/kvm/lapic.c
281 +@@ -1150,7 +1150,7 @@ void kvm_bitmap_or_dest_vcpus(struct kvm *kvm, struct kvm_lapic_irq *irq,
282 + if (!kvm_apic_present(vcpu))
283 + continue;
284 + if (!kvm_apic_match_dest(vcpu, NULL,
285 +- irq->delivery_mode,
286 ++ irq->shorthand,
287 + irq->dest_id,
288 + irq->dest_mode))
289 + continue;
290 +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
291 +index aace3b6ca2f7..2b3d8feec313 100644
292 +--- a/arch/x86/kvm/svm.c
293 ++++ b/arch/x86/kvm/svm.c
294 +@@ -1307,6 +1307,47 @@ static void shrink_ple_window(struct kvm_vcpu *vcpu)
295 + }
296 + }
297 +
298 ++/*
299 ++ * The default MMIO mask is a single bit (excluding the present bit),
300 ++ * which could conflict with the memory encryption bit. Check for
301 ++ * memory encryption support and override the default MMIO mask if
302 ++ * memory encryption is enabled.
303 ++ */
304 ++static __init void svm_adjust_mmio_mask(void)
305 ++{
306 ++ unsigned int enc_bit, mask_bit;
307 ++ u64 msr, mask;
308 ++
309 ++ /* If there is no memory encryption support, use existing mask */
310 ++ if (cpuid_eax(0x80000000) < 0x8000001f)
311 ++ return;
312 ++
313 ++ /* If memory encryption is not enabled, use existing mask */
314 ++ rdmsrl(MSR_K8_SYSCFG, msr);
315 ++ if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
316 ++ return;
317 ++
318 ++ enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
319 ++ mask_bit = boot_cpu_data.x86_phys_bits;
320 ++
321 ++ /* Increment the mask bit if it is the same as the encryption bit */
322 ++ if (enc_bit == mask_bit)
323 ++ mask_bit++;
324 ++
325 ++ /*
326 ++ * If the mask bit location is below 52, then some bits above the
327 ++ * physical addressing limit will always be reserved, so use the
328 ++ * rsvd_bits() function to generate the mask. This mask, along with
329 ++ * the present bit, will be used to generate a page fault with
330 ++ * PFER.RSV = 1.
331 ++ *
332 ++ * If the mask bit location is 52 (or above), then clear the mask.
333 ++ */
334 ++ mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;
335 ++
336 ++ kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK);
337 ++}
338 ++
339 + static __init int svm_hardware_setup(void)
340 + {
341 + int cpu;
342 +@@ -1361,6 +1402,8 @@ static __init int svm_hardware_setup(void)
343 + }
344 + }
345 +
346 ++ svm_adjust_mmio_mask();
347 ++
348 + for_each_possible_cpu(cpu) {
349 + r = svm_cpu_init(cpu);
350 + if (r)
351 +diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
352 +index af5a36dfc88a..63addc413d99 100644
353 +--- a/arch/x86/kvm/vmx/nested.c
354 ++++ b/arch/x86/kvm/vmx/nested.c
355 +@@ -4781,32 +4781,28 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
356 + {
357 + unsigned long field;
358 + u64 field_value;
359 ++ struct vcpu_vmx *vmx = to_vmx(vcpu);
360 + unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
361 + u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
362 + int len;
363 + gva_t gva = 0;
364 +- struct vmcs12 *vmcs12;
365 ++ struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
366 ++ : get_vmcs12(vcpu);
367 + struct x86_exception e;
368 + short offset;
369 +
370 + if (!nested_vmx_check_permission(vcpu))
371 + return 1;
372 +
373 +- if (to_vmx(vcpu)->nested.current_vmptr == -1ull)
374 ++ /*
375 ++ * In VMX non-root operation, when the VMCS-link pointer is -1ull,
376 ++ * any VMREAD sets the ALU flags for VMfailInvalid.
377 ++ */
378 ++ if (vmx->nested.current_vmptr == -1ull ||
379 ++ (is_guest_mode(vcpu) &&
380 ++ get_vmcs12(vcpu)->vmcs_link_pointer == -1ull))
381 + return nested_vmx_failInvalid(vcpu);
382 +
383 +- if (!is_guest_mode(vcpu))
384 +- vmcs12 = get_vmcs12(vcpu);
385 +- else {
386 +- /*
387 +- * When vmcs->vmcs_link_pointer is -1ull, any VMREAD
388 +- * to shadowed-field sets the ALU flags for VMfailInvalid.
389 +- */
390 +- if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
391 +- return nested_vmx_failInvalid(vcpu);
392 +- vmcs12 = get_shadow_vmcs12(vcpu);
393 +- }
394 +-
395 + /* Decode instruction info and find the field to read */
396 + field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
397 +
398 +@@ -4885,13 +4881,20 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
399 + */
400 + u64 field_value = 0;
401 + struct x86_exception e;
402 +- struct vmcs12 *vmcs12;
403 ++ struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
404 ++ : get_vmcs12(vcpu);
405 + short offset;
406 +
407 + if (!nested_vmx_check_permission(vcpu))
408 + return 1;
409 +
410 +- if (vmx->nested.current_vmptr == -1ull)
411 ++ /*
412 ++ * In VMX non-root operation, when the VMCS-link pointer is -1ull,
413 ++ * any VMWRITE sets the ALU flags for VMfailInvalid.
414 ++ */
415 ++ if (vmx->nested.current_vmptr == -1ull ||
416 ++ (is_guest_mode(vcpu) &&
417 ++ get_vmcs12(vcpu)->vmcs_link_pointer == -1ull))
418 + return nested_vmx_failInvalid(vcpu);
419 +
420 + if (vmx_instruction_info & (1u << 10))
421 +@@ -4910,6 +4913,12 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
422 +
423 +
424 + field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
425 ++
426 ++ offset = vmcs_field_to_offset(field);
427 ++ if (offset < 0)
428 ++ return nested_vmx_failValid(vcpu,
429 ++ VMXERR_UNSUPPORTED_VMCS_COMPONENT);
430 ++
431 + /*
432 + * If the vCPU supports "VMWRITE to any supported field in the
433 + * VMCS," then the "read-only" fields are actually read/write.
434 +@@ -4919,29 +4928,12 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
435 + return nested_vmx_failValid(vcpu,
436 + VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
437 +
438 +- if (!is_guest_mode(vcpu)) {
439 +- vmcs12 = get_vmcs12(vcpu);
440 +-
441 +- /*
442 +- * Ensure vmcs12 is up-to-date before any VMWRITE that dirties
443 +- * vmcs12, else we may crush a field or consume a stale value.
444 +- */
445 +- if (!is_shadow_field_rw(field))
446 +- copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
447 +- } else {
448 +- /*
449 +- * When vmcs->vmcs_link_pointer is -1ull, any VMWRITE
450 +- * to shadowed-field sets the ALU flags for VMfailInvalid.
451 +- */
452 +- if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
453 +- return nested_vmx_failInvalid(vcpu);
454 +- vmcs12 = get_shadow_vmcs12(vcpu);
455 +- }
456 +-
457 +- offset = vmcs_field_to_offset(field);
458 +- if (offset < 0)
459 +- return nested_vmx_failValid(vcpu,
460 +- VMXERR_UNSUPPORTED_VMCS_COMPONENT);
461 ++ /*
462 ++ * Ensure vmcs12 is up-to-date before any VMWRITE that dirties
463 ++ * vmcs12, else we may crush a field or consume a stale value.
464 ++ */
465 ++ if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field))
466 ++ copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
467 +
468 + /*
469 + * Some Intel CPUs intentionally drop the reserved bits of the AR byte
470 +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
471 +index be438bc7cfa3..3e381b31b9a6 100644
472 +--- a/arch/x86/kvm/vmx/vmx.c
473 ++++ b/arch/x86/kvm/vmx/vmx.c
474 +@@ -7179,6 +7179,7 @@ static int vmx_check_intercept_io(struct kvm_vcpu *vcpu,
475 + else
476 + intercept = nested_vmx_check_io_bitmaps(vcpu, port, size);
477 +
478 ++ /* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */
479 + return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
480 + }
481 +
482 +@@ -7208,6 +7209,20 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
483 + case x86_intercept_outs:
484 + return vmx_check_intercept_io(vcpu, info);
485 +
486 ++ case x86_intercept_lgdt:
487 ++ case x86_intercept_lidt:
488 ++ case x86_intercept_lldt:
489 ++ case x86_intercept_ltr:
490 ++ case x86_intercept_sgdt:
491 ++ case x86_intercept_sidt:
492 ++ case x86_intercept_sldt:
493 ++ case x86_intercept_str:
494 ++ if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC))
495 ++ return X86EMUL_CONTINUE;
496 ++
497 ++ /* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */
498 ++ break;
499 ++
500 + /* TODO: check more intercepts... */
501 + default:
502 + break;
503 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
504 +index e594fd2719dd..dafb5aff200f 100644
505 +--- a/arch/x86/kvm/x86.c
506 ++++ b/arch/x86/kvm/x86.c
507 +@@ -9225,12 +9225,6 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
508 +
509 + void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
510 + {
511 +- vcpu->arch.apf.msr_val = 0;
512 +-
513 +- vcpu_load(vcpu);
514 +- kvm_mmu_unload(vcpu);
515 +- vcpu_put(vcpu);
516 +-
517 + kvm_arch_vcpu_free(vcpu);
518 + }
519 +
520 +diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
521 +index b5516b04ffc0..d827a4a3e946 100644
522 +--- a/drivers/acpi/acpi_watchdog.c
523 ++++ b/drivers/acpi/acpi_watchdog.c
524 +@@ -126,12 +126,11 @@ void __init acpi_watchdog_init(void)
525 + gas = &entries[i].register_region;
526 +
527 + res.start = gas->address;
528 ++ res.end = res.start + ACPI_ACCESS_BYTE_WIDTH(gas->access_width) - 1;
529 + if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
530 + res.flags = IORESOURCE_MEM;
531 +- res.end = res.start + ALIGN(gas->access_width, 4) - 1;
532 + } else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
533 + res.flags = IORESOURCE_IO;
534 +- res.end = res.start + gas->access_width - 1;
535 + } else {
536 + pr_warn("Unsupported address space: %u\n",
537 + gas->space_id);
538 +diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
539 +index 50200d1c06ea..6095b6df8a81 100644
540 +--- a/drivers/bus/Kconfig
541 ++++ b/drivers/bus/Kconfig
542 +@@ -139,7 +139,6 @@ config TEGRA_ACONNECT
543 + tristate "Tegra ACONNECT Bus Driver"
544 + depends on ARCH_TEGRA_210_SOC
545 + depends on OF && PM
546 +- select PM_CLK
547 + help
548 + Driver for the Tegra ACONNECT bus which is used to interface with
549 + the devices inside the Audio Processing Engine (APE) for Tegra210.
550 +diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
551 +index 22c6a2e61236..8ac390c2b514 100644
552 +--- a/drivers/char/ipmi/ipmi_ssif.c
553 ++++ b/drivers/char/ipmi/ipmi_ssif.c
554 +@@ -775,10 +775,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
555 + flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
556 + msg = ssif_info->curr_msg;
557 + if (msg) {
558 ++ if (data) {
559 ++ if (len > IPMI_MAX_MSG_LENGTH)
560 ++ len = IPMI_MAX_MSG_LENGTH;
561 ++ memcpy(msg->rsp, data, len);
562 ++ } else {
563 ++ len = 0;
564 ++ }
565 + msg->rsp_size = len;
566 +- if (msg->rsp_size > IPMI_MAX_MSG_LENGTH)
567 +- msg->rsp_size = IPMI_MAX_MSG_LENGTH;
568 +- memcpy(msg->rsp, data, msg->rsp_size);
569 + ssif_info->curr_msg = NULL;
570 + }
571 +
572 +diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
573 +index 7ed313ad6e43..d9e17b91c68e 100644
574 +--- a/drivers/clk/qcom/clk-rpmh.c
575 ++++ b/drivers/clk/qcom/clk-rpmh.c
576 +@@ -481,9 +481,9 @@ static int clk_rpmh_probe(struct platform_device *pdev)
577 + }
578 +
579 + static const struct of_device_id clk_rpmh_match_table[] = {
580 ++ { .compatible = "qcom,sc7180-rpmh-clk", .data = &clk_rpmh_sc7180},
581 + { .compatible = "qcom,sdm845-rpmh-clk", .data = &clk_rpmh_sdm845},
582 + { .compatible = "qcom,sm8150-rpmh-clk", .data = &clk_rpmh_sm8150},
583 +- { .compatible = "qcom,sc7180-rpmh-clk", .data = &clk_rpmh_sc7180},
584 + { }
585 + };
586 + MODULE_DEVICE_TABLE(of, clk_rpmh_match_table);
587 +diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
588 +index 4adac3a8c265..b60d349e3b1e 100644
589 +--- a/drivers/cpufreq/cpufreq.c
590 ++++ b/drivers/cpufreq/cpufreq.c
591 +@@ -1074,9 +1074,17 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
592 + pol = policy->last_policy;
593 + } else if (def_gov) {
594 + pol = cpufreq_parse_policy(def_gov->name);
595 +- } else {
596 +- return -ENODATA;
597 ++ /*
598 ++ * In case the default governor is neither "performance"
599 ++ * nor "powersave", fall back to the initial policy
600 ++ * value set by the driver.
601 ++ */
602 ++ if (pol == CPUFREQ_POLICY_UNKNOWN)
603 ++ pol = policy->policy;
604 + }
605 ++ if (pol != CPUFREQ_POLICY_PERFORMANCE &&
606 ++ pol != CPUFREQ_POLICY_POWERSAVE)
607 ++ return -ENODATA;
608 + }
609 +
610 + return cpufreq_set_policy(policy, gov, pol);
611 +diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
612 +index e99f082d15df..c5a34be182ca 100644
613 +--- a/drivers/devfreq/devfreq.c
614 ++++ b/drivers/devfreq/devfreq.c
615 +@@ -738,7 +738,6 @@ struct devfreq *devfreq_add_device(struct device *dev,
616 + {
617 + struct devfreq *devfreq;
618 + struct devfreq_governor *governor;
619 +- static atomic_t devfreq_no = ATOMIC_INIT(-1);
620 + int err = 0;
621 +
622 + if (!dev || !profile || !governor_name) {
623 +@@ -800,8 +799,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
624 + devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev);
625 + atomic_set(&devfreq->suspend_count, 0);
626 +
627 +- dev_set_name(&devfreq->dev, "devfreq%d",
628 +- atomic_inc_return(&devfreq_no));
629 ++ dev_set_name(&devfreq->dev, "%s", dev_name(dev));
630 + err = device_register(&devfreq->dev);
631 + if (err) {
632 + mutex_unlock(&devfreq->lock);
633 +diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
634 +index 95662a4ff4c4..99bbaf629b8d 100644
635 +--- a/drivers/edac/skx_common.c
636 ++++ b/drivers/edac/skx_common.c
637 +@@ -256,7 +256,7 @@ int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm)
638 +
639 + pdev = pci_get_device(PCI_VENDOR_ID_INTEL, did, NULL);
640 + if (!pdev) {
641 +- skx_printk(KERN_ERR, "Can't get tolm/tohm\n");
642 ++ edac_dbg(2, "Can't get tolm/tohm\n");
643 + return -ENODEV;
644 + }
645 +
646 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
647 +index 30a1e3ac21d6..4169abc32219 100644
648 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
649 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
650 +@@ -1357,7 +1357,7 @@ amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
651 +
652 + static struct drm_driver kms_driver = {
653 + .driver_features =
654 +- DRIVER_USE_AGP | DRIVER_ATOMIC |
655 ++ DRIVER_ATOMIC |
656 + DRIVER_GEM |
657 + DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ |
658 + DRIVER_SYNCOBJ_TIMELINE,
659 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
660 +index b499a3de8bb6..c75cc97eca44 100644
661 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
662 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
663 +@@ -192,6 +192,7 @@ struct amdgpu_gmc {
664 + uint32_t srbm_soft_reset;
665 + bool prt_warning;
666 + uint64_t stolen_size;
667 ++ uint32_t sdpif_register;
668 + /* apertures */
669 + u64 shared_aperture_start;
670 + u64 shared_aperture_end;
671 +diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
672 +index a5b68b5e452f..0b88c9f877ec 100644
673 +--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
674 ++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
675 +@@ -1203,6 +1203,19 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
676 + }
677 + }
678 +
679 ++/**
680 ++ * gmc_v9_0_restore_registers - restores regs
681 ++ *
682 ++ * @adev: amdgpu_device pointer
683 ++ *
684 ++ * This restores register values, saved at suspend.
685 ++ */
686 ++static void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
687 ++{
688 ++ if (adev->asic_type == CHIP_RAVEN)
689 ++ WREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
690 ++}
691 ++
692 + /**
693 + * gmc_v9_0_gart_enable - gart enable
694 + *
695 +@@ -1307,6 +1320,20 @@ static int gmc_v9_0_hw_init(void *handle)
696 + return r;
697 + }
698 +
699 ++/**
700 ++ * gmc_v9_0_save_registers - saves regs
701 ++ *
702 ++ * @adev: amdgpu_device pointer
703 ++ *
704 ++ * This saves potential register values that should be
705 ++ * restored upon resume
706 ++ */
707 ++static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
708 ++{
709 ++ if (adev->asic_type == CHIP_RAVEN)
710 ++ adev->gmc.sdpif_register = RREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
711 ++}
712 ++
713 + /**
714 + * gmc_v9_0_gart_disable - gart disable
715 + *
716 +@@ -1343,9 +1370,16 @@ static int gmc_v9_0_hw_fini(void *handle)
717 +
718 + static int gmc_v9_0_suspend(void *handle)
719 + {
720 ++ int r;
721 + struct amdgpu_device *adev = (struct amdgpu_device *)handle;
722 +
723 +- return gmc_v9_0_hw_fini(adev);
724 ++ r = gmc_v9_0_hw_fini(adev);
725 ++ if (r)
726 ++ return r;
727 ++
728 ++ gmc_v9_0_save_registers(adev);
729 ++
730 ++ return 0;
731 + }
732 +
733 + static int gmc_v9_0_resume(void *handle)
734 +@@ -1353,6 +1387,7 @@ static int gmc_v9_0_resume(void *handle)
735 + int r;
736 + struct amdgpu_device *adev = (struct amdgpu_device *)handle;
737 +
738 ++ gmc_v9_0_restore_registers(adev);
739 + r = gmc_v9_0_hw_init(adev);
740 + if (r)
741 + return r;
742 +diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
743 +index b864869cc7e3..6fa7422c51da 100644
744 +--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
745 ++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
746 +@@ -91,6 +91,12 @@ ifdef CONFIG_DRM_AMD_DC_DCN2_1
747 + ###############################################################################
748 + CLK_MGR_DCN21 = rn_clk_mgr.o rn_clk_mgr_vbios_smu.o
749 +
750 ++# prevent build errors regarding soft-float vs hard-float FP ABI tags
751 ++# this code is currently unused on ppc64, as it applies to Renoir APUs only
752 ++ifdef CONFIG_PPC64
753 ++CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn21/rn_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
754 ++endif
755 ++
756 + AMD_DAL_CLK_MGR_DCN21 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn21/,$(CLK_MGR_DCN21))
757 +
758 + AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN21)
759 +diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
760 +index dbf063856846..5f683d118d2a 100644
761 +--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
762 ++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
763 +@@ -149,6 +149,12 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
764 + rn_vbios_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz);
765 + }
766 +
767 ++ // workaround: Limit dppclk to 100 MHz to avoid lower eDP panel switch to plus 4K monitor underflow.
768 ++ if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
769 ++ if (new_clocks->dppclk_khz < 100000)
770 ++ new_clocks->dppclk_khz = 100000;
771 ++ }
772 ++
773 + if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
774 + if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
775 + dpp_clock_lowered = true;
776 +diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
777 +index 793c0cec407f..5fcffb29317e 100644
778 +--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
779 ++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
780 +@@ -398,7 +398,7 @@ static bool acquire(
781 + {
782 + enum gpio_result result;
783 +
784 +- if (!is_engine_available(engine))
785 ++ if ((engine == NULL) || !is_engine_available(engine))
786 + return false;
787 +
788 + result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
789 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
790 +index ac8c18fadefc..448bc9b39942 100644
791 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
792 ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
793 +@@ -493,7 +493,6 @@ static void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
794 + dpp->funcs->dpp_dppclk_control(dpp, false, false);
795 +
796 + hubp->power_gated = true;
797 +- dc->optimized_required = false; /* We're powering off, no need to optimize */
798 +
799 + dc->hwss.plane_atomic_power_down(dc,
800 + pipe_ctx->plane_res.dpp,
801 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
802 +index 83cda43a1b6b..77741b18c85b 100644
803 +--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
804 ++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
805 +@@ -57,6 +57,7 @@
806 + #include "dcn20/dcn20_dccg.h"
807 + #include "dcn21_hubbub.h"
808 + #include "dcn10/dcn10_resource.h"
809 ++#include "dce110/dce110_resource.h"
810 +
811 + #include "dcn20/dcn20_dwb.h"
812 + #include "dcn20/dcn20_mmhubbub.h"
813 +@@ -867,6 +868,7 @@ static const struct dc_debug_options debug_defaults_diags = {
814 + enum dcn20_clk_src_array_id {
815 + DCN20_CLK_SRC_PLL0,
816 + DCN20_CLK_SRC_PLL1,
817 ++ DCN20_CLK_SRC_PLL2,
818 + DCN20_CLK_SRC_TOTAL_DCN21
819 + };
820 +
821 +@@ -1730,6 +1732,10 @@ static bool construct(
822 + dcn21_clock_source_create(ctx, ctx->dc_bios,
823 + CLOCK_SOURCE_COMBO_PHY_PLL1,
824 + &clk_src_regs[1], false);
825 ++ pool->base.clock_sources[DCN20_CLK_SRC_PLL2] =
826 ++ dcn21_clock_source_create(ctx, ctx->dc_bios,
827 ++ CLOCK_SOURCE_COMBO_PHY_PLL2,
828 ++ &clk_src_regs[2], false);
829 +
830 + pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN21;
831 +
832 +diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
833 +index b6f74bf4af02..27bb8c1ab858 100644
834 +--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
835 ++++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
836 +@@ -7376,6 +7376,8 @@
837 + #define mmCRTC4_CRTC_DRR_CONTROL 0x0f3e
838 + #define mmCRTC4_CRTC_DRR_CONTROL_BASE_IDX 2
839 +
840 ++#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 0x395d
841 ++#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX 2
842 +
843 + // addressBlock: dce_dc_fmt4_dispdec
844 + // base address: 0x2000
845 +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
846 +index f2418a1cfe68..b5875063b97c 100644
847 +--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
848 ++++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
849 +@@ -257,8 +257,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
850 + with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
851 + freed = i915_gem_shrink(i915, -1UL, NULL,
852 + I915_SHRINK_BOUND |
853 +- I915_SHRINK_UNBOUND |
854 +- I915_SHRINK_ACTIVE);
855 ++ I915_SHRINK_UNBOUND);
856 + }
857 +
858 + return freed;
859 +@@ -337,7 +336,6 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
860 + freed_pages = 0;
861 + with_intel_runtime_pm(&i915->runtime_pm, wakeref)
862 + freed_pages += i915_gem_shrink(i915, -1UL, NULL,
863 +- I915_SHRINK_ACTIVE |
864 + I915_SHRINK_BOUND |
865 + I915_SHRINK_UNBOUND |
866 + I915_SHRINK_WRITEBACK);
867 +diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
868 +index 2477a1e5a166..ae139f0877ae 100644
869 +--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
870 ++++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
871 +@@ -151,12 +151,12 @@ static void dmabuf_gem_object_free(struct kref *kref)
872 + dmabuf_obj = container_of(pos,
873 + struct intel_vgpu_dmabuf_obj, list);
874 + if (dmabuf_obj == obj) {
875 ++ list_del(pos);
876 + intel_gvt_hypervisor_put_vfio_device(vgpu);
877 + idr_remove(&vgpu->object_idr,
878 + dmabuf_obj->dmabuf_id);
879 + kfree(dmabuf_obj->info);
880 + kfree(dmabuf_obj);
881 +- list_del(pos);
882 + break;
883 + }
884 + }
885 +diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
886 +index 85bd9bf4f6ee..487af6ea9972 100644
887 +--- a/drivers/gpu/drm/i915/gvt/vgpu.c
888 ++++ b/drivers/gpu/drm/i915/gvt/vgpu.c
889 +@@ -560,9 +560,9 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
890 +
891 + intel_vgpu_reset_mmio(vgpu, dmlr);
892 + populate_pvinfo_page(vgpu);
893 +- intel_vgpu_reset_display(vgpu);
894 +
895 + if (dmlr) {
896 ++ intel_vgpu_reset_display(vgpu);
897 + intel_vgpu_reset_cfg_space(vgpu);
898 + /* only reset the failsafe mode when dmlr reset */
899 + vgpu->failsafe = false;
900 +diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
901 +index c84f0a8b3f2c..b73fbb65e14b 100644
902 +--- a/drivers/gpu/drm/msm/msm_drv.c
903 ++++ b/drivers/gpu/drm/msm/msm_drv.c
904 +@@ -441,6 +441,14 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
905 + if (ret)
906 + goto err_msm_uninit;
907 +
908 ++ if (!dev->dma_parms) {
909 ++ dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
910 ++ GFP_KERNEL);
911 ++ if (!dev->dma_parms)
912 ++ return -ENOMEM;
913 ++ }
914 ++ dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
915 ++
916 + msm_gem_shrinker_init(ddev);
917 +
918 + switch (get_mdp_ver(pdev)) {
919 +diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
920 +index fd74e2611185..8696af1ee14d 100644
921 +--- a/drivers/gpu/drm/radeon/radeon_drv.c
922 ++++ b/drivers/gpu/drm/radeon/radeon_drv.c
923 +@@ -37,6 +37,7 @@
924 + #include <linux/vga_switcheroo.h>
925 + #include <linux/mmu_notifier.h>
926 +
927 ++#include <drm/drm_agpsupport.h>
928 + #include <drm/drm_crtc_helper.h>
929 + #include <drm/drm_drv.h>
930 + #include <drm/drm_fb_helper.h>
931 +@@ -325,6 +326,7 @@ static int radeon_pci_probe(struct pci_dev *pdev,
932 + const struct pci_device_id *ent)
933 + {
934 + unsigned long flags = 0;
935 ++ struct drm_device *dev;
936 + int ret;
937 +
938 + if (!ent)
939 +@@ -365,7 +367,44 @@ static int radeon_pci_probe(struct pci_dev *pdev,
940 + if (ret)
941 + return ret;
942 +
943 +- return drm_get_pci_dev(pdev, ent, &kms_driver);
944 ++ dev = drm_dev_alloc(&kms_driver, &pdev->dev);
945 ++ if (IS_ERR(dev))
946 ++ return PTR_ERR(dev);
947 ++
948 ++ ret = pci_enable_device(pdev);
949 ++ if (ret)
950 ++ goto err_free;
951 ++
952 ++ dev->pdev = pdev;
953 ++#ifdef __alpha__
954 ++ dev->hose = pdev->sysdata;
955 ++#endif
956 ++
957 ++ pci_set_drvdata(pdev, dev);
958 ++
959 ++ if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP))
960 ++ dev->agp = drm_agp_init(dev);
961 ++ if (dev->agp) {
962 ++ dev->agp->agp_mtrr = arch_phys_wc_add(
963 ++ dev->agp->agp_info.aper_base,
964 ++ dev->agp->agp_info.aper_size *
965 ++ 1024 * 1024);
966 ++ }
967 ++
968 ++ ret = drm_dev_register(dev, ent->driver_data);
969 ++ if (ret)
970 ++ goto err_agp;
971 ++
972 ++ return 0;
973 ++
974 ++err_agp:
975 ++ if (dev->agp)
976 ++ arch_phys_wc_del(dev->agp->agp_mtrr);
977 ++ kfree(dev->agp);
978 ++ pci_disable_device(pdev);
979 ++err_free:
980 ++ drm_dev_put(dev);
981 ++ return ret;
982 + }
983 +
984 + static void
985 +@@ -575,7 +614,7 @@ radeon_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
986 +
987 + static struct drm_driver kms_driver = {
988 + .driver_features =
989 +- DRIVER_USE_AGP | DRIVER_GEM | DRIVER_RENDER,
990 ++ DRIVER_GEM | DRIVER_RENDER,
991 + .load = radeon_driver_load_kms,
992 + .open = radeon_driver_open_kms,
993 + .postclose = radeon_driver_postclose_kms,
994 +diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
995 +index e85c554eeaa9..2bb0187c5bc7 100644
996 +--- a/drivers/gpu/drm/radeon/radeon_kms.c
997 ++++ b/drivers/gpu/drm/radeon/radeon_kms.c
998 +@@ -31,6 +31,7 @@
999 + #include <linux/uaccess.h>
1000 + #include <linux/vga_switcheroo.h>
1001 +
1002 ++#include <drm/drm_agpsupport.h>
1003 + #include <drm/drm_fb_helper.h>
1004 + #include <drm/drm_file.h>
1005 + #include <drm/drm_ioctl.h>
1006 +@@ -77,6 +78,11 @@ void radeon_driver_unload_kms(struct drm_device *dev)
1007 + radeon_modeset_fini(rdev);
1008 + radeon_device_fini(rdev);
1009 +
1010 ++ if (dev->agp)
1011 ++ arch_phys_wc_del(dev->agp->agp_mtrr);
1012 ++ kfree(dev->agp);
1013 ++ dev->agp = NULL;
1014 ++
1015 + done_free:
1016 + kfree(rdev);
1017 + dev->dev_private = NULL;
1018 +diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c
1019 +index ae79a7c66737..fa704153cb00 100644
1020 +--- a/drivers/hid/hid-alps.c
1021 ++++ b/drivers/hid/hid-alps.c
1022 +@@ -730,7 +730,7 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
1023 + if (data->has_sp) {
1024 + input2 = input_allocate_device();
1025 + if (!input2) {
1026 +- input_free_device(input2);
1027 ++ ret = -ENOMEM;
1028 + goto exit;
1029 + }
1030 +
1031 +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
1032 +index 851fe54ea59e..359616e3efbb 100644
1033 +--- a/drivers/hid/hid-core.c
1034 ++++ b/drivers/hid/hid-core.c
1035 +@@ -1741,7 +1741,9 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
1036 +
1037 + rsize = ((report->size - 1) >> 3) + 1;
1038 +
1039 +- if (rsize > HID_MAX_BUFFER_SIZE)
1040 ++ if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE)
1041 ++ rsize = HID_MAX_BUFFER_SIZE - 1;
1042 ++ else if (rsize > HID_MAX_BUFFER_SIZE)
1043 + rsize = HID_MAX_BUFFER_SIZE;
1044 +
1045 + if (csize < rsize) {
1046 +diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c
1047 +index c436e12feb23..6c55682c5974 100644
1048 +--- a/drivers/hid/hid-ite.c
1049 ++++ b/drivers/hid/hid-ite.c
1050 +@@ -41,8 +41,9 @@ static const struct hid_device_id ite_devices[] = {
1051 + { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
1052 + { HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) },
1053 + /* ITE8595 USB kbd ctlr, with Synaptics touchpad connected to it. */
1054 +- { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS,
1055 +- USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) },
1056 ++ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
1057 ++ USB_VENDOR_ID_SYNAPTICS,
1058 ++ USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) },
1059 + { }
1060 + };
1061 + MODULE_DEVICE_TABLE(hid, ite_devices);
1062 +diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
1063 +index a970b809d778..4140dea693e9 100644
1064 +--- a/drivers/hid/usbhid/hiddev.c
1065 ++++ b/drivers/hid/usbhid/hiddev.c
1066 +@@ -932,9 +932,9 @@ void hiddev_disconnect(struct hid_device *hid)
1067 + hiddev->exist = 0;
1068 +
1069 + if (hiddev->open) {
1070 +- mutex_unlock(&hiddev->existancelock);
1071 + hid_hw_close(hiddev->hid);
1072 + wake_up_interruptible(&hiddev->wait);
1073 ++ mutex_unlock(&hiddev->existancelock);
1074 + } else {
1075 + mutex_unlock(&hiddev->existancelock);
1076 + kfree(hiddev);
1077 +diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c
1078 +index 5255d3755411..1de23b4f3809 100644
1079 +--- a/drivers/i2c/busses/i2c-altera.c
1080 ++++ b/drivers/i2c/busses/i2c-altera.c
1081 +@@ -171,7 +171,7 @@ static void altr_i2c_init(struct altr_i2c_dev *idev)
1082 + /* SCL Low Time */
1083 + writel(t_low, idev->base + ALTR_I2C_SCL_LOW);
1084 + /* SDA Hold Time, 300ns */
1085 +- writel(div_u64(300 * clk_mhz, 1000), idev->base + ALTR_I2C_SDA_HOLD);
1086 ++ writel(3 * clk_mhz / 10, idev->base + ALTR_I2C_SDA_HOLD);
1087 +
1088 + /* Mask all master interrupt bits */
1089 + altr_i2c_int_enable(idev, ALTR_I2C_ALL_IRQ, false);
1090 +diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
1091 +index 25dcd73acd63..8f0e1f802f2d 100644
1092 +--- a/drivers/i2c/busses/i2c-jz4780.c
1093 ++++ b/drivers/i2c/busses/i2c-jz4780.c
1094 +@@ -73,25 +73,6 @@
1095 + #define JZ4780_I2C_STA_TFNF BIT(1)
1096 + #define JZ4780_I2C_STA_ACT BIT(0)
1097 +
1098 +-static const char * const jz4780_i2c_abrt_src[] = {
1099 +- "ABRT_7B_ADDR_NOACK",
1100 +- "ABRT_10ADDR1_NOACK",
1101 +- "ABRT_10ADDR2_NOACK",
1102 +- "ABRT_XDATA_NOACK",
1103 +- "ABRT_GCALL_NOACK",
1104 +- "ABRT_GCALL_READ",
1105 +- "ABRT_HS_ACKD",
1106 +- "SBYTE_ACKDET",
1107 +- "ABRT_HS_NORSTRT",
1108 +- "SBYTE_NORSTRT",
1109 +- "ABRT_10B_RD_NORSTRT",
1110 +- "ABRT_MASTER_DIS",
1111 +- "ARB_LOST",
1112 +- "SLVFLUSH_TXFIFO",
1113 +- "SLV_ARBLOST",
1114 +- "SLVRD_INTX",
1115 +-};
1116 +-
1117 + #define JZ4780_I2C_INTST_IGC BIT(11)
1118 + #define JZ4780_I2C_INTST_ISTT BIT(10)
1119 + #define JZ4780_I2C_INTST_ISTP BIT(9)
1120 +@@ -529,21 +510,8 @@ done:
1121 +
1122 + static void jz4780_i2c_txabrt(struct jz4780_i2c *i2c, int src)
1123 + {
1124 +- int i;
1125 +-
1126 +- dev_err(&i2c->adap.dev, "txabrt: 0x%08x\n", src);
1127 +- dev_err(&i2c->adap.dev, "device addr=%x\n",
1128 +- jz4780_i2c_readw(i2c, JZ4780_I2C_TAR));
1129 +- dev_err(&i2c->adap.dev, "send cmd count:%d %d\n",
1130 +- i2c->cmd, i2c->cmd_buf[i2c->cmd]);
1131 +- dev_err(&i2c->adap.dev, "receive data count:%d %d\n",
1132 +- i2c->cmd, i2c->data_buf[i2c->cmd]);
1133 +-
1134 +- for (i = 0; i < 16; i++) {
1135 +- if (src & BIT(i))
1136 +- dev_dbg(&i2c->adap.dev, "I2C TXABRT[%d]=%s\n",
1137 +- i, jz4780_i2c_abrt_src[i]);
1138 +- }
1139 ++ dev_dbg(&i2c->adap.dev, "txabrt: 0x%08x, cmd: %d, send: %d, recv: %d\n",
1140 ++ src, i2c->cmd, i2c->cmd_buf[i2c->cmd], i2c->data_buf[i2c->cmd]);
1141 + }
1142 +
1143 + static inline int jz4780_i2c_xfer_read(struct jz4780_i2c *i2c,
1144 +diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
1145 +index 5617434cbfb4..416341ada172 100644
1146 +--- a/drivers/infiniband/hw/hns/hns_roce_device.h
1147 ++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
1148 +@@ -423,7 +423,7 @@ struct hns_roce_mr_table {
1149 + struct hns_roce_wq {
1150 + u64 *wrid; /* Work request ID */
1151 + spinlock_t lock;
1152 +- int wqe_cnt; /* WQE num */
1153 ++ u32 wqe_cnt; /* WQE num */
1154 + int max_gs;
1155 + int offset;
1156 + int wqe_shift; /* WQE size */
1157 +@@ -647,7 +647,6 @@ struct hns_roce_qp {
1158 + u8 sdb_en;
1159 + u32 doorbell_qpn;
1160 + u32 sq_signal_bits;
1161 +- u32 sq_next_wqe;
1162 + struct hns_roce_wq sq;
1163 +
1164 + struct ib_umem *umem;
1165 +diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
1166 +index 2a2b2112f886..a31a21433f65 100644
1167 +--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
1168 ++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
1169 +@@ -74,8 +74,8 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
1170 + unsigned long flags = 0;
1171 + void *wqe = NULL;
1172 + __le32 doorbell[2];
1173 ++ u32 wqe_idx = 0;
1174 + int nreq = 0;
1175 +- u32 ind = 0;
1176 + int ret = 0;
1177 + u8 *smac;
1178 + int loopback;
1179 +@@ -88,7 +88,7 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
1180 + }
1181 +
1182 + spin_lock_irqsave(&qp->sq.lock, flags);
1183 +- ind = qp->sq_next_wqe;
1184 ++
1185 + for (nreq = 0; wr; ++nreq, wr = wr->next) {
1186 + if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
1187 + ret = -ENOMEM;
1188 +@@ -96,6 +96,8 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
1189 + goto out;
1190 + }
1191 +
1192 ++ wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);
1193 ++
1194 + if (unlikely(wr->num_sge > qp->sq.max_gs)) {
1195 + dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
1196 + wr->num_sge, qp->sq.max_gs);
1197 +@@ -104,9 +106,8 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
1198 + goto out;
1199 + }
1200 +
1201 +- wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
1202 +- qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
1203 +- wr->wr_id;
1204 ++ wqe = get_send_wqe(qp, wqe_idx);
1205 ++ qp->sq.wrid[wqe_idx] = wr->wr_id;
1206 +
1207 + /* Corresponding to the RC and RD type wqe process separately */
1208 + if (ibqp->qp_type == IB_QPT_GSI) {
1209 +@@ -210,7 +211,6 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
1210 + cpu_to_le32((wr->sg_list[1].addr) >> 32);
1211 + ud_sq_wqe->l_key1 =
1212 + cpu_to_le32(wr->sg_list[1].lkey);
1213 +- ind++;
1214 + } else if (ibqp->qp_type == IB_QPT_RC) {
1215 + u32 tmp_len = 0;
1216 +
1217 +@@ -308,7 +308,6 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
1218 + ctrl->flag |= cpu_to_le32(wr->num_sge <<
1219 + HNS_ROCE_WQE_SGE_NUM_BIT);
1220 + }
1221 +- ind++;
1222 + }
1223 + }
1224 +
1225 +@@ -336,7 +335,6 @@ out:
1226 + doorbell[1] = sq_db.u32_8;
1227 +
1228 + hns_roce_write64_k(doorbell, qp->sq.db_reg_l);
1229 +- qp->sq_next_wqe = ind;
1230 + }
1231 +
1232 + spin_unlock_irqrestore(&qp->sq.lock, flags);
1233 +@@ -348,12 +346,6 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
1234 + const struct ib_recv_wr *wr,
1235 + const struct ib_recv_wr **bad_wr)
1236 + {
1237 +- int ret = 0;
1238 +- int nreq = 0;
1239 +- int ind = 0;
1240 +- int i = 0;
1241 +- u32 reg_val;
1242 +- unsigned long flags = 0;
1243 + struct hns_roce_rq_wqe_ctrl *ctrl = NULL;
1244 + struct hns_roce_wqe_data_seg *scat = NULL;
1245 + struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
1246 +@@ -361,9 +353,14 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
1247 + struct device *dev = &hr_dev->pdev->dev;
1248 + struct hns_roce_rq_db rq_db;
1249 + __le32 doorbell[2] = {0};
1250 ++ unsigned long flags = 0;
1251 ++ unsigned int wqe_idx;
1252 ++ int ret = 0;
1253 ++ int nreq = 0;
1254 ++ int i = 0;
1255 ++ u32 reg_val;
1256 +
1257 + spin_lock_irqsave(&hr_qp->rq.lock, flags);
1258 +- ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);
1259 +
1260 + for (nreq = 0; wr; ++nreq, wr = wr->next) {
1261 + if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
1262 +@@ -373,6 +370,8 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
1263 + goto out;
1264 + }
1265 +
1266 ++ wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
1267 ++
1268 + if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
1269 + dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n",
1270 + wr->num_sge, hr_qp->rq.max_gs);
1271 +@@ -381,7 +380,7 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
1272 + goto out;
1273 + }
1274 +
1275 +- ctrl = get_recv_wqe(hr_qp, ind);
1276 ++ ctrl = get_recv_wqe(hr_qp, wqe_idx);
1277 +
1278 + roce_set_field(ctrl->rwqe_byte_12,
1279 + RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M,
1280 +@@ -393,9 +392,7 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
1281 + for (i = 0; i < wr->num_sge; i++)
1282 + set_data_seg(scat + i, wr->sg_list + i);
1283 +
1284 +- hr_qp->rq.wrid[ind] = wr->wr_id;
1285 +-
1286 +- ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
1287 ++ hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
1288 + }
1289 +
1290 + out:
1291 +@@ -2701,7 +2698,6 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
1292 + hr_qp->rq.tail = 0;
1293 + hr_qp->sq.head = 0;
1294 + hr_qp->sq.tail = 0;
1295 +- hr_qp->sq_next_wqe = 0;
1296 + }
1297 +
1298 + kfree(context);
1299 +@@ -3315,7 +3311,6 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
1300 + hr_qp->rq.tail = 0;
1301 + hr_qp->sq.head = 0;
1302 + hr_qp->sq.tail = 0;
1303 +- hr_qp->sq_next_wqe = 0;
1304 + }
1305 + out:
1306 + kfree(context);
1307 +diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
1308 +index cb8071a3e0d5..87186446dffb 100644
1309 +--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
1310 ++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
1311 +@@ -110,7 +110,7 @@ static void set_atomic_seg(struct hns_roce_wqe_atomic_seg *aseg,
1312 + }
1313 +
1314 + static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
1315 +- unsigned int *sge_ind)
1316 ++ unsigned int *sge_ind, int valid_num_sge)
1317 + {
1318 + struct hns_roce_v2_wqe_data_seg *dseg;
1319 + struct ib_sge *sg;
1320 +@@ -123,7 +123,7 @@ static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
1321 +
1322 + if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
1323 + num_in_wqe = HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE;
1324 +- extend_sge_num = wr->num_sge - num_in_wqe;
1325 ++ extend_sge_num = valid_num_sge - num_in_wqe;
1326 + sg = wr->sg_list + num_in_wqe;
1327 + shift = qp->hr_buf.page_shift;
1328 +
1329 +@@ -159,14 +159,16 @@ static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
1330 + static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
1331 + struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
1332 + void *wqe, unsigned int *sge_ind,
1333 ++ int valid_num_sge,
1334 + const struct ib_send_wr **bad_wr)
1335 + {
1336 + struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
1337 + struct hns_roce_v2_wqe_data_seg *dseg = wqe;
1338 + struct hns_roce_qp *qp = to_hr_qp(ibqp);
1339 ++ int j = 0;
1340 + int i;
1341 +
1342 +- if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
1343 ++ if (wr->send_flags & IB_SEND_INLINE && valid_num_sge) {
1344 + if (le32_to_cpu(rc_sq_wqe->msg_len) >
1345 + hr_dev->caps.max_sq_inline) {
1346 + *bad_wr = wr;
1347 +@@ -190,7 +192,7 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
1348 + roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
1349 + 1);
1350 + } else {
1351 +- if (wr->num_sge <= HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) {
1352 ++ if (valid_num_sge <= HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) {
1353 + for (i = 0; i < wr->num_sge; i++) {
1354 + if (likely(wr->sg_list[i].length)) {
1355 + set_data_seg_v2(dseg, wr->sg_list + i);
1356 +@@ -203,19 +205,21 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
1357 + V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
1358 + (*sge_ind) & (qp->sge.sge_cnt - 1));
1359 +
1360 +- for (i = 0; i < HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE; i++) {
1361 ++ for (i = 0; i < wr->num_sge &&
1362 ++ j < HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE; i++) {
1363 + if (likely(wr->sg_list[i].length)) {
1364 + set_data_seg_v2(dseg, wr->sg_list + i);
1365 + dseg++;
1366 ++ j++;
1367 + }
1368 + }
1369 +
1370 +- set_extend_sge(qp, wr, sge_ind);
1371 ++ set_extend_sge(qp, wr, sge_ind, valid_num_sge);
1372 + }
1373 +
1374 + roce_set_field(rc_sq_wqe->byte_16,
1375 + V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
1376 +- V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, wr->num_sge);
1377 ++ V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);
1378 + }
1379 +
1380 + return 0;
1381 +@@ -239,10 +243,11 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
1382 + struct device *dev = hr_dev->dev;
1383 + struct hns_roce_v2_db sq_db;
1384 + struct ib_qp_attr attr;
1385 +- unsigned int sge_ind;
1386 + unsigned int owner_bit;
1387 ++ unsigned int sge_idx;
1388 ++ unsigned int wqe_idx;
1389 + unsigned long flags;
1390 +- unsigned int ind;
1391 ++ int valid_num_sge;
1392 + void *wqe = NULL;
1393 + bool loopback;
1394 + int attr_mask;
1395 +@@ -269,8 +274,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
1396 + }
1397 +
1398 + spin_lock_irqsave(&qp->sq.lock, flags);
1399 +- ind = qp->sq_next_wqe;
1400 +- sge_ind = qp->next_sge;
1401 ++ sge_idx = qp->next_sge;
1402 +
1403 + for (nreq = 0; wr; ++nreq, wr = wr->next) {
1404 + if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
1405 +@@ -279,6 +283,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
1406 + goto out;
1407 + }
1408 +
1409 ++ wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);
1410 ++
1411 + if (unlikely(wr->num_sge > qp->sq.max_gs)) {
1412 + dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
1413 + wr->num_sge, qp->sq.max_gs);
1414 +@@ -287,14 +293,20 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
1415 + goto out;
1416 + }
1417 +
1418 +- wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
1419 +- qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
1420 +- wr->wr_id;
1421 +-
1422 ++ wqe = get_send_wqe(qp, wqe_idx);
1423 ++ qp->sq.wrid[wqe_idx] = wr->wr_id;
1424 + owner_bit =
1425 + ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
1426 ++ valid_num_sge = 0;
1427 + tmp_len = 0;
1428 +
1429 ++ for (i = 0; i < wr->num_sge; i++) {
1430 ++ if (likely(wr->sg_list[i].length)) {
1431 ++ tmp_len += wr->sg_list[i].length;
1432 ++ valid_num_sge++;
1433 ++ }
1434 ++ }
1435 ++
1436 + /* Corresponding to the QP type, wqe process separately */
1437 + if (ibqp->qp_type == IB_QPT_GSI) {
1438 + ud_sq_wqe = wqe;
1439 +@@ -330,9 +342,6 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
1440 + V2_UD_SEND_WQE_BYTE_4_OPCODE_S,
1441 + HNS_ROCE_V2_WQE_OP_SEND);
1442 +
1443 +- for (i = 0; i < wr->num_sge; i++)
1444 +- tmp_len += wr->sg_list[i].length;
1445 +-
1446 + ud_sq_wqe->msg_len =
1447 + cpu_to_le32(le32_to_cpu(ud_sq_wqe->msg_len) + tmp_len);
1448 +
1449 +@@ -368,12 +377,12 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
1450 + roce_set_field(ud_sq_wqe->byte_16,
1451 + V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
1452 + V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S,
1453 +- wr->num_sge);
1454 ++ valid_num_sge);
1455 +
1456 + roce_set_field(ud_sq_wqe->byte_20,
1457 + V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
1458 + V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
1459 +- sge_ind & (qp->sge.sge_cnt - 1));
1460 ++ sge_idx & (qp->sge.sge_cnt - 1));
1461 +
1462 + roce_set_field(ud_sq_wqe->byte_24,
1463 + V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
1464 +@@ -423,13 +432,10 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
1465 + memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0],
1466 + GID_LEN_V2);
1467 +
1468 +- set_extend_sge(qp, wr, &sge_ind);
1469 +- ind++;
1470 ++ set_extend_sge(qp, wr, &sge_idx, valid_num_sge);
1471 + } else if (ibqp->qp_type == IB_QPT_RC) {
1472 + rc_sq_wqe = wqe;
1473 + memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
1474 +- for (i = 0; i < wr->num_sge; i++)
1475 +- tmp_len += wr->sg_list[i].length;
1476 +
1477 + rc_sq_wqe->msg_len =
1478 + cpu_to_le32(le32_to_cpu(rc_sq_wqe->msg_len) + tmp_len);
1479 +@@ -550,15 +556,14 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
1480 + roce_set_field(rc_sq_wqe->byte_16,
1481 + V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
1482 + V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
1483 +- wr->num_sge);
1484 ++ valid_num_sge);
1485 + } else if (wr->opcode != IB_WR_REG_MR) {
1486 + ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe,
1487 +- wqe, &sge_ind, bad_wr);
1488 ++ wqe, &sge_idx,
1489 ++ valid_num_sge, bad_wr);
1490 + if (ret)
1491 + goto out;
1492 + }
1493 +-
1494 +- ind++;
1495 + } else {
1496 + dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type);
1497 + spin_unlock_irqrestore(&qp->sq.lock, flags);
1498 +@@ -588,8 +593,7 @@ out:
1499 +
1500 + hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg_l);
1501 +
1502 +- qp->sq_next_wqe = ind;
1503 +- qp->next_sge = sge_ind;
1504 ++ qp->next_sge = sge_idx;
1505 +
1506 + if (qp->state == IB_QPS_ERR) {
1507 + attr_mask = IB_QP_STATE;
1508 +@@ -623,13 +627,12 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
1509 + unsigned long flags;
1510 + void *wqe = NULL;
1511 + int attr_mask;
1512 ++ u32 wqe_idx;
1513 + int ret = 0;
1514 + int nreq;
1515 +- int ind;
1516 + int i;
1517 +
1518 + spin_lock_irqsave(&hr_qp->rq.lock, flags);
1519 +- ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);
1520 +
1521 + if (hr_qp->state == IB_QPS_RESET) {
1522 + spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
1523 +@@ -645,6 +648,8 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
1524 + goto out;
1525 + }
1526 +
1527 ++ wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
1528 ++
1529 + if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
1530 + dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n",
1531 + wr->num_sge, hr_qp->rq.max_gs);
1532 +@@ -653,7 +658,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
1533 + goto out;
1534 + }
1535 +
1536 +- wqe = get_recv_wqe(hr_qp, ind);
1537 ++ wqe = get_recv_wqe(hr_qp, wqe_idx);
1538 + dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
1539 + for (i = 0; i < wr->num_sge; i++) {
1540 + if (!wr->sg_list[i].length)
1541 +@@ -669,8 +674,8 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
1542 +
1543 + /* rq support inline data */
1544 + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
1545 +- sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
1546 +- hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt =
1547 ++ sge_list = hr_qp->rq_inl_buf.wqe_list[wqe_idx].sg_list;
1548 ++ hr_qp->rq_inl_buf.wqe_list[wqe_idx].sge_cnt =
1549 + (u32)wr->num_sge;
1550 + for (i = 0; i < wr->num_sge; i++) {
1551 + sge_list[i].addr =
1552 +@@ -679,9 +684,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
1553 + }
1554 + }
1555 +
1556 +- hr_qp->rq.wrid[ind] = wr->wr_id;
1557 +-
1558 +- ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
1559 ++ hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
1560 + }
1561 +
1562 + out:
1563 +@@ -4464,7 +4467,6 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
1564 + hr_qp->rq.tail = 0;
1565 + hr_qp->sq.head = 0;
1566 + hr_qp->sq.tail = 0;
1567 +- hr_qp->sq_next_wqe = 0;
1568 + hr_qp->next_sge = 0;
1569 + if (hr_qp->rq.wqe_cnt)
1570 + *hr_qp->rdb.db_record = 0;
1571 +diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
1572 +index 3bccfef40e7e..ac86363ce1a2 100644
1573 +--- a/drivers/infiniband/sw/siw/siw_cm.c
1574 ++++ b/drivers/infiniband/sw/siw/siw_cm.c
1575 +@@ -1225,10 +1225,9 @@ static void siw_cm_llp_data_ready(struct sock *sk)
1576 + read_lock(&sk->sk_callback_lock);
1577 +
1578 + cep = sk_to_cep(sk);
1579 +- if (!cep) {
1580 +- WARN_ON(1);
1581 ++ if (!cep)
1582 + goto out;
1583 +- }
1584 ++
1585 + siw_dbg_cep(cep, "state: %d\n", cep->state);
1586 +
1587 + switch (cep->state) {
1588 +diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
1589 +index 8c744578122a..a0d87ed9da69 100644
1590 +--- a/drivers/macintosh/therm_windtunnel.c
1591 ++++ b/drivers/macintosh/therm_windtunnel.c
1592 +@@ -300,9 +300,11 @@ static int control_loop(void *dummy)
1593 + /* i2c probing and setup */
1594 + /************************************************************************/
1595 +
1596 +-static int
1597 +-do_attach( struct i2c_adapter *adapter )
1598 ++static void do_attach(struct i2c_adapter *adapter)
1599 + {
1600 ++ struct i2c_board_info info = { };
1601 ++ struct device_node *np;
1602 ++
1603 + /* scan 0x48-0x4f (DS1775) and 0x2c-2x2f (ADM1030) */
1604 + static const unsigned short scan_ds1775[] = {
1605 + 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
1606 +@@ -313,25 +315,24 @@ do_attach( struct i2c_adapter *adapter )
1607 + I2C_CLIENT_END
1608 + };
1609 +
1610 +- if( strncmp(adapter->name, "uni-n", 5) )
1611 +- return 0;
1612 +-
1613 +- if( !x.running ) {
1614 +- struct i2c_board_info info;
1615 ++ if (x.running || strncmp(adapter->name, "uni-n", 5))
1616 ++ return;
1617 +
1618 +- memset(&info, 0, sizeof(struct i2c_board_info));
1619 +- strlcpy(info.type, "therm_ds1775", I2C_NAME_SIZE);
1620 ++ np = of_find_compatible_node(adapter->dev.of_node, NULL, "MAC,ds1775");
1621 ++ if (np) {
1622 ++ of_node_put(np);
1623 ++ } else {
1624 ++ strlcpy(info.type, "MAC,ds1775", I2C_NAME_SIZE);
1625 + i2c_new_probed_device(adapter, &info, scan_ds1775, NULL);
1626 ++ }
1627 +
1628 +- strlcpy(info.type, "therm_adm1030", I2C_NAME_SIZE);
1629 ++ np = of_find_compatible_node(adapter->dev.of_node, NULL, "MAC,adm1030");
1630 ++ if (np) {
1631 ++ of_node_put(np);
1632 ++ } else {
1633 ++ strlcpy(info.type, "MAC,adm1030", I2C_NAME_SIZE);
1634 + i2c_new_probed_device(adapter, &info, scan_adm1030, NULL);
1635 +-
1636 +- if( x.thermostat && x.fan ) {
1637 +- x.running = 1;
1638 +- x.poll_task = kthread_run(control_loop, NULL, "g4fand");
1639 +- }
1640 + }
1641 +- return 0;
1642 + }
1643 +
1644 + static int
1645 +@@ -404,8 +405,8 @@ out:
1646 + enum chip { ds1775, adm1030 };
1647 +
1648 + static const struct i2c_device_id therm_windtunnel_id[] = {
1649 +- { "therm_ds1775", ds1775 },
1650 +- { "therm_adm1030", adm1030 },
1651 ++ { "MAC,ds1775", ds1775 },
1652 ++ { "MAC,adm1030", adm1030 },
1653 + { }
1654 + };
1655 + MODULE_DEVICE_TABLE(i2c, therm_windtunnel_id);
1656 +@@ -414,6 +415,7 @@ static int
1657 + do_probe(struct i2c_client *cl, const struct i2c_device_id *id)
1658 + {
1659 + struct i2c_adapter *adapter = cl->adapter;
1660 ++ int ret = 0;
1661 +
1662 + if( !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA
1663 + | I2C_FUNC_SMBUS_WRITE_BYTE) )
1664 +@@ -421,11 +423,19 @@ do_probe(struct i2c_client *cl, const struct i2c_device_id *id)
1665 +
1666 + switch (id->driver_data) {
1667 + case adm1030:
1668 +- return attach_fan( cl );
1669 ++ ret = attach_fan(cl);
1670 ++ break;
1671 + case ds1775:
1672 +- return attach_thermostat(cl);
1673 ++ ret = attach_thermostat(cl);
1674 ++ break;
1675 + }
1676 +- return 0;
1677 ++
1678 ++ if (!x.running && x.thermostat && x.fan) {
1679 ++ x.running = 1;
1680 ++ x.poll_task = kthread_run(control_loop, NULL, "g4fand");
1681 ++ }
1682 ++
1683 ++ return ret;
1684 + }
1685 +
1686 + static struct i2c_driver g4fan_driver = {
1687 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
1688 +index 48d5ec770b94..d10805e5e623 100644
1689 +--- a/drivers/net/bonding/bond_main.c
1690 ++++ b/drivers/net/bonding/bond_main.c
1691 +@@ -3526,6 +3526,47 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res,
1692 + }
1693 + }
1694 +
1695 ++#ifdef CONFIG_LOCKDEP
1696 ++static int bond_get_lowest_level_rcu(struct net_device *dev)
1697 ++{
1698 ++ struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
1699 ++ struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
1700 ++ int cur = 0, max = 0;
1701 ++
1702 ++ now = dev;
1703 ++ iter = &dev->adj_list.lower;
1704 ++
1705 ++ while (1) {
1706 ++ next = NULL;
1707 ++ while (1) {
1708 ++ ldev = netdev_next_lower_dev_rcu(now, &iter);
1709 ++ if (!ldev)
1710 ++ break;
1711 ++
1712 ++ next = ldev;
1713 ++ niter = &ldev->adj_list.lower;
1714 ++ dev_stack[cur] = now;
1715 ++ iter_stack[cur++] = iter;
1716 ++ if (max <= cur)
1717 ++ max = cur;
1718 ++ break;
1719 ++ }
1720 ++
1721 ++ if (!next) {
1722 ++ if (!cur)
1723 ++ return max;
1724 ++ next = dev_stack[--cur];
1725 ++ niter = iter_stack[cur];
1726 ++ }
1727 ++
1728 ++ now = next;
1729 ++ iter = niter;
1730 ++ }
1731 ++
1732 ++ return max;
1733 ++}
1734 ++#endif
1735 ++
1736 + static void bond_get_stats(struct net_device *bond_dev,
1737 + struct rtnl_link_stats64 *stats)
1738 + {
1739 +@@ -3533,11 +3574,17 @@ static void bond_get_stats(struct net_device *bond_dev,
1740 + struct rtnl_link_stats64 temp;
1741 + struct list_head *iter;
1742 + struct slave *slave;
1743 ++ int nest_level = 0;
1744 +
1745 +- spin_lock(&bond->stats_lock);
1746 +- memcpy(stats, &bond->bond_stats, sizeof(*stats));
1747 +
1748 + rcu_read_lock();
1749 ++#ifdef CONFIG_LOCKDEP
1750 ++ nest_level = bond_get_lowest_level_rcu(bond_dev);
1751 ++#endif
1752 ++
1753 ++ spin_lock_nested(&bond->stats_lock, nest_level);
1754 ++ memcpy(stats, &bond->bond_stats, sizeof(*stats));
1755 ++
1756 + bond_for_each_slave_rcu(bond, slave, iter) {
1757 + const struct rtnl_link_stats64 *new =
1758 + dev_get_stats(slave->dev, &temp);
1759 +@@ -3547,10 +3594,10 @@ static void bond_get_stats(struct net_device *bond_dev,
1760 + /* save off the slave stats for the next run */
1761 + memcpy(&slave->slave_stats, new, sizeof(*new));
1762 + }
1763 +- rcu_read_unlock();
1764 +
1765 + memcpy(&bond->bond_stats, stats, sizeof(*stats));
1766 + spin_unlock(&bond->stats_lock);
1767 ++ rcu_read_unlock();
1768 + }
1769 +
1770 + static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
1771 +@@ -3640,6 +3687,8 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
1772 + case BOND_RELEASE_OLD:
1773 + case SIOCBONDRELEASE:
1774 + res = bond_release(bond_dev, slave_dev);
1775 ++ if (!res)
1776 ++ netdev_update_lockdep_key(slave_dev);
1777 + break;
1778 + case BOND_SETHWADDR_OLD:
1779 + case SIOCBONDSETHWADDR:
1780 +diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
1781 +index ddb3916d3506..215c10923289 100644
1782 +--- a/drivers/net/bonding/bond_options.c
1783 ++++ b/drivers/net/bonding/bond_options.c
1784 +@@ -1398,6 +1398,8 @@ static int bond_option_slaves_set(struct bonding *bond,
1785 + case '-':
1786 + slave_dbg(bond->dev, dev, "Releasing interface\n");
1787 + ret = bond_release(bond->dev, dev);
1788 ++ if (!ret)
1789 ++ netdev_update_lockdep_key(dev);
1790 + break;
1791 +
1792 + default:
1793 +diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
1794 +index 6a1ff4d43e3a..38b16efda4a9 100644
1795 +--- a/drivers/net/dsa/b53/b53_common.c
1796 ++++ b/drivers/net/dsa/b53/b53_common.c
1797 +@@ -1353,6 +1353,9 @@ void b53_vlan_add(struct dsa_switch *ds, int port,
1798 +
1799 + b53_get_vlan_entry(dev, vid, vl);
1800 +
1801 ++ if (vid == 0 && vid == b53_default_pvid(dev))
1802 ++ untagged = true;
1803 ++
1804 + vl->members |= BIT(port);
1805 + if (untagged && !dsa_is_cpu_port(ds, port))
1806 + vl->untag |= BIT(port);
1807 +diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
1808 +index ea62604fdf8c..1fb58f9ad80b 100644
1809 +--- a/drivers/net/ethernet/amazon/ena/ena_com.c
1810 ++++ b/drivers/net/ethernet/amazon/ena/ena_com.c
1811 +@@ -200,6 +200,11 @@ static void comp_ctxt_release(struct ena_com_admin_queue *queue,
1812 + static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
1813 + u16 command_id, bool capture)
1814 + {
1815 ++ if (unlikely(!queue->comp_ctx)) {
1816 ++ pr_err("Completion context is NULL\n");
1817 ++ return NULL;
1818 ++ }
1819 ++
1820 + if (unlikely(command_id >= queue->q_depth)) {
1821 + pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
1822 + command_id, queue->q_depth);
1823 +@@ -1041,9 +1046,41 @@ static int ena_com_get_feature(struct ena_com_dev *ena_dev,
1824 + feature_ver);
1825 + }
1826 +
1827 ++int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
1828 ++{
1829 ++ return ena_dev->rss.hash_func;
1830 ++}
1831 ++
1832 ++static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
1833 ++{
1834 ++ struct ena_admin_feature_rss_flow_hash_control *hash_key =
1835 ++ (ena_dev->rss).hash_key;
1836 ++
1837 ++ netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key));
1838 ++ /* The key is stored in the device in u32 array
1839 ++ * as well as the API requires the key to be passed in this
1840 ++ * format. Thus the size of our array should be divided by 4
1841 ++ */
1842 ++ hash_key->keys_num = sizeof(hash_key->key) / sizeof(u32);
1843 ++}
1844 ++
1845 + static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
1846 + {
1847 + struct ena_rss *rss = &ena_dev->rss;
1848 ++ struct ena_admin_feature_rss_flow_hash_control *hash_key;
1849 ++ struct ena_admin_get_feat_resp get_resp;
1850 ++ int rc;
1851 ++
1852 ++ hash_key = (ena_dev->rss).hash_key;
1853 ++
1854 ++ rc = ena_com_get_feature_ex(ena_dev, &get_resp,
1855 ++ ENA_ADMIN_RSS_HASH_FUNCTION,
1856 ++ ena_dev->rss.hash_key_dma_addr,
1857 ++ sizeof(ena_dev->rss.hash_key), 0);
1858 ++ if (unlikely(rc)) {
1859 ++ hash_key = NULL;
1860 ++ return -EOPNOTSUPP;
1861 ++ }
1862 +
1863 + rss->hash_key =
1864 + dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
1865 +@@ -1254,30 +1291,6 @@ static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
1866 + return 0;
1867 + }
1868 +
1869 +-static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
1870 +-{
1871 +- u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
1872 +- struct ena_rss *rss = &ena_dev->rss;
1873 +- u8 idx;
1874 +- u16 i;
1875 +-
1876 +- for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
1877 +- dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;
1878 +-
1879 +- for (i = 0; i < 1 << rss->tbl_log_size; i++) {
1880 +- if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
1881 +- return -EINVAL;
1882 +- idx = (u8)rss->rss_ind_tbl[i].cq_idx;
1883 +-
1884 +- if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
1885 +- return -EINVAL;
1886 +-
1887 +- rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
1888 +- }
1889 +-
1890 +- return 0;
1891 +-}
1892 +-
1893 + static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
1894 + u16 intr_delay_resolution)
1895 + {
1896 +@@ -2297,15 +2310,16 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
1897 +
1898 + switch (func) {
1899 + case ENA_ADMIN_TOEPLITZ:
1900 +- if (key_len > sizeof(hash_key->key)) {
1901 +- pr_err("key len (%hu) is bigger than the max supported (%zu)\n",
1902 +- key_len, sizeof(hash_key->key));
1903 +- return -EINVAL;
1904 ++ if (key) {
1905 ++ if (key_len != sizeof(hash_key->key)) {
1906 ++ pr_err("key len (%hu) doesn't equal the supported size (%zu)\n",
1907 ++ key_len, sizeof(hash_key->key));
1908 ++ return -EINVAL;
1909 ++ }
1910 ++ memcpy(hash_key->key, key, key_len);
1911 ++ rss->hash_init_val = init_val;
1912 ++ hash_key->keys_num = key_len >> 2;
1913 + }
1914 +-
1915 +- memcpy(hash_key->key, key, key_len);
1916 +- rss->hash_init_val = init_val;
1917 +- hash_key->keys_num = key_len >> 2;
1918 + break;
1919 + case ENA_ADMIN_CRC32:
1920 + rss->hash_init_val = init_val;
1921 +@@ -2342,7 +2356,11 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
1922 + if (unlikely(rc))
1923 + return rc;
1924 +
1925 +- rss->hash_func = get_resp.u.flow_hash_func.selected_func;
1926 ++ /* ffs() returns 1 in case the lsb is set */
1927 ++ rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func);
1928 ++ if (rss->hash_func)
1929 ++ rss->hash_func--;
1930 ++
1931 + if (func)
1932 + *func = rss->hash_func;
1933 +
1934 +@@ -2606,10 +2624,6 @@ int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
1935 + if (!ind_tbl)
1936 + return 0;
1937 +
1938 +- rc = ena_com_ind_tbl_convert_from_device(ena_dev);
1939 +- if (unlikely(rc))
1940 +- return rc;
1941 +-
1942 + for (i = 0; i < (1 << rss->tbl_log_size); i++)
1943 + ind_tbl[i] = rss->host_rss_ind_tbl[i];
1944 +
1945 +@@ -2626,9 +2640,15 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
1946 + if (unlikely(rc))
1947 + goto err_indr_tbl;
1948 +
1949 ++ /* The following function might return unsupported in case the
1950 ++ * device doesn't support setting the key / hash function. We can safely
1951 ++ * ignore this error and have indirection table support only.
1952 ++ */
1953 + rc = ena_com_hash_key_allocate(ena_dev);
1954 +- if (unlikely(rc))
1955 ++ if (unlikely(rc) && rc != -EOPNOTSUPP)
1956 + goto err_hash_key;
1957 ++ else if (rc != -EOPNOTSUPP)
1958 ++ ena_com_hash_key_fill_default_key(ena_dev);
1959 +
1960 + rc = ena_com_hash_ctrl_init(ena_dev);
1961 + if (unlikely(rc))
1962 +diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
1963 +index 0ce37d54ed10..469f298199a7 100644
1964 +--- a/drivers/net/ethernet/amazon/ena/ena_com.h
1965 ++++ b/drivers/net/ethernet/amazon/ena/ena_com.h
1966 +@@ -44,6 +44,7 @@
1967 + #include <linux/spinlock.h>
1968 + #include <linux/types.h>
1969 + #include <linux/wait.h>
1970 ++#include <linux/netdevice.h>
1971 +
1972 + #include "ena_common_defs.h"
1973 + #include "ena_admin_defs.h"
1974 +@@ -655,6 +656,14 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size);
1975 + */
1976 + void ena_com_rss_destroy(struct ena_com_dev *ena_dev);
1977 +
1978 ++/* ena_com_get_current_hash_function - Get RSS hash function
1979 ++ * @ena_dev: ENA communication layer struct
1980 ++ *
1981 ++ * Return the current hash function.
1982 ++ * @return: 0 or one of the ena_admin_hash_functions values.
1983 ++ */
1984 ++int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev);
1985 ++
1986 + /* ena_com_fill_hash_function - Fill RSS hash function
1987 + * @ena_dev: ENA communication layer struct
1988 + * @func: The hash function (Toeplitz or crc)
1989 +diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
1990 +index fc96c66b44cb..971f02ea55a1 100644
1991 +--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
1992 ++++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
1993 +@@ -636,6 +636,28 @@ static u32 ena_get_rxfh_key_size(struct net_device *netdev)
1994 + return ENA_HASH_KEY_SIZE;
1995 + }
1996 +
1997 ++static int ena_indirection_table_get(struct ena_adapter *adapter, u32 *indir)
1998 ++{
1999 ++ struct ena_com_dev *ena_dev = adapter->ena_dev;
2000 ++ int i, rc;
2001 ++
2002 ++ if (!indir)
2003 ++ return 0;
2004 ++
2005 ++ rc = ena_com_indirect_table_get(ena_dev, indir);
2006 ++ if (rc)
2007 ++ return rc;
2008 ++
2009 ++ /* Our internal representation of the indices is: even indices
2010 ++ * for Tx and uneven indices for Rx. We need to convert the Rx
2011 ++ * indices to be consecutive
2012 ++ */
2013 ++ for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++)
2014 ++ indir[i] = ENA_IO_RXQ_IDX_TO_COMBINED_IDX(indir[i]);
2015 ++
2016 ++ return rc;
2017 ++}
2018 ++
2019 + static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
2020 + u8 *hfunc)
2021 + {
2022 +@@ -644,11 +666,25 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
2023 + u8 func;
2024 + int rc;
2025 +
2026 +- rc = ena_com_indirect_table_get(adapter->ena_dev, indir);
2027 ++ rc = ena_indirection_table_get(adapter, indir);
2028 + if (rc)
2029 + return rc;
2030 +
2031 ++ /* We call this function in order to check if the device
2032 ++ * supports getting/setting the hash function.
2033 ++ */
2034 + rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func, key);
2035 ++
2036 ++ if (rc) {
2037 ++ if (rc == -EOPNOTSUPP) {
2038 ++ key = NULL;
2039 ++ hfunc = NULL;
2040 ++ rc = 0;
2041 ++ }
2042 ++
2043 ++ return rc;
2044 ++ }
2045 ++
2046 + if (rc)
2047 + return rc;
2048 +
2049 +@@ -657,7 +693,7 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
2050 + func = ETH_RSS_HASH_TOP;
2051 + break;
2052 + case ENA_ADMIN_CRC32:
2053 +- func = ETH_RSS_HASH_XOR;
2054 ++ func = ETH_RSS_HASH_CRC32;
2055 + break;
2056 + default:
2057 + netif_err(adapter, drv, netdev,
2058 +@@ -700,10 +736,13 @@ static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
2059 + }
2060 +
2061 + switch (hfunc) {
2062 ++ case ETH_RSS_HASH_NO_CHANGE:
2063 ++ func = ena_com_get_current_hash_function(ena_dev);
2064 ++ break;
2065 + case ETH_RSS_HASH_TOP:
2066 + func = ENA_ADMIN_TOEPLITZ;
2067 + break;
2068 +- case ETH_RSS_HASH_XOR:
2069 ++ case ETH_RSS_HASH_CRC32:
2070 + func = ENA_ADMIN_CRC32;
2071 + break;
2072 + default:
2073 +@@ -812,6 +851,7 @@ static const struct ethtool_ops ena_ethtool_ops = {
2074 + .set_channels = ena_set_channels,
2075 + .get_tunable = ena_get_tunable,
2076 + .set_tunable = ena_set_tunable,
2077 ++ .get_ts_info = ethtool_op_get_ts_info,
2078 + };
2079 +
2080 + void ena_set_ethtool_ops(struct net_device *netdev)
2081 +diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
2082 +index 948583fdcc28..1c1a41bd11da 100644
2083 +--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
2084 ++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
2085 +@@ -3049,8 +3049,8 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter)
2086 + if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2087 + return;
2088 +
2089 +- keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies +
2090 +- adapter->keep_alive_timeout);
2091 ++ keep_alive_expired = adapter->last_keep_alive_jiffies +
2092 ++ adapter->keep_alive_timeout;
2093 + if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
2094 + netif_err(adapter, drv, adapter->netdev,
2095 + "Keep alive watchdog timeout.\n");
2096 +@@ -3152,7 +3152,7 @@ static void ena_timer_service(struct timer_list *t)
2097 + }
2098 +
2099 + /* Reset the timer */
2100 +- mod_timer(&adapter->timer_service, jiffies + HZ);
2101 ++ mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
2102 + }
2103 +
2104 + static int ena_calc_max_io_queue_num(struct pci_dev *pdev,
2105 +diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
2106 +index bffd778f2ce3..2fe5eeea6b69 100644
2107 +--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
2108 ++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
2109 +@@ -129,6 +129,8 @@
2110 +
2111 + #define ENA_IO_TXQ_IDX(q) (2 * (q))
2112 + #define ENA_IO_RXQ_IDX(q) (2 * (q) + 1)
2113 ++#define ENA_IO_TXQ_IDX_TO_COMBINED_IDX(q) ((q) / 2)
2114 ++#define ENA_IO_RXQ_IDX_TO_COMBINED_IDX(q) (((q) - 1) / 2)
2115 +
2116 + #define ENA_MGMNT_IRQ_IDX 0
2117 + #define ENA_IO_IRQ_FIRST_IDX 1
2118 +diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
2119 +index d8612131c55e..cc8031ae9aa3 100644
2120 +--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
2121 ++++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
2122 +@@ -2020,7 +2020,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
2123 + int ret;
2124 +
2125 + ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
2126 +- XGENE_NUM_RX_RING, XGENE_NUM_TX_RING);
2127 ++ XGENE_NUM_TX_RING, XGENE_NUM_RX_RING);
2128 + if (!ndev)
2129 + return -ENOMEM;
2130 +
2131 +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
2132 +index a1f99bef4a68..7b55633d2cb9 100644
2133 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
2134 ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
2135 +@@ -722,6 +722,11 @@ static int aq_ethtool_set_priv_flags(struct net_device *ndev, u32 flags)
2136 + if (flags & ~AQ_PRIV_FLAGS_MASK)
2137 + return -EOPNOTSUPP;
2138 +
2139 ++ if (hweight32((flags | priv_flags) & AQ_HW_LOOPBACK_MASK) > 1) {
2140 ++ netdev_info(ndev, "Can't enable more than one loopback simultaneously\n");
2141 ++ return -EINVAL;
2142 ++ }
2143 ++
2144 + cfg->priv_flags = flags;
2145 +
2146 + if ((priv_flags ^ flags) & BIT(AQ_HW_LOOPBACK_DMA_NET)) {
2147 +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
2148 +index 6102251bb909..03ff92bc4a7f 100644
2149 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
2150 ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
2151 +@@ -163,7 +163,7 @@ aq_check_approve_fvlan(struct aq_nic_s *aq_nic,
2152 + }
2153 +
2154 + if ((aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2155 +- (!test_bit(be16_to_cpu(fsp->h_ext.vlan_tci),
2156 ++ (!test_bit(be16_to_cpu(fsp->h_ext.vlan_tci) & VLAN_VID_MASK,
2157 + aq_nic->active_vlans))) {
2158 + netdev_err(aq_nic->ndev,
2159 + "ethtool: unknown vlan-id specified");
2160 +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
2161 +index c85e3e29012c..e95f6a6bef73 100644
2162 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
2163 ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
2164 +@@ -533,8 +533,10 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
2165 + dx_buff->len,
2166 + DMA_TO_DEVICE);
2167 +
2168 +- if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa)))
2169 ++ if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa))) {
2170 ++ ret = 0;
2171 + goto exit;
2172 ++ }
2173 +
2174 + first = dx_buff;
2175 + dx_buff->len_pkt = skb->len;
2176 +@@ -655,10 +657,6 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
2177 + if (likely(frags)) {
2178 + err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
2179 + ring, frags);
2180 +- if (err >= 0) {
2181 +- ++ring->stats.tx.packets;
2182 +- ring->stats.tx.bytes += skb->len;
2183 +- }
2184 + } else {
2185 + err = NETDEV_TX_BUSY;
2186 + }
2187 +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
2188 +index 2bb329606794..f74952674084 100644
2189 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
2190 ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
2191 +@@ -359,7 +359,8 @@ static int aq_suspend_common(struct device *dev, bool deep)
2192 + netif_device_detach(nic->ndev);
2193 + netif_tx_stop_all_queues(nic->ndev);
2194 +
2195 +- aq_nic_stop(nic);
2196 ++ if (netif_running(nic->ndev))
2197 ++ aq_nic_stop(nic);
2198 +
2199 + if (deep) {
2200 + aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
2201 +@@ -375,7 +376,7 @@ static int atl_resume_common(struct device *dev, bool deep)
2202 + {
2203 + struct pci_dev *pdev = to_pci_dev(dev);
2204 + struct aq_nic_s *nic;
2205 +- int ret;
2206 ++ int ret = 0;
2207 +
2208 + nic = pci_get_drvdata(pdev);
2209 +
2210 +@@ -390,9 +391,11 @@ static int atl_resume_common(struct device *dev, bool deep)
2211 + goto err_exit;
2212 + }
2213 +
2214 +- ret = aq_nic_start(nic);
2215 +- if (ret)
2216 +- goto err_exit;
2217 ++ if (netif_running(nic->ndev)) {
2218 ++ ret = aq_nic_start(nic);
2219 ++ if (ret)
2220 ++ goto err_exit;
2221 ++ }
2222 +
2223 + netif_device_attach(nic->ndev);
2224 + netif_tx_start_all_queues(nic->ndev);
2225 +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
2226 +index 951d86f8b66e..bae95a618560 100644
2227 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
2228 ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
2229 +@@ -272,9 +272,12 @@ bool aq_ring_tx_clean(struct aq_ring_s *self)
2230 + }
2231 + }
2232 +
2233 +- if (unlikely(buff->is_eop))
2234 +- dev_kfree_skb_any(buff->skb);
2235 ++ if (unlikely(buff->is_eop)) {
2236 ++ ++self->stats.rx.packets;
2237 ++ self->stats.tx.bytes += buff->skb->len;
2238 +
2239 ++ dev_kfree_skb_any(buff->skb);
2240 ++ }
2241 + buff->pa = 0U;
2242 + buff->eop_index = 0xffffU;
2243 + self->sw_head = aq_ring_next_dx(self, self->sw_head);
2244 +@@ -351,7 +354,8 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
2245 + err = 0;
2246 + goto err_exit;
2247 + }
2248 +- if (buff->is_error || buff->is_cso_err) {
2249 ++ if (buff->is_error ||
2250 ++ (buff->is_lro && buff->is_cso_err)) {
2251 + buff_ = buff;
2252 + do {
2253 + next_ = buff_->next,
2254 +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
2255 +index 991e4d31b094..2c96f20f6289 100644
2256 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
2257 ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
2258 +@@ -78,7 +78,8 @@ struct __packed aq_ring_buff_s {
2259 + u32 is_cleaned:1;
2260 + u32 is_error:1;
2261 + u32 is_vlan:1;
2262 +- u32 rsvd3:4;
2263 ++ u32 is_lro:1;
2264 ++ u32 rsvd3:3;
2265 + u16 eop_index;
2266 + u16 rsvd4;
2267 + };
2268 +diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
2269 +index ec041f78d063..fce587aaba33 100644
2270 +--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
2271 ++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
2272 +@@ -823,6 +823,8 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
2273 + }
2274 + }
2275 +
2276 ++ buff->is_lro = !!(HW_ATL_B0_RXD_WB_STAT2_RSCCNT &
2277 ++ rxd_wb->status);
2278 + if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
2279 + buff->len = rxd_wb->pkt_len %
2280 + AQ_CFG_RX_FRAME_MAX;
2281 +@@ -835,8 +837,7 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
2282 + rxd_wb->pkt_len > AQ_CFG_RX_FRAME_MAX ?
2283 + AQ_CFG_RX_FRAME_MAX : rxd_wb->pkt_len;
2284 +
2285 +- if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT &
2286 +- rxd_wb->status) {
2287 ++ if (buff->is_lro) {
2288 + /* LRO */
2289 + buff->next = rxd_wb->next_desc_ptr;
2290 + ++ring->stats.rx.lro_packets;
2291 +@@ -884,13 +885,16 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
2292 + {
2293 + struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
2294 + unsigned int i = 0U;
2295 ++ u32 vlan_promisc;
2296 ++ u32 l2_promisc;
2297 +
2298 +- hw_atl_rpfl2promiscuous_mode_en_set(self,
2299 +- IS_FILTER_ENABLED(IFF_PROMISC));
2300 ++ l2_promisc = IS_FILTER_ENABLED(IFF_PROMISC) ||
2301 ++ !!(cfg->priv_flags & BIT(AQ_HW_LOOPBACK_DMA_NET));
2302 ++ vlan_promisc = l2_promisc || cfg->is_vlan_force_promisc;
2303 +
2304 +- hw_atl_rpf_vlan_prom_mode_en_set(self,
2305 +- IS_FILTER_ENABLED(IFF_PROMISC) ||
2306 +- cfg->is_vlan_force_promisc);
2307 ++ hw_atl_rpfl2promiscuous_mode_en_set(self, l2_promisc);
2308 ++
2309 ++ hw_atl_rpf_vlan_prom_mode_en_set(self, vlan_promisc);
2310 +
2311 + hw_atl_rpfl2multicast_flr_en_set(self,
2312 + IS_FILTER_ENABLED(IFF_ALLMULTI) &&
2313 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2314 +index 9d62200b6c33..cc86038b1d96 100644
2315 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2316 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2317 +@@ -11775,6 +11775,14 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2318 + if (version_printed++ == 0)
2319 + pr_info("%s", version);
2320 +
2321 ++ /* Clear any pending DMA transactions from crash kernel
2322 ++ * while loading driver in capture kernel.
2323 ++ */
2324 ++ if (is_kdump_kernel()) {
2325 ++ pci_clear_master(pdev);
2326 ++ pcie_flr(pdev);
2327 ++ }
2328 ++
2329 + max_irqs = bnxt_get_max_irq(pdev);
2330 + dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
2331 + if (!dev)
2332 +@@ -11972,10 +11980,10 @@ static void bnxt_shutdown(struct pci_dev *pdev)
2333 + dev_close(dev);
2334 +
2335 + bnxt_ulp_shutdown(bp);
2336 ++ bnxt_clear_int_mode(bp);
2337 ++ pci_disable_device(pdev);
2338 +
2339 + if (system_state == SYSTEM_POWER_OFF) {
2340 +- bnxt_clear_int_mode(bp);
2341 +- pci_disable_device(pdev);
2342 + pci_wake_from_d3(pdev, bp->wol);
2343 + pci_set_power_state(pdev, PCI_D3hot);
2344 + }
2345 +diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
2346 +index 19fe4f4867c7..c16cc1cb5874 100644
2347 +--- a/drivers/net/ethernet/cadence/macb.h
2348 ++++ b/drivers/net/ethernet/cadence/macb.h
2349 +@@ -645,6 +645,7 @@
2350 + #define MACB_CAPS_GEM_HAS_PTP 0x00000040
2351 + #define MACB_CAPS_BD_RD_PREFETCH 0x00000080
2352 + #define MACB_CAPS_NEEDS_RSTONUBR 0x00000100
2353 ++#define MACB_CAPS_MACB_IS_EMAC 0x08000000
2354 + #define MACB_CAPS_FIFO_MODE 0x10000000
2355 + #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
2356 + #define MACB_CAPS_SG_DISABLED 0x40000000
2357 +diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
2358 +index 71bb0d56533a..20db44d7cda8 100644
2359 +--- a/drivers/net/ethernet/cadence/macb_main.c
2360 ++++ b/drivers/net/ethernet/cadence/macb_main.c
2361 +@@ -533,8 +533,21 @@ static void macb_mac_config(struct phylink_config *config, unsigned int mode,
2362 + old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR);
2363 +
2364 + /* Clear all the bits we might set later */
2365 +- ctrl &= ~(GEM_BIT(GBE) | MACB_BIT(SPD) | MACB_BIT(FD) | MACB_BIT(PAE) |
2366 +- GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL));
2367 ++ ctrl &= ~(MACB_BIT(SPD) | MACB_BIT(FD) | MACB_BIT(PAE));
2368 ++
2369 ++ if (bp->caps & MACB_CAPS_MACB_IS_EMAC) {
2370 ++ if (state->interface == PHY_INTERFACE_MODE_RMII)
2371 ++ ctrl |= MACB_BIT(RM9200_RMII);
2372 ++ } else {
2373 ++ ctrl &= ~(GEM_BIT(GBE) | GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL));
2374 ++
2375 ++ /* We do not support MLO_PAUSE_RX yet */
2376 ++ if (state->pause & MLO_PAUSE_TX)
2377 ++ ctrl |= MACB_BIT(PAE);
2378 ++
2379 ++ if (state->interface == PHY_INTERFACE_MODE_SGMII)
2380 ++ ctrl |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
2381 ++ }
2382 +
2383 + if (state->speed == SPEED_1000)
2384 + ctrl |= GEM_BIT(GBE);
2385 +@@ -544,13 +557,6 @@ static void macb_mac_config(struct phylink_config *config, unsigned int mode,
2386 + if (state->duplex)
2387 + ctrl |= MACB_BIT(FD);
2388 +
2389 +- /* We do not support MLO_PAUSE_RX yet */
2390 +- if (state->pause & MLO_PAUSE_TX)
2391 +- ctrl |= MACB_BIT(PAE);
2392 +-
2393 +- if (state->interface == PHY_INTERFACE_MODE_SGMII)
2394 +- ctrl |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
2395 +-
2396 + /* Apply the new configuration, if any */
2397 + if (old_ctrl ^ ctrl)
2398 + macb_or_gem_writel(bp, NCFGR, ctrl);
2399 +@@ -569,9 +575,10 @@ static void macb_mac_link_down(struct phylink_config *config, unsigned int mode,
2400 + unsigned int q;
2401 + u32 ctrl;
2402 +
2403 +- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2404 +- queue_writel(queue, IDR,
2405 +- bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
2406 ++ if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
2407 ++ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2408 ++ queue_writel(queue, IDR,
2409 ++ bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
2410 +
2411 + /* Disable Rx and Tx */
2412 + ctrl = macb_readl(bp, NCR) & ~(MACB_BIT(RE) | MACB_BIT(TE));
2413 +@@ -588,17 +595,19 @@ static void macb_mac_link_up(struct phylink_config *config, unsigned int mode,
2414 + struct macb_queue *queue;
2415 + unsigned int q;
2416 +
2417 +- macb_set_tx_clk(bp->tx_clk, bp->speed, ndev);
2418 ++ if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) {
2419 ++ macb_set_tx_clk(bp->tx_clk, bp->speed, ndev);
2420 +
2421 +- /* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
2422 +- * cleared the pipeline and control registers.
2423 +- */
2424 +- bp->macbgem_ops.mog_init_rings(bp);
2425 +- macb_init_buffers(bp);
2426 ++ /* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
2427 ++ * cleared the pipeline and control registers.
2428 ++ */
2429 ++ bp->macbgem_ops.mog_init_rings(bp);
2430 ++ macb_init_buffers(bp);
2431 +
2432 +- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2433 +- queue_writel(queue, IER,
2434 +- bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
2435 ++ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2436 ++ queue_writel(queue, IER,
2437 ++ bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
2438 ++ }
2439 +
2440 + /* Enable Rx and Tx */
2441 + macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
2442 +@@ -3751,6 +3760,10 @@ static int at91ether_open(struct net_device *dev)
2443 + u32 ctl;
2444 + int ret;
2445 +
2446 ++ ret = pm_runtime_get_sync(&lp->pdev->dev);
2447 ++ if (ret < 0)
2448 ++ return ret;
2449 ++
2450 + /* Clear internal statistics */
2451 + ctl = macb_readl(lp, NCR);
2452 + macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
2453 +@@ -3815,7 +3828,7 @@ static int at91ether_close(struct net_device *dev)
2454 + q->rx_buffers, q->rx_buffers_dma);
2455 + q->rx_buffers = NULL;
2456 +
2457 +- return 0;
2458 ++ return pm_runtime_put(&lp->pdev->dev);
2459 + }
2460 +
2461 + /* Transmit packet */
2462 +@@ -3998,7 +4011,6 @@ static int at91ether_init(struct platform_device *pdev)
2463 + struct net_device *dev = platform_get_drvdata(pdev);
2464 + struct macb *bp = netdev_priv(dev);
2465 + int err;
2466 +- u32 reg;
2467 +
2468 + bp->queues[0].bp = bp;
2469 +
2470 +@@ -4012,11 +4024,7 @@ static int at91ether_init(struct platform_device *pdev)
2471 +
2472 + macb_writel(bp, NCR, 0);
2473 +
2474 +- reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
2475 +- if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
2476 +- reg |= MACB_BIT(RM9200_RMII);
2477 +-
2478 +- macb_writel(bp, NCFGR, reg);
2479 ++ macb_writel(bp, NCFGR, MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG));
2480 +
2481 + return 0;
2482 + }
2483 +@@ -4175,7 +4183,7 @@ static const struct macb_config sama5d4_config = {
2484 + };
2485 +
2486 + static const struct macb_config emac_config = {
2487 +- .caps = MACB_CAPS_NEEDS_RSTONUBR,
2488 ++ .caps = MACB_CAPS_NEEDS_RSTONUBR | MACB_CAPS_MACB_IS_EMAC,
2489 + .clk_init = at91ether_clk_init,
2490 + .init = at91ether_init,
2491 + };
2492 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
2493 +index 13dbd249f35f..5d74f5a60102 100644
2494 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
2495 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
2496 +@@ -6106,6 +6106,9 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
2497 + static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
2498 + struct hclge_fd_rule_tuples *tuples)
2499 + {
2500 ++#define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
2501 ++#define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
2502 ++
2503 + tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
2504 + tuples->ip_proto = fkeys->basic.ip_proto;
2505 + tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
2506 +@@ -6114,12 +6117,12 @@ static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
2507 + tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
2508 + tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
2509 + } else {
2510 +- memcpy(tuples->src_ip,
2511 +- fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
2512 +- sizeof(tuples->src_ip));
2513 +- memcpy(tuples->dst_ip,
2514 +- fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
2515 +- sizeof(tuples->dst_ip));
2516 ++ int i;
2517 ++
2518 ++ for (i = 0; i < IPV6_SIZE; i++) {
2519 ++ tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
2520 ++ tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
2521 ++ }
2522 + }
2523 + }
2524 +
2525 +@@ -9821,6 +9824,13 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
2526 + return ret;
2527 + }
2528 +
2529 ++ ret = init_mgr_tbl(hdev);
2530 ++ if (ret) {
2531 ++ dev_err(&pdev->dev,
2532 ++ "failed to reinit manager table, ret = %d\n", ret);
2533 ++ return ret;
2534 ++ }
2535 ++
2536 + ret = hclge_init_fd_config(hdev);
2537 + if (ret) {
2538 + dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
2539 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
2540 +index 180224eab1ca..28db13253a5e 100644
2541 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
2542 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
2543 +@@ -566,7 +566,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
2544 + */
2545 + kinfo->num_tc = vport->vport_id ? 1 :
2546 + min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
2547 +- vport->qs_offset = (vport->vport_id ? hdev->tm_info.num_tc : 0) +
2548 ++ vport->qs_offset = (vport->vport_id ? HNAE3_MAX_TC : 0) +
2549 + (vport->vport_id ? (vport->vport_id - 1) : 0);
2550 +
2551 + max_rss_size = min_t(u16, hdev->rss_size_max,
2552 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
2553 +index 69523ac85639..56b9e445732b 100644
2554 +--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
2555 ++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
2556 +@@ -2362,7 +2362,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
2557 + goto error_param;
2558 + }
2559 +
2560 +- if (i40e_vc_validate_vqs_bitmaps(vqs)) {
2561 ++ if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2562 + aq_ret = I40E_ERR_PARAM;
2563 + goto error_param;
2564 + }
2565 +@@ -2424,7 +2424,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
2566 + goto error_param;
2567 + }
2568 +
2569 +- if (i40e_vc_validate_vqs_bitmaps(vqs)) {
2570 ++ if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2571 + aq_ret = I40E_ERR_PARAM;
2572 + goto error_param;
2573 + }
2574 +diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
2575 +index 77d6a0291e97..6939c14858b2 100644
2576 +--- a/drivers/net/ethernet/intel/ice/ice_base.c
2577 ++++ b/drivers/net/ethernet/intel/ice/ice_base.c
2578 +@@ -320,7 +320,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
2579 + if (err)
2580 + return err;
2581 +
2582 +- dev_info(&vsi->back->pdev->dev, "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
2583 ++ dev_info(ice_pf_to_dev(vsi->back), "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
2584 + ring->q_index);
2585 + } else {
2586 + if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
2587 +@@ -399,7 +399,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
2588 + /* Absolute queue number out of 2K needs to be passed */
2589 + err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
2590 + if (err) {
2591 +- dev_err(&vsi->back->pdev->dev,
2592 ++ dev_err(ice_pf_to_dev(vsi->back),
2593 + "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
2594 + pf_q, err);
2595 + return -EIO;
2596 +@@ -422,7 +422,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
2597 + ice_alloc_rx_bufs_slow_zc(ring, ICE_DESC_UNUSED(ring)) :
2598 + ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
2599 + if (err)
2600 +- dev_info(&vsi->back->pdev->dev,
2601 ++ dev_info(ice_pf_to_dev(vsi->back),
2602 + "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n",
2603 + ring->xsk_umem ? "UMEM enabled " : "",
2604 + ring->q_index, pf_q);
2605 +@@ -817,13 +817,13 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
2606 + * queues at the hardware level anyway.
2607 + */
2608 + if (status == ICE_ERR_RESET_ONGOING) {
2609 +- dev_dbg(&vsi->back->pdev->dev,
2610 ++ dev_dbg(ice_pf_to_dev(vsi->back),
2611 + "Reset in progress. LAN Tx queues already disabled\n");
2612 + } else if (status == ICE_ERR_DOES_NOT_EXIST) {
2613 +- dev_dbg(&vsi->back->pdev->dev,
2614 ++ dev_dbg(ice_pf_to_dev(vsi->back),
2615 + "LAN Tx queues do not exist, nothing to disable\n");
2616 + } else if (status) {
2617 +- dev_err(&vsi->back->pdev->dev,
2618 ++ dev_err(ice_pf_to_dev(vsi->back),
2619 + "Failed to disable LAN Tx queues, error: %d\n", status);
2620 + return -ENODEV;
2621 + }
2622 +diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
2623 +index fb1d930470c7..cb437a448305 100644
2624 +--- a/drivers/net/ethernet/intel/ice/ice_common.c
2625 ++++ b/drivers/net/ethernet/intel/ice/ice_common.c
2626 +@@ -937,7 +937,7 @@ void ice_deinit_hw(struct ice_hw *hw)
2627 + */
2628 + enum ice_status ice_check_reset(struct ice_hw *hw)
2629 + {
2630 +- u32 cnt, reg = 0, grst_delay;
2631 ++ u32 cnt, reg = 0, grst_delay, uld_mask;
2632 +
2633 + /* Poll for Device Active state in case a recent CORER, GLOBR,
2634 + * or EMPR has occurred. The grst delay value is in 100ms units.
2635 +@@ -959,13 +959,20 @@ enum ice_status ice_check_reset(struct ice_hw *hw)
2636 + return ICE_ERR_RESET_FAILED;
2637 + }
2638 +
2639 +-#define ICE_RESET_DONE_MASK (GLNVM_ULD_CORER_DONE_M | \
2640 +- GLNVM_ULD_GLOBR_DONE_M)
2641 ++#define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\
2642 ++ GLNVM_ULD_PCIER_DONE_1_M |\
2643 ++ GLNVM_ULD_CORER_DONE_M |\
2644 ++ GLNVM_ULD_GLOBR_DONE_M |\
2645 ++ GLNVM_ULD_POR_DONE_M |\
2646 ++ GLNVM_ULD_POR_DONE_1_M |\
2647 ++ GLNVM_ULD_PCIER_DONE_2_M)
2648 ++
2649 ++ uld_mask = ICE_RESET_DONE_MASK;
2650 +
2651 + /* Device is Active; check Global Reset processes are done */
2652 + for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
2653 +- reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
2654 +- if (reg == ICE_RESET_DONE_MASK) {
2655 ++ reg = rd32(hw, GLNVM_ULD) & uld_mask;
2656 ++ if (reg == uld_mask) {
2657 + ice_debug(hw, ICE_DBG_INIT,
2658 + "Global reset processes done. %d\n", cnt);
2659 + break;
2660 +diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
2661 +index d870c1aedc17..265cf69b321b 100644
2662 +--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
2663 ++++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
2664 +@@ -713,13 +713,13 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
2665 + return -EINVAL;
2666 +
2667 + mutex_lock(&pf->tc_mutex);
2668 +- ret = dcb_ieee_delapp(netdev, app);
2669 +- if (ret)
2670 +- goto delapp_out;
2671 +-
2672 + old_cfg = &pf->hw.port_info->local_dcbx_cfg;
2673 +
2674 +- if (old_cfg->numapps == 1)
2675 ++ if (old_cfg->numapps <= 1)
2676 ++ goto delapp_out;
2677 ++
2678 ++ ret = dcb_ieee_delapp(netdev, app);
2679 ++ if (ret)
2680 + goto delapp_out;
2681 +
2682 + new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
2683 +@@ -882,7 +882,7 @@ ice_dcbnl_vsi_del_app(struct ice_vsi *vsi,
2684 + sapp.protocol = app->prot_id;
2685 + sapp.priority = app->priority;
2686 + err = ice_dcbnl_delapp(vsi->netdev, &sapp);
2687 +- dev_dbg(&vsi->back->pdev->dev,
2688 ++ dev_dbg(ice_pf_to_dev(vsi->back),
2689 + "Deleting app for VSI idx=%d err=%d sel=%d proto=0x%x, prio=%d\n",
2690 + vsi->idx, err, app->selector, app->prot_id, app->priority);
2691 + }
2692 +diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
2693 +index 9ebd93e79aeb..9bd166e3dff3 100644
2694 +--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
2695 ++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
2696 +@@ -165,13 +165,24 @@ static void
2697 + ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
2698 + {
2699 + struct ice_netdev_priv *np = netdev_priv(netdev);
2700 ++ u8 oem_ver, oem_patch, nvm_ver_hi, nvm_ver_lo;
2701 + struct ice_vsi *vsi = np->vsi;
2702 + struct ice_pf *pf = vsi->back;
2703 ++ struct ice_hw *hw = &pf->hw;
2704 ++ u16 oem_build;
2705 +
2706 + strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
2707 + strlcpy(drvinfo->version, ice_drv_ver, sizeof(drvinfo->version));
2708 +- strlcpy(drvinfo->fw_version, ice_nvm_version_str(&pf->hw),
2709 +- sizeof(drvinfo->fw_version));
2710 ++
2711 ++ /* Display NVM version (from which the firmware version can be
2712 ++ * determined) which contains more pertinent information.
2713 ++ */
2714 ++ ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch,
2715 ++ &nvm_ver_hi, &nvm_ver_lo);
2716 ++ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
2717 ++ "%x.%02x 0x%x %d.%d.%d", nvm_ver_hi, nvm_ver_lo,
2718 ++ hw->nvm.eetrack, oem_ver, oem_build, oem_patch);
2719 ++
2720 + strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
2721 + sizeof(drvinfo->bus_info));
2722 + drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE;
2723 +@@ -1043,7 +1054,7 @@ ice_set_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
2724 + fec = ICE_FEC_NONE;
2725 + break;
2726 + default:
2727 +- dev_warn(&vsi->back->pdev->dev, "Unsupported FEC mode: %d\n",
2728 ++ dev_warn(ice_pf_to_dev(vsi->back), "Unsupported FEC mode: %d\n",
2729 + fecparam->fec);
2730 + return -EINVAL;
2731 + }
2732 +diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
2733 +index e8f32350fed2..6f4a70fa3903 100644
2734 +--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
2735 ++++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
2736 +@@ -276,8 +276,14 @@
2737 + #define GLNVM_GENS_SR_SIZE_S 5
2738 + #define GLNVM_GENS_SR_SIZE_M ICE_M(0x7, 5)
2739 + #define GLNVM_ULD 0x000B6008
2740 ++#define GLNVM_ULD_PCIER_DONE_M BIT(0)
2741 ++#define GLNVM_ULD_PCIER_DONE_1_M BIT(1)
2742 + #define GLNVM_ULD_CORER_DONE_M BIT(3)
2743 + #define GLNVM_ULD_GLOBR_DONE_M BIT(4)
2744 ++#define GLNVM_ULD_POR_DONE_M BIT(5)
2745 ++#define GLNVM_ULD_POR_DONE_1_M BIT(8)
2746 ++#define GLNVM_ULD_PCIER_DONE_2_M BIT(9)
2747 ++#define GLNVM_ULD_PE_DONE_M BIT(10)
2748 + #define GLPCI_CNF2 0x000BE004
2749 + #define GLPCI_CNF2_CACHELINE_SIZE_M BIT(1)
2750 + #define PF_FUNC_RID 0x0009E880
2751 +diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
2752 +index e7449248fab4..b43bb51f6067 100644
2753 +--- a/drivers/net/ethernet/intel/ice/ice_lib.c
2754 ++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
2755 +@@ -116,7 +116,7 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
2756 + vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
2757 + break;
2758 + default:
2759 +- dev_dbg(&vsi->back->pdev->dev,
2760 ++ dev_dbg(ice_pf_to_dev(vsi->back),
2761 + "Not setting number of Tx/Rx descriptors for VSI type %d\n",
2762 + vsi->type);
2763 + break;
2764 +@@ -697,7 +697,7 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
2765 + vsi->num_txq = tx_count;
2766 +
2767 + if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
2768 +- dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
2769 ++ dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
2770 + /* since there is a chance that num_rxq could have been changed
2771 + * in the above for loop, make num_txq equal to num_rxq.
2772 + */
2773 +@@ -1306,7 +1306,7 @@ setup_rings:
2774 +
2775 + err = ice_setup_rx_ctx(vsi->rx_rings[i]);
2776 + if (err) {
2777 +- dev_err(&vsi->back->pdev->dev,
2778 ++ dev_err(ice_pf_to_dev(vsi->back),
2779 + "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
2780 + i, err);
2781 + return err;
2782 +@@ -1476,7 +1476,7 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
2783 +
2784 + status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
2785 + if (status) {
2786 +- dev_err(&vsi->back->pdev->dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
2787 ++ dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %d aq_err %d\n",
2788 + status, hw->adminq.sq_last_status);
2789 + ret = -EIO;
2790 + goto out;
2791 +@@ -1522,7 +1522,7 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
2792 +
2793 + status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
2794 + if (status) {
2795 +- dev_err(&vsi->back->pdev->dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
2796 ++ dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
2797 + ena, status, hw->adminq.sq_last_status);
2798 + ret = -EIO;
2799 + goto out;
2800 +@@ -1696,7 +1696,7 @@ ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi)
2801 + struct ice_q_vector *q_vector = vsi->q_vectors[i];
2802 +
2803 + if (!q_vector) {
2804 +- dev_err(&vsi->back->pdev->dev,
2805 ++ dev_err(ice_pf_to_dev(vsi->back),
2806 + "Failed to set reg_idx on q_vector %d VSI %d\n",
2807 + i, vsi->vsi_num);
2808 + goto clear_reg_idx;
2809 +@@ -2647,25 +2647,6 @@ out:
2810 + }
2811 + #endif /* CONFIG_DCB */
2812 +
2813 +-/**
2814 +- * ice_nvm_version_str - format the NVM version strings
2815 +- * @hw: ptr to the hardware info
2816 +- */
2817 +-char *ice_nvm_version_str(struct ice_hw *hw)
2818 +-{
2819 +- u8 oem_ver, oem_patch, ver_hi, ver_lo;
2820 +- static char buf[ICE_NVM_VER_LEN];
2821 +- u16 oem_build;
2822 +-
2823 +- ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch, &ver_hi,
2824 +- &ver_lo);
2825 +-
2826 +- snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d", ver_hi, ver_lo,
2827 +- hw->nvm.eetrack, oem_ver, oem_build, oem_patch);
2828 +-
2829 +- return buf;
2830 +-}
2831 +-
2832 + /**
2833 + * ice_update_ring_stats - Update ring statistics
2834 + * @ring: ring to update
2835 +@@ -2737,6 +2718,6 @@ ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set)
2836 + status = ice_remove_mac(&vsi->back->hw, &tmp_add_list);
2837 +
2838 + cfg_mac_fltr_exit:
2839 +- ice_free_fltr_list(&vsi->back->pdev->dev, &tmp_add_list);
2840 ++ ice_free_fltr_list(ice_pf_to_dev(vsi->back), &tmp_add_list);
2841 + return status;
2842 + }
2843 +diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
2844 +index 6e31e30aba39..0d2b1119c0e3 100644
2845 +--- a/drivers/net/ethernet/intel/ice/ice_lib.h
2846 ++++ b/drivers/net/ethernet/intel/ice/ice_lib.h
2847 +@@ -97,8 +97,6 @@ void ice_vsi_cfg_frame_size(struct ice_vsi *vsi);
2848 +
2849 + u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran);
2850 +
2851 +-char *ice_nvm_version_str(struct ice_hw *hw);
2852 +-
2853 + enum ice_status
2854 + ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
2855 +
2856 +diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
2857 +index 69bff085acf7..7f71f06fa819 100644
2858 +--- a/drivers/net/ethernet/intel/ice/ice_main.c
2859 ++++ b/drivers/net/ethernet/intel/ice/ice_main.c
2860 +@@ -269,7 +269,7 @@ static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
2861 + */
2862 + static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
2863 + {
2864 +- struct device *dev = &vsi->back->pdev->dev;
2865 ++ struct device *dev = ice_pf_to_dev(vsi->back);
2866 + struct net_device *netdev = vsi->netdev;
2867 + bool promisc_forced_on = false;
2868 + struct ice_pf *pf = vsi->back;
2869 +@@ -1235,7 +1235,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
2870 + u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
2871 + GL_MDET_TX_TCLAN_QNUM_S);
2872 +
2873 +- if (netif_msg_rx_err(pf))
2874 ++ if (netif_msg_tx_err(pf))
2875 + dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
2876 + event, queue, pf_num, vf_num);
2877 + wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
2878 +@@ -1364,7 +1364,7 @@ static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
2879 + if (vsi->type != ICE_VSI_PF)
2880 + return 0;
2881 +
2882 +- dev = &vsi->back->pdev->dev;
2883 ++ dev = ice_pf_to_dev(vsi->back);
2884 +
2885 + pi = vsi->port_info;
2886 +
2887 +@@ -1682,7 +1682,7 @@ free_q_irqs:
2888 + */
2889 + static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2890 + {
2891 +- struct device *dev = &vsi->back->pdev->dev;
2892 ++ struct device *dev = ice_pf_to_dev(vsi->back);
2893 + int i;
2894 +
2895 + for (i = 0; i < vsi->num_xdp_txq; i++) {
2896 +@@ -3241,11 +3241,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
2897 + goto err_exit_unroll;
2898 + }
2899 +
2900 +- dev_info(dev, "firmware %d.%d.%d api %d.%d.%d nvm %s build 0x%08x\n",
2901 +- hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch,
2902 +- hw->api_maj_ver, hw->api_min_ver, hw->api_patch,
2903 +- ice_nvm_version_str(hw), hw->fw_build);
2904 +-
2905 + ice_request_fw(pf);
2906 +
2907 + /* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be
2908 +@@ -3863,14 +3858,14 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
2909 +
2910 + /* Don't set any netdev advanced features with device in Safe Mode */
2911 + if (ice_is_safe_mode(vsi->back)) {
2912 +- dev_err(&vsi->back->pdev->dev,
2913 ++ dev_err(ice_pf_to_dev(vsi->back),
2914 + "Device is in Safe Mode - not enabling advanced netdev features\n");
2915 + return ret;
2916 + }
2917 +
2918 + /* Do not change setting during reset */
2919 + if (ice_is_reset_in_progress(pf->state)) {
2920 +- dev_err(&vsi->back->pdev->dev,
2921 ++ dev_err(ice_pf_to_dev(vsi->back),
2922 + "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
2923 + return -EBUSY;
2924 + }
2925 +@@ -4413,7 +4408,7 @@ int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
2926 + int i, err = 0;
2927 +
2928 + if (!vsi->num_txq) {
2929 +- dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n",
2930 ++ dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
2931 + vsi->vsi_num);
2932 + return -EINVAL;
2933 + }
2934 +@@ -4444,7 +4439,7 @@ int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
2935 + int i, err = 0;
2936 +
2937 + if (!vsi->num_rxq) {
2938 +- dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n",
2939 ++ dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
2940 + vsi->vsi_num);
2941 + return -EINVAL;
2942 + }
2943 +@@ -4973,7 +4968,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
2944 +
2945 + status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
2946 + if (status) {
2947 +- dev_err(&vsi->back->pdev->dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
2948 ++ dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
2949 + bmode, status, hw->adminq.sq_last_status);
2950 + ret = -EIO;
2951 + goto out;
2952 +diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
2953 +index 35bbc4ff603c..6da048a6ca7c 100644
2954 +--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
2955 ++++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
2956 +@@ -10,7 +10,7 @@
2957 + */
2958 + void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
2959 + {
2960 +- u16 prev_ntu = rx_ring->next_to_use;
2961 ++ u16 prev_ntu = rx_ring->next_to_use & ~0x7;
2962 +
2963 + rx_ring->next_to_use = val;
2964 +
2965 +diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
2966 +index edb374296d1f..e2114f24a19e 100644
2967 +--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
2968 ++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
2969 +@@ -508,7 +508,7 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable)
2970 +
2971 + status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
2972 + if (status) {
2973 +- dev_info(&vsi->back->pdev->dev, "update VSI for port VLAN failed, err %d aq_err %d\n",
2974 ++ dev_info(ice_pf_to_dev(vsi->back), "update VSI for port VLAN failed, err %d aq_err %d\n",
2975 + status, hw->adminq.sq_last_status);
2976 + ret = -EIO;
2977 + goto out;
2978 +@@ -2019,7 +2019,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
2979 + continue;
2980 +
2981 + if (ice_vsi_ctrl_rx_ring(vsi, true, vf_q_id)) {
2982 +- dev_err(&vsi->back->pdev->dev,
2983 ++ dev_err(ice_pf_to_dev(vsi->back),
2984 + "Failed to enable Rx ring %d on VSI %d\n",
2985 + vf_q_id, vsi->vsi_num);
2986 + v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2987 +@@ -2122,7 +2122,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
2988 +
2989 + if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
2990 + ring, &txq_meta)) {
2991 +- dev_err(&vsi->back->pdev->dev,
2992 ++ dev_err(ice_pf_to_dev(vsi->back),
2993 + "Failed to stop Tx ring %d on VSI %d\n",
2994 + vf_q_id, vsi->vsi_num);
2995 + v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2996 +@@ -2149,7 +2149,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
2997 + continue;
2998 +
2999 + if (ice_vsi_ctrl_rx_ring(vsi, false, vf_q_id)) {
3000 +- dev_err(&vsi->back->pdev->dev,
3001 ++ dev_err(ice_pf_to_dev(vsi->back),
3002 + "Failed to stop Rx ring %d on VSI %d\n",
3003 + vf_q_id, vsi->vsi_num);
3004 + v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3005 +diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
3006 +index 2da8eee27e98..ecbd4be145b8 100644
3007 +--- a/drivers/net/ethernet/mscc/ocelot_board.c
3008 ++++ b/drivers/net/ethernet/mscc/ocelot_board.c
3009 +@@ -114,6 +114,14 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
3010 + if (err != 4)
3011 + break;
3012 +
3013 ++ /* At this point the IFH was read correctly, so it is safe to
3014 ++ * presume that there is no error. The err needs to be reset
3015 ++ * otherwise a frame could come in CPU queue between the while
3016 ++ * condition and the check for error later on. And in that case
3017 ++ * the new frame is just removed and not processed.
3018 ++ */
3019 ++ err = 0;
3020 ++
3021 + ocelot_parse_ifh(ifh, &info);
3022 +
3023 + ocelot_port = ocelot->ports[info.port];
3024 +diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
3025 +index 5f9d2ec70446..61c06fbe10db 100644
3026 +--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
3027 ++++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
3028 +@@ -103,7 +103,7 @@ int ionic_heartbeat_check(struct ionic *ionic)
3029 + {
3030 + struct ionic_dev *idev = &ionic->idev;
3031 + unsigned long hb_time;
3032 +- u32 fw_status;
3033 ++ u8 fw_status;
3034 + u32 hb;
3035 +
3036 + /* wait a little more than one second before testing again */
3037 +@@ -111,9 +111,12 @@ int ionic_heartbeat_check(struct ionic *ionic)
3038 + if (time_before(hb_time, (idev->last_hb_time + ionic->watchdog_period)))
3039 + return 0;
3040 +
3041 +- /* firmware is useful only if fw_status is non-zero */
3042 +- fw_status = ioread32(&idev->dev_info_regs->fw_status);
3043 +- if (!fw_status)
3044 ++ /* firmware is useful only if the running bit is set and
3045 ++ * fw_status != 0xff (bad PCI read)
3046 ++ */
3047 ++ fw_status = ioread8(&idev->dev_info_regs->fw_status);
3048 ++ if (fw_status == 0xff ||
3049 ++ !(fw_status & IONIC_FW_STS_F_RUNNING))
3050 + return -ENXIO;
3051 +
3052 + /* early FW has no heartbeat, else FW will return non-zero */
3053 +diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
3054 +index ed23a05f2642..d5e8b4e2a96e 100644
3055 +--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
3056 ++++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
3057 +@@ -2348,6 +2348,7 @@ union ionic_dev_info_regs {
3058 + u8 version;
3059 + u8 asic_type;
3060 + u8 asic_rev;
3061 ++#define IONIC_FW_STS_F_RUNNING 0x1
3062 + u8 fw_status;
3063 + u32 fw_heartbeat;
3064 + char fw_version[IONIC_DEVINFO_FWVERS_BUFLEN];
3065 +diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
3066 +index e8a1b27db84d..234c6f30effb 100644
3067 +--- a/drivers/net/ethernet/qlogic/qede/qede.h
3068 ++++ b/drivers/net/ethernet/qlogic/qede/qede.h
3069 +@@ -163,6 +163,8 @@ struct qede_rdma_dev {
3070 + struct list_head entry;
3071 + struct list_head rdma_event_list;
3072 + struct workqueue_struct *rdma_wq;
3073 ++ struct kref refcnt;
3074 ++ struct completion event_comp;
3075 + bool exp_recovery;
3076 + };
3077 +
3078 +diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
3079 +index ffabc2d2f082..2d873ae8a234 100644
3080 +--- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c
3081 ++++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
3082 +@@ -59,6 +59,9 @@ static void _qede_rdma_dev_add(struct qede_dev *edev)
3083 + static int qede_rdma_create_wq(struct qede_dev *edev)
3084 + {
3085 + INIT_LIST_HEAD(&edev->rdma_info.rdma_event_list);
3086 ++ kref_init(&edev->rdma_info.refcnt);
3087 ++ init_completion(&edev->rdma_info.event_comp);
3088 ++
3089 + edev->rdma_info.rdma_wq = create_singlethread_workqueue("rdma_wq");
3090 + if (!edev->rdma_info.rdma_wq) {
3091 + DP_NOTICE(edev, "qedr: Could not create workqueue\n");
3092 +@@ -83,8 +86,23 @@ static void qede_rdma_cleanup_event(struct qede_dev *edev)
3093 + }
3094 + }
3095 +
3096 ++static void qede_rdma_complete_event(struct kref *ref)
3097 ++{
3098 ++ struct qede_rdma_dev *rdma_dev =
3099 ++ container_of(ref, struct qede_rdma_dev, refcnt);
3100 ++
3101 ++ /* no more events will be added after this */
3102 ++ complete(&rdma_dev->event_comp);
3103 ++}
3104 ++
3105 + static void qede_rdma_destroy_wq(struct qede_dev *edev)
3106 + {
3107 ++ /* Avoid race with add_event flow, make sure it finishes before
3108 ++ * we start accessing the list and cleaning up the work
3109 ++ */
3110 ++ kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event);
3111 ++ wait_for_completion(&edev->rdma_info.event_comp);
3112 ++
3113 + qede_rdma_cleanup_event(edev);
3114 + destroy_workqueue(edev->rdma_info.rdma_wq);
3115 + }
3116 +@@ -310,15 +328,24 @@ static void qede_rdma_add_event(struct qede_dev *edev,
3117 + if (!edev->rdma_info.qedr_dev)
3118 + return;
3119 +
3120 ++ /* We don't want the cleanup flow to start while we're allocating and
3121 ++ * scheduling the work
3122 ++ */
3123 ++ if (!kref_get_unless_zero(&edev->rdma_info.refcnt))
3124 ++ return; /* already being destroyed */
3125 ++
3126 + event_node = qede_rdma_get_free_event_node(edev);
3127 + if (!event_node)
3128 +- return;
3129 ++ goto out;
3130 +
3131 + event_node->event = event;
3132 + event_node->ptr = edev;
3133 +
3134 + INIT_WORK(&event_node->work, qede_rdma_handle_event);
3135 + queue_work(edev->rdma_info.rdma_wq, &event_node->work);
3136 ++
3137 ++out:
3138 ++ kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event);
3139 + }
3140 +
3141 + void qede_rdma_dev_event_open(struct qede_dev *edev)
3142 +diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
3143 +index eab83e71567a..6c0732fc8c25 100644
3144 +--- a/drivers/net/hyperv/netvsc.c
3145 ++++ b/drivers/net/hyperv/netvsc.c
3146 +@@ -99,7 +99,7 @@ static struct netvsc_device *alloc_net_device(void)
3147 +
3148 + init_waitqueue_head(&net_device->wait_drain);
3149 + net_device->destroy = false;
3150 +- net_device->tx_disable = false;
3151 ++ net_device->tx_disable = true;
3152 +
3153 + net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
3154 + net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
3155 +diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
3156 +index f3f9eb8a402a..ee1ad7ae7555 100644
3157 +--- a/drivers/net/hyperv/netvsc_drv.c
3158 ++++ b/drivers/net/hyperv/netvsc_drv.c
3159 +@@ -977,6 +977,7 @@ static int netvsc_attach(struct net_device *ndev,
3160 + }
3161 +
3162 + /* In any case device is now ready */
3163 ++ nvdev->tx_disable = false;
3164 + netif_device_attach(ndev);
3165 +
3166 + /* Note: enable and attach happen when sub-channels setup */
3167 +@@ -2354,6 +2355,8 @@ static int netvsc_probe(struct hv_device *dev,
3168 + else
3169 + net->max_mtu = ETH_DATA_LEN;
3170 +
3171 ++ nvdev->tx_disable = false;
3172 ++
3173 + ret = register_netdevice(net);
3174 + if (ret != 0) {
3175 + pr_err("Unable to register netdev.\n");
3176 +diff --git a/drivers/net/phy/mdio-bcm-iproc.c b/drivers/net/phy/mdio-bcm-iproc.c
3177 +index 7e9975d25066..f1ded03f0229 100644
3178 +--- a/drivers/net/phy/mdio-bcm-iproc.c
3179 ++++ b/drivers/net/phy/mdio-bcm-iproc.c
3180 +@@ -178,6 +178,23 @@ static int iproc_mdio_remove(struct platform_device *pdev)
3181 + return 0;
3182 + }
3183 +
3184 ++#ifdef CONFIG_PM_SLEEP
3185 ++int iproc_mdio_resume(struct device *dev)
3186 ++{
3187 ++ struct platform_device *pdev = to_platform_device(dev);
3188 ++ struct iproc_mdio_priv *priv = platform_get_drvdata(pdev);
3189 ++
3190 ++ /* restore the mii clock configuration */
3191 ++ iproc_mdio_config_clk(priv->base);
3192 ++
3193 ++ return 0;
3194 ++}
3195 ++
3196 ++static const struct dev_pm_ops iproc_mdio_pm_ops = {
3197 ++ .resume = iproc_mdio_resume
3198 ++};
3199 ++#endif /* CONFIG_PM_SLEEP */
3200 ++
3201 + static const struct of_device_id iproc_mdio_of_match[] = {
3202 + { .compatible = "brcm,iproc-mdio", },
3203 + { /* sentinel */ },
3204 +@@ -188,6 +205,9 @@ static struct platform_driver iproc_mdio_driver = {
3205 + .driver = {
3206 + .name = "iproc-mdio",
3207 + .of_match_table = iproc_mdio_of_match,
3208 ++#ifdef CONFIG_PM_SLEEP
3209 ++ .pm = &iproc_mdio_pm_ops,
3210 ++#endif
3211 + },
3212 + .probe = iproc_mdio_probe,
3213 + .remove = iproc_mdio_remove,
3214 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
3215 +index 9485c8d1de8a..3b7a3b8a5e06 100644
3216 +--- a/drivers/net/usb/qmi_wwan.c
3217 ++++ b/drivers/net/usb/qmi_wwan.c
3218 +@@ -61,7 +61,6 @@ enum qmi_wwan_flags {
3219 +
3220 + enum qmi_wwan_quirks {
3221 + QMI_WWAN_QUIRK_DTR = 1 << 0, /* needs "set DTR" request */
3222 +- QMI_WWAN_QUIRK_QUECTEL_DYNCFG = 1 << 1, /* check num. endpoints */
3223 + };
3224 +
3225 + struct qmimux_hdr {
3226 +@@ -916,16 +915,6 @@ static const struct driver_info qmi_wwan_info_quirk_dtr = {
3227 + .data = QMI_WWAN_QUIRK_DTR,
3228 + };
3229 +
3230 +-static const struct driver_info qmi_wwan_info_quirk_quectel_dyncfg = {
3231 +- .description = "WWAN/QMI device",
3232 +- .flags = FLAG_WWAN | FLAG_SEND_ZLP,
3233 +- .bind = qmi_wwan_bind,
3234 +- .unbind = qmi_wwan_unbind,
3235 +- .manage_power = qmi_wwan_manage_power,
3236 +- .rx_fixup = qmi_wwan_rx_fixup,
3237 +- .data = QMI_WWAN_QUIRK_DTR | QMI_WWAN_QUIRK_QUECTEL_DYNCFG,
3238 +-};
3239 +-
3240 + #define HUAWEI_VENDOR_ID 0x12D1
3241 +
3242 + /* map QMI/wwan function by a fixed interface number */
3243 +@@ -946,14 +935,18 @@ static const struct driver_info qmi_wwan_info_quirk_quectel_dyncfg = {
3244 + #define QMI_GOBI_DEVICE(vend, prod) \
3245 + QMI_FIXED_INTF(vend, prod, 0)
3246 +
3247 +-/* Quectel does not use fixed interface numbers on at least some of their
3248 +- * devices. We need to check the number of endpoints to ensure that we bind to
3249 +- * the correct interface.
3250 ++/* Many devices have QMI and DIAG functions which are distinguishable
3251 ++ * from other vendor specific functions by class, subclass and
3252 ++ * protocol all being 0xff. The DIAG function has exactly 2 endpoints
3253 ++ * and is silently rejected when probed.
3254 ++ *
3255 ++ * This makes it possible to match dynamically numbered QMI functions
3256 ++ * as seen on e.g. many Quectel modems.
3257 + */
3258 +-#define QMI_QUIRK_QUECTEL_DYNCFG(vend, prod) \
3259 ++#define QMI_MATCH_FF_FF_FF(vend, prod) \
3260 + USB_DEVICE_AND_INTERFACE_INFO(vend, prod, USB_CLASS_VENDOR_SPEC, \
3261 + USB_SUBCLASS_VENDOR_SPEC, 0xff), \
3262 +- .driver_info = (unsigned long)&qmi_wwan_info_quirk_quectel_dyncfg
3263 ++ .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr
3264 +
3265 + static const struct usb_device_id products[] = {
3266 + /* 1. CDC ECM like devices match on the control interface */
3267 +@@ -1059,10 +1052,10 @@ static const struct usb_device_id products[] = {
3268 + USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
3269 + .driver_info = (unsigned long)&qmi_wwan_info,
3270 + },
3271 +- {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
3272 +- {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */
3273 +- {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
3274 +- {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */
3275 ++ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
3276 ++ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */
3277 ++ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
3278 ++ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */
3279 +
3280 + /* 3. Combined interface devices matching on interface number */
3281 + {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
3282 +@@ -1363,6 +1356,7 @@ static const struct usb_device_id products[] = {
3283 + {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
3284 + {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
3285 + {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
3286 ++ {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */
3287 + {QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/
3288 + {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
3289 + {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
3290 +@@ -1454,7 +1448,6 @@ static int qmi_wwan_probe(struct usb_interface *intf,
3291 + {
3292 + struct usb_device_id *id = (struct usb_device_id *)prod;
3293 + struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc;
3294 +- const struct driver_info *info;
3295 +
3296 + /* Workaround to enable dynamic IDs. This disables usbnet
3297 + * blacklisting functionality. Which, if required, can be
3298 +@@ -1490,12 +1483,8 @@ static int qmi_wwan_probe(struct usb_interface *intf,
3299 + * different. Ignore the current interface if the number of endpoints
3300 + * equals the number for the diag interface (two).
3301 + */
3302 +- info = (void *)id->driver_info;
3303 +-
3304 +- if (info->data & QMI_WWAN_QUIRK_QUECTEL_DYNCFG) {
3305 +- if (desc->bNumEndpoints == 2)
3306 +- return -ENODEV;
3307 +- }
3308 ++ if (desc->bNumEndpoints == 2)
3309 ++ return -ENODEV;
3310 +
3311 + return usbnet_probe(intf, id);
3312 + }
3313 +diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
3314 +index 547ff3c578ee..fa5634af40f7 100644
3315 +--- a/drivers/net/wireless/marvell/mwifiex/main.h
3316 ++++ b/drivers/net/wireless/marvell/mwifiex/main.h
3317 +@@ -1295,19 +1295,6 @@ mwifiex_copy_rates(u8 *dest, u32 pos, u8 *src, int len)
3318 + return pos;
3319 + }
3320 +
3321 +-/* This function return interface number with the same bss_type.
3322 +- */
3323 +-static inline u8
3324 +-mwifiex_get_intf_num(struct mwifiex_adapter *adapter, u8 bss_type)
3325 +-{
3326 +- u8 i, num = 0;
3327 +-
3328 +- for (i = 0; i < adapter->priv_num; i++)
3329 +- if (adapter->priv[i] && adapter->priv[i]->bss_type == bss_type)
3330 +- num++;
3331 +- return num;
3332 +-}
3333 +-
3334 + /*
3335 + * This function returns the correct private structure pointer based
3336 + * upon the BSS type and BSS number.
3337 +diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c
3338 +index 7caf1d26124a..f8f282ce39bd 100644
3339 +--- a/drivers/net/wireless/marvell/mwifiex/tdls.c
3340 ++++ b/drivers/net/wireless/marvell/mwifiex/tdls.c
3341 +@@ -894,7 +894,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
3342 + u8 *peer, *pos, *end;
3343 + u8 i, action, basic;
3344 + u16 cap = 0;
3345 +- int ie_len = 0;
3346 ++ int ies_len = 0;
3347 +
3348 + if (len < (sizeof(struct ethhdr) + 3))
3349 + return;
3350 +@@ -916,7 +916,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
3351 + pos = buf + sizeof(struct ethhdr) + 4;
3352 + /* payload 1+ category 1 + action 1 + dialog 1 */
3353 + cap = get_unaligned_le16(pos);
3354 +- ie_len = len - sizeof(struct ethhdr) - TDLS_REQ_FIX_LEN;
3355 ++ ies_len = len - sizeof(struct ethhdr) - TDLS_REQ_FIX_LEN;
3356 + pos += 2;
3357 + break;
3358 +
3359 +@@ -926,7 +926,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
3360 + /* payload 1+ category 1 + action 1 + dialog 1 + status code 2*/
3361 + pos = buf + sizeof(struct ethhdr) + 6;
3362 + cap = get_unaligned_le16(pos);
3363 +- ie_len = len - sizeof(struct ethhdr) - TDLS_RESP_FIX_LEN;
3364 ++ ies_len = len - sizeof(struct ethhdr) - TDLS_RESP_FIX_LEN;
3365 + pos += 2;
3366 + break;
3367 +
3368 +@@ -934,7 +934,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
3369 + if (len < (sizeof(struct ethhdr) + TDLS_CONFIRM_FIX_LEN))
3370 + return;
3371 + pos = buf + sizeof(struct ethhdr) + TDLS_CONFIRM_FIX_LEN;
3372 +- ie_len = len - sizeof(struct ethhdr) - TDLS_CONFIRM_FIX_LEN;
3373 ++ ies_len = len - sizeof(struct ethhdr) - TDLS_CONFIRM_FIX_LEN;
3374 + break;
3375 + default:
3376 + mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS frame type.\n");
3377 +@@ -947,33 +947,33 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
3378 +
3379 + sta_ptr->tdls_cap.capab = cpu_to_le16(cap);
3380 +
3381 +- for (end = pos + ie_len; pos + 1 < end; pos += 2 + pos[1]) {
3382 +- if (pos + 2 + pos[1] > end)
3383 ++ for (end = pos + ies_len; pos + 1 < end; pos += 2 + pos[1]) {
3384 ++ u8 ie_len = pos[1];
3385 ++
3386 ++ if (pos + 2 + ie_len > end)
3387 + break;
3388 +
3389 + switch (*pos) {
3390 + case WLAN_EID_SUPP_RATES:
3391 +- if (pos[1] > 32)
3392 ++ if (ie_len > sizeof(sta_ptr->tdls_cap.rates))
3393 + return;
3394 +- sta_ptr->tdls_cap.rates_len = pos[1];
3395 +- for (i = 0; i < pos[1]; i++)
3396 ++ sta_ptr->tdls_cap.rates_len = ie_len;
3397 ++ for (i = 0; i < ie_len; i++)
3398 + sta_ptr->tdls_cap.rates[i] = pos[i + 2];
3399 + break;
3400 +
3401 + case WLAN_EID_EXT_SUPP_RATES:
3402 +- if (pos[1] > 32)
3403 ++ if (ie_len > sizeof(sta_ptr->tdls_cap.rates))
3404 + return;
3405 + basic = sta_ptr->tdls_cap.rates_len;
3406 +- if (pos[1] > 32 - basic)
3407 ++ if (ie_len > sizeof(sta_ptr->tdls_cap.rates) - basic)
3408 + return;
3409 +- for (i = 0; i < pos[1]; i++)
3410 ++ for (i = 0; i < ie_len; i++)
3411 + sta_ptr->tdls_cap.rates[basic + i] = pos[i + 2];
3412 +- sta_ptr->tdls_cap.rates_len += pos[1];
3413 ++ sta_ptr->tdls_cap.rates_len += ie_len;
3414 + break;
3415 + case WLAN_EID_HT_CAPABILITY:
3416 +- if (pos > end - sizeof(struct ieee80211_ht_cap) - 2)
3417 +- return;
3418 +- if (pos[1] != sizeof(struct ieee80211_ht_cap))
3419 ++ if (ie_len != sizeof(struct ieee80211_ht_cap))
3420 + return;
3421 + /* copy the ie's value into ht_capb*/
3422 + memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos + 2,
3423 +@@ -981,59 +981,45 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
3424 + sta_ptr->is_11n_enabled = 1;
3425 + break;
3426 + case WLAN_EID_HT_OPERATION:
3427 +- if (pos > end -
3428 +- sizeof(struct ieee80211_ht_operation) - 2)
3429 +- return;
3430 +- if (pos[1] != sizeof(struct ieee80211_ht_operation))
3431 ++ if (ie_len != sizeof(struct ieee80211_ht_operation))
3432 + return;
3433 + /* copy the ie's value into ht_oper*/
3434 + memcpy(&sta_ptr->tdls_cap.ht_oper, pos + 2,
3435 + sizeof(struct ieee80211_ht_operation));
3436 + break;
3437 + case WLAN_EID_BSS_COEX_2040:
3438 +- if (pos > end - 3)
3439 +- return;
3440 +- if (pos[1] != 1)
3441 ++ if (ie_len != sizeof(pos[2]))
3442 + return;
3443 + sta_ptr->tdls_cap.coex_2040 = pos[2];
3444 + break;
3445 + case WLAN_EID_EXT_CAPABILITY:
3446 +- if (pos > end - sizeof(struct ieee_types_header))
3447 +- return;
3448 +- if (pos[1] < sizeof(struct ieee_types_header))
3449 ++ if (ie_len < sizeof(struct ieee_types_header))
3450 + return;
3451 +- if (pos[1] > 8)
3452 ++ if (ie_len > 8)
3453 + return;
3454 + memcpy((u8 *)&sta_ptr->tdls_cap.extcap, pos,
3455 + sizeof(struct ieee_types_header) +
3456 +- min_t(u8, pos[1], 8));
3457 ++ min_t(u8, ie_len, 8));
3458 + break;
3459 + case WLAN_EID_RSN:
3460 +- if (pos > end - sizeof(struct ieee_types_header))
3461 ++ if (ie_len < sizeof(struct ieee_types_header))
3462 + return;
3463 +- if (pos[1] < sizeof(struct ieee_types_header))
3464 +- return;
3465 +- if (pos[1] > IEEE_MAX_IE_SIZE -
3466 ++ if (ie_len > IEEE_MAX_IE_SIZE -
3467 + sizeof(struct ieee_types_header))
3468 + return;
3469 + memcpy((u8 *)&sta_ptr->tdls_cap.rsn_ie, pos,
3470 + sizeof(struct ieee_types_header) +
3471 +- min_t(u8, pos[1], IEEE_MAX_IE_SIZE -
3472 ++ min_t(u8, ie_len, IEEE_MAX_IE_SIZE -
3473 + sizeof(struct ieee_types_header)));
3474 + break;
3475 + case WLAN_EID_QOS_CAPA:
3476 +- if (pos > end - 3)
3477 +- return;
3478 +- if (pos[1] != 1)
3479 ++ if (ie_len != sizeof(pos[2]))
3480 + return;
3481 + sta_ptr->tdls_cap.qos_info = pos[2];
3482 + break;
3483 + case WLAN_EID_VHT_OPERATION:
3484 + if (priv->adapter->is_hw_11ac_capable) {
3485 +- if (pos > end -
3486 +- sizeof(struct ieee80211_vht_operation) - 2)
3487 +- return;
3488 +- if (pos[1] !=
3489 ++ if (ie_len !=
3490 + sizeof(struct ieee80211_vht_operation))
3491 + return;
3492 + /* copy the ie's value into vhtoper*/
3493 +@@ -1043,10 +1029,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
3494 + break;
3495 + case WLAN_EID_VHT_CAPABILITY:
3496 + if (priv->adapter->is_hw_11ac_capable) {
3497 +- if (pos > end -
3498 +- sizeof(struct ieee80211_vht_cap) - 2)
3499 +- return;
3500 +- if (pos[1] != sizeof(struct ieee80211_vht_cap))
3501 ++ if (ie_len != sizeof(struct ieee80211_vht_cap))
3502 + return;
3503 + /* copy the ie's value into vhtcap*/
3504 + memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos + 2,
3505 +@@ -1056,9 +1039,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
3506 + break;
3507 + case WLAN_EID_AID:
3508 + if (priv->adapter->is_hw_11ac_capable) {
3509 +- if (pos > end - 4)
3510 +- return;
3511 +- if (pos[1] != 2)
3512 ++ if (ie_len != sizeof(u16))
3513 + return;
3514 + sta_ptr->tdls_cap.aid =
3515 + get_unaligned_le16((pos + 2));
3516 +diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
3517 +index 720c89d6066e..4ac8cb262559 100644
3518 +--- a/drivers/nfc/pn544/i2c.c
3519 ++++ b/drivers/nfc/pn544/i2c.c
3520 +@@ -225,6 +225,7 @@ static void pn544_hci_i2c_platform_init(struct pn544_i2c_phy *phy)
3521 +
3522 + out:
3523 + gpiod_set_value_cansleep(phy->gpiod_en, !phy->en_polarity);
3524 ++ usleep_range(10000, 15000);
3525 + }
3526 +
3527 + static void pn544_hci_i2c_enable_mode(struct pn544_i2c_phy *phy, int run_mode)
3528 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
3529 +index 641c07347e8d..ada59df642d2 100644
3530 +--- a/drivers/nvme/host/core.c
3531 ++++ b/drivers/nvme/host/core.c
3532 +@@ -66,8 +66,8 @@ MODULE_PARM_DESC(streams, "turn on support for Streams write directives");
3533 + * nvme_reset_wq - hosts nvme reset works
3534 + * nvme_delete_wq - hosts nvme delete works
3535 + *
3536 +- * nvme_wq will host works such are scan, aen handling, fw activation,
3537 +- * keep-alive error recovery, periodic reconnects etc. nvme_reset_wq
3538 ++ * nvme_wq will host works such as scan, aen handling, fw activation,
3539 ++ * keep-alive, periodic reconnects etc. nvme_reset_wq
3540 + * runs reset works which also flush works hosted on nvme_wq for
3541 + * serialization purposes. nvme_delete_wq host controller deletion
3542 + * works which flush reset works for serialization.
3543 +@@ -976,7 +976,7 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
3544 + startka = true;
3545 + spin_unlock_irqrestore(&ctrl->lock, flags);
3546 + if (startka)
3547 +- schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
3548 ++ queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
3549 + }
3550 +
3551 + static int nvme_keep_alive(struct nvme_ctrl *ctrl)
3552 +@@ -1006,7 +1006,7 @@ static void nvme_keep_alive_work(struct work_struct *work)
3553 + dev_dbg(ctrl->device,
3554 + "reschedule traffic based keep-alive timer\n");
3555 + ctrl->comp_seen = false;
3556 +- schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
3557 ++ queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
3558 + return;
3559 + }
3560 +
3561 +@@ -1023,7 +1023,7 @@ static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
3562 + if (unlikely(ctrl->kato == 0))
3563 + return;
3564 +
3565 +- schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
3566 ++ queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
3567 + }
3568 +
3569 + void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
3570 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
3571 +index da392b50f73e..bb5e13ad1aff 100644
3572 +--- a/drivers/nvme/host/pci.c
3573 ++++ b/drivers/nvme/host/pci.c
3574 +@@ -1078,9 +1078,9 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx)
3575 +
3576 + spin_lock(&nvmeq->cq_poll_lock);
3577 + found = nvme_process_cq(nvmeq, &start, &end, -1);
3578 ++ nvme_complete_cqes(nvmeq, start, end);
3579 + spin_unlock(&nvmeq->cq_poll_lock);
3580 +
3581 +- nvme_complete_cqes(nvmeq, start, end);
3582 + return found;
3583 + }
3584 +
3585 +@@ -1401,6 +1401,23 @@ static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
3586 + nvme_poll_irqdisable(nvmeq, -1);
3587 + }
3588 +
3589 ++/*
3590 ++ * Called only on a device that has been disabled and after all other threads
3591 ++ * that can check this device's completion queues have synced. This is the
3592 ++ * last chance for the driver to see a natural completion before
3593 ++ * nvme_cancel_request() terminates all incomplete requests.
3594 ++ */
3595 ++static void nvme_reap_pending_cqes(struct nvme_dev *dev)
3596 ++{
3597 ++ u16 start, end;
3598 ++ int i;
3599 ++
3600 ++ for (i = dev->ctrl.queue_count - 1; i > 0; i--) {
3601 ++ nvme_process_cq(&dev->queues[i], &start, &end, -1);
3602 ++ nvme_complete_cqes(&dev->queues[i], start, end);
3603 ++ }
3604 ++}
3605 ++
3606 + static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
3607 + int entry_size)
3608 + {
3609 +@@ -2235,11 +2252,6 @@ static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
3610 + if (timeout == 0)
3611 + return false;
3612 +
3613 +- /* handle any remaining CQEs */
3614 +- if (opcode == nvme_admin_delete_cq &&
3615 +- !test_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags))
3616 +- nvme_poll_irqdisable(nvmeq, -1);
3617 +-
3618 + sent--;
3619 + if (nr_queues)
3620 + goto retry;
3621 +@@ -2428,6 +2440,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
3622 + nvme_suspend_io_queues(dev);
3623 + nvme_suspend_queue(&dev->queues[0]);
3624 + nvme_pci_disable(dev);
3625 ++ nvme_reap_pending_cqes(dev);
3626 +
3627 + blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
3628 + blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl);
3629 +diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
3630 +index 2a47c6c5007e..3e85c5cacefd 100644
3631 +--- a/drivers/nvme/host/rdma.c
3632 ++++ b/drivers/nvme/host/rdma.c
3633 +@@ -1088,7 +1088,7 @@ static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
3634 + if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
3635 + return;
3636 +
3637 +- queue_work(nvme_wq, &ctrl->err_work);
3638 ++ queue_work(nvme_reset_wq, &ctrl->err_work);
3639 + }
3640 +
3641 + static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
3642 +diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
3643 +index 6d43b23a0fc8..49d4373b84eb 100644
3644 +--- a/drivers/nvme/host/tcp.c
3645 ++++ b/drivers/nvme/host/tcp.c
3646 +@@ -422,7 +422,7 @@ static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
3647 + if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
3648 + return;
3649 +
3650 +- queue_work(nvme_wq, &to_tcp_ctrl(ctrl)->err_work);
3651 ++ queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
3652 + }
3653 +
3654 + static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
3655 +@@ -1054,7 +1054,12 @@ static void nvme_tcp_io_work(struct work_struct *w)
3656 + } else if (unlikely(result < 0)) {
3657 + dev_err(queue->ctrl->ctrl.device,
3658 + "failed to send request %d\n", result);
3659 +- if (result != -EPIPE)
3660 ++
3661 ++ /*
3662 ++ * Fail the request unless peer closed the connection,
3663 ++ * in which case error recovery flow will complete all.
3664 ++ */
3665 ++ if ((result != -EPIPE) && (result != -ECONNRESET))
3666 + nvme_tcp_fail_request(queue->request);
3667 + nvme_tcp_done_send_req(queue);
3668 + return;
3669 +diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
3670 +index d704eccc548f..f01a57e5a5f3 100644
3671 +--- a/drivers/perf/arm_smmuv3_pmu.c
3672 ++++ b/drivers/perf/arm_smmuv3_pmu.c
3673 +@@ -771,7 +771,7 @@ static int smmu_pmu_probe(struct platform_device *pdev)
3674 + smmu_pmu->reloc_base = smmu_pmu->reg_base;
3675 + }
3676 +
3677 +- irq = platform_get_irq(pdev, 0);
3678 ++ irq = platform_get_irq_optional(pdev, 0);
3679 + if (irq > 0)
3680 + smmu_pmu->irq = irq;
3681 +
3682 +diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c
3683 +index e36fcad668a6..88a3c5690fea 100644
3684 +--- a/drivers/pwm/pwm-omap-dmtimer.c
3685 ++++ b/drivers/pwm/pwm-omap-dmtimer.c
3686 +@@ -256,7 +256,7 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
3687 + if (!timer_pdev) {
3688 + dev_err(&pdev->dev, "Unable to find Timer pdev\n");
3689 + ret = -ENODEV;
3690 +- goto put;
3691 ++ goto err_find_timer_pdev;
3692 + }
3693 +
3694 + timer_pdata = dev_get_platdata(&timer_pdev->dev);
3695 +@@ -264,7 +264,7 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
3696 + dev_dbg(&pdev->dev,
3697 + "dmtimer pdata structure NULL, deferring probe\n");
3698 + ret = -EPROBE_DEFER;
3699 +- goto put;
3700 ++ goto err_platdata;
3701 + }
3702 +
3703 + pdata = timer_pdata->timer_ops;
3704 +@@ -283,19 +283,19 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
3705 + !pdata->write_counter) {
3706 + dev_err(&pdev->dev, "Incomplete dmtimer pdata structure\n");
3707 + ret = -EINVAL;
3708 +- goto put;
3709 ++ goto err_platdata;
3710 + }
3711 +
3712 + if (!of_get_property(timer, "ti,timer-pwm", NULL)) {
3713 + dev_err(&pdev->dev, "Missing ti,timer-pwm capability\n");
3714 + ret = -ENODEV;
3715 +- goto put;
3716 ++ goto err_timer_property;
3717 + }
3718 +
3719 + dm_timer = pdata->request_by_node(timer);
3720 + if (!dm_timer) {
3721 + ret = -EPROBE_DEFER;
3722 +- goto put;
3723 ++ goto err_request_timer;
3724 + }
3725 +
3726 + omap = devm_kzalloc(&pdev->dev, sizeof(*omap), GFP_KERNEL);
3727 +@@ -352,7 +352,14 @@ err_pwmchip_add:
3728 + err_alloc_omap:
3729 +
3730 + pdata->free(dm_timer);
3731 +-put:
3732 ++err_request_timer:
3733 ++
3734 ++err_timer_property:
3735 ++err_platdata:
3736 ++
3737 ++ put_device(&timer_pdev->dev);
3738 ++err_find_timer_pdev:
3739 ++
3740 + of_node_put(timer);
3741 +
3742 + return ret;
3743 +@@ -372,6 +379,8 @@ static int pwm_omap_dmtimer_remove(struct platform_device *pdev)
3744 +
3745 + omap->pdata->free(omap->dm_timer);
3746 +
3747 ++ put_device(&omap->dm_timer_pdev->dev);
3748 ++
3749 + mutex_destroy(&omap->mutex);
3750 +
3751 + return 0;
3752 +diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
3753 +index bb35ba4a8d24..4348fdff1c61 100644
3754 +--- a/drivers/s390/crypto/ap_bus.h
3755 ++++ b/drivers/s390/crypto/ap_bus.h
3756 +@@ -162,7 +162,7 @@ struct ap_card {
3757 + unsigned int functions; /* AP device function bitfield. */
3758 + int queue_depth; /* AP queue depth.*/
3759 + int id; /* AP card number. */
3760 +- atomic_t total_request_count; /* # requests ever for this AP device.*/
3761 ++ atomic64_t total_request_count; /* # requests ever for this AP device.*/
3762 + };
3763 +
3764 + #define to_ap_card(x) container_of((x), struct ap_card, ap_dev.device)
3765 +@@ -179,7 +179,7 @@ struct ap_queue {
3766 + enum ap_state state; /* State of the AP device. */
3767 + int pendingq_count; /* # requests on pendingq list. */
3768 + int requestq_count; /* # requests on requestq list. */
3769 +- int total_request_count; /* # requests ever for this AP device.*/
3770 ++ u64 total_request_count; /* # requests ever for this AP device.*/
3771 + int request_timeout; /* Request timeout in jiffies. */
3772 + struct timer_list timeout; /* Timer for request timeouts. */
3773 + struct list_head pendingq; /* List of message sent to AP queue. */
3774 +diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
3775 +index 63b4cc6cd7e5..e85bfca1ed16 100644
3776 +--- a/drivers/s390/crypto/ap_card.c
3777 ++++ b/drivers/s390/crypto/ap_card.c
3778 +@@ -63,13 +63,13 @@ static ssize_t request_count_show(struct device *dev,
3779 + char *buf)
3780 + {
3781 + struct ap_card *ac = to_ap_card(dev);
3782 +- unsigned int req_cnt;
3783 ++ u64 req_cnt;
3784 +
3785 + req_cnt = 0;
3786 + spin_lock_bh(&ap_list_lock);
3787 +- req_cnt = atomic_read(&ac->total_request_count);
3788 ++ req_cnt = atomic64_read(&ac->total_request_count);
3789 + spin_unlock_bh(&ap_list_lock);
3790 +- return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
3791 ++ return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
3792 + }
3793 +
3794 + static ssize_t request_count_store(struct device *dev,
3795 +@@ -83,7 +83,7 @@ static ssize_t request_count_store(struct device *dev,
3796 + for_each_ap_queue(aq, ac)
3797 + aq->total_request_count = 0;
3798 + spin_unlock_bh(&ap_list_lock);
3799 +- atomic_set(&ac->total_request_count, 0);
3800 ++ atomic64_set(&ac->total_request_count, 0);
3801 +
3802 + return count;
3803 + }
3804 +diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
3805 +index 37c3bdc3642d..a317ab484932 100644
3806 +--- a/drivers/s390/crypto/ap_queue.c
3807 ++++ b/drivers/s390/crypto/ap_queue.c
3808 +@@ -479,12 +479,12 @@ static ssize_t request_count_show(struct device *dev,
3809 + char *buf)
3810 + {
3811 + struct ap_queue *aq = to_ap_queue(dev);
3812 +- unsigned int req_cnt;
3813 ++ u64 req_cnt;
3814 +
3815 + spin_lock_bh(&aq->lock);
3816 + req_cnt = aq->total_request_count;
3817 + spin_unlock_bh(&aq->lock);
3818 +- return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
3819 ++ return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
3820 + }
3821 +
3822 + static ssize_t request_count_store(struct device *dev,
3823 +@@ -676,7 +676,7 @@ void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
3824 + list_add_tail(&ap_msg->list, &aq->requestq);
3825 + aq->requestq_count++;
3826 + aq->total_request_count++;
3827 +- atomic_inc(&aq->card->total_request_count);
3828 ++ atomic64_inc(&aq->card->total_request_count);
3829 + /* Send/receive as many request from the queue as possible. */
3830 + ap_wait(ap_sm_event_loop(aq, AP_EVENT_POLL));
3831 + spin_unlock_bh(&aq->lock);
3832 +diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
3833 +index 9157e728a362..7fa0262e91af 100644
3834 +--- a/drivers/s390/crypto/zcrypt_api.c
3835 ++++ b/drivers/s390/crypto/zcrypt_api.c
3836 +@@ -605,8 +605,8 @@ static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
3837 + weight += atomic_read(&zc->load);
3838 + pref_weight += atomic_read(&pref_zc->load);
3839 + if (weight == pref_weight)
3840 +- return atomic_read(&zc->card->total_request_count) >
3841 +- atomic_read(&pref_zc->card->total_request_count);
3842 ++ return atomic64_read(&zc->card->total_request_count) >
3843 ++ atomic64_read(&pref_zc->card->total_request_count);
3844 + return weight > pref_weight;
3845 + }
3846 +
3847 +@@ -1216,11 +1216,12 @@ static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
3848 + spin_unlock(&zcrypt_list_lock);
3849 + }
3850 +
3851 +-static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters)
3852 ++static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters)
3853 + {
3854 + struct zcrypt_card *zc;
3855 + struct zcrypt_queue *zq;
3856 + int card;
3857 ++ u64 cnt;
3858 +
3859 + memset(reqcnt, 0, sizeof(int) * max_adapters);
3860 + spin_lock(&zcrypt_list_lock);
3861 +@@ -1232,8 +1233,9 @@ static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters)
3862 + || card >= max_adapters)
3863 + continue;
3864 + spin_lock(&zq->queue->lock);
3865 +- reqcnt[card] = zq->queue->total_request_count;
3866 ++ cnt = zq->queue->total_request_count;
3867 + spin_unlock(&zq->queue->lock);
3868 ++ reqcnt[card] = (cnt < UINT_MAX) ? (u32) cnt : UINT_MAX;
3869 + }
3870 + }
3871 + local_bh_enable();
3872 +@@ -1411,9 +1413,9 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
3873 + return 0;
3874 + }
3875 + case ZCRYPT_PERDEV_REQCNT: {
3876 +- int *reqcnt;
3877 ++ u32 *reqcnt;
3878 +
3879 +- reqcnt = kcalloc(AP_DEVICES, sizeof(int), GFP_KERNEL);
3880 ++ reqcnt = kcalloc(AP_DEVICES, sizeof(u32), GFP_KERNEL);
3881 + if (!reqcnt)
3882 + return -ENOMEM;
3883 + zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
3884 +@@ -1470,7 +1472,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
3885 + }
3886 + case Z90STAT_PERDEV_REQCNT: {
3887 + /* the old ioctl supports only 64 adapters */
3888 +- int reqcnt[MAX_ZDEV_CARDIDS];
3889 ++ u32 reqcnt[MAX_ZDEV_CARDIDS];
3890 +
3891 + zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
3892 + if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt)))
3893 +diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
3894 +index 29facb913671..10edfd6fc930 100644
3895 +--- a/drivers/s390/net/qeth_core_main.c
3896 ++++ b/drivers/s390/net/qeth_core_main.c
3897 +@@ -5142,7 +5142,7 @@ next_packet:
3898 + }
3899 +
3900 + use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
3901 +- ((skb_len >= card->options.rx_sg_cb) &&
3902 ++ (skb_len > card->options.rx_sg_cb &&
3903 + !atomic_read(&card->force_alloc_skb) &&
3904 + !IS_OSN(card));
3905 +
3906 +diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
3907 +index 47d37e75dda6..e26ad80ddfa3 100644
3908 +--- a/drivers/s390/net/qeth_l2_main.c
3909 ++++ b/drivers/s390/net/qeth_l2_main.c
3910 +@@ -1815,15 +1815,14 @@ int qeth_l2_vnicc_set_state(struct qeth_card *card, u32 vnicc, bool state)
3911 +
3912 + QETH_CARD_TEXT(card, 2, "vniccsch");
3913 +
3914 +- /* do not change anything if BridgePort is enabled */
3915 +- if (qeth_bridgeport_is_in_use(card))
3916 +- return -EBUSY;
3917 +-
3918 + /* check if characteristic and enable/disable are supported */
3919 + if (!(card->options.vnicc.sup_chars & vnicc) ||
3920 + !(card->options.vnicc.set_char_sup & vnicc))
3921 + return -EOPNOTSUPP;
3922 +
3923 ++ if (qeth_bridgeport_is_in_use(card))
3924 ++ return -EBUSY;
3925 ++
3926 + /* set enable/disable command and store wanted characteristic */
3927 + if (state) {
3928 + cmd = IPA_VNICC_ENABLE;
3929 +@@ -1869,14 +1868,13 @@ int qeth_l2_vnicc_get_state(struct qeth_card *card, u32 vnicc, bool *state)
3930 +
3931 + QETH_CARD_TEXT(card, 2, "vniccgch");
3932 +
3933 +- /* do not get anything if BridgePort is enabled */
3934 +- if (qeth_bridgeport_is_in_use(card))
3935 +- return -EBUSY;
3936 +-
3937 + /* check if characteristic is supported */
3938 + if (!(card->options.vnicc.sup_chars & vnicc))
3939 + return -EOPNOTSUPP;
3940 +
3941 ++ if (qeth_bridgeport_is_in_use(card))
3942 ++ return -EBUSY;
3943 ++
3944 + /* if card is ready, query current VNICC state */
3945 + if (qeth_card_hw_is_reachable(card))
3946 + rc = qeth_l2_vnicc_query_chars(card);
3947 +@@ -1894,15 +1892,14 @@ int qeth_l2_vnicc_set_timeout(struct qeth_card *card, u32 timeout)
3948 +
3949 + QETH_CARD_TEXT(card, 2, "vniccsto");
3950 +
3951 +- /* do not change anything if BridgePort is enabled */
3952 +- if (qeth_bridgeport_is_in_use(card))
3953 +- return -EBUSY;
3954 +-
3955 + /* check if characteristic and set_timeout are supported */
3956 + if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) ||
3957 + !(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING))
3958 + return -EOPNOTSUPP;
3959 +
3960 ++ if (qeth_bridgeport_is_in_use(card))
3961 ++ return -EBUSY;
3962 ++
3963 + /* do we need to do anything? */
3964 + if (card->options.vnicc.learning_timeout == timeout)
3965 + return rc;
3966 +@@ -1931,14 +1928,14 @@ int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout)
3967 +
3968 + QETH_CARD_TEXT(card, 2, "vniccgto");
3969 +
3970 +- /* do not get anything if BridgePort is enabled */
3971 +- if (qeth_bridgeport_is_in_use(card))
3972 +- return -EBUSY;
3973 +-
3974 + /* check if characteristic and get_timeout are supported */
3975 + if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) ||
3976 + !(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING))
3977 + return -EOPNOTSUPP;
3978 ++
3979 ++ if (qeth_bridgeport_is_in_use(card))
3980 ++ return -EBUSY;
3981 ++
3982 + /* if card is ready, get timeout. Otherwise, just return stored value */
3983 + *timeout = card->options.vnicc.learning_timeout;
3984 + if (qeth_card_hw_is_reachable(card))
3985 +diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
3986 +index 2b1e4da1944f..4bfb79f20588 100644
3987 +--- a/drivers/s390/scsi/zfcp_fsf.h
3988 ++++ b/drivers/s390/scsi/zfcp_fsf.h
3989 +@@ -410,7 +410,7 @@ struct fsf_qtcb_bottom_port {
3990 + u8 cb_util;
3991 + u8 a_util;
3992 + u8 res2;
3993 +- u16 temperature;
3994 ++ s16 temperature;
3995 + u16 vcc;
3996 + u16 tx_bias;
3997 + u16 tx_power;
3998 +diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
3999 +index 494b9fe9cc94..a711a0d15100 100644
4000 +--- a/drivers/s390/scsi/zfcp_sysfs.c
4001 ++++ b/drivers/s390/scsi/zfcp_sysfs.c
4002 +@@ -800,7 +800,7 @@ static ZFCP_DEV_ATTR(adapter_diag, b2b_credit, 0400,
4003 + static ZFCP_DEV_ATTR(adapter_diag_sfp, _name, 0400, \
4004 + zfcp_sysfs_adapter_diag_sfp_##_name##_show, NULL)
4005 +
4006 +-ZFCP_DEFINE_DIAG_SFP_ATTR(temperature, temperature, 5, "%hu");
4007 ++ZFCP_DEFINE_DIAG_SFP_ATTR(temperature, temperature, 6, "%hd");
4008 + ZFCP_DEFINE_DIAG_SFP_ATTR(vcc, vcc, 5, "%hu");
4009 + ZFCP_DEFINE_DIAG_SFP_ATTR(tx_bias, tx_bias, 5, "%hu");
4010 + ZFCP_DEFINE_DIAG_SFP_ATTR(tx_power, tx_power, 5, "%hu");
4011 +diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
4012 +index e0bd4cf17230..5b75a65103bd 100644
4013 +--- a/drivers/scsi/sd_zbc.c
4014 ++++ b/drivers/scsi/sd_zbc.c
4015 +@@ -161,6 +161,7 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
4016 + unsigned int nr_zones, report_zones_cb cb, void *data)
4017 + {
4018 + struct scsi_disk *sdkp = scsi_disk(disk);
4019 ++ sector_t capacity = logical_to_sectors(sdkp->device, sdkp->capacity);
4020 + unsigned int nr, i;
4021 + unsigned char *buf;
4022 + size_t offset, buflen = 0;
4023 +@@ -171,11 +172,15 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
4024 + /* Not a zoned device */
4025 + return -EOPNOTSUPP;
4026 +
4027 ++ if (!capacity)
4028 ++ /* Device gone or invalid */
4029 ++ return -ENODEV;
4030 ++
4031 + buf = sd_zbc_alloc_report_buffer(sdkp, nr_zones, &buflen);
4032 + if (!buf)
4033 + return -ENOMEM;
4034 +
4035 +- while (zone_idx < nr_zones && sector < get_capacity(disk)) {
4036 ++ while (zone_idx < nr_zones && sector < capacity) {
4037 + ret = sd_zbc_do_report_zones(sdkp, buf, buflen,
4038 + sectors_to_logical(sdkp->device, sector), true);
4039 + if (ret)
4040 +diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c
4041 +index b8daaf5b7291..efd158b4607c 100644
4042 +--- a/drivers/soc/tegra/fuse/fuse-tegra30.c
4043 ++++ b/drivers/soc/tegra/fuse/fuse-tegra30.c
4044 +@@ -36,7 +36,8 @@
4045 + defined(CONFIG_ARCH_TEGRA_124_SOC) || \
4046 + defined(CONFIG_ARCH_TEGRA_132_SOC) || \
4047 + defined(CONFIG_ARCH_TEGRA_210_SOC) || \
4048 +- defined(CONFIG_ARCH_TEGRA_186_SOC)
4049 ++ defined(CONFIG_ARCH_TEGRA_186_SOC) || \
4050 ++ defined(CONFIG_ARCH_TEGRA_194_SOC)
4051 + static u32 tegra30_fuse_read_early(struct tegra_fuse *fuse, unsigned int offset)
4052 + {
4053 + if (WARN_ON(!fuse->base))
4054 +diff --git a/drivers/thermal/broadcom/brcmstb_thermal.c b/drivers/thermal/broadcom/brcmstb_thermal.c
4055 +index 5825ac581f56..680f1a070606 100644
4056 +--- a/drivers/thermal/broadcom/brcmstb_thermal.c
4057 ++++ b/drivers/thermal/broadcom/brcmstb_thermal.c
4058 +@@ -49,7 +49,7 @@
4059 + #define AVS_TMON_TP_TEST_ENABLE 0x20
4060 +
4061 + /* Default coefficients */
4062 +-#define AVS_TMON_TEMP_SLOPE -487
4063 ++#define AVS_TMON_TEMP_SLOPE 487
4064 + #define AVS_TMON_TEMP_OFFSET 410040
4065 +
4066 + /* HW related temperature constants */
4067 +@@ -108,23 +108,12 @@ struct brcmstb_thermal_priv {
4068 + struct thermal_zone_device *thermal;
4069 + };
4070 +
4071 +-static void avs_tmon_get_coeffs(struct thermal_zone_device *tz, int *slope,
4072 +- int *offset)
4073 +-{
4074 +- *slope = thermal_zone_get_slope(tz);
4075 +- *offset = thermal_zone_get_offset(tz);
4076 +-}
4077 +-
4078 + /* Convert a HW code to a temperature reading (millidegree celsius) */
4079 + static inline int avs_tmon_code_to_temp(struct thermal_zone_device *tz,
4080 + u32 code)
4081 + {
4082 +- const int val = code & AVS_TMON_TEMP_MASK;
4083 +- int slope, offset;
4084 +-
4085 +- avs_tmon_get_coeffs(tz, &slope, &offset);
4086 +-
4087 +- return slope * val + offset;
4088 ++ return (AVS_TMON_TEMP_OFFSET -
4089 ++ (int)((code & AVS_TMON_TEMP_MAX) * AVS_TMON_TEMP_SLOPE));
4090 + }
4091 +
4092 + /*
4093 +@@ -136,20 +125,18 @@ static inline int avs_tmon_code_to_temp(struct thermal_zone_device *tz,
4094 + static inline u32 avs_tmon_temp_to_code(struct thermal_zone_device *tz,
4095 + int temp, bool low)
4096 + {
4097 +- int slope, offset;
4098 +-
4099 + if (temp < AVS_TMON_TEMP_MIN)
4100 +- return AVS_TMON_TEMP_MAX; /* Maximum code value */
4101 +-
4102 +- avs_tmon_get_coeffs(tz, &slope, &offset);
4103 ++ return AVS_TMON_TEMP_MAX; /* Maximum code value */
4104 +
4105 +- if (temp >= offset)
4106 ++ if (temp >= AVS_TMON_TEMP_OFFSET)
4107 + return 0; /* Minimum code value */
4108 +
4109 + if (low)
4110 +- return (u32)(DIV_ROUND_UP(offset - temp, abs(slope)));
4111 ++ return (u32)(DIV_ROUND_UP(AVS_TMON_TEMP_OFFSET - temp,
4112 ++ AVS_TMON_TEMP_SLOPE));
4113 + else
4114 +- return (u32)((offset - temp) / abs(slope));
4115 ++ return (u32)((AVS_TMON_TEMP_OFFSET - temp) /
4116 ++ AVS_TMON_TEMP_SLOPE);
4117 + }
4118 +
4119 + static int brcmstb_get_temp(void *data, int *temp)
4120 +diff --git a/drivers/thermal/db8500_thermal.c b/drivers/thermal/db8500_thermal.c
4121 +index 372dbbaaafb8..21d4d6e6409a 100644
4122 +--- a/drivers/thermal/db8500_thermal.c
4123 ++++ b/drivers/thermal/db8500_thermal.c
4124 +@@ -152,8 +152,8 @@ static irqreturn_t prcmu_high_irq_handler(int irq, void *irq_data)
4125 + db8500_thermal_update_config(th, idx, THERMAL_TREND_RAISING,
4126 + next_low, next_high);
4127 +
4128 +- dev_info(&th->tz->device,
4129 +- "PRCMU set max %ld, min %ld\n", next_high, next_low);
4130 ++ dev_dbg(&th->tz->device,
4131 ++ "PRCMU set max %ld, min %ld\n", next_high, next_low);
4132 + } else if (idx == num_points - 1)
4133 + /* So we roof out 1 degree over the max point */
4134 + th->interpolated_temp = db8500_thermal_points[idx] + 1;
4135 +diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
4136 +index e158159671fa..18e205eeb9af 100644
4137 +--- a/drivers/vhost/net.c
4138 ++++ b/drivers/vhost/net.c
4139 +@@ -1414,10 +1414,6 @@ static int vhost_net_release(struct inode *inode, struct file *f)
4140 +
4141 + static struct socket *get_raw_socket(int fd)
4142 + {
4143 +- struct {
4144 +- struct sockaddr_ll sa;
4145 +- char buf[MAX_ADDR_LEN];
4146 +- } uaddr;
4147 + int r;
4148 + struct socket *sock = sockfd_lookup(fd, &r);
4149 +
4150 +@@ -1430,11 +1426,7 @@ static struct socket *get_raw_socket(int fd)
4151 + goto err;
4152 + }
4153 +
4154 +- r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa, 0);
4155 +- if (r < 0)
4156 +- goto err;
4157 +-
4158 +- if (uaddr.sa.sll_family != AF_PACKET) {
4159 ++ if (sock->sk->sk_family != AF_PACKET) {
4160 + r = -EPFNOSUPPORT;
4161 + goto err;
4162 + }
4163 +diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
4164 +index b069349b52f5..e1b1fcfc02af 100644
4165 +--- a/drivers/watchdog/wdat_wdt.c
4166 ++++ b/drivers/watchdog/wdat_wdt.c
4167 +@@ -389,7 +389,7 @@ static int wdat_wdt_probe(struct platform_device *pdev)
4168 +
4169 + memset(&r, 0, sizeof(r));
4170 + r.start = gas->address;
4171 +- r.end = r.start + gas->access_width - 1;
4172 ++ r.end = r.start + ACPI_ACCESS_BYTE_WIDTH(gas->access_width) - 1;
4173 + if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
4174 + r.flags = IORESOURCE_MEM;
4175 + } else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
4176 +diff --git a/fs/ceph/file.c b/fs/ceph/file.c
4177 +index 11929d2bb594..cd09e63d682b 100644
4178 +--- a/fs/ceph/file.c
4179 ++++ b/fs/ceph/file.c
4180 +@@ -1418,6 +1418,7 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
4181 + struct ceph_cap_flush *prealloc_cf;
4182 + ssize_t count, written = 0;
4183 + int err, want, got;
4184 ++ bool direct_lock = false;
4185 + loff_t pos;
4186 + loff_t limit = max(i_size_read(inode), fsc->max_file_size);
4187 +
4188 +@@ -1428,8 +1429,11 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
4189 + if (!prealloc_cf)
4190 + return -ENOMEM;
4191 +
4192 ++ if ((iocb->ki_flags & (IOCB_DIRECT | IOCB_APPEND)) == IOCB_DIRECT)
4193 ++ direct_lock = true;
4194 ++
4195 + retry_snap:
4196 +- if (iocb->ki_flags & IOCB_DIRECT)
4197 ++ if (direct_lock)
4198 + ceph_start_io_direct(inode);
4199 + else
4200 + ceph_start_io_write(inode);
4201 +@@ -1519,14 +1523,15 @@ retry_snap:
4202 +
4203 + /* we might need to revert back to that point */
4204 + data = *from;
4205 +- if (iocb->ki_flags & IOCB_DIRECT) {
4206 ++ if (iocb->ki_flags & IOCB_DIRECT)
4207 + written = ceph_direct_read_write(iocb, &data, snapc,
4208 + &prealloc_cf);
4209 +- ceph_end_io_direct(inode);
4210 +- } else {
4211 ++ else
4212 + written = ceph_sync_write(iocb, &data, pos, snapc);
4213 ++ if (direct_lock)
4214 ++ ceph_end_io_direct(inode);
4215 ++ else
4216 + ceph_end_io_write(inode);
4217 +- }
4218 + if (written > 0)
4219 + iov_iter_advance(from, written);
4220 + ceph_put_snap_context(snapc);
4221 +@@ -1577,7 +1582,7 @@ retry_snap:
4222 +
4223 + goto out_unlocked;
4224 + out:
4225 +- if (iocb->ki_flags & IOCB_DIRECT)
4226 ++ if (direct_lock)
4227 + ceph_end_io_direct(inode);
4228 + else
4229 + ceph_end_io_write(inode);
4230 +diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
4231 +index fb41e51dd574..25704beb9d4c 100644
4232 +--- a/fs/cifs/cifsacl.c
4233 ++++ b/fs/cifs/cifsacl.c
4234 +@@ -601,7 +601,7 @@ static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode,
4235 + ((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
4236 + *pmode |= (S_IXUGO & (*pbits_to_set));
4237 +
4238 +- cifs_dbg(NOISY, "access flags 0x%x mode now 0x%x\n", flags, *pmode);
4239 ++ cifs_dbg(NOISY, "access flags 0x%x mode now %04o\n", flags, *pmode);
4240 + return;
4241 + }
4242 +
4243 +@@ -630,7 +630,7 @@ static void mode_to_access_flags(umode_t mode, umode_t bits_to_use,
4244 + if (mode & S_IXUGO)
4245 + *pace_flags |= SET_FILE_EXEC_RIGHTS;
4246 +
4247 +- cifs_dbg(NOISY, "mode: 0x%x, access flags now 0x%x\n",
4248 ++ cifs_dbg(NOISY, "mode: %04o, access flags now 0x%x\n",
4249 + mode, *pace_flags);
4250 + return;
4251 + }
4252 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
4253 +index 0aa3623ae0e1..641825cfa767 100644
4254 +--- a/fs/cifs/connect.c
4255 ++++ b/fs/cifs/connect.c
4256 +@@ -4151,7 +4151,7 @@ int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
4257 + cifs_sb->mnt_gid = pvolume_info->linux_gid;
4258 + cifs_sb->mnt_file_mode = pvolume_info->file_mode;
4259 + cifs_sb->mnt_dir_mode = pvolume_info->dir_mode;
4260 +- cifs_dbg(FYI, "file mode: 0x%hx dir mode: 0x%hx\n",
4261 ++ cifs_dbg(FYI, "file mode: %04ho dir mode: %04ho\n",
4262 + cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode);
4263 +
4264 + cifs_sb->actimeo = pvolume_info->actimeo;
4265 +diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
4266 +index ca76a9287456..b3f3675e1878 100644
4267 +--- a/fs/cifs/inode.c
4268 ++++ b/fs/cifs/inode.c
4269 +@@ -1649,7 +1649,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
4270 + struct TCP_Server_Info *server;
4271 + char *full_path;
4272 +
4273 +- cifs_dbg(FYI, "In cifs_mkdir, mode = 0x%hx inode = 0x%p\n",
4274 ++ cifs_dbg(FYI, "In cifs_mkdir, mode = %04ho inode = 0x%p\n",
4275 + mode, inode);
4276 +
4277 + cifs_sb = CIFS_SB(inode->i_sb);
4278 +diff --git a/fs/dax.c b/fs/dax.c
4279 +index 1f1f0201cad1..0b0d8819cb1b 100644
4280 +--- a/fs/dax.c
4281 ++++ b/fs/dax.c
4282 +@@ -1207,6 +1207,9 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
4283 + lockdep_assert_held(&inode->i_rwsem);
4284 + }
4285 +
4286 ++ if (iocb->ki_flags & IOCB_NOWAIT)
4287 ++ flags |= IOMAP_NOWAIT;
4288 ++
4289 + while (iov_iter_count(iter)) {
4290 + ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
4291 + iter, dax_iomap_actor);
4292 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
4293 +index 12806be10a18..71e2b80ff4aa 100644
4294 +--- a/fs/ext4/super.c
4295 ++++ b/fs/ext4/super.c
4296 +@@ -2346,7 +2346,7 @@ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
4297 + {
4298 + struct ext4_sb_info *sbi = EXT4_SB(sb);
4299 + struct flex_groups **old_groups, **new_groups;
4300 +- int size, i;
4301 ++ int size, i, j;
4302 +
4303 + if (!sbi->s_log_groups_per_flex)
4304 + return 0;
4305 +@@ -2367,8 +2367,8 @@ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
4306 + sizeof(struct flex_groups)),
4307 + GFP_KERNEL);
4308 + if (!new_groups[i]) {
4309 +- for (i--; i >= sbi->s_flex_groups_allocated; i--)
4310 +- kvfree(new_groups[i]);
4311 ++ for (j = sbi->s_flex_groups_allocated; j < i; j++)
4312 ++ kvfree(new_groups[j]);
4313 + kvfree(new_groups);
4314 + ext4_msg(sb, KERN_ERR,
4315 + "not enough memory for %d flex groups", size);
4316 +diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
4317 +index fc40a72f7827..930706e171fd 100644
4318 +--- a/fs/f2fs/data.c
4319 ++++ b/fs/f2fs/data.c
4320 +@@ -3132,7 +3132,8 @@ int f2fs_migrate_page(struct address_space *mapping,
4321 +
4322 + #ifdef CONFIG_SWAP
4323 + /* Copied from generic_swapfile_activate() to check any holes */
4324 +-static int check_swap_activate(struct file *swap_file, unsigned int max)
4325 ++static int check_swap_activate(struct swap_info_struct *sis,
4326 ++ struct file *swap_file, sector_t *span)
4327 + {
4328 + struct address_space *mapping = swap_file->f_mapping;
4329 + struct inode *inode = mapping->host;
4330 +@@ -3143,6 +3144,8 @@ static int check_swap_activate(struct file *swap_file, unsigned int max)
4331 + sector_t last_block;
4332 + sector_t lowest_block = -1;
4333 + sector_t highest_block = 0;
4334 ++ int nr_extents = 0;
4335 ++ int ret;
4336 +
4337 + blkbits = inode->i_blkbits;
4338 + blocks_per_page = PAGE_SIZE >> blkbits;
4339 +@@ -3154,7 +3157,8 @@ static int check_swap_activate(struct file *swap_file, unsigned int max)
4340 + probe_block = 0;
4341 + page_no = 0;
4342 + last_block = i_size_read(inode) >> blkbits;
4343 +- while ((probe_block + blocks_per_page) <= last_block && page_no < max) {
4344 ++ while ((probe_block + blocks_per_page) <= last_block &&
4345 ++ page_no < sis->max) {
4346 + unsigned block_in_page;
4347 + sector_t first_block;
4348 +
4349 +@@ -3194,13 +3198,27 @@ static int check_swap_activate(struct file *swap_file, unsigned int max)
4350 + highest_block = first_block;
4351 + }
4352 +
4353 ++ /*
4354 ++ * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
4355 ++ */
4356 ++ ret = add_swap_extent(sis, page_no, 1, first_block);
4357 ++ if (ret < 0)
4358 ++ goto out;
4359 ++ nr_extents += ret;
4360 + page_no++;
4361 + probe_block += blocks_per_page;
4362 + reprobe:
4363 + continue;
4364 + }
4365 +- return 0;
4366 +-
4367 ++ ret = nr_extents;
4368 ++ *span = 1 + highest_block - lowest_block;
4369 ++ if (page_no == 0)
4370 ++ page_no = 1; /* force Empty message */
4371 ++ sis->max = page_no;
4372 ++ sis->pages = page_no - 1;
4373 ++ sis->highest_bit = page_no - 1;
4374 ++out:
4375 ++ return ret;
4376 + bad_bmap:
4377 + pr_err("swapon: swapfile has holes\n");
4378 + return -EINVAL;
4379 +@@ -3222,14 +3240,14 @@ static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
4380 + if (ret)
4381 + return ret;
4382 +
4383 +- ret = check_swap_activate(file, sis->max);
4384 +- if (ret)
4385 ++ ret = check_swap_activate(sis, file, span);
4386 ++ if (ret < 0)
4387 + return ret;
4388 +
4389 + set_inode_flag(inode, FI_PIN_FILE);
4390 + f2fs_precache_extents(inode);
4391 + f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
4392 +- return 0;
4393 ++ return ret;
4394 + }
4395 +
4396 + static void f2fs_swap_deactivate(struct file *file)
4397 +diff --git a/fs/io-wq.c b/fs/io-wq.c
4398 +index 0dc4bb6de656..25ffb6685bae 100644
4399 +--- a/fs/io-wq.c
4400 ++++ b/fs/io-wq.c
4401 +@@ -666,11 +666,16 @@ static int io_wq_manager(void *data)
4402 + /* create fixed workers */
4403 + refcount_set(&wq->refs, workers_to_create);
4404 + for_each_node(node) {
4405 ++ if (!node_online(node))
4406 ++ continue;
4407 + if (!create_io_worker(wq, wq->wqes[node], IO_WQ_ACCT_BOUND))
4408 + goto err;
4409 + workers_to_create--;
4410 + }
4411 +
4412 ++ while (workers_to_create--)
4413 ++ refcount_dec(&wq->refs);
4414 ++
4415 + complete(&wq->done);
4416 +
4417 + while (!kthread_should_stop()) {
4418 +@@ -678,6 +683,9 @@ static int io_wq_manager(void *data)
4419 + struct io_wqe *wqe = wq->wqes[node];
4420 + bool fork_worker[2] = { false, false };
4421 +
4422 ++ if (!node_online(node))
4423 ++ continue;
4424 ++
4425 + spin_lock_irq(&wqe->lock);
4426 + if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND))
4427 + fork_worker[IO_WQ_ACCT_BOUND] = true;
4428 +@@ -793,7 +801,9 @@ static bool io_wq_for_each_worker(struct io_wqe *wqe,
4429 +
4430 + list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
4431 + if (io_worker_get(worker)) {
4432 +- ret = func(worker, data);
4433 ++ /* no task if node is/was offline */
4434 ++ if (worker->task)
4435 ++ ret = func(worker, data);
4436 + io_worker_release(worker);
4437 + if (ret)
4438 + break;
4439 +@@ -1006,6 +1016,8 @@ void io_wq_flush(struct io_wq *wq)
4440 + for_each_node(node) {
4441 + struct io_wqe *wqe = wq->wqes[node];
4442 +
4443 ++ if (!node_online(node))
4444 ++ continue;
4445 + init_completion(&data.done);
4446 + INIT_IO_WORK(&data.work, io_wq_flush_func);
4447 + data.work.flags |= IO_WQ_WORK_INTERNAL;
4448 +@@ -1038,12 +1050,15 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
4449 +
4450 + for_each_node(node) {
4451 + struct io_wqe *wqe;
4452 ++ int alloc_node = node;
4453 +
4454 +- wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, node);
4455 ++ if (!node_online(alloc_node))
4456 ++ alloc_node = NUMA_NO_NODE;
4457 ++ wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
4458 + if (!wqe)
4459 + goto err;
4460 + wq->wqes[node] = wqe;
4461 +- wqe->node = node;
4462 ++ wqe->node = alloc_node;
4463 + wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
4464 + atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0);
4465 + if (wq->user) {
4466 +@@ -1051,7 +1066,6 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
4467 + task_rlimit(current, RLIMIT_NPROC);
4468 + }
4469 + atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0);
4470 +- wqe->node = node;
4471 + wqe->wq = wq;
4472 + spin_lock_init(&wqe->lock);
4473 + INIT_WQ_LIST(&wqe->work_list);
4474 +diff --git a/fs/io_uring.c b/fs/io_uring.c
4475 +index 678c62782ba3..60a483208998 100644
4476 +--- a/fs/io_uring.c
4477 ++++ b/fs/io_uring.c
4478 +@@ -2166,6 +2166,11 @@ static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4479 + sr->msg_flags = READ_ONCE(sqe->msg_flags);
4480 + sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
4481 +
4482 ++#ifdef CONFIG_COMPAT
4483 ++ if (req->ctx->compat)
4484 ++ sr->msg_flags |= MSG_CMSG_COMPAT;
4485 ++#endif
4486 ++
4487 + if (!io)
4488 + return 0;
4489 +
4490 +@@ -2258,6 +2263,11 @@ static int io_recvmsg_prep(struct io_kiocb *req,
4491 + sr->msg_flags = READ_ONCE(sqe->msg_flags);
4492 + sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
4493 +
4494 ++#ifdef CONFIG_COMPAT
4495 ++ if (req->ctx->compat)
4496 ++ sr->msg_flags |= MSG_CMSG_COMPAT;
4497 ++#endif
4498 ++
4499 + if (!io)
4500 + return 0;
4501 +
4502 +@@ -4970,7 +4980,7 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
4503 + if (READ_ONCE(ctx->rings->sq.tail) - ctx->cached_sq_head !=
4504 + ctx->rings->sq_ring_entries)
4505 + mask |= EPOLLOUT | EPOLLWRNORM;
4506 +- if (READ_ONCE(ctx->rings->cq.head) != ctx->cached_cq_tail)
4507 ++ if (io_cqring_events(ctx, false))
4508 + mask |= EPOLLIN | EPOLLRDNORM;
4509 +
4510 + return mask;
4511 +diff --git a/fs/namei.c b/fs/namei.c
4512 +index 6cc88b6d68c8..70eb4bfeaebc 100644
4513 +--- a/fs/namei.c
4514 ++++ b/fs/namei.c
4515 +@@ -1367,7 +1367,7 @@ static int follow_dotdot_rcu(struct nameidata *nd)
4516 + nd->path.dentry = parent;
4517 + nd->seq = seq;
4518 + if (unlikely(!path_connected(&nd->path)))
4519 +- return -ENOENT;
4520 ++ return -ECHILD;
4521 + break;
4522 + } else {
4523 + struct mount *mnt = real_mount(nd->path.mnt);
4524 +diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
4525 +index 620de905cba9..3f892035c141 100644
4526 +--- a/fs/nfs/nfs4file.c
4527 ++++ b/fs/nfs/nfs4file.c
4528 +@@ -86,7 +86,6 @@ nfs4_file_open(struct inode *inode, struct file *filp)
4529 + if (inode != d_inode(dentry))
4530 + goto out_drop;
4531 +
4532 +- nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
4533 + nfs_file_set_open_context(filp, ctx);
4534 + nfs_fscache_open_file(inode, filp);
4535 + err = 0;
4536 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
4537 +index 6ddb4f517d37..13c2de527718 100644
4538 +--- a/fs/nfs/nfs4proc.c
4539 ++++ b/fs/nfs/nfs4proc.c
4540 +@@ -2962,10 +2962,13 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
4541 + struct dentry *dentry;
4542 + struct nfs4_state *state;
4543 + fmode_t acc_mode = _nfs4_ctx_to_accessmode(ctx);
4544 ++ struct inode *dir = d_inode(opendata->dir);
4545 ++ unsigned long dir_verifier;
4546 + unsigned int seq;
4547 + int ret;
4548 +
4549 + seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
4550 ++ dir_verifier = nfs_save_change_attribute(dir);
4551 +
4552 + ret = _nfs4_proc_open(opendata, ctx);
4553 + if (ret != 0)
4554 +@@ -2993,8 +2996,19 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
4555 + dput(ctx->dentry);
4556 + ctx->dentry = dentry = alias;
4557 + }
4558 +- nfs_set_verifier(dentry,
4559 +- nfs_save_change_attribute(d_inode(opendata->dir)));
4560 ++ }
4561 ++
4562 ++ switch(opendata->o_arg.claim) {
4563 ++ default:
4564 ++ break;
4565 ++ case NFS4_OPEN_CLAIM_NULL:
4566 ++ case NFS4_OPEN_CLAIM_DELEGATE_CUR:
4567 ++ case NFS4_OPEN_CLAIM_DELEGATE_PREV:
4568 ++ if (!opendata->rpc_done)
4569 ++ break;
4570 ++ if (opendata->o_res.delegation_type != 0)
4571 ++ dir_verifier = nfs_save_change_attribute(dir);
4572 ++ nfs_set_verifier(dentry, dir_verifier);
4573 + }
4574 +
4575 + /* Parse layoutget results before we check for access */
4576 +diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
4577 +index 54d6db61106f..edf43ddd7dce 100644
4578 +--- a/fs/ubifs/orphan.c
4579 ++++ b/fs/ubifs/orphan.c
4580 +@@ -129,7 +129,7 @@ static void __orphan_drop(struct ubifs_info *c, struct ubifs_orphan *o)
4581 + static void orphan_delete(struct ubifs_info *c, struct ubifs_orphan *orph)
4582 + {
4583 + if (orph->del) {
4584 +- dbg_gen("deleted twice ino %lu", orph->inum);
4585 ++ dbg_gen("deleted twice ino %lu", (unsigned long)orph->inum);
4586 + return;
4587 + }
4588 +
4589 +@@ -137,7 +137,7 @@ static void orphan_delete(struct ubifs_info *c, struct ubifs_orphan *orph)
4590 + orph->del = 1;
4591 + orph->dnext = c->orph_dnext;
4592 + c->orph_dnext = orph;
4593 +- dbg_gen("delete later ino %lu", orph->inum);
4594 ++ dbg_gen("delete later ino %lu", (unsigned long)orph->inum);
4595 + return;
4596 + }
4597 +
4598 +diff --git a/fs/xfs/libxfs/xfs_attr.h b/fs/xfs/libxfs/xfs_attr.h
4599 +index 94badfa1743e..91c2cb14276e 100644
4600 +--- a/fs/xfs/libxfs/xfs_attr.h
4601 ++++ b/fs/xfs/libxfs/xfs_attr.h
4602 +@@ -26,7 +26,7 @@ struct xfs_attr_list_context;
4603 + *========================================================================*/
4604 +
4605 +
4606 +-#define ATTR_DONTFOLLOW 0x0001 /* -- unused, from IRIX -- */
4607 ++#define ATTR_DONTFOLLOW 0x0001 /* -- ignored, from IRIX -- */
4608 + #define ATTR_ROOT 0x0002 /* use attrs in root (trusted) namespace */
4609 + #define ATTR_TRUST 0x0004 /* -- unused, from IRIX -- */
4610 + #define ATTR_SECURE 0x0008 /* use attrs in security namespace */
4611 +@@ -37,7 +37,10 @@ struct xfs_attr_list_context;
4612 + #define ATTR_KERNOVAL 0x2000 /* [kernel] get attr size only, not value */
4613 +
4614 + #define ATTR_INCOMPLETE 0x4000 /* [kernel] return INCOMPLETE attr keys */
4615 +-#define ATTR_ALLOC 0x8000 /* allocate xattr buffer on demand */
4616 ++#define ATTR_ALLOC 0x8000 /* [kernel] allocate xattr buffer on demand */
4617 ++
4618 ++#define ATTR_KERNEL_FLAGS \
4619 ++ (ATTR_KERNOTIME | ATTR_KERNOVAL | ATTR_INCOMPLETE | ATTR_ALLOC)
4620 +
4621 + #define XFS_ATTR_FLAGS \
4622 + { ATTR_DONTFOLLOW, "DONTFOLLOW" }, \
4623 +diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
4624 +index 7b35d62ede9f..edfbdb8f85e2 100644
4625 +--- a/fs/xfs/xfs_ioctl.c
4626 ++++ b/fs/xfs/xfs_ioctl.c
4627 +@@ -462,6 +462,8 @@ xfs_attrmulti_by_handle(
4628 +
4629 + error = 0;
4630 + for (i = 0; i < am_hreq.opcount; i++) {
4631 ++ ops[i].am_flags &= ~ATTR_KERNEL_FLAGS;
4632 ++
4633 + ops[i].am_error = strncpy_from_user((char *)attr_name,
4634 + ops[i].am_attrname, MAXNAMELEN);
4635 + if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
4636 +diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
4637 +index c4c4f09113d3..bd9d9ebf85d8 100644
4638 +--- a/fs/xfs/xfs_ioctl32.c
4639 ++++ b/fs/xfs/xfs_ioctl32.c
4640 +@@ -450,6 +450,8 @@ xfs_compat_attrmulti_by_handle(
4641 +
4642 + error = 0;
4643 + for (i = 0; i < am_hreq.opcount; i++) {
4644 ++ ops[i].am_flags &= ~ATTR_KERNEL_FLAGS;
4645 ++
4646 + ops[i].am_error = strncpy_from_user((char *)attr_name,
4647 + compat_ptr(ops[i].am_attrname),
4648 + MAXNAMELEN);
4649 +diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
4650 +index 2f3f28c7cea3..9373662cdb44 100644
4651 +--- a/include/acpi/actypes.h
4652 ++++ b/include/acpi/actypes.h
4653 +@@ -532,11 +532,12 @@ typedef u64 acpi_integer;
4654 + strnlen (a, ACPI_NAMESEG_SIZE) == ACPI_NAMESEG_SIZE)
4655 +
4656 + /*
4657 +- * Algorithm to obtain access bit width.
4658 ++ * Algorithm to obtain access bit or byte width.
4659 + * Can be used with access_width of struct acpi_generic_address and access_size of
4660 + * struct acpi_resource_generic_register.
4661 + */
4662 + #define ACPI_ACCESS_BIT_WIDTH(size) (1 << ((size) + 2))
4663 ++#define ACPI_ACCESS_BYTE_WIDTH(size) (1 << ((size) - 1))
4664 +
4665 + /*******************************************************************************
4666 + *
4667 +diff --git a/include/asm-generic/vdso/vsyscall.h b/include/asm-generic/vdso/vsyscall.h
4668 +index ce4103208619..cec543d9e87b 100644
4669 +--- a/include/asm-generic/vdso/vsyscall.h
4670 ++++ b/include/asm-generic/vdso/vsyscall.h
4671 +@@ -12,9 +12,9 @@ static __always_inline struct vdso_data *__arch_get_k_vdso_data(void)
4672 + #endif /* __arch_get_k_vdso_data */
4673 +
4674 + #ifndef __arch_update_vdso_data
4675 +-static __always_inline int __arch_update_vdso_data(void)
4676 ++static __always_inline bool __arch_update_vdso_data(void)
4677 + {
4678 +- return 0;
4679 ++ return true;
4680 + }
4681 + #endif /* __arch_update_vdso_data */
4682 +
4683 +diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
4684 +index 4c636c42ad68..1cb5afed5515 100644
4685 +--- a/include/linux/blkdev.h
4686 ++++ b/include/linux/blkdev.h
4687 +@@ -524,7 +524,7 @@ struct request_queue {
4688 + unsigned int sg_reserved_size;
4689 + int node;
4690 + #ifdef CONFIG_BLK_DEV_IO_TRACE
4691 +- struct blk_trace *blk_trace;
4692 ++ struct blk_trace __rcu *blk_trace;
4693 + struct mutex blk_trace_mutex;
4694 + #endif
4695 + /*
4696 +diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
4697 +index 7bb2d8de9f30..3b6ff5902edc 100644
4698 +--- a/include/linux/blktrace_api.h
4699 ++++ b/include/linux/blktrace_api.h
4700 +@@ -51,9 +51,13 @@ void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *f
4701 + **/
4702 + #define blk_add_cgroup_trace_msg(q, cg, fmt, ...) \
4703 + do { \
4704 +- struct blk_trace *bt = (q)->blk_trace; \
4705 ++ struct blk_trace *bt; \
4706 ++ \
4707 ++ rcu_read_lock(); \
4708 ++ bt = rcu_dereference((q)->blk_trace); \
4709 + if (unlikely(bt)) \
4710 + __trace_note_message(bt, cg, fmt, ##__VA_ARGS__);\
4711 ++ rcu_read_unlock(); \
4712 + } while (0)
4713 + #define blk_add_trace_msg(q, fmt, ...) \
4714 + blk_add_cgroup_trace_msg(q, NULL, fmt, ##__VA_ARGS__)
4715 +@@ -61,10 +65,14 @@ void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *f
4716 +
4717 + static inline bool blk_trace_note_message_enabled(struct request_queue *q)
4718 + {
4719 +- struct blk_trace *bt = q->blk_trace;
4720 +- if (likely(!bt))
4721 +- return false;
4722 +- return bt->act_mask & BLK_TC_NOTIFY;
4723 ++ struct blk_trace *bt;
4724 ++ bool ret;
4725 ++
4726 ++ rcu_read_lock();
4727 ++ bt = rcu_dereference(q->blk_trace);
4728 ++ ret = bt && (bt->act_mask & BLK_TC_NOTIFY);
4729 ++ rcu_read_unlock();
4730 ++ return ret;
4731 + }
4732 +
4733 + extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
4734 +diff --git a/include/linux/hid.h b/include/linux/hid.h
4735 +index cd41f209043f..875f71132b14 100644
4736 +--- a/include/linux/hid.h
4737 ++++ b/include/linux/hid.h
4738 +@@ -492,7 +492,7 @@ struct hid_report_enum {
4739 + };
4740 +
4741 + #define HID_MIN_BUFFER_SIZE 64 /* make sure there is at least a packet size of space */
4742 +-#define HID_MAX_BUFFER_SIZE 4096 /* 4kb */
4743 ++#define HID_MAX_BUFFER_SIZE 8192 /* 8kb */
4744 + #define HID_CONTROL_FIFO_SIZE 256 /* to init devices with >100 reports */
4745 + #define HID_OUTPUT_FIFO_SIZE 64
4746 +
4747 +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
4748 +index cac56fb59af8..1dabd86b232a 100644
4749 +--- a/include/linux/netdevice.h
4750 ++++ b/include/linux/netdevice.h
4751 +@@ -72,6 +72,8 @@ void netdev_set_default_ethtool_ops(struct net_device *dev,
4752 + #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
4753 + #define NET_RX_DROP 1 /* packet dropped */
4754 +
4755 ++#define MAX_NEST_DEV 8
4756 ++
4757 + /*
4758 + * Transmit return codes: transmit return codes originate from three different
4759 + * namespaces:
4760 +@@ -4323,11 +4325,8 @@ void *netdev_lower_get_next(struct net_device *dev,
4761 + ldev; \
4762 + ldev = netdev_lower_get_next(dev, &(iter)))
4763 +
4764 +-struct net_device *netdev_all_lower_get_next(struct net_device *dev,
4765 ++struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
4766 + struct list_head **iter);
4767 +-struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
4768 +- struct list_head **iter);
4769 +-
4770 + int netdev_walk_all_lower_dev(struct net_device *dev,
4771 + int (*fn)(struct net_device *lower_dev,
4772 + void *data),
4773 +diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
4774 +index 908d38dbcb91..5448c8b443db 100644
4775 +--- a/include/linux/netfilter/ipset/ip_set.h
4776 ++++ b/include/linux/netfilter/ipset/ip_set.h
4777 +@@ -121,6 +121,7 @@ struct ip_set_ext {
4778 + u32 timeout;
4779 + u8 packets_op;
4780 + u8 bytes_op;
4781 ++ bool target;
4782 + };
4783 +
4784 + struct ip_set;
4785 +@@ -187,6 +188,14 @@ struct ip_set_type_variant {
4786 + /* Return true if "b" set is the same as "a"
4787 + * according to the create set parameters */
4788 + bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
4789 ++ /* Region-locking is used */
4790 ++ bool region_lock;
4791 ++};
4792 ++
4793 ++struct ip_set_region {
4794 ++ spinlock_t lock; /* Region lock */
4795 ++ size_t ext_size; /* Size of the dynamic extensions */
4796 ++ u32 elements; /* Number of elements vs timeout */
4797 + };
4798 +
4799 + /* The core set type structure */
4800 +@@ -501,7 +510,7 @@ ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo,
4801 + }
4802 +
4803 + #define IP_SET_INIT_KEXT(skb, opt, set) \
4804 +- { .bytes = (skb)->len, .packets = 1, \
4805 ++ { .bytes = (skb)->len, .packets = 1, .target = true,\
4806 + .timeout = ip_set_adt_opt_timeout(opt, set) }
4807 +
4808 + #define IP_SET_INIT_UEXT(set) \
4809 +diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h
4810 +index 1abe91ff6e4a..6d67e9a5af6b 100644
4811 +--- a/include/linux/sched/nohz.h
4812 ++++ b/include/linux/sched/nohz.h
4813 +@@ -15,9 +15,11 @@ static inline void nohz_balance_enter_idle(int cpu) { }
4814 +
4815 + #ifdef CONFIG_NO_HZ_COMMON
4816 + void calc_load_nohz_start(void);
4817 ++void calc_load_nohz_remote(struct rq *rq);
4818 + void calc_load_nohz_stop(void);
4819 + #else
4820 + static inline void calc_load_nohz_start(void) { }
4821 ++static inline void calc_load_nohz_remote(struct rq *rq) { }
4822 + static inline void calc_load_nohz_stop(void) { }
4823 + #endif /* CONFIG_NO_HZ_COMMON */
4824 +
4825 +diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
4826 +index d93017a7ce5c..e03827f702f3 100644
4827 +--- a/include/net/flow_dissector.h
4828 ++++ b/include/net/flow_dissector.h
4829 +@@ -5,6 +5,7 @@
4830 + #include <linux/types.h>
4831 + #include <linux/in6.h>
4832 + #include <linux/siphash.h>
4833 ++#include <linux/string.h>
4834 + #include <uapi/linux/if_ether.h>
4835 +
4836 + struct sk_buff;
4837 +@@ -349,4 +350,12 @@ struct bpf_flow_dissector {
4838 + void *data_end;
4839 + };
4840 +
4841 ++static inline void
4842 ++flow_dissector_init_keys(struct flow_dissector_key_control *key_control,
4843 ++ struct flow_dissector_key_basic *key_basic)
4844 ++{
4845 ++ memset(key_control, 0, sizeof(*key_control));
4846 ++ memset(key_basic, 0, sizeof(*key_basic));
4847 ++}
4848 ++
4849 + #endif
4850 +diff --git a/include/uapi/linux/usb/charger.h b/include/uapi/linux/usb/charger.h
4851 +index 5f72af35b3ed..ad22079125bf 100644
4852 +--- a/include/uapi/linux/usb/charger.h
4853 ++++ b/include/uapi/linux/usb/charger.h
4854 +@@ -14,18 +14,18 @@
4855 + * ACA (Accessory Charger Adapters)
4856 + */
4857 + enum usb_charger_type {
4858 +- UNKNOWN_TYPE,
4859 +- SDP_TYPE,
4860 +- DCP_TYPE,
4861 +- CDP_TYPE,
4862 +- ACA_TYPE,
4863 ++ UNKNOWN_TYPE = 0,
4864 ++ SDP_TYPE = 1,
4865 ++ DCP_TYPE = 2,
4866 ++ CDP_TYPE = 3,
4867 ++ ACA_TYPE = 4,
4868 + };
4869 +
4870 + /* USB charger state */
4871 + enum usb_charger_state {
4872 +- USB_CHARGER_DEFAULT,
4873 +- USB_CHARGER_PRESENT,
4874 +- USB_CHARGER_ABSENT,
4875 ++ USB_CHARGER_DEFAULT = 0,
4876 ++ USB_CHARGER_PRESENT = 1,
4877 ++ USB_CHARGER_ABSENT = 2,
4878 + };
4879 +
4880 + #endif /* _UAPI__LINUX_USB_CHARGER_H */
4881 +diff --git a/kernel/audit.c b/kernel/audit.c
4882 +index 8e09f0f55b4b..f971cd636426 100644
4883 +--- a/kernel/audit.c
4884 ++++ b/kernel/audit.c
4885 +@@ -1100,13 +1100,11 @@ static void audit_log_feature_change(int which, u32 old_feature, u32 new_feature
4886 + audit_log_end(ab);
4887 + }
4888 +
4889 +-static int audit_set_feature(struct sk_buff *skb)
4890 ++static int audit_set_feature(struct audit_features *uaf)
4891 + {
4892 +- struct audit_features *uaf;
4893 + int i;
4894 +
4895 + BUILD_BUG_ON(AUDIT_LAST_FEATURE + 1 > ARRAY_SIZE(audit_feature_names));
4896 +- uaf = nlmsg_data(nlmsg_hdr(skb));
4897 +
4898 + /* if there is ever a version 2 we should handle that here */
4899 +
4900 +@@ -1174,6 +1172,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
4901 + {
4902 + u32 seq;
4903 + void *data;
4904 ++ int data_len;
4905 + int err;
4906 + struct audit_buffer *ab;
4907 + u16 msg_type = nlh->nlmsg_type;
4908 +@@ -1187,6 +1186,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
4909 +
4910 + seq = nlh->nlmsg_seq;
4911 + data = nlmsg_data(nlh);
4912 ++ data_len = nlmsg_len(nlh);
4913 +
4914 + switch (msg_type) {
4915 + case AUDIT_GET: {
4916 +@@ -1210,7 +1210,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
4917 + struct audit_status s;
4918 + memset(&s, 0, sizeof(s));
4919 + /* guard against past and future API changes */
4920 +- memcpy(&s, data, min_t(size_t, sizeof(s), nlmsg_len(nlh)));
4921 ++ memcpy(&s, data, min_t(size_t, sizeof(s), data_len));
4922 + if (s.mask & AUDIT_STATUS_ENABLED) {
4923 + err = audit_set_enabled(s.enabled);
4924 + if (err < 0)
4925 +@@ -1314,7 +1314,9 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
4926 + return err;
4927 + break;
4928 + case AUDIT_SET_FEATURE:
4929 +- err = audit_set_feature(skb);
4930 ++ if (data_len < sizeof(struct audit_features))
4931 ++ return -EINVAL;
4932 ++ err = audit_set_feature(data);
4933 + if (err)
4934 + return err;
4935 + break;
4936 +@@ -1326,6 +1328,8 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
4937 +
4938 + err = audit_filter(msg_type, AUDIT_FILTER_USER);
4939 + if (err == 1) { /* match or error */
4940 ++ char *str = data;
4941 ++
4942 + err = 0;
4943 + if (msg_type == AUDIT_USER_TTY) {
4944 + err = tty_audit_push();
4945 +@@ -1333,26 +1337,24 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
4946 + break;
4947 + }
4948 + audit_log_user_recv_msg(&ab, msg_type);
4949 +- if (msg_type != AUDIT_USER_TTY)
4950 ++ if (msg_type != AUDIT_USER_TTY) {
4951 ++ /* ensure NULL termination */
4952 ++ str[data_len - 1] = '\0';
4953 + audit_log_format(ab, " msg='%.*s'",
4954 + AUDIT_MESSAGE_TEXT_MAX,
4955 +- (char *)data);
4956 +- else {
4957 +- int size;
4958 +-
4959 ++ str);
4960 ++ } else {
4961 + audit_log_format(ab, " data=");
4962 +- size = nlmsg_len(nlh);
4963 +- if (size > 0 &&
4964 +- ((unsigned char *)data)[size - 1] == '\0')
4965 +- size--;
4966 +- audit_log_n_untrustedstring(ab, data, size);
4967 ++ if (data_len > 0 && str[data_len - 1] == '\0')
4968 ++ data_len--;
4969 ++ audit_log_n_untrustedstring(ab, str, data_len);
4970 + }
4971 + audit_log_end(ab);
4972 + }
4973 + break;
4974 + case AUDIT_ADD_RULE:
4975 + case AUDIT_DEL_RULE:
4976 +- if (nlmsg_len(nlh) < sizeof(struct audit_rule_data))
4977 ++ if (data_len < sizeof(struct audit_rule_data))
4978 + return -EINVAL;
4979 + if (audit_enabled == AUDIT_LOCKED) {
4980 + audit_log_common_recv_msg(audit_context(), &ab,
4981 +@@ -1364,7 +1366,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
4982 + audit_log_end(ab);
4983 + return -EPERM;
4984 + }
4985 +- err = audit_rule_change(msg_type, seq, data, nlmsg_len(nlh));
4986 ++ err = audit_rule_change(msg_type, seq, data, data_len);
4987 + break;
4988 + case AUDIT_LIST_RULES:
4989 + err = audit_list_rules_send(skb, seq);
4990 +@@ -1379,7 +1381,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
4991 + case AUDIT_MAKE_EQUIV: {
4992 + void *bufp = data;
4993 + u32 sizes[2];
4994 +- size_t msglen = nlmsg_len(nlh);
4995 ++ size_t msglen = data_len;
4996 + char *old, *new;
4997 +
4998 + err = -EINVAL;
4999 +@@ -1455,7 +1457,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
5000 +
5001 + memset(&s, 0, sizeof(s));
5002 + /* guard against past and future API changes */
5003 +- memcpy(&s, data, min_t(size_t, sizeof(s), nlmsg_len(nlh)));
5004 ++ memcpy(&s, data, min_t(size_t, sizeof(s), data_len));
5005 + /* check if new data is valid */
5006 + if ((s.enabled != 0 && s.enabled != 1) ||
5007 + (s.log_passwd != 0 && s.log_passwd != 1))
5008 +diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
5009 +index b0126e9c0743..026e34da4ace 100644
5010 +--- a/kernel/auditfilter.c
5011 ++++ b/kernel/auditfilter.c
5012 +@@ -456,6 +456,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
5013 + bufp = data->buf;
5014 + for (i = 0; i < data->field_count; i++) {
5015 + struct audit_field *f = &entry->rule.fields[i];
5016 ++ u32 f_val;
5017 +
5018 + err = -EINVAL;
5019 +
5020 +@@ -464,12 +465,12 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
5021 + goto exit_free;
5022 +
5023 + f->type = data->fields[i];
5024 +- f->val = data->values[i];
5025 ++ f_val = data->values[i];
5026 +
5027 + /* Support legacy tests for a valid loginuid */
5028 +- if ((f->type == AUDIT_LOGINUID) && (f->val == AUDIT_UID_UNSET)) {
5029 ++ if ((f->type == AUDIT_LOGINUID) && (f_val == AUDIT_UID_UNSET)) {
5030 + f->type = AUDIT_LOGINUID_SET;
5031 +- f->val = 0;
5032 ++ f_val = 0;
5033 + entry->rule.pflags |= AUDIT_LOGINUID_LEGACY;
5034 + }
5035 +
5036 +@@ -485,7 +486,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
5037 + case AUDIT_SUID:
5038 + case AUDIT_FSUID:
5039 + case AUDIT_OBJ_UID:
5040 +- f->uid = make_kuid(current_user_ns(), f->val);
5041 ++ f->uid = make_kuid(current_user_ns(), f_val);
5042 + if (!uid_valid(f->uid))
5043 + goto exit_free;
5044 + break;
5045 +@@ -494,11 +495,12 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
5046 + case AUDIT_SGID:
5047 + case AUDIT_FSGID:
5048 + case AUDIT_OBJ_GID:
5049 +- f->gid = make_kgid(current_user_ns(), f->val);
5050 ++ f->gid = make_kgid(current_user_ns(), f_val);
5051 + if (!gid_valid(f->gid))
5052 + goto exit_free;
5053 + break;
5054 + case AUDIT_ARCH:
5055 ++ f->val = f_val;
5056 + entry->rule.arch_f = f;
5057 + break;
5058 + case AUDIT_SUBJ_USER:
5059 +@@ -511,11 +513,13 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
5060 + case AUDIT_OBJ_TYPE:
5061 + case AUDIT_OBJ_LEV_LOW:
5062 + case AUDIT_OBJ_LEV_HIGH:
5063 +- str = audit_unpack_string(&bufp, &remain, f->val);
5064 +- if (IS_ERR(str))
5065 ++ str = audit_unpack_string(&bufp, &remain, f_val);
5066 ++ if (IS_ERR(str)) {
5067 ++ err = PTR_ERR(str);
5068 + goto exit_free;
5069 +- entry->rule.buflen += f->val;
5070 +-
5071 ++ }
5072 ++ entry->rule.buflen += f_val;
5073 ++ f->lsm_str = str;
5074 + err = security_audit_rule_init(f->type, f->op, str,
5075 + (void **)&f->lsm_rule);
5076 + /* Keep currently invalid fields around in case they
5077 +@@ -524,68 +528,71 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
5078 + pr_warn("audit rule for LSM \'%s\' is invalid\n",
5079 + str);
5080 + err = 0;
5081 +- }
5082 +- if (err) {
5083 +- kfree(str);
5084 ++ } else if (err)
5085 + goto exit_free;
5086 +- } else
5087 +- f->lsm_str = str;
5088 + break;
5089 + case AUDIT_WATCH:
5090 +- str = audit_unpack_string(&bufp, &remain, f->val);
5091 +- if (IS_ERR(str))
5092 ++ str = audit_unpack_string(&bufp, &remain, f_val);
5093 ++ if (IS_ERR(str)) {
5094 ++ err = PTR_ERR(str);
5095 + goto exit_free;
5096 +- entry->rule.buflen += f->val;
5097 +-
5098 +- err = audit_to_watch(&entry->rule, str, f->val, f->op);
5099 ++ }
5100 ++ err = audit_to_watch(&entry->rule, str, f_val, f->op);
5101 + if (err) {
5102 + kfree(str);
5103 + goto exit_free;
5104 + }
5105 ++ entry->rule.buflen += f_val;
5106 + break;
5107 + case AUDIT_DIR:
5108 +- str = audit_unpack_string(&bufp, &remain, f->val);
5109 +- if (IS_ERR(str))
5110 ++ str = audit_unpack_string(&bufp, &remain, f_val);
5111 ++ if (IS_ERR(str)) {
5112 ++ err = PTR_ERR(str);
5113 + goto exit_free;
5114 +- entry->rule.buflen += f->val;
5115 +-
5116 ++ }
5117 + err = audit_make_tree(&entry->rule, str, f->op);
5118 + kfree(str);
5119 + if (err)
5120 + goto exit_free;
5121 ++ entry->rule.buflen += f_val;
5122 + break;
5123 + case AUDIT_INODE:
5124 ++ f->val = f_val;
5125 + err = audit_to_inode(&entry->rule, f);
5126 + if (err)
5127 + goto exit_free;
5128 + break;
5129 + case AUDIT_FILTERKEY:
5130 +- if (entry->rule.filterkey || f->val > AUDIT_MAX_KEY_LEN)
5131 ++ if (entry->rule.filterkey || f_val > AUDIT_MAX_KEY_LEN)
5132 + goto exit_free;
5133 +- str = audit_unpack_string(&bufp, &remain, f->val);
5134 +- if (IS_ERR(str))
5135 ++ str = audit_unpack_string(&bufp, &remain, f_val);
5136 ++ if (IS_ERR(str)) {
5137 ++ err = PTR_ERR(str);
5138 + goto exit_free;
5139 +- entry->rule.buflen += f->val;
5140 ++ }
5141 ++ entry->rule.buflen += f_val;
5142 + entry->rule.filterkey = str;
5143 + break;
5144 + case AUDIT_EXE:
5145 +- if (entry->rule.exe || f->val > PATH_MAX)
5146 ++ if (entry->rule.exe || f_val > PATH_MAX)
5147 + goto exit_free;
5148 +- str = audit_unpack_string(&bufp, &remain, f->val);
5149 ++ str = audit_unpack_string(&bufp, &remain, f_val);
5150 + if (IS_ERR(str)) {
5151 + err = PTR_ERR(str);
5152 + goto exit_free;
5153 + }
5154 +- entry->rule.buflen += f->val;
5155 +-
5156 +- audit_mark = audit_alloc_mark(&entry->rule, str, f->val);
5157 ++ audit_mark = audit_alloc_mark(&entry->rule, str, f_val);
5158 + if (IS_ERR(audit_mark)) {
5159 + kfree(str);
5160 + err = PTR_ERR(audit_mark);
5161 + goto exit_free;
5162 + }
5163 ++ entry->rule.buflen += f_val;
5164 + entry->rule.exe = audit_mark;
5165 + break;
5166 ++ default:
5167 ++ f->val = f_val;
5168 ++ break;
5169 + }
5170 + }
5171 +
5172 +diff --git a/kernel/kprobes.c b/kernel/kprobes.c
5173 +index fd81882f0521..2625c241ac00 100644
5174 +--- a/kernel/kprobes.c
5175 ++++ b/kernel/kprobes.c
5176 +@@ -510,6 +510,8 @@ static void do_unoptimize_kprobes(void)
5177 + arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
5178 + /* Loop free_list for disarming */
5179 + list_for_each_entry_safe(op, tmp, &freeing_list, list) {
5180 ++ /* Switching from detour code to origin */
5181 ++ op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
5182 + /* Disarm probes if marked disabled */
5183 + if (kprobe_disabled(&op->kp))
5184 + arch_disarm_kprobe(&op->kp);
5185 +@@ -665,6 +667,7 @@ static void force_unoptimize_kprobe(struct optimized_kprobe *op)
5186 + {
5187 + lockdep_assert_cpus_held();
5188 + arch_unoptimize_kprobe(op);
5189 ++ op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
5190 + if (kprobe_disabled(&op->kp))
5191 + arch_disarm_kprobe(&op->kp);
5192 + }
5193 +@@ -681,7 +684,6 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
5194 + if (!kprobe_optimized(p))
5195 + return;
5196 +
5197 +- op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
5198 + if (!list_empty(&op->list)) {
5199 + if (optprobe_queued_unopt(op)) {
5200 + /* Queued in unoptimizing queue */
5201 +diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
5202 +index dadb7b7fba37..9bb6d2497b04 100644
5203 +--- a/kernel/locking/lockdep_proc.c
5204 ++++ b/kernel/locking/lockdep_proc.c
5205 +@@ -286,9 +286,9 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
5206 + seq_printf(m, " stack-trace entries: %11lu [max: %lu]\n",
5207 + nr_stack_trace_entries, MAX_STACK_TRACE_ENTRIES);
5208 + #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
5209 +- seq_printf(m, " number of stack traces: %llu\n",
5210 ++ seq_printf(m, " number of stack traces: %11llu\n",
5211 + lockdep_stack_trace_count());
5212 +- seq_printf(m, " number of stack hash chains: %llu\n",
5213 ++ seq_printf(m, " number of stack hash chains: %11llu\n",
5214 + lockdep_stack_hash_count());
5215 + #endif
5216 + seq_printf(m, " combined max dependencies: %11u\n",
5217 +diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
5218 +index f504ac831779..df90d4d7ad2e 100644
5219 +--- a/kernel/rcu/tree_exp.h
5220 ++++ b/kernel/rcu/tree_exp.h
5221 +@@ -540,14 +540,13 @@ static void rcu_exp_wait_wake(unsigned long s)
5222 + struct rcu_node *rnp;
5223 +
5224 + synchronize_sched_expedited_wait();
5225 +- rcu_exp_gp_seq_end();
5226 +- trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));
5227 +
5228 +- /*
5229 +- * Switch over to wakeup mode, allowing the next GP, but -only- the
5230 +- * next GP, to proceed.
5231 +- */
5232 ++ // Switch over to wakeup mode, allowing the next GP to proceed.
5233 ++ // End the previous grace period only after acquiring the mutex
5234 ++ // to ensure that only one GP runs concurrently with wakeups.
5235 + mutex_lock(&rcu_state.exp_wake_mutex);
5236 ++ rcu_exp_gp_seq_end();
5237 ++ trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));
5238 +
5239 + rcu_for_each_node_breadth_first(rnp) {
5240 + if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
5241 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
5242 +index b2564d62a0f7..9e7768dbd92d 100644
5243 +--- a/kernel/sched/core.c
5244 ++++ b/kernel/sched/core.c
5245 +@@ -3669,28 +3669,32 @@ static void sched_tick_remote(struct work_struct *work)
5246 + * statistics and checks timeslices in a time-independent way, regardless
5247 + * of when exactly it is running.
5248 + */
5249 +- if (idle_cpu(cpu) || !tick_nohz_tick_stopped_cpu(cpu))
5250 ++ if (!tick_nohz_tick_stopped_cpu(cpu))
5251 + goto out_requeue;
5252 +
5253 + rq_lock_irq(rq, &rf);
5254 + curr = rq->curr;
5255 +- if (is_idle_task(curr) || cpu_is_offline(cpu))
5256 ++ if (cpu_is_offline(cpu))
5257 + goto out_unlock;
5258 +
5259 ++ curr = rq->curr;
5260 + update_rq_clock(rq);
5261 +- delta = rq_clock_task(rq) - curr->se.exec_start;
5262 +
5263 +- /*
5264 +- * Make sure the next tick runs within a reasonable
5265 +- * amount of time.
5266 +- */
5267 +- WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
5268 ++ if (!is_idle_task(curr)) {
5269 ++ /*
5270 ++ * Make sure the next tick runs within a reasonable
5271 ++ * amount of time.
5272 ++ */
5273 ++ delta = rq_clock_task(rq) - curr->se.exec_start;
5274 ++ WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
5275 ++ }
5276 + curr->sched_class->task_tick(rq, curr, 0);
5277 +
5278 ++ calc_load_nohz_remote(rq);
5279 + out_unlock:
5280 + rq_unlock_irq(rq, &rf);
5281 +-
5282 + out_requeue:
5283 ++
5284 + /*
5285 + * Run the remote tick once per second (1Hz). This arbitrary
5286 + * frequency is large enough to avoid overload but short enough
5287 +@@ -7064,8 +7068,15 @@ void sched_move_task(struct task_struct *tsk)
5288 +
5289 + if (queued)
5290 + enqueue_task(rq, tsk, queue_flags);
5291 +- if (running)
5292 ++ if (running) {
5293 + set_next_task(rq, tsk);
5294 ++ /*
5295 ++ * After changing group, the running task may have joined a
5296 ++ * throttled one but it's still the running task. Trigger a
5297 ++ * resched to make sure that task can still run.
5298 ++ */
5299 ++ resched_curr(rq);
5300 ++ }
5301 +
5302 + task_rq_unlock(rq, tsk, &rf);
5303 + }
5304 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
5305 +index ba749f579714..b0ee5eedeccd 100644
5306 +--- a/kernel/sched/fair.c
5307 ++++ b/kernel/sched/fair.c
5308 +@@ -5828,6 +5828,7 @@ static inline int select_idle_smt(struct task_struct *p, int target)
5309 + */
5310 + static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
5311 + {
5312 ++ struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
5313 + struct sched_domain *this_sd;
5314 + u64 avg_cost, avg_idle;
5315 + u64 time, cost;
5316 +@@ -5859,11 +5860,11 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
5317 +
5318 + time = cpu_clock(this);
5319 +
5320 +- for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
5321 ++ cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
5322 ++
5323 ++ for_each_cpu_wrap(cpu, cpus, target) {
5324 + if (!--nr)
5325 + return si_cpu;
5326 +- if (!cpumask_test_cpu(cpu, p->cpus_ptr))
5327 +- continue;
5328 + if (available_idle_cpu(cpu))
5329 + break;
5330 + if (si_cpu == -1 && sched_idle_cpu(cpu))
5331 +diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
5332 +index 28a516575c18..de22da666ac7 100644
5333 +--- a/kernel/sched/loadavg.c
5334 ++++ b/kernel/sched/loadavg.c
5335 +@@ -231,16 +231,11 @@ static inline int calc_load_read_idx(void)
5336 + return calc_load_idx & 1;
5337 + }
5338 +
5339 +-void calc_load_nohz_start(void)
5340 ++static void calc_load_nohz_fold(struct rq *rq)
5341 + {
5342 +- struct rq *this_rq = this_rq();
5343 + long delta;
5344 +
5345 +- /*
5346 +- * We're going into NO_HZ mode, if there's any pending delta, fold it
5347 +- * into the pending NO_HZ delta.
5348 +- */
5349 +- delta = calc_load_fold_active(this_rq, 0);
5350 ++ delta = calc_load_fold_active(rq, 0);
5351 + if (delta) {
5352 + int idx = calc_load_write_idx();
5353 +
5354 +@@ -248,6 +243,24 @@ void calc_load_nohz_start(void)
5355 + }
5356 + }
5357 +
5358 ++void calc_load_nohz_start(void)
5359 ++{
5360 ++ /*
5361 ++ * We're going into NO_HZ mode, if there's any pending delta, fold it
5362 ++ * into the pending NO_HZ delta.
5363 ++ */
5364 ++ calc_load_nohz_fold(this_rq());
5365 ++}
5366 ++
5367 ++/*
5368 ++ * Keep track of the load for NOHZ_FULL, must be called between
5369 ++ * calc_load_nohz_{start,stop}().
5370 ++ */
5371 ++void calc_load_nohz_remote(struct rq *rq)
5372 ++{
5373 ++ calc_load_nohz_fold(rq);
5374 ++}
5375 ++
5376 + void calc_load_nohz_stop(void)
5377 + {
5378 + struct rq *this_rq = this_rq();
5379 +@@ -268,7 +281,7 @@ void calc_load_nohz_stop(void)
5380 + this_rq->calc_load_update += LOAD_FREQ;
5381 + }
5382 +
5383 +-static long calc_load_nohz_fold(void)
5384 ++static long calc_load_nohz_read(void)
5385 + {
5386 + int idx = calc_load_read_idx();
5387 + long delta = 0;
5388 +@@ -323,7 +336,7 @@ static void calc_global_nohz(void)
5389 + }
5390 + #else /* !CONFIG_NO_HZ_COMMON */
5391 +
5392 +-static inline long calc_load_nohz_fold(void) { return 0; }
5393 ++static inline long calc_load_nohz_read(void) { return 0; }
5394 + static inline void calc_global_nohz(void) { }
5395 +
5396 + #endif /* CONFIG_NO_HZ_COMMON */
5397 +@@ -346,7 +359,7 @@ void calc_global_load(unsigned long ticks)
5398 + /*
5399 + * Fold the 'old' NO_HZ-delta to include all NO_HZ CPUs.
5400 + */
5401 +- delta = calc_load_nohz_fold();
5402 ++ delta = calc_load_nohz_read();
5403 + if (delta)
5404 + atomic_long_add(delta, &calc_load_tasks);
5405 +
5406 +diff --git a/kernel/time/vsyscall.c b/kernel/time/vsyscall.c
5407 +index 5ee0f7709410..9577c89179cd 100644
5408 +--- a/kernel/time/vsyscall.c
5409 ++++ b/kernel/time/vsyscall.c
5410 +@@ -28,11 +28,6 @@ static inline void update_vdso_data(struct vdso_data *vdata,
5411 + vdata[CS_RAW].mult = tk->tkr_raw.mult;
5412 + vdata[CS_RAW].shift = tk->tkr_raw.shift;
5413 +
5414 +- /* CLOCK_REALTIME */
5415 +- vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME];
5416 +- vdso_ts->sec = tk->xtime_sec;
5417 +- vdso_ts->nsec = tk->tkr_mono.xtime_nsec;
5418 +-
5419 + /* CLOCK_MONOTONIC */
5420 + vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC];
5421 + vdso_ts->sec = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
5422 +@@ -70,12 +65,6 @@ static inline void update_vdso_data(struct vdso_data *vdata,
5423 + vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_TAI];
5424 + vdso_ts->sec = tk->xtime_sec + (s64)tk->tai_offset;
5425 + vdso_ts->nsec = tk->tkr_mono.xtime_nsec;
5426 +-
5427 +- /*
5428 +- * Read without the seqlock held by clock_getres().
5429 +- * Note: No need to have a second copy.
5430 +- */
5431 +- WRITE_ONCE(vdata[CS_HRES_COARSE].hrtimer_res, hrtimer_resolution);
5432 + }
5433 +
5434 + void update_vsyscall(struct timekeeper *tk)
5435 +@@ -84,20 +73,17 @@ void update_vsyscall(struct timekeeper *tk)
5436 + struct vdso_timestamp *vdso_ts;
5437 + u64 nsec;
5438 +
5439 +- if (__arch_update_vdso_data()) {
5440 +- /*
5441 +- * Some architectures might want to skip the update of the
5442 +- * data page.
5443 +- */
5444 +- return;
5445 +- }
5446 +-
5447 + /* copy vsyscall data */
5448 + vdso_write_begin(vdata);
5449 +
5450 + vdata[CS_HRES_COARSE].clock_mode = __arch_get_clock_mode(tk);
5451 + vdata[CS_RAW].clock_mode = __arch_get_clock_mode(tk);
5452 +
5453 ++ /* CLOCK_REALTIME also required for time() */
5454 ++ vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME];
5455 ++ vdso_ts->sec = tk->xtime_sec;
5456 ++ vdso_ts->nsec = tk->tkr_mono.xtime_nsec;
5457 ++
5458 + /* CLOCK_REALTIME_COARSE */
5459 + vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME_COARSE];
5460 + vdso_ts->sec = tk->xtime_sec;
5461 +@@ -110,7 +96,18 @@ void update_vsyscall(struct timekeeper *tk)
5462 + nsec = nsec + tk->wall_to_monotonic.tv_nsec;
5463 + vdso_ts->sec += __iter_div_u64_rem(nsec, NSEC_PER_SEC, &vdso_ts->nsec);
5464 +
5465 +- update_vdso_data(vdata, tk);
5466 ++ /*
5467 ++ * Read without the seqlock held by clock_getres().
5468 ++ * Note: No need to have a second copy.
5469 ++ */
5470 ++ WRITE_ONCE(vdata[CS_HRES_COARSE].hrtimer_res, hrtimer_resolution);
5471 ++
5472 ++ /*
5473 ++ * Architectures can opt out of updating the high resolution part
5474 ++ * of the VDSO.
5475 ++ */
5476 ++ if (__arch_update_vdso_data())
5477 ++ update_vdso_data(vdata, tk);
5478 +
5479 + __arch_update_vsyscall(vdata, tk);
5480 +
5481 +diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
5482 +index 475e29498bca..a6d3016410eb 100644
5483 +--- a/kernel/trace/blktrace.c
5484 ++++ b/kernel/trace/blktrace.c
5485 +@@ -335,6 +335,7 @@ static void put_probe_ref(void)
5486 +
5487 + static void blk_trace_cleanup(struct blk_trace *bt)
5488 + {
5489 ++ synchronize_rcu();
5490 + blk_trace_free(bt);
5491 + put_probe_ref();
5492 + }
5493 +@@ -629,8 +630,10 @@ static int compat_blk_trace_setup(struct request_queue *q, char *name,
5494 + static int __blk_trace_startstop(struct request_queue *q, int start)
5495 + {
5496 + int ret;
5497 +- struct blk_trace *bt = q->blk_trace;
5498 ++ struct blk_trace *bt;
5499 +
5500 ++ bt = rcu_dereference_protected(q->blk_trace,
5501 ++ lockdep_is_held(&q->blk_trace_mutex));
5502 + if (bt == NULL)
5503 + return -EINVAL;
5504 +
5505 +@@ -740,8 +743,8 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
5506 + void blk_trace_shutdown(struct request_queue *q)
5507 + {
5508 + mutex_lock(&q->blk_trace_mutex);
5509 +-
5510 +- if (q->blk_trace) {
5511 ++ if (rcu_dereference_protected(q->blk_trace,
5512 ++ lockdep_is_held(&q->blk_trace_mutex))) {
5513 + __blk_trace_startstop(q, 0);
5514 + __blk_trace_remove(q);
5515 + }
5516 +@@ -752,8 +755,10 @@ void blk_trace_shutdown(struct request_queue *q)
5517 + #ifdef CONFIG_BLK_CGROUP
5518 + static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
5519 + {
5520 +- struct blk_trace *bt = q->blk_trace;
5521 ++ struct blk_trace *bt;
5522 +
5523 ++ /* We don't use the 'bt' value here except as an optimization... */
5524 ++ bt = rcu_dereference_protected(q->blk_trace, 1);
5525 + if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
5526 + return 0;
5527 +
5528 +@@ -796,10 +801,14 @@ blk_trace_request_get_cgid(struct request_queue *q, struct request *rq)
5529 + static void blk_add_trace_rq(struct request *rq, int error,
5530 + unsigned int nr_bytes, u32 what, u64 cgid)
5531 + {
5532 +- struct blk_trace *bt = rq->q->blk_trace;
5533 ++ struct blk_trace *bt;
5534 +
5535 +- if (likely(!bt))
5536 ++ rcu_read_lock();
5537 ++ bt = rcu_dereference(rq->q->blk_trace);
5538 ++ if (likely(!bt)) {
5539 ++ rcu_read_unlock();
5540 + return;
5541 ++ }
5542 +
5543 + if (blk_rq_is_passthrough(rq))
5544 + what |= BLK_TC_ACT(BLK_TC_PC);
5545 +@@ -808,6 +817,7 @@ static void blk_add_trace_rq(struct request *rq, int error,
5546 +
5547 + __blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq),
5548 + rq->cmd_flags, what, error, 0, NULL, cgid);
5549 ++ rcu_read_unlock();
5550 + }
5551 +
5552 + static void blk_add_trace_rq_insert(void *ignore,
5553 +@@ -853,14 +863,19 @@ static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
5554 + static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
5555 + u32 what, int error)
5556 + {
5557 +- struct blk_trace *bt = q->blk_trace;
5558 ++ struct blk_trace *bt;
5559 +
5560 +- if (likely(!bt))
5561 ++ rcu_read_lock();
5562 ++ bt = rcu_dereference(q->blk_trace);
5563 ++ if (likely(!bt)) {
5564 ++ rcu_read_unlock();
5565 + return;
5566 ++ }
5567 +
5568 + __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
5569 + bio_op(bio), bio->bi_opf, what, error, 0, NULL,
5570 + blk_trace_bio_get_cgid(q, bio));
5571 ++ rcu_read_unlock();
5572 + }
5573 +
5574 + static void blk_add_trace_bio_bounce(void *ignore,
5575 +@@ -905,11 +920,14 @@ static void blk_add_trace_getrq(void *ignore,
5576 + if (bio)
5577 + blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
5578 + else {
5579 +- struct blk_trace *bt = q->blk_trace;
5580 ++ struct blk_trace *bt;
5581 +
5582 ++ rcu_read_lock();
5583 ++ bt = rcu_dereference(q->blk_trace);
5584 + if (bt)
5585 + __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0,
5586 + NULL, 0);
5587 ++ rcu_read_unlock();
5588 + }
5589 + }
5590 +
5591 +@@ -921,27 +939,35 @@ static void blk_add_trace_sleeprq(void *ignore,
5592 + if (bio)
5593 + blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
5594 + else {
5595 +- struct blk_trace *bt = q->blk_trace;
5596 ++ struct blk_trace *bt;
5597 +
5598 ++ rcu_read_lock();
5599 ++ bt = rcu_dereference(q->blk_trace);
5600 + if (bt)
5601 + __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ,
5602 + 0, 0, NULL, 0);
5603 ++ rcu_read_unlock();
5604 + }
5605 + }
5606 +
5607 + static void blk_add_trace_plug(void *ignore, struct request_queue *q)
5608 + {
5609 +- struct blk_trace *bt = q->blk_trace;
5610 ++ struct blk_trace *bt;
5611 +
5612 ++ rcu_read_lock();
5613 ++ bt = rcu_dereference(q->blk_trace);
5614 + if (bt)
5615 + __blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, 0);
5616 ++ rcu_read_unlock();
5617 + }
5618 +
5619 + static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
5620 + unsigned int depth, bool explicit)
5621 + {
5622 +- struct blk_trace *bt = q->blk_trace;
5623 ++ struct blk_trace *bt;
5624 +
5625 ++ rcu_read_lock();
5626 ++ bt = rcu_dereference(q->blk_trace);
5627 + if (bt) {
5628 + __be64 rpdu = cpu_to_be64(depth);
5629 + u32 what;
5630 +@@ -953,14 +979,17 @@ static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
5631 +
5632 + __blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, 0);
5633 + }
5634 ++ rcu_read_unlock();
5635 + }
5636 +
5637 + static void blk_add_trace_split(void *ignore,
5638 + struct request_queue *q, struct bio *bio,
5639 + unsigned int pdu)
5640 + {
5641 +- struct blk_trace *bt = q->blk_trace;
5642 ++ struct blk_trace *bt;
5643 +
5644 ++ rcu_read_lock();
5645 ++ bt = rcu_dereference(q->blk_trace);
5646 + if (bt) {
5647 + __be64 rpdu = cpu_to_be64(pdu);
5648 +
5649 +@@ -969,6 +998,7 @@ static void blk_add_trace_split(void *ignore,
5650 + BLK_TA_SPLIT, bio->bi_status, sizeof(rpdu),
5651 + &rpdu, blk_trace_bio_get_cgid(q, bio));
5652 + }
5653 ++ rcu_read_unlock();
5654 + }
5655 +
5656 + /**
5657 +@@ -988,11 +1018,15 @@ static void blk_add_trace_bio_remap(void *ignore,
5658 + struct request_queue *q, struct bio *bio,
5659 + dev_t dev, sector_t from)
5660 + {
5661 +- struct blk_trace *bt = q->blk_trace;
5662 ++ struct blk_trace *bt;
5663 + struct blk_io_trace_remap r;
5664 +
5665 +- if (likely(!bt))
5666 ++ rcu_read_lock();
5667 ++ bt = rcu_dereference(q->blk_trace);
5668 ++ if (likely(!bt)) {
5669 ++ rcu_read_unlock();
5670 + return;
5671 ++ }
5672 +
5673 + r.device_from = cpu_to_be32(dev);
5674 + r.device_to = cpu_to_be32(bio_dev(bio));
5675 +@@ -1001,6 +1035,7 @@ static void blk_add_trace_bio_remap(void *ignore,
5676 + __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
5677 + bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_status,
5678 + sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
5679 ++ rcu_read_unlock();
5680 + }
5681 +
5682 + /**
5683 +@@ -1021,11 +1056,15 @@ static void blk_add_trace_rq_remap(void *ignore,
5684 + struct request *rq, dev_t dev,
5685 + sector_t from)
5686 + {
5687 +- struct blk_trace *bt = q->blk_trace;
5688 ++ struct blk_trace *bt;
5689 + struct blk_io_trace_remap r;
5690 +
5691 +- if (likely(!bt))
5692 ++ rcu_read_lock();
5693 ++ bt = rcu_dereference(q->blk_trace);
5694 ++ if (likely(!bt)) {
5695 ++ rcu_read_unlock();
5696 + return;
5697 ++ }
5698 +
5699 + r.device_from = cpu_to_be32(dev);
5700 + r.device_to = cpu_to_be32(disk_devt(rq->rq_disk));
5701 +@@ -1034,6 +1073,7 @@ static void blk_add_trace_rq_remap(void *ignore,
5702 + __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
5703 + rq_data_dir(rq), 0, BLK_TA_REMAP, 0,
5704 + sizeof(r), &r, blk_trace_request_get_cgid(q, rq));
5705 ++ rcu_read_unlock();
5706 + }
5707 +
5708 + /**
5709 +@@ -1051,14 +1091,19 @@ void blk_add_driver_data(struct request_queue *q,
5710 + struct request *rq,
5711 + void *data, size_t len)
5712 + {
5713 +- struct blk_trace *bt = q->blk_trace;
5714 ++ struct blk_trace *bt;
5715 +
5716 +- if (likely(!bt))
5717 ++ rcu_read_lock();
5718 ++ bt = rcu_dereference(q->blk_trace);
5719 ++ if (likely(!bt)) {
5720 ++ rcu_read_unlock();
5721 + return;
5722 ++ }
5723 +
5724 + __blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0,
5725 + BLK_TA_DRV_DATA, 0, len, data,
5726 + blk_trace_request_get_cgid(q, rq));
5727 ++ rcu_read_unlock();
5728 + }
5729 + EXPORT_SYMBOL_GPL(blk_add_driver_data);
5730 +
5731 +@@ -1597,6 +1642,7 @@ static int blk_trace_remove_queue(struct request_queue *q)
5732 + return -EINVAL;
5733 +
5734 + put_probe_ref();
5735 ++ synchronize_rcu();
5736 + blk_trace_free(bt);
5737 + return 0;
5738 + }
5739 +@@ -1758,6 +1804,7 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
5740 + struct hd_struct *p = dev_to_part(dev);
5741 + struct request_queue *q;
5742 + struct block_device *bdev;
5743 ++ struct blk_trace *bt;
5744 + ssize_t ret = -ENXIO;
5745 +
5746 + bdev = bdget(part_devt(p));
5747 +@@ -1770,21 +1817,23 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
5748 +
5749 + mutex_lock(&q->blk_trace_mutex);
5750 +
5751 ++ bt = rcu_dereference_protected(q->blk_trace,
5752 ++ lockdep_is_held(&q->blk_trace_mutex));
5753 + if (attr == &dev_attr_enable) {
5754 +- ret = sprintf(buf, "%u\n", !!q->blk_trace);
5755 ++ ret = sprintf(buf, "%u\n", !!bt);
5756 + goto out_unlock_bdev;
5757 + }
5758 +
5759 +- if (q->blk_trace == NULL)
5760 ++ if (bt == NULL)
5761 + ret = sprintf(buf, "disabled\n");
5762 + else if (attr == &dev_attr_act_mask)
5763 +- ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
5764 ++ ret = blk_trace_mask2str(buf, bt->act_mask);
5765 + else if (attr == &dev_attr_pid)
5766 +- ret = sprintf(buf, "%u\n", q->blk_trace->pid);
5767 ++ ret = sprintf(buf, "%u\n", bt->pid);
5768 + else if (attr == &dev_attr_start_lba)
5769 +- ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
5770 ++ ret = sprintf(buf, "%llu\n", bt->start_lba);
5771 + else if (attr == &dev_attr_end_lba)
5772 +- ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);
5773 ++ ret = sprintf(buf, "%llu\n", bt->end_lba);
5774 +
5775 + out_unlock_bdev:
5776 + mutex_unlock(&q->blk_trace_mutex);
5777 +@@ -1801,6 +1850,7 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
5778 + struct block_device *bdev;
5779 + struct request_queue *q;
5780 + struct hd_struct *p;
5781 ++ struct blk_trace *bt;
5782 + u64 value;
5783 + ssize_t ret = -EINVAL;
5784 +
5785 +@@ -1831,8 +1881,10 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
5786 +
5787 + mutex_lock(&q->blk_trace_mutex);
5788 +
5789 ++ bt = rcu_dereference_protected(q->blk_trace,
5790 ++ lockdep_is_held(&q->blk_trace_mutex));
5791 + if (attr == &dev_attr_enable) {
5792 +- if (!!value == !!q->blk_trace) {
5793 ++ if (!!value == !!bt) {
5794 + ret = 0;
5795 + goto out_unlock_bdev;
5796 + }
5797 +@@ -1844,18 +1896,18 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
5798 + }
5799 +
5800 + ret = 0;
5801 +- if (q->blk_trace == NULL)
5802 ++ if (bt == NULL)
5803 + ret = blk_trace_setup_queue(q, bdev);
5804 +
5805 + if (ret == 0) {
5806 + if (attr == &dev_attr_act_mask)
5807 +- q->blk_trace->act_mask = value;
5808 ++ bt->act_mask = value;
5809 + else if (attr == &dev_attr_pid)
5810 +- q->blk_trace->pid = value;
5811 ++ bt->pid = value;
5812 + else if (attr == &dev_attr_start_lba)
5813 +- q->blk_trace->start_lba = value;
5814 ++ bt->start_lba = value;
5815 + else if (attr == &dev_attr_end_lba)
5816 +- q->blk_trace->end_lba = value;
5817 ++ bt->end_lba = value;
5818 + }
5819 +
5820 + out_unlock_bdev:
5821 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
5822 +index 5b6ee4aadc26..256ac508196f 100644
5823 +--- a/kernel/trace/trace.c
5824 ++++ b/kernel/trace/trace.c
5825 +@@ -1827,6 +1827,7 @@ static __init int init_trace_selftests(void)
5826 +
5827 + pr_info("Running postponed tracer tests:\n");
5828 +
5829 ++ tracing_selftest_running = true;
5830 + list_for_each_entry_safe(p, n, &postponed_selftests, list) {
5831 + /* This loop can take minutes when sanitizers are enabled, so
5832 + * lets make sure we allow RCU processing.
5833 +@@ -1849,6 +1850,7 @@ static __init int init_trace_selftests(void)
5834 + list_del(&p->list);
5835 + kfree(p);
5836 + }
5837 ++ tracing_selftest_running = false;
5838 +
5839 + out:
5840 + mutex_unlock(&trace_types_lock);
5841 +diff --git a/mm/debug.c b/mm/debug.c
5842 +index 0461df1207cb..6a52316af839 100644
5843 +--- a/mm/debug.c
5844 ++++ b/mm/debug.c
5845 +@@ -47,6 +47,7 @@ void __dump_page(struct page *page, const char *reason)
5846 + struct address_space *mapping;
5847 + bool page_poisoned = PagePoisoned(page);
5848 + int mapcount;
5849 ++ char *type = "";
5850 +
5851 + /*
5852 + * If struct page is poisoned don't access Page*() functions as that
5853 +@@ -78,9 +79,9 @@ void __dump_page(struct page *page, const char *reason)
5854 + page, page_ref_count(page), mapcount,
5855 + page->mapping, page_to_pgoff(page));
5856 + if (PageKsm(page))
5857 +- pr_warn("ksm flags: %#lx(%pGp)\n", page->flags, &page->flags);
5858 ++ type = "ksm ";
5859 + else if (PageAnon(page))
5860 +- pr_warn("anon flags: %#lx(%pGp)\n", page->flags, &page->flags);
5861 ++ type = "anon ";
5862 + else if (mapping) {
5863 + if (mapping->host && mapping->host->i_dentry.first) {
5864 + struct dentry *dentry;
5865 +@@ -88,10 +89,11 @@ void __dump_page(struct page *page, const char *reason)
5866 + pr_warn("%ps name:\"%pd\"\n", mapping->a_ops, dentry);
5867 + } else
5868 + pr_warn("%ps\n", mapping->a_ops);
5869 +- pr_warn("flags: %#lx(%pGp)\n", page->flags, &page->flags);
5870 + }
5871 + BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);
5872 +
5873 ++ pr_warn("%sflags: %#lx(%pGp)\n", type, page->flags, &page->flags);
5874 ++
5875 + hex_only:
5876 + print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
5877 + sizeof(unsigned long), page,
5878 +diff --git a/mm/gup.c b/mm/gup.c
5879 +index 7646bf993b25..5244b8090440 100644
5880 +--- a/mm/gup.c
5881 ++++ b/mm/gup.c
5882 +@@ -2415,7 +2415,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages,
5883 + unsigned long addr, len, end;
5884 + int nr = 0, ret = 0;
5885 +
5886 +- if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM)))
5887 ++ if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
5888 ++ FOLL_FORCE)))
5889 + return -EINVAL;
5890 +
5891 + start = untagged_addr(start) & PAGE_MASK;
5892 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
5893 +index a88093213674..54c106bdbafd 100644
5894 +--- a/mm/huge_memory.c
5895 ++++ b/mm/huge_memory.c
5896 +@@ -177,16 +177,13 @@ static ssize_t enabled_store(struct kobject *kobj,
5897 + {
5898 + ssize_t ret = count;
5899 +
5900 +- if (!memcmp("always", buf,
5901 +- min(sizeof("always")-1, count))) {
5902 ++ if (sysfs_streq(buf, "always")) {
5903 + clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
5904 + set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
5905 +- } else if (!memcmp("madvise", buf,
5906 +- min(sizeof("madvise")-1, count))) {
5907 ++ } else if (sysfs_streq(buf, "madvise")) {
5908 + clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
5909 + set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
5910 +- } else if (!memcmp("never", buf,
5911 +- min(sizeof("never")-1, count))) {
5912 ++ } else if (sysfs_streq(buf, "never")) {
5913 + clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
5914 + clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
5915 + } else
5916 +@@ -250,32 +247,27 @@ static ssize_t defrag_store(struct kobject *kobj,
5917 + struct kobj_attribute *attr,
5918 + const char *buf, size_t count)
5919 + {
5920 +- if (!memcmp("always", buf,
5921 +- min(sizeof("always")-1, count))) {
5922 ++ if (sysfs_streq(buf, "always")) {
5923 + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
5924 + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
5925 + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
5926 + set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
5927 +- } else if (!memcmp("defer+madvise", buf,
5928 +- min(sizeof("defer+madvise")-1, count))) {
5929 ++ } else if (sysfs_streq(buf, "defer+madvise")) {
5930 + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
5931 + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
5932 + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
5933 + set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
5934 +- } else if (!memcmp("defer", buf,
5935 +- min(sizeof("defer")-1, count))) {
5936 ++ } else if (sysfs_streq(buf, "defer")) {
5937 + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
5938 + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
5939 + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
5940 + set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
5941 +- } else if (!memcmp("madvise", buf,
5942 +- min(sizeof("madvise")-1, count))) {
5943 ++ } else if (sysfs_streq(buf, "madvise")) {
5944 + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
5945 + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
5946 + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
5947 + set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
5948 +- } else if (!memcmp("never", buf,
5949 +- min(sizeof("never")-1, count))) {
5950 ++ } else if (sysfs_streq(buf, "never")) {
5951 + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
5952 + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
5953 + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
5954 +@@ -2712,7 +2704,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
5955 + unsigned long flags;
5956 + pgoff_t end;
5957 +
5958 +- VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
5959 ++ VM_BUG_ON_PAGE(is_huge_zero_page(head), head);
5960 + VM_BUG_ON_PAGE(!PageLocked(page), page);
5961 + VM_BUG_ON_PAGE(!PageCompound(page), page);
5962 +
5963 +diff --git a/net/core/dev.c b/net/core/dev.c
5964 +index 466f2e4144b0..c3da35f3c7e4 100644
5965 +--- a/net/core/dev.c
5966 ++++ b/net/core/dev.c
5967 +@@ -146,7 +146,6 @@
5968 + #include "net-sysfs.h"
5969 +
5970 + #define MAX_GRO_SKBS 8
5971 +-#define MAX_NEST_DEV 8
5972 +
5973 + /* This should be increased if a protocol with a bigger head is added. */
5974 + #define GRO_MAX_HEAD (MAX_HEADER + 128)
5975 +@@ -331,6 +330,12 @@ int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
5976 + name_node = netdev_name_node_lookup(net, name);
5977 + if (!name_node)
5978 + return -ENOENT;
5979 ++ /* lookup might have found our primary name or a name belonging
5980 ++ * to another device.
5981 ++ */
5982 ++ if (name_node == dev->name_node || name_node->dev != dev)
5983 ++ return -EINVAL;
5984 ++
5985 + __netdev_name_node_alt_destroy(name_node);
5986 +
5987 + return 0;
5988 +@@ -3607,26 +3612,8 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
5989 + qdisc_calculate_pkt_len(skb, q);
5990 +
5991 + if (q->flags & TCQ_F_NOLOCK) {
5992 +- if ((q->flags & TCQ_F_CAN_BYPASS) && READ_ONCE(q->empty) &&
5993 +- qdisc_run_begin(q)) {
5994 +- if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
5995 +- &q->state))) {
5996 +- __qdisc_drop(skb, &to_free);
5997 +- rc = NET_XMIT_DROP;
5998 +- goto end_run;
5999 +- }
6000 +- qdisc_bstats_cpu_update(q, skb);
6001 +-
6002 +- rc = NET_XMIT_SUCCESS;
6003 +- if (sch_direct_xmit(skb, q, dev, txq, NULL, true))
6004 +- __qdisc_run(q);
6005 +-
6006 +-end_run:
6007 +- qdisc_run_end(q);
6008 +- } else {
6009 +- rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
6010 +- qdisc_run(q);
6011 +- }
6012 ++ rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
6013 ++ qdisc_run(q);
6014 +
6015 + if (unlikely(to_free))
6016 + kfree_skb_list(to_free);
6017 +@@ -7153,8 +7140,8 @@ static int __netdev_walk_all_lower_dev(struct net_device *dev,
6018 + return 0;
6019 + }
6020 +
6021 +-static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
6022 +- struct list_head **iter)
6023 ++struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
6024 ++ struct list_head **iter)
6025 + {
6026 + struct netdev_adjacent *lower;
6027 +
6028 +@@ -7166,6 +7153,7 @@ static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
6029 +
6030 + return lower->dev;
6031 + }
6032 ++EXPORT_SYMBOL(netdev_next_lower_dev_rcu);
6033 +
6034 + static u8 __netdev_upper_depth(struct net_device *dev)
6035 + {
6036 +diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
6037 +index 3e7e15278c46..bd7eba9066f8 100644
6038 +--- a/net/core/fib_rules.c
6039 ++++ b/net/core/fib_rules.c
6040 +@@ -974,7 +974,7 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
6041 +
6042 + frh = nlmsg_data(nlh);
6043 + frh->family = ops->family;
6044 +- frh->table = rule->table;
6045 ++ frh->table = rule->table < 256 ? rule->table : RT_TABLE_COMPAT;
6046 + if (nla_put_u32(skb, FRA_TABLE, rule->table))
6047 + goto nla_put_failure;
6048 + if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen))
6049 +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
6050 +index f02705ff0e5e..1737bac74c45 100644
6051 +--- a/net/core/rtnetlink.c
6052 ++++ b/net/core/rtnetlink.c
6053 +@@ -3499,27 +3499,25 @@ static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr,
6054 + if (err)
6055 + return err;
6056 +
6057 +- alt_ifname = nla_data(attr);
6058 ++ alt_ifname = nla_strdup(attr, GFP_KERNEL);
6059 ++ if (!alt_ifname)
6060 ++ return -ENOMEM;
6061 ++
6062 + if (cmd == RTM_NEWLINKPROP) {
6063 +- alt_ifname = kstrdup(alt_ifname, GFP_KERNEL);
6064 +- if (!alt_ifname)
6065 +- return -ENOMEM;
6066 + err = netdev_name_node_alt_create(dev, alt_ifname);
6067 +- if (err) {
6068 +- kfree(alt_ifname);
6069 +- return err;
6070 +- }
6071 ++ if (!err)
6072 ++ alt_ifname = NULL;
6073 + } else if (cmd == RTM_DELLINKPROP) {
6074 + err = netdev_name_node_alt_destroy(dev, alt_ifname);
6075 +- if (err)
6076 +- return err;
6077 + } else {
6078 +- WARN_ON(1);
6079 +- return 0;
6080 ++ WARN_ON_ONCE(1);
6081 ++ err = -EINVAL;
6082 + }
6083 +
6084 +- *changed = true;
6085 +- return 0;
6086 ++ kfree(alt_ifname);
6087 ++ if (!err)
6088 ++ *changed = true;
6089 ++ return err;
6090 + }
6091 +
6092 + static int rtnl_linkprop(int cmd, struct sk_buff *skb, struct nlmsghdr *nlh,
6093 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
6094 +index 030d43c7c957..be5c5903cfe1 100644
6095 +--- a/net/ipv4/udp.c
6096 ++++ b/net/ipv4/udp.c
6097 +@@ -1856,8 +1856,12 @@ int __udp_disconnect(struct sock *sk, int flags)
6098 + inet->inet_dport = 0;
6099 + sock_rps_reset_rxhash(sk);
6100 + sk->sk_bound_dev_if = 0;
6101 +- if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
6102 ++ if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) {
6103 + inet_reset_saddr(sk);
6104 ++ if (sk->sk_prot->rehash &&
6105 ++ (sk->sk_userlocks & SOCK_BINDPORT_LOCK))
6106 ++ sk->sk_prot->rehash(sk);
6107 ++ }
6108 +
6109 + if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
6110 + sk->sk_prot->unhash(sk);
6111 +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
6112 +index cfae0a1529a1..bde3bf180871 100644
6113 +--- a/net/ipv6/ip6_fib.c
6114 ++++ b/net/ipv6/ip6_fib.c
6115 +@@ -1068,8 +1068,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
6116 + found++;
6117 + break;
6118 + }
6119 +- if (rt_can_ecmp)
6120 +- fallback_ins = fallback_ins ?: ins;
6121 ++ fallback_ins = fallback_ins ?: ins;
6122 + goto next_iter;
6123 + }
6124 +
6125 +@@ -1112,7 +1111,9 @@ next_iter:
6126 + }
6127 +
6128 + if (fallback_ins && !found) {
6129 +- /* No ECMP-able route found, replace first non-ECMP one */
6130 ++ /* No matching route with same ecmp-able-ness found, replace
6131 ++ * first matching route
6132 ++ */
6133 + ins = fallback_ins;
6134 + iter = rcu_dereference_protected(*ins,
6135 + lockdep_is_held(&rt->fib6_table->tb6_lock));
6136 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
6137 +index affb51c11a25..119c7226c4be 100644
6138 +--- a/net/ipv6/route.c
6139 ++++ b/net/ipv6/route.c
6140 +@@ -5152,6 +5152,7 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
6141 + */
6142 + cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
6143 + NLM_F_REPLACE);
6144 ++ cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE;
6145 + nhn++;
6146 + }
6147 +
6148 +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
6149 +index e041af2f021a..88d7a692a965 100644
6150 +--- a/net/mac80211/mlme.c
6151 ++++ b/net/mac80211/mlme.c
6152 +@@ -2959,7 +2959,7 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
6153 + (auth_transaction == 2 &&
6154 + ifmgd->auth_data->expected_transaction == 2)) {
6155 + if (!ieee80211_mark_sta_auth(sdata, bssid))
6156 +- goto out_err;
6157 ++ return; /* ignore frame -- wait for timeout */
6158 + } else if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE &&
6159 + auth_transaction == 2) {
6160 + sdata_info(sdata, "SAE peer confirmed\n");
6161 +@@ -2967,10 +2967,6 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
6162 + }
6163 +
6164 + cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len);
6165 +- return;
6166 +- out_err:
6167 +- mutex_unlock(&sdata->local->sta_mtx);
6168 +- /* ignore frame -- wait for timeout */
6169 + }
6170 +
6171 + #define case_WLAN(type) \
6172 +diff --git a/net/mac80211/util.c b/net/mac80211/util.c
6173 +index 32a7a53833c0..decd46b38393 100644
6174 +--- a/net/mac80211/util.c
6175 ++++ b/net/mac80211/util.c
6176 +@@ -1063,16 +1063,22 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
6177 + elem_parse_failed = true;
6178 + break;
6179 + case WLAN_EID_VHT_OPERATION:
6180 +- if (elen >= sizeof(struct ieee80211_vht_operation))
6181 ++ if (elen >= sizeof(struct ieee80211_vht_operation)) {
6182 + elems->vht_operation = (void *)pos;
6183 +- else
6184 +- elem_parse_failed = true;
6185 ++ if (calc_crc)
6186 ++ crc = crc32_be(crc, pos - 2, elen + 2);
6187 ++ break;
6188 ++ }
6189 ++ elem_parse_failed = true;
6190 + break;
6191 + case WLAN_EID_OPMODE_NOTIF:
6192 +- if (elen > 0)
6193 ++ if (elen > 0) {
6194 + elems->opmode_notif = pos;
6195 +- else
6196 +- elem_parse_failed = true;
6197 ++ if (calc_crc)
6198 ++ crc = crc32_be(crc, pos - 2, elen + 2);
6199 ++ break;
6200 ++ }
6201 ++ elem_parse_failed = true;
6202 + break;
6203 + case WLAN_EID_MESH_ID:
6204 + elems->mesh_id = pos;
6205 +@@ -2987,10 +2993,22 @@ bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw,
6206 + int cf0, cf1;
6207 + int ccfs0, ccfs1, ccfs2;
6208 + int ccf0, ccf1;
6209 ++ u32 vht_cap;
6210 ++ bool support_80_80 = false;
6211 ++ bool support_160 = false;
6212 +
6213 + if (!oper || !htop)
6214 + return false;
6215 +
6216 ++ vht_cap = hw->wiphy->bands[chandef->chan->band]->vht_cap.cap;
6217 ++ support_160 = (vht_cap & (IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK |
6218 ++ IEEE80211_VHT_CAP_EXT_NSS_BW_MASK));
6219 ++ support_80_80 = ((vht_cap &
6220 ++ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) ||
6221 ++ (vht_cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ &&
6222 ++ vht_cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) ||
6223 ++ ((vht_cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) >>
6224 ++ IEEE80211_VHT_CAP_EXT_NSS_BW_SHIFT > 1));
6225 + ccfs0 = oper->center_freq_seg0_idx;
6226 + ccfs1 = oper->center_freq_seg1_idx;
6227 + ccfs2 = (le16_to_cpu(htop->operation_mode) &
6228 +@@ -3018,10 +3036,10 @@ bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw,
6229 + unsigned int diff;
6230 +
6231 + diff = abs(ccf1 - ccf0);
6232 +- if (diff == 8) {
6233 ++ if ((diff == 8) && support_160) {
6234 + new.width = NL80211_CHAN_WIDTH_160;
6235 + new.center_freq1 = cf1;
6236 +- } else if (diff > 8) {
6237 ++ } else if ((diff > 8) && support_80_80) {
6238 + new.width = NL80211_CHAN_WIDTH_80P80;
6239 + new.center_freq2 = cf1;
6240 + }
6241 +diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
6242 +index 69c107f9ba8d..8dd17589217d 100644
6243 +--- a/net/netfilter/ipset/ip_set_core.c
6244 ++++ b/net/netfilter/ipset/ip_set_core.c
6245 +@@ -723,6 +723,20 @@ ip_set_rcu_get(struct net *net, ip_set_id_t index)
6246 + return set;
6247 + }
6248 +
6249 ++static inline void
6250 ++ip_set_lock(struct ip_set *set)
6251 ++{
6252 ++ if (!set->variant->region_lock)
6253 ++ spin_lock_bh(&set->lock);
6254 ++}
6255 ++
6256 ++static inline void
6257 ++ip_set_unlock(struct ip_set *set)
6258 ++{
6259 ++ if (!set->variant->region_lock)
6260 ++ spin_unlock_bh(&set->lock);
6261 ++}
6262 ++
6263 + int
6264 + ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
6265 + const struct xt_action_param *par, struct ip_set_adt_opt *opt)
6266 +@@ -744,9 +758,9 @@ ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
6267 + if (ret == -EAGAIN) {
6268 + /* Type requests element to be completed */
6269 + pr_debug("element must be completed, ADD is triggered\n");
6270 +- spin_lock_bh(&set->lock);
6271 ++ ip_set_lock(set);
6272 + set->variant->kadt(set, skb, par, IPSET_ADD, opt);
6273 +- spin_unlock_bh(&set->lock);
6274 ++ ip_set_unlock(set);
6275 + ret = 1;
6276 + } else {
6277 + /* --return-nomatch: invert matched element */
6278 +@@ -775,9 +789,9 @@ ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
6279 + !(opt->family == set->family || set->family == NFPROTO_UNSPEC))
6280 + return -IPSET_ERR_TYPE_MISMATCH;
6281 +
6282 +- spin_lock_bh(&set->lock);
6283 ++ ip_set_lock(set);
6284 + ret = set->variant->kadt(set, skb, par, IPSET_ADD, opt);
6285 +- spin_unlock_bh(&set->lock);
6286 ++ ip_set_unlock(set);
6287 +
6288 + return ret;
6289 + }
6290 +@@ -797,9 +811,9 @@ ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
6291 + !(opt->family == set->family || set->family == NFPROTO_UNSPEC))
6292 + return -IPSET_ERR_TYPE_MISMATCH;
6293 +
6294 +- spin_lock_bh(&set->lock);
6295 ++ ip_set_lock(set);
6296 + ret = set->variant->kadt(set, skb, par, IPSET_DEL, opt);
6297 +- spin_unlock_bh(&set->lock);
6298 ++ ip_set_unlock(set);
6299 +
6300 + return ret;
6301 + }
6302 +@@ -1264,9 +1278,9 @@ ip_set_flush_set(struct ip_set *set)
6303 + {
6304 + pr_debug("set: %s\n", set->name);
6305 +
6306 +- spin_lock_bh(&set->lock);
6307 ++ ip_set_lock(set);
6308 + set->variant->flush(set);
6309 +- spin_unlock_bh(&set->lock);
6310 ++ ip_set_unlock(set);
6311 + }
6312 +
6313 + static int ip_set_flush(struct net *net, struct sock *ctnl, struct sk_buff *skb,
6314 +@@ -1713,9 +1727,9 @@ call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
6315 + bool eexist = flags & IPSET_FLAG_EXIST, retried = false;
6316 +
6317 + do {
6318 +- spin_lock_bh(&set->lock);
6319 ++ ip_set_lock(set);
6320 + ret = set->variant->uadt(set, tb, adt, &lineno, flags, retried);
6321 +- spin_unlock_bh(&set->lock);
6322 ++ ip_set_unlock(set);
6323 + retried = true;
6324 + } while (ret == -EAGAIN &&
6325 + set->variant->resize &&
6326 +diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
6327 +index 7480ce55b5c8..e52d7b7597a0 100644
6328 +--- a/net/netfilter/ipset/ip_set_hash_gen.h
6329 ++++ b/net/netfilter/ipset/ip_set_hash_gen.h
6330 +@@ -7,13 +7,21 @@
6331 + #include <linux/rcupdate.h>
6332 + #include <linux/jhash.h>
6333 + #include <linux/types.h>
6334 ++#include <linux/netfilter/nfnetlink.h>
6335 + #include <linux/netfilter/ipset/ip_set.h>
6336 +
6337 +-#define __ipset_dereference_protected(p, c) rcu_dereference_protected(p, c)
6338 +-#define ipset_dereference_protected(p, set) \
6339 +- __ipset_dereference_protected(p, lockdep_is_held(&(set)->lock))
6340 +-
6341 +-#define rcu_dereference_bh_nfnl(p) rcu_dereference_bh_check(p, 1)
6342 ++#define __ipset_dereference(p) \
6343 ++ rcu_dereference_protected(p, 1)
6344 ++#define ipset_dereference_nfnl(p) \
6345 ++ rcu_dereference_protected(p, \
6346 ++ lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET))
6347 ++#define ipset_dereference_set(p, set) \
6348 ++ rcu_dereference_protected(p, \
6349 ++ lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET) || \
6350 ++ lockdep_is_held(&(set)->lock))
6351 ++#define ipset_dereference_bh_nfnl(p) \
6352 ++ rcu_dereference_bh_check(p, \
6353 ++ lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET))
6354 +
6355 + /* Hashing which uses arrays to resolve clashing. The hash table is resized
6356 + * (doubled) when searching becomes too long.
6357 +@@ -72,11 +80,35 @@ struct hbucket {
6358 + __aligned(__alignof__(u64));
6359 + };
6360 +
6361 ++/* Region size for locking == 2^HTABLE_REGION_BITS */
6362 ++#define HTABLE_REGION_BITS 10
6363 ++#define ahash_numof_locks(htable_bits) \
6364 ++ ((htable_bits) < HTABLE_REGION_BITS ? 1 \
6365 ++ : jhash_size((htable_bits) - HTABLE_REGION_BITS))
6366 ++#define ahash_sizeof_regions(htable_bits) \
6367 ++ (ahash_numof_locks(htable_bits) * sizeof(struct ip_set_region))
6368 ++#define ahash_region(n, htable_bits) \
6369 ++ ((n) % ahash_numof_locks(htable_bits))
6370 ++#define ahash_bucket_start(h, htable_bits) \
6371 ++ ((htable_bits) < HTABLE_REGION_BITS ? 0 \
6372 ++ : (h) * jhash_size(HTABLE_REGION_BITS))
6373 ++#define ahash_bucket_end(h, htable_bits) \
6374 ++ ((htable_bits) < HTABLE_REGION_BITS ? jhash_size(htable_bits) \
6375 ++ : ((h) + 1) * jhash_size(HTABLE_REGION_BITS))
6376 ++
6377 ++struct htable_gc {
6378 ++ struct delayed_work dwork;
6379 ++ struct ip_set *set; /* Set the gc belongs to */
6380 ++ u32 region; /* Last gc run position */
6381 ++};
6382 ++
6383 + /* The hash table: the table size stored here in order to make resizing easy */
6384 + struct htable {
6385 + atomic_t ref; /* References for resizing */
6386 +- atomic_t uref; /* References for dumping */
6387 ++ atomic_t uref; /* References for dumping and gc */
6388 + u8 htable_bits; /* size of hash table == 2^htable_bits */
6389 ++ u32 maxelem; /* Maxelem per region */
6390 ++ struct ip_set_region *hregion; /* Region locks and ext sizes */
6391 + struct hbucket __rcu *bucket[0]; /* hashtable buckets */
6392 + };
6393 +
6394 +@@ -162,6 +194,10 @@ htable_bits(u32 hashsize)
6395 + #define NLEN 0
6396 + #endif /* IP_SET_HASH_WITH_NETS */
6397 +
6398 ++#define SET_ELEM_EXPIRED(set, d) \
6399 ++ (SET_WITH_TIMEOUT(set) && \
6400 ++ ip_set_timeout_expired(ext_timeout(d, set)))
6401 ++
6402 + #endif /* _IP_SET_HASH_GEN_H */
6403 +
6404 + #ifndef MTYPE
6405 +@@ -205,10 +241,12 @@ htable_bits(u32 hashsize)
6406 + #undef mtype_test_cidrs
6407 + #undef mtype_test
6408 + #undef mtype_uref
6409 +-#undef mtype_expire
6410 + #undef mtype_resize
6411 ++#undef mtype_ext_size
6412 ++#undef mtype_resize_ad
6413 + #undef mtype_head
6414 + #undef mtype_list
6415 ++#undef mtype_gc_do
6416 + #undef mtype_gc
6417 + #undef mtype_gc_init
6418 + #undef mtype_variant
6419 +@@ -247,10 +285,12 @@ htable_bits(u32 hashsize)
6420 + #define mtype_test_cidrs IPSET_TOKEN(MTYPE, _test_cidrs)
6421 + #define mtype_test IPSET_TOKEN(MTYPE, _test)
6422 + #define mtype_uref IPSET_TOKEN(MTYPE, _uref)
6423 +-#define mtype_expire IPSET_TOKEN(MTYPE, _expire)
6424 + #define mtype_resize IPSET_TOKEN(MTYPE, _resize)
6425 ++#define mtype_ext_size IPSET_TOKEN(MTYPE, _ext_size)
6426 ++#define mtype_resize_ad IPSET_TOKEN(MTYPE, _resize_ad)
6427 + #define mtype_head IPSET_TOKEN(MTYPE, _head)
6428 + #define mtype_list IPSET_TOKEN(MTYPE, _list)
6429 ++#define mtype_gc_do IPSET_TOKEN(MTYPE, _gc_do)
6430 + #define mtype_gc IPSET_TOKEN(MTYPE, _gc)
6431 + #define mtype_gc_init IPSET_TOKEN(MTYPE, _gc_init)
6432 + #define mtype_variant IPSET_TOKEN(MTYPE, _variant)
6433 +@@ -275,8 +315,7 @@ htable_bits(u32 hashsize)
6434 + /* The generic hash structure */
6435 + struct htype {
6436 + struct htable __rcu *table; /* the hash table */
6437 +- struct timer_list gc; /* garbage collection when timeout enabled */
6438 +- struct ip_set *set; /* attached to this ip_set */
6439 ++ struct htable_gc gc; /* gc workqueue */
6440 + u32 maxelem; /* max elements in the hash */
6441 + u32 initval; /* random jhash init value */
6442 + #ifdef IP_SET_HASH_WITH_MARKMASK
6443 +@@ -288,21 +327,33 @@ struct htype {
6444 + #ifdef IP_SET_HASH_WITH_NETMASK
6445 + u8 netmask; /* netmask value for subnets to store */
6446 + #endif
6447 ++ struct list_head ad; /* Resize add|del backlist */
6448 + struct mtype_elem next; /* temporary storage for uadd */
6449 + #ifdef IP_SET_HASH_WITH_NETS
6450 + struct net_prefixes nets[NLEN]; /* book-keeping of prefixes */
6451 + #endif
6452 + };
6453 +
6454 ++/* ADD|DEL entries saved during resize */
6455 ++struct mtype_resize_ad {
6456 ++ struct list_head list;
6457 ++ enum ipset_adt ad; /* ADD|DEL element */
6458 ++ struct mtype_elem d; /* Element value */
6459 ++ struct ip_set_ext ext; /* Extensions for ADD */
6460 ++ struct ip_set_ext mext; /* Target extensions for ADD */
6461 ++ u32 flags; /* Flags for ADD */
6462 ++};
6463 ++
6464 + #ifdef IP_SET_HASH_WITH_NETS
6465 + /* Network cidr size book keeping when the hash stores different
6466 + * sized networks. cidr == real cidr + 1 to support /0.
6467 + */
6468 + static void
6469 +-mtype_add_cidr(struct htype *h, u8 cidr, u8 n)
6470 ++mtype_add_cidr(struct ip_set *set, struct htype *h, u8 cidr, u8 n)
6471 + {
6472 + int i, j;
6473 +
6474 ++ spin_lock_bh(&set->lock);
6475 + /* Add in increasing prefix order, so larger cidr first */
6476 + for (i = 0, j = -1; i < NLEN && h->nets[i].cidr[n]; i++) {
6477 + if (j != -1) {
6478 +@@ -311,7 +362,7 @@ mtype_add_cidr(struct htype *h, u8 cidr, u8 n)
6479 + j = i;
6480 + } else if (h->nets[i].cidr[n] == cidr) {
6481 + h->nets[CIDR_POS(cidr)].nets[n]++;
6482 +- return;
6483 ++ goto unlock;
6484 + }
6485 + }
6486 + if (j != -1) {
6487 +@@ -320,24 +371,29 @@ mtype_add_cidr(struct htype *h, u8 cidr, u8 n)
6488 + }
6489 + h->nets[i].cidr[n] = cidr;
6490 + h->nets[CIDR_POS(cidr)].nets[n] = 1;
6491 ++unlock:
6492 ++ spin_unlock_bh(&set->lock);
6493 + }
6494 +
6495 + static void
6496 +-mtype_del_cidr(struct htype *h, u8 cidr, u8 n)
6497 ++mtype_del_cidr(struct ip_set *set, struct htype *h, u8 cidr, u8 n)
6498 + {
6499 + u8 i, j, net_end = NLEN - 1;
6500 +
6501 ++ spin_lock_bh(&set->lock);
6502 + for (i = 0; i < NLEN; i++) {
6503 + if (h->nets[i].cidr[n] != cidr)
6504 + continue;
6505 + h->nets[CIDR_POS(cidr)].nets[n]--;
6506 + if (h->nets[CIDR_POS(cidr)].nets[n] > 0)
6507 +- return;
6508 ++ goto unlock;
6509 + for (j = i; j < net_end && h->nets[j].cidr[n]; j++)
6510 + h->nets[j].cidr[n] = h->nets[j + 1].cidr[n];
6511 + h->nets[j].cidr[n] = 0;
6512 +- return;
6513 ++ goto unlock;
6514 + }
6515 ++unlock:
6516 ++ spin_unlock_bh(&set->lock);
6517 + }
6518 + #endif
6519 +
6520 +@@ -345,7 +401,7 @@ mtype_del_cidr(struct htype *h, u8 cidr, u8 n)
6521 + static size_t
6522 + mtype_ahash_memsize(const struct htype *h, const struct htable *t)
6523 + {
6524 +- return sizeof(*h) + sizeof(*t);
6525 ++ return sizeof(*h) + sizeof(*t) + ahash_sizeof_regions(t->htable_bits);
6526 + }
6527 +
6528 + /* Get the ith element from the array block n */
6529 +@@ -369,24 +425,29 @@ mtype_flush(struct ip_set *set)
6530 + struct htype *h = set->data;
6531 + struct htable *t;
6532 + struct hbucket *n;
6533 +- u32 i;
6534 +-
6535 +- t = ipset_dereference_protected(h->table, set);
6536 +- for (i = 0; i < jhash_size(t->htable_bits); i++) {
6537 +- n = __ipset_dereference_protected(hbucket(t, i), 1);
6538 +- if (!n)
6539 +- continue;
6540 +- if (set->extensions & IPSET_EXT_DESTROY)
6541 +- mtype_ext_cleanup(set, n);
6542 +- /* FIXME: use slab cache */
6543 +- rcu_assign_pointer(hbucket(t, i), NULL);
6544 +- kfree_rcu(n, rcu);
6545 ++ u32 r, i;
6546 ++
6547 ++ t = ipset_dereference_nfnl(h->table);
6548 ++ for (r = 0; r < ahash_numof_locks(t->htable_bits); r++) {
6549 ++ spin_lock_bh(&t->hregion[r].lock);
6550 ++ for (i = ahash_bucket_start(r, t->htable_bits);
6551 ++ i < ahash_bucket_end(r, t->htable_bits); i++) {
6552 ++ n = __ipset_dereference(hbucket(t, i));
6553 ++ if (!n)
6554 ++ continue;
6555 ++ if (set->extensions & IPSET_EXT_DESTROY)
6556 ++ mtype_ext_cleanup(set, n);
6557 ++ /* FIXME: use slab cache */
6558 ++ rcu_assign_pointer(hbucket(t, i), NULL);
6559 ++ kfree_rcu(n, rcu);
6560 ++ }
6561 ++ t->hregion[r].ext_size = 0;
6562 ++ t->hregion[r].elements = 0;
6563 ++ spin_unlock_bh(&t->hregion[r].lock);
6564 + }
6565 + #ifdef IP_SET_HASH_WITH_NETS
6566 + memset(h->nets, 0, sizeof(h->nets));
6567 + #endif
6568 +- set->elements = 0;
6569 +- set->ext_size = 0;
6570 + }
6571 +
6572 + /* Destroy the hashtable part of the set */
6573 +@@ -397,7 +458,7 @@ mtype_ahash_destroy(struct ip_set *set, struct htable *t, bool ext_destroy)
6574 + u32 i;
6575 +
6576 + for (i = 0; i < jhash_size(t->htable_bits); i++) {
6577 +- n = __ipset_dereference_protected(hbucket(t, i), 1);
6578 ++ n = __ipset_dereference(hbucket(t, i));
6579 + if (!n)
6580 + continue;
6581 + if (set->extensions & IPSET_EXT_DESTROY && ext_destroy)
6582 +@@ -406,6 +467,7 @@ mtype_ahash_destroy(struct ip_set *set, struct htable *t, bool ext_destroy)
6583 + kfree(n);
6584 + }
6585 +
6586 ++ ip_set_free(t->hregion);
6587 + ip_set_free(t);
6588 + }
6589 +
6590 +@@ -414,28 +476,21 @@ static void
6591 + mtype_destroy(struct ip_set *set)
6592 + {
6593 + struct htype *h = set->data;
6594 ++ struct list_head *l, *lt;
6595 +
6596 + if (SET_WITH_TIMEOUT(set))
6597 +- del_timer_sync(&h->gc);
6598 ++ cancel_delayed_work_sync(&h->gc.dwork);
6599 +
6600 +- mtype_ahash_destroy(set,
6601 +- __ipset_dereference_protected(h->table, 1), true);
6602 ++ mtype_ahash_destroy(set, ipset_dereference_nfnl(h->table), true);
6603 ++ list_for_each_safe(l, lt, &h->ad) {
6604 ++ list_del(l);
6605 ++ kfree(l);
6606 ++ }
6607 + kfree(h);
6608 +
6609 + set->data = NULL;
6610 + }
6611 +
6612 +-static void
6613 +-mtype_gc_init(struct ip_set *set, void (*gc)(struct timer_list *t))
6614 +-{
6615 +- struct htype *h = set->data;
6616 +-
6617 +- timer_setup(&h->gc, gc, 0);
6618 +- mod_timer(&h->gc, jiffies + IPSET_GC_PERIOD(set->timeout) * HZ);
6619 +- pr_debug("gc initialized, run in every %u\n",
6620 +- IPSET_GC_PERIOD(set->timeout));
6621 +-}
6622 +-
6623 + static bool
6624 + mtype_same_set(const struct ip_set *a, const struct ip_set *b)
6625 + {
6626 +@@ -454,11 +509,9 @@ mtype_same_set(const struct ip_set *a, const struct ip_set *b)
6627 + a->extensions == b->extensions;
6628 + }
6629 +
6630 +-/* Delete expired elements from the hashtable */
6631 + static void
6632 +-mtype_expire(struct ip_set *set, struct htype *h)
6633 ++mtype_gc_do(struct ip_set *set, struct htype *h, struct htable *t, u32 r)
6634 + {
6635 +- struct htable *t;
6636 + struct hbucket *n, *tmp;
6637 + struct mtype_elem *data;
6638 + u32 i, j, d;
6639 +@@ -466,10 +519,12 @@ mtype_expire(struct ip_set *set, struct htype *h)
6640 + #ifdef IP_SET_HASH_WITH_NETS
6641 + u8 k;
6642 + #endif
6643 ++ u8 htable_bits = t->htable_bits;
6644 +
6645 +- t = ipset_dereference_protected(h->table, set);
6646 +- for (i = 0; i < jhash_size(t->htable_bits); i++) {
6647 +- n = __ipset_dereference_protected(hbucket(t, i), 1);
6648 ++ spin_lock_bh(&t->hregion[r].lock);
6649 ++ for (i = ahash_bucket_start(r, htable_bits);
6650 ++ i < ahash_bucket_end(r, htable_bits); i++) {
6651 ++ n = __ipset_dereference(hbucket(t, i));
6652 + if (!n)
6653 + continue;
6654 + for (j = 0, d = 0; j < n->pos; j++) {
6655 +@@ -485,58 +540,100 @@ mtype_expire(struct ip_set *set, struct htype *h)
6656 + smp_mb__after_atomic();
6657 + #ifdef IP_SET_HASH_WITH_NETS
6658 + for (k = 0; k < IPSET_NET_COUNT; k++)
6659 +- mtype_del_cidr(h,
6660 ++ mtype_del_cidr(set, h,
6661 + NCIDR_PUT(DCIDR_GET(data->cidr, k)),
6662 + k);
6663 + #endif
6664 ++ t->hregion[r].elements--;
6665 + ip_set_ext_destroy(set, data);
6666 +- set->elements--;
6667 + d++;
6668 + }
6669 + if (d >= AHASH_INIT_SIZE) {
6670 + if (d >= n->size) {
6671 ++ t->hregion[r].ext_size -=
6672 ++ ext_size(n->size, dsize);
6673 + rcu_assign_pointer(hbucket(t, i), NULL);
6674 + kfree_rcu(n, rcu);
6675 + continue;
6676 + }
6677 + tmp = kzalloc(sizeof(*tmp) +
6678 +- (n->size - AHASH_INIT_SIZE) * dsize,
6679 +- GFP_ATOMIC);
6680 ++ (n->size - AHASH_INIT_SIZE) * dsize,
6681 ++ GFP_ATOMIC);
6682 + if (!tmp)
6683 +- /* Still try to delete expired elements */
6684 ++ /* Still try to delete expired elements. */
6685 + continue;
6686 + tmp->size = n->size - AHASH_INIT_SIZE;
6687 + for (j = 0, d = 0; j < n->pos; j++) {
6688 + if (!test_bit(j, n->used))
6689 + continue;
6690 + data = ahash_data(n, j, dsize);
6691 +- memcpy(tmp->value + d * dsize, data, dsize);
6692 ++ memcpy(tmp->value + d * dsize,
6693 ++ data, dsize);
6694 + set_bit(d, tmp->used);
6695 + d++;
6696 + }
6697 + tmp->pos = d;
6698 +- set->ext_size -= ext_size(AHASH_INIT_SIZE, dsize);
6699 ++ t->hregion[r].ext_size -=
6700 ++ ext_size(AHASH_INIT_SIZE, dsize);
6701 + rcu_assign_pointer(hbucket(t, i), tmp);
6702 + kfree_rcu(n, rcu);
6703 + }
6704 + }
6705 ++ spin_unlock_bh(&t->hregion[r].lock);
6706 + }
6707 +
6708 + static void
6709 +-mtype_gc(struct timer_list *t)
6710 ++mtype_gc(struct work_struct *work)
6711 + {
6712 +- struct htype *h = from_timer(h, t, gc);
6713 +- struct ip_set *set = h->set;
6714 ++ struct htable_gc *gc;
6715 ++ struct ip_set *set;
6716 ++ struct htype *h;
6717 ++ struct htable *t;
6718 ++ u32 r, numof_locks;
6719 ++ unsigned int next_run;
6720 ++
6721 ++ gc = container_of(work, struct htable_gc, dwork.work);
6722 ++ set = gc->set;
6723 ++ h = set->data;
6724 +
6725 +- pr_debug("called\n");
6726 + spin_lock_bh(&set->lock);
6727 +- mtype_expire(set, h);
6728 ++ t = ipset_dereference_set(h->table, set);
6729 ++ atomic_inc(&t->uref);
6730 ++ numof_locks = ahash_numof_locks(t->htable_bits);
6731 ++ r = gc->region++;
6732 ++ if (r >= numof_locks) {
6733 ++ r = gc->region = 0;
6734 ++ }
6735 ++ next_run = (IPSET_GC_PERIOD(set->timeout) * HZ) / numof_locks;
6736 ++ if (next_run < HZ/10)
6737 ++ next_run = HZ/10;
6738 + spin_unlock_bh(&set->lock);
6739 +
6740 +- h->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
6741 +- add_timer(&h->gc);
6742 ++ mtype_gc_do(set, h, t, r);
6743 ++
6744 ++ if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
6745 ++ pr_debug("Table destroy after resize by expire: %p\n", t);
6746 ++ mtype_ahash_destroy(set, t, false);
6747 ++ }
6748 ++
6749 ++ queue_delayed_work(system_power_efficient_wq, &gc->dwork, next_run);
6750 ++
6751 ++}
6752 ++
6753 ++static void
6754 ++mtype_gc_init(struct htable_gc *gc)
6755 ++{
6756 ++ INIT_DEFERRABLE_WORK(&gc->dwork, mtype_gc);
6757 ++ queue_delayed_work(system_power_efficient_wq, &gc->dwork, HZ);
6758 + }
6759 +
6760 ++static int
6761 ++mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
6762 ++ struct ip_set_ext *mext, u32 flags);
6763 ++static int
6764 ++mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
6765 ++ struct ip_set_ext *mext, u32 flags);
6766 ++
6767 + /* Resize a hash: create a new hash table with doubling the hashsize
6768 + * and inserting the elements to it. Repeat until we succeed or
6769 + * fail due to memory pressures.
6770 +@@ -547,7 +644,7 @@ mtype_resize(struct ip_set *set, bool retried)
6771 + struct htype *h = set->data;
6772 + struct htable *t, *orig;
6773 + u8 htable_bits;
6774 +- size_t extsize, dsize = set->dsize;
6775 ++ size_t dsize = set->dsize;
6776 + #ifdef IP_SET_HASH_WITH_NETS
6777 + u8 flags;
6778 + struct mtype_elem *tmp;
6779 +@@ -555,7 +652,9 @@ mtype_resize(struct ip_set *set, bool retried)
6780 + struct mtype_elem *data;
6781 + struct mtype_elem *d;
6782 + struct hbucket *n, *m;
6783 +- u32 i, j, key;
6784 ++ struct list_head *l, *lt;
6785 ++ struct mtype_resize_ad *x;
6786 ++ u32 i, j, r, nr, key;
6787 + int ret;
6788 +
6789 + #ifdef IP_SET_HASH_WITH_NETS
6790 +@@ -563,10 +662,8 @@ mtype_resize(struct ip_set *set, bool retried)
6791 + if (!tmp)
6792 + return -ENOMEM;
6793 + #endif
6794 +- rcu_read_lock_bh();
6795 +- orig = rcu_dereference_bh_nfnl(h->table);
6796 ++ orig = ipset_dereference_bh_nfnl(h->table);
6797 + htable_bits = orig->htable_bits;
6798 +- rcu_read_unlock_bh();
6799 +
6800 + retry:
6801 + ret = 0;
6802 +@@ -583,88 +680,124 @@ retry:
6803 + ret = -ENOMEM;
6804 + goto out;
6805 + }
6806 ++ t->hregion = ip_set_alloc(ahash_sizeof_regions(htable_bits));
6807 ++ if (!t->hregion) {
6808 ++ kfree(t);
6809 ++ ret = -ENOMEM;
6810 ++ goto out;
6811 ++ }
6812 + t->htable_bits = htable_bits;
6813 ++ t->maxelem = h->maxelem / ahash_numof_locks(htable_bits);
6814 ++ for (i = 0; i < ahash_numof_locks(htable_bits); i++)
6815 ++ spin_lock_init(&t->hregion[i].lock);
6816 +
6817 +- spin_lock_bh(&set->lock);
6818 +- orig = __ipset_dereference_protected(h->table, 1);
6819 +- /* There can't be another parallel resizing, but dumping is possible */
6820 ++ /* There can't be another parallel resizing,
6821 ++ * but dumping, gc, kernel side add/del are possible
6822 ++ */
6823 ++ orig = ipset_dereference_bh_nfnl(h->table);
6824 + atomic_set(&orig->ref, 1);
6825 + atomic_inc(&orig->uref);
6826 +- extsize = 0;
6827 + pr_debug("attempt to resize set %s from %u to %u, t %p\n",
6828 + set->name, orig->htable_bits, htable_bits, orig);
6829 +- for (i = 0; i < jhash_size(orig->htable_bits); i++) {
6830 +- n = __ipset_dereference_protected(hbucket(orig, i), 1);
6831 +- if (!n)
6832 +- continue;
6833 +- for (j = 0; j < n->pos; j++) {
6834 +- if (!test_bit(j, n->used))
6835 ++ for (r = 0; r < ahash_numof_locks(orig->htable_bits); r++) {
6836 ++ /* Expire may replace a hbucket with another one */
6837 ++ rcu_read_lock_bh();
6838 ++ for (i = ahash_bucket_start(r, orig->htable_bits);
6839 ++ i < ahash_bucket_end(r, orig->htable_bits); i++) {
6840 ++ n = __ipset_dereference(hbucket(orig, i));
6841 ++ if (!n)
6842 + continue;
6843 +- data = ahash_data(n, j, dsize);
6844 ++ for (j = 0; j < n->pos; j++) {
6845 ++ if (!test_bit(j, n->used))
6846 ++ continue;
6847 ++ data = ahash_data(n, j, dsize);
6848 ++ if (SET_ELEM_EXPIRED(set, data))
6849 ++ continue;
6850 + #ifdef IP_SET_HASH_WITH_NETS
6851 +- /* We have readers running parallel with us,
6852 +- * so the live data cannot be modified.
6853 +- */
6854 +- flags = 0;
6855 +- memcpy(tmp, data, dsize);
6856 +- data = tmp;
6857 +- mtype_data_reset_flags(data, &flags);
6858 ++ /* We have readers running parallel with us,
6859 ++ * so the live data cannot be modified.
6860 ++ */
6861 ++ flags = 0;
6862 ++ memcpy(tmp, data, dsize);
6863 ++ data = tmp;
6864 ++ mtype_data_reset_flags(data, &flags);
6865 + #endif
6866 +- key = HKEY(data, h->initval, htable_bits);
6867 +- m = __ipset_dereference_protected(hbucket(t, key), 1);
6868 +- if (!m) {
6869 +- m = kzalloc(sizeof(*m) +
6870 ++ key = HKEY(data, h->initval, htable_bits);
6871 ++ m = __ipset_dereference(hbucket(t, key));
6872 ++ nr = ahash_region(key, htable_bits);
6873 ++ if (!m) {
6874 ++ m = kzalloc(sizeof(*m) +
6875 + AHASH_INIT_SIZE * dsize,
6876 + GFP_ATOMIC);
6877 +- if (!m) {
6878 +- ret = -ENOMEM;
6879 +- goto cleanup;
6880 +- }
6881 +- m->size = AHASH_INIT_SIZE;
6882 +- extsize += ext_size(AHASH_INIT_SIZE, dsize);
6883 +- RCU_INIT_POINTER(hbucket(t, key), m);
6884 +- } else if (m->pos >= m->size) {
6885 +- struct hbucket *ht;
6886 +-
6887 +- if (m->size >= AHASH_MAX(h)) {
6888 +- ret = -EAGAIN;
6889 +- } else {
6890 +- ht = kzalloc(sizeof(*ht) +
6891 ++ if (!m) {
6892 ++ ret = -ENOMEM;
6893 ++ goto cleanup;
6894 ++ }
6895 ++ m->size = AHASH_INIT_SIZE;
6896 ++ t->hregion[nr].ext_size +=
6897 ++ ext_size(AHASH_INIT_SIZE,
6898 ++ dsize);
6899 ++ RCU_INIT_POINTER(hbucket(t, key), m);
6900 ++ } else if (m->pos >= m->size) {
6901 ++ struct hbucket *ht;
6902 ++
6903 ++ if (m->size >= AHASH_MAX(h)) {
6904 ++ ret = -EAGAIN;
6905 ++ } else {
6906 ++ ht = kzalloc(sizeof(*ht) +
6907 + (m->size + AHASH_INIT_SIZE)
6908 + * dsize,
6909 + GFP_ATOMIC);
6910 +- if (!ht)
6911 +- ret = -ENOMEM;
6912 ++ if (!ht)
6913 ++ ret = -ENOMEM;
6914 ++ }
6915 ++ if (ret < 0)
6916 ++ goto cleanup;
6917 ++ memcpy(ht, m, sizeof(struct hbucket) +
6918 ++ m->size * dsize);
6919 ++ ht->size = m->size + AHASH_INIT_SIZE;
6920 ++ t->hregion[nr].ext_size +=
6921 ++ ext_size(AHASH_INIT_SIZE,
6922 ++ dsize);
6923 ++ kfree(m);
6924 ++ m = ht;
6925 ++ RCU_INIT_POINTER(hbucket(t, key), ht);
6926 + }
6927 +- if (ret < 0)
6928 +- goto cleanup;
6929 +- memcpy(ht, m, sizeof(struct hbucket) +
6930 +- m->size * dsize);
6931 +- ht->size = m->size + AHASH_INIT_SIZE;
6932 +- extsize += ext_size(AHASH_INIT_SIZE, dsize);
6933 +- kfree(m);
6934 +- m = ht;
6935 +- RCU_INIT_POINTER(hbucket(t, key), ht);
6936 +- }
6937 +- d = ahash_data(m, m->pos, dsize);
6938 +- memcpy(d, data, dsize);
6939 +- set_bit(m->pos++, m->used);
6940 ++ d = ahash_data(m, m->pos, dsize);
6941 ++ memcpy(d, data, dsize);
6942 ++ set_bit(m->pos++, m->used);
6943 ++ t->hregion[nr].elements++;
6944 + #ifdef IP_SET_HASH_WITH_NETS
6945 +- mtype_data_reset_flags(d, &flags);
6946 ++ mtype_data_reset_flags(d, &flags);
6947 + #endif
6948 ++ }
6949 + }
6950 ++ rcu_read_unlock_bh();
6951 + }
6952 +- rcu_assign_pointer(h->table, t);
6953 +- set->ext_size = extsize;
6954 +
6955 +- spin_unlock_bh(&set->lock);
6956 ++ /* There can't be any other writer. */
6957 ++ rcu_assign_pointer(h->table, t);
6958 +
6959 + /* Give time to other readers of the set */
6960 + synchronize_rcu();
6961 +
6962 + pr_debug("set %s resized from %u (%p) to %u (%p)\n", set->name,
6963 + orig->htable_bits, orig, t->htable_bits, t);
6964 +- /* If there's nobody else dumping the table, destroy it */
6965 ++ /* Add/delete elements processed by the SET target during resize.
6966 ++ * Kernel-side add cannot trigger a resize and userspace actions
6967 ++ * are serialized by the mutex.
6968 ++ */
6969 ++ list_for_each_safe(l, lt, &h->ad) {
6970 ++ x = list_entry(l, struct mtype_resize_ad, list);
6971 ++ if (x->ad == IPSET_ADD) {
6972 ++ mtype_add(set, &x->d, &x->ext, &x->mext, x->flags);
6973 ++ } else {
6974 ++ mtype_del(set, &x->d, NULL, NULL, 0);
6975 ++ }
6976 ++ list_del(l);
6977 ++ kfree(l);
6978 ++ }
6979 ++ /* If there's nobody else using the table, destroy it */
6980 + if (atomic_dec_and_test(&orig->uref)) {
6981 + pr_debug("Table destroy by resize %p\n", orig);
6982 + mtype_ahash_destroy(set, orig, false);
6983 +@@ -677,15 +810,44 @@ out:
6984 + return ret;
6985 +
6986 + cleanup:
6987 ++ rcu_read_unlock_bh();
6988 + atomic_set(&orig->ref, 0);
6989 + atomic_dec(&orig->uref);
6990 +- spin_unlock_bh(&set->lock);
6991 + mtype_ahash_destroy(set, t, false);
6992 + if (ret == -EAGAIN)
6993 + goto retry;
6994 + goto out;
6995 + }
6996 +
6997 ++/* Get the current number of elements and ext_size in the set */
6998 ++static void
6999 ++mtype_ext_size(struct ip_set *set, u32 *elements, size_t *ext_size)
7000 ++{
7001 ++ struct htype *h = set->data;
7002 ++ const struct htable *t;
7003 ++ u32 i, j, r;
7004 ++ struct hbucket *n;
7005 ++ struct mtype_elem *data;
7006 ++
7007 ++ t = rcu_dereference_bh(h->table);
7008 ++ for (r = 0; r < ahash_numof_locks(t->htable_bits); r++) {
7009 ++ for (i = ahash_bucket_start(r, t->htable_bits);
7010 ++ i < ahash_bucket_end(r, t->htable_bits); i++) {
7011 ++ n = rcu_dereference_bh(hbucket(t, i));
7012 ++ if (!n)
7013 ++ continue;
7014 ++ for (j = 0; j < n->pos; j++) {
7015 ++ if (!test_bit(j, n->used))
7016 ++ continue;
7017 ++ data = ahash_data(n, j, set->dsize);
7018 ++ if (!SET_ELEM_EXPIRED(set, data))
7019 ++ (*elements)++;
7020 ++ }
7021 ++ }
7022 ++ *ext_size += t->hregion[r].ext_size;
7023 ++ }
7024 ++}
7025 ++
7026 + /* Add an element to a hash and update the internal counters when succeeded,
7027 + * otherwise report the proper error code.
7028 + */
7029 +@@ -698,32 +860,49 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
7030 + const struct mtype_elem *d = value;
7031 + struct mtype_elem *data;
7032 + struct hbucket *n, *old = ERR_PTR(-ENOENT);
7033 +- int i, j = -1;
7034 ++ int i, j = -1, ret;
7035 + bool flag_exist = flags & IPSET_FLAG_EXIST;
7036 + bool deleted = false, forceadd = false, reuse = false;
7037 +- u32 key, multi = 0;
7038 ++ u32 r, key, multi = 0, elements, maxelem;
7039 +
7040 +- if (set->elements >= h->maxelem) {
7041 +- if (SET_WITH_TIMEOUT(set))
7042 +- /* FIXME: when set is full, we slow down here */
7043 +- mtype_expire(set, h);
7044 +- if (set->elements >= h->maxelem && SET_WITH_FORCEADD(set))
7045 ++ rcu_read_lock_bh();
7046 ++ t = rcu_dereference_bh(h->table);
7047 ++ key = HKEY(value, h->initval, t->htable_bits);
7048 ++ r = ahash_region(key, t->htable_bits);
7049 ++ atomic_inc(&t->uref);
7050 ++ elements = t->hregion[r].elements;
7051 ++ maxelem = t->maxelem;
7052 ++ if (elements >= maxelem) {
7053 ++ u32 e;
7054 ++ if (SET_WITH_TIMEOUT(set)) {
7055 ++ rcu_read_unlock_bh();
7056 ++ mtype_gc_do(set, h, t, r);
7057 ++ rcu_read_lock_bh();
7058 ++ }
7059 ++ maxelem = h->maxelem;
7060 ++ elements = 0;
7061 ++ for (e = 0; e < ahash_numof_locks(t->htable_bits); e++)
7062 ++ elements += t->hregion[e].elements;
7063 ++ if (elements >= maxelem && SET_WITH_FORCEADD(set))
7064 + forceadd = true;
7065 + }
7066 ++ rcu_read_unlock_bh();
7067 +
7068 +- t = ipset_dereference_protected(h->table, set);
7069 +- key = HKEY(value, h->initval, t->htable_bits);
7070 +- n = __ipset_dereference_protected(hbucket(t, key), 1);
7071 ++ spin_lock_bh(&t->hregion[r].lock);
7072 ++ n = rcu_dereference_bh(hbucket(t, key));
7073 + if (!n) {
7074 +- if (forceadd || set->elements >= h->maxelem)
7075 ++ if (forceadd || elements >= maxelem)
7076 + goto set_full;
7077 + old = NULL;
7078 + n = kzalloc(sizeof(*n) + AHASH_INIT_SIZE * set->dsize,
7079 + GFP_ATOMIC);
7080 +- if (!n)
7081 +- return -ENOMEM;
7082 ++ if (!n) {
7083 ++ ret = -ENOMEM;
7084 ++ goto unlock;
7085 ++ }
7086 + n->size = AHASH_INIT_SIZE;
7087 +- set->ext_size += ext_size(AHASH_INIT_SIZE, set->dsize);
7088 ++ t->hregion[r].ext_size +=
7089 ++ ext_size(AHASH_INIT_SIZE, set->dsize);
7090 + goto copy_elem;
7091 + }
7092 + for (i = 0; i < n->pos; i++) {
7093 +@@ -737,38 +916,37 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
7094 + }
7095 + data = ahash_data(n, i, set->dsize);
7096 + if (mtype_data_equal(data, d, &multi)) {
7097 +- if (flag_exist ||
7098 +- (SET_WITH_TIMEOUT(set) &&
7099 +- ip_set_timeout_expired(ext_timeout(data, set)))) {
7100 ++ if (flag_exist || SET_ELEM_EXPIRED(set, data)) {
7101 + /* Just the extensions could be overwritten */
7102 + j = i;
7103 + goto overwrite_extensions;
7104 + }
7105 +- return -IPSET_ERR_EXIST;
7106 ++ ret = -IPSET_ERR_EXIST;
7107 ++ goto unlock;
7108 + }
7109 + /* Reuse first timed out entry */
7110 +- if (SET_WITH_TIMEOUT(set) &&
7111 +- ip_set_timeout_expired(ext_timeout(data, set)) &&
7112 +- j == -1) {
7113 ++ if (SET_ELEM_EXPIRED(set, data) && j == -1) {
7114 + j = i;
7115 + reuse = true;
7116 + }
7117 + }
7118 + if (reuse || forceadd) {
7119 ++ if (j == -1)
7120 ++ j = 0;
7121 + data = ahash_data(n, j, set->dsize);
7122 + if (!deleted) {
7123 + #ifdef IP_SET_HASH_WITH_NETS
7124 + for (i = 0; i < IPSET_NET_COUNT; i++)
7125 +- mtype_del_cidr(h,
7126 ++ mtype_del_cidr(set, h,
7127 + NCIDR_PUT(DCIDR_GET(data->cidr, i)),
7128 + i);
7129 + #endif
7130 + ip_set_ext_destroy(set, data);
7131 +- set->elements--;
7132 ++ t->hregion[r].elements--;
7133 + }
7134 + goto copy_data;
7135 + }
7136 +- if (set->elements >= h->maxelem)
7137 ++ if (elements >= maxelem)
7138 + goto set_full;
7139 + /* Create a new slot */
7140 + if (n->pos >= n->size) {
7141 +@@ -776,28 +954,32 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
7142 + if (n->size >= AHASH_MAX(h)) {
7143 + /* Trigger rehashing */
7144 + mtype_data_next(&h->next, d);
7145 +- return -EAGAIN;
7146 ++ ret = -EAGAIN;
7147 ++ goto resize;
7148 + }
7149 + old = n;
7150 + n = kzalloc(sizeof(*n) +
7151 + (old->size + AHASH_INIT_SIZE) * set->dsize,
7152 + GFP_ATOMIC);
7153 +- if (!n)
7154 +- return -ENOMEM;
7155 ++ if (!n) {
7156 ++ ret = -ENOMEM;
7157 ++ goto unlock;
7158 ++ }
7159 + memcpy(n, old, sizeof(struct hbucket) +
7160 + old->size * set->dsize);
7161 + n->size = old->size + AHASH_INIT_SIZE;
7162 +- set->ext_size += ext_size(AHASH_INIT_SIZE, set->dsize);
7163 ++ t->hregion[r].ext_size +=
7164 ++ ext_size(AHASH_INIT_SIZE, set->dsize);
7165 + }
7166 +
7167 + copy_elem:
7168 + j = n->pos++;
7169 + data = ahash_data(n, j, set->dsize);
7170 + copy_data:
7171 +- set->elements++;
7172 ++ t->hregion[r].elements++;
7173 + #ifdef IP_SET_HASH_WITH_NETS
7174 + for (i = 0; i < IPSET_NET_COUNT; i++)
7175 +- mtype_add_cidr(h, NCIDR_PUT(DCIDR_GET(d->cidr, i)), i);
7176 ++ mtype_add_cidr(set, h, NCIDR_PUT(DCIDR_GET(d->cidr, i)), i);
7177 + #endif
7178 + memcpy(data, d, sizeof(struct mtype_elem));
7179 + overwrite_extensions:
7180 +@@ -820,13 +1002,41 @@ overwrite_extensions:
7181 + if (old)
7182 + kfree_rcu(old, rcu);
7183 + }
7184 ++ ret = 0;
7185 ++resize:
7186 ++ spin_unlock_bh(&t->hregion[r].lock);
7187 ++ if (atomic_read(&t->ref) && ext->target) {
7188 ++ /* Resize is in process and kernel side add, save values */
7189 ++ struct mtype_resize_ad *x;
7190 ++
7191 ++ x = kzalloc(sizeof(struct mtype_resize_ad), GFP_ATOMIC);
7192 ++ if (!x)
7193 ++ /* Don't bother */
7194 ++ goto out;
7195 ++ x->ad = IPSET_ADD;
7196 ++ memcpy(&x->d, value, sizeof(struct mtype_elem));
7197 ++ memcpy(&x->ext, ext, sizeof(struct ip_set_ext));
7198 ++ memcpy(&x->mext, mext, sizeof(struct ip_set_ext));
7199 ++ x->flags = flags;
7200 ++ spin_lock_bh(&set->lock);
7201 ++ list_add_tail(&x->list, &h->ad);
7202 ++ spin_unlock_bh(&set->lock);
7203 ++ }
7204 ++ goto out;
7205 +
7206 +- return 0;
7207 + set_full:
7208 + if (net_ratelimit())
7209 + pr_warn("Set %s is full, maxelem %u reached\n",
7210 +- set->name, h->maxelem);
7211 +- return -IPSET_ERR_HASH_FULL;
7212 ++ set->name, maxelem);
7213 ++ ret = -IPSET_ERR_HASH_FULL;
7214 ++unlock:
7215 ++ spin_unlock_bh(&t->hregion[r].lock);
7216 ++out:
7217 ++ if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
7218 ++ pr_debug("Table destroy after resize by add: %p\n", t);
7219 ++ mtype_ahash_destroy(set, t, false);
7220 ++ }
7221 ++ return ret;
7222 + }
7223 +
7224 + /* Delete an element from the hash and free up space if possible.
7225 +@@ -840,13 +1050,23 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
7226 + const struct mtype_elem *d = value;
7227 + struct mtype_elem *data;
7228 + struct hbucket *n;
7229 +- int i, j, k, ret = -IPSET_ERR_EXIST;
7230 ++ struct mtype_resize_ad *x = NULL;
7231 ++ int i, j, k, r, ret = -IPSET_ERR_EXIST;
7232 + u32 key, multi = 0;
7233 + size_t dsize = set->dsize;
7234 +
7235 +- t = ipset_dereference_protected(h->table, set);
7236 ++ /* Userspace add and resize is excluded by the mutex.
7237 ++ * Kernespace add does not trigger resize.
7238 ++ */
7239 ++ rcu_read_lock_bh();
7240 ++ t = rcu_dereference_bh(h->table);
7241 + key = HKEY(value, h->initval, t->htable_bits);
7242 +- n = __ipset_dereference_protected(hbucket(t, key), 1);
7243 ++ r = ahash_region(key, t->htable_bits);
7244 ++ atomic_inc(&t->uref);
7245 ++ rcu_read_unlock_bh();
7246 ++
7247 ++ spin_lock_bh(&t->hregion[r].lock);
7248 ++ n = rcu_dereference_bh(hbucket(t, key));
7249 + if (!n)
7250 + goto out;
7251 + for (i = 0, k = 0; i < n->pos; i++) {
7252 +@@ -857,8 +1077,7 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
7253 + data = ahash_data(n, i, dsize);
7254 + if (!mtype_data_equal(data, d, &multi))
7255 + continue;
7256 +- if (SET_WITH_TIMEOUT(set) &&
7257 +- ip_set_timeout_expired(ext_timeout(data, set)))
7258 ++ if (SET_ELEM_EXPIRED(set, data))
7259 + goto out;
7260 +
7261 + ret = 0;
7262 +@@ -866,20 +1085,33 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
7263 + smp_mb__after_atomic();
7264 + if (i + 1 == n->pos)
7265 + n->pos--;
7266 +- set->elements--;
7267 ++ t->hregion[r].elements--;
7268 + #ifdef IP_SET_HASH_WITH_NETS
7269 + for (j = 0; j < IPSET_NET_COUNT; j++)
7270 +- mtype_del_cidr(h, NCIDR_PUT(DCIDR_GET(d->cidr, j)),
7271 +- j);
7272 ++ mtype_del_cidr(set, h,
7273 ++ NCIDR_PUT(DCIDR_GET(d->cidr, j)), j);
7274 + #endif
7275 + ip_set_ext_destroy(set, data);
7276 +
7277 ++ if (atomic_read(&t->ref) && ext->target) {
7278 ++ /* Resize is in process and kernel side del,
7279 ++ * save values
7280 ++ */
7281 ++ x = kzalloc(sizeof(struct mtype_resize_ad),
7282 ++ GFP_ATOMIC);
7283 ++ if (x) {
7284 ++ x->ad = IPSET_DEL;
7285 ++ memcpy(&x->d, value,
7286 ++ sizeof(struct mtype_elem));
7287 ++ x->flags = flags;
7288 ++ }
7289 ++ }
7290 + for (; i < n->pos; i++) {
7291 + if (!test_bit(i, n->used))
7292 + k++;
7293 + }
7294 + if (n->pos == 0 && k == 0) {
7295 +- set->ext_size -= ext_size(n->size, dsize);
7296 ++ t->hregion[r].ext_size -= ext_size(n->size, dsize);
7297 + rcu_assign_pointer(hbucket(t, key), NULL);
7298 + kfree_rcu(n, rcu);
7299 + } else if (k >= AHASH_INIT_SIZE) {
7300 +@@ -898,7 +1130,8 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
7301 + k++;
7302 + }
7303 + tmp->pos = k;
7304 +- set->ext_size -= ext_size(AHASH_INIT_SIZE, dsize);
7305 ++ t->hregion[r].ext_size -=
7306 ++ ext_size(AHASH_INIT_SIZE, dsize);
7307 + rcu_assign_pointer(hbucket(t, key), tmp);
7308 + kfree_rcu(n, rcu);
7309 + }
7310 +@@ -906,6 +1139,16 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
7311 + }
7312 +
7313 + out:
7314 ++ spin_unlock_bh(&t->hregion[r].lock);
7315 ++ if (x) {
7316 ++ spin_lock_bh(&set->lock);
7317 ++ list_add(&x->list, &h->ad);
7318 ++ spin_unlock_bh(&set->lock);
7319 ++ }
7320 ++ if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
7321 ++ pr_debug("Table destroy after resize by del: %p\n", t);
7322 ++ mtype_ahash_destroy(set, t, false);
7323 ++ }
7324 + return ret;
7325 + }
7326 +
7327 +@@ -991,6 +1234,7 @@ mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext,
7328 + int i, ret = 0;
7329 + u32 key, multi = 0;
7330 +
7331 ++ rcu_read_lock_bh();
7332 + t = rcu_dereference_bh(h->table);
7333 + #ifdef IP_SET_HASH_WITH_NETS
7334 + /* If we test an IP address and not a network address,
7335 +@@ -1022,6 +1266,7 @@ mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext,
7336 + goto out;
7337 + }
7338 + out:
7339 ++ rcu_read_unlock_bh();
7340 + return ret;
7341 + }
7342 +
7343 +@@ -1033,23 +1278,14 @@ mtype_head(struct ip_set *set, struct sk_buff *skb)
7344 + const struct htable *t;
7345 + struct nlattr *nested;
7346 + size_t memsize;
7347 ++ u32 elements = 0;
7348 ++ size_t ext_size = 0;
7349 + u8 htable_bits;
7350 +
7351 +- /* If any members have expired, set->elements will be wrong
7352 +- * mytype_expire function will update it with the right count.
7353 +- * we do not hold set->lock here, so grab it first.
7354 +- * set->elements can still be incorrect in the case of a huge set,
7355 +- * because elements might time out during the listing.
7356 +- */
7357 +- if (SET_WITH_TIMEOUT(set)) {
7358 +- spin_lock_bh(&set->lock);
7359 +- mtype_expire(set, h);
7360 +- spin_unlock_bh(&set->lock);
7361 +- }
7362 +-
7363 + rcu_read_lock_bh();
7364 +- t = rcu_dereference_bh_nfnl(h->table);
7365 +- memsize = mtype_ahash_memsize(h, t) + set->ext_size;
7366 ++ t = rcu_dereference_bh(h->table);
7367 ++ mtype_ext_size(set, &elements, &ext_size);
7368 ++ memsize = mtype_ahash_memsize(h, t) + ext_size + set->ext_size;
7369 + htable_bits = t->htable_bits;
7370 + rcu_read_unlock_bh();
7371 +
7372 +@@ -1071,7 +1307,7 @@ mtype_head(struct ip_set *set, struct sk_buff *skb)
7373 + #endif
7374 + if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) ||
7375 + nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) ||
7376 +- nla_put_net32(skb, IPSET_ATTR_ELEMENTS, htonl(set->elements)))
7377 ++ nla_put_net32(skb, IPSET_ATTR_ELEMENTS, htonl(elements)))
7378 + goto nla_put_failure;
7379 + if (unlikely(ip_set_put_flags(skb, set)))
7380 + goto nla_put_failure;
7381 +@@ -1091,15 +1327,15 @@ mtype_uref(struct ip_set *set, struct netlink_callback *cb, bool start)
7382 +
7383 + if (start) {
7384 + rcu_read_lock_bh();
7385 +- t = rcu_dereference_bh_nfnl(h->table);
7386 ++ t = ipset_dereference_bh_nfnl(h->table);
7387 + atomic_inc(&t->uref);
7388 + cb->args[IPSET_CB_PRIVATE] = (unsigned long)t;
7389 + rcu_read_unlock_bh();
7390 + } else if (cb->args[IPSET_CB_PRIVATE]) {
7391 + t = (struct htable *)cb->args[IPSET_CB_PRIVATE];
7392 + if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
7393 +- /* Resizing didn't destroy the hash table */
7394 +- pr_debug("Table destroy by dump: %p\n", t);
7395 ++ pr_debug("Table destroy after resize "
7396 ++ " by dump: %p\n", t);
7397 + mtype_ahash_destroy(set, t, false);
7398 + }
7399 + cb->args[IPSET_CB_PRIVATE] = 0;
7400 +@@ -1141,8 +1377,7 @@ mtype_list(const struct ip_set *set,
7401 + if (!test_bit(i, n->used))
7402 + continue;
7403 + e = ahash_data(n, i, set->dsize);
7404 +- if (SET_WITH_TIMEOUT(set) &&
7405 +- ip_set_timeout_expired(ext_timeout(e, set)))
7406 ++ if (SET_ELEM_EXPIRED(set, e))
7407 + continue;
7408 + pr_debug("list hash %lu hbucket %p i %u, data %p\n",
7409 + cb->args[IPSET_CB_ARG0], n, i, e);
7410 +@@ -1208,6 +1443,7 @@ static const struct ip_set_type_variant mtype_variant = {
7411 + .uref = mtype_uref,
7412 + .resize = mtype_resize,
7413 + .same_set = mtype_same_set,
7414 ++ .region_lock = true,
7415 + };
7416 +
7417 + #ifdef IP_SET_EMIT_CREATE
7418 +@@ -1226,6 +1462,7 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
7419 + size_t hsize;
7420 + struct htype *h;
7421 + struct htable *t;
7422 ++ u32 i;
7423 +
7424 + pr_debug("Create set %s with family %s\n",
7425 + set->name, set->family == NFPROTO_IPV4 ? "inet" : "inet6");
7426 +@@ -1294,6 +1531,15 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
7427 + kfree(h);
7428 + return -ENOMEM;
7429 + }
7430 ++ t->hregion = ip_set_alloc(ahash_sizeof_regions(hbits));
7431 ++ if (!t->hregion) {
7432 ++ kfree(t);
7433 ++ kfree(h);
7434 ++ return -ENOMEM;
7435 ++ }
7436 ++ h->gc.set = set;
7437 ++ for (i = 0; i < ahash_numof_locks(hbits); i++)
7438 ++ spin_lock_init(&t->hregion[i].lock);
7439 + h->maxelem = maxelem;
7440 + #ifdef IP_SET_HASH_WITH_NETMASK
7441 + h->netmask = netmask;
7442 +@@ -1304,9 +1550,10 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
7443 + get_random_bytes(&h->initval, sizeof(h->initval));
7444 +
7445 + t->htable_bits = hbits;
7446 ++ t->maxelem = h->maxelem / ahash_numof_locks(hbits);
7447 + RCU_INIT_POINTER(h->table, t);
7448 +
7449 +- h->set = set;
7450 ++ INIT_LIST_HEAD(&h->ad);
7451 + set->data = h;
7452 + #ifndef IP_SET_PROTO_UNDEF
7453 + if (set->family == NFPROTO_IPV4) {
7454 +@@ -1329,12 +1576,10 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
7455 + #ifndef IP_SET_PROTO_UNDEF
7456 + if (set->family == NFPROTO_IPV4)
7457 + #endif
7458 +- IPSET_TOKEN(HTYPE, 4_gc_init)(set,
7459 +- IPSET_TOKEN(HTYPE, 4_gc));
7460 ++ IPSET_TOKEN(HTYPE, 4_gc_init)(&h->gc);
7461 + #ifndef IP_SET_PROTO_UNDEF
7462 + else
7463 +- IPSET_TOKEN(HTYPE, 6_gc_init)(set,
7464 +- IPSET_TOKEN(HTYPE, 6_gc));
7465 ++ IPSET_TOKEN(HTYPE, 6_gc_init)(&h->gc);
7466 + #endif
7467 + }
7468 + pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
7469 +diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c
7470 +index f8d2919cf9fd..037e8fce9b30 100644
7471 +--- a/net/netfilter/nft_tunnel.c
7472 ++++ b/net/netfilter/nft_tunnel.c
7473 +@@ -505,8 +505,8 @@ static int nft_tunnel_opts_dump(struct sk_buff *skb,
7474 + static int nft_tunnel_ports_dump(struct sk_buff *skb,
7475 + struct ip_tunnel_info *info)
7476 + {
7477 +- if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, htons(info->key.tp_src)) < 0 ||
7478 +- nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, htons(info->key.tp_dst)) < 0)
7479 ++ if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, info->key.tp_src) < 0 ||
7480 ++ nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, info->key.tp_dst) < 0)
7481 + return -1;
7482 +
7483 + return 0;
7484 +diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
7485 +index 6520d9ec1297..1b68a131083c 100644
7486 +--- a/net/netfilter/xt_hashlimit.c
7487 ++++ b/net/netfilter/xt_hashlimit.c
7488 +@@ -36,6 +36,7 @@
7489 + #include <linux/netfilter_ipv6/ip6_tables.h>
7490 + #include <linux/mutex.h>
7491 + #include <linux/kernel.h>
7492 ++#include <linux/refcount.h>
7493 + #include <uapi/linux/netfilter/xt_hashlimit.h>
7494 +
7495 + #define XT_HASHLIMIT_ALL (XT_HASHLIMIT_HASH_DIP | XT_HASHLIMIT_HASH_DPT | \
7496 +@@ -114,7 +115,7 @@ struct dsthash_ent {
7497 +
7498 + struct xt_hashlimit_htable {
7499 + struct hlist_node node; /* global list of all htables */
7500 +- int use;
7501 ++ refcount_t use;
7502 + u_int8_t family;
7503 + bool rnd_initialized;
7504 +
7505 +@@ -315,7 +316,7 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
7506 + for (i = 0; i < hinfo->cfg.size; i++)
7507 + INIT_HLIST_HEAD(&hinfo->hash[i]);
7508 +
7509 +- hinfo->use = 1;
7510 ++ refcount_set(&hinfo->use, 1);
7511 + hinfo->count = 0;
7512 + hinfo->family = family;
7513 + hinfo->rnd_initialized = false;
7514 +@@ -434,7 +435,7 @@ static struct xt_hashlimit_htable *htable_find_get(struct net *net,
7515 + hlist_for_each_entry(hinfo, &hashlimit_net->htables, node) {
7516 + if (!strcmp(name, hinfo->name) &&
7517 + hinfo->family == family) {
7518 +- hinfo->use++;
7519 ++ refcount_inc(&hinfo->use);
7520 + return hinfo;
7521 + }
7522 + }
7523 +@@ -443,12 +444,11 @@ static struct xt_hashlimit_htable *htable_find_get(struct net *net,
7524 +
7525 + static void htable_put(struct xt_hashlimit_htable *hinfo)
7526 + {
7527 +- mutex_lock(&hashlimit_mutex);
7528 +- if (--hinfo->use == 0) {
7529 ++ if (refcount_dec_and_mutex_lock(&hinfo->use, &hashlimit_mutex)) {
7530 + hlist_del(&hinfo->node);
7531 ++ mutex_unlock(&hashlimit_mutex);
7532 + htable_destroy(hinfo);
7533 + }
7534 +- mutex_unlock(&hashlimit_mutex);
7535 + }
7536 +
7537 + /* The algorithm used is the Simple Token Bucket Filter (TBF)
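The conversion above replaces the open-coded "--hinfo->use" under the mutex with refcount_dec_and_mutex_lock(), so the mutex is only taken by the thread that drops the last reference. A userspace analogue of that pattern, built from C11 atomics and a pthread mutex; dec_and_mutex_lock() and the struct layout below are illustrative, not the kernel helper.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct htable {
        atomic_int use;            /* reference count, like hinfo->use */
        /* ... hash buckets, config ... */
};

static pthread_mutex_t table_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Decrement a reference; if it was the last one, return true with the
 * mutex held so the caller can unlink the object safely. */
static bool dec_and_mutex_lock(atomic_int *ref, pthread_mutex_t *m)
{
        int old = atomic_load(ref);

        while (old > 1) {                       /* fast path: not the last ref */
                if (atomic_compare_exchange_weak(ref, &old, old - 1))
                        return false;
        }
        pthread_mutex_lock(m);                  /* maybe last: lock, then recheck */
        if (atomic_fetch_sub(ref, 1) == 1)
                return true;                    /* caller unlinks, then unlocks */
        pthread_mutex_unlock(m);
        return false;
}

static void htable_put(struct htable *t)
{
        if (dec_and_mutex_lock(&t->use, &table_mutex)) {
                /* remove t from the global list here */
                pthread_mutex_unlock(&table_mutex);
                free(t);                        /* destroy outside the lock */
        }
}

int main(void)
{
        struct htable *t = malloc(sizeof(*t));

        if (!t)
                return 1;
        atomic_init(&t->use, 2);                /* two users of the table */
        htable_put(t);                          /* first put: table survives */
        htable_put(t);                          /* last put: table is freed */
        return 0;
}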
7538 +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
7539 +index 4e31721e7293..edf3e285e242 100644
7540 +--- a/net/netlink/af_netlink.c
7541 ++++ b/net/netlink/af_netlink.c
7542 +@@ -1014,7 +1014,8 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
7543 + if (nlk->netlink_bind && groups) {
7544 + int group;
7545 +
7546 +- for (group = 0; group < nlk->ngroups; group++) {
7547 ++ /* nl_groups is a u32, so cap the maximum groups we can bind */
7548 ++ for (group = 0; group < BITS_PER_TYPE(u32); group++) {
7549 + if (!test_bit(group, &groups))
7550 + continue;
7551 + err = nlk->netlink_bind(net, group + 1);
7552 +@@ -1033,7 +1034,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
7553 + netlink_insert(sk, nladdr->nl_pid) :
7554 + netlink_autobind(sock);
7555 + if (err) {
7556 +- netlink_undo_bind(nlk->ngroups, groups, sk);
7557 ++ netlink_undo_bind(BITS_PER_TYPE(u32), groups, sk);
7558 + goto unlock;
7559 + }
7560 + }
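The bind fix caps both the subscribe loop and the undo path at 32 groups: nladdr->nl_groups is a u32 bitmask, while nlk->ngroups can exceed 32 for generic netlink families, so iterating up to ngroups walked past the end of the mask. A sketch of the bounded iteration, with BITS_PER_TYPE redefined locally for userspace:

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_TYPE(type) (sizeof(type) * 8)

int main(void)
{
        uint32_t groups = (1u << 3) | (1u << 17);   /* requested multicast groups */

        /* nl_groups is a u32, so only bit positions 0..31 can be requested,
         * no matter how many groups the protocol itself defines. */
        for (unsigned int group = 0; group < BITS_PER_TYPE(uint32_t); group++) {
                if (!(groups & (1u << group)))
                        continue;
                printf("binding group %u\n", group + 1);
        }
        return 0;
}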
7561 +diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
7562 +index 7e54d2ab5254..d32d4233d337 100644
7563 +--- a/net/sched/cls_flower.c
7564 ++++ b/net/sched/cls_flower.c
7565 +@@ -305,6 +305,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
7566 + struct cls_fl_filter *f;
7567 +
7568 + list_for_each_entry_rcu(mask, &head->masks, list) {
7569 ++ flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
7570 + fl_clear_masked_range(&skb_key, mask);
7571 +
7572 + skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
7573 +diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
7574 +index 4ab8208a2dd4..c6d83a64eac3 100644
7575 +--- a/net/sctp/sm_statefuns.c
7576 ++++ b/net/sctp/sm_statefuns.c
7577 +@@ -170,6 +170,16 @@ static inline bool sctp_chunk_length_valid(struct sctp_chunk *chunk,
7578 + return true;
7579 + }
7580 +
7581 ++/* Check for format error in an ABORT chunk */
7582 ++static inline bool sctp_err_chunk_valid(struct sctp_chunk *chunk)
7583 ++{
7584 ++ struct sctp_errhdr *err;
7585 ++
7586 ++ sctp_walk_errors(err, chunk->chunk_hdr);
7587 ++
7588 ++ return (void *)err == (void *)chunk->chunk_end;
7589 ++}
7590 ++
7591 + /**********************************************************
7592 + * These are the state functions for handling chunk events.
7593 + **********************************************************/
7594 +@@ -2255,6 +2265,9 @@ enum sctp_disposition sctp_sf_shutdown_pending_abort(
7595 + sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
7596 + return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
7597 +
7598 ++ if (!sctp_err_chunk_valid(chunk))
7599 ++ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
7600 ++
7601 + return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
7602 + }
7603 +
7604 +@@ -2298,6 +2311,9 @@ enum sctp_disposition sctp_sf_shutdown_sent_abort(
7605 + sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
7606 + return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
7607 +
7608 ++ if (!sctp_err_chunk_valid(chunk))
7609 ++ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
7610 ++
7611 + /* Stop the T2-shutdown timer. */
7612 + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
7613 + SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
7614 +@@ -2565,6 +2581,9 @@ enum sctp_disposition sctp_sf_do_9_1_abort(
7615 + sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
7616 + return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
7617 +
7618 ++ if (!sctp_err_chunk_valid(chunk))
7619 ++ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
7620 ++
7621 + return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
7622 + }
7623 +
7624 +@@ -2582,16 +2601,8 @@ static enum sctp_disposition __sctp_sf_do_9_1_abort(
7625 +
7626 + /* See if we have an error cause code in the chunk. */
7627 + len = ntohs(chunk->chunk_hdr->length);
7628 +- if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) {
7629 +- struct sctp_errhdr *err;
7630 +-
7631 +- sctp_walk_errors(err, chunk->chunk_hdr);
7632 +- if ((void *)err != (void *)chunk->chunk_end)
7633 +- return sctp_sf_pdiscard(net, ep, asoc, type, arg,
7634 +- commands);
7635 +-
7636 ++ if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
7637 + error = ((struct sctp_errhdr *)chunk->skb->data)->cause;
7638 +- }
7639 +
7640 + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET));
7641 + /* ASSOC_FAILED will DELETE_TCB. */
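sctp_err_chunk_valid() added above walks the error causes of an ABORT chunk and accepts it only if the walk lands exactly on chunk_end, letting the abort handlers discard malformed chunks before acting on them. A simplified userspace version of that TLV-walk check is sketched below; it ignores the 4-byte cause padding and minimum-length rules that the real sctp_walk_errors() enforces.

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct errhdr {
        uint16_t cause;     /* error cause code, network byte order   */
        uint16_t length;    /* length of this cause, header included  */
};

static bool err_chunk_valid(const uint8_t *buf, size_t len)
{
        size_t off = 0;

        while (off + sizeof(struct errhdr) <= len) {
                struct errhdr eh;
                uint16_t cause_len;

                memcpy(&eh, buf + off, sizeof(eh));
                cause_len = ntohs(eh.length);
                if (cause_len < sizeof(eh))
                        return false;               /* malformed cause */
                off += cause_len;
        }
        return off == len;                          /* walk must end exactly at the end */
}

int main(void)
{
        uint8_t ok[8]  = { 0, 1, 0, 8, 0, 0, 0, 0 };  /* one 8-byte cause */
        uint8_t bad[6] = { 0, 1, 0, 8, 0, 0 };        /* cause claims 8 bytes, only 6 present */

        printf("ok: %d  bad: %d\n",
               err_chunk_valid(ok, sizeof(ok)),
               err_chunk_valid(bad, sizeof(bad)));
        return 0;
}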
7642 +diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
7643 +index cee5bf4a9bb9..90988a511cd5 100644
7644 +--- a/net/smc/af_smc.c
7645 ++++ b/net/smc/af_smc.c
7646 +@@ -470,6 +470,8 @@ static void smc_switch_to_fallback(struct smc_sock *smc)
7647 + if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
7648 + smc->clcsock->file = smc->sk.sk_socket->file;
7649 + smc->clcsock->file->private_data = smc->clcsock;
7650 ++ smc->clcsock->wq.fasync_list =
7651 ++ smc->sk.sk_socket->wq.fasync_list;
7652 + }
7653 + }
7654 +
7655 +diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
7656 +index 0879f7bed967..86cccc24e52e 100644
7657 +--- a/net/smc/smc_clc.c
7658 ++++ b/net/smc/smc_clc.c
7659 +@@ -372,7 +372,9 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info)
7660 + dclc.hdr.length = htons(sizeof(struct smc_clc_msg_decline));
7661 + dclc.hdr.version = SMC_CLC_V1;
7662 + dclc.hdr.flag = (peer_diag_info == SMC_CLC_DECL_SYNCERR) ? 1 : 0;
7663 +- memcpy(dclc.id_for_peer, local_systemid, sizeof(local_systemid));
7664 ++ if (smc->conn.lgr && !smc->conn.lgr->is_smcd)
7665 ++ memcpy(dclc.id_for_peer, local_systemid,
7666 ++ sizeof(local_systemid));
7667 + dclc.peer_diagnosis = htonl(peer_diag_info);
7668 + memcpy(dclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
7669 +
7670 +diff --git a/net/tipc/socket.c b/net/tipc/socket.c
7671 +index f9b4fb92c0b1..693e8902161e 100644
7672 +--- a/net/tipc/socket.c
7673 ++++ b/net/tipc/socket.c
7674 +@@ -2441,6 +2441,8 @@ static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
7675 + return -ETIMEDOUT;
7676 + if (signal_pending(current))
7677 + return sock_intr_errno(*timeo_p);
7678 ++ if (sk->sk_state == TIPC_DISCONNECTING)
7679 ++ break;
7680 +
7681 + add_wait_queue(sk_sleep(sk), &wait);
7682 + done = sk_wait_event(sk, timeo_p, tipc_sk_connected(sk),
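The added check lets tipc_wait_for_connect() stop sleeping as soon as the socket has reached TIPC_DISCONNECTING, instead of waiting out the full timeout when the peer has already rejected the connection. A small condition-wait sketch of that "bail out on a terminal state" shape, using pthreads and a made-up error code; the timeout and signal handling of the original are omitted.

#include <pthread.h>
#include <stdio.h>

enum sock_state { SS_CONNECTING, SS_CONNECTED, SS_DISCONNECTING };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static enum sock_state state = SS_CONNECTING;

static int wait_for_connect(void)
{
        int err = 0;

        pthread_mutex_lock(&lock);
        while (state != SS_CONNECTED) {
                if (state == SS_DISCONNECTING) {  /* terminal state: stop waiting */
                        err = -111;               /* e.g. -ECONNREFUSED */
                        break;
                }
                pthread_cond_wait(&cond, &lock);
        }
        pthread_mutex_unlock(&lock);
        return err;
}

int main(void)
{
        state = SS_DISCONNECTING;                 /* simulate a rejected connection */
        printf("wait_for_connect: %d\n", wait_for_connect());
        return 0;
}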
7683 +diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
7684 +index cd91ad812291..e72d7d787935 100644
7685 +--- a/net/tls/tls_device.c
7686 ++++ b/net/tls/tls_device.c
7687 +@@ -592,7 +592,7 @@ struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
7688 + u32 seq, u64 *p_record_sn)
7689 + {
7690 + u64 record_sn = context->hint_record_sn;
7691 +- struct tls_record_info *info;
7692 ++ struct tls_record_info *info, *last;
7693 +
7694 + info = context->retransmit_hint;
7695 + if (!info ||
7696 +@@ -604,6 +604,24 @@ struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
7697 + struct tls_record_info, list);
7698 + if (!info)
7699 + return NULL;
7700 ++ /* send the start_marker record if seq number is before the
7701 ++ * tls offload start marker sequence number. This record is
7702 ++ * required to handle TCP packets which are before TLS offload
7703 ++ * started.
7704 ++ * And if it's not start marker, look if this seq number
7705 ++ * belongs to the list.
7706 ++ */
7707 ++ if (likely(!tls_record_is_start_marker(info))) {
7708 ++ /* we have the first record, get the last record to see
7709 ++ * if this seq number belongs to the list.
7710 ++ */
7711 ++ last = list_last_entry(&context->records_list,
7712 ++ struct tls_record_info, list);
7713 ++
7714 ++ if (!between(seq, tls_record_start_seq(info),
7715 ++ last->end_seq))
7716 ++ return NULL;
7717 ++ }
7718 + record_sn = context->unacked_record_sn;
7719 + }
7720 +
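The new branch in tls_get_record() only trusts the first record when it is the start marker; otherwise it fetches the last record and requires seq to fall between the first record's start and the last record's end_seq before walking the list. That range test uses TCP-style modular sequence arithmetic; a standalone sketch of such a between() helper, including the wraparound case:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* true if lo <= seq <= hi in modulo-2^32 sequence space */
static bool between(uint32_t seq, uint32_t lo, uint32_t hi)
{
        return hi - lo >= seq - lo;
}

int main(void)
{
        uint32_t start = 0xfffffff0u;   /* first record starts just before wrap */
        uint32_t end   = 0x00000040u;   /* last record ends after the wrap      */

        printf("%d %d %d\n",
               between(0xfffffff8u, start, end),   /* inside  -> 1 */
               between(0x00000020u, start, end),   /* inside  -> 1 */
               between(0x00000080u, start, end));  /* outside -> 0 */
        return 0;
}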
7721 +diff --git a/net/wireless/ethtool.c b/net/wireless/ethtool.c
7722 +index a9c0f368db5d..24e18405cdb4 100644
7723 +--- a/net/wireless/ethtool.c
7724 ++++ b/net/wireless/ethtool.c
7725 +@@ -7,9 +7,13 @@
7726 + void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7727 + {
7728 + struct wireless_dev *wdev = dev->ieee80211_ptr;
7729 ++ struct device *pdev = wiphy_dev(wdev->wiphy);
7730 +
7731 +- strlcpy(info->driver, wiphy_dev(wdev->wiphy)->driver->name,
7732 +- sizeof(info->driver));
7733 ++ if (pdev->driver)
7734 ++ strlcpy(info->driver, pdev->driver->name,
7735 ++ sizeof(info->driver));
7736 ++ else
7737 ++ strlcpy(info->driver, "N/A", sizeof(info->driver));
7738 +
7739 + strlcpy(info->version, init_utsname()->release, sizeof(info->version));
7740 +
7741 +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
7742 +index 1e97ac5435b2..114397d737b3 100644
7743 +--- a/net/wireless/nl80211.c
7744 ++++ b/net/wireless/nl80211.c
7745 +@@ -437,6 +437,7 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
7746 + [NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT] = { .type = NLA_FLAG },
7747 + [NL80211_ATTR_CONTROL_PORT_OVER_NL80211] = { .type = NLA_FLAG },
7748 + [NL80211_ATTR_PRIVACY] = { .type = NLA_FLAG },
7749 ++ [NL80211_ATTR_STATUS_CODE] = { .type = NLA_U16 },
7750 + [NL80211_ATTR_CIPHER_SUITE_GROUP] = { .type = NLA_U32 },
7751 + [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
7752 + [NL80211_ATTR_PID] = { .type = NLA_U32 },
7753 +@@ -4799,8 +4800,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
7754 + err = nl80211_parse_he_obss_pd(
7755 + info->attrs[NL80211_ATTR_HE_OBSS_PD],
7756 + &params.he_obss_pd);
7757 +- if (err)
7758 +- return err;
7759 ++ goto out;
7760 + }
7761 +
7762 + nl80211_calculate_ap_params(&params);
7763 +@@ -4822,6 +4822,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
7764 + }
7765 + wdev_unlock(wdev);
7766 +
7767 ++out:
7768 + kfree(params.acl);
7769 +
7770 + return err;
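The start_ap fix turns the early "return err" into "goto out" so params.acl is freed on the he_obss_pd error path as well. A compact illustration of that single-exit cleanup style, with hypothetical names standing in for the real parser and parameters:

#include <stdlib.h>

struct ap_params {
        void *acl;                               /* allocated earlier in the function */
};

static int parse_obss_pd(int have_valid_attr)    /* stand-in for the real parser */
{
        return have_valid_attr ? 0 : -22;        /* -EINVAL */
}

static int start_ap(int have_valid_attr)
{
        struct ap_params params = { 0 };
        int err;

        params.acl = malloc(64);
        if (!params.acl)
                return -12;                      /* -ENOMEM: nothing to clean up yet */

        err = parse_obss_pd(have_valid_attr);
        if (err)
                goto out;                        /* an early return here would leak acl */

        /* ... bring the AP up ... */
        err = 0;
out:
        free(params.acl);
        return err;
}

int main(void)
{
        return start_ap(1) ? 1 : 0;
}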
7771 +diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
7772 +index 3fa32f83b2d7..a66fc0acad1e 100644
7773 +--- a/scripts/Makefile.lib
7774 ++++ b/scripts/Makefile.lib
7775 +@@ -291,13 +291,13 @@ DT_TMP_SCHEMA := $(objtree)/$(DT_BINDING_DIR)/processed-schema.yaml
7776 + quiet_cmd_dtb_check = CHECK $@
7777 + cmd_dtb_check = $(DT_CHECKER) -u $(srctree)/$(DT_BINDING_DIR) -p $(DT_TMP_SCHEMA) $@ ;
7778 +
7779 +-define rule_dtc_dt_yaml
7780 ++define rule_dtc
7781 + $(call cmd_and_fixdep,dtc,yaml)
7782 + $(call cmd,dtb_check)
7783 + endef
7784 +
7785 + $(obj)/%.dt.yaml: $(src)/%.dts $(DTC) $(DT_TMP_SCHEMA) FORCE
7786 +- $(call if_changed_rule,dtc_dt_yaml)
7787 ++ $(call if_changed_rule,dtc)
7788 +
7789 + dtc-tmp = $(subst $(comma),_,$(dot-target).dts.tmp)
7790 +
7791 +diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
7792 +index ef8dfd47c7e3..0cac399ce713 100644
7793 +--- a/security/integrity/ima/ima_policy.c
7794 ++++ b/security/integrity/ima/ima_policy.c
7795 +@@ -263,7 +263,7 @@ static void ima_lsm_free_rule(struct ima_rule_entry *entry)
7796 + static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
7797 + {
7798 + struct ima_rule_entry *nentry;
7799 +- int i, result;
7800 ++ int i;
7801 +
7802 + nentry = kmalloc(sizeof(*nentry), GFP_KERNEL);
7803 + if (!nentry)
7804 +@@ -277,7 +277,7 @@ static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
7805 + memset(nentry->lsm, 0, sizeof_field(struct ima_rule_entry, lsm));
7806 +
7807 + for (i = 0; i < MAX_LSM_RULES; i++) {
7808 +- if (!entry->lsm[i].rule)
7809 ++ if (!entry->lsm[i].args_p)
7810 + continue;
7811 +
7812 + nentry->lsm[i].type = entry->lsm[i].type;
7813 +@@ -286,13 +286,13 @@ static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
7814 + if (!nentry->lsm[i].args_p)
7815 + goto out_err;
7816 +
7817 +- result = security_filter_rule_init(nentry->lsm[i].type,
7818 +- Audit_equal,
7819 +- nentry->lsm[i].args_p,
7820 +- &nentry->lsm[i].rule);
7821 +- if (result == -EINVAL)
7822 +- pr_warn("ima: rule for LSM \'%d\' is undefined\n",
7823 +- entry->lsm[i].type);
7824 ++ security_filter_rule_init(nentry->lsm[i].type,
7825 ++ Audit_equal,
7826 ++ nentry->lsm[i].args_p,
7827 ++ &nentry->lsm[i].rule);
7828 ++ if (!nentry->lsm[i].rule)
7829 ++ pr_warn("rule for LSM \'%s\' is undefined\n",
7830 ++ (char *)entry->lsm[i].args_p);
7831 + }
7832 + return nentry;
7833 +
7834 +@@ -329,7 +329,7 @@ static void ima_lsm_update_rules(void)
7835 + list_for_each_entry_safe(entry, e, &ima_policy_rules, list) {
7836 + needs_update = 0;
7837 + for (i = 0; i < MAX_LSM_RULES; i++) {
7838 +- if (entry->lsm[i].rule) {
7839 ++ if (entry->lsm[i].args_p) {
7840 + needs_update = 1;
7841 + break;
7842 + }
7843 +@@ -339,8 +339,7 @@ static void ima_lsm_update_rules(void)
7844 +
7845 + result = ima_lsm_update_rule(entry);
7846 + if (result) {
7847 +- pr_err("ima: lsm rule update error %d\n",
7848 +- result);
7849 ++ pr_err("lsm rule update error %d\n", result);
7850 + return;
7851 + }
7852 + }
7853 +@@ -357,7 +356,7 @@ int ima_lsm_policy_change(struct notifier_block *nb, unsigned long event,
7854 + }
7855 +
7856 + /**
7857 +- * ima_match_rules - determine whether an inode matches the measure rule.
7858 ++ * ima_match_rules - determine whether an inode matches the policy rule.
7859 + * @rule: a pointer to a rule
7860 + * @inode: a pointer to an inode
7861 + * @cred: a pointer to a credentials structure for user validation
7862 +@@ -415,9 +414,12 @@ static bool ima_match_rules(struct ima_rule_entry *rule, struct inode *inode,
7863 + int rc = 0;
7864 + u32 osid;
7865 +
7866 +- if (!rule->lsm[i].rule)
7867 +- continue;
7868 +-
7869 ++ if (!rule->lsm[i].rule) {
7870 ++ if (!rule->lsm[i].args_p)
7871 ++ continue;
7872 ++ else
7873 ++ return false;
7874 ++ }
7875 + switch (i) {
7876 + case LSM_OBJ_USER:
7877 + case LSM_OBJ_ROLE:
7878 +@@ -823,8 +825,14 @@ static int ima_lsm_rule_init(struct ima_rule_entry *entry,
7879 + entry->lsm[lsm_rule].args_p,
7880 + &entry->lsm[lsm_rule].rule);
7881 + if (!entry->lsm[lsm_rule].rule) {
7882 +- kfree(entry->lsm[lsm_rule].args_p);
7883 +- return -EINVAL;
7884 ++ pr_warn("rule for LSM \'%s\' is undefined\n",
7885 ++ (char *)entry->lsm[lsm_rule].args_p);
7886 ++
7887 ++ if (ima_rules == &ima_default_rules) {
7888 ++ kfree(entry->lsm[lsm_rule].args_p);
7889 ++ result = -EINVAL;
7890 ++ } else
7891 ++ result = 0;
7892 + }
7893 +
7894 + return result;
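The IMA changes key everything off args_p: rule copying and updating are attempted whenever arguments were configured, and ima_match_rules() now fails closed when an LSM condition has arguments but no compiled rule (for instance because the security module's policy is not loaded yet). A toy version of that fail-closed matching check, with hypothetical field names:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct lsm_cond {
        const char *args_p;   /* configured condition string, or NULL    */
        void *rule;           /* compiled LSM rule, or NULL if undefined */
};

static bool lsm_conds_match(const struct lsm_cond *c, size_t n)
{
        for (size_t i = 0; i < n; i++) {
                if (!c[i].rule) {
                        if (!c[i].args_p)
                                continue;     /* condition never configured */
                        return false;         /* configured but unusable: fail closed */
                }
                /* evaluate the compiled rule here */
        }
        return true;
}

int main(void)
{
        struct lsm_cond unconfigured[1] = { { NULL, NULL } };
        struct lsm_cond uncompiled[1]   = { { "obj_type=var_log_t", NULL } };

        printf("unconfigured: %d  uncompiled: %d\n",
               lsm_conds_match(unconfigured, 1),
               lsm_conds_match(uncompiled, 1));
        return 0;
}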
7895 +diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
7896 +index de988589d99b..66cd97cc8b92 100644
7897 +--- a/tools/perf/builtin-report.c
7898 ++++ b/tools/perf/builtin-report.c
7899 +@@ -412,10 +412,10 @@ static int report__setup_sample_type(struct report *rep)
7900 + PERF_SAMPLE_BRANCH_ANY))
7901 + rep->nonany_branch_mode = true;
7902 +
7903 +-#ifndef HAVE_LIBUNWIND_SUPPORT
7904 ++#if !defined(HAVE_LIBUNWIND_SUPPORT) && !defined(HAVE_DWARF_SUPPORT)
7905 + if (dwarf_callchain_users) {
7906 +- ui__warning("Please install libunwind development packages "
7907 +- "during the perf build.\n");
7908 ++ ui__warning("Please install libunwind or libdw "
7909 ++ "development packages during the perf build.\n");
7910 + }
7911 + #endif
7912 +
7913 +diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
7914 +index d4d3558fdef4..cfc6172ecab7 100644
7915 +--- a/tools/perf/ui/browsers/hists.c
7916 ++++ b/tools/perf/ui/browsers/hists.c
7917 +@@ -3062,6 +3062,7 @@ static int perf_evsel__hists_browse(struct evsel *evsel, int nr_events,
7918 +
7919 + continue;
7920 + }
7921 ++ actions->ms.map = map;
7922 + top = pstack__peek(browser->pstack);
7923 + if (top == &browser->hists->dso_filter) {
7924 + /*
7925 +diff --git a/tools/perf/ui/gtk/Build b/tools/perf/ui/gtk/Build
7926 +index ec22e899a224..9b5d5cbb7af7 100644
7927 +--- a/tools/perf/ui/gtk/Build
7928 ++++ b/tools/perf/ui/gtk/Build
7929 +@@ -7,3 +7,8 @@ gtk-y += util.o
7930 + gtk-y += helpline.o
7931 + gtk-y += progress.o
7932 + gtk-y += annotate.o
7933 ++gtk-y += zalloc.o
7934 ++
7935 ++$(OUTPUT)ui/gtk/zalloc.o: ../lib/zalloc.c FORCE
7936 ++ $(call rule_mkdir)
7937 ++ $(call if_changed_dep,cc_o_c)
7938 +diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
7939 +index fdd5bddb3075..f67960bedebb 100644
7940 +--- a/tools/perf/util/map.c
7941 ++++ b/tools/perf/util/map.c
7942 +@@ -549,6 +549,7 @@ void maps__insert(struct maps *maps, struct map *map)
7943 +
7944 + if (maps_by_name == NULL) {
7945 + __maps__free_maps_by_name(maps);
7946 ++ up_write(&maps->lock);
7947 + return;
7948 + }
7949 +
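The one-line perf fix releases maps->lock on the allocation-failure path of maps__insert(), which previously returned with the write lock still held. The general shape of that fix, shown with a pthread rwlock and illustrative names:

#include <pthread.h>
#include <stdlib.h>

static pthread_rwlock_t maps_lock = PTHREAD_RWLOCK_INITIALIZER;

static int maps_insert(size_t nr_maps)
{
        void **maps_by_name;

        pthread_rwlock_wrlock(&maps_lock);

        maps_by_name = calloc(nr_maps, sizeof(*maps_by_name));
        if (maps_by_name == NULL) {
                pthread_rwlock_unlock(&maps_lock);  /* the unlock the fix adds */
                return -1;
        }

        /* ... insert the new map into the sorted array ... */

        free(maps_by_name);
        pthread_rwlock_unlock(&maps_lock);
        return 0;
}

int main(void)
{
        return maps_insert(16) ? 1 : 0;
}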
7950 +diff --git a/tools/testing/selftests/ftrace/Makefile b/tools/testing/selftests/ftrace/Makefile
7951 +index cd1f5b3a7774..d6e106fbce11 100644
7952 +--- a/tools/testing/selftests/ftrace/Makefile
7953 ++++ b/tools/testing/selftests/ftrace/Makefile
7954 +@@ -2,7 +2,7 @@
7955 + all:
7956 +
7957 + TEST_PROGS := ftracetest
7958 +-TEST_FILES := test.d
7959 ++TEST_FILES := test.d settings
7960 + EXTRA_CLEAN := $(OUTPUT)/logs/*
7961 +
7962 + include ../lib.mk
7963 +diff --git a/tools/testing/selftests/livepatch/Makefile b/tools/testing/selftests/livepatch/Makefile
7964 +index 3876d8d62494..1acc9e1fa3fb 100644
7965 +--- a/tools/testing/selftests/livepatch/Makefile
7966 ++++ b/tools/testing/selftests/livepatch/Makefile
7967 +@@ -8,4 +8,6 @@ TEST_PROGS := \
7968 + test-state.sh \
7969 + test-ftrace.sh
7970 +
7971 ++TEST_FILES := settings
7972 ++
7973 + include ../lib.mk
7974 +diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
7975 +index 6dd403103800..60273f1bc7d9 100755
7976 +--- a/tools/testing/selftests/net/fib_tests.sh
7977 ++++ b/tools/testing/selftests/net/fib_tests.sh
7978 +@@ -910,6 +910,12 @@ ipv6_rt_replace_mpath()
7979 + check_route6 "2001:db8:104::/64 via 2001:db8:101::3 dev veth1 metric 1024"
7980 + log_test $? 0 "Multipath with single path via multipath attribute"
7981 +
7982 ++ # multipath with dev-only
7983 ++ add_initial_route6 "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2"
7984 ++ run_cmd "$IP -6 ro replace 2001:db8:104::/64 dev veth1"
7985 ++ check_route6 "2001:db8:104::/64 dev veth1 metric 1024"
7986 ++ log_test $? 0 "Multipath with dev-only"
7987 ++
7988 + # route replace fails - invalid nexthop 1
7989 + add_initial_route6 "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2"
7990 + run_cmd "$IP -6 ro replace 2001:db8:104::/64 nexthop via 2001:db8:111::3 nexthop via 2001:db8:103::3"
7991 +diff --git a/tools/testing/selftests/rseq/Makefile b/tools/testing/selftests/rseq/Makefile
7992 +index d6469535630a..f1053630bb6f 100644
7993 +--- a/tools/testing/selftests/rseq/Makefile
7994 ++++ b/tools/testing/selftests/rseq/Makefile
7995 +@@ -19,6 +19,8 @@ TEST_GEN_PROGS_EXTENDED = librseq.so
7996 +
7997 + TEST_PROGS = run_param_test.sh
7998 +
7999 ++TEST_FILES := settings
8000 ++
8001 + include ../lib.mk
8002 +
8003 + $(OUTPUT)/librseq.so: rseq.c rseq.h rseq-*.h
8004 +diff --git a/tools/testing/selftests/rtc/Makefile b/tools/testing/selftests/rtc/Makefile
8005 +index de9c8566672a..90fa1a346908 100644
8006 +--- a/tools/testing/selftests/rtc/Makefile
8007 ++++ b/tools/testing/selftests/rtc/Makefile
8008 +@@ -6,4 +6,6 @@ TEST_GEN_PROGS = rtctest
8009 +
8010 + TEST_GEN_PROGS_EXTENDED = setdate
8011 +
8012 ++TEST_FILES := settings
8013 ++
8014 + include ../lib.mk
8015 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
8016 +index 2b3f36df3d85..75b7ee1af1c3 100644
8017 +--- a/virt/kvm/kvm_main.c
8018 ++++ b/virt/kvm/kvm_main.c
8019 +@@ -2287,12 +2287,12 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
8020 + if (slots->generation != ghc->generation)
8021 + __kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
8022 +
8023 +- if (unlikely(!ghc->memslot))
8024 +- return kvm_write_guest(kvm, gpa, data, len);
8025 +-
8026 + if (kvm_is_error_hva(ghc->hva))
8027 + return -EFAULT;
8028 +
8029 ++ if (unlikely(!ghc->memslot))
8030 ++ return kvm_write_guest(kvm, gpa, data, len);
8031 ++
8032 + r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
8033 + if (r)
8034 + return -EFAULT;
8035 +@@ -2320,12 +2320,12 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
8036 + if (slots->generation != ghc->generation)
8037 + __kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
8038 +
8039 +- if (unlikely(!ghc->memslot))
8040 +- return kvm_read_guest(kvm, ghc->gpa, data, len);
8041 +-
8042 + if (kvm_is_error_hva(ghc->hva))
8043 + return -EFAULT;
8044 +
8045 ++ if (unlikely(!ghc->memslot))
8046 ++ return kvm_read_guest(kvm, ghc->gpa, data, len);
8047 ++
8048 + r = __copy_from_user(data, (void __user *)ghc->hva, len);
8049 + if (r)
8050 + return -EFAULT;
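The KVM hunks reorder the checks in the cached read/write helpers so a cache whose hva is invalid fails with -EFAULT immediately, and the uncached slow path is used only when the cache merely lacks a memslot (for example because it spans slots). A toy model of that check ordering follows; the struct and return-code handling are illustrative, not the KVM API.

#include <stdbool.h>
#include <stdio.h>

struct hva_cache {
        bool hva_valid;     /* cached address resolved successfully */
        bool has_memslot;   /* cache covers exactly one memory slot */
};

static int cached_write(const struct hva_cache *c)
{
        if (!c->hva_valid)
                return -14;     /* -EFAULT: bad address, never fall back */
        if (!c->has_memslot)
                return 1;       /* usable but multi-slot: take the uncached slow path */
        return 0;               /* fast path through the cached hva */
}

int main(void)
{
        struct hva_cache bad        = { .hva_valid = false, .has_memslot = false };
        struct hva_cache straddling = { .hva_valid = true,  .has_memslot = false };
        struct hva_cache cached     = { .hva_valid = true,  .has_memslot = true  };

        printf("%d %d %d\n", cached_write(&bad),
               cached_write(&straddling), cached_write(&cached));
        return 0;
}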