Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Thu, 26 Aug 2021 14:06:09
Message-Id: 1629986750.7090d0e6645701aa3bd95b4acb47f303469349ec.mpagano@gentoo
1 commit: 7090d0e6645701aa3bd95b4acb47f303469349ec
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Thu Aug 26 14:05:50 2021 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Thu Aug 26 14:05:50 2021 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=7090d0e6
7
8 Linux patch 4.19.205
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1204_linux-4.19.205.patch | 2728 +++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 2732 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 10bff84..6a39170 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -855,6 +855,10 @@ Patch: 1203_linux-4.19.204.patch
21 From: https://www.kernel.org
22 Desc: Linux 4.19.204
23
24 +Patch: 1204_linux-4.19.205.patch
25 +From: https://www.kernel.org
26 +Desc: Linux 4.19.205
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1204_linux-4.19.205.patch b/1204_linux-4.19.205.patch
33 new file mode 100644
34 index 0000000..fe40dfc
35 --- /dev/null
36 +++ b/1204_linux-4.19.205.patch
37 @@ -0,0 +1,2728 @@
38 +diff --git a/Documentation/filesystems/mandatory-locking.txt b/Documentation/filesystems/mandatory-locking.txt
39 +index 0979d1d2ca8bb..a251ca33164ae 100644
40 +--- a/Documentation/filesystems/mandatory-locking.txt
41 ++++ b/Documentation/filesystems/mandatory-locking.txt
42 +@@ -169,3 +169,13 @@ havoc if they lock crucial files. The way around it is to change the file
43 + permissions (remove the setgid bit) before trying to read or write to it.
44 + Of course, that might be a bit tricky if the system is hung :-(
45 +
46 ++7. The "mand" mount option
47 ++--------------------------
48 ++Mandatory locking is disabled on all filesystems by default, and must be
49 ++administratively enabled by mounting with "-o mand". That mount option
50 ++is only allowed if the mounting task has the CAP_SYS_ADMIN capability.
51 ++
52 ++Since kernel v4.5, it is possible to disable mandatory locking
53 ++altogether by setting CONFIG_MANDATORY_FILE_LOCKING to "n". A kernel
54 ++with this disabled will reject attempts to mount filesystems with the
55 ++"mand" mount option with the error status EPERM.
56 +diff --git a/Makefile b/Makefile
57 +index d4ffcafb8efad..abc35829f47ba 100644
58 +--- a/Makefile
59 ++++ b/Makefile
60 +@@ -1,7 +1,7 @@
61 + # SPDX-License-Identifier: GPL-2.0
62 + VERSION = 4
63 + PATCHLEVEL = 19
64 +-SUBLEVEL = 204
65 ++SUBLEVEL = 205
66 + EXTRAVERSION =
67 + NAME = "People's Front"
68 +
69 +diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
70 +index 02bbdfb3f2582..0cc3ac6566c62 100644
71 +--- a/arch/arm/boot/dts/am43x-epos-evm.dts
72 ++++ b/arch/arm/boot/dts/am43x-epos-evm.dts
73 +@@ -590,7 +590,7 @@
74 + status = "okay";
75 + pinctrl-names = "default";
76 + pinctrl-0 = <&i2c0_pins>;
77 +- clock-frequency = <400000>;
78 ++ clock-frequency = <100000>;
79 +
80 + tps65218: tps65218@24 {
81 + reg = <0x24>;
82 +diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
83 +index fca76a696d9d7..9ba4d1630ca31 100644
84 +--- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
85 ++++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
86 +@@ -755,14 +755,14 @@
87 + status = "disabled";
88 + };
89 +
90 +- vica: intc@10140000 {
91 ++ vica: interrupt-controller@10140000 {
92 + compatible = "arm,versatile-vic";
93 + interrupt-controller;
94 + #interrupt-cells = <1>;
95 + reg = <0x10140000 0x20>;
96 + };
97 +
98 +- vicb: intc@10140020 {
99 ++ vicb: interrupt-controller@10140020 {
100 + compatible = "arm,versatile-vic";
101 + interrupt-controller;
102 + #interrupt-cells = <1>;
103 +diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
104 +index 53a39661eb13b..ccf16bccc2bc9 100644
105 +--- a/arch/powerpc/kernel/kprobes.c
106 ++++ b/arch/powerpc/kernel/kprobes.c
107 +@@ -277,7 +277,8 @@ int kprobe_handler(struct pt_regs *regs)
108 + if (user_mode(regs))
109 + return 0;
110 +
111 +- if (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR))
112 ++ if (!IS_ENABLED(CONFIG_BOOKE) &&
113 ++ (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR)))
114 + return 0;
115 +
116 + /*
117 +diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
118 +index b8c935033d210..4f274d8519865 100644
119 +--- a/arch/x86/include/asm/fpu/internal.h
120 ++++ b/arch/x86/include/asm/fpu/internal.h
121 +@@ -215,6 +215,14 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
122 + }
123 + }
124 +
125 ++static inline void fxsave(struct fxregs_state *fx)
126 ++{
127 ++ if (IS_ENABLED(CONFIG_X86_32))
128 ++ asm volatile( "fxsave %[fx]" : [fx] "=m" (*fx));
129 ++ else
130 ++ asm volatile("fxsaveq %[fx]" : [fx] "=m" (*fx));
131 ++}
132 ++
133 + /* These macros all use (%edi)/(%rdi) as the single memory argument. */
134 + #define XSAVE ".byte " REX_PREFIX "0x0f,0xae,0x27"
135 + #define XSAVEOPT ".byte " REX_PREFIX "0x0f,0xae,0x37"
136 +@@ -283,28 +291,6 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
137 + : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
138 + : "memory")
139 +
140 +-/*
141 +- * This function is called only during boot time when x86 caps are not set
142 +- * up and alternative can not be used yet.
143 +- */
144 +-static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
145 +-{
146 +- u64 mask = -1;
147 +- u32 lmask = mask;
148 +- u32 hmask = mask >> 32;
149 +- int err;
150 +-
151 +- WARN_ON(system_state != SYSTEM_BOOTING);
152 +-
153 +- if (static_cpu_has(X86_FEATURE_XSAVES))
154 +- XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
155 +- else
156 +- XSTATE_OP(XSAVE, xstate, lmask, hmask, err);
157 +-
158 +- /* We should never fault when copying to a kernel buffer: */
159 +- WARN_ON_FPU(err);
160 +-}
161 +-
162 + /*
163 + * This function is called only during boot time when x86 caps are not set
164 + * up and alternative can not be used yet.
165 +diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
166 +index 93b462e480671..b6dedf6c835c9 100644
167 +--- a/arch/x86/include/asm/svm.h
168 ++++ b/arch/x86/include/asm/svm.h
169 +@@ -118,6 +118,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
170 + #define V_IGN_TPR_SHIFT 20
171 + #define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT)
172 +
173 ++#define V_IRQ_INJECTION_BITS_MASK (V_IRQ_MASK | V_INTR_PRIO_MASK | V_IGN_TPR_MASK)
174 ++
175 + #define V_INTR_MASKING_SHIFT 24
176 + #define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT)
177 +
178 +diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
179 +index a89dac380243a..677508baf95a0 100644
180 +--- a/arch/x86/kernel/apic/io_apic.c
181 ++++ b/arch/x86/kernel/apic/io_apic.c
182 +@@ -1958,7 +1958,8 @@ static struct irq_chip ioapic_chip __read_mostly = {
183 + .irq_set_affinity = ioapic_set_affinity,
184 + .irq_retrigger = irq_chip_retrigger_hierarchy,
185 + .irq_get_irqchip_state = ioapic_irq_get_chip_state,
186 +- .flags = IRQCHIP_SKIP_SET_WAKE,
187 ++ .flags = IRQCHIP_SKIP_SET_WAKE |
188 ++ IRQCHIP_AFFINITY_PRE_STARTUP,
189 + };
190 +
191 + static struct irq_chip ioapic_ir_chip __read_mostly = {
192 +@@ -1971,7 +1972,8 @@ static struct irq_chip ioapic_ir_chip __read_mostly = {
193 + .irq_set_affinity = ioapic_set_affinity,
194 + .irq_retrigger = irq_chip_retrigger_hierarchy,
195 + .irq_get_irqchip_state = ioapic_irq_get_chip_state,
196 +- .flags = IRQCHIP_SKIP_SET_WAKE,
197 ++ .flags = IRQCHIP_SKIP_SET_WAKE |
198 ++ IRQCHIP_AFFINITY_PRE_STARTUP,
199 + };
200 +
201 + static inline void init_IO_APIC_traps(void)
202 +diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
203 +index fb26c956c4421..ca17a38488346 100644
204 +--- a/arch/x86/kernel/apic/msi.c
205 ++++ b/arch/x86/kernel/apic/msi.c
206 +@@ -89,11 +89,13 @@ msi_set_affinity(struct irq_data *irqd, const struct cpumask *mask, bool force)
207 + * The quirk bit is not set in this case.
208 + * - The new vector is the same as the old vector
209 + * - The old vector is MANAGED_IRQ_SHUTDOWN_VECTOR (interrupt starts up)
210 ++ * - The interrupt is not yet started up
211 + * - The new destination CPU is the same as the old destination CPU
212 + */
213 + if (!irqd_msi_nomask_quirk(irqd) ||
214 + cfg->vector == old_cfg.vector ||
215 + old_cfg.vector == MANAGED_IRQ_SHUTDOWN_VECTOR ||
216 ++ !irqd_is_started(irqd) ||
217 + cfg->dest_apicid == old_cfg.dest_apicid) {
218 + irq_msi_update_msg(irqd, cfg);
219 + return ret;
220 +@@ -181,7 +183,8 @@ static struct irq_chip pci_msi_controller = {
221 + .irq_retrigger = irq_chip_retrigger_hierarchy,
222 + .irq_compose_msi_msg = irq_msi_compose_msg,
223 + .irq_set_affinity = msi_set_affinity,
224 +- .flags = IRQCHIP_SKIP_SET_WAKE,
225 ++ .flags = IRQCHIP_SKIP_SET_WAKE |
226 ++ IRQCHIP_AFFINITY_PRE_STARTUP,
227 + };
228 +
229 + int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
230 +@@ -282,7 +285,8 @@ static struct irq_chip pci_msi_ir_controller = {
231 + .irq_ack = irq_chip_ack_parent,
232 + .irq_retrigger = irq_chip_retrigger_hierarchy,
233 + .irq_set_vcpu_affinity = irq_chip_set_vcpu_affinity_parent,
234 +- .flags = IRQCHIP_SKIP_SET_WAKE,
235 ++ .flags = IRQCHIP_SKIP_SET_WAKE |
236 ++ IRQCHIP_AFFINITY_PRE_STARTUP,
237 + };
238 +
239 + static struct msi_domain_info pci_msi_ir_domain_info = {
240 +@@ -325,7 +329,8 @@ static struct irq_chip dmar_msi_controller = {
241 + .irq_retrigger = irq_chip_retrigger_hierarchy,
242 + .irq_compose_msi_msg = irq_msi_compose_msg,
243 + .irq_write_msi_msg = dmar_msi_write_msg,
244 +- .flags = IRQCHIP_SKIP_SET_WAKE,
245 ++ .flags = IRQCHIP_SKIP_SET_WAKE |
246 ++ IRQCHIP_AFFINITY_PRE_STARTUP,
247 + };
248 +
249 + static irq_hw_number_t dmar_msi_get_hwirq(struct msi_domain_info *info,
250 +@@ -423,7 +428,7 @@ static struct irq_chip hpet_msi_controller __ro_after_init = {
251 + .irq_retrigger = irq_chip_retrigger_hierarchy,
252 + .irq_compose_msi_msg = irq_msi_compose_msg,
253 + .irq_write_msi_msg = hpet_msi_write_msg,
254 +- .flags = IRQCHIP_SKIP_SET_WAKE,
255 ++ .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_AFFINITY_PRE_STARTUP,
256 + };
257 +
258 + static irq_hw_number_t hpet_msi_get_hwirq(struct msi_domain_info *info,
259 +diff --git a/arch/x86/kernel/cpu/intel_rdt_monitor.c b/arch/x86/kernel/cpu/intel_rdt_monitor.c
260 +index 5dfa5ab9a5ae2..6eeb17dfde48e 100644
261 +--- a/arch/x86/kernel/cpu/intel_rdt_monitor.c
262 ++++ b/arch/x86/kernel/cpu/intel_rdt_monitor.c
263 +@@ -233,15 +233,14 @@ static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr)
264 + return chunks >>= shift;
265 + }
266 +
267 +-static int __mon_event_count(u32 rmid, struct rmid_read *rr)
268 ++static u64 __mon_event_count(u32 rmid, struct rmid_read *rr)
269 + {
270 + struct mbm_state *m;
271 + u64 chunks, tval;
272 +
273 + tval = __rmid_read(rmid, rr->evtid);
274 + if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL)) {
275 +- rr->val = tval;
276 +- return -EINVAL;
277 ++ return tval;
278 + }
279 + switch (rr->evtid) {
280 + case QOS_L3_OCCUP_EVENT_ID:
281 +@@ -253,12 +252,6 @@ static int __mon_event_count(u32 rmid, struct rmid_read *rr)
282 + case QOS_L3_MBM_LOCAL_EVENT_ID:
283 + m = &rr->d->mbm_local[rmid];
284 + break;
285 +- default:
286 +- /*
287 +- * Code would never reach here because
288 +- * an invalid event id would fail the __rmid_read.
289 +- */
290 +- return -EINVAL;
291 + }
292 +
293 + if (rr->first) {
294 +@@ -308,23 +301,29 @@ void mon_event_count(void *info)
295 + struct rdtgroup *rdtgrp, *entry;
296 + struct rmid_read *rr = info;
297 + struct list_head *head;
298 ++ u64 ret_val;
299 +
300 + rdtgrp = rr->rgrp;
301 +
302 +- if (__mon_event_count(rdtgrp->mon.rmid, rr))
303 +- return;
304 ++ ret_val = __mon_event_count(rdtgrp->mon.rmid, rr);
305 +
306 + /*
307 +- * For Ctrl groups read data from child monitor groups.
308 ++ * For Ctrl groups read data from child monitor groups and
309 ++ * add them together. Count events which are read successfully.
310 ++ * Discard the rmid_read's reporting errors.
311 + */
312 + head = &rdtgrp->mon.crdtgrp_list;
313 +
314 + if (rdtgrp->type == RDTCTRL_GROUP) {
315 + list_for_each_entry(entry, head, mon.crdtgrp_list) {
316 +- if (__mon_event_count(entry->mon.rmid, rr))
317 +- return;
318 ++ if (__mon_event_count(entry->mon.rmid, rr) == 0)
319 ++ ret_val = 0;
320 + }
321 + }
322 ++
323 ++ /* Report error if none of rmid_reads are successful */
324 ++ if (ret_val)
325 ++ rr->val = ret_val;
326 + }
327 +
328 + /*
329 +diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
330 +index 601a5da1d196a..7d372db8bee11 100644
331 +--- a/arch/x86/kernel/fpu/xstate.c
332 ++++ b/arch/x86/kernel/fpu/xstate.c
333 +@@ -404,6 +404,24 @@ static void __init print_xstate_offset_size(void)
334 + }
335 + }
336 +
337 ++/*
338 ++ * All supported features have either init state all zeros or are
339 ++ * handled in setup_init_fpu() individually. This is an explicit
340 ++ * feature list and does not use XFEATURE_MASK*SUPPORTED to catch
341 ++ * newly added supported features at build time and make people
342 ++ * actually look at the init state for the new feature.
343 ++ */
344 ++#define XFEATURES_INIT_FPSTATE_HANDLED \
345 ++ (XFEATURE_MASK_FP | \
346 ++ XFEATURE_MASK_SSE | \
347 ++ XFEATURE_MASK_YMM | \
348 ++ XFEATURE_MASK_OPMASK | \
349 ++ XFEATURE_MASK_ZMM_Hi256 | \
350 ++ XFEATURE_MASK_Hi16_ZMM | \
351 ++ XFEATURE_MASK_PKRU | \
352 ++ XFEATURE_MASK_BNDREGS | \
353 ++ XFEATURE_MASK_BNDCSR)
354 ++
355 + /*
356 + * setup the xstate image representing the init state
357 + */
358 +@@ -411,6 +429,8 @@ static void __init setup_init_fpu_buf(void)
359 + {
360 + static int on_boot_cpu __initdata = 1;
361 +
362 ++ BUILD_BUG_ON(XCNTXT_MASK != XFEATURES_INIT_FPSTATE_HANDLED);
363 ++
364 + WARN_ON_FPU(!on_boot_cpu);
365 + on_boot_cpu = 0;
366 +
367 +@@ -429,10 +449,22 @@ static void __init setup_init_fpu_buf(void)
368 + copy_kernel_to_xregs_booting(&init_fpstate.xsave);
369 +
370 + /*
371 +- * Dump the init state again. This is to identify the init state
372 +- * of any feature which is not represented by all zero's.
373 ++ * All components are now in init state. Read the state back so
374 ++ * that init_fpstate contains all non-zero init state. This only
375 ++ * works with XSAVE, but not with XSAVEOPT and XSAVES because
376 ++ * those use the init optimization which skips writing data for
377 ++ * components in init state.
378 ++ *
379 ++ * XSAVE could be used, but that would require to reshuffle the
380 ++ * data when XSAVES is available because XSAVES uses xstate
381 ++ * compaction. But doing so is a pointless exercise because most
382 ++ * components have an all zeros init state except for the legacy
383 ++ * ones (FP and SSE). Those can be saved with FXSAVE into the
384 ++ * legacy area. Adding new features requires to ensure that init
385 ++ * state is all zeroes or if not to add the necessary handling
386 ++ * here.
387 + */
388 +- copy_xregs_to_kernel_booting(&init_fpstate.xsave);
389 ++ fxsave(&init_fpstate.fxsave);
390 + }
391 +
392 + static int xfeature_uncompacted_offset(int xfeature_nr)
393 +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
394 +index 72d729f34437d..85181457413e7 100644
395 +--- a/arch/x86/kvm/svm.c
396 ++++ b/arch/x86/kvm/svm.c
397 +@@ -513,6 +513,9 @@ static void recalc_intercepts(struct vcpu_svm *svm)
398 + c->intercept_dr = h->intercept_dr | g->intercept_dr;
399 + c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
400 + c->intercept = h->intercept | g->intercept;
401 ++
402 ++ c->intercept |= (1ULL << INTERCEPT_VMLOAD);
403 ++ c->intercept |= (1ULL << INTERCEPT_VMSAVE);
404 + }
405 +
406 + static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
407 +@@ -1441,12 +1444,7 @@ static __init int svm_hardware_setup(void)
408 + }
409 + }
410 +
411 +- if (vgif) {
412 +- if (!boot_cpu_has(X86_FEATURE_VGIF))
413 +- vgif = false;
414 +- else
415 +- pr_info("Virtual GIF supported\n");
416 +- }
417 ++ vgif = false; /* Disabled for CVE-2021-3653 */
418 +
419 + return 0;
420 +
421 +@@ -3590,7 +3588,13 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
422 + svm->nested.intercept = nested_vmcb->control.intercept;
423 +
424 + svm_flush_tlb(&svm->vcpu, true);
425 +- svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
426 ++
427 ++ svm->vmcb->control.int_ctl &=
428 ++ V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;
429 ++
430 ++ svm->vmcb->control.int_ctl |= nested_vmcb->control.int_ctl &
431 ++ (V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK);
432 ++
433 + if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
434 + svm->vcpu.arch.hflags |= HF_VINTR_MASK;
435 + else
436 +diff --git a/arch/x86/tools/chkobjdump.awk b/arch/x86/tools/chkobjdump.awk
437 +index fd1ab80be0dec..a4cf678cf5c80 100644
438 +--- a/arch/x86/tools/chkobjdump.awk
439 ++++ b/arch/x86/tools/chkobjdump.awk
440 +@@ -10,6 +10,7 @@ BEGIN {
441 +
442 + /^GNU objdump/ {
443 + verstr = ""
444 ++ gsub(/\(.*\)/, "");
445 + for (i = 3; i <= NF; i++)
446 + if (match($(i), "^[0-9]")) {
447 + verstr = $(i);
448 +diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
449 +index cb88f3b43a940..58a756ca14d85 100644
450 +--- a/drivers/acpi/nfit/core.c
451 ++++ b/drivers/acpi/nfit/core.c
452 +@@ -2834,6 +2834,9 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
453 + struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
454 + struct nd_mapping_desc *mapping;
455 +
456 ++ /* range index 0 == unmapped in SPA or invalid-SPA */
457 ++ if (memdev->range_index == 0 || spa->range_index == 0)
458 ++ continue;
459 + if (memdev->range_index != spa->range_index)
460 + continue;
461 + if (count >= ND_MAX_MAPPINGS) {
462 +diff --git a/drivers/base/core.c b/drivers/base/core.c
463 +index f7f601858f10d..6e380ad9d08ad 100644
464 +--- a/drivers/base/core.c
465 ++++ b/drivers/base/core.c
466 +@@ -1682,6 +1682,7 @@ void device_initialize(struct device *dev)
467 + device_pm_init(dev);
468 + set_dev_node(dev, -1);
469 + #ifdef CONFIG_GENERIC_MSI_IRQ
470 ++ raw_spin_lock_init(&dev->msi_lock);
471 + INIT_LIST_HEAD(&dev->msi_list);
472 + #endif
473 + INIT_LIST_HEAD(&dev->links.consumers);
474 +diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
475 +index a36452bd9612d..31b5655419b42 100644
476 +--- a/drivers/cpufreq/armada-37xx-cpufreq.c
477 ++++ b/drivers/cpufreq/armada-37xx-cpufreq.c
478 +@@ -102,7 +102,11 @@ struct armada_37xx_dvfs {
479 + };
480 +
481 + static struct armada_37xx_dvfs armada_37xx_dvfs[] = {
482 +- {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} },
483 ++ /*
484 ++ * The cpufreq scaling for 1.2 GHz variant of the SOC is currently
485 ++ * unstable because we do not know how to configure it properly.
486 ++ */
487 ++ /* {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} }, */
488 + {.cpu_freq_max = 1000*1000*1000, .divider = {1, 2, 4, 5} },
489 + {.cpu_freq_max = 800*1000*1000, .divider = {1, 2, 3, 4} },
490 + {.cpu_freq_max = 600*1000*1000, .divider = {2, 4, 5, 6} },
491 +diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
492 +index 8344a60c2131b..a9d3ab94749b1 100644
493 +--- a/drivers/dma/of-dma.c
494 ++++ b/drivers/dma/of-dma.c
495 +@@ -68,8 +68,12 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
496 + return NULL;
497 +
498 + ofdma_target = of_dma_find_controller(&dma_spec_target);
499 +- if (!ofdma_target)
500 +- return NULL;
501 ++ if (!ofdma_target) {
502 ++ ofdma->dma_router->route_free(ofdma->dma_router->dev,
503 ++ route_data);
504 ++ chan = ERR_PTR(-EPROBE_DEFER);
505 ++ goto err;
506 ++ }
507 +
508 + chan = ofdma_target->of_dma_xlate(&dma_spec_target, ofdma_target);
509 + if (IS_ERR_OR_NULL(chan)) {
510 +@@ -80,6 +84,7 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
511 + chan->route_data = route_data;
512 + }
513 +
514 ++err:
515 + /*
516 + * Need to put the node back since the ofdma->of_dma_route_allocate
517 + * has taken it for generating the new, translated dma_spec
518 +diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
519 +index 6c94ed7500494..d77bf325f0384 100644
520 +--- a/drivers/dma/sh/usb-dmac.c
521 ++++ b/drivers/dma/sh/usb-dmac.c
522 +@@ -860,8 +860,8 @@ static int usb_dmac_probe(struct platform_device *pdev)
523 +
524 + error:
525 + of_dma_controller_free(pdev->dev.of_node);
526 +- pm_runtime_put(&pdev->dev);
527 + error_pm:
528 ++ pm_runtime_put(&pdev->dev);
529 + pm_runtime_disable(&pdev->dev);
530 + return ret;
531 + }
532 +diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
533 +index 0c5668e897fe7..d891ec05bc48b 100644
534 +--- a/drivers/dma/xilinx/xilinx_dma.c
535 ++++ b/drivers/dma/xilinx/xilinx_dma.c
536 +@@ -332,6 +332,7 @@ struct xilinx_dma_tx_descriptor {
537 + * @genlock: Support genlock mode
538 + * @err: Channel has errors
539 + * @idle: Check for channel idle
540 ++ * @terminating: Check for channel being synchronized by user
541 + * @tasklet: Cleanup work after irq
542 + * @config: Device configuration info
543 + * @flush_on_fsync: Flush on Frame sync
544 +@@ -369,6 +370,7 @@ struct xilinx_dma_chan {
545 + bool genlock;
546 + bool err;
547 + bool idle;
548 ++ bool terminating;
549 + struct tasklet_struct tasklet;
550 + struct xilinx_vdma_config config;
551 + bool flush_on_fsync;
552 +@@ -843,6 +845,13 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
553 + /* Run any dependencies, then free the descriptor */
554 + dma_run_dependencies(&desc->async_tx);
555 + xilinx_dma_free_tx_descriptor(chan, desc);
556 ++
557 ++ /*
558 ++ * While we ran a callback the user called a terminate function,
559 ++ * which takes care of cleaning up any remaining descriptors
560 ++ */
561 ++ if (chan->terminating)
562 ++ break;
563 + }
564 +
565 + spin_unlock_irqrestore(&chan->lock, flags);
566 +@@ -1612,6 +1621,8 @@ static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
567 + if (desc->cyclic)
568 + chan->cyclic = true;
569 +
570 ++ chan->terminating = false;
571 ++
572 + spin_unlock_irqrestore(&chan->lock, flags);
573 +
574 + return cookie;
575 +@@ -2068,6 +2079,7 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
576 + }
577 +
578 + /* Remove and free all of the descriptors in the lists */
579 ++ chan->terminating = true;
580 + xilinx_dma_free_descriptors(chan);
581 + chan->idle = true;
582 +
583 +diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
584 +index 1d10ee86299d8..57aece8098416 100644
585 +--- a/drivers/i2c/i2c-dev.c
586 ++++ b/drivers/i2c/i2c-dev.c
587 +@@ -149,7 +149,7 @@ static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count,
588 + if (count > 8192)
589 + count = 8192;
590 +
591 +- tmp = kmalloc(count, GFP_KERNEL);
592 ++ tmp = kzalloc(count, GFP_KERNEL);
593 + if (tmp == NULL)
594 + return -ENOMEM;
595 +
596 +@@ -158,7 +158,8 @@ static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count,
597 +
598 + ret = i2c_master_recv(client, tmp, count);
599 + if (ret >= 0)
600 +- ret = copy_to_user(buf, tmp, count) ? -EFAULT : ret;
601 ++ if (copy_to_user(buf, tmp, ret))
602 ++ ret = -EFAULT;
603 + kfree(tmp);
604 + return ret;
605 + }
606 +diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c
607 +index 69b9affeef1e6..7dcd4213d38a0 100644
608 +--- a/drivers/iio/adc/palmas_gpadc.c
609 ++++ b/drivers/iio/adc/palmas_gpadc.c
610 +@@ -659,8 +659,8 @@ static int palmas_adc_wakeup_configure(struct palmas_gpadc *adc)
611 +
612 + adc_period = adc->auto_conversion_period;
613 + for (i = 0; i < 16; ++i) {
614 +- if (((1000 * (1 << i)) / 32) < adc_period)
615 +- continue;
616 ++ if (((1000 * (1 << i)) / 32) >= adc_period)
617 ++ break;
618 + }
619 + if (i > 0)
620 + i--;
621 +diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
622 +index 0fcaa2c0b2f43..51ad5a9ed0851 100644
623 +--- a/drivers/iio/humidity/hdc100x.c
624 ++++ b/drivers/iio/humidity/hdc100x.c
625 +@@ -24,6 +24,8 @@
626 + #include <linux/iio/trigger_consumer.h>
627 + #include <linux/iio/triggered_buffer.h>
628 +
629 ++#include <linux/time.h>
630 ++
631 + #define HDC100X_REG_TEMP 0x00
632 + #define HDC100X_REG_HUMIDITY 0x01
633 +
634 +@@ -165,7 +167,7 @@ static int hdc100x_get_measurement(struct hdc100x_data *data,
635 + struct iio_chan_spec const *chan)
636 + {
637 + struct i2c_client *client = data->client;
638 +- int delay = data->adc_int_us[chan->address];
639 ++ int delay = data->adc_int_us[chan->address] + 1*USEC_PER_MSEC;
640 + int ret;
641 + __be16 val;
642 +
643 +@@ -322,7 +324,7 @@ static irqreturn_t hdc100x_trigger_handler(int irq, void *p)
644 + struct iio_dev *indio_dev = pf->indio_dev;
645 + struct hdc100x_data *data = iio_priv(indio_dev);
646 + struct i2c_client *client = data->client;
647 +- int delay = data->adc_int_us[0] + data->adc_int_us[1];
648 ++ int delay = data->adc_int_us[0] + data->adc_int_us[1] + 2*USEC_PER_MSEC;
649 + int ret;
650 +
651 + /* dual read starts at temp register */
652 +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
653 +index d2166dfc8b3f8..dcb865d193090 100644
654 +--- a/drivers/iommu/intel-iommu.c
655 ++++ b/drivers/iommu/intel-iommu.c
656 +@@ -1928,7 +1928,7 @@ static inline int guestwidth_to_adjustwidth(int gaw)
657 + static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
658 + int guest_width)
659 + {
660 +- int adjust_width, agaw;
661 ++ int adjust_width, agaw, cap_width;
662 + unsigned long sagaw;
663 + int err;
664 +
665 +@@ -1942,8 +1942,9 @@ static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
666 + domain_reserve_special_ranges(domain);
667 +
668 + /* calculate AGAW */
669 +- if (guest_width > cap_mgaw(iommu->cap))
670 +- guest_width = cap_mgaw(iommu->cap);
671 ++ cap_width = min_t(int, cap_mgaw(iommu->cap), agaw_to_width(iommu->agaw));
672 ++ if (guest_width > cap_width)
673 ++ guest_width = cap_width;
674 + domain->gaw = guest_width;
675 + adjust_width = guestwidth_to_adjustwidth(guest_width);
676 + agaw = width_to_agaw(adjust_width);
677 +diff --git a/drivers/ipack/carriers/tpci200.c b/drivers/ipack/carriers/tpci200.c
678 +index 7895320e50c1e..4c8da6af25168 100644
679 +--- a/drivers/ipack/carriers/tpci200.c
680 ++++ b/drivers/ipack/carriers/tpci200.c
681 +@@ -94,16 +94,13 @@ static void tpci200_unregister(struct tpci200_board *tpci200)
682 + free_irq(tpci200->info->pdev->irq, (void *) tpci200);
683 +
684 + pci_iounmap(tpci200->info->pdev, tpci200->info->interface_regs);
685 +- pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs);
686 +
687 + pci_release_region(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR);
688 + pci_release_region(tpci200->info->pdev, TPCI200_IO_ID_INT_SPACES_BAR);
689 + pci_release_region(tpci200->info->pdev, TPCI200_MEM16_SPACE_BAR);
690 + pci_release_region(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR);
691 +- pci_release_region(tpci200->info->pdev, TPCI200_CFG_MEM_BAR);
692 +
693 + pci_disable_device(tpci200->info->pdev);
694 +- pci_dev_put(tpci200->info->pdev);
695 + }
696 +
697 + static void tpci200_enable_irq(struct tpci200_board *tpci200,
698 +@@ -262,7 +259,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
699 + "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 2 !",
700 + tpci200->info->pdev->bus->number,
701 + tpci200->info->pdev->devfn);
702 +- goto out_disable_pci;
703 ++ goto err_disable_device;
704 + }
705 +
706 + /* Request IO ID INT space (Bar 3) */
707 +@@ -274,7 +271,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
708 + "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 3 !",
709 + tpci200->info->pdev->bus->number,
710 + tpci200->info->pdev->devfn);
711 +- goto out_release_ip_space;
712 ++ goto err_ip_interface_bar;
713 + }
714 +
715 + /* Request MEM8 space (Bar 5) */
716 +@@ -285,7 +282,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
717 + "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 5!",
718 + tpci200->info->pdev->bus->number,
719 + tpci200->info->pdev->devfn);
720 +- goto out_release_ioid_int_space;
721 ++ goto err_io_id_int_spaces_bar;
722 + }
723 +
724 + /* Request MEM16 space (Bar 4) */
725 +@@ -296,7 +293,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
726 + "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 4!",
727 + tpci200->info->pdev->bus->number,
728 + tpci200->info->pdev->devfn);
729 +- goto out_release_mem8_space;
730 ++ goto err_mem8_space_bar;
731 + }
732 +
733 + /* Map internal tpci200 driver user space */
734 +@@ -310,7 +307,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
735 + tpci200->info->pdev->bus->number,
736 + tpci200->info->pdev->devfn);
737 + res = -ENOMEM;
738 +- goto out_release_mem8_space;
739 ++ goto err_mem16_space_bar;
740 + }
741 +
742 + /* Initialize lock that protects interface_regs */
743 +@@ -349,18 +346,22 @@ static int tpci200_register(struct tpci200_board *tpci200)
744 + "(bn 0x%X, sn 0x%X) unable to register IRQ !",
745 + tpci200->info->pdev->bus->number,
746 + tpci200->info->pdev->devfn);
747 +- goto out_release_ioid_int_space;
748 ++ goto err_interface_regs;
749 + }
750 +
751 + return 0;
752 +
753 +-out_release_mem8_space:
754 ++err_interface_regs:
755 ++ pci_iounmap(tpci200->info->pdev, tpci200->info->interface_regs);
756 ++err_mem16_space_bar:
757 ++ pci_release_region(tpci200->info->pdev, TPCI200_MEM16_SPACE_BAR);
758 ++err_mem8_space_bar:
759 + pci_release_region(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR);
760 +-out_release_ioid_int_space:
761 ++err_io_id_int_spaces_bar:
762 + pci_release_region(tpci200->info->pdev, TPCI200_IO_ID_INT_SPACES_BAR);
763 +-out_release_ip_space:
764 ++err_ip_interface_bar:
765 + pci_release_region(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR);
766 +-out_disable_pci:
767 ++err_disable_device:
768 + pci_disable_device(tpci200->info->pdev);
769 + return res;
770 + }
771 +@@ -532,7 +533,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
772 + tpci200->info = kzalloc(sizeof(struct tpci200_infos), GFP_KERNEL);
773 + if (!tpci200->info) {
774 + ret = -ENOMEM;
775 +- goto out_err_info;
776 ++ goto err_tpci200;
777 + }
778 +
779 + pci_dev_get(pdev);
780 +@@ -543,7 +544,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
781 + if (ret) {
782 + dev_err(&pdev->dev, "Failed to allocate PCI Configuration Memory");
783 + ret = -EBUSY;
784 +- goto out_err_pci_request;
785 ++ goto err_tpci200_info;
786 + }
787 + tpci200->info->cfg_regs = ioremap_nocache(
788 + pci_resource_start(pdev, TPCI200_CFG_MEM_BAR),
789 +@@ -551,7 +552,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
790 + if (!tpci200->info->cfg_regs) {
791 + dev_err(&pdev->dev, "Failed to map PCI Configuration Memory");
792 + ret = -EFAULT;
793 +- goto out_err_ioremap;
794 ++ goto err_request_region;
795 + }
796 +
797 + /* Disable byte swapping for 16 bit IP module access. This will ensure
798 +@@ -574,7 +575,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
799 + if (ret) {
800 + dev_err(&pdev->dev, "error during tpci200 install\n");
801 + ret = -ENODEV;
802 +- goto out_err_install;
803 ++ goto err_cfg_regs;
804 + }
805 +
806 + /* Register the carrier in the industry pack bus driver */
807 +@@ -586,7 +587,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
808 + dev_err(&pdev->dev,
809 + "error registering the carrier on ipack driver\n");
810 + ret = -EFAULT;
811 +- goto out_err_bus_register;
812 ++ goto err_tpci200_install;
813 + }
814 +
815 + /* save the bus number given by ipack to logging purpose */
816 +@@ -597,19 +598,16 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
817 + tpci200_create_device(tpci200, i);
818 + return 0;
819 +
820 +-out_err_bus_register:
821 ++err_tpci200_install:
822 + tpci200_uninstall(tpci200);
823 +- /* tpci200->info->cfg_regs is unmapped in tpci200_uninstall */
824 +- tpci200->info->cfg_regs = NULL;
825 +-out_err_install:
826 +- if (tpci200->info->cfg_regs)
827 +- iounmap(tpci200->info->cfg_regs);
828 +-out_err_ioremap:
829 ++err_cfg_regs:
830 ++ pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs);
831 ++err_request_region:
832 + pci_release_region(pdev, TPCI200_CFG_MEM_BAR);
833 +-out_err_pci_request:
834 +- pci_dev_put(pdev);
835 ++err_tpci200_info:
836 + kfree(tpci200->info);
837 +-out_err_info:
838 ++ pci_dev_put(pdev);
839 ++err_tpci200:
840 + kfree(tpci200);
841 + return ret;
842 + }
843 +@@ -619,6 +617,12 @@ static void __tpci200_pci_remove(struct tpci200_board *tpci200)
844 + ipack_bus_unregister(tpci200->info->ipack_bus);
845 + tpci200_uninstall(tpci200);
846 +
847 ++ pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs);
848 ++
849 ++ pci_release_region(tpci200->info->pdev, TPCI200_CFG_MEM_BAR);
850 ++
851 ++ pci_dev_put(tpci200->info->pdev);
852 ++
853 + kfree(tpci200->info);
854 + kfree(tpci200);
855 + }
856 +diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
857 +index 22c454c7aaca6..8e09586f880f1 100644
858 +--- a/drivers/mmc/host/dw_mmc.c
859 ++++ b/drivers/mmc/host/dw_mmc.c
860 +@@ -2043,8 +2043,8 @@ static void dw_mci_tasklet_func(unsigned long priv)
861 + continue;
862 + }
863 +
864 +- dw_mci_stop_dma(host);
865 + send_stop_abort(host, data);
866 ++ dw_mci_stop_dma(host);
867 + state = STATE_SENDING_STOP;
868 + break;
869 + }
870 +@@ -2068,10 +2068,10 @@ static void dw_mci_tasklet_func(unsigned long priv)
871 + */
872 + if (test_and_clear_bit(EVENT_DATA_ERROR,
873 + &host->pending_events)) {
874 +- dw_mci_stop_dma(host);
875 + if (!(host->data_status & (SDMMC_INT_DRTO |
876 + SDMMC_INT_EBE)))
877 + send_stop_abort(host, data);
878 ++ dw_mci_stop_dma(host);
879 + state = STATE_DATA_ERROR;
880 + break;
881 + }
882 +@@ -2104,10 +2104,10 @@ static void dw_mci_tasklet_func(unsigned long priv)
883 + */
884 + if (test_and_clear_bit(EVENT_DATA_ERROR,
885 + &host->pending_events)) {
886 +- dw_mci_stop_dma(host);
887 + if (!(host->data_status & (SDMMC_INT_DRTO |
888 + SDMMC_INT_EBE)))
889 + send_stop_abort(host, data);
890 ++ dw_mci_stop_dma(host);
891 + state = STATE_DATA_ERROR;
892 + break;
893 + }
894 +diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
895 +index b4f6e1a67dd9a..b89c474e6b6bf 100644
896 +--- a/drivers/net/dsa/lan9303-core.c
897 ++++ b/drivers/net/dsa/lan9303-core.c
898 +@@ -566,12 +566,12 @@ static int lan9303_alr_make_entry_raw(struct lan9303 *chip, u32 dat0, u32 dat1)
899 + return 0;
900 + }
901 +
902 +-typedef void alr_loop_cb_t(struct lan9303 *chip, u32 dat0, u32 dat1,
903 +- int portmap, void *ctx);
904 ++typedef int alr_loop_cb_t(struct lan9303 *chip, u32 dat0, u32 dat1,
905 ++ int portmap, void *ctx);
906 +
907 +-static void lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
908 ++static int lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
909 + {
910 +- int i;
911 ++ int ret = 0, i;
912 +
913 + mutex_lock(&chip->alr_mutex);
914 + lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,
915 +@@ -591,13 +591,17 @@ static void lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
916 + LAN9303_ALR_DAT1_PORT_BITOFFS;
917 + portmap = alrport_2_portmap[alrport];
918 +
919 +- cb(chip, dat0, dat1, portmap, ctx);
920 ++ ret = cb(chip, dat0, dat1, portmap, ctx);
921 ++ if (ret)
922 ++ break;
923 +
924 + lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,
925 + LAN9303_ALR_CMD_GET_NEXT);
926 + lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0);
927 + }
928 + mutex_unlock(&chip->alr_mutex);
929 ++
930 ++ return ret;
931 + }
932 +
933 + static void alr_reg_to_mac(u32 dat0, u32 dat1, u8 mac[6])
934 +@@ -615,18 +619,20 @@ struct del_port_learned_ctx {
935 + };
936 +
937 + /* Clear learned (non-static) entry on given port */
938 +-static void alr_loop_cb_del_port_learned(struct lan9303 *chip, u32 dat0,
939 +- u32 dat1, int portmap, void *ctx)
940 ++static int alr_loop_cb_del_port_learned(struct lan9303 *chip, u32 dat0,
941 ++ u32 dat1, int portmap, void *ctx)
942 + {
943 + struct del_port_learned_ctx *del_ctx = ctx;
944 + int port = del_ctx->port;
945 +
946 + if (((BIT(port) & portmap) == 0) || (dat1 & LAN9303_ALR_DAT1_STATIC))
947 +- return;
948 ++ return 0;
949 +
950 + /* learned entries has only one port, we can just delete */
951 + dat1 &= ~LAN9303_ALR_DAT1_VALID; /* delete entry */
952 + lan9303_alr_make_entry_raw(chip, dat0, dat1);
953 ++
954 ++ return 0;
955 + }
956 +
957 + struct port_fdb_dump_ctx {
958 +@@ -635,19 +641,19 @@ struct port_fdb_dump_ctx {
959 + dsa_fdb_dump_cb_t *cb;
960 + };
961 +
962 +-static void alr_loop_cb_fdb_port_dump(struct lan9303 *chip, u32 dat0,
963 +- u32 dat1, int portmap, void *ctx)
964 ++static int alr_loop_cb_fdb_port_dump(struct lan9303 *chip, u32 dat0,
965 ++ u32 dat1, int portmap, void *ctx)
966 + {
967 + struct port_fdb_dump_ctx *dump_ctx = ctx;
968 + u8 mac[ETH_ALEN];
969 + bool is_static;
970 +
971 + if ((BIT(dump_ctx->port) & portmap) == 0)
972 +- return;
973 ++ return 0;
974 +
975 + alr_reg_to_mac(dat0, dat1, mac);
976 + is_static = !!(dat1 & LAN9303_ALR_DAT1_STATIC);
977 +- dump_ctx->cb(mac, 0, is_static, dump_ctx->data);
978 ++ return dump_ctx->cb(mac, 0, is_static, dump_ctx->data);
979 + }
980 +
981 + /* Set a static ALR entry. Delete entry if port_map is zero */
982 +@@ -1214,9 +1220,7 @@ static int lan9303_port_fdb_dump(struct dsa_switch *ds, int port,
983 + };
984 +
985 + dev_dbg(chip->dev, "%s(%d)\n", __func__, port);
986 +- lan9303_alr_loop(chip, alr_loop_cb_fdb_port_dump, &dump_ctx);
987 +-
988 +- return 0;
989 ++ return lan9303_alr_loop(chip, alr_loop_cb_fdb_port_dump, &dump_ctx);
990 + }
991 +
992 + static int lan9303_port_mdb_prepare(struct dsa_switch *ds, int port,
993 +diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
994 +index 6335c4ea09577..2ff6a0be97de6 100644
995 +--- a/drivers/net/dsa/mt7530.c
996 ++++ b/drivers/net/dsa/mt7530.c
997 +@@ -54,6 +54,7 @@ static const struct mt7530_mib_desc mt7530_mib[] = {
998 + MIB_DESC(2, 0x48, "TxBytes"),
999 + MIB_DESC(1, 0x60, "RxDrop"),
1000 + MIB_DESC(1, 0x64, "RxFiltering"),
1001 ++ MIB_DESC(1, 0x68, "RxUnicast"),
1002 + MIB_DESC(1, 0x6c, "RxMulticast"),
1003 + MIB_DESC(1, 0x70, "RxBroadcast"),
1004 + MIB_DESC(1, 0x74, "RxAlignErr"),
1005 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1006 +index ebcf4ea66385a..55827ac65a154 100644
1007 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1008 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1009 +@@ -282,6 +282,26 @@ static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
1010 + return md_dst->u.port_info.port_id;
1011 + }
1012 +
1013 ++static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
1014 ++ struct bnxt_tx_ring_info *txr,
1015 ++ struct netdev_queue *txq)
1016 ++{
1017 ++ netif_tx_stop_queue(txq);
1018 ++
1019 ++ /* netif_tx_stop_queue() must be done before checking
1020 ++ * tx index in bnxt_tx_avail() below, because in
1021 ++ * bnxt_tx_int(), we update tx index before checking for
1022 ++ * netif_tx_queue_stopped().
1023 ++ */
1024 ++ smp_mb();
1025 ++ if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) {
1026 ++ netif_tx_wake_queue(txq);
1027 ++ return false;
1028 ++ }
1029 ++
1030 ++ return true;
1031 ++}
1032 ++
1033 + static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
1034 + {
1035 + struct bnxt *bp = netdev_priv(dev);
1036 +@@ -309,8 +329,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
1037 +
1038 + free_size = bnxt_tx_avail(bp, txr);
1039 + if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
1040 +- netif_tx_stop_queue(txq);
1041 +- return NETDEV_TX_BUSY;
1042 ++ if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
1043 ++ return NETDEV_TX_BUSY;
1044 + }
1045 +
1046 + length = skb->len;
1047 +@@ -521,16 +541,7 @@ tx_done:
1048 + if (skb->xmit_more && !tx_buf->is_push)
1049 + bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);
1050 +
1051 +- netif_tx_stop_queue(txq);
1052 +-
1053 +- /* netif_tx_stop_queue() must be done before checking
1054 +- * tx index in bnxt_tx_avail() below, because in
1055 +- * bnxt_tx_int(), we update tx index before checking for
1056 +- * netif_tx_queue_stopped().
1057 +- */
1058 +- smp_mb();
1059 +- if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
1060 +- netif_tx_wake_queue(txq);
1061 ++ bnxt_txr_netif_try_stop_queue(bp, txr, txq);
1062 + }
1063 + return NETDEV_TX_OK;
1064 +
1065 +@@ -614,14 +625,9 @@ next_tx_int:
1066 + smp_mb();
1067 +
1068 + if (unlikely(netif_tx_queue_stopped(txq)) &&
1069 +- (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
1070 +- __netif_tx_lock(txq, smp_processor_id());
1071 +- if (netif_tx_queue_stopped(txq) &&
1072 +- bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
1073 +- txr->dev_state != BNXT_DEV_STATE_CLOSING)
1074 +- netif_tx_wake_queue(txq);
1075 +- __netif_tx_unlock(txq);
1076 +- }
1077 ++ bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
1078 ++ READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
1079 ++ netif_tx_wake_queue(txq);
1080 + }
1081 +
1082 + static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
1083 +@@ -6263,10 +6269,9 @@ static void bnxt_disable_napi(struct bnxt *bp)
1084 + for (i = 0; i < bp->cp_nr_rings; i++) {
1085 + struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
1086 +
1087 ++ napi_disable(&bp->bnapi[i]->napi);
1088 + if (bp->bnapi[i]->rx_ring)
1089 + cancel_work_sync(&cpr->dim.work);
1090 +-
1091 +- napi_disable(&bp->bnapi[i]->napi);
1092 + }
1093 + }
1094 +
1095 +@@ -6294,9 +6299,11 @@ void bnxt_tx_disable(struct bnxt *bp)
1096 + if (bp->tx_ring) {
1097 + for (i = 0; i < bp->tx_nr_rings; i++) {
1098 + txr = &bp->tx_ring[i];
1099 +- txr->dev_state = BNXT_DEV_STATE_CLOSING;
1100 ++ WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
1101 + }
1102 + }
1103 ++ /* Make sure napi polls see @dev_state change */
1104 ++ synchronize_net();
1105 + /* Drop carrier first to prevent TX timeout */
1106 + netif_carrier_off(bp->dev);
1107 + /* Stop all TX queues */
1108 +@@ -6310,8 +6317,10 @@ void bnxt_tx_enable(struct bnxt *bp)
1109 +
1110 + for (i = 0; i < bp->tx_nr_rings; i++) {
1111 + txr = &bp->tx_ring[i];
1112 +- txr->dev_state = 0;
1113 ++ WRITE_ONCE(txr->dev_state, 0);
1114 + }
1115 ++ /* Make sure napi polls see @dev_state change */
1116 ++ synchronize_net();
1117 + netif_tx_wake_all_queues(bp->dev);
1118 + if (bp->link_info.link_up)
1119 + netif_carrier_on(bp->dev);
1120 +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
1121 +index 6ed8294f7df8b..a15845e511b2c 100644
1122 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
1123 ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
1124 +@@ -3158,8 +3158,10 @@ int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
1125 +
1126 + indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr);
1127 + ret = QLCRD32(adapter, indirect_addr, &err);
1128 +- if (err == -EIO)
1129 ++ if (err == -EIO) {
1130 ++ qlcnic_83xx_unlock_flash(adapter);
1131 + return err;
1132 ++ }
1133 +
1134 + word = ret;
1135 + *(u32 *)p_data = word;
1136 +diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
1137 +index 8c636c4932274..1001e9a2edd4f 100644
1138 +--- a/drivers/net/hamradio/6pack.c
1139 ++++ b/drivers/net/hamradio/6pack.c
1140 +@@ -859,6 +859,12 @@ static void decode_data(struct sixpack *sp, unsigned char inbyte)
1141 + return;
1142 + }
1143 +
1144 ++ if (sp->rx_count_cooked + 2 >= sizeof(sp->cooked_buf)) {
1145 ++ pr_err("6pack: cooked buffer overrun, data loss\n");
1146 ++ sp->rx_count = 0;
1147 ++ return;
1148 ++ }
1149 ++
1150 + buf = sp->raw_buf;
1151 + sp->cooked_buf[sp->rx_count_cooked++] =
1152 + buf[0] | ((buf[1] << 2) & 0xc0);
1153 +diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
1154 +index 06aadebc2d5ba..ed60e691cc2b4 100644
1155 +--- a/drivers/net/ieee802154/mac802154_hwsim.c
1156 ++++ b/drivers/net/ieee802154/mac802154_hwsim.c
1157 +@@ -432,7 +432,7 @@ static int hwsim_new_edge_nl(struct sk_buff *msg, struct genl_info *info)
1158 + struct hwsim_edge *e;
1159 + u32 v0, v1;
1160 +
1161 +- if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] &&
1162 ++ if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] ||
1163 + !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
1164 + return -EINVAL;
1165 +
1166 +@@ -546,7 +546,7 @@ static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info)
1167 + u32 v0, v1;
1168 + u8 lqi;
1169 +
1170 +- if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] &&
1171 ++ if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] ||
1172 + !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
1173 + return -EINVAL;
1174 +
1175 +@@ -555,7 +555,7 @@ static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info)
1176 + hwsim_edge_policy, NULL))
1177 + return -EINVAL;
1178 +
1179 +- if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID] &&
1180 ++ if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID] ||
1181 + !edge_attrs[MAC802154_HWSIM_EDGE_ATTR_LQI])
1182 + return -EINVAL;
1183 +
1184 +diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
1185 +index 0a86f1e4c02f5..c16f875ed9ead 100644
1186 +--- a/drivers/net/phy/mdio-mux.c
1187 ++++ b/drivers/net/phy/mdio-mux.c
1188 +@@ -85,6 +85,17 @@ out:
1189 +
1190 + static int parent_count;
1191 +
1192 ++static void mdio_mux_uninit_children(struct mdio_mux_parent_bus *pb)
1193 ++{
1194 ++ struct mdio_mux_child_bus *cb = pb->children;
1195 ++
1196 ++ while (cb) {
1197 ++ mdiobus_unregister(cb->mii_bus);
1198 ++ mdiobus_free(cb->mii_bus);
1199 ++ cb = cb->next;
1200 ++ }
1201 ++}
1202 ++
1203 + int mdio_mux_init(struct device *dev,
1204 + struct device_node *mux_node,
1205 + int (*switch_fn)(int cur, int desired, void *data),
1206 +@@ -147,7 +158,7 @@ int mdio_mux_init(struct device *dev,
1207 + cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL);
1208 + if (!cb) {
1209 + ret_val = -ENOMEM;
1210 +- continue;
1211 ++ goto err_loop;
1212 + }
1213 + cb->bus_number = v;
1214 + cb->parent = pb;
1215 +@@ -155,8 +166,7 @@ int mdio_mux_init(struct device *dev,
1216 + cb->mii_bus = mdiobus_alloc();
1217 + if (!cb->mii_bus) {
1218 + ret_val = -ENOMEM;
1219 +- devm_kfree(dev, cb);
1220 +- continue;
1221 ++ goto err_loop;
1222 + }
1223 + cb->mii_bus->priv = cb;
1224 +
1225 +@@ -168,11 +178,15 @@ int mdio_mux_init(struct device *dev,
1226 + cb->mii_bus->write = mdio_mux_write;
1227 + r = of_mdiobus_register(cb->mii_bus, child_bus_node);
1228 + if (r) {
1229 ++ mdiobus_free(cb->mii_bus);
1230 ++ if (r == -EPROBE_DEFER) {
1231 ++ ret_val = r;
1232 ++ goto err_loop;
1233 ++ }
1234 ++ devm_kfree(dev, cb);
1235 + dev_err(dev,
1236 + "Error: Failed to register MDIO bus for child %pOF\n",
1237 + child_bus_node);
1238 +- mdiobus_free(cb->mii_bus);
1239 +- devm_kfree(dev, cb);
1240 + } else {
1241 + cb->next = pb->children;
1242 + pb->children = cb;
1243 +@@ -185,6 +199,10 @@ int mdio_mux_init(struct device *dev,
1244 +
1245 + dev_err(dev, "Error: No acceptable child buses found\n");
1246 + devm_kfree(dev, pb);
1247 ++
1248 ++err_loop:
1249 ++ mdio_mux_uninit_children(pb);
1250 ++ of_node_put(child_bus_node);
1251 + err_pb_kz:
1252 + put_device(&parent_bus->dev);
1253 + err_parent_bus:
1254 +@@ -196,14 +214,8 @@ EXPORT_SYMBOL_GPL(mdio_mux_init);
1255 + void mdio_mux_uninit(void *mux_handle)
1256 + {
1257 + struct mdio_mux_parent_bus *pb = mux_handle;
1258 +- struct mdio_mux_child_bus *cb = pb->children;
1259 +-
1260 +- while (cb) {
1261 +- mdiobus_unregister(cb->mii_bus);
1262 +- mdiobus_free(cb->mii_bus);
1263 +- cb = cb->next;
1264 +- }
1265 +
1266 ++ mdio_mux_uninit_children(pb);
1267 + put_device(&pb->mii_bus->dev);
1268 + }
1269 + EXPORT_SYMBOL_GPL(mdio_mux_uninit);
1270 +diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
1271 +index 1af47aaa7ba57..dc9de8731c564 100644
1272 +--- a/drivers/net/ppp/ppp_generic.c
1273 ++++ b/drivers/net/ppp/ppp_generic.c
1274 +@@ -1125,7 +1125,7 @@ static int ppp_nl_newlink(struct net *src_net, struct net_device *dev,
1275 + * the PPP unit identifer as suffix (i.e. ppp<unit_id>). This allows
1276 + * userspace to infer the device name using to the PPPIOCGUNIT ioctl.
1277 + */
1278 +- if (!tb[IFLA_IFNAME])
1279 ++ if (!tb[IFLA_IFNAME] || !nla_len(tb[IFLA_IFNAME]) || !*(char *)nla_data(tb[IFLA_IFNAME]))
1280 + conf.ifname_is_set = false;
1281 +
1282 + err = ppp_dev_configure(src_net, dev, &conf);
1283 +diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
1284 +index 5bd07cdb3e6e2..ac5f72077b267 100644
1285 +--- a/drivers/net/usb/lan78xx.c
1286 ++++ b/drivers/net/usb/lan78xx.c
1287 +@@ -1172,7 +1172,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
1288 + {
1289 + struct phy_device *phydev = dev->net->phydev;
1290 + struct ethtool_link_ksettings ecmd;
1291 +- int ladv, radv, ret;
1292 ++ int ladv, radv, ret, link;
1293 + u32 buf;
1294 +
1295 + /* clear LAN78xx interrupt status */
1296 +@@ -1180,9 +1180,12 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
1297 + if (unlikely(ret < 0))
1298 + return -EIO;
1299 +
1300 ++ mutex_lock(&phydev->lock);
1301 + phy_read_status(phydev);
1302 ++ link = phydev->link;
1303 ++ mutex_unlock(&phydev->lock);
1304 +
1305 +- if (!phydev->link && dev->link_on) {
1306 ++ if (!link && dev->link_on) {
1307 + dev->link_on = false;
1308 +
1309 + /* reset MAC */
1310 +@@ -1195,7 +1198,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
1311 + return -EIO;
1312 +
1313 + del_timer(&dev->stat_monitor);
1314 +- } else if (phydev->link && !dev->link_on) {
1315 ++ } else if (link && !dev->link_on) {
1316 + dev->link_on = true;
1317 +
1318 + phy_ethtool_ksettings_get(phydev, &ecmd);
1319 +@@ -1485,9 +1488,14 @@ static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1320 +
1321 + static u32 lan78xx_get_link(struct net_device *net)
1322 + {
1323 ++ u32 link;
1324 ++
1325 ++ mutex_lock(&net->phydev->lock);
1326 + phy_read_status(net->phydev);
1327 ++ link = net->phydev->link;
1328 ++ mutex_unlock(&net->phydev->lock);
1329 +
1330 +- return net->phydev->link;
1331 ++ return link;
1332 + }
1333 +
1334 + static void lan78xx_get_drvinfo(struct net_device *net,
1335 +diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
1336 +index 7a364eca46d64..f083fb9038c36 100644
1337 +--- a/drivers/net/wireless/ath/ath.h
1338 ++++ b/drivers/net/wireless/ath/ath.h
1339 +@@ -197,12 +197,13 @@ struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
1340 + bool ath_is_mybeacon(struct ath_common *common, struct ieee80211_hdr *hdr);
1341 +
1342 + void ath_hw_setbssidmask(struct ath_common *common);
1343 +-void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key);
1344 ++void ath_key_delete(struct ath_common *common, u8 hw_key_idx);
1345 + int ath_key_config(struct ath_common *common,
1346 + struct ieee80211_vif *vif,
1347 + struct ieee80211_sta *sta,
1348 + struct ieee80211_key_conf *key);
1349 + bool ath_hw_keyreset(struct ath_common *common, u16 entry);
1350 ++bool ath_hw_keysetmac(struct ath_common *common, u16 entry, const u8 *mac);
1351 + void ath_hw_cycle_counters_update(struct ath_common *common);
1352 + int32_t ath_hw_get_listen_time(struct ath_common *common);
1353 +
1354 +diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
1355 +index 16e052d02c940..0f4836fc3b7c1 100644
1356 +--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
1357 ++++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
1358 +@@ -522,7 +522,7 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1359 + }
1360 + break;
1361 + case DISABLE_KEY:
1362 +- ath_key_delete(common, key);
1363 ++ ath_key_delete(common, key->hw_key_idx);
1364 + break;
1365 + default:
1366 + ret = -EINVAL;
1367 +diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
1368 +index a82ad739ab806..16a7bae62b7d3 100644
1369 +--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
1370 ++++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
1371 +@@ -1460,7 +1460,7 @@ static int ath9k_htc_set_key(struct ieee80211_hw *hw,
1372 + }
1373 + break;
1374 + case DISABLE_KEY:
1375 +- ath_key_delete(common, key);
1376 ++ ath_key_delete(common, key->hw_key_idx);
1377 + break;
1378 + default:
1379 + ret = -EINVAL;
1380 +diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
1381 +index 68956cdc8c9ae..4b5687b6c0c9a 100644
1382 +--- a/drivers/net/wireless/ath/ath9k/hw.h
1383 ++++ b/drivers/net/wireless/ath/ath9k/hw.h
1384 +@@ -818,6 +818,7 @@ struct ath_hw {
1385 + struct ath9k_pacal_info pacal_info;
1386 + struct ar5416Stats stats;
1387 + struct ath9k_tx_queue_info txq[ATH9K_NUM_TX_QUEUES];
1388 ++ DECLARE_BITMAP(pending_del_keymap, ATH_KEYMAX);
1389 +
1390 + enum ath9k_int imask;
1391 + u32 imrs2_reg;
1392 +diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
1393 +index e929020d7c9cb..a0097bebcba3b 100644
1394 +--- a/drivers/net/wireless/ath/ath9k/main.c
1395 ++++ b/drivers/net/wireless/ath/ath9k/main.c
1396 +@@ -823,12 +823,80 @@ exit:
1397 + ieee80211_free_txskb(hw, skb);
1398 + }
1399 +
1400 ++static bool ath9k_txq_list_has_key(struct list_head *txq_list, u32 keyix)
1401 ++{
1402 ++ struct ath_buf *bf;
1403 ++ struct ieee80211_tx_info *txinfo;
1404 ++ struct ath_frame_info *fi;
1405 ++
1406 ++ list_for_each_entry(bf, txq_list, list) {
1407 ++ if (bf->bf_state.stale || !bf->bf_mpdu)
1408 ++ continue;
1409 ++
1410 ++ txinfo = IEEE80211_SKB_CB(bf->bf_mpdu);
1411 ++ fi = (struct ath_frame_info *)&txinfo->rate_driver_data[0];
1412 ++ if (fi->keyix == keyix)
1413 ++ return true;
1414 ++ }
1415 ++
1416 ++ return false;
1417 ++}
1418 ++
1419 ++static bool ath9k_txq_has_key(struct ath_softc *sc, u32 keyix)
1420 ++{
1421 ++ struct ath_hw *ah = sc->sc_ah;
1422 ++ int i;
1423 ++ struct ath_txq *txq;
1424 ++ bool key_in_use = false;
1425 ++
1426 ++ for (i = 0; !key_in_use && i < ATH9K_NUM_TX_QUEUES; i++) {
1427 ++ if (!ATH_TXQ_SETUP(sc, i))
1428 ++ continue;
1429 ++ txq = &sc->tx.txq[i];
1430 ++ if (!txq->axq_depth)
1431 ++ continue;
1432 ++ if (!ath9k_hw_numtxpending(ah, txq->axq_qnum))
1433 ++ continue;
1434 ++
1435 ++ ath_txq_lock(sc, txq);
1436 ++ key_in_use = ath9k_txq_list_has_key(&txq->axq_q, keyix);
1437 ++ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1438 ++ int idx = txq->txq_tailidx;
1439 ++
1440 ++ while (!key_in_use &&
1441 ++ !list_empty(&txq->txq_fifo[idx])) {
1442 ++ key_in_use = ath9k_txq_list_has_key(
1443 ++ &txq->txq_fifo[idx], keyix);
1444 ++ INCR(idx, ATH_TXFIFO_DEPTH);
1445 ++ }
1446 ++ }
1447 ++ ath_txq_unlock(sc, txq);
1448 ++ }
1449 ++
1450 ++ return key_in_use;
1451 ++}
1452 ++
1453 ++static void ath9k_pending_key_del(struct ath_softc *sc, u8 keyix)
1454 ++{
1455 ++ struct ath_hw *ah = sc->sc_ah;
1456 ++ struct ath_common *common = ath9k_hw_common(ah);
1457 ++
1458 ++ if (!test_bit(keyix, ah->pending_del_keymap) ||
1459 ++ ath9k_txq_has_key(sc, keyix))
1460 ++ return;
1461 ++
1462 ++ /* No more TXQ frames point to this key cache entry, so delete it. */
1463 ++ clear_bit(keyix, ah->pending_del_keymap);
1464 ++ ath_key_delete(common, keyix);
1465 ++}
1466 ++
1467 + static void ath9k_stop(struct ieee80211_hw *hw)
1468 + {
1469 + struct ath_softc *sc = hw->priv;
1470 + struct ath_hw *ah = sc->sc_ah;
1471 + struct ath_common *common = ath9k_hw_common(ah);
1472 + bool prev_idle;
1473 ++ int i;
1474 +
1475 + ath9k_deinit_channel_context(sc);
1476 +
1477 +@@ -896,6 +964,14 @@ static void ath9k_stop(struct ieee80211_hw *hw)
1478 +
1479 + spin_unlock_bh(&sc->sc_pcu_lock);
1480 +
1481 ++ for (i = 0; i < ATH_KEYMAX; i++)
1482 ++ ath9k_pending_key_del(sc, i);
1483 ++
1484 ++ /* Clear key cache entries explicitly to get rid of any potentially
1485 ++ * remaining keys.
1486 ++ */
1487 ++ ath9k_cmn_init_crypto(sc->sc_ah);
1488 ++
1489 + ath9k_ps_restore(sc);
1490 +
1491 + sc->ps_idle = prev_idle;
1492 +@@ -1541,12 +1617,11 @@ static void ath9k_del_ps_key(struct ath_softc *sc,
1493 + {
1494 + struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1495 + struct ath_node *an = (struct ath_node *) sta->drv_priv;
1496 +- struct ieee80211_key_conf ps_key = { .hw_key_idx = an->ps_key };
1497 +
1498 + if (!an->ps_key)
1499 + return;
1500 +
1501 +- ath_key_delete(common, &ps_key);
1502 ++ ath_key_delete(common, an->ps_key);
1503 + an->ps_key = 0;
1504 + an->key_idx[0] = 0;
1505 + }
1506 +@@ -1708,6 +1783,12 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
1507 + if (sta)
1508 + an = (struct ath_node *)sta->drv_priv;
1509 +
1510 ++ /* Delete pending key cache entries if no more frames are pointing to
1511 ++ * them in TXQs.
1512 ++ */
1513 ++ for (i = 0; i < ATH_KEYMAX; i++)
1514 ++ ath9k_pending_key_del(sc, i);
1515 ++
1516 + switch (cmd) {
1517 + case SET_KEY:
1518 + if (sta)
1519 +@@ -1737,7 +1818,15 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
1520 + }
1521 + break;
1522 + case DISABLE_KEY:
1523 +- ath_key_delete(common, key);
1524 ++ if (ath9k_txq_has_key(sc, key->hw_key_idx)) {
1525 ++ /* Delay key cache entry deletion until there are no
1526 ++ * remaining TXQ frames pointing to this entry.
1527 ++ */
1528 ++ set_bit(key->hw_key_idx, sc->sc_ah->pending_del_keymap);
1529 ++ ath_hw_keysetmac(common, key->hw_key_idx, NULL);
1530 ++ } else {
1531 ++ ath_key_delete(common, key->hw_key_idx);
1532 ++ }
1533 + if (an) {
1534 + for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) {
1535 + if (an->key_idx[i] != key->hw_key_idx)
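
The ath9k hunks above defer key cache deletion: DISABLE_KEY only marks an entry pending while queued frames still reference its keyix, and the pending set is flushed once the TX queues drain. Below is a minimal userspace sketch of that deferred-delete pattern; every name in it (pending_del, txq_refs, hw_key_delete) is illustrative, not the driver's API.

    #include <stdbool.h>
    #include <stdio.h>

    #define KEY_MAX 128

    static bool pending_del[KEY_MAX];  /* keys waiting for TX queues to drain */
    static int  txq_refs[KEY_MAX];     /* queued frames still using each key  */

    static void hw_key_delete(int keyix)
    {
        printf("key %d removed from key cache\n", keyix);
    }

    /* DISABLE_KEY path: defer deletion while the key is referenced. */
    static void disable_key(int keyix)
    {
        if (txq_refs[keyix] > 0) {
            pending_del[keyix] = true;
            return;
        }
        hw_key_delete(keyix);
    }

    /* Flush point (stop/set_key in the driver): delete what has drained. */
    static void flush_pending_keys(void)
    {
        for (int i = 0; i < KEY_MAX; i++) {
            if (pending_del[i] && txq_refs[i] == 0) {
                pending_del[i] = false;
                hw_key_delete(i);
            }
        }
    }

    int main(void)
    {
        txq_refs[5] = 2;       /* two frames in a TX queue use key 5 */
        disable_key(5);        /* deferred, nothing deleted yet      */
        txq_refs[5] = 0;       /* the queue drains                   */
        flush_pending_keys();  /* now the cache entry really goes    */
        return 0;
    }

The real driver additionally clears the entry's MAC address up front (ath_hw_keysetmac(..., NULL), exported in the next file) so RX stops matching the entry while TX drains.
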
1536 +diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c
1537 +index 1816b4e7dc264..61b59a804e308 100644
1538 +--- a/drivers/net/wireless/ath/key.c
1539 ++++ b/drivers/net/wireless/ath/key.c
1540 +@@ -84,8 +84,7 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry)
1541 + }
1542 + EXPORT_SYMBOL(ath_hw_keyreset);
1543 +
1544 +-static bool ath_hw_keysetmac(struct ath_common *common,
1545 +- u16 entry, const u8 *mac)
1546 ++bool ath_hw_keysetmac(struct ath_common *common, u16 entry, const u8 *mac)
1547 + {
1548 + u32 macHi, macLo;
1549 + u32 unicast_flag = AR_KEYTABLE_VALID;
1550 +@@ -125,6 +124,7 @@ static bool ath_hw_keysetmac(struct ath_common *common,
1551 +
1552 + return true;
1553 + }
1554 ++EXPORT_SYMBOL(ath_hw_keysetmac);
1555 +
1556 + static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
1557 + const struct ath_keyval *k,
1558 +@@ -581,29 +581,38 @@ EXPORT_SYMBOL(ath_key_config);
1559 + /*
1560 + * Delete Key.
1561 + */
1562 +-void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key)
1563 ++void ath_key_delete(struct ath_common *common, u8 hw_key_idx)
1564 + {
1565 +- ath_hw_keyreset(common, key->hw_key_idx);
1566 +- if (key->hw_key_idx < IEEE80211_WEP_NKID)
1567 ++ /* Leave CCMP and TKIP (main key) configured to avoid disabling
1568 ++ * encryption for potentially pending frames already in a TXQ with the
1569 ++ * keyix pointing to this key entry. Instead, only clear the MAC address
1570 ++ * to prevent RX processing from using this key cache entry.
1571 ++ */
1572 ++ if (test_bit(hw_key_idx, common->ccmp_keymap) ||
1573 ++ test_bit(hw_key_idx, common->tkip_keymap))
1574 ++ ath_hw_keysetmac(common, hw_key_idx, NULL);
1575 ++ else
1576 ++ ath_hw_keyreset(common, hw_key_idx);
1577 ++ if (hw_key_idx < IEEE80211_WEP_NKID)
1578 + return;
1579 +
1580 +- clear_bit(key->hw_key_idx, common->keymap);
1581 +- clear_bit(key->hw_key_idx, common->ccmp_keymap);
1582 +- if (key->cipher != WLAN_CIPHER_SUITE_TKIP)
1583 ++ clear_bit(hw_key_idx, common->keymap);
1584 ++ clear_bit(hw_key_idx, common->ccmp_keymap);
1585 ++ if (!test_bit(hw_key_idx, common->tkip_keymap))
1586 + return;
1587 +
1588 +- clear_bit(key->hw_key_idx + 64, common->keymap);
1589 ++ clear_bit(hw_key_idx + 64, common->keymap);
1590 +
1591 +- clear_bit(key->hw_key_idx, common->tkip_keymap);
1592 +- clear_bit(key->hw_key_idx + 64, common->tkip_keymap);
1593 ++ clear_bit(hw_key_idx, common->tkip_keymap);
1594 ++ clear_bit(hw_key_idx + 64, common->tkip_keymap);
1595 +
1596 + if (!(common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED)) {
1597 +- ath_hw_keyreset(common, key->hw_key_idx + 32);
1598 +- clear_bit(key->hw_key_idx + 32, common->keymap);
1599 +- clear_bit(key->hw_key_idx + 64 + 32, common->keymap);
1600 ++ ath_hw_keyreset(common, hw_key_idx + 32);
1601 ++ clear_bit(hw_key_idx + 32, common->keymap);
1602 ++ clear_bit(hw_key_idx + 64 + 32, common->keymap);
1603 +
1604 +- clear_bit(key->hw_key_idx + 32, common->tkip_keymap);
1605 +- clear_bit(key->hw_key_idx + 64 + 32, common->tkip_keymap);
1606 ++ clear_bit(hw_key_idx + 32, common->tkip_keymap);
1607 ++ clear_bit(hw_key_idx + 64 + 32, common->tkip_keymap);
1608 + }
1609 + }
1610 + EXPORT_SYMBOL(ath_key_delete);
1611 +diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
1612 +index 23a363fd4c59c..bc80b0f0ea1ba 100644
1613 +--- a/drivers/pci/msi.c
1614 ++++ b/drivers/pci/msi.c
1615 +@@ -170,24 +170,25 @@ static inline __attribute_const__ u32 msi_mask(unsigned x)
1616 + * reliably as devices without an INTx disable bit will then generate a
1617 + * level IRQ which will never be cleared.
1618 + */
1619 +-u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
1620 ++void __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
1621 + {
1622 +- u32 mask_bits = desc->masked;
1623 ++ raw_spinlock_t *lock = &desc->dev->msi_lock;
1624 ++ unsigned long flags;
1625 +
1626 + if (pci_msi_ignore_mask || !desc->msi_attrib.maskbit)
1627 +- return 0;
1628 ++ return;
1629 +
1630 +- mask_bits &= ~mask;
1631 +- mask_bits |= flag;
1632 ++ raw_spin_lock_irqsave(lock, flags);
1633 ++ desc->masked &= ~mask;
1634 ++ desc->masked |= flag;
1635 + pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos,
1636 +- mask_bits);
1637 +-
1638 +- return mask_bits;
1639 ++ desc->masked);
1640 ++ raw_spin_unlock_irqrestore(lock, flags);
1641 + }
1642 +
1643 + static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
1644 + {
1645 +- desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag);
1646 ++ __pci_msi_desc_mask_irq(desc, mask, flag);
1647 + }
1648 +
1649 + static void __iomem *pci_msix_desc_addr(struct msi_desc *desc)
1650 +@@ -302,10 +303,28 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
1651 + /* Don't touch the hardware now */
1652 + } else if (entry->msi_attrib.is_msix) {
1653 + void __iomem *base = pci_msix_desc_addr(entry);
1654 ++ bool unmasked = !(entry->masked & PCI_MSIX_ENTRY_CTRL_MASKBIT);
1655 ++
1656 ++ /*
1657 ++ * The specification mandates that the entry is masked
1658 ++ * when the message is modified:
1659 ++ *
1660 ++ * "If software changes the Address or Data value of an
1661 ++ * entry while the entry is unmasked, the result is
1662 ++ * undefined."
1663 ++ */
1664 ++ if (unmasked)
1665 ++ __pci_msix_desc_mask_irq(entry, PCI_MSIX_ENTRY_CTRL_MASKBIT);
1666 +
1667 + writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
1668 + writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
1669 + writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
1670 ++
1671 ++ if (unmasked)
1672 ++ __pci_msix_desc_mask_irq(entry, 0);
1673 ++
1674 ++ /* Ensure that the writes are visible in the device */
1675 ++ readl(base + PCI_MSIX_ENTRY_DATA);
1676 + } else {
1677 + int pos = dev->msi_cap;
1678 + u16 msgctl;
1679 +@@ -326,6 +345,8 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
1680 + pci_write_config_word(dev, pos + PCI_MSI_DATA_32,
1681 + msg->data);
1682 + }
1683 ++ /* Ensure that the writes are visible in the device */
1684 ++ pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
1685 + }
1686 + entry->msg = *msg;
1687 + }
1688 +@@ -619,21 +640,21 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
1689 + /* Configure MSI capability structure */
1690 + ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
1691 + if (ret) {
1692 +- msi_mask_irq(entry, mask, ~mask);
1693 ++ msi_mask_irq(entry, mask, 0);
1694 + free_msi_irqs(dev);
1695 + return ret;
1696 + }
1697 +
1698 + ret = msi_verify_entries(dev);
1699 + if (ret) {
1700 +- msi_mask_irq(entry, mask, ~mask);
1701 ++ msi_mask_irq(entry, mask, 0);
1702 + free_msi_irqs(dev);
1703 + return ret;
1704 + }
1705 +
1706 + ret = populate_msi_sysfs(dev);
1707 + if (ret) {
1708 +- msi_mask_irq(entry, mask, ~mask);
1709 ++ msi_mask_irq(entry, mask, 0);
1710 + free_msi_irqs(dev);
1711 + return ret;
1712 + }
1713 +@@ -674,6 +695,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
1714 + {
1715 + struct cpumask *curmsk, *masks = NULL;
1716 + struct msi_desc *entry;
1717 ++ void __iomem *addr;
1718 + int ret, i;
1719 +
1720 + if (affd)
1721 +@@ -693,6 +715,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
1722 +
1723 + entry->msi_attrib.is_msix = 1;
1724 + entry->msi_attrib.is_64 = 1;
1725 ++
1726 + if (entries)
1727 + entry->msi_attrib.entry_nr = entries[i].entry;
1728 + else
1729 +@@ -700,6 +723,10 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
1730 + entry->msi_attrib.default_irq = dev->irq;
1731 + entry->mask_base = base;
1732 +
1733 ++ addr = pci_msix_desc_addr(entry);
1734 ++ if (addr)
1735 ++ entry->masked = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
1736 ++
1737 + list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
1738 + if (masks)
1739 + curmsk++;
1740 +@@ -710,21 +737,27 @@ out:
1741 + return ret;
1742 + }
1743 +
1744 +-static void msix_program_entries(struct pci_dev *dev,
1745 +- struct msix_entry *entries)
1746 ++static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries)
1747 + {
1748 + struct msi_desc *entry;
1749 +- int i = 0;
1750 +
1751 + for_each_pci_msi_entry(entry, dev) {
1752 +- if (entries)
1753 +- entries[i++].vector = entry->irq;
1754 +- entry->masked = readl(pci_msix_desc_addr(entry) +
1755 +- PCI_MSIX_ENTRY_VECTOR_CTRL);
1756 +- msix_mask_irq(entry, 1);
1757 ++ if (entries) {
1758 ++ entries->vector = entry->irq;
1759 ++ entries++;
1760 ++ }
1761 + }
1762 + }
1763 +
1764 ++static void msix_mask_all(void __iomem *base, int tsize)
1765 ++{
1766 ++ u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT;
1767 ++ int i;
1768 ++
1769 ++ for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE)
1770 ++ writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
1771 ++}
1772 ++
1773 + /**
1774 + * msix_capability_init - configure device's MSI-X capability
1775 + * @dev: pointer to the pci_dev data structure of MSI-X device function
1776 +@@ -739,22 +772,33 @@ static void msix_program_entries(struct pci_dev *dev,
1777 + static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
1778 + int nvec, const struct irq_affinity *affd)
1779 + {
1780 +- int ret;
1781 +- u16 control;
1782 + void __iomem *base;
1783 ++ int ret, tsize;
1784 ++ u16 control;
1785 +
1786 +- /* Ensure MSI-X is disabled while it is set up */
1787 +- pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
1788 ++ /*
1789 ++ * Some devices require MSI-X to be enabled before the MSI-X
1790 ++ * registers can be accessed. Mask all the vectors to prevent
1791 ++ * interrupts coming in before they're fully set up.
1792 ++ */
1793 ++ pci_msix_clear_and_set_ctrl(dev, 0, PCI_MSIX_FLAGS_MASKALL |
1794 ++ PCI_MSIX_FLAGS_ENABLE);
1795 +
1796 + pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
1797 + /* Request & Map MSI-X table region */
1798 +- base = msix_map_region(dev, msix_table_size(control));
1799 +- if (!base)
1800 +- return -ENOMEM;
1801 ++ tsize = msix_table_size(control);
1802 ++ base = msix_map_region(dev, tsize);
1803 ++ if (!base) {
1804 ++ ret = -ENOMEM;
1805 ++ goto out_disable;
1806 ++ }
1807 ++
1808 ++ /* Ensure that all table entries are masked. */
1809 ++ msix_mask_all(base, tsize);
1810 +
1811 + ret = msix_setup_entries(dev, base, entries, nvec, affd);
1812 + if (ret)
1813 +- return ret;
1814 ++ goto out_disable;
1815 +
1816 + ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
1817 + if (ret)
1818 +@@ -765,15 +809,7 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
1819 + if (ret)
1820 + goto out_free;
1821 +
1822 +- /*
1823 +- * Some devices require MSI-X to be enabled before we can touch the
1824 +- * MSI-X registers. We need to mask all the vectors to prevent
1825 +- * interrupts coming in before they're fully set up.
1826 +- */
1827 +- pci_msix_clear_and_set_ctrl(dev, 0,
1828 +- PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE);
1829 +-
1830 +- msix_program_entries(dev, entries);
1831 ++ msix_update_entries(dev, entries);
1832 +
1833 + ret = populate_msi_sysfs(dev);
1834 + if (ret)
1835 +@@ -807,6 +843,9 @@ out_avail:
1836 + out_free:
1837 + free_msi_irqs(dev);
1838 +
1839 ++out_disable:
1840 ++ pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
1841 ++
1842 + return ret;
1843 + }
1844 +
1845 +@@ -894,8 +933,7 @@ static void pci_msi_shutdown(struct pci_dev *dev)
1846 +
1847 + /* Return the device with MSI unmasked as initial states */
1848 + mask = msi_mask(desc->msi_attrib.multi_cap);
1849 +- /* Keep cached state to be restored */
1850 +- __pci_msi_desc_mask_irq(desc, mask, ~mask);
1851 ++ msi_mask_irq(desc, mask, 0);
1852 +
1853 + /* Restore dev->irq to its default pin-assertion irq */
1854 + dev->irq = desc->msi_attrib.default_irq;
1855 +@@ -980,10 +1018,8 @@ static void pci_msix_shutdown(struct pci_dev *dev)
1856 + }
1857 +
1858 + /* Return the device with MSI-X masked as initial states */
1859 +- for_each_pci_msi_entry(entry, dev) {
1860 +- /* Keep cached states to be restored */
1861 ++ for_each_pci_msi_entry(entry, dev)
1862 + __pci_msix_desc_mask_irq(entry, 1);
1863 +- }
1864 +
1865 + pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
1866 + pci_intx_for_msi(dev, 1);
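
The msi.c changes above serialize every update of the cached mask state behind dev->msi_lock so the shadow desc->masked and the config-space register cannot diverge, mask an MSI-X entry around message writes as the spec requires, and read back afterwards to flush the writes to the device. A small userspace sketch of the locked read-modify-write follows, with a pthread mutex standing in for the raw spinlock and a printf standing in for pci_write_config_dword(); all names are illustrative.

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative shadow of one MSI descriptor (not the kernel struct). */
    struct fake_desc {
        pthread_mutex_t lock;   /* stands in for dev->msi_lock */
        uint32_t masked;        /* cached copy of the mask register */
    };

    static void config_write(uint32_t val)
    {
        printf("config write: 0x%08x\n", val);
    }

    /* Locked read-modify-write, mirroring __pci_msi_desc_mask_irq():
     * the cached value and the hardware register are updated atomically
     * with respect to concurrent callers. */
    static void mask_irq(struct fake_desc *d, uint32_t mask, uint32_t flag)
    {
        pthread_mutex_lock(&d->lock);
        d->masked &= ~mask;
        d->masked |= flag;
        config_write(d->masked);
        pthread_mutex_unlock(&d->lock);
    }

    int main(void)
    {
        struct fake_desc d = { PTHREAD_MUTEX_INITIALIZER, 0 };

        mask_irq(&d, 0x1, 0x1);   /* mask vector 0   */
        mask_irq(&d, 0x1, 0x0);   /* unmask vector 0 */
        return 0;
    }
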
1867 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
1868 +index f287a9f919da1..7e873b6b7d558 100644
1869 +--- a/drivers/pci/quirks.c
1870 ++++ b/drivers/pci/quirks.c
1871 +@@ -1882,6 +1882,7 @@ static void quirk_ryzen_xhci_d3hot(struct pci_dev *dev)
1872 + }
1873 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e0, quirk_ryzen_xhci_d3hot);
1874 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e1, quirk_ryzen_xhci_d3hot);
1875 ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1639, quirk_ryzen_xhci_d3hot);
1876 +
1877 + #ifdef CONFIG_X86_IO_APIC
1878 + static int dmi_disable_ioapicreroute(const struct dmi_system_id *d)
1879 +diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
1880 +index d137c480db46b..dd04aedd76e0b 100644
1881 +--- a/drivers/ptp/Kconfig
1882 ++++ b/drivers/ptp/Kconfig
1883 +@@ -91,7 +91,8 @@ config DP83640_PHY
1884 + config PTP_1588_CLOCK_PCH
1885 + tristate "Intel PCH EG20T as PTP clock"
1886 + depends on X86_32 || COMPILE_TEST
1887 +- depends on HAS_IOMEM && NET
1888 ++ depends on HAS_IOMEM && PCI
1889 ++ depends on NET
1890 + imply PTP_1588_CLOCK
1891 + help
1892 + This driver adds support for using the PCH EG20T as a PTP
1893 +diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
1894 +index 6c629ef1bc4e3..b3c23edd4b6cb 100644
1895 +--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
1896 ++++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
1897 +@@ -453,8 +453,8 @@ static int initialize_controller(struct scsi_device *sdev,
1898 + if (!h->ctlr)
1899 + err = SCSI_DH_RES_TEMP_UNAVAIL;
1900 + else {
1901 +- list_add_rcu(&h->node, &h->ctlr->dh_list);
1902 + h->sdev = sdev;
1903 ++ list_add_rcu(&h->node, &h->ctlr->dh_list);
1904 + }
1905 + spin_unlock(&list_lock);
1906 + err = SCSI_DH_OK;
1907 +@@ -779,11 +779,11 @@ static void rdac_bus_detach( struct scsi_device *sdev )
1908 + spin_lock(&list_lock);
1909 + if (h->ctlr) {
1910 + list_del_rcu(&h->node);
1911 +- h->sdev = NULL;
1912 + kref_put(&h->ctlr->kref, release_controller);
1913 + }
1914 + spin_unlock(&list_lock);
1915 + sdev->handler_data = NULL;
1916 ++ synchronize_rcu();
1917 + kfree(h);
1918 + }
1919 +
1920 +diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
1921 +index 8428247015db6..81df2c94b7473 100644
1922 +--- a/drivers/scsi/megaraid/megaraid_mm.c
1923 ++++ b/drivers/scsi/megaraid/megaraid_mm.c
1924 +@@ -250,7 +250,7 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
1925 + mimd_t mimd;
1926 + uint32_t adapno;
1927 + int iterator;
1928 +-
1929 ++ bool is_found;
1930 +
1931 + if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) {
1932 + *rval = -EFAULT;
1933 +@@ -266,12 +266,16 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
1934 +
1935 + adapter = NULL;
1936 + iterator = 0;
1937 ++ is_found = false;
1938 +
1939 + list_for_each_entry(adapter, &adapters_list_g, list) {
1940 +- if (iterator++ == adapno) break;
1941 ++ if (iterator++ == adapno) {
1942 ++ is_found = true;
1943 ++ break;
1944 ++ }
1945 + }
1946 +
1947 +- if (!adapter) {
1948 ++ if (!is_found) {
1949 + *rval = -ENODEV;
1950 + return NULL;
1951 + }
1952 +@@ -737,6 +741,7 @@ ioctl_done(uioc_t *kioc)
1953 + uint32_t adapno;
1954 + int iterator;
1955 + mraid_mmadp_t* adapter;
1956 ++ bool is_found;
1957 +
1958 + /*
1959 + * When the kioc returns from driver, make sure it still doesn't
1960 +@@ -759,19 +764,23 @@ ioctl_done(uioc_t *kioc)
1961 + iterator = 0;
1962 + adapter = NULL;
1963 + adapno = kioc->adapno;
1964 ++ is_found = false;
1965 +
1966 + con_log(CL_ANN, ( KERN_WARNING "megaraid cmm: completed "
1967 + "ioctl that was timedout before\n"));
1968 +
1969 + list_for_each_entry(adapter, &adapters_list_g, list) {
1970 +- if (iterator++ == adapno) break;
1971 ++ if (iterator++ == adapno) {
1972 ++ is_found = true;
1973 ++ break;
1974 ++ }
1975 + }
1976 +
1977 + kioc->timedout = 0;
1978 +
1979 +- if (adapter) {
1980 ++ if (is_found)
1981 + mraid_mm_dealloc_kioc( adapter, kioc );
1982 +- }
1983 ++
1984 + }
1985 + else {
1986 + wake_up(&wait_q);
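
The megaraid fix above works around a classic list_for_each_entry() pitfall: when the loop runs to completion, the cursor ends up pointing at the list head re-cast as an entry, never at NULL, so a post-loop NULL check can't detect a miss. A minimal userspace sketch that makes this visible (names and the open-coded macro are illustrative):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Minimal circular list in the style of the kernel's list.h. */
    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct adapter { int unique_id; struct list_head list; };

    int main(void)
    {
        struct list_head head = { &head, &head };   /* empty adapter list */
        struct adapter *a;
        bool is_found = false;

        /* Open-coded list_for_each_entry(): if the loop runs off the
         * end, the cursor is the list head re-interpreted as an
         * adapter -- not NULL -- so "if (!a)" cannot catch a miss.
         * That is why the fix adds an explicit flag. */
        for (a = container_of(head.next, struct adapter, list);
             &a->list != &head;
             a = container_of(a->list.next, struct adapter, list)) {
            if (a->unique_id == 42) {
                is_found = true;
                break;
            }
        }

        printf("cursor %s NULL, is_found = %d\n",
               a == NULL ? "is" : "is not", is_found);
        return 0;
    }
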
1987 +diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
1988 +index 009a5b2aa3d02..149465de35b2d 100644
1989 +--- a/drivers/scsi/scsi_scan.c
1990 ++++ b/drivers/scsi/scsi_scan.c
1991 +@@ -462,7 +462,8 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
1992 + error = shost->hostt->target_alloc(starget);
1993 +
1994 + if(error) {
1995 +- dev_printk(KERN_ERR, dev, "target allocation failed, error %d\n", error);
1996 ++ if (error != -ENXIO)
1997 ++ dev_err(dev, "target allocation failed, error %d\n", error);
1998 + /* don't want scsi_target_reap to do the final
1999 + * put because it will be under the host lock */
2000 + scsi_target_destroy(starget);
2001 +diff --git a/drivers/slimbus/messaging.c b/drivers/slimbus/messaging.c
2002 +index d5879142dbef1..ddf0371ad52b2 100644
2003 +--- a/drivers/slimbus/messaging.c
2004 ++++ b/drivers/slimbus/messaging.c
2005 +@@ -66,7 +66,7 @@ int slim_alloc_txn_tid(struct slim_controller *ctrl, struct slim_msg_txn *txn)
2006 + int ret = 0;
2007 +
2008 + spin_lock_irqsave(&ctrl->txn_lock, flags);
2009 +- ret = idr_alloc_cyclic(&ctrl->tid_idr, txn, 0,
2010 ++ ret = idr_alloc_cyclic(&ctrl->tid_idr, txn, 1,
2011 + SLIM_MAX_TIDS, GFP_ATOMIC);
2012 + if (ret < 0) {
2013 + spin_unlock_irqrestore(&ctrl->txn_lock, flags);
2014 +@@ -131,7 +131,8 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
2015 + goto slim_xfer_err;
2016 + }
2017 + }
2018 +-
2019 ++ /* Initialize tid to invalid value */
2020 ++ txn->tid = 0;
2021 + need_tid = slim_tid_txn(txn->mt, txn->mc);
2022 +
2023 + if (need_tid) {
2024 +@@ -163,7 +164,7 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
2025 + txn->mt, txn->mc, txn->la, ret);
2026 +
2027 + slim_xfer_err:
2028 +- if (!clk_pause_msg && (!need_tid || ret == -ETIMEDOUT)) {
2029 ++ if (!clk_pause_msg && (txn->tid == 0 || ret == -ETIMEDOUT)) {
2030 + /*
2031 + * remove runtime-pm vote if this was TX only, or
2032 + * if there was error during this transaction
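
The messaging.c hunks above reserve tid 0 as a "never allocated" sentinel by handing out IDs starting at 1 (idr_alloc_cyclic with min = 1) and initializing txn->tid to 0, so the error path can tell whether a tid was ever assigned. A tiny sketch of the sentinel idea, not the idr API; the names are illustrative:

    #include <stdio.h>

    #define TID_MAX 256

    static unsigned int next_tid = 1;

    /* Hand out IDs cycling through 1..TID_MAX-1; 0 is never produced,
     * so tid == 0 always means "no ID was ever allocated". */
    static unsigned int alloc_tid(void)
    {
        unsigned int tid = next_tid;

        next_tid = (next_tid % (TID_MAX - 1)) + 1;
        return tid;
    }

    int main(void)
    {
        unsigned int tid = 0;   /* invalid until allocation succeeds */

        tid = alloc_tid();
        if (tid == 0)
            printf("no tid allocated\n");
        else
            printf("got tid %u\n", tid);
        return 0;
    }
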
2033 +diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
2034 +index 44021620d1013..1a5311fb45a5f 100644
2035 +--- a/drivers/slimbus/qcom-ngd-ctrl.c
2036 ++++ b/drivers/slimbus/qcom-ngd-ctrl.c
2037 +@@ -1060,7 +1060,8 @@ static void qcom_slim_ngd_setup(struct qcom_slim_ngd_ctrl *ctrl)
2038 + {
2039 + u32 cfg = readl_relaxed(ctrl->ngd->base);
2040 +
2041 +- if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN)
2042 ++ if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN ||
2043 ++ ctrl->state == QCOM_SLIM_NGD_CTRL_ASLEEP)
2044 + qcom_slim_ngd_init_dma(ctrl);
2045 +
2046 + /* By default enable message queues */
2047 +@@ -1111,6 +1112,7 @@ static int qcom_slim_ngd_power_up(struct qcom_slim_ngd_ctrl *ctrl)
2048 + dev_info(ctrl->dev, "Subsys restart: ADSP active framer\n");
2049 + return 0;
2050 + }
2051 ++ qcom_slim_ngd_setup(ctrl);
2052 + return 0;
2053 + }
2054 +
2055 +@@ -1496,6 +1498,7 @@ static int __maybe_unused qcom_slim_ngd_runtime_suspend(struct device *dev)
2056 + struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
2057 + int ret = 0;
2058 +
2059 ++ qcom_slim_ngd_exit_dma(ctrl);
2060 + if (!ctrl->qmi.handle)
2061 + return 0;
2062 +
2063 +diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
2064 +index 732327756ee11..7a58f629155d2 100644
2065 +--- a/drivers/vhost/vhost.c
2066 ++++ b/drivers/vhost/vhost.c
2067 +@@ -678,10 +678,16 @@ static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
2068 + (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
2069 + }
2070 +
2071 ++/* Make sure 64 bit math will not overflow. */
2072 + static bool vhost_overflow(u64 uaddr, u64 size)
2073 + {
2074 +- /* Make sure 64 bit math will not overflow. */
2075 +- return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size;
2076 ++ if (uaddr > ULONG_MAX || size > ULONG_MAX)
2077 ++ return true;
2078 ++
2079 ++ if (!size)
2080 ++ return false;
2081 ++
2082 ++ return uaddr > ULONG_MAX - size + 1;
2083 + }
2084 +
2085 + /* Caller should have vq mutex and device mutex. */
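
The vhost change above tightens the overflow check: a region whose last byte is exactly ULONG_MAX is valid (the old "uaddr > ULONG_MAX - size" form wrongly rejected it), and a zero-length region can never overflow. A worked sketch of the fixed predicate, shrunk to a 32-bit "address space" so the boundary cases are easy to read:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* A region [uaddr, uaddr + size) overflows when its last byte,
     * uaddr + size - 1, does not fit -- rearranged so that uaddr + size
     * is never computed and so cannot itself wrap. */
    static bool region_overflows(uint64_t uaddr, uint64_t size)
    {
        if (uaddr > UINT32_MAX || size > UINT32_MAX)
            return true;
        if (!size)
            return false;           /* an empty region can't overflow */
        return uaddr > (uint64_t)UINT32_MAX - size + 1;
    }

    int main(void)
    {
        printf("%d\n", region_overflows(0xFFFFFF00u, 0x100)); /* 0: ends at MAX */
        printf("%d\n", region_overflows(0xFFFFFF00u, 0x101)); /* 1: wraps       */
        printf("%d\n", region_overflows(0xFFFFFFFFu, 0));     /* 0: empty       */
        return 0;
    }
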
2086 +diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
2087 +index a2f8130e18fec..d138027034fd6 100644
2088 +--- a/drivers/xen/events/events_base.c
2089 ++++ b/drivers/xen/events/events_base.c
2090 +@@ -133,12 +133,12 @@ static void disable_dynirq(struct irq_data *data);
2091 +
2092 + static DEFINE_PER_CPU(unsigned int, irq_epoch);
2093 +
2094 +-static void clear_evtchn_to_irq_row(unsigned row)
2095 ++static void clear_evtchn_to_irq_row(int *evtchn_row)
2096 + {
2097 + unsigned col;
2098 +
2099 + for (col = 0; col < EVTCHN_PER_ROW; col++)
2100 +- WRITE_ONCE(evtchn_to_irq[row][col], -1);
2101 ++ WRITE_ONCE(evtchn_row[col], -1);
2102 + }
2103 +
2104 + static void clear_evtchn_to_irq_all(void)
2105 +@@ -148,7 +148,7 @@ static void clear_evtchn_to_irq_all(void)
2106 + for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) {
2107 + if (evtchn_to_irq[row] == NULL)
2108 + continue;
2109 +- clear_evtchn_to_irq_row(row);
2110 ++ clear_evtchn_to_irq_row(evtchn_to_irq[row]);
2111 + }
2112 + }
2113 +
2114 +@@ -156,6 +156,7 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
2115 + {
2116 + unsigned row;
2117 + unsigned col;
2118 ++ int *evtchn_row;
2119 +
2120 + if (evtchn >= xen_evtchn_max_channels())
2121 + return -EINVAL;
2122 +@@ -168,11 +169,18 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
2123 + if (irq == -1)
2124 + return 0;
2125 +
2126 +- evtchn_to_irq[row] = (int *)get_zeroed_page(GFP_KERNEL);
2127 +- if (evtchn_to_irq[row] == NULL)
2128 ++ evtchn_row = (int *) __get_free_pages(GFP_KERNEL, 0);
2129 ++ if (evtchn_row == NULL)
2130 + return -ENOMEM;
2131 +
2132 +- clear_evtchn_to_irq_row(row);
2133 ++ clear_evtchn_to_irq_row(evtchn_row);
2134 ++
2135 ++ /*
2136 ++ * We've prepared an empty row for the mapping. If a different
2137 ++ * thread was faster inserting it, we can drop ours.
2138 ++ */
2139 ++ if (cmpxchg(&evtchn_to_irq[row], NULL, evtchn_row) != NULL)
2140 ++ free_page((unsigned long) evtchn_row);
2141 + }
2142 +
2143 + WRITE_ONCE(evtchn_to_irq[row][col], irq);
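
set_evtchn_to_irq() above now prepares the row privately and publishes it with cmpxchg(), so two CPUs racing to populate the same slot cannot clobber each other: exactly one allocation survives and the loser frees its copy. A userspace sketch of that publish-or-discard pattern, using GCC's __atomic compare-exchange builtin (the names are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    #define ROW_SIZE 1024

    static int *table_row;   /* shared, lazily allocated slot */

    static int *get_row(void)
    {
        int *row = __atomic_load_n(&table_row, __ATOMIC_ACQUIRE);
        if (row)
            return row;

        row = calloc(ROW_SIZE, sizeof(*row));
        if (!row)
            return NULL;

        int *expected = NULL;
        if (!__atomic_compare_exchange_n(&table_row, &expected, row,
                                         false, __ATOMIC_ACQ_REL,
                                         __ATOMIC_ACQUIRE)) {
            free(row);       /* another thread installed its row first */
            row = expected;  /* use the winner's row */
        }
        return row;
    }

    int main(void)
    {
        printf("row at %p\n", (void *)get_row());
        free(table_row);
        return 0;
    }
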
2144 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
2145 +index d29f4cf125d27..6f02a3f77fa83 100644
2146 +--- a/fs/btrfs/inode.c
2147 ++++ b/fs/btrfs/inode.c
2148 +@@ -9556,8 +9556,14 @@ static int btrfs_rename_exchange(struct inode *old_dir,
2149 + bool sync_log_dest = false;
2150 + bool commit_transaction = false;
2151 +
2152 +- /* we only allow rename subvolume link between subvolumes */
2153 +- if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
2154 ++ /*
2155 ++ * For non-subvolumes allow exchange only within one subvolume, in the
2156 ++ * same inode namespace. Two subvolumes (represented as directory) can
2157 ++ * be exchanged as they're a logical link and have a fixed inode number.
2158 ++ */
2159 ++ if (root != dest &&
2160 ++ (old_ino != BTRFS_FIRST_FREE_OBJECTID ||
2161 ++ new_ino != BTRFS_FIRST_FREE_OBJECTID))
2162 + return -EXDEV;
2163 +
2164 + btrfs_init_log_ctx(&ctx_root, old_inode);
2165 +diff --git a/fs/namespace.c b/fs/namespace.c
2166 +index edd397fa29913..2f3c6a0350a8d 100644
2167 +--- a/fs/namespace.c
2168 ++++ b/fs/namespace.c
2169 +@@ -1610,13 +1610,22 @@ static inline bool may_mount(void)
2170 + return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
2171 + }
2172 +
2173 ++#ifdef CONFIG_MANDATORY_FILE_LOCKING
2174 ++static bool may_mandlock(void)
2175 ++{
2176 ++ pr_warn_once("======================================================\n"
2177 ++ "WARNING: the mand mount option is being deprecated and\n"
2178 ++ " will be removed in v5.15!\n"
2179 ++ "======================================================\n");
2180 ++ return capable(CAP_SYS_ADMIN);
2181 ++}
2182 ++#else
2183 + static inline bool may_mandlock(void)
2184 + {
2185 +-#ifndef CONFIG_MANDATORY_FILE_LOCKING
2186 ++ pr_warn("VFS: \"mand\" mount option not supported");
2187 + return false;
2188 +-#endif
2189 +- return capable(CAP_SYS_ADMIN);
2190 + }
2191 ++#endif
2192 +
2193 + /*
2194 + * Now umount can handle mount points as well as block devices.
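
The namespace.c hunk above splits may_mandlock() so the CONFIG_MANDATORY_FILE_LOCKING build warns once per boot about the upcoming removal of "mand", while the disabled build warns and refuses outright. A compact userspace sketch of that one-shot-warning plus compile-time gate; CONFIG_MANDLOCK and capable_sys_admin() are stand-ins, not kernel symbols:

    #include <stdbool.h>
    #include <stdio.h>

    #define CONFIG_MANDLOCK 1   /* stand-in for the Kconfig symbol */

    static bool capable_sys_admin(void) { return true; }   /* stand-in */

    #if CONFIG_MANDLOCK
    static bool may_mandlock(void)
    {
        static bool warned;

        if (!warned) {          /* pr_warn_once() equivalent */
            warned = true;
            fprintf(stderr, "WARNING: the mand mount option is deprecated\n");
        }
        return capable_sys_admin();
    }
    #else
    static bool may_mandlock(void)
    {
        fprintf(stderr, "\"mand\" mount option not supported\n");
        return false;
    }
    #endif

    int main(void)
    {
        printf("mand allowed: %d\n", may_mandlock());
        return 0;
    }
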
2195 +diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
2196 +index ad8766e1635e4..a26e6f5034a63 100644
2197 +--- a/include/asm-generic/vmlinux.lds.h
2198 ++++ b/include/asm-generic/vmlinux.lds.h
2199 +@@ -508,6 +508,7 @@
2200 + NOINSTR_TEXT \
2201 + *(.text..refcount) \
2202 + *(.ref.text) \
2203 ++ *(.text.asan.* .text.tsan.*) \
2204 + MEM_KEEP(init.text*) \
2205 + MEM_KEEP(exit.text*) \
2206 +
2207 +diff --git a/include/linux/device.h b/include/linux/device.h
2208 +index b1c8150e9ea57..37e359d81a86f 100644
2209 +--- a/include/linux/device.h
2210 ++++ b/include/linux/device.h
2211 +@@ -998,6 +998,7 @@ struct device {
2212 + struct dev_pin_info *pins;
2213 + #endif
2214 + #ifdef CONFIG_GENERIC_MSI_IRQ
2215 ++ raw_spinlock_t msi_lock;
2216 + struct list_head msi_list;
2217 + #endif
2218 +
2219 +diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
2220 +index a64f21a97369a..131f93f8d5872 100644
2221 +--- a/include/linux/inetdevice.h
2222 ++++ b/include/linux/inetdevice.h
2223 +@@ -41,7 +41,7 @@ struct in_device {
2224 + unsigned long mr_qri; /* Query Response Interval */
2225 + unsigned char mr_qrv; /* Query Robustness Variable */
2226 + unsigned char mr_gq_running;
2227 +- unsigned char mr_ifc_count;
2228 ++ u32 mr_ifc_count;
2229 + struct timer_list mr_gq_timer; /* general query timer */
2230 + struct timer_list mr_ifc_timer; /* interface change timer */
2231 +
2232 +diff --git a/include/linux/irq.h b/include/linux/irq.h
2233 +index a042faefb9b73..9504267414a45 100644
2234 +--- a/include/linux/irq.h
2235 ++++ b/include/linux/irq.h
2236 +@@ -535,6 +535,7 @@ struct irq_chip {
2237 + * IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask
2238 + * IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode
2239 + * IRQCHIP_SUPPORTS_LEVEL_MSI Chip can provide two doorbells for Level MSIs
2240 ++ * IRQCHIP_AFFINITY_PRE_STARTUP: Default affinity update before startup
2241 + */
2242 + enum {
2243 + IRQCHIP_SET_TYPE_MASKED = (1 << 0),
2244 +@@ -545,6 +546,7 @@ enum {
2245 + IRQCHIP_ONESHOT_SAFE = (1 << 5),
2246 + IRQCHIP_EOI_THREADED = (1 << 6),
2247 + IRQCHIP_SUPPORTS_LEVEL_MSI = (1 << 7),
2248 ++ IRQCHIP_AFFINITY_PRE_STARTUP = (1 << 10),
2249 + };
2250 +
2251 + #include <linux/irqdesc.h>
2252 +diff --git a/include/linux/msi.h b/include/linux/msi.h
2253 +index 5dd171849a27e..62982e6afddfd 100644
2254 +--- a/include/linux/msi.h
2255 ++++ b/include/linux/msi.h
2256 +@@ -150,7 +150,7 @@ void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
2257 + void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
2258 +
2259 + u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag);
2260 +-u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
2261 ++void __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
2262 + void pci_msi_mask_irq(struct irq_data *data);
2263 + void pci_msi_unmask_irq(struct irq_data *data);
2264 +
2265 +diff --git a/include/net/psample.h b/include/net/psample.h
2266 +index 94cb37a7bf756..796f01e5635d7 100644
2267 +--- a/include/net/psample.h
2268 ++++ b/include/net/psample.h
2269 +@@ -18,6 +18,8 @@ struct psample_group {
2270 + struct psample_group *psample_group_get(struct net *net, u32 group_num);
2271 + void psample_group_put(struct psample_group *group);
2272 +
2273 ++struct sk_buff;
2274 ++
2275 + #if IS_ENABLED(CONFIG_PSAMPLE)
2276 +
2277 + void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
2278 +diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
2279 +index 09d914e486a2d..9afbd89b6096e 100644
2280 +--- a/kernel/irq/chip.c
2281 ++++ b/kernel/irq/chip.c
2282 +@@ -265,8 +265,11 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force)
2283 + } else {
2284 + switch (__irq_startup_managed(desc, aff, force)) {
2285 + case IRQ_STARTUP_NORMAL:
2286 ++ if (d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP)
2287 ++ irq_setup_affinity(desc);
2288 + ret = __irq_startup(desc);
2289 +- irq_setup_affinity(desc);
2290 ++ if (!(d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP))
2291 ++ irq_setup_affinity(desc);
2292 + break;
2293 + case IRQ_STARTUP_MANAGED:
2294 + irq_do_set_affinity(d, aff, false);
2295 +diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
2296 +index 604974f2afb19..88269dd5a8cad 100644
2297 +--- a/kernel/irq/msi.c
2298 ++++ b/kernel/irq/msi.c
2299 +@@ -477,11 +477,6 @@ skip_activate:
2300 + return 0;
2301 +
2302 + cleanup:
2303 +- for_each_msi_vector(desc, i, dev) {
2304 +- irq_data = irq_domain_get_irq_data(domain, i);
2305 +- if (irqd_is_activated(irq_data))
2306 +- irq_domain_deactivate_irq(irq_data);
2307 +- }
2308 + msi_domain_free_irqs(domain, dev);
2309 + return ret;
2310 + }
2311 +@@ -494,7 +489,15 @@ cleanup:
2312 + */
2313 + void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
2314 + {
2315 ++ struct irq_data *irq_data;
2316 + struct msi_desc *desc;
2317 ++ int i;
2318 ++
2319 ++ for_each_msi_vector(desc, i, dev) {
2320 ++ irq_data = irq_domain_get_irq_data(domain, i);
2321 ++ if (irqd_is_activated(irq_data))
2322 ++ irq_domain_deactivate_irq(irq_data);
2323 ++ }
2324 +
2325 + for_each_msi_entry(desc, dev) {
2326 + /*
2327 +diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
2328 +index bbde8d3d6c8ae..44d1340634f61 100644
2329 +--- a/kernel/trace/trace_events_hist.c
2330 ++++ b/kernel/trace/trace_events_hist.c
2331 +@@ -3786,6 +3786,8 @@ onmatch_create_field_var(struct hist_trigger_data *hist_data,
2332 + event = data->onmatch.match_event;
2333 + }
2334 +
2335 ++ if (!event)
2336 ++ goto free;
2337 + /*
2338 + * At this point, we're looking at a field on another
2339 + * event. Because we can't modify a hist trigger on
2340 +diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
2341 +index 253975cce943e..0cbd0bca971ff 100644
2342 +--- a/net/bluetooth/hidp/core.c
2343 ++++ b/net/bluetooth/hidp/core.c
2344 +@@ -1282,7 +1282,7 @@ static int hidp_session_thread(void *arg)
2345 +
2346 + /* cleanup runtime environment */
2347 + remove_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait);
2348 +- remove_wait_queue(sk_sleep(session->intr_sock->sk), &ctrl_wait);
2349 ++ remove_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait);
2350 + wake_up_interruptible(&session->report_queue);
2351 + hidp_del_timer(session);
2352 +
2353 +diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
2354 +index 5aa508a08a691..b5fb2b682e191 100644
2355 +--- a/net/bridge/br_if.c
2356 ++++ b/net/bridge/br_if.c
2357 +@@ -604,6 +604,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev,
2358 +
2359 + err = dev_set_allmulti(dev, 1);
2360 + if (err) {
2361 ++ br_multicast_del_port(p);
2362 + kfree(p); /* kobject not yet init'd, manually free */
2363 + goto err1;
2364 + }
2365 +@@ -708,6 +709,7 @@ err4:
2366 + err3:
2367 + sysfs_remove_link(br->ifobj, p->dev->name);
2368 + err2:
2369 ++ br_multicast_del_port(p);
2370 + kobject_put(&p->kobj);
2371 + dev_set_allmulti(dev, -1);
2372 + err1:
2373 +diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
2374 +index f91e3816806ba..aec3c724665f8 100644
2375 +--- a/net/dccp/dccp.h
2376 ++++ b/net/dccp/dccp.h
2377 +@@ -44,9 +44,9 @@ extern bool dccp_debug;
2378 + #define dccp_pr_debug_cat(format, a...) DCCP_PRINTK(dccp_debug, format, ##a)
2379 + #define dccp_debug(fmt, a...) dccp_pr_debug_cat(KERN_DEBUG fmt, ##a)
2380 + #else
2381 +-#define dccp_pr_debug(format, a...)
2382 +-#define dccp_pr_debug_cat(format, a...)
2383 +-#define dccp_debug(format, a...)
2384 ++#define dccp_pr_debug(format, a...) do {} while (0)
2385 ++#define dccp_pr_debug_cat(format, a...) do {} while (0)
2386 ++#define dccp_debug(format, a...) do {} while (0)
2387 + #endif
2388 +
2389 + extern struct inet_hashinfo dccp_hashinfo;
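
The dccp.h change above replaces empty debug stubs with do { } while (0). An empty #define leaves constructs like "if (x) dccp_pr_debug(...);" with a bare semicolon as the body, which trips -Wempty-body and invites dangling-else mistakes; the do/while form stays a single statement that must be terminated with a semicolon. A minimal sketch:

    #include <stdio.h>

    /* The fixed form: a real statement even when debugging is
     * compiled out, so if/else structure is preserved. */
    #define dbg(fmt, ...) do { } while (0)

    int main(void)
    {
        int connected = 0;

        if (connected)
            dbg("state %d\n", connected);  /* one well-formed statement */
        else
            printf("not connected\n");

        return 0;
    }
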
2390 +diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
2391 +index 89819745e482c..14c6fac039f9e 100644
2392 +--- a/net/ieee802154/socket.c
2393 ++++ b/net/ieee802154/socket.c
2394 +@@ -1002,6 +1002,11 @@ static const struct proto_ops ieee802154_dgram_ops = {
2395 + #endif
2396 + };
2397 +
2398 ++static void ieee802154_sock_destruct(struct sock *sk)
2399 ++{
2400 ++ skb_queue_purge(&sk->sk_receive_queue);
2401 ++}
2402 ++
2403 + /* Create a socket. Initialise the socket, blank the addresses
2404 + * set the state.
2405 + */
2406 +@@ -1042,7 +1047,7 @@ static int ieee802154_create(struct net *net, struct socket *sock,
2407 + sock->ops = ops;
2408 +
2409 + sock_init_data(sock, sk);
2410 +- /* FIXME: sk->sk_destruct */
2411 ++ sk->sk_destruct = ieee802154_sock_destruct;
2412 + sk->sk_family = PF_IEEE802154;
2413 +
2414 + /* Checksums on by default */
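
The socket.c fix above replaces the FIXME with a destructor that purges the receive queue, so skbs still queued when the socket dies are freed instead of leaked. A minimal userspace sketch of that destructor pattern; the struct and field names are illustrative, and the free loop stands in for skb_queue_purge():

    #include <stdio.h>
    #include <stdlib.h>

    struct buf { struct buf *next; };

    struct sock {
        struct buf *rx_queue;
        void (*destruct)(struct sock *sk);
    };

    /* Free anything still sitting in the receive queue at teardown. */
    static void sock_destruct(struct sock *sk)
    {
        struct buf *b = sk->rx_queue;

        while (b) {
            struct buf *next = b->next;
            free(b);
            b = next;
        }
        sk->rx_queue = NULL;
    }

    int main(void)
    {
        struct sock sk = { .rx_queue = NULL, .destruct = sock_destruct };

        for (int i = 0; i < 3; i++) {   /* three undelivered buffers */
            struct buf *b = calloc(1, sizeof(*b));
            b->next = sk.rx_queue;
            sk.rx_queue = b;
        }
        sk.destruct(&sk);               /* no leak on teardown */
        return 0;
    }
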
2415 +diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
2416 +index ffa847fc96194..dca7fe0ae24ad 100644
2417 +--- a/net/ipv4/igmp.c
2418 ++++ b/net/ipv4/igmp.c
2419 +@@ -807,10 +807,17 @@ static void igmp_gq_timer_expire(struct timer_list *t)
2420 + static void igmp_ifc_timer_expire(struct timer_list *t)
2421 + {
2422 + struct in_device *in_dev = from_timer(in_dev, t, mr_ifc_timer);
2423 ++ u32 mr_ifc_count;
2424 +
2425 + igmpv3_send_cr(in_dev);
2426 +- if (in_dev->mr_ifc_count) {
2427 +- in_dev->mr_ifc_count--;
2428 ++restart:
2429 ++ mr_ifc_count = READ_ONCE(in_dev->mr_ifc_count);
2430 ++
2431 ++ if (mr_ifc_count) {
2432 ++ if (cmpxchg(&in_dev->mr_ifc_count,
2433 ++ mr_ifc_count,
2434 ++ mr_ifc_count - 1) != mr_ifc_count)
2435 ++ goto restart;
2436 + igmp_ifc_start_timer(in_dev,
2437 + unsolicited_report_interval(in_dev));
2438 + }
2439 +@@ -822,7 +829,7 @@ static void igmp_ifc_event(struct in_device *in_dev)
2440 + struct net *net = dev_net(in_dev->dev);
2441 + if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
2442 + return;
2443 +- in_dev->mr_ifc_count = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
2444 ++ WRITE_ONCE(in_dev->mr_ifc_count, in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv);
2445 + igmp_ifc_start_timer(in_dev, 1);
2446 + }
2447 +
2448 +@@ -961,7 +968,7 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
2449 + in_dev->mr_qri;
2450 + }
2451 + /* cancel the interface change timer */
2452 +- in_dev->mr_ifc_count = 0;
2453 ++ WRITE_ONCE(in_dev->mr_ifc_count, 0);
2454 + if (del_timer(&in_dev->mr_ifc_timer))
2455 + __in_dev_put(in_dev);
2456 + /* clear deleted report items */
2457 +@@ -1739,7 +1746,7 @@ void ip_mc_down(struct in_device *in_dev)
2458 + igmp_group_dropped(pmc);
2459 +
2460 + #ifdef CONFIG_IP_MULTICAST
2461 +- in_dev->mr_ifc_count = 0;
2462 ++ WRITE_ONCE(in_dev->mr_ifc_count, 0);
2463 + if (del_timer(&in_dev->mr_ifc_timer))
2464 + __in_dev_put(in_dev);
2465 + in_dev->mr_gq_running = 0;
2466 +@@ -1956,7 +1963,7 @@ static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
2467 + pmc->sfmode = MCAST_INCLUDE;
2468 + #ifdef CONFIG_IP_MULTICAST
2469 + pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
2470 +- in_dev->mr_ifc_count = pmc->crcount;
2471 ++ WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount);
2472 + for (psf = pmc->sources; psf; psf = psf->sf_next)
2473 + psf->sf_crcount = 0;
2474 + igmp_ifc_event(pmc->interface);
2475 +@@ -2135,7 +2142,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
2476 + /* else no filters; keep old mode for reports */
2477 +
2478 + pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
2479 +- in_dev->mr_ifc_count = pmc->crcount;
2480 ++ WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount);
2481 + for (psf = pmc->sources; psf; psf = psf->sf_next)
2482 + psf->sf_crcount = 0;
2483 + igmp_ifc_event(in_dev);
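
mr_ifc_count is now read and decremented with READ_ONCE()/cmpxchg() above, so the timer can safely race with writers that zero the counter. The retry loop has the following shape in userspace, with GCC's __atomic builtins standing in for the kernel's cmpxchg():

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t ifc_count = 3;   /* shared counter (illustrative) */

    /* Decrement-if-nonzero without a lock: read, try the swap, retry
     * if somebody else changed the value in between. */
    static int dec_if_nonzero(uint32_t *p)
    {
        uint32_t old = __atomic_load_n(p, __ATOMIC_RELAXED);

        while (old) {
            if (__atomic_compare_exchange_n(p, &old, old - 1, false,
                                            __ATOMIC_RELAXED,
                                            __ATOMIC_RELAXED))
                return 1;   /* we performed the decrement */
            /* on failure, old was reloaded; loop and retry */
        }
        return 0;           /* counter already reached zero */
    }

    int main(void)
    {
        while (dec_if_nonzero(&ifc_count))
            printf("count now %u\n", ifc_count);
        return 0;
    }
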
2484 +diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
2485 +index b70c9365e1313..1740de0530726 100644
2486 +--- a/net/ipv4/tcp_bbr.c
2487 ++++ b/net/ipv4/tcp_bbr.c
2488 +@@ -985,7 +985,7 @@ static void bbr_init(struct sock *sk)
2489 + bbr->prior_cwnd = 0;
2490 + tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
2491 + bbr->rtt_cnt = 0;
2492 +- bbr->next_rtt_delivered = 0;
2493 ++ bbr->next_rtt_delivered = tp->delivered;
2494 + bbr->prev_ca_state = TCP_CA_Open;
2495 + bbr->packet_conservation = 0;
2496 +
2497 +diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
2498 +index 4105081dc1df0..6f390c2e4c8ea 100644
2499 +--- a/net/mac80211/debugfs_sta.c
2500 ++++ b/net/mac80211/debugfs_sta.c
2501 +@@ -80,6 +80,7 @@ static const char * const sta_flag_names[] = {
2502 + FLAG(MPSP_OWNER),
2503 + FLAG(MPSP_RECIPIENT),
2504 + FLAG(PS_DELIVER),
2505 ++ FLAG(USES_ENCRYPTION),
2506 + #undef FLAG
2507 + };
2508 +
2509 +diff --git a/net/mac80211/key.c b/net/mac80211/key.c
2510 +index 6775d6cb7d3d4..7fc55177db847 100644
2511 +--- a/net/mac80211/key.c
2512 ++++ b/net/mac80211/key.c
2513 +@@ -341,6 +341,7 @@ static void ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
2514 + if (sta) {
2515 + if (pairwise) {
2516 + rcu_assign_pointer(sta->ptk[idx], new);
2517 ++ set_sta_flag(sta, WLAN_STA_USES_ENCRYPTION);
2518 + sta->ptk_idx = idx;
2519 + ieee80211_check_fast_xmit(sta);
2520 + } else {
2521 +diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
2522 +index c33bc5fc0f2d8..75d982ff7f3d0 100644
2523 +--- a/net/mac80211/sta_info.h
2524 ++++ b/net/mac80211/sta_info.h
2525 +@@ -102,6 +102,7 @@ enum ieee80211_sta_info_flags {
2526 + WLAN_STA_MPSP_OWNER,
2527 + WLAN_STA_MPSP_RECIPIENT,
2528 + WLAN_STA_PS_DELIVER,
2529 ++ WLAN_STA_USES_ENCRYPTION,
2530 +
2531 + NUM_WLAN_STA_FLAGS,
2532 + };
2533 +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
2534 +index 98d048630ad2f..3530d1a5fc98e 100644
2535 +--- a/net/mac80211/tx.c
2536 ++++ b/net/mac80211/tx.c
2537 +@@ -593,10 +593,13 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
2538 + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
2539 + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
2540 +
2541 +- if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT))
2542 ++ if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) {
2543 + tx->key = NULL;
2544 +- else if (tx->sta &&
2545 +- (key = rcu_dereference(tx->sta->ptk[tx->sta->ptk_idx])))
2546 ++ return TX_CONTINUE;
2547 ++ }
2548 ++
2549 ++ if (tx->sta &&
2550 ++ (key = rcu_dereference(tx->sta->ptk[tx->sta->ptk_idx])))
2551 + tx->key = key;
2552 + else if (ieee80211_is_group_privacy_action(tx->skb) &&
2553 + (key = rcu_dereference(tx->sdata->default_multicast_key)))
2554 +@@ -657,6 +660,9 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
2555 + if (!skip_hw && tx->key &&
2556 + tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
2557 + info->control.hw_key = &tx->key->conf;
2558 ++ } else if (!ieee80211_is_mgmt(hdr->frame_control) && tx->sta &&
2559 ++ test_sta_flag(tx->sta, WLAN_STA_USES_ENCRYPTION)) {
2560 ++ return TX_DROP;
2561 + }
2562 +
2563 + return TX_CONTINUE;
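
The mac80211 hunks above introduce WLAN_STA_USES_ENCRYPTION and make the key-selection hook drop non-management frames that would otherwise leave in the clear toward a station that has ever had a pairwise key. A toy version of that verdict logic, heavily simplified and with illustrative names:

    #include <stdbool.h>
    #include <stdio.h>

    enum tx_verdict { TX_CONTINUE, TX_DROP };

    struct sta { bool uses_encryption; };  /* WLAN_STA_USES_ENCRYPTION stand-in */

    static enum tx_verdict select_key(const struct sta *sta, bool have_key,
                                      bool is_mgmt)
    {
        if (have_key)
            return TX_CONTINUE;
        /* No usable key: refuse to send data frames in plaintext to a
         * station that has ever had a pairwise key installed. */
        if (sta && sta->uses_encryption && !is_mgmt)
            return TX_DROP;
        return TX_CONTINUE;
    }

    int main(void)
    {
        struct sta sta = { .uses_encryption = true };

        printf("%s\n", select_key(&sta, false, false) == TX_DROP
                           ? "plaintext data frame dropped"
                           : "frame sent");
        return 0;
    }
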
2564 +diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
2565 +index 64e69d6683cab..93fee41060192 100644
2566 +--- a/net/netfilter/nft_exthdr.c
2567 ++++ b/net/netfilter/nft_exthdr.c
2568 +@@ -137,7 +137,6 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
2569 + unsigned int i, optl, tcphdr_len, offset;
2570 + struct tcphdr *tcph;
2571 + u8 *opt;
2572 +- u32 src;
2573 +
2574 + tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len);
2575 + if (!tcph)
2576 +@@ -146,7 +145,6 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
2577 + opt = (u8 *)tcph;
2578 + for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) {
2579 + union {
2580 +- u8 octet;
2581 + __be16 v16;
2582 + __be32 v32;
2583 + } old, new;
2584 +@@ -167,13 +165,13 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
2585 + if (!tcph)
2586 + return;
2587 +
2588 +- src = regs->data[priv->sreg];
2589 + offset = i + priv->offset;
2590 +
2591 + switch (priv->len) {
2592 + case 2:
2593 + old.v16 = get_unaligned((u16 *)(opt + offset));
2594 +- new.v16 = src;
2595 ++ new.v16 = (__force __be16)nft_reg_load16(
2596 ++ &regs->data[priv->sreg]);
2597 +
2598 + switch (priv->type) {
2599 + case TCPOPT_MSS:
2600 +@@ -191,7 +189,7 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
2601 + old.v16, new.v16, false);
2602 + break;
2603 + case 4:
2604 +- new.v32 = src;
2605 ++ new.v32 = regs->data[priv->sreg];
2606 + old.v32 = get_unaligned((u32 *)(opt + offset));
2607 +
2608 + if (old.v32 == new.v32)
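
The nft_exthdr fix above loads 16-bit option values with nft_reg_load16() instead of truncating the 32-bit register. The distinction matters on big-endian hosts, where taking the first 16 bits of the register and truncating its 32-bit value select different halves. A sketch that makes this concrete, with a plain memcpy standing in for the helper:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* A 16-bit value stored in the first bytes of a 32-bit
         * register (0x05b4 = MSS 1460). */
        uint8_t reg_bytes[4] = { 0x05, 0xb4, 0x00, 0x00 };
        uint32_t reg;
        uint16_t first16, truncated;

        memcpy(&reg, reg_bytes, sizeof(reg));

        memcpy(&first16, reg_bytes, sizeof(first16)); /* first 16 bits  */
        truncated = (uint16_t)reg;                    /* the buggy cast */

        /* On little-endian both reads return the same bits; on
         * big-endian the truncation grabs the wrong half of the
         * register, which is the breakage the patch fixes. */
        printf("first16 = 0x%04x, truncated = 0x%04x\n", first16, truncated);
        return 0;
    }
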
2609 +diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
2610 +index cc70d651d13e0..e34979fcefd22 100644
2611 +--- a/net/vmw_vsock/virtio_transport.c
2612 ++++ b/net/vmw_vsock/virtio_transport.c
2613 +@@ -373,11 +373,14 @@ static void virtio_vsock_event_fill(struct virtio_vsock *vsock)
2614 +
2615 + static void virtio_vsock_reset_sock(struct sock *sk)
2616 + {
2617 +- lock_sock(sk);
2618 ++ /* vmci_transport.c doesn't take sk_lock here either. At least we're
2619 ++ * under vsock_table_lock so the sock cannot disappear while we're
2620 ++ * executing.
2621 ++ */
2622 ++
2623 + sk->sk_state = TCP_CLOSE;
2624 + sk->sk_err = ECONNRESET;
2625 + sk->sk_error_report(sk);
2626 +- release_sock(sk);
2627 + }
2628 +
2629 + static void virtio_vsock_update_guest_cid(struct virtio_vsock *vsock)
2630 +diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
2631 +index 6099a9f1cb3d6..ff263ad19230a 100644
2632 +--- a/sound/pci/hda/hda_generic.c
2633 ++++ b/sound/pci/hda/hda_generic.c
2634 +@@ -3470,7 +3470,7 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol,
2635 + struct hda_gen_spec *spec = codec->spec;
2636 + const struct hda_input_mux *imux;
2637 + struct nid_path *path;
2638 +- int i, adc_idx, err = 0;
2639 ++ int i, adc_idx, ret, err = 0;
2640 +
2641 + imux = &spec->input_mux;
2642 + adc_idx = kcontrol->id.index;
2643 +@@ -3480,9 +3480,13 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol,
2644 + if (!path || !path->ctls[type])
2645 + continue;
2646 + kcontrol->private_value = path->ctls[type];
2647 +- err = func(kcontrol, ucontrol);
2648 +- if (err < 0)
2649 ++ ret = func(kcontrol, ucontrol);
2650 ++ if (ret < 0) {
2651 ++ err = ret;
2652 + break;
2653 ++ }
2654 ++ if (ret > 0)
2655 ++ err = 1;
2656 + }
2657 + mutex_unlock(&codec->control_mutex);
2658 + if (err >= 0 && spec->cap_sync_hook)
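
cap_put_caller() above fans one control write out to several paths; the fix keeps the ALSA "changed" status (a return of 1) sticky across iterations instead of letting a later call's 0 overwrite it, while still bailing out on the first error. A userspace sketch of that aggregation rule; put_path() and its cache are purely illustrative:

    #include <stdio.h>

    /* ALSA put() convention: <0 error, 0 no change, 1 value changed. */
    static int put_path(int path, int val)
    {
        static int cached[3];

        if (path < 0 || path >= 3)
            return -1;
        if (cached[path] == val)
            return 0;
        cached[path] = val;
        return 1;
    }

    static int put_all(int val)
    {
        int err = 0;

        for (int path = 0; path < 3; path++) {
            int ret = put_path(path, val);

            if (ret < 0) { err = ret; break; }
            if (ret > 0) err = 1;   /* keep "changed" sticky */
        }
        return err;
    }

    int main(void)
    {
        printf("%d\n", put_all(5));  /* 1: everything changed      */
        printf("%d\n", put_all(5));  /* 0: second write is a no-op */
        return 0;
    }
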
2659 +diff --git a/sound/soc/codecs/cs42l42.c b/sound/soc/codecs/cs42l42.c
2660 +index fddfd227a9c0e..4cb3e11c66af7 100644
2661 +--- a/sound/soc/codecs/cs42l42.c
2662 ++++ b/sound/soc/codecs/cs42l42.c
2663 +@@ -404,7 +404,7 @@ static const struct regmap_config cs42l42_regmap = {
2664 + .cache_type = REGCACHE_RBTREE,
2665 + };
2666 +
2667 +-static DECLARE_TLV_DB_SCALE(adc_tlv, -9600, 100, false);
2668 ++static DECLARE_TLV_DB_SCALE(adc_tlv, -9700, 100, true);
2669 + static DECLARE_TLV_DB_SCALE(mixer_tlv, -6300, 100, true);
2670 +
2671 + static const char * const cs42l42_hpf_freq_text[] = {
2672 +@@ -424,34 +424,23 @@ static SOC_ENUM_SINGLE_DECL(cs42l42_wnf3_freq_enum, CS42L42_ADC_WNF_HPF_CTL,
2673 + CS42L42_ADC_WNF_CF_SHIFT,
2674 + cs42l42_wnf3_freq_text);
2675 +
2676 +-static const char * const cs42l42_wnf05_freq_text[] = {
2677 +- "280Hz", "315Hz", "350Hz", "385Hz",
2678 +- "420Hz", "455Hz", "490Hz", "525Hz"
2679 +-};
2680 +-
2681 +-static SOC_ENUM_SINGLE_DECL(cs42l42_wnf05_freq_enum, CS42L42_ADC_WNF_HPF_CTL,
2682 +- CS42L42_ADC_WNF_CF_SHIFT,
2683 +- cs42l42_wnf05_freq_text);
2684 +-
2685 + static const struct snd_kcontrol_new cs42l42_snd_controls[] = {
2686 + /* ADC Volume and Filter Controls */
2687 + SOC_SINGLE("ADC Notch Switch", CS42L42_ADC_CTL,
2688 +- CS42L42_ADC_NOTCH_DIS_SHIFT, true, false),
2689 ++ CS42L42_ADC_NOTCH_DIS_SHIFT, true, true),
2690 + SOC_SINGLE("ADC Weak Force Switch", CS42L42_ADC_CTL,
2691 + CS42L42_ADC_FORCE_WEAK_VCM_SHIFT, true, false),
2692 + SOC_SINGLE("ADC Invert Switch", CS42L42_ADC_CTL,
2693 + CS42L42_ADC_INV_SHIFT, true, false),
2694 + SOC_SINGLE("ADC Boost Switch", CS42L42_ADC_CTL,
2695 + CS42L42_ADC_DIG_BOOST_SHIFT, true, false),
2696 +- SOC_SINGLE_SX_TLV("ADC Volume", CS42L42_ADC_VOLUME,
2697 +- CS42L42_ADC_VOL_SHIFT, 0xA0, 0x6C, adc_tlv),
2698 ++ SOC_SINGLE_S8_TLV("ADC Volume", CS42L42_ADC_VOLUME, -97, 12, adc_tlv),
2699 + SOC_SINGLE("ADC WNF Switch", CS42L42_ADC_WNF_HPF_CTL,
2700 + CS42L42_ADC_WNF_EN_SHIFT, true, false),
2701 + SOC_SINGLE("ADC HPF Switch", CS42L42_ADC_WNF_HPF_CTL,
2702 + CS42L42_ADC_HPF_EN_SHIFT, true, false),
2703 + SOC_ENUM("HPF Corner Freq", cs42l42_hpf_freq_enum),
2704 + SOC_ENUM("WNF 3dB Freq", cs42l42_wnf3_freq_enum),
2705 +- SOC_ENUM("WNF 05dB Freq", cs42l42_wnf05_freq_enum),
2706 +
2707 + /* DAC Volume and Filter Controls */
2708 + SOC_SINGLE("DACA Invert Switch", CS42L42_DAC_CTL1,
2709 +@@ -670,15 +659,6 @@ static int cs42l42_pll_config(struct snd_soc_component *component)
2710 + CS42L42_FSYNC_PULSE_WIDTH_MASK,
2711 + CS42L42_FRAC1_VAL(fsync - 1) <<
2712 + CS42L42_FSYNC_PULSE_WIDTH_SHIFT);
2713 +- snd_soc_component_update_bits(component,
2714 +- CS42L42_ASP_FRM_CFG,
2715 +- CS42L42_ASP_5050_MASK,
2716 +- CS42L42_ASP_5050_MASK);
2717 +- /* Set the frame delay to 1.0 SCLK clocks */
2718 +- snd_soc_component_update_bits(component, CS42L42_ASP_FRM_CFG,
2719 +- CS42L42_ASP_FSD_MASK,
2720 +- CS42L42_ASP_FSD_1_0 <<
2721 +- CS42L42_ASP_FSD_SHIFT);
2722 + /* Set the sample rates (96k or lower) */
2723 + snd_soc_component_update_bits(component, CS42L42_FS_RATE_EN,
2724 + CS42L42_FS_EN_MASK,
2725 +@@ -774,7 +754,18 @@ static int cs42l42_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
2726 + /* interface format */
2727 + switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
2728 + case SND_SOC_DAIFMT_I2S:
2729 +- case SND_SOC_DAIFMT_LEFT_J:
2730 ++ /*
2731 ++ * 5050 mode, frame starts on falling edge of LRCLK,
2732 ++ * frame delayed by 1.0 SCLKs
2733 ++ */
2734 ++ snd_soc_component_update_bits(component,
2735 ++ CS42L42_ASP_FRM_CFG,
2736 ++ CS42L42_ASP_STP_MASK |
2737 ++ CS42L42_ASP_5050_MASK |
2738 ++ CS42L42_ASP_FSD_MASK,
2739 ++ CS42L42_ASP_5050_MASK |
2740 ++ (CS42L42_ASP_FSD_1_0 <<
2741 ++ CS42L42_ASP_FSD_SHIFT));
2742 + break;
2743 + default:
2744 + return -EINVAL;
2745 +diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
2746 +index be773101d8760..682ee41ec75c9 100644
2747 +--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
2748 ++++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
2749 +@@ -135,7 +135,7 @@ static void sst_fill_alloc_params(struct snd_pcm_substream *substream,
2750 + snd_pcm_uframes_t period_size;
2751 + ssize_t periodbytes;
2752 + ssize_t buffer_bytes = snd_pcm_lib_buffer_bytes(substream);
2753 +- u32 buffer_addr = virt_to_phys(substream->dma_buffer.area);
2754 ++ u32 buffer_addr = virt_to_phys(substream->runtime->dma_area);
2755 +
2756 + channels = substream->runtime->channels;
2757 + period_size = substream->runtime->period_size;
2758 +@@ -241,7 +241,6 @@ static int sst_platform_alloc_stream(struct snd_pcm_substream *substream,
2759 + /* set codec params and inform SST driver the same */
2760 + sst_fill_pcm_params(substream, &param);
2761 + sst_fill_alloc_params(substream, &alloc_params);
2762 +- substream->runtime->dma_area = substream->dma_buffer.area;
2763 + str_params.sparams = param;
2764 + str_params.aparams = alloc_params;
2765 + str_params.codec = SST_CODEC_TYPE_PCM;