
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.10 commit in: /
Date: Wed, 01 Dec 2021 12:49:22
Message-Id: 1638362947.13a0ba67243adf18e654cb01e36e3016691fbdfc.mpagano@gentoo
1 commit: 13a0ba67243adf18e654cb01e36e3016691fbdfc
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Dec 1 12:49:07 2021 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Dec 1 12:49:07 2021 +0000
6 URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=13a0ba67243adf18e654cb01e36e3016691fbdfc
7
8 Linux patch 5.10.83
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1082_linux-5.10.83.patch | 5729 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 5733 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index db2b4487..7050b7a7 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -371,6 +371,10 @@ Patch: 1081_linux-5.10.82.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.10.82
23
24 +Patch: 1082_linux-5.10.83.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.10.83
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1082_linux-5.10.83.patch b/1082_linux-5.10.83.patch
33 new file mode 100644
34 index 00000000..3905baee
35 --- /dev/null
36 +++ b/1082_linux-5.10.83.patch
37 @@ -0,0 +1,5729 @@
38 +diff --git a/Documentation/networking/ipvs-sysctl.rst b/Documentation/networking/ipvs-sysctl.rst
39 +index 2afccc63856ee..1cfbf1add2fc9 100644
40 +--- a/Documentation/networking/ipvs-sysctl.rst
41 ++++ b/Documentation/networking/ipvs-sysctl.rst
42 +@@ -37,8 +37,7 @@ conn_reuse_mode - INTEGER
43 +
44 + 0: disable any special handling on port reuse. The new
45 + connection will be delivered to the same real server that was
46 +- servicing the previous connection. This will effectively
47 +- disable expire_nodest_conn.
48 ++ servicing the previous connection.
49 +
50 + bit 1: enable rescheduling of new connections when it is safe.
51 + That is, whenever expire_nodest_conn and for TCP sockets, when
52 +diff --git a/Makefile b/Makefile
53 +index 84b15766ad66f..4646baabfe783 100644
54 +--- a/Makefile
55 ++++ b/Makefile
56 +@@ -1,7 +1,7 @@
57 + # SPDX-License-Identifier: GPL-2.0
58 + VERSION = 5
59 + PATCHLEVEL = 10
60 +-SUBLEVEL = 82
61 ++SUBLEVEL = 83
62 + EXTRAVERSION =
63 + NAME = Dare mighty things
64 +
65 +diff --git a/arch/arm/boot/dts/bcm2711.dtsi b/arch/arm/boot/dts/bcm2711.dtsi
66 +index 398ecd7b9b68b..4ade854bdcdaf 100644
67 +--- a/arch/arm/boot/dts/bcm2711.dtsi
68 ++++ b/arch/arm/boot/dts/bcm2711.dtsi
69 +@@ -480,11 +480,17 @@
70 + #address-cells = <3>;
71 + #interrupt-cells = <1>;
72 + #size-cells = <2>;
73 +- interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
74 ++ interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
75 + <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
76 + interrupt-names = "pcie", "msi";
77 + interrupt-map-mask = <0x0 0x0 0x0 0x7>;
78 + interrupt-map = <0 0 0 1 &gicv2 GIC_SPI 143
79 ++ IRQ_TYPE_LEVEL_HIGH>,
80 ++ <0 0 0 2 &gicv2 GIC_SPI 144
81 ++ IRQ_TYPE_LEVEL_HIGH>,
82 ++ <0 0 0 3 &gicv2 GIC_SPI 145
83 ++ IRQ_TYPE_LEVEL_HIGH>,
84 ++ <0 0 0 4 &gicv2 GIC_SPI 146
85 + IRQ_TYPE_LEVEL_HIGH>;
86 + msi-controller;
87 + msi-parent = <&pcie0>;
88 +diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
89 +index 72b0df6910bd5..9fdad20c40d17 100644
90 +--- a/arch/arm/boot/dts/bcm5301x.dtsi
91 ++++ b/arch/arm/boot/dts/bcm5301x.dtsi
92 +@@ -242,6 +242,8 @@
93 +
94 + gpio-controller;
95 + #gpio-cells = <2>;
96 ++ interrupt-controller;
97 ++ #interrupt-cells = <2>;
98 + };
99 +
100 + pcie0: pcie@12000 {
101 +@@ -408,7 +410,7 @@
102 + i2c0: i2c@18009000 {
103 + compatible = "brcm,iproc-i2c";
104 + reg = <0x18009000 0x50>;
105 +- interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
106 ++ interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
107 + #address-cells = <1>;
108 + #size-cells = <0>;
109 + clock-frequency = <100000>;
110 +diff --git a/arch/arm/mach-socfpga/core.h b/arch/arm/mach-socfpga/core.h
111 +index fc2608b18a0d0..18f01190dcfd4 100644
112 +--- a/arch/arm/mach-socfpga/core.h
113 ++++ b/arch/arm/mach-socfpga/core.h
114 +@@ -33,7 +33,7 @@ extern void __iomem *sdr_ctl_base_addr;
115 + u32 socfpga_sdram_self_refresh(u32 sdr_base);
116 + extern unsigned int socfpga_sdram_self_refresh_sz;
117 +
118 +-extern char secondary_trampoline, secondary_trampoline_end;
119 ++extern char secondary_trampoline[], secondary_trampoline_end[];
120 +
121 + extern unsigned long socfpga_cpu1start_addr;
122 +
123 +diff --git a/arch/arm/mach-socfpga/platsmp.c b/arch/arm/mach-socfpga/platsmp.c
124 +index fbb80b883e5dd..201191cf68f32 100644
125 +--- a/arch/arm/mach-socfpga/platsmp.c
126 ++++ b/arch/arm/mach-socfpga/platsmp.c
127 +@@ -20,14 +20,14 @@
128 +
129 + static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
130 + {
131 +- int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
132 ++ int trampoline_size = secondary_trampoline_end - secondary_trampoline;
133 +
134 + if (socfpga_cpu1start_addr) {
135 + /* This will put CPU #1 into reset. */
136 + writel(RSTMGR_MPUMODRST_CPU1,
137 + rst_manager_base_addr + SOCFPGA_RSTMGR_MODMPURST);
138 +
139 +- memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
140 ++ memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);
141 +
142 + writel(__pa_symbol(secondary_startup),
143 + sys_manager_base_addr + (socfpga_cpu1start_addr & 0x000000ff));
144 +@@ -45,12 +45,12 @@ static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
145 +
146 + static int socfpga_a10_boot_secondary(unsigned int cpu, struct task_struct *idle)
147 + {
148 +- int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
149 ++ int trampoline_size = secondary_trampoline_end - secondary_trampoline;
150 +
151 + if (socfpga_cpu1start_addr) {
152 + writel(RSTMGR_MPUMODRST_CPU1, rst_manager_base_addr +
153 + SOCFPGA_A10_RSTMGR_MODMPURST);
154 +- memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
155 ++ memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);
156 +
157 + writel(__pa_symbol(secondary_startup),
158 + sys_manager_base_addr + (socfpga_cpu1start_addr & 0x00000fff));
159 +diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
160 +index 94a748e95231b..23d756fe0fd6c 100644
161 +--- a/arch/mips/Kconfig
162 ++++ b/arch/mips/Kconfig
163 +@@ -3189,7 +3189,7 @@ config STACKTRACE_SUPPORT
164 + config PGTABLE_LEVELS
165 + int
166 + default 4 if PAGE_SIZE_4KB && MIPS_VA_BITS_48
167 +- default 3 if 64BIT && !PAGE_SIZE_64KB
168 ++ default 3 if 64BIT && (!PAGE_SIZE_64KB || MIPS_VA_BITS_48)
169 + default 2
170 +
171 + config MIPS_AUTO_PFN_OFFSET
172 +diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
173 +index 067cb3eb16141..d120201910acf 100644
174 +--- a/arch/mips/kernel/cpu-probe.c
175 ++++ b/arch/mips/kernel/cpu-probe.c
176 +@@ -1721,8 +1721,6 @@ static inline void decode_cpucfg(struct cpuinfo_mips *c)
177 +
178 + static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
179 + {
180 +- decode_configs(c);
181 +-
182 + /* All Loongson processors covered here define ExcCode 16 as GSExc. */
183 + c->options |= MIPS_CPU_GSEXCEX;
184 +
185 +@@ -1783,6 +1781,8 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
186 + panic("Unknown Loongson Processor ID!");
187 + break;
188 + }
189 ++
190 ++ decode_configs(c);
191 + }
192 + #else
193 + static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) { }
194 +diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
195 +index 3d208afd15bc6..2769eb991f58d 100644
196 +--- a/arch/parisc/kernel/vmlinux.lds.S
197 ++++ b/arch/parisc/kernel/vmlinux.lds.S
198 +@@ -57,8 +57,6 @@ SECTIONS
199 + {
200 + . = KERNEL_BINARY_TEXT_START;
201 +
202 +- _stext = .; /* start of kernel text, includes init code & data */
203 +-
204 + __init_begin = .;
205 + HEAD_TEXT_SECTION
206 + MLONGCALL_DISCARD(INIT_TEXT_SECTION(8))
207 +@@ -82,6 +80,7 @@ SECTIONS
208 + /* freed after init ends here */
209 +
210 + _text = .; /* Text and read-only data */
211 ++ _stext = .;
212 + MLONGCALL_KEEP(INIT_TEXT_SECTION(8))
213 + .text ALIGN(PAGE_SIZE) : {
214 + TEXT_TEXT
215 +diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h
216 +index f8e3d15ddf694..abb057a86739b 100644
217 +--- a/arch/powerpc/kernel/head_32.h
218 ++++ b/arch/powerpc/kernel/head_32.h
219 +@@ -333,11 +333,11 @@ label:
220 + mfspr r1, SPRN_SPRG_THREAD
221 + lwz r1, TASK_CPU - THREAD(r1)
222 + slwi r1, r1, 3
223 +- addis r1, r1, emergency_ctx@ha
224 ++ addis r1, r1, emergency_ctx-PAGE_OFFSET@ha
225 + #else
226 +- lis r1, emergency_ctx@ha
227 ++ lis r1, emergency_ctx-PAGE_OFFSET@ha
228 + #endif
229 +- lwz r1, emergency_ctx@l(r1)
230 ++ lwz r1, emergency_ctx-PAGE_OFFSET@l(r1)
231 + addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE
232 + EXCEPTION_PROLOG_2
233 + SAVE_NVGPRS(r11)
234 +diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
235 +index 4621905bdd9ea..121fca2bcd82b 100644
236 +--- a/arch/powerpc/kvm/book3s_hv_builtin.c
237 ++++ b/arch/powerpc/kvm/book3s_hv_builtin.c
238 +@@ -867,6 +867,7 @@ static void flush_guest_tlb(struct kvm *kvm)
239 + "r" (0) : "memory");
240 + }
241 + asm volatile("ptesync": : :"memory");
242 ++ // POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
243 + asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory");
244 + } else {
245 + for (set = 0; set < kvm->arch.tlb_sets; ++set) {
246 +@@ -877,7 +878,9 @@ static void flush_guest_tlb(struct kvm *kvm)
247 + rb += PPC_BIT(51); /* increment set number */
248 + }
249 + asm volatile("ptesync": : :"memory");
250 +- asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
251 ++ // POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
252 ++ if (cpu_has_feature(CPU_FTR_ARCH_300))
253 ++ asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
254 + }
255 + }
256 +
257 +diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
258 +index 18205f851c247..fabaedddc90cb 100644
259 +--- a/arch/s390/mm/pgtable.c
260 ++++ b/arch/s390/mm/pgtable.c
261 +@@ -988,6 +988,7 @@ EXPORT_SYMBOL(get_guest_storage_key);
262 + int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
263 + unsigned long *oldpte, unsigned long *oldpgste)
264 + {
265 ++ struct vm_area_struct *vma;
266 + unsigned long pgstev;
267 + spinlock_t *ptl;
268 + pgste_t pgste;
269 +@@ -997,6 +998,10 @@ int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
270 + WARN_ON_ONCE(orc > ESSA_MAX);
271 + if (unlikely(orc > ESSA_MAX))
272 + return -EINVAL;
273 ++
274 ++ vma = find_vma(mm, hva);
275 ++ if (!vma || hva < vma->vm_start || is_vm_hugetlb_page(vma))
276 ++ return -EFAULT;
277 + ptep = get_locked_pte(mm, hva, &ptl);
278 + if (unlikely(!ptep))
279 + return -EFAULT;
280 +@@ -1089,10 +1094,14 @@ EXPORT_SYMBOL(pgste_perform_essa);
281 + int set_pgste_bits(struct mm_struct *mm, unsigned long hva,
282 + unsigned long bits, unsigned long value)
283 + {
284 ++ struct vm_area_struct *vma;
285 + spinlock_t *ptl;
286 + pgste_t new;
287 + pte_t *ptep;
288 +
289 ++ vma = find_vma(mm, hva);
290 ++ if (!vma || hva < vma->vm_start || is_vm_hugetlb_page(vma))
291 ++ return -EFAULT;
292 + ptep = get_locked_pte(mm, hva, &ptl);
293 + if (unlikely(!ptep))
294 + return -EFAULT;
295 +@@ -1117,9 +1126,13 @@ EXPORT_SYMBOL(set_pgste_bits);
296 + */
297 + int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep)
298 + {
299 ++ struct vm_area_struct *vma;
300 + spinlock_t *ptl;
301 + pte_t *ptep;
302 +
303 ++ vma = find_vma(mm, hva);
304 ++ if (!vma || hva < vma->vm_start || is_vm_hugetlb_page(vma))
305 ++ return -EFAULT;
306 + ptep = get_locked_pte(mm, hva, &ptl);
307 + if (unlikely(!ptep))
308 + return -EFAULT;
309 +diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
310 +index e3dd64aa43737..18bd428f11ac0 100644
311 +--- a/drivers/acpi/property.c
312 ++++ b/drivers/acpi/property.c
313 +@@ -1110,15 +1110,10 @@ struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode)
314 + /* All data nodes have parent pointer so just return that */
315 + return to_acpi_data_node(fwnode)->parent;
316 + } else if (is_acpi_device_node(fwnode)) {
317 +- acpi_handle handle, parent_handle;
318 ++ struct device *dev = to_acpi_device_node(fwnode)->dev.parent;
319 +
320 +- handle = to_acpi_device_node(fwnode)->handle;
321 +- if (ACPI_SUCCESS(acpi_get_parent(handle, &parent_handle))) {
322 +- struct acpi_device *adev;
323 +-
324 +- if (!acpi_bus_get_device(parent_handle, &adev))
325 +- return acpi_fwnode_handle(adev);
326 +- }
327 ++ if (dev)
328 ++ return acpi_fwnode_handle(to_acpi_device(dev));
329 + }
330 +
331 + return NULL;
332 +diff --git a/drivers/android/binder.c b/drivers/android/binder.c
333 +index 8f14ad7ab5bd8..a1255971e50ce 100644
334 +--- a/drivers/android/binder.c
335 ++++ b/drivers/android/binder.c
336 +@@ -3091,7 +3091,7 @@ static void binder_transaction(struct binder_proc *proc,
337 + t->from = thread;
338 + else
339 + t->from = NULL;
340 +- t->sender_euid = proc->cred->euid;
341 ++ t->sender_euid = task_euid(proc->tsk);
342 + t->to_proc = target_proc;
343 + t->to_thread = target_thread;
344 + t->code = tr->code;
345 +diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
346 +index 10078a7435644..ff7b62597b525 100644
347 +--- a/drivers/block/xen-blkfront.c
348 ++++ b/drivers/block/xen-blkfront.c
349 +@@ -80,6 +80,7 @@ enum blkif_state {
350 + BLKIF_STATE_DISCONNECTED,
351 + BLKIF_STATE_CONNECTED,
352 + BLKIF_STATE_SUSPENDED,
353 ++ BLKIF_STATE_ERROR,
354 + };
355 +
356 + struct grant {
357 +@@ -89,6 +90,7 @@ struct grant {
358 + };
359 +
360 + enum blk_req_status {
361 ++ REQ_PROCESSING,
362 + REQ_WAITING,
363 + REQ_DONE,
364 + REQ_ERROR,
365 +@@ -543,10 +545,10 @@ static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
366 +
367 + id = get_id_from_freelist(rinfo);
368 + rinfo->shadow[id].request = req;
369 +- rinfo->shadow[id].status = REQ_WAITING;
370 ++ rinfo->shadow[id].status = REQ_PROCESSING;
371 + rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID;
372 +
373 +- (*ring_req)->u.rw.id = id;
374 ++ rinfo->shadow[id].req.u.rw.id = id;
375 +
376 + return id;
377 + }
378 +@@ -554,11 +556,12 @@ static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
379 + static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
380 + {
381 + struct blkfront_info *info = rinfo->dev_info;
382 +- struct blkif_request *ring_req;
383 ++ struct blkif_request *ring_req, *final_ring_req;
384 + unsigned long id;
385 +
386 + /* Fill out a communications ring structure. */
387 +- id = blkif_ring_get_request(rinfo, req, &ring_req);
388 ++ id = blkif_ring_get_request(rinfo, req, &final_ring_req);
389 ++ ring_req = &rinfo->shadow[id].req;
390 +
391 + ring_req->operation = BLKIF_OP_DISCARD;
392 + ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
393 +@@ -569,8 +572,9 @@ static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_inf
394 + else
395 + ring_req->u.discard.flag = 0;
396 +
397 +- /* Keep a private copy so we can reissue requests when recovering. */
398 +- rinfo->shadow[id].req = *ring_req;
399 ++ /* Copy the request to the ring page. */
400 ++ *final_ring_req = *ring_req;
401 ++ rinfo->shadow[id].status = REQ_WAITING;
402 +
403 + return 0;
404 + }
405 +@@ -703,6 +707,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
406 + {
407 + struct blkfront_info *info = rinfo->dev_info;
408 + struct blkif_request *ring_req, *extra_ring_req = NULL;
409 ++ struct blkif_request *final_ring_req, *final_extra_ring_req = NULL;
410 + unsigned long id, extra_id = NO_ASSOCIATED_ID;
411 + bool require_extra_req = false;
412 + int i;
413 +@@ -747,7 +752,8 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
414 + }
415 +
416 + /* Fill out a communications ring structure. */
417 +- id = blkif_ring_get_request(rinfo, req, &ring_req);
418 ++ id = blkif_ring_get_request(rinfo, req, &final_ring_req);
419 ++ ring_req = &rinfo->shadow[id].req;
420 +
421 + num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
422 + num_grant = 0;
423 +@@ -798,7 +804,9 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
424 + ring_req->u.rw.nr_segments = num_grant;
425 + if (unlikely(require_extra_req)) {
426 + extra_id = blkif_ring_get_request(rinfo, req,
427 +- &extra_ring_req);
428 ++ &final_extra_ring_req);
429 ++ extra_ring_req = &rinfo->shadow[extra_id].req;
430 ++
431 + /*
432 + * Only the first request contains the scatter-gather
433 + * list.
434 +@@ -840,10 +848,13 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
435 + if (setup.segments)
436 + kunmap_atomic(setup.segments);
437 +
438 +- /* Keep a private copy so we can reissue requests when recovering. */
439 +- rinfo->shadow[id].req = *ring_req;
440 +- if (unlikely(require_extra_req))
441 +- rinfo->shadow[extra_id].req = *extra_ring_req;
442 ++ /* Copy request(s) to the ring page. */
443 ++ *final_ring_req = *ring_req;
444 ++ rinfo->shadow[id].status = REQ_WAITING;
445 ++ if (unlikely(require_extra_req)) {
446 ++ *final_extra_ring_req = *extra_ring_req;
447 ++ rinfo->shadow[extra_id].status = REQ_WAITING;
448 ++ }
449 +
450 + if (new_persistent_gnts)
451 + gnttab_free_grant_references(setup.gref_head);
452 +@@ -1415,8 +1426,8 @@ static enum blk_req_status blkif_rsp_to_req_status(int rsp)
453 + static int blkif_get_final_status(enum blk_req_status s1,
454 + enum blk_req_status s2)
455 + {
456 +- BUG_ON(s1 == REQ_WAITING);
457 +- BUG_ON(s2 == REQ_WAITING);
458 ++ BUG_ON(s1 < REQ_DONE);
459 ++ BUG_ON(s2 < REQ_DONE);
460 +
461 + if (s1 == REQ_ERROR || s2 == REQ_ERROR)
462 + return BLKIF_RSP_ERROR;
463 +@@ -1449,7 +1460,7 @@ static bool blkif_completion(unsigned long *id,
464 + s->status = blkif_rsp_to_req_status(bret->status);
465 +
466 + /* Wait the second response if not yet here. */
467 +- if (s2->status == REQ_WAITING)
468 ++ if (s2->status < REQ_DONE)
469 + return false;
470 +
471 + bret->status = blkif_get_final_status(s->status,
472 +@@ -1557,7 +1568,7 @@ static bool blkif_completion(unsigned long *id,
473 + static irqreturn_t blkif_interrupt(int irq, void *dev_id)
474 + {
475 + struct request *req;
476 +- struct blkif_response *bret;
477 ++ struct blkif_response bret;
478 + RING_IDX i, rp;
479 + unsigned long flags;
480 + struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
481 +@@ -1568,54 +1579,76 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
482 +
483 + spin_lock_irqsave(&rinfo->ring_lock, flags);
484 + again:
485 +- rp = rinfo->ring.sring->rsp_prod;
486 +- rmb(); /* Ensure we see queued responses up to 'rp'. */
487 ++ rp = READ_ONCE(rinfo->ring.sring->rsp_prod);
488 ++ virt_rmb(); /* Ensure we see queued responses up to 'rp'. */
489 ++ if (RING_RESPONSE_PROD_OVERFLOW(&rinfo->ring, rp)) {
490 ++ pr_alert("%s: illegal number of responses %u\n",
491 ++ info->gd->disk_name, rp - rinfo->ring.rsp_cons);
492 ++ goto err;
493 ++ }
494 +
495 + for (i = rinfo->ring.rsp_cons; i != rp; i++) {
496 + unsigned long id;
497 ++ unsigned int op;
498 ++
499 ++ RING_COPY_RESPONSE(&rinfo->ring, i, &bret);
500 ++ id = bret.id;
501 +
502 +- bret = RING_GET_RESPONSE(&rinfo->ring, i);
503 +- id = bret->id;
504 + /*
505 + * The backend has messed up and given us an id that we would
506 + * never have given to it (we stamp it up to BLK_RING_SIZE -
507 + * look in get_id_from_freelist.
508 + */
509 + if (id >= BLK_RING_SIZE(info)) {
510 +- WARN(1, "%s: response to %s has incorrect id (%ld)\n",
511 +- info->gd->disk_name, op_name(bret->operation), id);
512 +- /* We can't safely get the 'struct request' as
513 +- * the id is busted. */
514 +- continue;
515 ++ pr_alert("%s: response has incorrect id (%ld)\n",
516 ++ info->gd->disk_name, id);
517 ++ goto err;
518 + }
519 ++ if (rinfo->shadow[id].status != REQ_WAITING) {
520 ++ pr_alert("%s: response references no pending request\n",
521 ++ info->gd->disk_name);
522 ++ goto err;
523 ++ }
524 ++
525 ++ rinfo->shadow[id].status = REQ_PROCESSING;
526 + req = rinfo->shadow[id].request;
527 +
528 +- if (bret->operation != BLKIF_OP_DISCARD) {
529 ++ op = rinfo->shadow[id].req.operation;
530 ++ if (op == BLKIF_OP_INDIRECT)
531 ++ op = rinfo->shadow[id].req.u.indirect.indirect_op;
532 ++ if (bret.operation != op) {
533 ++ pr_alert("%s: response has wrong operation (%u instead of %u)\n",
534 ++ info->gd->disk_name, bret.operation, op);
535 ++ goto err;
536 ++ }
537 ++
538 ++ if (bret.operation != BLKIF_OP_DISCARD) {
539 + /*
540 + * We may need to wait for an extra response if the
541 + * I/O request is split in 2
542 + */
543 +- if (!blkif_completion(&id, rinfo, bret))
544 ++ if (!blkif_completion(&id, rinfo, &bret))
545 + continue;
546 + }
547 +
548 + if (add_id_to_freelist(rinfo, id)) {
549 + WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
550 +- info->gd->disk_name, op_name(bret->operation), id);
551 ++ info->gd->disk_name, op_name(bret.operation), id);
552 + continue;
553 + }
554 +
555 +- if (bret->status == BLKIF_RSP_OKAY)
556 ++ if (bret.status == BLKIF_RSP_OKAY)
557 + blkif_req(req)->error = BLK_STS_OK;
558 + else
559 + blkif_req(req)->error = BLK_STS_IOERR;
560 +
561 +- switch (bret->operation) {
562 ++ switch (bret.operation) {
563 + case BLKIF_OP_DISCARD:
564 +- if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
565 ++ if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
566 + struct request_queue *rq = info->rq;
567 +- printk(KERN_WARNING "blkfront: %s: %s op failed\n",
568 +- info->gd->disk_name, op_name(bret->operation));
569 ++
570 ++ pr_warn_ratelimited("blkfront: %s: %s op failed\n",
571 ++ info->gd->disk_name, op_name(bret.operation));
572 + blkif_req(req)->error = BLK_STS_NOTSUPP;
573 + info->feature_discard = 0;
574 + info->feature_secdiscard = 0;
575 +@@ -1625,15 +1658,15 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
576 + break;
577 + case BLKIF_OP_FLUSH_DISKCACHE:
578 + case BLKIF_OP_WRITE_BARRIER:
579 +- if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
580 +- printk(KERN_WARNING "blkfront: %s: %s op failed\n",
581 +- info->gd->disk_name, op_name(bret->operation));
582 ++ if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
583 ++ pr_warn_ratelimited("blkfront: %s: %s op failed\n",
584 ++ info->gd->disk_name, op_name(bret.operation));
585 + blkif_req(req)->error = BLK_STS_NOTSUPP;
586 + }
587 +- if (unlikely(bret->status == BLKIF_RSP_ERROR &&
588 ++ if (unlikely(bret.status == BLKIF_RSP_ERROR &&
589 + rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
590 +- printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
591 +- info->gd->disk_name, op_name(bret->operation));
592 ++ pr_warn_ratelimited("blkfront: %s: empty %s op failed\n",
593 ++ info->gd->disk_name, op_name(bret.operation));
594 + blkif_req(req)->error = BLK_STS_NOTSUPP;
595 + }
596 + if (unlikely(blkif_req(req)->error)) {
597 +@@ -1646,9 +1679,10 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
598 + fallthrough;
599 + case BLKIF_OP_READ:
600 + case BLKIF_OP_WRITE:
601 +- if (unlikely(bret->status != BLKIF_RSP_OKAY))
602 +- dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
603 +- "request: %x\n", bret->status);
604 ++ if (unlikely(bret.status != BLKIF_RSP_OKAY))
605 ++ dev_dbg_ratelimited(&info->xbdev->dev,
606 ++ "Bad return from blkdev data request: %#x\n",
607 ++ bret.status);
608 +
609 + break;
610 + default:
611 +@@ -1674,6 +1708,14 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
612 + spin_unlock_irqrestore(&rinfo->ring_lock, flags);
613 +
614 + return IRQ_HANDLED;
615 ++
616 ++ err:
617 ++ info->connected = BLKIF_STATE_ERROR;
618 ++
619 ++ spin_unlock_irqrestore(&rinfo->ring_lock, flags);
620 ++
621 ++ pr_alert("%s disabled for further use\n", info->gd->disk_name);
622 ++ return IRQ_HANDLED;
623 + }
624 +
625 +
626 +diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c
627 +index 9e44479f02842..a4e4aa9a35426 100644
628 +--- a/drivers/firmware/arm_scmi/scmi_pm_domain.c
629 ++++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c
630 +@@ -106,9 +106,7 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
631 + scmi_pd_data->domains = domains;
632 + scmi_pd_data->num_domains = num_domains;
633 +
634 +- of_genpd_add_provider_onecell(np, scmi_pd_data);
635 +-
636 +- return 0;
637 ++ return of_genpd_add_provider_onecell(np, scmi_pd_data);
638 + }
639 +
640 + static const struct scmi_device_id scmi_id_table[] = {
641 +diff --git a/drivers/firmware/smccc/soc_id.c b/drivers/firmware/smccc/soc_id.c
642 +index 581aa5e9b0778..dd7c3d5e8b0bb 100644
643 +--- a/drivers/firmware/smccc/soc_id.c
644 ++++ b/drivers/firmware/smccc/soc_id.c
645 +@@ -50,7 +50,7 @@ static int __init smccc_soc_init(void)
646 + arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
647 + ARM_SMCCC_ARCH_SOC_ID, &res);
648 +
649 +- if (res.a0 == SMCCC_RET_NOT_SUPPORTED) {
650 ++ if ((int)res.a0 == SMCCC_RET_NOT_SUPPORTED) {
651 + pr_info("ARCH_SOC_ID not implemented, skipping ....\n");
652 + return 0;
653 + }
654 +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
655 +index c7d6a677d86d8..bea451a39d601 100644
656 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
657 ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
658 +@@ -137,6 +137,11 @@ MODULE_FIRMWARE("amdgpu/green_sardine_rlc.bin");
659 + #define mmTCP_CHAN_STEER_5_ARCT 0x0b0c
660 + #define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX 0
661 +
662 ++#define mmGOLDEN_TSC_COUNT_UPPER_Renoir 0x0025
663 ++#define mmGOLDEN_TSC_COUNT_UPPER_Renoir_BASE_IDX 1
664 ++#define mmGOLDEN_TSC_COUNT_LOWER_Renoir 0x0026
665 ++#define mmGOLDEN_TSC_COUNT_LOWER_Renoir_BASE_IDX 1
666 ++
667 + enum ta_ras_gfx_subblock {
668 + /*CPC*/
669 + TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
670 +@@ -4147,19 +4152,38 @@ failed_kiq_read:
671 +
672 + static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
673 + {
674 +- uint64_t clock;
675 ++ uint64_t clock, clock_lo, clock_hi, hi_check;
676 +
677 +- amdgpu_gfx_off_ctrl(adev, false);
678 +- mutex_lock(&adev->gfx.gpu_clock_mutex);
679 +- if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) {
680 +- clock = gfx_v9_0_kiq_read_clock(adev);
681 +- } else {
682 +- WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
683 +- clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
684 +- ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
685 ++ switch (adev->asic_type) {
686 ++ case CHIP_RENOIR:
687 ++ preempt_disable();
688 ++ clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir);
689 ++ clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir);
690 ++ hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir);
691 ++ /* The SMUIO TSC clock frequency is 100MHz, which sets 32-bit carry over
692 ++ * roughly every 42 seconds.
693 ++ */
694 ++ if (hi_check != clock_hi) {
695 ++ clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir);
696 ++ clock_hi = hi_check;
697 ++ }
698 ++ preempt_enable();
699 ++ clock = clock_lo | (clock_hi << 32ULL);
700 ++ break;
701 ++ default:
702 ++ amdgpu_gfx_off_ctrl(adev, false);
703 ++ mutex_lock(&adev->gfx.gpu_clock_mutex);
704 ++ if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) {
705 ++ clock = gfx_v9_0_kiq_read_clock(adev);
706 ++ } else {
707 ++ WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
708 ++ clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
709 ++ ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
710 ++ }
711 ++ mutex_unlock(&adev->gfx.gpu_clock_mutex);
712 ++ amdgpu_gfx_off_ctrl(adev, true);
713 ++ break;
714 + }
715 +- mutex_unlock(&adev->gfx.gpu_clock_mutex);
716 +- amdgpu_gfx_off_ctrl(adev, true);
717 + return clock;
718 + }
719 +
720 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
721 +index d9525fbedad2d..a5b6f36fe1d72 100644
722 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
723 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
724 +@@ -1963,8 +1963,8 @@ static int dm_resume(void *handle)
725 +
726 + for (i = 0; i < dc_state->stream_count; i++) {
727 + dc_state->streams[i]->mode_changed = true;
728 +- for (j = 0; j < dc_state->stream_status->plane_count; j++) {
729 +- dc_state->stream_status->plane_states[j]->update_flags.raw
730 ++ for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
731 ++ dc_state->stream_status[i].plane_states[j]->update_flags.raw
732 + = 0xffffffff;
733 + }
734 + }
735 +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
736 +index cd41b2e6cc879..18502fd6ebaa0 100644
737 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
738 ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
739 +@@ -207,11 +207,13 @@ int
740 + gm200_acr_wpr_parse(struct nvkm_acr *acr)
741 + {
742 + const struct wpr_header *hdr = (void *)acr->wpr_fw->data;
743 ++ struct nvkm_acr_lsfw *lsfw;
744 +
745 + while (hdr->falcon_id != WPR_HEADER_V0_FALCON_ID_INVALID) {
746 + wpr_header_dump(&acr->subdev, hdr);
747 +- if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id))
748 +- return -ENOMEM;
749 ++ lsfw = nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id);
750 ++ if (IS_ERR(lsfw))
751 ++ return PTR_ERR(lsfw);
752 + }
753 +
754 + return 0;
755 +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
756 +index 80eb9d8dbc803..e5c8303a5b7b7 100644
757 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
758 ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
759 +@@ -161,11 +161,13 @@ int
760 + gp102_acr_wpr_parse(struct nvkm_acr *acr)
761 + {
762 + const struct wpr_header_v1 *hdr = (void *)acr->wpr_fw->data;
763 ++ struct nvkm_acr_lsfw *lsfw;
764 +
765 + while (hdr->falcon_id != WPR_HEADER_V1_FALCON_ID_INVALID) {
766 + wpr_header_v1_dump(&acr->subdev, hdr);
767 +- if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id))
768 +- return -ENOMEM;
769 ++ lsfw = nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id);
770 ++ if (IS_ERR(lsfw))
771 ++ return PTR_ERR(lsfw);
772 + }
773 +
774 + return 0;
775 +diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
776 +index cc74a3f3a07af..9006b9861c90c 100644
777 +--- a/drivers/gpu/drm/vc4/vc4_bo.c
778 ++++ b/drivers/gpu/drm/vc4/vc4_bo.c
779 +@@ -389,7 +389,7 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
780 +
781 + bo = kzalloc(sizeof(*bo), GFP_KERNEL);
782 + if (!bo)
783 +- return ERR_PTR(-ENOMEM);
784 ++ return NULL;
785 +
786 + bo->madv = VC4_MADV_WILLNEED;
787 + refcount_set(&bo->usecnt, 0);
788 +diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
789 +index b2719cf37aa52..c25274275258f 100644
790 +--- a/drivers/hid/wacom_wac.c
791 ++++ b/drivers/hid/wacom_wac.c
792 +@@ -2578,6 +2578,9 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
793 + return;
794 +
795 + switch (equivalent_usage) {
796 ++ case HID_DG_CONFIDENCE:
797 ++ wacom_wac->hid_data.confidence = value;
798 ++ break;
799 + case HID_GD_X:
800 + wacom_wac->hid_data.x = value;
801 + break;
802 +@@ -2610,7 +2613,8 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
803 + }
804 +
805 + if (usage->usage_index + 1 == field->report_count) {
806 +- if (equivalent_usage == wacom_wac->hid_data.last_slot_field)
807 ++ if (equivalent_usage == wacom_wac->hid_data.last_slot_field &&
808 ++ wacom_wac->hid_data.confidence)
809 + wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input);
810 + }
811 + }
812 +@@ -2625,6 +2629,8 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev,
813 +
814 + wacom_wac->is_invalid_bt_frame = false;
815 +
816 ++ hid_data->confidence = true;
817 ++
818 + for (i = 0; i < report->maxfield; i++) {
819 + struct hid_field *field = report->field[i];
820 + int j;
821 +diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
822 +index e3835407e8d23..8dea7cb298e69 100644
823 +--- a/drivers/hid/wacom_wac.h
824 ++++ b/drivers/hid/wacom_wac.h
825 +@@ -300,6 +300,7 @@ struct hid_data {
826 + bool tipswitch;
827 + bool barrelswitch;
828 + bool barrelswitch2;
829 ++ bool confidence;
830 + int x;
831 + int y;
832 + int pressure;
833 +diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
834 +index 5ecc0bc608ec6..fb61bdca4c2c1 100644
835 +--- a/drivers/iommu/amd/iommu_v2.c
836 ++++ b/drivers/iommu/amd/iommu_v2.c
837 +@@ -927,10 +927,8 @@ static int __init amd_iommu_v2_init(void)
838 + {
839 + int ret;
840 +
841 +- pr_info("AMD IOMMUv2 driver by Joerg Roedel <jroedel@××××.de>\n");
842 +-
843 + if (!amd_iommu_v2_supported()) {
844 +- pr_info("AMD IOMMUv2 functionality not available on this system\n");
845 ++ pr_info("AMD IOMMUv2 functionality not available on this system - This is not a bug.\n");
846 + /*
847 + * Load anyway to provide the symbols to other modules
848 + * which may use AMD IOMMUv2 optionally.
849 +@@ -947,6 +945,8 @@ static int __init amd_iommu_v2_init(void)
850 +
851 + amd_iommu_register_ppr_notifier(&ppr_nb);
852 +
853 ++ pr_info("AMD IOMMUv2 loaded and initialized\n");
854 ++
855 + return 0;
856 +
857 + out:
858 +diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c
859 +index d5d5d28d0b36a..2e5698fbc3a87 100644
860 +--- a/drivers/media/cec/core/cec-adap.c
861 ++++ b/drivers/media/cec/core/cec-adap.c
862 +@@ -1199,6 +1199,7 @@ void cec_received_msg_ts(struct cec_adapter *adap,
863 + if (abort)
864 + dst->rx_status |= CEC_RX_STATUS_FEATURE_ABORT;
865 + msg->flags = dst->flags;
866 ++ msg->sequence = dst->sequence;
867 + /* Remove it from the wait_queue */
868 + list_del_init(&data->list);
869 +
870 +diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
871 +index 20cbd71cba9d9..a4bd85b200a3e 100644
872 +--- a/drivers/mmc/host/sdhci-esdhc-imx.c
873 ++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
874 +@@ -263,7 +263,6 @@ static struct esdhc_soc_data usdhc_imx8qxp_data = {
875 + .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
876 + | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
877 + | ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
878 +- | ESDHC_FLAG_CQHCI
879 + | ESDHC_FLAG_STATE_LOST_IN_LPMODE
880 + | ESDHC_FLAG_CLK_RATE_LOST_IN_PM_RUNTIME,
881 + };
882 +@@ -272,7 +271,6 @@ static struct esdhc_soc_data usdhc_imx8mm_data = {
883 + .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
884 + | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
885 + | ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
886 +- | ESDHC_FLAG_CQHCI
887 + | ESDHC_FLAG_STATE_LOST_IN_LPMODE,
888 + };
889 +
890 +diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
891 +index 07d131fac7606..d42e86cdff12e 100644
892 +--- a/drivers/mmc/host/sdhci.c
893 ++++ b/drivers/mmc/host/sdhci.c
894 +@@ -772,7 +772,19 @@ static void sdhci_adma_table_pre(struct sdhci_host *host,
895 + len -= offset;
896 + }
897 +
898 +- BUG_ON(len > 65536);
899 ++ /*
900 ++ * The block layer forces a minimum segment size of PAGE_SIZE,
901 ++ * so 'len' can be too big here if PAGE_SIZE >= 64KiB. Write
902 ++ * multiple descriptors, noting that the ADMA table is sized
903 ++ * for 4KiB chunks anyway, so it will be big enough.
904 ++ */
905 ++ while (len > host->max_adma) {
906 ++ int n = 32 * 1024; /* 32KiB*/
907 ++
908 ++ __sdhci_adma_write_desc(host, &desc, addr, n, ADMA2_TRAN_VALID);
909 ++ addr += n;
910 ++ len -= n;
911 ++ }
912 +
913 + /* tran, valid */
914 + if (len)
915 +@@ -3948,6 +3960,7 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
916 + * descriptor for each segment, plus 1 for a nop end descriptor.
917 + */
918 + host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;
919 ++ host->max_adma = 65536;
920 +
921 + return host;
922 + }
923 +@@ -4611,10 +4624,12 @@ int sdhci_setup_host(struct sdhci_host *host)
924 + * be larger than 64 KiB though.
925 + */
926 + if (host->flags & SDHCI_USE_ADMA) {
927 +- if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
928 ++ if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) {
929 ++ host->max_adma = 65532; /* 32-bit alignment */
930 + mmc->max_seg_size = 65535;
931 +- else
932 ++ } else {
933 + mmc->max_seg_size = 65536;
934 ++ }
935 + } else {
936 + mmc->max_seg_size = mmc->max_req_size;
937 + }
938 +diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
939 +index 960fed78529e1..8b1650f37fbba 100644
940 +--- a/drivers/mmc/host/sdhci.h
941 ++++ b/drivers/mmc/host/sdhci.h
942 +@@ -338,7 +338,8 @@ struct sdhci_adma2_64_desc {
943 +
944 + /*
945 + * Maximum segments assuming a 512KiB maximum requisition size and a minimum
946 +- * 4KiB page size.
947 ++ * 4KiB page size. Note this also allows enough for multiple descriptors in
948 ++ * case of PAGE_SIZE >= 64KiB.
949 + */
950 + #define SDHCI_MAX_SEGS 128
951 +
952 +@@ -540,6 +541,7 @@ struct sdhci_host {
953 + unsigned int blocks; /* remaining PIO blocks */
954 +
955 + int sg_count; /* Mapped sg entries */
956 ++ int max_adma; /* Max. length in ADMA descriptor */
957 +
958 + void *adma_table; /* ADMA descriptor table */
959 + void *align_buffer; /* Bounce buffer */
960 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
961 +index e27af38f6b161..6e7da1dc2e8c3 100644
962 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
963 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
964 +@@ -679,9 +679,9 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
965 + roundup_size = ilog2(roundup_size);
966 +
967 + for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
968 +- tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
969 ++ tc_valid[i] = 1;
970 + tc_size[i] = roundup_size;
971 +- tc_offset[i] = rss_size * i;
972 ++ tc_offset[i] = (hdev->hw_tc_map & BIT(i)) ? rss_size * i : 0;
973 + }
974 +
975 + hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
976 +diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
977 +index ea85b06857fa2..90f5ec982d513 100644
978 +--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
979 ++++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
980 +@@ -719,12 +719,31 @@ static int iavf_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
981 + *
982 + * Change the ITR settings for a specific queue.
983 + **/
984 +-static void iavf_set_itr_per_queue(struct iavf_adapter *adapter,
985 +- struct ethtool_coalesce *ec, int queue)
986 ++static int iavf_set_itr_per_queue(struct iavf_adapter *adapter,
987 ++ struct ethtool_coalesce *ec, int queue)
988 + {
989 + struct iavf_ring *rx_ring = &adapter->rx_rings[queue];
990 + struct iavf_ring *tx_ring = &adapter->tx_rings[queue];
991 + struct iavf_q_vector *q_vector;
992 ++ u16 itr_setting;
993 ++
994 ++ itr_setting = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
995 ++
996 ++ if (ec->rx_coalesce_usecs != itr_setting &&
997 ++ ec->use_adaptive_rx_coalesce) {
998 ++ netif_info(adapter, drv, adapter->netdev,
999 ++ "Rx interrupt throttling cannot be changed if adaptive-rx is enabled\n");
1000 ++ return -EINVAL;
1001 ++ }
1002 ++
1003 ++ itr_setting = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
1004 ++
1005 ++ if (ec->tx_coalesce_usecs != itr_setting &&
1006 ++ ec->use_adaptive_tx_coalesce) {
1007 ++ netif_info(adapter, drv, adapter->netdev,
1008 ++ "Tx interrupt throttling cannot be changed if adaptive-tx is enabled\n");
1009 ++ return -EINVAL;
1010 ++ }
1011 +
1012 + rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs);
1013 + tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs);
1014 +@@ -747,6 +766,7 @@ static void iavf_set_itr_per_queue(struct iavf_adapter *adapter,
1015 + * the Tx and Rx ITR values based on the values we have entered
1016 + * into the q_vector, no need to write the values now.
1017 + */
1018 ++ return 0;
1019 + }
1020 +
1021 + /**
1022 +@@ -788,9 +808,11 @@ static int __iavf_set_coalesce(struct net_device *netdev,
1023 + */
1024 + if (queue < 0) {
1025 + for (i = 0; i < adapter->num_active_queues; i++)
1026 +- iavf_set_itr_per_queue(adapter, ec, i);
1027 ++ if (iavf_set_itr_per_queue(adapter, ec, i))
1028 ++ return -EINVAL;
1029 + } else if (queue < adapter->num_active_queues) {
1030 +- iavf_set_itr_per_queue(adapter, ec, queue);
1031 ++ if (iavf_set_itr_per_queue(adapter, ec, queue))
1032 ++ return -EINVAL;
1033 + } else {
1034 + netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n",
1035 + adapter->num_active_queues - 1);
1036 +diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
1037 +index dc944d605a741..52ac6cc08e83e 100644
1038 +--- a/drivers/net/ethernet/intel/ice/ice_lib.c
1039 ++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
1040 +@@ -83,8 +83,13 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
1041 + if (!vsi->rx_rings)
1042 + goto err_rings;
1043 +
1044 +- /* XDP will have vsi->alloc_txq Tx queues as well, so double the size */
1045 +- vsi->txq_map = devm_kcalloc(dev, (2 * vsi->alloc_txq),
1046 ++ /* txq_map needs to have enough space to track both Tx (stack) rings
1047 ++ * and XDP rings; at this point vsi->num_xdp_txq might not be set,
1048 ++ * so use num_possible_cpus() as we want to always provide XDP ring
1049 ++ * per CPU, regardless of queue count settings from user that might
1050 ++ * have come from ethtool's set_channels() callback;
1051 ++ */
1052 ++ vsi->txq_map = devm_kcalloc(dev, (vsi->alloc_txq + num_possible_cpus()),
1053 + sizeof(*vsi->txq_map), GFP_KERNEL);
1054 +
1055 + if (!vsi->txq_map)
1056 +diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
1057 +index 5b67d24b2b5ed..746a5bd178d3b 100644
1058 +--- a/drivers/net/ethernet/intel/ice/ice_main.c
1059 ++++ b/drivers/net/ethernet/intel/ice/ice_main.c
1060 +@@ -2397,7 +2397,18 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
1061 + ice_stat_str(status));
1062 + goto clear_xdp_rings;
1063 + }
1064 +- ice_vsi_assign_bpf_prog(vsi, prog);
1065 ++
1066 ++ /* assign the prog only when it's not already present on VSI;
1067 ++ * this flow is a subject of both ethtool -L and ndo_bpf flows;
1068 ++ * VSI rebuild that happens under ethtool -L can expose us to
1069 ++ * the bpf_prog refcount issues as we would be swapping same
1070 ++ * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put
1071 ++ * on it as it would be treated as an 'old_prog'; for ndo_bpf
1072 ++ * this is not harmful as dev_xdp_install bumps the refcount
1073 ++ * before calling the op exposed by the driver;
1074 ++ */
1075 ++ if (!ice_is_xdp_ena_vsi(vsi))
1076 ++ ice_vsi_assign_bpf_prog(vsi, prog);
1077 +
1078 + return 0;
1079 + clear_xdp_rings:
1080 +@@ -2527,6 +2538,11 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
1081 + if (xdp_ring_err)
1082 + NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
1083 + } else {
1084 ++ /* safe to call even when prog == vsi->xdp_prog as
1085 ++ * dev_xdp_install in net/core/dev.c incremented prog's
1086 ++ * refcount so corresponding bpf_prog_put won't cause
1087 ++ * underflow
1088 ++ */
1089 + ice_vsi_assign_bpf_prog(vsi, prog);
1090 + }
1091 +
1092 +diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
1093 +index e24fb122c03a2..d5432d1448c05 100644
1094 +--- a/drivers/net/ethernet/intel/igb/igb_main.c
1095 ++++ b/drivers/net/ethernet/intel/igb/igb_main.c
1096 +@@ -8032,7 +8032,7 @@ static int igb_poll(struct napi_struct *napi, int budget)
1097 + if (likely(napi_complete_done(napi, work_done)))
1098 + igb_ring_irq_enable(q_vector);
1099 +
1100 +- return min(work_done, budget - 1);
1101 ++ return work_done;
1102 + }
1103 +
1104 + /**
1105 +diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1106 +index ec9b6c564300e..e220d44df2e65 100644
1107 +--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1108 ++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1109 +@@ -4652,11 +4652,13 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu)
1110 + mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
1111 + }
1112 +
1113 ++ if (port->xdp_prog && mtu > MVPP2_MAX_RX_BUF_SIZE) {
1114 ++ netdev_err(dev, "Illegal MTU value %d (> %d) for XDP mode\n",
1115 ++ mtu, (int)MVPP2_MAX_RX_BUF_SIZE);
1116 ++ return -EINVAL;
1117 ++ }
1118 ++
1119 + if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) {
1120 +- if (port->xdp_prog) {
1121 +- netdev_err(dev, "Jumbo frames are not supported with XDP\n");
1122 +- return -EINVAL;
1123 +- }
1124 + if (priv->percpu_pools) {
1125 + netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu);
1126 + mvpp2_bm_switch_buffers(priv, false);
1127 +@@ -4942,8 +4944,8 @@ static int mvpp2_xdp_setup(struct mvpp2_port *port, struct netdev_bpf *bpf)
1128 + bool running = netif_running(port->dev);
1129 + bool reset = !prog != !port->xdp_prog;
1130 +
1131 +- if (port->dev->mtu > ETH_DATA_LEN) {
1132 +- NL_SET_ERR_MSG_MOD(bpf->extack, "XDP is not supported with jumbo frames enabled");
1133 ++ if (port->dev->mtu > MVPP2_MAX_RX_BUF_SIZE) {
1134 ++ NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP");
1135 + return -EOPNOTSUPP;
1136 + }
1137 +
1138 +diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
1139 +index 7d83e1f91ef17..9101d00e96b9d 100644
1140 +--- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
1141 ++++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
1142 +@@ -439,8 +439,8 @@ static int prestera_port_bridge_join(struct prestera_port *port,
1143 +
1144 + br_port = prestera_bridge_port_add(bridge, port->dev);
1145 + if (IS_ERR(br_port)) {
1146 +- err = PTR_ERR(br_port);
1147 +- goto err_brport_create;
1148 ++ prestera_bridge_put(bridge);
1149 ++ return PTR_ERR(br_port);
1150 + }
1151 +
1152 + if (bridge->vlan_enabled)
1153 +@@ -454,8 +454,6 @@ static int prestera_port_bridge_join(struct prestera_port *port,
1154 +
1155 + err_port_join:
1156 + prestera_bridge_port_put(br_port);
1157 +-err_brport_create:
1158 +- prestera_bridge_put(bridge);
1159 + return err;
1160 + }
1161 +
1162 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
1163 +index c010db2c9dba9..443dc44452ef8 100644
1164 +--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
1165 ++++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
1166 +@@ -234,6 +234,7 @@ static void mlxsw_m_port_remove(struct mlxsw_m *mlxsw_m, u8 local_port)
1167 + static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u8 local_port,
1168 + u8 *last_module)
1169 + {
1170 ++ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_m->core);
1171 + u8 module, width;
1172 + int err;
1173 +
1174 +@@ -249,6 +250,9 @@ static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u8 local_port,
1175 + if (module == *last_module)
1176 + return 0;
1177 + *last_module = module;
1178 ++
1179 ++ if (WARN_ON_ONCE(module >= max_ports))
1180 ++ return -EINVAL;
1181 + mlxsw_m->module_to_port[module] = ++mlxsw_m->max_ports;
1182 +
1183 + return 0;
1184 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
1185 +index b08853f71b2be..4110e15c22c79 100644
1186 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
1187 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
1188 +@@ -2052,9 +2052,14 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
1189 + struct mlxsw_sp *mlxsw_sp = priv;
1190 + struct mlxsw_sp_port *mlxsw_sp_port;
1191 + enum mlxsw_reg_pude_oper_status status;
1192 ++ unsigned int max_ports;
1193 + u8 local_port;
1194 +
1195 ++ max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
1196 + local_port = mlxsw_reg_pude_local_port_get(pude_pl);
1197 ++
1198 ++ if (WARN_ON_ONCE(!local_port || local_port >= max_ports))
1199 ++ return;
1200 + mlxsw_sp_port = mlxsw_sp->ports[local_port];
1201 + if (!mlxsw_sp_port)
1202 + return;
1203 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
1204 +index ca8090a28dec6..50eca2daad843 100644
1205 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
1206 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
1207 +@@ -568,10 +568,13 @@ void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
1208 + u8 domain_number, u16 sequence_id,
1209 + u64 timestamp)
1210 + {
1211 ++ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
1212 + struct mlxsw_sp_port *mlxsw_sp_port;
1213 + struct mlxsw_sp1_ptp_key key;
1214 + u8 types;
1215 +
1216 ++ if (WARN_ON_ONCE(local_port >= max_ports))
1217 ++ return;
1218 + mlxsw_sp_port = mlxsw_sp->ports[local_port];
1219 + if (!mlxsw_sp_port)
1220 + return;
1221 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
1222 +index 4381f8c6c3fb7..53128382fc2e0 100644
1223 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
1224 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
1225 +@@ -2177,6 +2177,7 @@ static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
1226 + char *rauhtd_pl,
1227 + int ent_index)
1228 + {
1229 ++ u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
1230 + struct net_device *dev;
1231 + struct neighbour *n;
1232 + __be32 dipn;
1233 +@@ -2185,6 +2186,8 @@ static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
1234 +
1235 + mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
1236 +
1237 ++ if (WARN_ON_ONCE(rif >= max_rifs))
1238 ++ return;
1239 + if (!mlxsw_sp->router->rifs[rif]) {
1240 + dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
1241 + return;
1242 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
1243 +index 6501ce94ace58..368fa0e5ad315 100644
1244 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
1245 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
1246 +@@ -2410,6 +2410,7 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
1247 + char *sfn_pl, int rec_index,
1248 + bool adding)
1249 + {
1250 ++ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
1251 + struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1252 + struct mlxsw_sp_bridge_device *bridge_device;
1253 + struct mlxsw_sp_bridge_port *bridge_port;
1254 +@@ -2422,6 +2423,9 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
1255 + int err;
1256 +
1257 + mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
1258 ++
1259 ++ if (WARN_ON_ONCE(local_port >= max_ports))
1260 ++ return;
1261 + mlxsw_sp_port = mlxsw_sp->ports[local_port];
1262 + if (!mlxsw_sp_port) {
1263 + dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
1264 +diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
1265 +index 3eea8cf076c48..481f89d193f77 100644
1266 +--- a/drivers/net/ethernet/microchip/lan743x_main.c
1267 ++++ b/drivers/net/ethernet/microchip/lan743x_main.c
1268 +@@ -922,8 +922,7 @@ static int lan743x_phy_reset(struct lan743x_adapter *adapter)
1269 + }
1270 +
1271 + static void lan743x_phy_update_flowcontrol(struct lan743x_adapter *adapter,
1272 +- u8 duplex, u16 local_adv,
1273 +- u16 remote_adv)
1274 ++ u16 local_adv, u16 remote_adv)
1275 + {
1276 + struct lan743x_phy *phy = &adapter->phy;
1277 + u8 cap;
1278 +@@ -951,7 +950,6 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
1279 +
1280 + phy_print_status(phydev);
1281 + if (phydev->state == PHY_RUNNING) {
1282 +- struct ethtool_link_ksettings ksettings;
1283 + int remote_advertisement = 0;
1284 + int local_advertisement = 0;
1285 +
1286 +@@ -988,18 +986,14 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
1287 + }
1288 + lan743x_csr_write(adapter, MAC_CR, data);
1289 +
1290 +- memset(&ksettings, 0, sizeof(ksettings));
1291 +- phy_ethtool_get_link_ksettings(netdev, &ksettings);
1292 + local_advertisement =
1293 + linkmode_adv_to_mii_adv_t(phydev->advertising);
1294 + remote_advertisement =
1295 + linkmode_adv_to_mii_adv_t(phydev->lp_advertising);
1296 +
1297 +- lan743x_phy_update_flowcontrol(adapter,
1298 +- ksettings.base.duplex,
1299 +- local_advertisement,
1300 ++ lan743x_phy_update_flowcontrol(adapter, local_advertisement,
1301 + remote_advertisement);
1302 +- lan743x_ptp_update_latency(adapter, ksettings.base.speed);
1303 ++ lan743x_ptp_update_latency(adapter, phydev->speed);
1304 + }
1305 + }
1306 +
1307 +diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
1308 +index 8c45b236649a9..52401915828a1 100644
1309 +--- a/drivers/net/ethernet/mscc/ocelot.c
1310 ++++ b/drivers/net/ethernet/mscc/ocelot.c
1311 +@@ -811,12 +811,6 @@ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
1312 + switch (cfg.rx_filter) {
1313 + case HWTSTAMP_FILTER_NONE:
1314 + break;
1315 +- case HWTSTAMP_FILTER_ALL:
1316 +- case HWTSTAMP_FILTER_SOME:
1317 +- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1318 +- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1319 +- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1320 +- case HWTSTAMP_FILTER_NTP_ALL:
1321 + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1322 + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1323 + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1324 +@@ -935,7 +929,10 @@ int ocelot_get_ts_info(struct ocelot *ocelot, int port,
1325 + SOF_TIMESTAMPING_RAW_HARDWARE;
1326 + info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
1327 + BIT(HWTSTAMP_TX_ONESTEP_SYNC);
1328 +- info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
1329 ++ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
1330 ++ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
1331 ++ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
1332 ++ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
1333 +
1334 + return 0;
1335 + }
1336 +diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
1337 +index df5b748be068c..cc2ce452000a3 100644
1338 +--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
1339 ++++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
1340 +@@ -557,7 +557,6 @@ struct nfp_net_dp {
1341 + * @exn_name: Name for Exception interrupt
1342 + * @shared_handler: Handler for shared interrupts
1343 + * @shared_name: Name for shared interrupt
1344 +- * @me_freq_mhz: ME clock_freq (MHz)
1345 + * @reconfig_lock: Protects @reconfig_posted, @reconfig_timer_active,
1346 + * @reconfig_sync_present and HW reconfiguration request
1347 + * regs/machinery from async requests (sync must take
1348 +@@ -640,8 +639,6 @@ struct nfp_net {
1349 + irq_handler_t shared_handler;
1350 + char shared_name[IFNAMSIZ + 8];
1351 +
1352 +- u32 me_freq_mhz;
1353 +-
1354 + bool link_up;
1355 + spinlock_t link_status_lock;
1356 +
1357 +diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
1358 +index c036a1d0f8de6..cd0c9623f7dd2 100644
1359 +--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
1360 ++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
1361 +@@ -1347,7 +1347,7 @@ static int nfp_net_set_coalesce(struct net_device *netdev,
1362 + * ME timestamp ticks. There are 16 ME clock cycles for each timestamp
1363 + * count.
1364 + */
1365 +- factor = nn->me_freq_mhz / 16;
1366 ++ factor = nn->tlv_caps.me_freq_mhz / 16;
1367 +
1368 + /* Each pair of (usecs, max_frames) fields specifies that interrupts
1369 + * should be coalesced until
1370 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
1371 +index a4ca283e02284..617c960cfb5a5 100644
1372 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
1373 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
1374 +@@ -258,6 +258,7 @@ int stmmac_mdio_register(struct net_device *ndev);
1375 + int stmmac_mdio_reset(struct mii_bus *mii);
1376 + void stmmac_set_ethtool_ops(struct net_device *netdev);
1377 +
1378 ++int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags);
1379 + void stmmac_ptp_register(struct stmmac_priv *priv);
1380 + void stmmac_ptp_unregister(struct stmmac_priv *priv);
1381 + int stmmac_resume(struct device *dev);
1382 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1383 +index 4a75e73f06bbd..a8c5492cb39be 100644
1384 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1385 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1386 +@@ -47,6 +47,13 @@
1387 + #include "dwxgmac2.h"
1388 + #include "hwif.h"
1389 +
1390 ++/* As long as the interface is active, we keep the timestamping counter enabled
1391 ++ * with fine resolution and binary rollover. This avoids non-monotonic behavior
1392 ++ * (clock jumps) when changing timestamping settings at runtime.
1393 ++ */
1394 ++#define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
1395 ++ PTP_TCR_TSCTRLSSR)
1396 ++
1397 + #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
1398 + #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
1399 +
1400 +@@ -508,8 +515,6 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
1401 + {
1402 + struct stmmac_priv *priv = netdev_priv(dev);
1403 + struct hwtstamp_config config;
1404 +- struct timespec64 now;
1405 +- u64 temp = 0;
1406 + u32 ptp_v2 = 0;
1407 + u32 tstamp_all = 0;
1408 + u32 ptp_over_ipv4_udp = 0;
1409 +@@ -518,11 +523,6 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
1410 + u32 snap_type_sel = 0;
1411 + u32 ts_master_en = 0;
1412 + u32 ts_event_en = 0;
1413 +- u32 sec_inc = 0;
1414 +- u32 value = 0;
1415 +- bool xmac;
1416 +-
1417 +- xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
1418 +
1419 + if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
1420 + netdev_alert(priv->dev, "No support for HW time stamping\n");
1421 +@@ -684,42 +684,17 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
1422 + priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
1423 + priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
1424 +
1425 +- if (!priv->hwts_tx_en && !priv->hwts_rx_en)
1426 +- stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
1427 +- else {
1428 +- value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
1429 +- tstamp_all | ptp_v2 | ptp_over_ethernet |
1430 +- ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
1431 +- ts_master_en | snap_type_sel);
1432 +- stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
1433 +-
1434 +- /* program Sub Second Increment reg */
1435 +- stmmac_config_sub_second_increment(priv,
1436 +- priv->ptpaddr, priv->plat->clk_ptp_rate,
1437 +- xmac, &sec_inc);
1438 +- temp = div_u64(1000000000ULL, sec_inc);
1439 +-
1440 +- /* Store sub second increment and flags for later use */
1441 +- priv->sub_second_inc = sec_inc;
1442 +- priv->systime_flags = value;
1443 +-
1444 +- /* calculate default added value:
1445 +- * formula is :
1446 +- * addend = (2^32)/freq_div_ratio;
1447 +- * where, freq_div_ratio = 1e9ns/sec_inc
1448 +- */
1449 +- temp = (u64)(temp << 32);
1450 +- priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
1451 +- stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
1452 +-
1453 +- /* initialize system time */
1454 +- ktime_get_real_ts64(&now);
1455 ++ priv->systime_flags = STMMAC_HWTS_ACTIVE;
1456 +
1457 +- /* lower 32 bits of tv_sec are safe until y2106 */
1458 +- stmmac_init_systime(priv, priv->ptpaddr,
1459 +- (u32)now.tv_sec, now.tv_nsec);
1460 ++ if (priv->hwts_tx_en || priv->hwts_rx_en) {
1461 ++ priv->systime_flags |= tstamp_all | ptp_v2 |
1462 ++ ptp_over_ethernet | ptp_over_ipv6_udp |
1463 ++ ptp_over_ipv4_udp | ts_event_en |
1464 ++ ts_master_en | snap_type_sel;
1465 + }
1466 +
1467 ++ stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
1468 ++
1469 + memcpy(&priv->tstamp_config, &config, sizeof(config));
1470 +
1471 + return copy_to_user(ifr->ifr_data, &config,
1472 +@@ -747,6 +722,66 @@ static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
1473 + sizeof(*config)) ? -EFAULT : 0;
1474 + }
1475 +
1476 ++/**
1477 ++ * stmmac_init_tstamp_counter - init hardware timestamping counter
1478 ++ * @priv: driver private structure
1479 ++ * @systime_flags: timestamping flags
1480 ++ * Description:
1481 ++ * Initialize hardware counter for packet timestamping.
1482 ++ * This is valid as long as the interface is open and not suspended.
1483 ++ * Will be rerun after resuming from suspend, case in which the timestamping
1484 ++ * Will be rerun after resuming from suspend, in which case the timestamping
1485 ++ */
1486 ++int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
1487 ++{
1488 ++ bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
1489 ++ struct timespec64 now;
1490 ++ u32 sec_inc = 0;
1491 ++ u64 temp = 0;
1492 ++ int ret;
1493 ++
1494 ++ if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
1495 ++ return -EOPNOTSUPP;
1496 ++
1497 ++ ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
1498 ++ if (ret < 0) {
1499 ++ netdev_warn(priv->dev,
1500 ++ "failed to enable PTP reference clock: %pe\n",
1501 ++ ERR_PTR(ret));
1502 ++ return ret;
1503 ++ }
1504 ++
1505 ++ stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
1506 ++ priv->systime_flags = systime_flags;
1507 ++
1508 ++ /* program Sub Second Increment reg */
1509 ++ stmmac_config_sub_second_increment(priv, priv->ptpaddr,
1510 ++ priv->plat->clk_ptp_rate,
1511 ++ xmac, &sec_inc);
1512 ++ temp = div_u64(1000000000ULL, sec_inc);
1513 ++
1514 ++ /* Store sub second increment for later use */
1515 ++ priv->sub_second_inc = sec_inc;
1516 ++
1517 ++ /* calculate default added value:
1518 ++	 * the formula is:
1519 ++ * addend = (2^32)/freq_div_ratio;
1520 ++	 * where freq_div_ratio = 1e9ns/sec_inc
1521 ++ */
1522 ++ temp = (u64)(temp << 32);
1523 ++ priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
1524 ++ stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
1525 ++
1526 ++ /* initialize system time */
1527 ++ ktime_get_real_ts64(&now);
1528 ++
1529 ++ /* lower 32 bits of tv_sec are safe until y2106 */
1530 ++ stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
1531 ++
1532 ++ return 0;
1533 ++}
1534 ++EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
1535 ++
1536 + /**
1537 + * stmmac_init_ptp - init PTP
1538 + * @priv: driver private structure
1539 +@@ -757,9 +792,11 @@ static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
1540 + static int stmmac_init_ptp(struct stmmac_priv *priv)
1541 + {
1542 + bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
1543 ++ int ret;
1544 +
1545 +- if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
1546 +- return -EOPNOTSUPP;
1547 ++ ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
1548 ++ if (ret)
1549 ++ return ret;
1550 +
1551 + priv->adv_ts = 0;
1552 + /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
1553 +@@ -2721,10 +2758,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
1554 + stmmac_mmc_setup(priv);
1555 +
1556 + if (init_ptp) {
1557 +- ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
1558 +- if (ret < 0)
1559 +- netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
1560 +-
1561 + ret = stmmac_init_ptp(priv);
1562 + if (ret == -EOPNOTSUPP)
1563 + netdev_warn(priv->dev, "PTP not supported by HW\n");
1564 +@@ -5238,7 +5271,6 @@ int stmmac_suspend(struct device *dev)
1565 + struct net_device *ndev = dev_get_drvdata(dev);
1566 + struct stmmac_priv *priv = netdev_priv(ndev);
1567 + u32 chan;
1568 +- int ret;
1569 +
1570 + if (!ndev || !netif_running(ndev))
1571 + return 0;
1572 +@@ -5280,13 +5312,6 @@ int stmmac_suspend(struct device *dev)
1573 +
1574 + stmmac_mac_set(priv, priv->ioaddr, false);
1575 + pinctrl_pm_select_sleep_state(priv->device);
1576 +- /* Disable clock in case of PWM is off */
1577 +- clk_disable_unprepare(priv->plat->clk_ptp_ref);
1578 +- ret = pm_runtime_force_suspend(dev);
1579 +- if (ret) {
1580 +- mutex_unlock(&priv->lock);
1581 +- return ret;
1582 +- }
1583 + }
1584 + mutex_unlock(&priv->lock);
1585 +
1586 +@@ -5351,12 +5376,6 @@ int stmmac_resume(struct device *dev)
1587 + priv->irq_wake = 0;
1588 + } else {
1589 + pinctrl_pm_select_default_state(priv->device);
1590 +- /* enable the clk previously disabled */
1591 +- ret = pm_runtime_force_resume(dev);
1592 +- if (ret)
1593 +- return ret;
1594 +- if (priv->plat->clk_ptp_ref)
1595 +- clk_prepare_enable(priv->plat->clk_ptp_ref);
1596 + /* reset the phy so that it's ready */
1597 + if (priv->mii)
1598 + stmmac_mdio_reset(priv->mii);
1599 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
1600 +index 035f9aef4308f..3183d8826981e 100644
1601 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
1602 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
1603 +@@ -9,6 +9,7 @@
1604 + *******************************************************************************/
1605 +
1606 + #include <linux/platform_device.h>
1607 ++#include <linux/pm_runtime.h>
1608 + #include <linux/module.h>
1609 + #include <linux/io.h>
1610 + #include <linux/of.h>
1611 +@@ -778,9 +779,52 @@ static int __maybe_unused stmmac_runtime_resume(struct device *dev)
1612 + return stmmac_bus_clks_config(priv, true);
1613 + }
1614 +
1615 ++static int __maybe_unused stmmac_pltfr_noirq_suspend(struct device *dev)
1616 ++{
1617 ++ struct net_device *ndev = dev_get_drvdata(dev);
1618 ++ struct stmmac_priv *priv = netdev_priv(ndev);
1619 ++ int ret;
1620 ++
1621 ++ if (!netif_running(ndev))
1622 ++ return 0;
1623 ++
1624 ++ if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
1625 ++ /* Disable clock in case of PWM is off */
1626 ++ clk_disable_unprepare(priv->plat->clk_ptp_ref);
1627 ++
1628 ++ ret = pm_runtime_force_suspend(dev);
1629 ++ if (ret)
1630 ++ return ret;
1631 ++ }
1632 ++
1633 ++ return 0;
1634 ++}
1635 ++
1636 ++static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev)
1637 ++{
1638 ++ struct net_device *ndev = dev_get_drvdata(dev);
1639 ++ struct stmmac_priv *priv = netdev_priv(ndev);
1640 ++ int ret;
1641 ++
1642 ++ if (!netif_running(ndev))
1643 ++ return 0;
1644 ++
1645 ++ if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
1646 ++ /* enable the clk previously disabled */
1647 ++ ret = pm_runtime_force_resume(dev);
1648 ++ if (ret)
1649 ++ return ret;
1650 ++
1651 ++ stmmac_init_tstamp_counter(priv, priv->systime_flags);
1652 ++ }
1653 ++
1654 ++ return 0;
1655 ++}
1656 ++
1657 + const struct dev_pm_ops stmmac_pltfr_pm_ops = {
1658 + SET_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_suspend, stmmac_pltfr_resume)
1659 + SET_RUNTIME_PM_OPS(stmmac_runtime_suspend, stmmac_runtime_resume, NULL)
1660 ++ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_noirq_suspend, stmmac_pltfr_noirq_resume)
1661 + };
1662 + EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);
1663 +
1664 +diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c
1665 +index cad820568f751..966c3b4ad59d1 100644
1666 +--- a/drivers/net/mdio/mdio-aspeed.c
1667 ++++ b/drivers/net/mdio/mdio-aspeed.c
1668 +@@ -61,6 +61,13 @@ static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum)
1669 +
1670 + iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL);
1671 +
1672 ++ rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl,
1673 ++ !(ctrl & ASPEED_MDIO_CTRL_FIRE),
1674 ++ ASPEED_MDIO_INTERVAL_US,
1675 ++ ASPEED_MDIO_TIMEOUT_US);
1676 ++ if (rc < 0)
1677 ++ return rc;
1678 ++
1679 + rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_DATA, data,
1680 + data & ASPEED_MDIO_DATA_IDLE,
1681 + ASPEED_MDIO_INTERVAL_US,
1682 +diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
1683 +index 899496f089d2e..57b1b138522e0 100644
1684 +--- a/drivers/net/phy/phylink.c
1685 ++++ b/drivers/net/phy/phylink.c
1686 +@@ -644,6 +644,7 @@ static void phylink_resolve(struct work_struct *w)
1687 + struct phylink_link_state link_state;
1688 + struct net_device *ndev = pl->netdev;
1689 + bool mac_config = false;
1690 ++ bool retrigger = false;
1691 + bool cur_link_state;
1692 +
1693 + mutex_lock(&pl->state_mutex);
1694 +@@ -657,6 +658,7 @@ static void phylink_resolve(struct work_struct *w)
1695 + link_state.link = false;
1696 + } else if (pl->mac_link_dropped) {
1697 + link_state.link = false;
1698 ++ retrigger = true;
1699 + } else {
1700 + switch (pl->cur_link_an_mode) {
1701 + case MLO_AN_PHY:
1702 +@@ -673,6 +675,19 @@ static void phylink_resolve(struct work_struct *w)
1703 + case MLO_AN_INBAND:
1704 + phylink_mac_pcs_get_state(pl, &link_state);
1705 +
1706 ++ /* The PCS may have a latching link-fail indicator.
1707 ++ * If the link was up, bring the link down and
1708 ++ * re-trigger the resolve. Otherwise, re-read the
1709 ++ * PCS state to get the current status of the link.
1710 ++ */
1711 ++ if (!link_state.link) {
1712 ++ if (cur_link_state)
1713 ++ retrigger = true;
1714 ++ else
1715 ++ phylink_mac_pcs_get_state(pl,
1716 ++ &link_state);
1717 ++ }
1718 ++
1719 + /* If we have a phy, the "up" state is the union of
1720 + * both the PHY and the MAC */
1721 + if (pl->phydev)
1722 +@@ -680,6 +695,15 @@ static void phylink_resolve(struct work_struct *w)
1723 +
1724 + /* Only update if the PHY link is up */
1725 + if (pl->phydev && pl->phy_state.link) {
1726 ++ /* If the interface has changed, force a
1727 ++ * link down event if the link isn't already
1728 ++ * down, and re-resolve.
1729 ++ */
1730 ++ if (link_state.interface !=
1731 ++ pl->phy_state.interface) {
1732 ++ retrigger = true;
1733 ++ link_state.link = false;
1734 ++ }
1735 + link_state.interface = pl->phy_state.interface;
1736 +
1737 + /* If we have a PHY, we need to update with
1738 +@@ -721,7 +745,7 @@ static void phylink_resolve(struct work_struct *w)
1739 + else
1740 + phylink_link_up(pl, link_state);
1741 + }
1742 +- if (!link_state.link && pl->mac_link_dropped) {
1743 ++ if (!link_state.link && retrigger) {
1744 + pl->mac_link_dropped = false;
1745 + queue_work(system_power_efficient_wq, &pl->resolve);
1746 + }
1747 +diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
1748 +index dd79534910b05..8505024b89e9e 100644
1749 +--- a/drivers/net/xen-netfront.c
1750 ++++ b/drivers/net/xen-netfront.c
1751 +@@ -126,21 +126,17 @@ struct netfront_queue {
1752 +
1753 + /*
1754 + * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
1755 +- * are linked from tx_skb_freelist through skb_entry.link.
1756 +- *
1757 +- * NB. Freelist index entries are always going to be less than
1758 +- * PAGE_OFFSET, whereas pointers to skbs will always be equal or
1759 +- * greater than PAGE_OFFSET: we use this property to distinguish
1760 +- * them.
1761 ++ * are linked from tx_skb_freelist through tx_link.
1762 + */
1763 +- union skb_entry {
1764 +- struct sk_buff *skb;
1765 +- unsigned long link;
1766 +- } tx_skbs[NET_TX_RING_SIZE];
1767 ++ struct sk_buff *tx_skbs[NET_TX_RING_SIZE];
1768 ++ unsigned short tx_link[NET_TX_RING_SIZE];
1769 ++#define TX_LINK_NONE 0xffff
1770 ++#define TX_PENDING 0xfffe
1771 + grant_ref_t gref_tx_head;
1772 + grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
1773 + struct page *grant_tx_page[NET_TX_RING_SIZE];
1774 + unsigned tx_skb_freelist;
1775 ++ unsigned int tx_pend_queue;
1776 +
1777 + spinlock_t rx_lock ____cacheline_aligned_in_smp;
1778 + struct xen_netif_rx_front_ring rx;
1779 +@@ -173,6 +169,9 @@ struct netfront_info {
1780 + bool netback_has_xdp_headroom;
1781 + bool netfront_xdp_enabled;
1782 +
1783 ++	/* Is the device behaving sanely? */
1784 ++ bool broken;
1785 ++
1786 + atomic_t rx_gso_checksum_fixup;
1787 + };
1788 +
1789 +@@ -181,33 +180,25 @@ struct netfront_rx_info {
1790 + struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
1791 + };
1792 +
1793 +-static void skb_entry_set_link(union skb_entry *list, unsigned short id)
1794 +-{
1795 +- list->link = id;
1796 +-}
1797 +-
1798 +-static int skb_entry_is_link(const union skb_entry *list)
1799 +-{
1800 +- BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
1801 +- return (unsigned long)list->skb < PAGE_OFFSET;
1802 +-}
1803 +-
1804 + /*
1805 + * Access macros for acquiring freeing slots in tx_skbs[].
1806 + */
1807 +
1808 +-static void add_id_to_freelist(unsigned *head, union skb_entry *list,
1809 +- unsigned short id)
1810 ++static void add_id_to_list(unsigned *head, unsigned short *list,
1811 ++ unsigned short id)
1812 + {
1813 +- skb_entry_set_link(&list[id], *head);
1814 ++ list[id] = *head;
1815 + *head = id;
1816 + }
1817 +
1818 +-static unsigned short get_id_from_freelist(unsigned *head,
1819 +- union skb_entry *list)
1820 ++static unsigned short get_id_from_list(unsigned *head, unsigned short *list)
1821 + {
1822 + unsigned int id = *head;
1823 +- *head = list[id].link;
1824 ++
1825 ++ if (id != TX_LINK_NONE) {
1826 ++ *head = list[id];
1827 ++ list[id] = TX_LINK_NONE;
1828 ++ }
1829 + return id;
1830 + }
1831 +
1832 +@@ -363,7 +354,7 @@ static int xennet_open(struct net_device *dev)
1833 + unsigned int i = 0;
1834 + struct netfront_queue *queue = NULL;
1835 +
1836 +- if (!np->queues)
1837 ++ if (!np->queues || np->broken)
1838 + return -ENODEV;
1839 +
1840 + for (i = 0; i < num_queues; ++i) {
1841 +@@ -391,27 +382,47 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
1842 + unsigned short id;
1843 + struct sk_buff *skb;
1844 + bool more_to_do;
1845 ++ const struct device *dev = &queue->info->netdev->dev;
1846 +
1847 + BUG_ON(!netif_carrier_ok(queue->info->netdev));
1848 +
1849 + do {
1850 + prod = queue->tx.sring->rsp_prod;
1851 ++ if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) {
1852 ++ dev_alert(dev, "Illegal number of responses %u\n",
1853 ++ prod - queue->tx.rsp_cons);
1854 ++ goto err;
1855 ++ }
1856 + rmb(); /* Ensure we see responses up to 'rp'. */
1857 +
1858 + for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
1859 +- struct xen_netif_tx_response *txrsp;
1860 ++ struct xen_netif_tx_response txrsp;
1861 +
1862 +- txrsp = RING_GET_RESPONSE(&queue->tx, cons);
1863 +- if (txrsp->status == XEN_NETIF_RSP_NULL)
1864 ++ RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
1865 ++ if (txrsp.status == XEN_NETIF_RSP_NULL)
1866 + continue;
1867 +
1868 +- id = txrsp->id;
1869 +- skb = queue->tx_skbs[id].skb;
1870 ++ id = txrsp.id;
1871 ++ if (id >= RING_SIZE(&queue->tx)) {
1872 ++ dev_alert(dev,
1873 ++ "Response has incorrect id (%u)\n",
1874 ++ id);
1875 ++ goto err;
1876 ++ }
1877 ++ if (queue->tx_link[id] != TX_PENDING) {
1878 ++ dev_alert(dev,
1879 ++ "Response for inactive request\n");
1880 ++ goto err;
1881 ++ }
1882 ++
1883 ++ queue->tx_link[id] = TX_LINK_NONE;
1884 ++ skb = queue->tx_skbs[id];
1885 ++ queue->tx_skbs[id] = NULL;
1886 + if (unlikely(gnttab_query_foreign_access(
1887 + queue->grant_tx_ref[id]) != 0)) {
1888 +- pr_alert("%s: warning -- grant still in use by backend domain\n",
1889 +- __func__);
1890 +- BUG();
1891 ++ dev_alert(dev,
1892 ++ "Grant still in use by backend domain\n");
1893 ++ goto err;
1894 + }
1895 + gnttab_end_foreign_access_ref(
1896 + queue->grant_tx_ref[id], GNTMAP_readonly);
1897 +@@ -419,7 +430,7 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
1898 + &queue->gref_tx_head, queue->grant_tx_ref[id]);
1899 + queue->grant_tx_ref[id] = GRANT_INVALID_REF;
1900 + queue->grant_tx_page[id] = NULL;
1901 +- add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id);
1902 ++ add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
1903 + dev_kfree_skb_irq(skb);
1904 + }
1905 +
1906 +@@ -429,13 +440,20 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
1907 + } while (more_to_do);
1908 +
1909 + xennet_maybe_wake_tx(queue);
1910 ++
1911 ++ return;
1912 ++
1913 ++ err:
1914 ++ queue->info->broken = true;
1915 ++ dev_alert(dev, "Disabled for further use\n");
1916 + }
1917 +
1918 + struct xennet_gnttab_make_txreq {
1919 + struct netfront_queue *queue;
1920 + struct sk_buff *skb;
1921 + struct page *page;
1922 +- struct xen_netif_tx_request *tx; /* Last request */
1923 ++ struct xen_netif_tx_request *tx; /* Last request on ring page */
1924 ++	struct xen_netif_tx_request tx_local; /* Last request local copy */
1925 + unsigned int size;
1926 + };
1927 +
1928 +@@ -451,7 +469,7 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
1929 + struct netfront_queue *queue = info->queue;
1930 + struct sk_buff *skb = info->skb;
1931 +
1932 +- id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
1933 ++ id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link);
1934 + tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
1935 + ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
1936 + WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
1937 +@@ -459,34 +477,37 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
1938 + gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
1939 + gfn, GNTMAP_readonly);
1940 +
1941 +- queue->tx_skbs[id].skb = skb;
1942 ++ queue->tx_skbs[id] = skb;
1943 + queue->grant_tx_page[id] = page;
1944 + queue->grant_tx_ref[id] = ref;
1945 +
1946 +- tx->id = id;
1947 +- tx->gref = ref;
1948 +- tx->offset = offset;
1949 +- tx->size = len;
1950 +- tx->flags = 0;
1951 ++ info->tx_local.id = id;
1952 ++ info->tx_local.gref = ref;
1953 ++ info->tx_local.offset = offset;
1954 ++ info->tx_local.size = len;
1955 ++ info->tx_local.flags = 0;
1956 ++
1957 ++ *tx = info->tx_local;
1958 ++
1959 ++ /*
1960 ++	 * Put the request in the pending queue; it will be marked as pending
1961 ++ * when the producer index is about to be raised.
1962 ++ */
1963 ++ add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id);
1964 +
1965 + info->tx = tx;
1966 +- info->size += tx->size;
1967 ++ info->size += info->tx_local.size;
1968 + }
1969 +
1970 + static struct xen_netif_tx_request *xennet_make_first_txreq(
1971 +- struct netfront_queue *queue, struct sk_buff *skb,
1972 +- struct page *page, unsigned int offset, unsigned int len)
1973 ++ struct xennet_gnttab_make_txreq *info,
1974 ++ unsigned int offset, unsigned int len)
1975 + {
1976 +- struct xennet_gnttab_make_txreq info = {
1977 +- .queue = queue,
1978 +- .skb = skb,
1979 +- .page = page,
1980 +- .size = 0,
1981 +- };
1982 ++ info->size = 0;
1983 +
1984 +- gnttab_for_one_grant(page, offset, len, xennet_tx_setup_grant, &info);
1985 ++ gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, info);
1986 +
1987 +- return info.tx;
1988 ++ return info->tx;
1989 + }
1990 +
1991 + static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
1992 +@@ -499,35 +520,27 @@ static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
1993 + xennet_tx_setup_grant(gfn, offset, len, data);
1994 + }
1995 +
1996 +-static struct xen_netif_tx_request *xennet_make_txreqs(
1997 +- struct netfront_queue *queue, struct xen_netif_tx_request *tx,
1998 +- struct sk_buff *skb, struct page *page,
1999 ++static void xennet_make_txreqs(
2000 ++ struct xennet_gnttab_make_txreq *info,
2001 ++ struct page *page,
2002 + unsigned int offset, unsigned int len)
2003 + {
2004 +- struct xennet_gnttab_make_txreq info = {
2005 +- .queue = queue,
2006 +- .skb = skb,
2007 +- .tx = tx,
2008 +- };
2009 +-
2010 + /* Skip unused frames from start of page */
2011 + page += offset >> PAGE_SHIFT;
2012 + offset &= ~PAGE_MASK;
2013 +
2014 + while (len) {
2015 +- info.page = page;
2016 +- info.size = 0;
2017 ++ info->page = page;
2018 ++ info->size = 0;
2019 +
2020 + gnttab_foreach_grant_in_range(page, offset, len,
2021 + xennet_make_one_txreq,
2022 +- &info);
2023 ++ info);
2024 +
2025 + page++;
2026 + offset = 0;
2027 +- len -= info.size;
2028 ++ len -= info->size;
2029 + }
2030 +-
2031 +- return info.tx;
2032 + }
2033 +
2034 + /*
2035 +@@ -574,19 +587,34 @@ static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
2036 + return queue_idx;
2037 + }
2038 +
2039 ++static void xennet_mark_tx_pending(struct netfront_queue *queue)
2040 ++{
2041 ++ unsigned int i;
2042 ++
2043 ++ while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) !=
2044 ++ TX_LINK_NONE)
2045 ++ queue->tx_link[i] = TX_PENDING;
2046 ++}
2047 ++
2048 + static int xennet_xdp_xmit_one(struct net_device *dev,
2049 + struct netfront_queue *queue,
2050 + struct xdp_frame *xdpf)
2051 + {
2052 + struct netfront_info *np = netdev_priv(dev);
2053 + struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
2054 ++ struct xennet_gnttab_make_txreq info = {
2055 ++ .queue = queue,
2056 ++ .skb = NULL,
2057 ++ .page = virt_to_page(xdpf->data),
2058 ++ };
2059 + int notify;
2060 +
2061 +- xennet_make_first_txreq(queue, NULL,
2062 +- virt_to_page(xdpf->data),
2063 ++ xennet_make_first_txreq(&info,
2064 + offset_in_page(xdpf->data),
2065 + xdpf->len);
2066 +
2067 ++ xennet_mark_tx_pending(queue);
2068 ++
2069 + RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
2070 + if (notify)
2071 + notify_remote_via_irq(queue->tx_irq);
2072 +@@ -611,6 +639,8 @@ static int xennet_xdp_xmit(struct net_device *dev, int n,
2073 + int drops = 0;
2074 + int i, err;
2075 +
2076 ++ if (unlikely(np->broken))
2077 ++ return -ENODEV;
2078 + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
2079 + return -EINVAL;
2080 +
2081 +@@ -640,7 +670,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
2082 + {
2083 + struct netfront_info *np = netdev_priv(dev);
2084 + struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
2085 +- struct xen_netif_tx_request *tx, *first_tx;
2086 ++ struct xen_netif_tx_request *first_tx;
2087 + unsigned int i;
2088 + int notify;
2089 + int slots;
2090 +@@ -649,6 +679,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
2091 + unsigned int len;
2092 + unsigned long flags;
2093 + struct netfront_queue *queue = NULL;
2094 ++ struct xennet_gnttab_make_txreq info = { };
2095 + unsigned int num_queues = dev->real_num_tx_queues;
2096 + u16 queue_index;
2097 + struct sk_buff *nskb;
2098 +@@ -656,6 +687,8 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
2099 + /* Drop the packet if no queues are set up */
2100 + if (num_queues < 1)
2101 + goto drop;
2102 ++ if (unlikely(np->broken))
2103 ++ goto drop;
2104 + /* Determine which queue to transmit this SKB on */
2105 + queue_index = skb_get_queue_mapping(skb);
2106 + queue = &np->queues[queue_index];
2107 +@@ -706,21 +739,24 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
2108 + }
2109 +
2110 + /* First request for the linear area. */
2111 +- first_tx = tx = xennet_make_first_txreq(queue, skb,
2112 +- page, offset, len);
2113 +- offset += tx->size;
2114 ++ info.queue = queue;
2115 ++ info.skb = skb;
2116 ++ info.page = page;
2117 ++ first_tx = xennet_make_first_txreq(&info, offset, len);
2118 ++ offset += info.tx_local.size;
2119 + if (offset == PAGE_SIZE) {
2120 + page++;
2121 + offset = 0;
2122 + }
2123 +- len -= tx->size;
2124 ++ len -= info.tx_local.size;
2125 +
2126 + if (skb->ip_summed == CHECKSUM_PARTIAL)
2127 + /* local packet? */
2128 +- tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
2129 ++ first_tx->flags |= XEN_NETTXF_csum_blank |
2130 ++ XEN_NETTXF_data_validated;
2131 + else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2132 + /* remote but checksummed. */
2133 +- tx->flags |= XEN_NETTXF_data_validated;
2134 ++ first_tx->flags |= XEN_NETTXF_data_validated;
2135 +
2136 + /* Optional extra info after the first request. */
2137 + if (skb_shinfo(skb)->gso_size) {
2138 +@@ -729,7 +765,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
2139 + gso = (struct xen_netif_extra_info *)
2140 + RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
2141 +
2142 +- tx->flags |= XEN_NETTXF_extra_info;
2143 ++ first_tx->flags |= XEN_NETTXF_extra_info;
2144 +
2145 + gso->u.gso.size = skb_shinfo(skb)->gso_size;
2146 + gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
2147 +@@ -743,12 +779,12 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
2148 + }
2149 +
2150 + /* Requests for the rest of the linear area. */
2151 +- tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
2152 ++ xennet_make_txreqs(&info, page, offset, len);
2153 +
2154 + /* Requests for all the frags. */
2155 + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2156 + skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2157 +- tx = xennet_make_txreqs(queue, tx, skb, skb_frag_page(frag),
2158 ++ xennet_make_txreqs(&info, skb_frag_page(frag),
2159 + skb_frag_off(frag),
2160 + skb_frag_size(frag));
2161 + }
2162 +@@ -759,6 +795,8 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
2163 + /* timestamp packet in software */
2164 + skb_tx_timestamp(skb);
2165 +
2166 ++ xennet_mark_tx_pending(queue);
2167 ++
2168 + RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
2169 + if (notify)
2170 + notify_remote_via_irq(queue->tx_irq);
2171 +@@ -816,7 +854,7 @@ static int xennet_get_extras(struct netfront_queue *queue,
2172 + RING_IDX rp)
2173 +
2174 + {
2175 +- struct xen_netif_extra_info *extra;
2176 ++ struct xen_netif_extra_info extra;
2177 + struct device *dev = &queue->info->netdev->dev;
2178 + RING_IDX cons = queue->rx.rsp_cons;
2179 + int err = 0;
2180 +@@ -832,24 +870,22 @@ static int xennet_get_extras(struct netfront_queue *queue,
2181 + break;
2182 + }
2183 +
2184 +- extra = (struct xen_netif_extra_info *)
2185 +- RING_GET_RESPONSE(&queue->rx, ++cons);
2186 ++ RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);
2187 +
2188 +- if (unlikely(!extra->type ||
2189 +- extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
2190 ++ if (unlikely(!extra.type ||
2191 ++ extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
2192 + if (net_ratelimit())
2193 + dev_warn(dev, "Invalid extra type: %d\n",
2194 +- extra->type);
2195 ++ extra.type);
2196 + err = -EINVAL;
2197 + } else {
2198 +- memcpy(&extras[extra->type - 1], extra,
2199 +- sizeof(*extra));
2200 ++ extras[extra.type - 1] = extra;
2201 + }
2202 +
2203 + skb = xennet_get_rx_skb(queue, cons);
2204 + ref = xennet_get_rx_ref(queue, cons);
2205 + xennet_move_rx_slot(queue, skb, ref);
2206 +- } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
2207 ++ } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
2208 +
2209 + queue->rx.rsp_cons = cons;
2210 + return err;
2211 +@@ -907,7 +943,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
2212 + struct sk_buff_head *list,
2213 + bool *need_xdp_flush)
2214 + {
2215 +- struct xen_netif_rx_response *rx = &rinfo->rx;
2216 ++ struct xen_netif_rx_response *rx = &rinfo->rx, rx_local;
2217 + int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
2218 + RING_IDX cons = queue->rx.rsp_cons;
2219 + struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
2220 +@@ -991,7 +1027,8 @@ next:
2221 + break;
2222 + }
2223 +
2224 +- rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
2225 ++ RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local);
2226 ++ rx = &rx_local;
2227 + skb = xennet_get_rx_skb(queue, cons + slots);
2228 + ref = xennet_get_rx_ref(queue, cons + slots);
2229 + slots++;
2230 +@@ -1046,10 +1083,11 @@ static int xennet_fill_frags(struct netfront_queue *queue,
2231 + struct sk_buff *nskb;
2232 +
2233 + while ((nskb = __skb_dequeue(list))) {
2234 +- struct xen_netif_rx_response *rx =
2235 +- RING_GET_RESPONSE(&queue->rx, ++cons);
2236 ++ struct xen_netif_rx_response rx;
2237 + skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
2238 +
2239 ++ RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);
2240 ++
2241 + if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
2242 + unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
2243 +
2244 +@@ -1064,7 +1102,7 @@ static int xennet_fill_frags(struct netfront_queue *queue,
2245 +
2246 + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
2247 + skb_frag_page(nfrag),
2248 +- rx->offset, rx->status, PAGE_SIZE);
2249 ++ rx.offset, rx.status, PAGE_SIZE);
2250 +
2251 + skb_shinfo(nskb)->nr_frags = 0;
2252 + kfree_skb(nskb);
2253 +@@ -1158,12 +1196,19 @@ static int xennet_poll(struct napi_struct *napi, int budget)
2254 + skb_queue_head_init(&tmpq);
2255 +
2256 + rp = queue->rx.sring->rsp_prod;
2257 ++ if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) {
2258 ++ dev_alert(&dev->dev, "Illegal number of responses %u\n",
2259 ++ rp - queue->rx.rsp_cons);
2260 ++ queue->info->broken = true;
2261 ++ spin_unlock(&queue->rx_lock);
2262 ++ return 0;
2263 ++ }
2264 + rmb(); /* Ensure we see queued responses up to 'rp'. */
2265 +
2266 + i = queue->rx.rsp_cons;
2267 + work_done = 0;
2268 + while ((i != rp) && (work_done < budget)) {
2269 +- memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
2270 ++ RING_COPY_RESPONSE(&queue->rx, i, rx);
2271 + memset(extras, 0, sizeof(rinfo.extras));
2272 +
2273 + err = xennet_get_responses(queue, &rinfo, rp, &tmpq,
2274 +@@ -1288,17 +1333,18 @@ static void xennet_release_tx_bufs(struct netfront_queue *queue)
2275 +
2276 + for (i = 0; i < NET_TX_RING_SIZE; i++) {
2277 + /* Skip over entries which are actually freelist references */
2278 +- if (skb_entry_is_link(&queue->tx_skbs[i]))
2279 ++ if (!queue->tx_skbs[i])
2280 + continue;
2281 +
2282 +- skb = queue->tx_skbs[i].skb;
2283 ++ skb = queue->tx_skbs[i];
2284 ++ queue->tx_skbs[i] = NULL;
2285 + get_page(queue->grant_tx_page[i]);
2286 + gnttab_end_foreign_access(queue->grant_tx_ref[i],
2287 + GNTMAP_readonly,
2288 + (unsigned long)page_address(queue->grant_tx_page[i]));
2289 + queue->grant_tx_page[i] = NULL;
2290 + queue->grant_tx_ref[i] = GRANT_INVALID_REF;
2291 +- add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i);
2292 ++ add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
2293 + dev_kfree_skb_irq(skb);
2294 + }
2295 + }
2296 +@@ -1378,6 +1424,9 @@ static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
2297 + struct netfront_queue *queue = dev_id;
2298 + unsigned long flags;
2299 +
2300 ++ if (queue->info->broken)
2301 ++ return IRQ_HANDLED;
2302 ++
2303 + spin_lock_irqsave(&queue->tx_lock, flags);
2304 + xennet_tx_buf_gc(queue);
2305 + spin_unlock_irqrestore(&queue->tx_lock, flags);
2306 +@@ -1390,6 +1439,9 @@ static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
2307 + struct netfront_queue *queue = dev_id;
2308 + struct net_device *dev = queue->info->netdev;
2309 +
2310 ++ if (queue->info->broken)
2311 ++ return IRQ_HANDLED;
2312 ++
2313 + if (likely(netif_carrier_ok(dev) &&
2314 + RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
2315 + napi_schedule(&queue->napi);
2316 +@@ -1411,6 +1463,10 @@ static void xennet_poll_controller(struct net_device *dev)
2317 + struct netfront_info *info = netdev_priv(dev);
2318 + unsigned int num_queues = dev->real_num_tx_queues;
2319 + unsigned int i;
2320 ++
2321 ++ if (info->broken)
2322 ++ return;
2323 ++
2324 + for (i = 0; i < num_queues; ++i)
2325 + xennet_interrupt(0, &info->queues[i]);
2326 + }
2327 +@@ -1482,6 +1538,11 @@ static int xennet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
2328 +
2329 + static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2330 + {
2331 ++ struct netfront_info *np = netdev_priv(dev);
2332 ++
2333 ++ if (np->broken)
2334 ++ return -ENODEV;
2335 ++
2336 + switch (xdp->command) {
2337 + case XDP_SETUP_PROG:
2338 + return xennet_xdp_set(dev, xdp->prog, xdp->extack);
2339 +@@ -1859,13 +1920,15 @@ static int xennet_init_queue(struct netfront_queue *queue)
2340 + snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
2341 + devid, queue->id);
2342 +
2343 +- /* Initialise tx_skbs as a free chain containing every entry. */
2344 ++ /* Initialise tx_skb_freelist as a free chain containing every entry. */
2345 + queue->tx_skb_freelist = 0;
2346 ++ queue->tx_pend_queue = TX_LINK_NONE;
2347 + for (i = 0; i < NET_TX_RING_SIZE; i++) {
2348 +- skb_entry_set_link(&queue->tx_skbs[i], i+1);
2349 ++ queue->tx_link[i] = i + 1;
2350 + queue->grant_tx_ref[i] = GRANT_INVALID_REF;
2351 + queue->grant_tx_page[i] = NULL;
2352 + }
2353 ++ queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;
2354 +
2355 + /* Clear out rx_skbs */
2356 + for (i = 0; i < NET_RX_RING_SIZE; i++) {
2357 +@@ -2134,6 +2197,9 @@ static int talk_to_netback(struct xenbus_device *dev,
2358 + if (info->queues)
2359 + xennet_destroy_queues(info);
2360 +
2361 ++	/* For the case of a reconnect, reset the "broken" indicator. */
2362 ++ info->broken = false;
2363 ++
2364 + err = xennet_create_queues(info, &num_queues);
2365 + if (err < 0) {
2366 + xenbus_dev_fatal(dev, err, "creating queues");
2367 +diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
2368 +index b575997244482..c81690b2a681b 100644
2369 +--- a/drivers/nvme/target/io-cmd-file.c
2370 ++++ b/drivers/nvme/target/io-cmd-file.c
2371 +@@ -8,6 +8,7 @@
2372 + #include <linux/uio.h>
2373 + #include <linux/falloc.h>
2374 + #include <linux/file.h>
2375 ++#include <linux/fs.h>
2376 + #include "nvmet.h"
2377 +
2378 + #define NVMET_MAX_MPOOL_BVEC 16
2379 +@@ -266,7 +267,8 @@ static void nvmet_file_execute_rw(struct nvmet_req *req)
2380 +
2381 + if (req->ns->buffered_io) {
2382 + if (likely(!req->f.mpool_alloc) &&
2383 +- nvmet_file_execute_io(req, IOCB_NOWAIT))
2384 ++ (req->ns->file->f_mode & FMODE_NOWAIT) &&
2385 ++ nvmet_file_execute_io(req, IOCB_NOWAIT))
2386 + return;
2387 + nvmet_file_submit_buffered_io(req);
2388 + } else
2389 +diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
2390 +index 1251fd6e92780..96b67a70cbbbd 100644
2391 +--- a/drivers/nvme/target/tcp.c
2392 ++++ b/drivers/nvme/target/tcp.c
2393 +@@ -688,10 +688,11 @@ static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
2394 + static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
2395 + {
2396 + struct nvmet_tcp_queue *queue = cmd->queue;
2397 ++ int left = NVME_TCP_DIGEST_LENGTH - cmd->offset;
2398 + struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
2399 + struct kvec iov = {
2400 + .iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
2401 +- .iov_len = NVME_TCP_DIGEST_LENGTH - cmd->offset
2402 ++ .iov_len = left
2403 + };
2404 + int ret;
2405 +
2406 +@@ -705,6 +706,10 @@ static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
2407 + return ret;
2408 +
2409 + cmd->offset += ret;
2410 ++ left -= ret;
2411 ++
2412 ++ if (left)
2413 ++ return -EAGAIN;
2414 +
2415 + if (queue->nvme_sq.sqhd_disabled) {
2416 + cmd->queue->snd_cmd = NULL;
2417 +diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
2418 +index 434522465d983..604b294bb15c9 100644
2419 +--- a/drivers/pci/controller/pci-aardvark.c
2420 ++++ b/drivers/pci/controller/pci-aardvark.c
2421 +@@ -306,11 +306,6 @@ static inline u32 advk_readl(struct advk_pcie *pcie, u64 reg)
2422 + return readl(pcie->base + reg);
2423 + }
2424 +
2425 +-static inline u16 advk_read16(struct advk_pcie *pcie, u64 reg)
2426 +-{
2427 +- return advk_readl(pcie, (reg & ~0x3)) >> ((reg & 0x3) * 8);
2428 +-}
2429 +-
2430 + static u8 advk_pcie_ltssm_state(struct advk_pcie *pcie)
2431 + {
2432 + u32 val;
2433 +@@ -384,16 +379,9 @@ static void advk_pcie_wait_for_retrain(struct advk_pcie *pcie)
2434 +
2435 + static void advk_pcie_issue_perst(struct advk_pcie *pcie)
2436 + {
2437 +- u32 reg;
2438 +-
2439 + if (!pcie->reset_gpio)
2440 + return;
2441 +
2442 +- /* PERST does not work for some cards when link training is enabled */
2443 +- reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
2444 +- reg &= ~LINK_TRAINING_EN;
2445 +- advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
2446 +-
2447 + /* 10ms delay is needed for some cards */
2448 + dev_info(&pcie->pdev->dev, "issuing PERST via reset GPIO for 10ms\n");
2449 + gpiod_set_value_cansleep(pcie->reset_gpio, 1);
2450 +@@ -401,53 +389,46 @@ static void advk_pcie_issue_perst(struct advk_pcie *pcie)
2451 + gpiod_set_value_cansleep(pcie->reset_gpio, 0);
2452 + }
2453 +
2454 +-static int advk_pcie_train_at_gen(struct advk_pcie *pcie, int gen)
2455 ++static void advk_pcie_train_link(struct advk_pcie *pcie)
2456 + {
2457 +- int ret, neg_gen;
2458 ++ struct device *dev = &pcie->pdev->dev;
2459 + u32 reg;
2460 ++ int ret;
2461 +
2462 +- /* Setup link speed */
2463 ++ /*
2464 ++ * Setup PCIe rev / gen compliance based on device tree property
2465 ++ * 'max-link-speed' which also forces maximal link speed.
2466 ++ */
2467 + reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
2468 + reg &= ~PCIE_GEN_SEL_MSK;
2469 +- if (gen == 3)
2470 ++ if (pcie->link_gen == 3)
2471 + reg |= SPEED_GEN_3;
2472 +- else if (gen == 2)
2473 ++ else if (pcie->link_gen == 2)
2474 + reg |= SPEED_GEN_2;
2475 + else
2476 + reg |= SPEED_GEN_1;
2477 + advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
2478 +
2479 + /*
2480 +- * Enable link training. This is not needed in every call to this
2481 +- * function, just once suffices, but it does not break anything either.
2482 ++ * Set maximal link speed value also into PCIe Link Control 2 register.
2483 ++	 * Armada 3700 Functional Specification says that the default value is based
2484 ++	 * on SPEED_GEN, but tests showed that the default value is always 8.0 GT/s.
2485 + */
2486 ++ reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);
2487 ++ reg &= ~PCI_EXP_LNKCTL2_TLS;
2488 ++ if (pcie->link_gen == 3)
2489 ++ reg |= PCI_EXP_LNKCTL2_TLS_8_0GT;
2490 ++ else if (pcie->link_gen == 2)
2491 ++ reg |= PCI_EXP_LNKCTL2_TLS_5_0GT;
2492 ++ else
2493 ++ reg |= PCI_EXP_LNKCTL2_TLS_2_5GT;
2494 ++ advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);
2495 ++
2496 ++ /* Enable link training after selecting PCIe generation */
2497 + reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
2498 + reg |= LINK_TRAINING_EN;
2499 + advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
2500 +
2501 +- /*
2502 +- * Start link training immediately after enabling it.
2503 +- * This solves problems for some buggy cards.
2504 +- */
2505 +- reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL);
2506 +- reg |= PCI_EXP_LNKCTL_RL;
2507 +- advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL);
2508 +-
2509 +- ret = advk_pcie_wait_for_link(pcie);
2510 +- if (ret)
2511 +- return ret;
2512 +-
2513 +- reg = advk_read16(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKSTA);
2514 +- neg_gen = reg & PCI_EXP_LNKSTA_CLS;
2515 +-
2516 +- return neg_gen;
2517 +-}
2518 +-
2519 +-static void advk_pcie_train_link(struct advk_pcie *pcie)
2520 +-{
2521 +- struct device *dev = &pcie->pdev->dev;
2522 +- int neg_gen = -1, gen;
2523 +-
2524 + /*
2525 + * Reset PCIe card via PERST# signal. Some cards are not detected
2526 + * during link training when they are in some non-initial state.
2527 +@@ -458,41 +439,18 @@ static void advk_pcie_train_link(struct advk_pcie *pcie)
2528 + * PERST# signal could have been asserted by pinctrl subsystem before
2529 + * probe() callback has been called or issued explicitly by reset gpio
2530 + * function advk_pcie_issue_perst(), making the endpoint going into
2531 +- * fundamental reset. As required by PCI Express spec a delay for at
2532 +- * least 100ms after such a reset before link training is needed.
2533 +- */
2534 +- msleep(PCI_PM_D3COLD_WAIT);
2535 +-
2536 +- /*
2537 +- * Try link training at link gen specified by device tree property
2538 +- * 'max-link-speed'. If this fails, iteratively train at lower gen.
2539 +- */
2540 +- for (gen = pcie->link_gen; gen > 0; --gen) {
2541 +- neg_gen = advk_pcie_train_at_gen(pcie, gen);
2542 +- if (neg_gen > 0)
2543 +- break;
2544 +- }
2545 +-
2546 +- if (neg_gen < 0)
2547 +- goto err;
2548 +-
2549 +- /*
2550 +- * After successful training if negotiated gen is lower than requested,
2551 +- * train again on negotiated gen. This solves some stability issues for
2552 +- * some buggy gen1 cards.
2553 ++ * fundamental reset. As required by PCI Express spec (PCI Express
2554 ++ * Base Specification, REV. 4.0 PCI Express, February 19 2014, 6.6.1
2555 ++ * Conventional Reset) a delay for at least 100ms after such a reset
2556 ++ * before sending a Configuration Request to the device is needed.
2557 ++ * So wait until PCIe link is up. Function advk_pcie_wait_for_link()
2558 ++ * waits for link at least 900ms.
2559 + */
2560 +- if (neg_gen < gen) {
2561 +- gen = neg_gen;
2562 +- neg_gen = advk_pcie_train_at_gen(pcie, gen);
2563 +- }
2564 +-
2565 +- if (neg_gen == gen) {
2566 +- dev_info(dev, "link up at gen %i\n", gen);
2567 +- return;
2568 +- }
2569 +-
2570 +-err:
2571 +- dev_err(dev, "link never came up\n");
2572 ++ ret = advk_pcie_wait_for_link(pcie);
2573 ++ if (ret < 0)
2574 ++ dev_err(dev, "link never came up\n");
2575 ++ else
2576 ++ dev_info(dev, "link up\n");
2577 + }
2578 +
2579 + /*
2580 +@@ -692,6 +650,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
2581 + u32 reg;
2582 + unsigned int status;
2583 + char *strcomp_status, *str_posted;
2584 ++ int ret;
2585 +
2586 + reg = advk_readl(pcie, PIO_STAT);
2587 + status = (reg & PIO_COMPLETION_STATUS_MASK) >>
2588 +@@ -716,6 +675,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
2589 + case PIO_COMPLETION_STATUS_OK:
2590 + if (reg & PIO_ERR_STATUS) {
2591 + strcomp_status = "COMP_ERR";
2592 ++ ret = -EFAULT;
2593 + break;
2594 + }
2595 + /* Get the read result */
2596 +@@ -723,9 +683,11 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
2597 + *val = advk_readl(pcie, PIO_RD_DATA);
2598 + /* No error */
2599 + strcomp_status = NULL;
2600 ++ ret = 0;
2601 + break;
2602 + case PIO_COMPLETION_STATUS_UR:
2603 + strcomp_status = "UR";
2604 ++ ret = -EOPNOTSUPP;
2605 + break;
2606 + case PIO_COMPLETION_STATUS_CRS:
2607 + if (allow_crs && val) {
2608 +@@ -743,6 +705,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
2609 + */
2610 + *val = CFG_RD_CRS_VAL;
2611 + strcomp_status = NULL;
2612 ++ ret = 0;
2613 + break;
2614 + }
2615 + /* PCIe r4.0, sec 2.3.2, says:
2616 +@@ -758,21 +721,24 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
2617 + * Request and taking appropriate action, e.g., complete the
2618 + * Request to the host as a failed transaction.
2619 + *
2620 +- * To simplify implementation do not re-issue the Configuration
2621 +- * Request and complete the Request as a failed transaction.
2622 ++ * So return -EAGAIN and caller (pci-aardvark.c driver) will
2623 ++ * re-issue request again up to the PIO_RETRY_CNT retries.
2624 + */
2625 + strcomp_status = "CRS";
2626 ++ ret = -EAGAIN;
2627 + break;
2628 + case PIO_COMPLETION_STATUS_CA:
2629 + strcomp_status = "CA";
2630 ++ ret = -ECANCELED;
2631 + break;
2632 + default:
2633 + strcomp_status = "Unknown";
2634 ++ ret = -EINVAL;
2635 + break;
2636 + }
2637 +
2638 + if (!strcomp_status)
2639 +- return 0;
2640 ++ return ret;
2641 +
2642 + if (reg & PIO_NON_POSTED_REQ)
2643 + str_posted = "Non-posted";
2644 +@@ -782,7 +748,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
2645 + dev_dbg(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
2646 + str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));
2647 +
2648 +- return -EFAULT;
2649 ++ return ret;
2650 + }
2651 +
2652 + static int advk_pcie_wait_pio(struct advk_pcie *pcie)
2653 +@@ -790,13 +756,13 @@ static int advk_pcie_wait_pio(struct advk_pcie *pcie)
2654 + struct device *dev = &pcie->pdev->dev;
2655 + int i;
2656 +
2657 +- for (i = 0; i < PIO_RETRY_CNT; i++) {
2658 ++ for (i = 1; i <= PIO_RETRY_CNT; i++) {
2659 + u32 start, isr;
2660 +
2661 + start = advk_readl(pcie, PIO_START);
2662 + isr = advk_readl(pcie, PIO_ISR);
2663 + if (!start && isr)
2664 +- return 0;
2665 ++ return i;
2666 + udelay(PIO_RETRY_DELAY);
2667 + }
2668 +
2669 +@@ -984,7 +950,6 @@ static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
2670 + static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
2671 + {
2672 + struct pci_bridge_emul *bridge = &pcie->bridge;
2673 +- int ret;
2674 +
2675 + bridge->conf.vendor =
2676 + cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff);
2677 +@@ -1004,19 +969,14 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
2678 + /* Support interrupt A for MSI feature */
2679 + bridge->conf.intpin = PCIE_CORE_INT_A_ASSERT_ENABLE;
2680 +
2681 ++	/* Indicates support for Completion Retry Status */
2682 ++ bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
2683 ++
2684 + bridge->has_pcie = true;
2685 + bridge->data = pcie;
2686 + bridge->ops = &advk_pci_bridge_emul_ops;
2687 +
2688 +- /* PCIe config space can be initialized after pci_bridge_emul_init() */
2689 +- ret = pci_bridge_emul_init(bridge, 0);
2690 +- if (ret < 0)
2691 +- return ret;
2692 +-
2693 +- /* Indicates supports for Completion Retry Status */
2694 +- bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
2695 +-
2696 +- return 0;
2697 ++ return pci_bridge_emul_init(bridge, 0);
2698 + }
2699 +
2700 + static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
2701 +@@ -1068,6 +1028,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
2702 + int where, int size, u32 *val)
2703 + {
2704 + struct advk_pcie *pcie = bus->sysdata;
2705 ++ int retry_count;
2706 + bool allow_crs;
2707 + u32 reg;
2708 + int ret;
2709 +@@ -1090,18 +1051,8 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
2710 + (le16_to_cpu(pcie->bridge.pcie_conf.rootctl) &
2711 + PCI_EXP_RTCTL_CRSSVE);
2712 +
2713 +- if (advk_pcie_pio_is_running(pcie)) {
2714 +- /*
2715 +- * If it is possible return Completion Retry Status so caller
2716 +- * tries to issue the request again instead of failing.
2717 +- */
2718 +- if (allow_crs) {
2719 +- *val = CFG_RD_CRS_VAL;
2720 +- return PCIBIOS_SUCCESSFUL;
2721 +- }
2722 +- *val = 0xffffffff;
2723 +- return PCIBIOS_SET_FAILED;
2724 +- }
2725 ++ if (advk_pcie_pio_is_running(pcie))
2726 ++ goto try_crs;
2727 +
2728 + /* Program the control register */
2729 + reg = advk_readl(pcie, PIO_CTRL);
2730 +@@ -1120,30 +1071,24 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
2731 + /* Program the data strobe */
2732 + advk_writel(pcie, 0xf, PIO_WR_DATA_STRB);
2733 +
2734 +- /* Clear PIO DONE ISR and start the transfer */
2735 +- advk_writel(pcie, 1, PIO_ISR);
2736 +- advk_writel(pcie, 1, PIO_START);
2737 ++ retry_count = 0;
2738 ++ do {
2739 ++ /* Clear PIO DONE ISR and start the transfer */
2740 ++ advk_writel(pcie, 1, PIO_ISR);
2741 ++ advk_writel(pcie, 1, PIO_START);
2742 +
2743 +- ret = advk_pcie_wait_pio(pcie);
2744 +- if (ret < 0) {
2745 +- /*
2746 +- * If it is possible return Completion Retry Status so caller
2747 +- * tries to issue the request again instead of failing.
2748 +- */
2749 +- if (allow_crs) {
2750 +- *val = CFG_RD_CRS_VAL;
2751 +- return PCIBIOS_SUCCESSFUL;
2752 +- }
2753 +- *val = 0xffffffff;
2754 +- return PCIBIOS_SET_FAILED;
2755 +- }
2756 ++ ret = advk_pcie_wait_pio(pcie);
2757 ++ if (ret < 0)
2758 ++ goto try_crs;
2759 +
2760 +- /* Check PIO status and get the read result */
2761 +- ret = advk_pcie_check_pio_status(pcie, allow_crs, val);
2762 +- if (ret < 0) {
2763 +- *val = 0xffffffff;
2764 +- return PCIBIOS_SET_FAILED;
2765 +- }
2766 ++ retry_count += ret;
2767 ++
2768 ++ /* Check PIO status and get the read result */
2769 ++ ret = advk_pcie_check_pio_status(pcie, allow_crs, val);
2770 ++ } while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);
2771 ++
2772 ++ if (ret < 0)
2773 ++ goto fail;
2774 +
2775 + if (size == 1)
2776 + *val = (*val >> (8 * (where & 3))) & 0xff;
2777 +@@ -1151,6 +1096,20 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
2778 + *val = (*val >> (8 * (where & 3))) & 0xffff;
2779 +
2780 + return PCIBIOS_SUCCESSFUL;
2781 ++
2782 ++try_crs:
2783 ++ /*
2784 ++ * If it is possible, return Completion Retry Status so that caller
2785 ++ * tries to issue the request again instead of failing.
2786 ++ */
2787 ++ if (allow_crs) {
2788 ++ *val = CFG_RD_CRS_VAL;
2789 ++ return PCIBIOS_SUCCESSFUL;
2790 ++ }
2791 ++
2792 ++fail:
2793 ++ *val = 0xffffffff;
2794 ++ return PCIBIOS_SET_FAILED;
2795 + }
2796 +
2797 + static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
2798 +@@ -1159,6 +1118,7 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
2799 + struct advk_pcie *pcie = bus->sysdata;
2800 + u32 reg;
2801 + u32 data_strobe = 0x0;
2802 ++ int retry_count;
2803 + int offset;
2804 + int ret;
2805 +
2806 +@@ -1200,19 +1160,22 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
2807 + /* Program the data strobe */
2808 + advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB);
2809 +
2810 +- /* Clear PIO DONE ISR and start the transfer */
2811 +- advk_writel(pcie, 1, PIO_ISR);
2812 +- advk_writel(pcie, 1, PIO_START);
2813 ++ retry_count = 0;
2814 ++ do {
2815 ++ /* Clear PIO DONE ISR and start the transfer */
2816 ++ advk_writel(pcie, 1, PIO_ISR);
2817 ++ advk_writel(pcie, 1, PIO_START);
2818 +
2819 +- ret = advk_pcie_wait_pio(pcie);
2820 +- if (ret < 0)
2821 +- return PCIBIOS_SET_FAILED;
2822 ++ ret = advk_pcie_wait_pio(pcie);
2823 ++ if (ret < 0)
2824 ++ return PCIBIOS_SET_FAILED;
2825 +
2826 +- ret = advk_pcie_check_pio_status(pcie, false, NULL);
2827 +- if (ret < 0)
2828 +- return PCIBIOS_SET_FAILED;
2829 ++ retry_count += ret;
2830 +
2831 +- return PCIBIOS_SUCCESSFUL;
2832 ++ ret = advk_pcie_check_pio_status(pcie, false, NULL);
2833 ++ } while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);
2834 ++
2835 ++ return ret < 0 ? PCIBIOS_SET_FAILED : PCIBIOS_SUCCESSFUL;
2836 + }
2837 +
2838 + static struct pci_ops advk_pcie_ops = {
2839 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
2840 +index 31c384108bc9c..8418b59b3743b 100644
2841 +--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
2842 ++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
2843 +@@ -3675,7 +3675,7 @@ _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
2844 +
2845 + shost_for_each_device(sdev, ioc->shost) {
2846 + sas_device_priv_data = sdev->hostdata;
2847 +- if (!sas_device_priv_data)
2848 ++ if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
2849 + continue;
2850 + if (sas_device_priv_data->sas_target->sas_address
2851 + != sas_address)
2852 +diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
2853 +index 3fc7c2a31c191..1a3f5adc68849 100644
2854 +--- a/drivers/scsi/scsi_debug.c
2855 ++++ b/drivers/scsi/scsi_debug.c
2856 +@@ -4628,6 +4628,7 @@ static void zbc_rwp_zone(struct sdebug_dev_info *devip,
2857 + struct sdeb_zone_state *zsp)
2858 + {
2859 + enum sdebug_z_cond zc;
2860 ++ struct sdeb_store_info *sip = devip2sip(devip, false);
2861 +
2862 + if (zbc_zone_is_conv(zsp))
2863 + return;
2864 +@@ -4639,6 +4640,10 @@ static void zbc_rwp_zone(struct sdebug_dev_info *devip,
2865 + if (zsp->z_cond == ZC4_CLOSED)
2866 + devip->nr_closed--;
2867 +
2868 ++ if (zsp->z_wp > zsp->z_start)
2869 ++ memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
2870 ++ (zsp->z_wp - zsp->z_start) * sdebug_sector_size);
2871 ++
2872 + zsp->z_non_seq_resource = false;
2873 + zsp->z_wp = zsp->z_start;
2874 + zsp->z_cond = ZC1_EMPTY;
2875 +diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
2876 +index 8de67679a8782..42db9c52208e6 100644
2877 +--- a/drivers/scsi/scsi_sysfs.c
2878 ++++ b/drivers/scsi/scsi_sysfs.c
2879 +@@ -816,7 +816,7 @@ store_state_field(struct device *dev, struct device_attribute *attr,
2880 +
2881 + mutex_lock(&sdev->state_mutex);
2882 + if (sdev->sdev_state == SDEV_RUNNING && state == SDEV_RUNNING) {
2883 +- ret = count;
2884 ++ ret = 0;
2885 + } else {
2886 + ret = scsi_device_set_state(sdev, state);
2887 + if (ret == 0 && state == SDEV_RUNNING)
2888 +diff --git a/drivers/staging/fbtft/fb_ssd1351.c b/drivers/staging/fbtft/fb_ssd1351.c
2889 +index cf263a58a1489..6fd549a424d53 100644
2890 +--- a/drivers/staging/fbtft/fb_ssd1351.c
2891 ++++ b/drivers/staging/fbtft/fb_ssd1351.c
2892 +@@ -187,7 +187,6 @@ static struct fbtft_display display = {
2893 + },
2894 + };
2895 +
2896 +-#ifdef CONFIG_FB_BACKLIGHT
2897 + static int update_onboard_backlight(struct backlight_device *bd)
2898 + {
2899 + struct fbtft_par *par = bl_get_data(bd);
2900 +@@ -231,9 +230,6 @@ static void register_onboard_backlight(struct fbtft_par *par)
2901 + if (!par->fbtftops.unregister_backlight)
2902 + par->fbtftops.unregister_backlight = fbtft_unregister_backlight;
2903 + }
2904 +-#else
2905 +-static void register_onboard_backlight(struct fbtft_par *par) { };
2906 +-#endif
2907 +
2908 + FBTFT_REGISTER_DRIVER(DRVNAME, "solomon,ssd1351", &display);
2909 +
2910 +diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
2911 +index 3723269890d5f..d0c8d85f3db0f 100644
2912 +--- a/drivers/staging/fbtft/fbtft-core.c
2913 ++++ b/drivers/staging/fbtft/fbtft-core.c
2914 +@@ -128,7 +128,6 @@ static int fbtft_request_gpios(struct fbtft_par *par)
2915 + return 0;
2916 + }
2917 +
2918 +-#ifdef CONFIG_FB_BACKLIGHT
2919 + static int fbtft_backlight_update_status(struct backlight_device *bd)
2920 + {
2921 + struct fbtft_par *par = bl_get_data(bd);
2922 +@@ -161,6 +160,7 @@ void fbtft_unregister_backlight(struct fbtft_par *par)
2923 + par->info->bl_dev = NULL;
2924 + }
2925 + }
2926 ++EXPORT_SYMBOL(fbtft_unregister_backlight);
2927 +
2928 + static const struct backlight_ops fbtft_bl_ops = {
2929 + .get_brightness = fbtft_backlight_get_brightness,
2930 +@@ -198,12 +198,7 @@ void fbtft_register_backlight(struct fbtft_par *par)
2931 + if (!par->fbtftops.unregister_backlight)
2932 + par->fbtftops.unregister_backlight = fbtft_unregister_backlight;
2933 + }
2934 +-#else
2935 +-void fbtft_register_backlight(struct fbtft_par *par) { };
2936 +-void fbtft_unregister_backlight(struct fbtft_par *par) { };
2937 +-#endif
2938 + EXPORT_SYMBOL(fbtft_register_backlight);
2939 +-EXPORT_SYMBOL(fbtft_unregister_backlight);
2940 +
2941 + static void fbtft_set_addr_win(struct fbtft_par *par, int xs, int ys, int xe,
2942 + int ye)
2943 +@@ -853,13 +848,11 @@ int fbtft_register_framebuffer(struct fb_info *fb_info)
2944 + fb_info->fix.smem_len >> 10, text1,
2945 + HZ / fb_info->fbdefio->delay, text2);
2946 +
2947 +-#ifdef CONFIG_FB_BACKLIGHT
2948 + /* Turn on backlight if available */
2949 + if (fb_info->bl_dev) {
2950 + fb_info->bl_dev->props.power = FB_BLANK_UNBLANK;
2951 + fb_info->bl_dev->ops->update_status(fb_info->bl_dev);
2952 + }
2953 +-#endif
2954 +
2955 + return 0;
2956 +
2957 +diff --git a/drivers/staging/greybus/audio_helper.c b/drivers/staging/greybus/audio_helper.c
2958 +index 3011b8abce389..a9576f92efaa4 100644
2959 +--- a/drivers/staging/greybus/audio_helper.c
2960 ++++ b/drivers/staging/greybus/audio_helper.c
2961 +@@ -192,7 +192,11 @@ int gbaudio_remove_component_controls(struct snd_soc_component *component,
2962 + unsigned int num_controls)
2963 + {
2964 + struct snd_card *card = component->card->snd_card;
2965 ++ int err;
2966 +
2967 +- return gbaudio_remove_controls(card, component->dev, controls,
2968 +- num_controls, component->name_prefix);
2969 ++ down_write(&card->controls_rwsem);
2970 ++ err = gbaudio_remove_controls(card, component->dev, controls,
2971 ++ num_controls, component->name_prefix);
2972 ++ up_write(&card->controls_rwsem);
2973 ++ return err;
2974 + }
2975 +diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
2976 +index 663675efcfe4c..99c27d6b42333 100644
2977 +--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
2978 ++++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
2979 +@@ -2551,13 +2551,14 @@ static void _rtl92e_pci_disconnect(struct pci_dev *pdev)
2980 + free_irq(dev->irq, dev);
2981 + priv->irq = 0;
2982 + }
2983 +- free_rtllib(dev);
2984 +
2985 + if (dev->mem_start != 0) {
2986 + iounmap((void __iomem *)dev->mem_start);
2987 + release_mem_region(pci_resource_start(pdev, 1),
2988 + pci_resource_len(pdev, 1));
2989 + }
2990 ++
2991 ++ free_rtllib(dev);
2992 + } else {
2993 + priv = rtllib_priv(dev);
2994 + }
2995 +diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
2996 +index 92c9a476defc9..8f143c09a1696 100644
2997 +--- a/drivers/tty/hvc/hvc_xen.c
2998 ++++ b/drivers/tty/hvc/hvc_xen.c
2999 +@@ -86,7 +86,11 @@ static int __write_console(struct xencons_info *xencons,
3000 + cons = intf->out_cons;
3001 + prod = intf->out_prod;
3002 + mb(); /* update queue values before going on */
3003 +- BUG_ON((prod - cons) > sizeof(intf->out));
3004 ++
3005 ++ if ((prod - cons) > sizeof(intf->out)) {
3006 ++ pr_err_once("xencons: Illegal ring page indices");
3007 ++ return -EINVAL;
3008 ++ }
3009 +
3010 + while ((sent < len) && ((prod - cons) < sizeof(intf->out)))
3011 + intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++];
3012 +@@ -114,7 +118,10 @@ static int domU_write_console(uint32_t vtermno, const char *data, int len)
3013 + */
3014 + while (len) {
3015 + int sent = __write_console(cons, data, len);
3016 +-
3017 ++
3018 ++ if (sent < 0)
3019 ++ return sent;
3020 ++
3021 + data += sent;
3022 + len -= sent;
3023 +
3024 +@@ -138,7 +145,11 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
3025 + cons = intf->in_cons;
3026 + prod = intf->in_prod;
3027 + mb(); /* get pointers before reading ring */
3028 +- BUG_ON((prod - cons) > sizeof(intf->in));
3029 ++
3030 ++ if ((prod - cons) > sizeof(intf->in)) {
3031 ++ pr_err_once("xencons: Illegal ring page indices");
3032 ++ return -EINVAL;
3033 ++ }
3034 +
3035 + while (cons != prod && recv < len)
3036 + buf[recv++] = intf->in[MASK_XENCONS_IDX(cons++, intf->in)];
3037 +diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
3038 +index b4c6527fe5f66..f798455942844 100644
3039 +--- a/drivers/usb/chipidea/ci_hdrc_imx.c
3040 ++++ b/drivers/usb/chipidea/ci_hdrc_imx.c
3041 +@@ -425,15 +425,15 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
3042 + data->phy = devm_usb_get_phy_by_phandle(dev, "fsl,usbphy", 0);
3043 + if (IS_ERR(data->phy)) {
3044 + ret = PTR_ERR(data->phy);
3045 +- if (ret == -ENODEV) {
3046 +- data->phy = devm_usb_get_phy_by_phandle(dev, "phys", 0);
3047 +- if (IS_ERR(data->phy)) {
3048 +- ret = PTR_ERR(data->phy);
3049 +- if (ret == -ENODEV)
3050 +- data->phy = NULL;
3051 +- else
3052 +- goto err_clk;
3053 +- }
3054 ++ if (ret != -ENODEV)
3055 ++ goto err_clk;
3056 ++ data->phy = devm_usb_get_phy_by_phandle(dev, "phys", 0);
3057 ++ if (IS_ERR(data->phy)) {
3058 ++ ret = PTR_ERR(data->phy);
3059 ++ if (ret == -ENODEV)
3060 ++ data->phy = NULL;
3061 ++ else
3062 ++ goto err_clk;
3063 + }
3064 + }
3065 +
3066 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
3067 +index 95a9bae72f135..3f406519da58d 100644
3068 +--- a/drivers/usb/core/hub.c
3069 ++++ b/drivers/usb/core/hub.c
3070 +@@ -4628,8 +4628,6 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
3071 + if (oldspeed == USB_SPEED_LOW)
3072 + delay = HUB_LONG_RESET_TIME;
3073 +
3074 +- mutex_lock(hcd->address0_mutex);
3075 +-
3076 + /* Reset the device; full speed may morph to high speed */
3077 + /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
3078 + retval = hub_port_reset(hub, port1, udev, delay, false);
3079 +@@ -4940,7 +4938,6 @@ fail:
3080 + hub_port_disable(hub, port1, 0);
3081 + update_devnum(udev, devnum); /* for disconnect processing */
3082 + }
3083 +- mutex_unlock(hcd->address0_mutex);
3084 + return retval;
3085 + }
3086 +
3087 +@@ -5115,6 +5112,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
3088 + struct usb_port *port_dev = hub->ports[port1 - 1];
3089 + struct usb_device *udev = port_dev->child;
3090 + static int unreliable_port = -1;
3091 ++ bool retry_locked;
3092 +
3093 + /* Disconnect any existing devices under this port */
3094 + if (udev) {
3095 +@@ -5170,8 +5168,11 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
3096 + unit_load = 100;
3097 +
3098 + status = 0;
3099 +- for (i = 0; i < PORT_INIT_TRIES; i++) {
3100 +
3101 ++ for (i = 0; i < PORT_INIT_TRIES; i++) {
3102 ++ usb_lock_port(port_dev);
3103 ++ mutex_lock(hcd->address0_mutex);
3104 ++ retry_locked = true;
3105 + /* reallocate for each attempt, since references
3106 + * to the previous one can escape in various ways
3107 + */
3108 +@@ -5179,6 +5180,8 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
3109 + if (!udev) {
3110 + dev_err(&port_dev->dev,
3111 + "couldn't allocate usb_device\n");
3112 ++ mutex_unlock(hcd->address0_mutex);
3113 ++ usb_unlock_port(port_dev);
3114 + goto done;
3115 + }
3116 +
3117 +@@ -5200,12 +5203,14 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
3118 + }
3119 +
3120 + /* reset (non-USB 3.0 devices) and get descriptor */
3121 +- usb_lock_port(port_dev);
3122 + status = hub_port_init(hub, udev, port1, i);
3123 +- usb_unlock_port(port_dev);
3124 + if (status < 0)
3125 + goto loop;
3126 +
3127 ++ mutex_unlock(hcd->address0_mutex);
3128 ++ usb_unlock_port(port_dev);
3129 ++ retry_locked = false;
3130 ++
3131 + if (udev->quirks & USB_QUIRK_DELAY_INIT)
3132 + msleep(2000);
3133 +
3134 +@@ -5298,6 +5303,10 @@ loop:
3135 + usb_ep0_reinit(udev);
3136 + release_devnum(udev);
3137 + hub_free_dev(udev);
3138 ++ if (retry_locked) {
3139 ++ mutex_unlock(hcd->address0_mutex);
3140 ++ usb_unlock_port(port_dev);
3141 ++ }
3142 + usb_put_dev(udev);
3143 + if ((status == -ENOTCONN) || (status == -ENOTSUPP))
3144 + break;
3145 +@@ -5839,6 +5848,8 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
3146 + bos = udev->bos;
3147 + udev->bos = NULL;
3148 +
3149 ++ mutex_lock(hcd->address0_mutex);
3150 ++
3151 + for (i = 0; i < PORT_INIT_TRIES; ++i) {
3152 +
3153 + /* ep0 maxpacket size may change; let the HCD know about it.
3154 +@@ -5848,6 +5859,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
3155 + if (ret >= 0 || ret == -ENOTCONN || ret == -ENODEV)
3156 + break;
3157 + }
3158 ++ mutex_unlock(hcd->address0_mutex);
3159 +
3160 + if (ret < 0)
3161 + goto re_enumerate;
3162 +diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
3163 +index 7207a36c6e26b..449f19c3633c2 100644
3164 +--- a/drivers/usb/dwc2/gadget.c
3165 ++++ b/drivers/usb/dwc2/gadget.c
3166 +@@ -1198,6 +1198,8 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
3167 + }
3168 + ctrl |= DXEPCTL_CNAK;
3169 + } else {
3170 ++ hs_req->req.frame_number = hs_ep->target_frame;
3171 ++ hs_req->req.actual = 0;
3172 + dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
3173 + return;
3174 + }
3175 +@@ -2856,9 +2858,12 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
3176 +
3177 + do {
3178 + hs_req = get_ep_head(hs_ep);
3179 +- if (hs_req)
3180 ++ if (hs_req) {
3181 ++ hs_req->req.frame_number = hs_ep->target_frame;
3182 ++ hs_req->req.actual = 0;
3183 + dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req,
3184 + -ENODATA);
3185 ++ }
3186 + dwc2_gadget_incr_frame_num(hs_ep);
3187 + /* Update current frame number value. */
3188 + hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
3189 +@@ -2911,8 +2916,11 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
3190 +
3191 + while (dwc2_gadget_target_frame_elapsed(ep)) {
3192 + hs_req = get_ep_head(ep);
3193 +- if (hs_req)
3194 ++ if (hs_req) {
3195 ++ hs_req->req.frame_number = ep->target_frame;
3196 ++ hs_req->req.actual = 0;
3197 + dwc2_hsotg_complete_request(hsotg, ep, hs_req, -ENODATA);
3198 ++ }
3199 +
3200 + dwc2_gadget_incr_frame_num(ep);
3201 + /* Update current frame number value. */
3202 +@@ -3001,8 +3009,11 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
3203 +
3204 + while (dwc2_gadget_target_frame_elapsed(hs_ep)) {
3205 + hs_req = get_ep_head(hs_ep);
3206 +- if (hs_req)
3207 ++ if (hs_req) {
3208 ++ hs_req->req.frame_number = hs_ep->target_frame;
3209 ++ hs_req->req.actual = 0;
3210 + dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
3211 ++ }
3212 +
3213 + dwc2_gadget_incr_frame_num(hs_ep);
3214 + /* Update current frame number value. */
3215 +diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
3216 +index 68bbac64b7536..94af71e9856f2 100644
3217 +--- a/drivers/usb/dwc2/hcd_queue.c
3218 ++++ b/drivers/usb/dwc2/hcd_queue.c
3219 +@@ -59,7 +59,7 @@
3220 + #define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5))
3221 +
3222 + /* If we get a NAK, wait this long before retrying */
3223 +-#define DWC2_RETRY_WAIT_DELAY 1*1E6L
3224 ++#define DWC2_RETRY_WAIT_DELAY (1 * NSEC_PER_MSEC)
3225 +
3226 + /**
3227 + * dwc2_periodic_channel_available() - Checks that a channel is available for a
3228 +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
3229 +index b75fe568096f9..e9a87e1f49508 100644
3230 +--- a/drivers/usb/dwc3/gadget.c
3231 ++++ b/drivers/usb/dwc3/gadget.c
3232 +@@ -310,13 +310,24 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
3233 + if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
3234 + int link_state;
3235 +
3236 ++ /*
3237 ++ * Initiate remote wakeup if the link state is in U3 when
3238 ++ * operating in SS/SSP or L1/L2 when operating in HS/FS. If the
3239 ++ * link state is in U1/U2, no remote wakeup is needed. The Start
3240 ++ * Transfer command will initiate the link recovery.
3241 ++ */
3242 + link_state = dwc3_gadget_get_link_state(dwc);
3243 +- if (link_state == DWC3_LINK_STATE_U1 ||
3244 +- link_state == DWC3_LINK_STATE_U2 ||
3245 +- link_state == DWC3_LINK_STATE_U3) {
3246 ++ switch (link_state) {
3247 ++ case DWC3_LINK_STATE_U2:
3248 ++ if (dwc->gadget->speed >= USB_SPEED_SUPER)
3249 ++ break;
3250 ++
3251 ++ fallthrough;
3252 ++ case DWC3_LINK_STATE_U3:
3253 + ret = __dwc3_gadget_wakeup(dwc);
3254 + dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
3255 + ret);
3256 ++ break;
3257 + }
3258 + }
3259 +
3260 +@@ -2907,6 +2918,9 @@ static bool dwc3_gadget_endpoint_trbs_complete(struct dwc3_ep *dep,
3261 + struct dwc3 *dwc = dep->dwc;
3262 + bool no_started_trb = true;
3263 +
3264 ++ if (!dep->endpoint.desc)
3265 ++ return no_started_trb;
3266 ++
3267 + dwc3_gadget_ep_cleanup_completed_requests(dep, event, status);
3268 +
3269 + if (dep->flags & DWC3_EP_END_TRANSFER_PENDING)
3270 +@@ -2954,6 +2968,9 @@ static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep,
3271 + {
3272 + int status = 0;
3273 +
3274 ++ if (!dep->endpoint.desc)
3275 ++ return;
3276 ++
3277 + if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
3278 + dwc3_gadget_endpoint_frame_from_event(dep, event);
3279 +
3280 +@@ -3007,6 +3024,14 @@ static void dwc3_gadget_endpoint_command_complete(struct dwc3_ep *dep,
3281 + if (cmd != DWC3_DEPCMD_ENDTRANSFER)
3282 + return;
3283 +
3284 ++ /*
3285 ++ * The END_TRANSFER command will cause the controller to generate a
3286 ++ * NoStream Event, and it's not due to the host DP NoStream rejection.
3287 ++ * Ignore the next NoStream event.
3288 ++ */
3289 ++ if (dep->stream_capable)
3290 ++ dep->flags |= DWC3_EP_IGNORE_NEXT_NOSTREAM;
3291 ++
3292 + dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
3293 + dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
3294 + dwc3_gadget_ep_cleanup_cancelled_requests(dep);
3295 +@@ -3229,14 +3254,6 @@ static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
3296 + WARN_ON_ONCE(ret);
3297 + dep->resource_index = 0;
3298 +
3299 +- /*
3300 +- * The END_TRANSFER command will cause the controller to generate a
3301 +- * NoStream Event, and it's not due to the host DP NoStream rejection.
3302 +- * Ignore the next NoStream event.
3303 +- */
3304 +- if (dep->stream_capable)
3305 +- dep->flags |= DWC3_EP_IGNORE_NEXT_NOSTREAM;
3306 +-
3307 + if (!interrupt)
3308 + dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
3309 + else
3310 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
3311 +index c7356718a7c66..28ffe4e358b77 100644
3312 +--- a/drivers/usb/serial/option.c
3313 ++++ b/drivers/usb/serial/option.c
3314 +@@ -1267,6 +1267,8 @@ static const struct usb_device_id option_ids[] = {
3315 + .driver_info = NCTRL(2) },
3316 + { USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */
3317 + .driver_info = NCTRL(0) | ZLP },
3318 ++ { USB_DEVICE(TELIT_VENDOR_ID, 0x9200), /* Telit LE910S1 flashing device */
3319 ++ .driver_info = NCTRL(0) | ZLP },
3320 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
3321 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
3322 + .driver_info = RSVD(1) },
3323 +@@ -2094,6 +2096,9 @@ static const struct usb_device_id option_ids[] = {
3324 + { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
3325 + { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
3326 + { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
3327 ++ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
3328 ++ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */
3329 ++ .driver_info = RSVD(4) },
3330 + { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
3331 + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
3332 + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
3333 +diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c
3334 +index 99562cc65ca69..700e38e921523 100644
3335 +--- a/drivers/usb/typec/tcpm/fusb302.c
3336 ++++ b/drivers/usb/typec/tcpm/fusb302.c
3337 +@@ -669,25 +669,27 @@ static int tcpm_set_cc(struct tcpc_dev *dev, enum typec_cc_status cc)
3338 + ret = fusb302_i2c_mask_write(chip, FUSB_REG_MASK,
3339 + FUSB_REG_MASK_BC_LVL |
3340 + FUSB_REG_MASK_COMP_CHNG,
3341 +- FUSB_REG_MASK_COMP_CHNG);
3342 ++ FUSB_REG_MASK_BC_LVL);
3343 + if (ret < 0) {
3344 + fusb302_log(chip, "cannot set SRC interrupt, ret=%d",
3345 + ret);
3346 + goto done;
3347 + }
3348 + chip->intr_comp_chng = true;
3349 ++ chip->intr_bc_lvl = false;
3350 + break;
3351 + case TYPEC_CC_RD:
3352 + ret = fusb302_i2c_mask_write(chip, FUSB_REG_MASK,
3353 + FUSB_REG_MASK_BC_LVL |
3354 + FUSB_REG_MASK_COMP_CHNG,
3355 +- FUSB_REG_MASK_BC_LVL);
3356 ++ FUSB_REG_MASK_COMP_CHNG);
3357 + if (ret < 0) {
3358 + fusb302_log(chip, "cannot set SRC interrupt, ret=%d",
3359 + ret);
3360 + goto done;
3361 + }
3362 + chip->intr_bc_lvl = true;
3363 ++ chip->intr_comp_chng = false;
3364 + break;
3365 + default:
3366 + break;
3367 +diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
3368 +index a483cec31d5cb..5cd1ee66d2326 100644
3369 +--- a/drivers/vhost/vsock.c
3370 ++++ b/drivers/vhost/vsock.c
3371 +@@ -494,7 +494,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
3372 + virtio_transport_free_pkt(pkt);
3373 +
3374 + len += sizeof(pkt->hdr);
3375 +- vhost_add_used(vq, head, len);
3376 ++ vhost_add_used(vq, head, 0);
3377 + total_len += len;
3378 + added = true;
3379 + } while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
3380 +diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
3381 +index 8a75092bb148b..98d870672dc5e 100644
3382 +--- a/drivers/xen/xenbus/xenbus_probe.c
3383 ++++ b/drivers/xen/xenbus/xenbus_probe.c
3384 +@@ -846,7 +846,7 @@ static struct notifier_block xenbus_resume_nb = {
3385 +
3386 + static int __init xenbus_init(void)
3387 + {
3388 +- int err = 0;
3389 ++ int err;
3390 + uint64_t v = 0;
3391 + xen_store_domain_type = XS_UNKNOWN;
3392 +
3393 +@@ -886,6 +886,29 @@ static int __init xenbus_init(void)
3394 + err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
3395 + if (err)
3396 + goto out_error;
3397 ++ /*
3398 ++ * Uninitialized hvm_params are zero and return no error.
3399 ++ * Although it is theoretically possible to have
3400 ++ * HVM_PARAM_STORE_PFN set to zero on purpose, in reality it is
3401 ++ * not zero when valid. If zero, it means that Xenstore hasn't
3402 ++ * been properly initialized. Instead of attempting to map a
3403 ++ * wrong guest physical address return error.
3404 ++ *
3405 ++ * Also recognize all bits set as an invalid value.
3406 ++ */
3407 ++ if (!v || !~v) {
3408 ++ err = -ENOENT;
3409 ++ goto out_error;
3410 ++ }
3411 ++ /* Avoid truncation on 32-bit. */
3412 ++#if BITS_PER_LONG == 32
3413 ++ if (v > ULONG_MAX) {
3414 ++ pr_err("%s: cannot handle HVM_PARAM_STORE_PFN=%llx > ULONG_MAX\n",
3415 ++ __func__, v);
3416 ++ err = -EINVAL;
3417 ++ goto out_error;
3418 ++ }
3419 ++#endif
3420 + xen_store_gfn = (unsigned long)v;
3421 + xen_store_interface =
3422 + xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
3423 +@@ -920,8 +943,10 @@ static int __init xenbus_init(void)
3424 + */
3425 + proc_create_mount_point("xen");
3426 + #endif
3427 ++ return 0;
3428 +
3429 + out_error:
3430 ++ xen_store_domain_type = XS_UNKNOWN;
3431 + return err;
3432 + }
3433 +
3434 +diff --git a/fs/ceph/super.c b/fs/ceph/super.c
3435 +index f33bfb255db8f..08c8d34c98091 100644
3436 +--- a/fs/ceph/super.c
3437 ++++ b/fs/ceph/super.c
3438 +@@ -52,8 +52,7 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
3439 + struct ceph_fs_client *fsc = ceph_inode_to_client(d_inode(dentry));
3440 + struct ceph_mon_client *monc = &fsc->client->monc;
3441 + struct ceph_statfs st;
3442 +- u64 fsid;
3443 +- int err;
3444 ++ int i, err;
3445 + u64 data_pool;
3446 +
3447 + if (fsc->mdsc->mdsmap->m_num_data_pg_pools == 1) {
3448 +@@ -99,12 +98,14 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
3449 + buf->f_namelen = NAME_MAX;
3450 +
3451 + /* Must convert the fsid, for consistent values across arches */
3452 ++ buf->f_fsid.val[0] = 0;
3453 + mutex_lock(&monc->mutex);
3454 +- fsid = le64_to_cpu(*(__le64 *)(&monc->monmap->fsid)) ^
3455 +- le64_to_cpu(*((__le64 *)&monc->monmap->fsid + 1));
3456 ++ for (i = 0 ; i < sizeof(monc->monmap->fsid) / sizeof(__le32) ; ++i)
3457 ++ buf->f_fsid.val[0] ^= le32_to_cpu(((__le32 *)&monc->monmap->fsid)[i]);
3458 + mutex_unlock(&monc->mutex);
3459 +
3460 +- buf->f_fsid = u64_to_fsid(fsid);
3461 ++ /* fold the fs_cluster_id into the upper bits */
3462 ++ buf->f_fsid.val[1] = monc->fs_cluster_id;
3463 +
3464 + return 0;
3465 + }
3466 +diff --git a/fs/cifs/file.c b/fs/cifs/file.c
3467 +index 67139f9d583f2..6c06870f90184 100644
3468 +--- a/fs/cifs/file.c
3469 ++++ b/fs/cifs/file.c
3470 +@@ -2618,12 +2618,23 @@ int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
3471 + tcon = tlink_tcon(smbfile->tlink);
3472 + if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
3473 + server = tcon->ses->server;
3474 +- if (server->ops->flush)
3475 +- rc = server->ops->flush(xid, tcon, &smbfile->fid);
3476 +- else
3477 ++ if (server->ops->flush == NULL) {
3478 + rc = -ENOSYS;
3479 ++ goto strict_fsync_exit;
3480 ++ }
3481 ++
3482 ++ if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
3483 ++ smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
3484 ++ if (smbfile) {
3485 ++ rc = server->ops->flush(xid, tcon, &smbfile->fid);
3486 ++ cifsFileInfo_put(smbfile);
3487 ++ } else
3488 ++ cifs_dbg(FYI, "ignore fsync for file not open for write\n");
3489 ++ } else
3490 ++ rc = server->ops->flush(xid, tcon, &smbfile->fid);
3491 + }
3492 +
3493 ++strict_fsync_exit:
3494 + free_xid(xid);
3495 + return rc;
3496 + }
3497 +@@ -2635,6 +2646,7 @@ int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
3498 + struct cifs_tcon *tcon;
3499 + struct TCP_Server_Info *server;
3500 + struct cifsFileInfo *smbfile = file->private_data;
3501 ++ struct inode *inode = file_inode(file);
3502 + struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
3503 +
3504 + rc = file_write_and_wait_range(file, start, end);
3505 +@@ -2651,12 +2663,23 @@ int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
3506 + tcon = tlink_tcon(smbfile->tlink);
3507 + if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
3508 + server = tcon->ses->server;
3509 +- if (server->ops->flush)
3510 +- rc = server->ops->flush(xid, tcon, &smbfile->fid);
3511 +- else
3512 ++ if (server->ops->flush == NULL) {
3513 + rc = -ENOSYS;
3514 ++ goto fsync_exit;
3515 ++ }
3516 ++
3517 ++ if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
3518 ++ smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
3519 ++ if (smbfile) {
3520 ++ rc = server->ops->flush(xid, tcon, &smbfile->fid);
3521 ++ cifsFileInfo_put(smbfile);
3522 ++ } else
3523 ++ cifs_dbg(FYI, "ignore fsync for file not open for write\n");
3524 ++ } else
3525 ++ rc = server->ops->flush(xid, tcon, &smbfile->fid);
3526 + }
3527 +
3528 ++fsync_exit:
3529 + free_xid(xid);
3530 + return rc;
3531 + }
3532 +diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
3533 +index de9986d2f82fd..5c11199d753a6 100644
3534 +--- a/fs/erofs/utils.c
3535 ++++ b/fs/erofs/utils.c
3536 +@@ -154,7 +154,7 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
3537 + * however in order to avoid some race conditions, add a
3538 + * DBG_BUGON to observe this in advance.
3539 + */
3540 +- DBG_BUGON(xa_erase(&sbi->managed_pslots, grp->index) != grp);
3541 ++ DBG_BUGON(__xa_erase(&sbi->managed_pslots, grp->index) != grp);
3542 +
3543 + /* last refcount should be connected with its managed pslot. */
3544 + erofs_workgroup_unfreeze(grp, 0);
3545 +@@ -169,15 +169,19 @@ static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
3546 + unsigned int freed = 0;
3547 + unsigned long index;
3548 +
3549 ++ xa_lock(&sbi->managed_pslots);
3550 + xa_for_each(&sbi->managed_pslots, index, grp) {
3551 + /* try to shrink each valid workgroup */
3552 + if (!erofs_try_to_release_workgroup(sbi, grp))
3553 + continue;
3554 ++ xa_unlock(&sbi->managed_pslots);
3555 +
3556 + ++freed;
3557 + if (!--nr_shrink)
3558 +- break;
3559 ++ return freed;
3560 ++ xa_lock(&sbi->managed_pslots);
3561 + }
3562 ++ xa_unlock(&sbi->managed_pslots);
3563 + return freed;
3564 + }
3565 +
3566 +diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
3567 +index 597a145c08ef5..7e625806bd4a2 100644
3568 +--- a/fs/f2fs/node.c
3569 ++++ b/fs/f2fs/node.c
3570 +@@ -1389,6 +1389,7 @@ page_hit:
3571 + nid, nid_of_node(page), ino_of_node(page),
3572 + ofs_of_node(page), cpver_of_node(page),
3573 + next_blkaddr_of_node(page));
3574 ++ set_sbi_flag(sbi, SBI_NEED_FSCK);
3575 + err = -EINVAL;
3576 + out_err:
3577 + ClearPageUptodate(page);
3578 +diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
3579 +index fb1917730e0e4..d100b5dfedbd2 100644
3580 +--- a/fs/fuse/dev.c
3581 ++++ b/fs/fuse/dev.c
3582 +@@ -851,17 +851,17 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
3583 + goto out_put_old;
3584 + }
3585 +
3586 ++ get_page(newpage);
3587 ++
3588 ++ if (!(buf->flags & PIPE_BUF_FLAG_LRU))
3589 ++ lru_cache_add(newpage);
3590 ++
3591 + /*
3592 + * Release while we have extra ref on stolen page. Otherwise
3593 + * anon_pipe_buf_release() might think the page can be reused.
3594 + */
3595 + pipe_buf_release(cs->pipe, buf);
3596 +
3597 +- get_page(newpage);
3598 +-
3599 +- if (!(buf->flags & PIPE_BUF_FLAG_LRU))
3600 +- lru_cache_add(newpage);
3601 +-
3602 + err = 0;
3603 + spin_lock(&cs->req->waitq.lock);
3604 + if (test_bit(FR_ABORTED, &cs->req->flags))
3605 +diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
3606 +index c078f88552695..f2248d9d4db51 100644
3607 +--- a/fs/nfs/nfs42xdr.c
3608 ++++ b/fs/nfs/nfs42xdr.c
3609 +@@ -1396,8 +1396,7 @@ static int nfs4_xdr_dec_clone(struct rpc_rqst *rqstp,
3610 + status = decode_clone(xdr);
3611 + if (status)
3612 + goto out;
3613 +- status = decode_getfattr(xdr, res->dst_fattr, res->server);
3614 +-
3615 ++ decode_getfattr(xdr, res->dst_fattr, res->server);
3616 + out:
3617 + res->rpc_status = status;
3618 + return status;
3619 +diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
3620 +index c3a345c28a933..0e4278d4a7691 100644
3621 +--- a/fs/proc/vmcore.c
3622 ++++ b/fs/proc/vmcore.c
3623 +@@ -124,9 +124,13 @@ ssize_t read_from_oldmem(char *buf, size_t count,
3624 + nr_bytes = count;
3625 +
3626 + /* If pfn is not ram, return zeros for sparse dump files */
3627 +- if (pfn_is_ram(pfn) == 0)
3628 +- memset(buf, 0, nr_bytes);
3629 +- else {
3630 ++ if (pfn_is_ram(pfn) == 0) {
3631 ++ tmp = 0;
3632 ++ if (!userbuf)
3633 ++ memset(buf, 0, nr_bytes);
3634 ++ else if (clear_user(buf, nr_bytes))
3635 ++ tmp = -EFAULT;
3636 ++ } else {
3637 + if (encrypted)
3638 + tmp = copy_oldmem_page_encrypted(pfn, buf,
3639 + nr_bytes,
3640 +@@ -135,10 +139,10 @@ ssize_t read_from_oldmem(char *buf, size_t count,
3641 + else
3642 + tmp = copy_oldmem_page(pfn, buf, nr_bytes,
3643 + offset, userbuf);
3644 +-
3645 +- if (tmp < 0)
3646 +- return tmp;
3647 + }
3648 ++ if (tmp < 0)
3649 ++ return tmp;
3650 ++
3651 + *ppos += nr_bytes;
3652 + count -= nr_bytes;
3653 + buf += nr_bytes;
3654 +diff --git a/include/linux/bpf.h b/include/linux/bpf.h
3655 +index 1f62a4eec283c..474a0d852614f 100644
3656 +--- a/include/linux/bpf.h
3657 ++++ b/include/linux/bpf.h
3658 +@@ -173,7 +173,7 @@ struct bpf_map {
3659 + atomic64_t usercnt;
3660 + struct work_struct work;
3661 + struct mutex freeze_mutex;
3662 +- u64 writecnt; /* writable mmap cnt; protected by freeze_mutex */
3663 ++ atomic64_t writecnt;
3664 + };
3665 +
3666 + static inline bool map_value_has_spin_lock(const struct bpf_map *map)
3667 +@@ -1252,6 +1252,7 @@ void bpf_map_charge_move(struct bpf_map_memory *dst,
3668 + void *bpf_map_area_alloc(u64 size, int numa_node);
3669 + void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
3670 + void bpf_map_area_free(void *base);
3671 ++bool bpf_map_write_active(const struct bpf_map *map);
3672 + void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
3673 + int generic_map_lookup_batch(struct bpf_map *map,
3674 + const union bpf_attr *attr,
3675 +diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
3676 +index a06a78c67f19f..08325105131a2 100644
3677 +--- a/include/linux/ipc_namespace.h
3678 ++++ b/include/linux/ipc_namespace.h
3679 +@@ -132,6 +132,16 @@ static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
3680 + return ns;
3681 + }
3682 +
3683 ++static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns)
3684 ++{
3685 ++ if (ns) {
3686 ++ if (refcount_inc_not_zero(&ns->count))
3687 ++ return ns;
3688 ++ }
3689 ++
3690 ++ return NULL;
3691 ++}
3692 ++
3693 + extern void put_ipc_ns(struct ipc_namespace *ns);
3694 + #else
3695 + static inline struct ipc_namespace *copy_ipcs(unsigned long flags,
3696 +@@ -148,6 +158,11 @@ static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
3697 + return ns;
3698 + }
3699 +
3700 ++static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns)
3701 ++{
3702 ++ return ns;
3703 ++}
3704 ++
3705 + static inline void put_ipc_ns(struct ipc_namespace *ns)
3706 + {
3707 + }
3708 +diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
3709 +index 24cacb1ca654d..fa75f325dad53 100644
3710 +--- a/include/linux/sched/task.h
3711 ++++ b/include/linux/sched/task.h
3712 +@@ -158,7 +158,7 @@ static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
3713 + * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
3714 + * subscriptions and synchronises with wait4(). Also used in procfs. Also
3715 + * pins the final release of task.io_context. Also protects ->cpuset and
3716 +- * ->cgroup.subsys[]. And ->vfork_done.
3717 ++ * ->cgroup.subsys[]. And ->vfork_done. And ->sysvshm.shm_clist.
3718 + *
3719 + * Nests both inside and outside of read_lock(&tasklist_lock).
3720 + * It must not be nested with write_lock_irq(&tasklist_lock),
3721 +diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
3722 +index ac5ff3c3afb14..88bc66b8d02b0 100644
3723 +--- a/include/net/ip6_fib.h
3724 ++++ b/include/net/ip6_fib.h
3725 +@@ -491,6 +491,7 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
3726 + struct fib6_config *cfg, gfp_t gfp_flags,
3727 + struct netlink_ext_ack *extack);
3728 + void fib6_nh_release(struct fib6_nh *fib6_nh);
3729 ++void fib6_nh_release_dsts(struct fib6_nh *fib6_nh);
3730 +
3731 + int call_fib6_entry_notifiers(struct net *net,
3732 + enum fib_event_type event_type,
3733 +diff --git a/include/net/ipv6_stubs.h b/include/net/ipv6_stubs.h
3734 +index 8fce558b5fea3..14a43111ffc6a 100644
3735 +--- a/include/net/ipv6_stubs.h
3736 ++++ b/include/net/ipv6_stubs.h
3737 +@@ -47,6 +47,7 @@ struct ipv6_stub {
3738 + struct fib6_config *cfg, gfp_t gfp_flags,
3739 + struct netlink_ext_ack *extack);
3740 + void (*fib6_nh_release)(struct fib6_nh *fib6_nh);
3741 ++ void (*fib6_nh_release_dsts)(struct fib6_nh *fib6_nh);
3742 + void (*fib6_update_sernum)(struct net *net, struct fib6_info *rt);
3743 + int (*ip6_del_rt)(struct net *net, struct fib6_info *rt, bool skip_notify);
3744 + void (*fib6_rt_update)(struct net *net, struct fib6_info *rt,
3745 +diff --git a/include/net/nl802154.h b/include/net/nl802154.h
3746 +index ddcee128f5d9a..145acb8f25095 100644
3747 +--- a/include/net/nl802154.h
3748 ++++ b/include/net/nl802154.h
3749 +@@ -19,6 +19,8 @@
3750 + *
3751 + */
3752 +
3753 ++#include <linux/types.h>
3754 ++
3755 + #define NL802154_GENL_NAME "nl802154"
3756 +
3757 + enum nl802154_commands {
3758 +@@ -150,10 +152,9 @@ enum nl802154_attrs {
3759 + };
3760 +
3761 + enum nl802154_iftype {
3762 +- /* for backwards compatibility TODO */
3763 +- NL802154_IFTYPE_UNSPEC = -1,
3764 ++ NL802154_IFTYPE_UNSPEC = (~(__u32)0),
3765 +
3766 +- NL802154_IFTYPE_NODE,
3767 ++ NL802154_IFTYPE_NODE = 0,
3768 + NL802154_IFTYPE_MONITOR,
3769 + NL802154_IFTYPE_COORD,
3770 +
3771 +diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
3772 +index 2af7a1cd66589..b39cdbc522ec7 100644
3773 +--- a/include/xen/interface/io/ring.h
3774 ++++ b/include/xen/interface/io/ring.h
3775 +@@ -1,21 +1,53 @@
3776 +-/* SPDX-License-Identifier: GPL-2.0 */
3777 + /******************************************************************************
3778 + * ring.h
3779 + *
3780 + * Shared producer-consumer ring macros.
3781 + *
3782 ++ * Permission is hereby granted, free of charge, to any person obtaining a copy
3783 ++ * of this software and associated documentation files (the "Software"), to
3784 ++ * deal in the Software without restriction, including without limitation the
3785 ++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
3786 ++ * sell copies of the Software, and to permit persons to whom the Software is
3787 ++ * furnished to do so, subject to the following conditions:
3788 ++ *
3789 ++ * The above copyright notice and this permission notice shall be included in
3790 ++ * all copies or substantial portions of the Software.
3791 ++ *
3792 ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
3793 ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
3794 ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
3795 ++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
3796 ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
3797 ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
3798 ++ * DEALINGS IN THE SOFTWARE.
3799 ++ *
3800 + * Tim Deegan and Andrew Warfield November 2004.
3801 + */
3802 +
3803 + #ifndef __XEN_PUBLIC_IO_RING_H__
3804 + #define __XEN_PUBLIC_IO_RING_H__
3805 +
3806 ++/*
3807 ++ * When #include'ing this header, you need to provide the following
3808 ++ * declaration upfront:
3809 ++ * - standard integers types (uint8_t, uint16_t, etc)
3810 ++ * They are provided by stdint.h of the standard headers.
3811 ++ *
3812 ++ * In addition, if you intend to use the FLEX macros, you also need to
3813 ++ * provide the following, before invoking the FLEX macros:
3814 ++ * - size_t
3815 ++ * - memcpy
3816 ++ * - grant_ref_t
3817 ++ * These declarations are provided by string.h of the standard headers,
3818 ++ * and grant_table.h from the Xen public headers.
3819 ++ */
3820 ++
3821 + #include <xen/interface/grant_table.h>
3822 +
3823 + typedef unsigned int RING_IDX;
3824 +
3825 + /* Round a 32-bit unsigned constant down to the nearest power of two. */
3826 +-#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
3827 ++#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
3828 + #define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x))
3829 + #define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x))
3830 + #define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x))
3831 +@@ -27,82 +59,79 @@ typedef unsigned int RING_IDX;
3832 + * A ring contains as many entries as will fit, rounded down to the nearest
3833 + * power of two (so we can mask with (size-1) to loop around).
3834 + */
3835 +-#define __CONST_RING_SIZE(_s, _sz) \
3836 +- (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
3837 +- sizeof(((struct _s##_sring *)0)->ring[0])))
3838 +-
3839 ++#define __CONST_RING_SIZE(_s, _sz) \
3840 ++ (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
3841 ++ sizeof(((struct _s##_sring *)0)->ring[0])))
3842 + /*
3843 + * The same for passing in an actual pointer instead of a name tag.
3844 + */
3845 +-#define __RING_SIZE(_s, _sz) \
3846 +- (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
3847 ++#define __RING_SIZE(_s, _sz) \
3848 ++ (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
3849 +
3850 + /*
3851 + * Macros to make the correct C datatypes for a new kind of ring.
3852 + *
3853 + * To make a new ring datatype, you need to have two message structures,
3854 +- * let's say struct request, and struct response already defined.
3855 ++ * let's say request_t, and response_t already defined.
3856 + *
3857 + * In a header where you want the ring datatype declared, you then do:
3858 + *
3859 +- * DEFINE_RING_TYPES(mytag, struct request, struct response);
3860 ++ * DEFINE_RING_TYPES(mytag, request_t, response_t);
3861 + *
3862 + * These expand out to give you a set of types, as you can see below.
3863 + * The most important of these are:
3864 + *
3865 +- * struct mytag_sring - The shared ring.
3866 +- * struct mytag_front_ring - The 'front' half of the ring.
3867 +- * struct mytag_back_ring - The 'back' half of the ring.
3868 ++ * mytag_sring_t - The shared ring.
3869 ++ * mytag_front_ring_t - The 'front' half of the ring.
3870 ++ * mytag_back_ring_t - The 'back' half of the ring.
3871 + *
3872 + * To initialize a ring in your code you need to know the location and size
3873 + * of the shared memory area (PAGE_SIZE, for instance). To initialise
3874 + * the front half:
3875 + *
3876 +- * struct mytag_front_ring front_ring;
3877 +- * SHARED_RING_INIT((struct mytag_sring *)shared_page);
3878 +- * FRONT_RING_INIT(&front_ring, (struct mytag_sring *)shared_page,
3879 +- * PAGE_SIZE);
3880 ++ * mytag_front_ring_t front_ring;
3881 ++ * SHARED_RING_INIT((mytag_sring_t *)shared_page);
3882 ++ * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
3883 + *
3884 + * Initializing the back follows similarly (note that only the front
3885 + * initializes the shared ring):
3886 + *
3887 +- * struct mytag_back_ring back_ring;
3888 +- * BACK_RING_INIT(&back_ring, (struct mytag_sring *)shared_page,
3889 +- * PAGE_SIZE);
3890 ++ * mytag_back_ring_t back_ring;
3891 ++ * BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
3892 + */
3893 +
3894 +-#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
3895 +- \
3896 +-/* Shared ring entry */ \
3897 +-union __name##_sring_entry { \
3898 +- __req_t req; \
3899 +- __rsp_t rsp; \
3900 +-}; \
3901 +- \
3902 +-/* Shared ring page */ \
3903 +-struct __name##_sring { \
3904 +- RING_IDX req_prod, req_event; \
3905 +- RING_IDX rsp_prod, rsp_event; \
3906 +- uint8_t pad[48]; \
3907 +- union __name##_sring_entry ring[1]; /* variable-length */ \
3908 +-}; \
3909 +- \
3910 +-/* "Front" end's private variables */ \
3911 +-struct __name##_front_ring { \
3912 +- RING_IDX req_prod_pvt; \
3913 +- RING_IDX rsp_cons; \
3914 +- unsigned int nr_ents; \
3915 +- struct __name##_sring *sring; \
3916 +-}; \
3917 +- \
3918 +-/* "Back" end's private variables */ \
3919 +-struct __name##_back_ring { \
3920 +- RING_IDX rsp_prod_pvt; \
3921 +- RING_IDX req_cons; \
3922 +- unsigned int nr_ents; \
3923 +- struct __name##_sring *sring; \
3924 +-};
3925 +-
3926 ++#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
3927 ++ \
3928 ++/* Shared ring entry */ \
3929 ++union __name##_sring_entry { \
3930 ++ __req_t req; \
3931 ++ __rsp_t rsp; \
3932 ++}; \
3933 ++ \
3934 ++/* Shared ring page */ \
3935 ++struct __name##_sring { \
3936 ++ RING_IDX req_prod, req_event; \
3937 ++ RING_IDX rsp_prod, rsp_event; \
3938 ++ uint8_t __pad[48]; \
3939 ++ union __name##_sring_entry ring[1]; /* variable-length */ \
3940 ++}; \
3941 ++ \
3942 ++/* "Front" end's private variables */ \
3943 ++struct __name##_front_ring { \
3944 ++ RING_IDX req_prod_pvt; \
3945 ++ RING_IDX rsp_cons; \
3946 ++ unsigned int nr_ents; \
3947 ++ struct __name##_sring *sring; \
3948 ++}; \
3949 ++ \
3950 ++/* "Back" end's private variables */ \
3951 ++struct __name##_back_ring { \
3952 ++ RING_IDX rsp_prod_pvt; \
3953 ++ RING_IDX req_cons; \
3954 ++ unsigned int nr_ents; \
3955 ++ struct __name##_sring *sring; \
3956 ++}; \
3957 ++ \
3958 + /*
3959 + * Macros for manipulating rings.
3960 + *
3961 +@@ -119,94 +148,99 @@ struct __name##_back_ring { \
3962 + */
3963 +
3964 + /* Initialising empty rings */
3965 +-#define SHARED_RING_INIT(_s) do { \
3966 +- (_s)->req_prod = (_s)->rsp_prod = 0; \
3967 +- (_s)->req_event = (_s)->rsp_event = 1; \
3968 +- memset((_s)->pad, 0, sizeof((_s)->pad)); \
3969 ++#define SHARED_RING_INIT(_s) do { \
3970 ++ (_s)->req_prod = (_s)->rsp_prod = 0; \
3971 ++ (_s)->req_event = (_s)->rsp_event = 1; \
3972 ++ (void)memset((_s)->__pad, 0, sizeof((_s)->__pad)); \
3973 + } while(0)
3974 +
3975 +-#define FRONT_RING_ATTACH(_r, _s, _i, __size) do { \
3976 +- (_r)->req_prod_pvt = (_i); \
3977 +- (_r)->rsp_cons = (_i); \
3978 +- (_r)->nr_ents = __RING_SIZE(_s, __size); \
3979 +- (_r)->sring = (_s); \
3980 ++#define FRONT_RING_ATTACH(_r, _s, _i, __size) do { \
3981 ++ (_r)->req_prod_pvt = (_i); \
3982 ++ (_r)->rsp_cons = (_i); \
3983 ++ (_r)->nr_ents = __RING_SIZE(_s, __size); \
3984 ++ (_r)->sring = (_s); \
3985 + } while (0)
3986 +
3987 + #define FRONT_RING_INIT(_r, _s, __size) FRONT_RING_ATTACH(_r, _s, 0, __size)
3988 +
3989 +-#define BACK_RING_ATTACH(_r, _s, _i, __size) do { \
3990 +- (_r)->rsp_prod_pvt = (_i); \
3991 +- (_r)->req_cons = (_i); \
3992 +- (_r)->nr_ents = __RING_SIZE(_s, __size); \
3993 +- (_r)->sring = (_s); \
3994 ++#define BACK_RING_ATTACH(_r, _s, _i, __size) do { \
3995 ++ (_r)->rsp_prod_pvt = (_i); \
3996 ++ (_r)->req_cons = (_i); \
3997 ++ (_r)->nr_ents = __RING_SIZE(_s, __size); \
3998 ++ (_r)->sring = (_s); \
3999 + } while (0)
4000 +
4001 + #define BACK_RING_INIT(_r, _s, __size) BACK_RING_ATTACH(_r, _s, 0, __size)
4002 +
4003 + /* How big is this ring? */
4004 +-#define RING_SIZE(_r) \
4005 ++#define RING_SIZE(_r) \
4006 + ((_r)->nr_ents)
4007 +
4008 + /* Number of free requests (for use on front side only). */
4009 +-#define RING_FREE_REQUESTS(_r) \
4010 ++#define RING_FREE_REQUESTS(_r) \
4011 + (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))
4012 +
4013 + /* Test if there is an empty slot available on the front ring.
4014 + * (This is only meaningful from the front. )
4015 + */
4016 +-#define RING_FULL(_r) \
4017 ++#define RING_FULL(_r) \
4018 + (RING_FREE_REQUESTS(_r) == 0)
4019 +
4020 + /* Test if there are outstanding messages to be processed on a ring. */
4021 +-#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
4022 ++#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
4023 + ((_r)->sring->rsp_prod - (_r)->rsp_cons)
4024 +
4025 +-#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
4026 +- ({ \
4027 +- unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
4028 +- unsigned int rsp = RING_SIZE(_r) - \
4029 +- ((_r)->req_cons - (_r)->rsp_prod_pvt); \
4030 +- req < rsp ? req : rsp; \
4031 +- })
4032 ++#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \
4033 ++ unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
4034 ++ unsigned int rsp = RING_SIZE(_r) - \
4035 ++ ((_r)->req_cons - (_r)->rsp_prod_pvt); \
4036 ++ req < rsp ? req : rsp; \
4037 ++})
4038 +
4039 + /* Direct access to individual ring elements, by index. */
4040 +-#define RING_GET_REQUEST(_r, _idx) \
4041 ++#define RING_GET_REQUEST(_r, _idx) \
4042 + (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
4043 +
4044 ++#define RING_GET_RESPONSE(_r, _idx) \
4045 ++ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
4046 ++
4047 + /*
4048 +- * Get a local copy of a request.
4049 ++ * Get a local copy of a request/response.
4050 + *
4051 +- * Use this in preference to RING_GET_REQUEST() so all processing is
4052 ++ * Use this in preference to RING_GET_{REQUEST,RESPONSE}() so all processing is
4053 + * done on a local copy that cannot be modified by the other end.
4054 + *
4055 + * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
4056 +- * to be ineffective where _req is a struct which consists of only bitfields.
4057 ++ * to be ineffective where dest is a struct which consists of only bitfields.
4058 + */
4059 +-#define RING_COPY_REQUEST(_r, _idx, _req) do { \
4060 +- /* Use volatile to force the copy into _req. */ \
4061 +- *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \
4062 ++#define RING_COPY_(type, r, idx, dest) do { \
4063 ++ /* Use volatile to force the copy into dest. */ \
4064 ++ *(dest) = *(volatile typeof(dest))RING_GET_##type(r, idx); \
4065 + } while (0)
4066 +
4067 +-#define RING_GET_RESPONSE(_r, _idx) \
4068 +- (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
4069 ++#define RING_COPY_REQUEST(r, idx, req) RING_COPY_(REQUEST, r, idx, req)
4070 ++#define RING_COPY_RESPONSE(r, idx, rsp) RING_COPY_(RESPONSE, r, idx, rsp)
4071 +
4072 + /* Loop termination condition: Would the specified index overflow the ring? */
4073 +-#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
4074 ++#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
4075 + (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
4076 +
4077 + /* Ill-behaved frontend determination: Can there be this many requests? */
4078 +-#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
4079 ++#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
4080 + (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))
4081 +
4082 ++/* Ill-behaved backend determination: Can there be this many responses? */
4083 ++#define RING_RESPONSE_PROD_OVERFLOW(_r, _prod) \
4084 ++ (((_prod) - (_r)->rsp_cons) > RING_SIZE(_r))
4085 +
4086 +-#define RING_PUSH_REQUESTS(_r) do { \
4087 +- virt_wmb(); /* back sees requests /before/ updated producer index */ \
4088 +- (_r)->sring->req_prod = (_r)->req_prod_pvt; \
4089 ++#define RING_PUSH_REQUESTS(_r) do { \
4090 ++ virt_wmb(); /* back sees requests /before/ updated producer index */\
4091 ++ (_r)->sring->req_prod = (_r)->req_prod_pvt; \
4092 + } while (0)
4093 +
4094 +-#define RING_PUSH_RESPONSES(_r) do { \
4095 +- virt_wmb(); /* front sees responses /before/ updated producer index */ \
4096 +- (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
4097 ++#define RING_PUSH_RESPONSES(_r) do { \
4098 ++ virt_wmb(); /* front sees resps /before/ updated producer index */ \
4099 ++ (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
4100 + } while (0)
4101 +
4102 + /*
4103 +@@ -239,40 +273,40 @@ struct __name##_back_ring { \
4104 + * field appropriately.
4105 + */
4106 +
4107 +-#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
4108 +- RING_IDX __old = (_r)->sring->req_prod; \
4109 +- RING_IDX __new = (_r)->req_prod_pvt; \
4110 +- virt_wmb(); /* back sees requests /before/ updated producer index */ \
4111 +- (_r)->sring->req_prod = __new; \
4112 +- virt_mb(); /* back sees new requests /before/ we check req_event */ \
4113 +- (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
4114 +- (RING_IDX)(__new - __old)); \
4115 ++#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
4116 ++ RING_IDX __old = (_r)->sring->req_prod; \
4117 ++ RING_IDX __new = (_r)->req_prod_pvt; \
4118 ++ virt_wmb(); /* back sees requests /before/ updated producer index */\
4119 ++ (_r)->sring->req_prod = __new; \
4120 ++ virt_mb(); /* back sees new requests /before/ we check req_event */ \
4121 ++ (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
4122 ++ (RING_IDX)(__new - __old)); \
4123 + } while (0)
4124 +
4125 +-#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
4126 +- RING_IDX __old = (_r)->sring->rsp_prod; \
4127 +- RING_IDX __new = (_r)->rsp_prod_pvt; \
4128 +- virt_wmb(); /* front sees responses /before/ updated producer index */ \
4129 +- (_r)->sring->rsp_prod = __new; \
4130 +- virt_mb(); /* front sees new responses /before/ we check rsp_event */ \
4131 +- (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
4132 +- (RING_IDX)(__new - __old)); \
4133 ++#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
4134 ++ RING_IDX __old = (_r)->sring->rsp_prod; \
4135 ++ RING_IDX __new = (_r)->rsp_prod_pvt; \
4136 ++ virt_wmb(); /* front sees resps /before/ updated producer index */ \
4137 ++ (_r)->sring->rsp_prod = __new; \
4138 ++ virt_mb(); /* front sees new resps /before/ we check rsp_event */ \
4139 ++ (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
4140 ++ (RING_IDX)(__new - __old)); \
4141 + } while (0)
4142 +
4143 +-#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
4144 +- (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
4145 +- if (_work_to_do) break; \
4146 +- (_r)->sring->req_event = (_r)->req_cons + 1; \
4147 +- virt_mb(); \
4148 +- (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
4149 ++#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
4150 ++ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
4151 ++ if (_work_to_do) break; \
4152 ++ (_r)->sring->req_event = (_r)->req_cons + 1; \
4153 ++ virt_mb(); \
4154 ++ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
4155 + } while (0)
4156 +
4157 +-#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
4158 +- (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
4159 +- if (_work_to_do) break; \
4160 +- (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
4161 +- virt_mb(); \
4162 +- (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
4163 ++#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
4164 ++ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
4165 ++ if (_work_to_do) break; \
4166 ++ (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
4167 ++ virt_mb(); \
4168 ++ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
4169 + } while (0)
4170 +
4171 +
4172 +diff --git a/ipc/shm.c b/ipc/shm.c
4173 +index e25c7c6106bcf..471ac3e7498d5 100644
4174 +--- a/ipc/shm.c
4175 ++++ b/ipc/shm.c
4176 +@@ -62,9 +62,18 @@ struct shmid_kernel /* private to the kernel */
4177 + struct pid *shm_lprid;
4178 + struct user_struct *mlock_user;
4179 +
4180 +- /* The task created the shm object. NULL if the task is dead. */
4181 ++ /*
4182 ++ * The task created the shm object, for
4183 ++ * task_lock(shp->shm_creator)
4184 ++ */
4185 + struct task_struct *shm_creator;
4186 +- struct list_head shm_clist; /* list by creator */
4187 ++
4188 ++ /*
4189 ++ * List by creator. task_lock(->shm_creator) required for read/write.
4190 ++ * If list_empty(), then the creator is dead already.
4191 ++ */
4192 ++ struct list_head shm_clist;
4193 ++ struct ipc_namespace *ns;
4194 + } __randomize_layout;
4195 +
4196 + /* shm_mode upper byte flags */
4197 +@@ -115,6 +124,7 @@ static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
4198 + struct shmid_kernel *shp;
4199 +
4200 + shp = container_of(ipcp, struct shmid_kernel, shm_perm);
4201 ++ WARN_ON(ns != shp->ns);
4202 +
4203 + if (shp->shm_nattch) {
4204 + shp->shm_perm.mode |= SHM_DEST;
4205 +@@ -225,10 +235,43 @@ static void shm_rcu_free(struct rcu_head *head)
4206 + kvfree(shp);
4207 + }
4208 +
4209 +-static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
4210 ++/*
4211 ++ * It has to be called with shp locked.
4212 ++ * It must be called before ipc_rmid()
4213 ++ */
4214 ++static inline void shm_clist_rm(struct shmid_kernel *shp)
4215 + {
4216 +- list_del(&s->shm_clist);
4217 +- ipc_rmid(&shm_ids(ns), &s->shm_perm);
4218 ++ struct task_struct *creator;
4219 ++
4220 ++ /* ensure that shm_creator does not disappear */
4221 ++ rcu_read_lock();
4222 ++
4223 ++ /*
4224 ++ * A concurrent exit_shm may do a list_del_init() as well.
4225 ++ * Just do nothing if exit_shm already did the work
4226 ++ */
4227 ++ if (!list_empty(&shp->shm_clist)) {
4228 ++ /*
4229 ++ * shp->shm_creator is guaranteed to be valid *only*
4230 ++ * if shp->shm_clist is not empty.
4231 ++ */
4232 ++ creator = shp->shm_creator;
4233 ++
4234 ++ task_lock(creator);
4235 ++ /*
4236 ++ * list_del_init() is a nop if the entry was already removed
4237 ++ * from the list.
4238 ++ */
4239 ++ list_del_init(&shp->shm_clist);
4240 ++ task_unlock(creator);
4241 ++ }
4242 ++ rcu_read_unlock();
4243 ++}
4244 ++
4245 ++static inline void shm_rmid(struct shmid_kernel *s)
4246 ++{
4247 ++ shm_clist_rm(s);
4248 ++ ipc_rmid(&shm_ids(s->ns), &s->shm_perm);
4249 + }
4250 +
4251 +
4252 +@@ -283,7 +326,7 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
4253 + shm_file = shp->shm_file;
4254 + shp->shm_file = NULL;
4255 + ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
4256 +- shm_rmid(ns, shp);
4257 ++ shm_rmid(shp);
4258 + shm_unlock(shp);
4259 + if (!is_file_hugepages(shm_file))
4260 + shmem_lock(shm_file, 0, shp->mlock_user);
4261 +@@ -306,10 +349,10 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
4262 + *
4263 + * 2) sysctl kernel.shm_rmid_forced is set to 1.
4264 + */
4265 +-static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
4266 ++static bool shm_may_destroy(struct shmid_kernel *shp)
4267 + {
4268 + return (shp->shm_nattch == 0) &&
4269 +- (ns->shm_rmid_forced ||
4270 ++ (shp->ns->shm_rmid_forced ||
4271 + (shp->shm_perm.mode & SHM_DEST));
4272 + }
4273 +
4274 +@@ -340,7 +383,7 @@ static void shm_close(struct vm_area_struct *vma)
4275 + ipc_update_pid(&shp->shm_lprid, task_tgid(current));
4276 + shp->shm_dtim = ktime_get_real_seconds();
4277 + shp->shm_nattch--;
4278 +- if (shm_may_destroy(ns, shp))
4279 ++ if (shm_may_destroy(shp))
4280 + shm_destroy(ns, shp);
4281 + else
4282 + shm_unlock(shp);
4283 +@@ -361,10 +404,10 @@ static int shm_try_destroy_orphaned(int id, void *p, void *data)
4284 + *
4285 + * As shp->* are changed under rwsem, it's safe to skip shp locking.
4286 + */
4287 +- if (shp->shm_creator != NULL)
4288 ++ if (!list_empty(&shp->shm_clist))
4289 + return 0;
4290 +
4291 +- if (shm_may_destroy(ns, shp)) {
4292 ++ if (shm_may_destroy(shp)) {
4293 + shm_lock_by_ptr(shp);
4294 + shm_destroy(ns, shp);
4295 + }
4296 +@@ -382,48 +425,97 @@ void shm_destroy_orphaned(struct ipc_namespace *ns)
4297 + /* Locking assumes this will only be called with task == current */
4298 + void exit_shm(struct task_struct *task)
4299 + {
4300 +- struct ipc_namespace *ns = task->nsproxy->ipc_ns;
4301 +- struct shmid_kernel *shp, *n;
4302 ++ for (;;) {
4303 ++ struct shmid_kernel *shp;
4304 ++ struct ipc_namespace *ns;
4305 +
4306 +- if (list_empty(&task->sysvshm.shm_clist))
4307 +- return;
4308 ++ task_lock(task);
4309 ++
4310 ++ if (list_empty(&task->sysvshm.shm_clist)) {
4311 ++ task_unlock(task);
4312 ++ break;
4313 ++ }
4314 ++
4315 ++ shp = list_first_entry(&task->sysvshm.shm_clist, struct shmid_kernel,
4316 ++ shm_clist);
4317 +
4318 +- /*
4319 +- * If kernel.shm_rmid_forced is not set then only keep track of
4320 +- * which shmids are orphaned, so that a later set of the sysctl
4321 +- * can clean them up.
4322 +- */
4323 +- if (!ns->shm_rmid_forced) {
4324 +- down_read(&shm_ids(ns).rwsem);
4325 +- list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist)
4326 +- shp->shm_creator = NULL;
4327 + /*
4328 +- * Only under read lock but we are only called on current
4329 +- * so no entry on the list will be shared.
4330 ++ * 1) Get pointer to the ipc namespace. It is worth to say
4331 ++ * that this pointer is guaranteed to be valid because
4332 ++ * shp lifetime is always shorter than namespace lifetime
4333 ++ * in which shp lives.
4334 ++ * We taken task_lock it means that shp won't be freed.
4335 + */
4336 +- list_del(&task->sysvshm.shm_clist);
4337 +- up_read(&shm_ids(ns).rwsem);
4338 +- return;
4339 +- }
4340 ++ ns = shp->ns;
4341 +
4342 +- /*
4343 +- * Destroy all already created segments, that were not yet mapped,
4344 +- * and mark any mapped as orphan to cover the sysctl toggling.
4345 +- * Destroy is skipped if shm_may_destroy() returns false.
4346 +- */
4347 +- down_write(&shm_ids(ns).rwsem);
4348 +- list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) {
4349 +- shp->shm_creator = NULL;
4350 ++ /*
4351 ++ * 2) If kernel.shm_rmid_forced is not set then only keep track of
4352 ++ * which shmids are orphaned, so that a later set of the sysctl
4353 ++ * can clean them up.
4354 ++ */
4355 ++ if (!ns->shm_rmid_forced)
4356 ++ goto unlink_continue;
4357 +
4358 +- if (shm_may_destroy(ns, shp)) {
4359 +- shm_lock_by_ptr(shp);
4360 +- shm_destroy(ns, shp);
4361 ++ /*
4362 ++ * 3) get a reference to the namespace.
4363 ++ * The refcount could be already 0. If it is 0, then
4364 ++ * the shm objects will be free by free_ipc_work().
4365 ++ */
4366 ++ ns = get_ipc_ns_not_zero(ns);
4367 ++ if (!ns) {
4368 ++unlink_continue:
4369 ++ list_del_init(&shp->shm_clist);
4370 ++ task_unlock(task);
4371 ++ continue;
4372 + }
4373 +- }
4374 +
4375 +- /* Remove the list head from any segments still attached. */
4376 +- list_del(&task->sysvshm.shm_clist);
4377 +- up_write(&shm_ids(ns).rwsem);
4378 ++ /*
4379 ++ * 4) get a reference to shp.
4380 ++ * This cannot fail: shm_clist_rm() is called before
4381 ++ * ipc_rmid(), thus the refcount cannot be 0.
4382 ++ */
4383 ++ WARN_ON(!ipc_rcu_getref(&shp->shm_perm));
4384 ++
4385 ++ /*
4386 ++ * 5) unlink the shm segment from the list of segments
4387 ++ * created by current.
4388 ++ * This must be done last. After unlinking,
4389 ++ * only the refcounts obtained above prevent IPC_RMID
4390 ++ * from destroying the segment or the namespace.
4391 ++ */
4392 ++ list_del_init(&shp->shm_clist);
4393 ++
4394 ++ task_unlock(task);
4395 ++
4396 ++ /*
4397 ++ * 6) We now hold all the references we need.
4398 ++ * Thus lock shp and, if needed, destroy it.
4399 ++ */
4400 ++ down_write(&shm_ids(ns).rwsem);
4401 ++ shm_lock_by_ptr(shp);
4402 ++ /*
4403 ++ * rcu_read_lock was implicitly taken in shm_lock_by_ptr, it's
4404 ++ * safe to call ipc_rcu_putref here
4405 ++ */
4406 ++ ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
4407 ++
4408 ++ if (ipc_valid_object(&shp->shm_perm)) {
4409 ++ if (shm_may_destroy(shp))
4410 ++ shm_destroy(ns, shp);
4411 ++ else
4412 ++ shm_unlock(shp);
4413 ++ } else {
4414 ++ /*
4415 ++ * Someone else deleted the shp from the namespace
4416 ++ * idr/kht while we were waiting.
4417 ++ * Just unlock and continue.
4418 ++ */
4419 ++ shm_unlock(shp);
4420 ++ }
4421 ++
4422 ++ up_write(&shm_ids(ns).rwsem);
4423 ++ put_ipc_ns(ns); /* paired with get_ipc_ns_not_zero */
4424 ++ }
4425 + }
4426 +
4427 + static vm_fault_t shm_fault(struct vm_fault *vmf)
4428 +@@ -680,7 +772,11 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
4429 + if (error < 0)
4430 + goto no_id;
4431 +
4432 ++ shp->ns = ns;
4433 ++
4434 ++ task_lock(current);
4435 + list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
4436 ++ task_unlock(current);
4437 +
4438 + /*
4439 + * shmid gets reported as "inode#" in /proc/pid/maps.
4440 +@@ -1573,7 +1669,8 @@ out_nattch:
4441 + down_write(&shm_ids(ns).rwsem);
4442 + shp = shm_lock(ns, shmid);
4443 + shp->shm_nattch--;
4444 +- if (shm_may_destroy(ns, shp))
4445 ++
4446 ++ if (shm_may_destroy(shp))
4447 + shm_destroy(ns, shp);
4448 + else
4449 + shm_unlock(shp);
4450 +diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
4451 +index 5b6da64da46d7..bb9a9cb1f321e 100644
4452 +--- a/kernel/bpf/syscall.c
4453 ++++ b/kernel/bpf/syscall.c
4454 +@@ -127,6 +127,21 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
4455 + return map;
4456 + }
4457 +
4458 ++static void bpf_map_write_active_inc(struct bpf_map *map)
4459 ++{
4460 ++ atomic64_inc(&map->writecnt);
4461 ++}
4462 ++
4463 ++static void bpf_map_write_active_dec(struct bpf_map *map)
4464 ++{
4465 ++ atomic64_dec(&map->writecnt);
4466 ++}
4467 ++
4468 ++bool bpf_map_write_active(const struct bpf_map *map)
4469 ++{
4470 ++ return atomic64_read(&map->writecnt) != 0;
4471 ++}
4472 ++
4473 + static u32 bpf_map_value_size(struct bpf_map *map)
4474 + {
4475 + if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
4476 +@@ -588,11 +603,8 @@ static void bpf_map_mmap_open(struct vm_area_struct *vma)
4477 + {
4478 + struct bpf_map *map = vma->vm_file->private_data;
4479 +
4480 +- if (vma->vm_flags & VM_MAYWRITE) {
4481 +- mutex_lock(&map->freeze_mutex);
4482 +- map->writecnt++;
4483 +- mutex_unlock(&map->freeze_mutex);
4484 +- }
4485 ++ if (vma->vm_flags & VM_MAYWRITE)
4486 ++ bpf_map_write_active_inc(map);
4487 + }
4488 +
4489 + /* called for all unmapped memory region (including initial) */
4490 +@@ -600,11 +612,8 @@ static void bpf_map_mmap_close(struct vm_area_struct *vma)
4491 + {
4492 + struct bpf_map *map = vma->vm_file->private_data;
4493 +
4494 +- if (vma->vm_flags & VM_MAYWRITE) {
4495 +- mutex_lock(&map->freeze_mutex);
4496 +- map->writecnt--;
4497 +- mutex_unlock(&map->freeze_mutex);
4498 +- }
4499 ++ if (vma->vm_flags & VM_MAYWRITE)
4500 ++ bpf_map_write_active_dec(map);
4501 + }
4502 +
4503 + static const struct vm_operations_struct bpf_map_default_vmops = {
4504 +@@ -654,7 +663,7 @@ static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
4505 + goto out;
4506 +
4507 + if (vma->vm_flags & VM_MAYWRITE)
4508 +- map->writecnt++;
4509 ++ bpf_map_write_active_inc(map);
4510 + out:
4511 + mutex_unlock(&map->freeze_mutex);
4512 + return err;
4513 +@@ -1086,6 +1095,7 @@ static int map_update_elem(union bpf_attr *attr)
4514 + map = __bpf_map_get(f);
4515 + if (IS_ERR(map))
4516 + return PTR_ERR(map);
4517 ++ bpf_map_write_active_inc(map);
4518 + if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
4519 + err = -EPERM;
4520 + goto err_put;
4521 +@@ -1127,6 +1137,7 @@ free_value:
4522 + free_key:
4523 + kfree(key);
4524 + err_put:
4525 ++ bpf_map_write_active_dec(map);
4526 + fdput(f);
4527 + return err;
4528 + }
4529 +@@ -1149,6 +1160,7 @@ static int map_delete_elem(union bpf_attr *attr)
4530 + map = __bpf_map_get(f);
4531 + if (IS_ERR(map))
4532 + return PTR_ERR(map);
4533 ++ bpf_map_write_active_inc(map);
4534 + if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
4535 + err = -EPERM;
4536 + goto err_put;
4537 +@@ -1179,6 +1191,7 @@ static int map_delete_elem(union bpf_attr *attr)
4538 + out:
4539 + kfree(key);
4540 + err_put:
4541 ++ bpf_map_write_active_dec(map);
4542 + fdput(f);
4543 + return err;
4544 + }
4545 +@@ -1483,6 +1496,7 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr)
4546 + map = __bpf_map_get(f);
4547 + if (IS_ERR(map))
4548 + return PTR_ERR(map);
4549 ++ bpf_map_write_active_inc(map);
4550 + if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
4551 + !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
4552 + err = -EPERM;
4553 +@@ -1524,6 +1538,7 @@ free_value:
4554 + free_key:
4555 + kfree(key);
4556 + err_put:
4557 ++ bpf_map_write_active_dec(map);
4558 + fdput(f);
4559 + return err;
4560 + }
4561 +@@ -1550,8 +1565,7 @@ static int map_freeze(const union bpf_attr *attr)
4562 + }
4563 +
4564 + mutex_lock(&map->freeze_mutex);
4565 +-
4566 +- if (map->writecnt) {
4567 ++ if (bpf_map_write_active(map)) {
4568 + err = -EBUSY;
4569 + goto err_put;
4570 + }
4571 +@@ -3976,6 +3990,9 @@ static int bpf_map_do_batch(const union bpf_attr *attr,
4572 + union bpf_attr __user *uattr,
4573 + int cmd)
4574 + {
4575 ++ bool has_read = cmd == BPF_MAP_LOOKUP_BATCH ||
4576 ++ cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH;
4577 ++ bool has_write = cmd != BPF_MAP_LOOKUP_BATCH;
4578 + struct bpf_map *map;
4579 + int err, ufd;
4580 + struct fd f;
4581 +@@ -3988,16 +4005,13 @@ static int bpf_map_do_batch(const union bpf_attr *attr,
4582 + map = __bpf_map_get(f);
4583 + if (IS_ERR(map))
4584 + return PTR_ERR(map);
4585 +-
4586 +- if ((cmd == BPF_MAP_LOOKUP_BATCH ||
4587 +- cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH) &&
4588 +- !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
4589 ++ if (has_write)
4590 ++ bpf_map_write_active_inc(map);
4591 ++ if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
4592 + err = -EPERM;
4593 + goto err_put;
4594 + }
4595 +-
4596 +- if (cmd != BPF_MAP_LOOKUP_BATCH &&
4597 +- !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
4598 ++ if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
4599 + err = -EPERM;
4600 + goto err_put;
4601 + }
4602 +@@ -4010,8 +4024,9 @@ static int bpf_map_do_batch(const union bpf_attr *attr,
4603 + BPF_DO_BATCH(map->ops->map_update_batch);
4604 + else
4605 + BPF_DO_BATCH(map->ops->map_delete_batch);
4606 +-
4607 + err_put:
4608 ++ if (has_write)
4609 ++ bpf_map_write_active_dec(map);
4610 + fdput(f);
4611 + return err;
4612 + }
4613 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
4614 +index a15826a9a644f..5a2b28e6816ee 100644
4615 +--- a/kernel/bpf/verifier.c
4616 ++++ b/kernel/bpf/verifier.c
4617 +@@ -3486,7 +3486,22 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
4618 +
4619 + static bool bpf_map_is_rdonly(const struct bpf_map *map)
4620 + {
4621 +- return (map->map_flags & BPF_F_RDONLY_PROG) && map->frozen;
4622 ++ /* A map is considered read-only if the following conditions are true:
4623 ++ *
4624 ++ * 1) The BPF program side cannot change any of the map content. The
4625 ++ * BPF_F_RDONLY_PROG flag is set at map creation time and holds
4626 ++ * for the whole lifetime of the map.
4627 ++ * 2) The map value(s) have been initialized from user space by a
4628 ++ * loader and then "frozen", such that no new map update/delete
4629 ++ * operations from syscall side are possible for the rest of
4630 ++ * the map's lifetime from that point onwards.
4631 ++ * 3) Any parallel/pending map update/delete operations from syscall
4632 ++ * side have been completed. Only after that point, it's safe to
4633 ++ * assume that map value(s) are immutable.
4634 ++ */
4635 ++ return (map->map_flags & BPF_F_RDONLY_PROG) &&
4636 ++ READ_ONCE(map->frozen) &&
4637 ++ !bpf_map_write_active(map);
4638 + }
4639 +
4640 + static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
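
For context on the two hunks above: the pending-write bookkeeping moves from a mutex-protected writecnt to an atomic counter that every syscall-side writer brackets with bpf_map_write_active_inc()/bpf_map_write_active_dec(), and bpf_map_is_rdonly() now only treats a map as constant when BPF_F_RDONLY_PROG is set, the map is frozen, and no such writer is still in flight. A minimal userspace sketch of that pattern follows; the toy_map type and helper names are made up for illustration and are not kernel API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_map {
	atomic_llong writecnt;   /* pending syscall-side writers */
	bool frozen;             /* set once a "freeze" succeeds */
	bool rdonly_prog;        /* stands in for BPF_F_RDONLY_PROG */
	long value;
};

static void write_active_inc(struct toy_map *m) { atomic_fetch_add(&m->writecnt, 1); }
static void write_active_dec(struct toy_map *m) { atomic_fetch_sub(&m->writecnt, 1); }
static bool write_active(struct toy_map *m) { return atomic_load(&m->writecnt) != 0; }

/* A writer brackets the whole update so freeze/read-only checks can see it. */
static void update_elem(struct toy_map *m, long v)
{
	write_active_inc(m);
	m->value = v;            /* the actual map write */
	write_active_dec(m);
}

/* Freeze succeeds only once no syscall-side writer is in flight. */
static int map_freeze(struct toy_map *m)
{
	if (write_active(m))
		return -1;       /* the kernel returns -EBUSY here */
	m->frozen = true;
	return 0;
}

/* Mirrors the patched bpf_map_is_rdonly(): flag + frozen + no pending writers. */
static bool map_is_rdonly(struct toy_map *m)
{
	return m->rdonly_prog && m->frozen && !write_active(m);
}

int main(void)
{
	struct toy_map m = { .rdonly_prog = true };

	atomic_init(&m.writecnt, 0);
	update_elem(&m, 42);
	printf("freeze: %d, rdonly: %d\n", map_freeze(&m), map_is_rdonly(&m));
	return 0;
}

Built with a C11 compiler, this prints "freeze: 0, rdonly: 1": the freeze succeeds because the only writer has already decremented the counter.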
4641 +diff --git a/kernel/cpu.c b/kernel/cpu.c
4642 +index 67c22941b5f27..c06ced18f78ad 100644
4643 +--- a/kernel/cpu.c
4644 ++++ b/kernel/cpu.c
4645 +@@ -31,6 +31,7 @@
4646 + #include <linux/smpboot.h>
4647 + #include <linux/relay.h>
4648 + #include <linux/slab.h>
4649 ++#include <linux/scs.h>
4650 + #include <linux/percpu-rwsem.h>
4651 + #include <linux/cpuset.h>
4652 +
4653 +@@ -551,6 +552,12 @@ static int bringup_cpu(unsigned int cpu)
4654 + struct task_struct *idle = idle_thread_get(cpu);
4655 + int ret;
4656 +
4657 ++ /*
4658 ++ * Reset stale stack state from the last time this CPU was online.
4659 ++ */
4660 ++ scs_task_reset(idle);
4661 ++ kasan_unpoison_task_stack(idle);
4662 ++
4663 + /*
4664 + * Some architectures have to walk the irq descriptors to
4665 + * setup the vector space for the cpu which comes online.
4666 +diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
4667 +index 2fc7d509a34fc..bf640fd6142a0 100644
4668 +--- a/kernel/power/hibernate.c
4669 ++++ b/kernel/power/hibernate.c
4670 +@@ -688,7 +688,7 @@ static int load_image_and_restore(void)
4671 + goto Unlock;
4672 +
4673 + error = swsusp_read(&flags);
4674 +- swsusp_close(FMODE_READ);
4675 ++ swsusp_close(FMODE_READ | FMODE_EXCL);
4676 + if (!error)
4677 + error = hibernation_restore(flags & SF_PLATFORM_MODE);
4678 +
4679 +@@ -978,7 +978,7 @@ static int software_resume(void)
4680 + /* The snapshot device should not be opened while we're running */
4681 + if (!hibernate_acquire()) {
4682 + error = -EBUSY;
4683 +- swsusp_close(FMODE_READ);
4684 ++ swsusp_close(FMODE_READ | FMODE_EXCL);
4685 + goto Unlock;
4686 + }
4687 +
4688 +@@ -1013,7 +1013,7 @@ static int software_resume(void)
4689 + pm_pr_dbg("Hibernation image not present or could not be loaded.\n");
4690 + return error;
4691 + Close_Finish:
4692 +- swsusp_close(FMODE_READ);
4693 ++ swsusp_close(FMODE_READ | FMODE_EXCL);
4694 + goto Finish;
4695 + }
4696 +
4697 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
4698 +index e456cce772a3a..304aad997da11 100644
4699 +--- a/kernel/sched/core.c
4700 ++++ b/kernel/sched/core.c
4701 +@@ -6523,9 +6523,6 @@ void __init init_idle(struct task_struct *idle, int cpu)
4702 + idle->se.exec_start = sched_clock();
4703 + idle->flags |= PF_IDLE;
4704 +
4705 +- scs_task_reset(idle);
4706 +- kasan_unpoison_task_stack(idle);
4707 +-
4708 + #ifdef CONFIG_SMP
4709 + /*
4710 + * Its possible that init_idle() gets called multiple times on a task,
4711 +@@ -6681,7 +6678,6 @@ void idle_task_exit(void)
4712 + finish_arch_post_lock_switch();
4713 + }
4714 +
4715 +- scs_task_reset(current);
4716 + /* finish_cpu(), as ran on the BP, will clean up the active_mm state */
4717 + }
4718 +
4719 +diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
4720 +index 15a811d34cd82..8d67f7f448400 100644
4721 +--- a/kernel/trace/trace.h
4722 ++++ b/kernel/trace/trace.h
4723 +@@ -1506,14 +1506,26 @@ __event_trigger_test_discard(struct trace_event_file *file,
4724 + if (eflags & EVENT_FILE_FL_TRIGGER_COND)
4725 + *tt = event_triggers_call(file, entry, event);
4726 +
4727 +- if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
4728 +- (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
4729 +- !filter_match_preds(file->filter, entry))) {
4730 +- __trace_event_discard_commit(buffer, event);
4731 +- return true;
4732 +- }
4733 ++ if (likely(!(file->flags & (EVENT_FILE_FL_SOFT_DISABLED |
4734 ++ EVENT_FILE_FL_FILTERED |
4735 ++ EVENT_FILE_FL_PID_FILTER))))
4736 ++ return false;
4737 ++
4738 ++ if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
4739 ++ goto discard;
4740 ++
4741 ++ if (file->flags & EVENT_FILE_FL_FILTERED &&
4742 ++ !filter_match_preds(file->filter, entry))
4743 ++ goto discard;
4744 ++
4745 ++ if ((file->flags & EVENT_FILE_FL_PID_FILTER) &&
4746 ++ trace_event_ignore_this_pid(file))
4747 ++ goto discard;
4748 +
4749 + return false;
4750 ++ discard:
4751 ++ __trace_event_discard_commit(buffer, event);
4752 ++ return true;
4753 + }
4754 +
4755 + /**
4756 +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
4757 +index ab3cb67b869e5..7cc5f0a77c3cc 100644
4758 +--- a/kernel/trace/trace_events.c
4759 ++++ b/kernel/trace/trace_events.c
4760 +@@ -2462,12 +2462,22 @@ static struct trace_event_file *
4761 + trace_create_new_event(struct trace_event_call *call,
4762 + struct trace_array *tr)
4763 + {
4764 ++ struct trace_pid_list *no_pid_list;
4765 ++ struct trace_pid_list *pid_list;
4766 + struct trace_event_file *file;
4767 +
4768 + file = kmem_cache_alloc(file_cachep, GFP_TRACE);
4769 + if (!file)
4770 + return NULL;
4771 +
4772 ++ pid_list = rcu_dereference_protected(tr->filtered_pids,
4773 ++ lockdep_is_held(&event_mutex));
4774 ++ no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
4775 ++ lockdep_is_held(&event_mutex));
4776 ++
4777 ++ if (pid_list || no_pid_list)
4778 ++ file->flags |= EVENT_FILE_FL_PID_FILTER;
4779 ++
4780 + file->event_call = call;
4781 + file->tr = tr;
4782 + atomic_set(&file->sm_ref, 0);
4783 +diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
4784 +index 0dd6e286e5196..9900d4e3808cc 100644
4785 +--- a/kernel/trace/trace_uprobe.c
4786 ++++ b/kernel/trace/trace_uprobe.c
4787 +@@ -1312,6 +1312,7 @@ static int uprobe_perf_open(struct trace_event_call *call,
4788 + return 0;
4789 +
4790 + list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
4791 ++ tu = container_of(pos, struct trace_uprobe, tp);
4792 + err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
4793 + if (err) {
4794 + uprobe_perf_close(call, event);
4795 +diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
4796 +index ad3780067a7d8..d12c9a8a9953e 100644
4797 +--- a/net/8021q/vlan.c
4798 ++++ b/net/8021q/vlan.c
4799 +@@ -181,9 +181,6 @@ int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack)
4800 + if (err)
4801 + goto out_unregister_netdev;
4802 +
4803 +- /* Account for reference in struct vlan_dev_priv */
4804 +- dev_hold(real_dev);
4805 +-
4806 + vlan_stacked_transfer_operstate(real_dev, dev, vlan);
4807 + linkwatch_fire_event(dev); /* _MUST_ call rfc2863_policy() */
4808 +
4809 +diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
4810 +index c7eba7dab0938..86a1c99025ea0 100644
4811 +--- a/net/8021q/vlan_dev.c
4812 ++++ b/net/8021q/vlan_dev.c
4813 +@@ -606,6 +606,9 @@ static int vlan_dev_init(struct net_device *dev)
4814 + if (!vlan->vlan_pcpu_stats)
4815 + return -ENOMEM;
4816 +
4817 ++ /* Get vlan's reference to real_dev */
4818 ++ dev_hold(real_dev);
4819 ++
4820 + return 0;
4821 + }
4822 +
4823 +diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
4824 +index 1075cc2136ac6..8bd3f5e3c0e7a 100644
4825 +--- a/net/ipv4/nexthop.c
4826 ++++ b/net/ipv4/nexthop.c
4827 +@@ -924,15 +924,36 @@ static void remove_nexthop(struct net *net, struct nexthop *nh,
4828 + /* if any FIB entries reference this nexthop, any dst entries
4829 + * need to be regenerated
4830 + */
4831 +-static void nh_rt_cache_flush(struct net *net, struct nexthop *nh)
4832 ++static void nh_rt_cache_flush(struct net *net, struct nexthop *nh,
4833 ++ struct nexthop *replaced_nh)
4834 + {
4835 + struct fib6_info *f6i;
4836 ++ struct nh_group *nhg;
4837 ++ int i;
4838 +
4839 + if (!list_empty(&nh->fi_list))
4840 + rt_cache_flush(net);
4841 +
4842 + list_for_each_entry(f6i, &nh->f6i_list, nh_list)
4843 + ipv6_stub->fib6_update_sernum(net, f6i);
4844 ++
4845 ++ /* if an IPv6 group was replaced, we have to release all old
4846 ++ * dsts to make sure all refcounts are released
4847 ++ */
4848 ++ if (!replaced_nh->is_group)
4849 ++ return;
4850 ++
4851 ++ /* new dsts must use only the new nexthop group */
4852 ++ synchronize_net();
4853 ++
4854 ++ nhg = rtnl_dereference(replaced_nh->nh_grp);
4855 ++ for (i = 0; i < nhg->num_nh; i++) {
4856 ++ struct nh_grp_entry *nhge = &nhg->nh_entries[i];
4857 ++ struct nh_info *nhi = rtnl_dereference(nhge->nh->nh_info);
4858 ++
4859 ++ if (nhi->family == AF_INET6)
4860 ++ ipv6_stub->fib6_nh_release_dsts(&nhi->fib6_nh);
4861 ++ }
4862 + }
4863 +
4864 + static int replace_nexthop_grp(struct net *net, struct nexthop *old,
4865 +@@ -1111,7 +1132,7 @@ static int replace_nexthop(struct net *net, struct nexthop *old,
4866 + err = replace_nexthop_single(net, old, new, extack);
4867 +
4868 + if (!err) {
4869 +- nh_rt_cache_flush(net, old);
4870 ++ nh_rt_cache_flush(net, old, new);
4871 +
4872 + __remove_nexthop(net, new, NULL);
4873 + nexthop_put(new);
4874 +@@ -1355,11 +1376,15 @@ static int nh_create_ipv6(struct net *net, struct nexthop *nh,
4875 + /* sets nh_dev if successful */
4876 + err = ipv6_stub->fib6_nh_init(net, fib6_nh, &fib6_cfg, GFP_KERNEL,
4877 + extack);
4878 +- if (err)
4879 ++ if (err) {
4880 ++ /* IPv6 is not enabled, don't call fib6_nh_release */
4881 ++ if (err == -EAFNOSUPPORT)
4882 ++ goto out;
4883 + ipv6_stub->fib6_nh_release(fib6_nh);
4884 +- else
4885 ++ } else {
4886 + nh->nh_flags = fib6_nh->fib_nh_flags;
4887 +-
4888 ++ }
4889 ++out:
4890 + return err;
4891 + }
4892 +
4893 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
4894 +index bb16c88f58a3c..63c81af41b43e 100644
4895 +--- a/net/ipv4/tcp.c
4896 ++++ b/net/ipv4/tcp.c
4897 +@@ -3931,7 +3931,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
4898 + }
4899 + #ifdef CONFIG_MMU
4900 + case TCP_ZEROCOPY_RECEIVE: {
4901 +- struct tcp_zerocopy_receive zc;
4902 ++ struct tcp_zerocopy_receive zc = {};
4903 + int err;
4904 +
4905 + if (get_user(len, optlen))
4906 +@@ -3949,7 +3949,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
4907 + lock_sock(sk);
4908 + err = tcp_zerocopy_receive(sk, &zc);
4909 + release_sock(sk);
4910 +- if (len == sizeof(zc))
4911 ++ if (len >= offsetofend(struct tcp_zerocopy_receive, err))
4912 + goto zerocopy_rcv_sk_err;
4913 + switch (len) {
4914 + case offsetofend(struct tcp_zerocopy_receive, err):
4915 +diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
4916 +index c7bf5b26bf0c2..fffa011a007d4 100644
4917 +--- a/net/ipv4/tcp_cubic.c
4918 ++++ b/net/ipv4/tcp_cubic.c
4919 +@@ -337,8 +337,6 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
4920 + return;
4921 +
4922 + if (tcp_in_slow_start(tp)) {
4923 +- if (hystart && after(ack, ca->end_seq))
4924 +- bictcp_hystart_reset(sk);
4925 + acked = tcp_slow_start(tp, acked);
4926 + if (!acked)
4927 + return;
4928 +@@ -398,6 +396,9 @@ static void hystart_update(struct sock *sk, u32 delay)
4929 + struct bictcp *ca = inet_csk_ca(sk);
4930 + u32 threshold;
4931 +
4932 ++ if (after(tp->snd_una, ca->end_seq))
4933 ++ bictcp_hystart_reset(sk);
4934 ++
4935 + if (hystart_detect & HYSTART_ACK_TRAIN) {
4936 + u32 now = bictcp_clock_us(sk);
4937 +
4938 +diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
4939 +index e648fbebb1670..090575346daf6 100644
4940 +--- a/net/ipv6/af_inet6.c
4941 ++++ b/net/ipv6/af_inet6.c
4942 +@@ -1016,6 +1016,7 @@ static const struct ipv6_stub ipv6_stub_impl = {
4943 + .ip6_mtu_from_fib6 = ip6_mtu_from_fib6,
4944 + .fib6_nh_init = fib6_nh_init,
4945 + .fib6_nh_release = fib6_nh_release,
4946 ++ .fib6_nh_release_dsts = fib6_nh_release_dsts,
4947 + .fib6_update_sernum = fib6_update_sernum_stub,
4948 + .fib6_rt_update = fib6_rt_update,
4949 + .ip6_del_rt = ip6_del_rt,
4950 +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
4951 +index c2f8e69d7d7a0..54cabf1c2ae15 100644
4952 +--- a/net/ipv6/ip6_output.c
4953 ++++ b/net/ipv6/ip6_output.c
4954 +@@ -193,7 +193,7 @@ static int __ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff
4955 + #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
4956 + /* Policy lookup after SNAT yielded a new policy */
4957 + if (skb_dst(skb)->xfrm) {
4958 +- IPCB(skb)->flags |= IPSKB_REROUTED;
4959 ++ IP6CB(skb)->flags |= IP6SKB_REROUTED;
4960 + return dst_output(net, sk, skb);
4961 + }
4962 + #endif
4963 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
4964 +index a68a7d7c07280..6fef0d7586bf6 100644
4965 +--- a/net/ipv6/route.c
4966 ++++ b/net/ipv6/route.c
4967 +@@ -3570,6 +3570,25 @@ void fib6_nh_release(struct fib6_nh *fib6_nh)
4968 + fib_nh_common_release(&fib6_nh->nh_common);
4969 + }
4970 +
4971 ++void fib6_nh_release_dsts(struct fib6_nh *fib6_nh)
4972 ++{
4973 ++ int cpu;
4974 ++
4975 ++ if (!fib6_nh->rt6i_pcpu)
4976 ++ return;
4977 ++
4978 ++ for_each_possible_cpu(cpu) {
4979 ++ struct rt6_info *pcpu_rt, **ppcpu_rt;
4980 ++
4981 ++ ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
4982 ++ pcpu_rt = xchg(ppcpu_rt, NULL);
4983 ++ if (pcpu_rt) {
4984 ++ dst_dev_put(&pcpu_rt->dst);
4985 ++ dst_release(&pcpu_rt->dst);
4986 ++ }
4987 ++ }
4988 ++}
4989 ++
4990 + static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
4991 + gfp_t gfp_flags,
4992 + struct netlink_ext_ack *extack)
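
For context: fib6_nh_release_dsts() walks the per-CPU dst cache and steals each cached entry with xchg() before dropping its references, so the entry is released exactly once even if another path still holds the slot pointer. A toy userspace model of that steal-and-release pattern, using C11 atomics and stand-in types rather than the kernel's per-cpu and dst APIs:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct toy_dst { int refcnt; };

/* one cached entry per CPU, as with fib6_nh->rt6i_pcpu */
static struct toy_dst *_Atomic pcpu_slot[NR_CPUS];

static void release_dsts(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		/* atomically steal the cached entry, as the patch does with
		 * xchg(), so each entry is dropped exactly once */
		struct toy_dst *d = atomic_exchange(&pcpu_slot[cpu], NULL);

		if (d && --d->refcnt == 0)
			free(d);
	}
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu += 2) {
		struct toy_dst *d = malloc(sizeof(*d));

		if (!d)
			return 1;
		d->refcnt = 1;
		atomic_store(&pcpu_slot[cpu], d);
	}
	release_dsts();
	printf("slot 0 after release: %p\n", (void *)atomic_load(&pcpu_slot[0]));
	return 0;
}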
4993 +diff --git a/net/mptcp/options.c b/net/mptcp/options.c
4994 +index ac0233c9cd349..64afe71e2129a 100644
4995 +--- a/net/mptcp/options.c
4996 ++++ b/net/mptcp/options.c
4997 +@@ -368,9 +368,10 @@ static void schedule_3rdack_retransmission(struct sock *sk)
4998 +
4999 + /* reschedule with a timeout above RTT, as we must look only for drop */
5000 + if (tp->srtt_us)
5001 +- timeout = tp->srtt_us << 1;
5002 ++ timeout = usecs_to_jiffies(tp->srtt_us >> (3 - 1));
5003 + else
5004 + timeout = TCP_TIMEOUT_INIT;
5005 ++ timeout += jiffies;
5006 +
5007 + WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER);
5008 + icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
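
For context: tp->srtt_us holds the smoothed RTT in microseconds scaled by 8, so shifting right by (3 - 1) yields roughly twice the RTT; the fix also converts that to jiffies and adds the current jiffies so the stored timeout is an absolute expiry rather than a raw microsecond count. A standalone sketch of the arithmetic, with a simplified usecs_to_jiffies() and made-up HZ and jiffies values:

#include <stdio.h>

#define HZ 250UL

/* toy stand-in for the kernel helper: round up to the next jiffy */
static unsigned long usecs_to_jiffies(unsigned long us)
{
	return (us * HZ + 999999UL) / 1000000UL;
}

int main(void)
{
	unsigned long jiffies = 100000UL;  /* pretend current jiffies */
	unsigned long srtt_us = 8 * 40000; /* srtt_us stores 8 * RTT; RTT = 40 ms */
	unsigned long timeout;

	/* new computation: 2 * RTT in usec, converted and made absolute */
	timeout = usecs_to_jiffies(srtt_us >> (3 - 1));
	timeout += jiffies;

	printf("2*RTT = %lu us -> expires at jiffy %lu\n", srtt_us >> 2, timeout);
	return 0;
}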
5009 +diff --git a/net/ncsi/ncsi-cmd.c b/net/ncsi/ncsi-cmd.c
5010 +index ba9ae482141b0..dda8b76b77988 100644
5011 +--- a/net/ncsi/ncsi-cmd.c
5012 ++++ b/net/ncsi/ncsi-cmd.c
5013 +@@ -18,6 +18,8 @@
5014 + #include "internal.h"
5015 + #include "ncsi-pkt.h"
5016 +
5017 ++static const int padding_bytes = 26;
5018 ++
5019 + u32 ncsi_calculate_checksum(unsigned char *data, int len)
5020 + {
5021 + u32 checksum = 0;
5022 +@@ -213,12 +215,17 @@ static int ncsi_cmd_handler_oem(struct sk_buff *skb,
5023 + {
5024 + struct ncsi_cmd_oem_pkt *cmd;
5025 + unsigned int len;
5026 ++ int payload;
5027 ++ /* NC-SI spec DSP_0222_1.2.0, section 8.2.2.2
5028 ++ * requires payload to be padded with 0 to
5029 ++ * 32-bit boundary before the checksum field.
5030 ++ * Ensure the padding bytes are accounted for in
5031 ++ * skb allocation
5032 ++ */
5033 +
5034 ++ payload = ALIGN(nca->payload, 4);
5035 + len = sizeof(struct ncsi_cmd_pkt_hdr) + 4;
5036 +- if (nca->payload < 26)
5037 +- len += 26;
5038 +- else
5039 +- len += nca->payload;
5040 ++ len += max(payload, padding_bytes);
5041 +
5042 + cmd = skb_put_zero(skb, len);
5043 + memcpy(&cmd->mfr_id, nca->data, nca->payload);
5044 +@@ -272,6 +279,7 @@ static struct ncsi_request *ncsi_alloc_command(struct ncsi_cmd_arg *nca)
5045 + struct net_device *dev = nd->dev;
5046 + int hlen = LL_RESERVED_SPACE(dev);
5047 + int tlen = dev->needed_tailroom;
5048 ++ int payload;
5049 + int len = hlen + tlen;
5050 + struct sk_buff *skb;
5051 + struct ncsi_request *nr;
5052 +@@ -281,14 +289,14 @@ static struct ncsi_request *ncsi_alloc_command(struct ncsi_cmd_arg *nca)
5053 + return NULL;
5054 +
5055 + /* NCSI command packet has 16-bytes header, payload, 4 bytes checksum.
5056 ++ * Payload needs padding so that the checksum field following payload is
5057 ++ * aligned to 32-bit boundary.
5058 + * The packet needs padding if its payload is less than 26 bytes to
5059 + * meet 64 bytes minimal ethernet frame length.
5060 + */
5061 + len += sizeof(struct ncsi_cmd_pkt_hdr) + 4;
5062 +- if (nca->payload < 26)
5063 +- len += 26;
5064 +- else
5065 +- len += nca->payload;
5066 ++ payload = ALIGN(nca->payload, 4);
5067 ++ len += max(payload, padding_bytes);
5068 +
5069 + /* Allocate skb */
5070 + skb = alloc_skb(len, GFP_ATOMIC);
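
For context: both hunks now reserve max(ALIGN(payload, 4), 26) bytes after the 16-byte command header and 4-byte checksum, so the payload can be zero-padded to a 32-bit boundary as DSP_0222 requires while the 26-byte floor still guarantees the 64-byte minimum Ethernet frame. A standalone sketch of that length computation; the hlen/tlen inputs are made-up values standing in for LL_RESERVED_SPACE() and needed_tailroom:

#include <stdio.h>

#define ALIGN4(x)      (((x) + 3U) & ~3U)
#define NCSI_HDR_LEN   16U   /* assumed sizeof(struct ncsi_cmd_pkt_hdr) */
#define NCSI_CSUM_LEN  4U
#define PADDING_BYTES  26U   /* mirrors the patch's padding_bytes constant */

static unsigned int ncsi_cmd_len(unsigned int hlen, unsigned int tlen,
				 unsigned int payload)
{
	unsigned int padded = ALIGN4(payload);
	unsigned int len = hlen + tlen + NCSI_HDR_LEN + NCSI_CSUM_LEN;

	/* keep whichever is larger: the padded payload or the 26-byte
	 * floor needed to reach the minimum Ethernet frame size */
	len += padded > PADDING_BYTES ? padded : PADDING_BYTES;
	return len;
}

int main(void)
{
	printf("payload 10 -> len %u\n", ncsi_cmd_len(16, 0, 10)); /* floor wins */
	printf("payload 30 -> len %u\n", ncsi_cmd_len(16, 0, 30)); /* ALIGN4(30) = 32 */
	return 0;
}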
5071 +diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
5072 +index c0b8215ab3d47..3a76da58d88bb 100644
5073 +--- a/net/netfilter/ipvs/ip_vs_core.c
5074 ++++ b/net/netfilter/ipvs/ip_vs_core.c
5075 +@@ -1976,7 +1976,6 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
5076 + struct ip_vs_proto_data *pd;
5077 + struct ip_vs_conn *cp;
5078 + int ret, pkts;
5079 +- int conn_reuse_mode;
5080 + struct sock *sk;
5081 +
5082 + /* Already marked as IPVS request or reply? */
5083 +@@ -2053,15 +2052,16 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
5084 + cp = INDIRECT_CALL_1(pp->conn_in_get, ip_vs_conn_in_get_proto,
5085 + ipvs, af, skb, &iph);
5086 +
5087 +- conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
5088 +- if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) {
5089 ++ if (!iph.fragoffs && is_new_conn(skb, &iph) && cp) {
5090 ++ int conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
5091 + bool old_ct = false, resched = false;
5092 +
5093 + if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
5094 + unlikely(!atomic_read(&cp->dest->weight))) {
5095 + resched = true;
5096 + old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
5097 +- } else if (is_new_conn_expected(cp, conn_reuse_mode)) {
5098 ++ } else if (conn_reuse_mode &&
5099 ++ is_new_conn_expected(cp, conn_reuse_mode)) {
5100 + old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
5101 + if (!atomic_read(&cp->n_control)) {
5102 + resched = true;
5103 +diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
5104 +index cb4cfa4f61a8d..60a1a666e797a 100644
5105 +--- a/net/netfilter/nf_conntrack_netlink.c
5106 ++++ b/net/netfilter/nf_conntrack_netlink.c
5107 +@@ -973,11 +973,9 @@ ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)
5108 + CTA_TUPLE_REPLY,
5109 + filter->family,
5110 + &filter->zone,
5111 +- filter->orig_flags);
5112 +- if (err < 0) {
5113 +- err = -EINVAL;
5114 ++ filter->reply_flags);
5115 ++ if (err < 0)
5116 + goto err_filter;
5117 +- }
5118 + }
5119 +
5120 + return filter;
5121 +diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
5122 +index a6b654b028dd4..d1862782be450 100644
5123 +--- a/net/netfilter/nf_flow_table_offload.c
5124 ++++ b/net/netfilter/nf_flow_table_offload.c
5125 +@@ -63,11 +63,11 @@ static void nf_flow_rule_lwt_match(struct nf_flow_match *match,
5126 + sizeof(struct in6_addr));
5127 + if (memcmp(&key->enc_ipv6.src, &in6addr_any,
5128 + sizeof(struct in6_addr)))
5129 +- memset(&key->enc_ipv6.src, 0xff,
5130 ++ memset(&mask->enc_ipv6.src, 0xff,
5131 + sizeof(struct in6_addr));
5132 + if (memcmp(&key->enc_ipv6.dst, &in6addr_any,
5133 + sizeof(struct in6_addr)))
5134 +- memset(&key->enc_ipv6.dst, 0xff,
5135 ++ memset(&mask->enc_ipv6.dst, 0xff,
5136 + sizeof(struct in6_addr));
5137 + enc_keys |= BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS);
5138 + key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
5139 +diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c
5140 +index c76701ac35abf..c34cb6e81d855 100644
5141 +--- a/net/sched/sch_ets.c
5142 ++++ b/net/sched/sch_ets.c
5143 +@@ -667,12 +667,14 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
5144 + q->classes[i].deficit = quanta[i];
5145 + }
5146 + }
5147 ++ for (i = q->nbands; i < oldbands; i++) {
5148 ++ qdisc_tree_flush_backlog(q->classes[i].qdisc);
5149 ++ if (i >= q->nstrict)
5150 ++ list_del(&q->classes[i].alist);
5151 ++ }
5152 + q->nstrict = nstrict;
5153 + memcpy(q->prio2band, priomap, sizeof(priomap));
5154 +
5155 +- for (i = q->nbands; i < oldbands; i++)
5156 +- qdisc_tree_flush_backlog(q->classes[i].qdisc);
5157 +-
5158 + for (i = 0; i < q->nbands; i++)
5159 + q->classes[i].quantum = quanta[i];
5160 +
5161 +diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
5162 +index cfb5b9be0569d..ac8265e35b2d2 100644
5163 +--- a/net/smc/af_smc.c
5164 ++++ b/net/smc/af_smc.c
5165 +@@ -1864,8 +1864,10 @@ static int smc_listen(struct socket *sock, int backlog)
5166 + smc->clcsock->sk->sk_user_data =
5167 + (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
5168 + rc = kernel_listen(smc->clcsock, backlog);
5169 +- if (rc)
5170 ++ if (rc) {
5171 ++ smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready;
5172 + goto out;
5173 ++ }
5174 + sk->sk_max_ack_backlog = backlog;
5175 + sk->sk_ack_backlog = 0;
5176 + sk->sk_state = SMC_LISTEN;
5177 +@@ -2096,8 +2098,10 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
5178 + static int smc_shutdown(struct socket *sock, int how)
5179 + {
5180 + struct sock *sk = sock->sk;
5181 ++ bool do_shutdown = true;
5182 + struct smc_sock *smc;
5183 + int rc = -EINVAL;
5184 ++ int old_state;
5185 + int rc1 = 0;
5186 +
5187 + smc = smc_sk(sk);
5188 +@@ -2124,7 +2128,11 @@ static int smc_shutdown(struct socket *sock, int how)
5189 + }
5190 + switch (how) {
5191 + case SHUT_RDWR: /* shutdown in both directions */
5192 ++ old_state = sk->sk_state;
5193 + rc = smc_close_active(smc);
5194 ++ if (old_state == SMC_ACTIVE &&
5195 ++ sk->sk_state == SMC_PEERCLOSEWAIT1)
5196 ++ do_shutdown = false;
5197 + break;
5198 + case SHUT_WR:
5199 + rc = smc_close_shutdown_write(smc);
5200 +@@ -2134,7 +2142,7 @@ static int smc_shutdown(struct socket *sock, int how)
5201 + /* nothing more to do because peer is not involved */
5202 + break;
5203 + }
5204 +- if (smc->clcsock)
5205 ++ if (do_shutdown && smc->clcsock)
5206 + rc1 = kernel_sock_shutdown(smc->clcsock, how);
5207 + /* map sock_shutdown_cmd constants to sk_shutdown value range */
5208 + sk->sk_shutdown |= how + 1;
5209 +diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
5210 +index 0f9ffba07d268..04620b53b74a7 100644
5211 +--- a/net/smc/smc_close.c
5212 ++++ b/net/smc/smc_close.c
5213 +@@ -228,6 +228,12 @@ again:
5214 + /* send close request */
5215 + rc = smc_close_final(conn);
5216 + sk->sk_state = SMC_PEERCLOSEWAIT1;
5217 ++
5218 ++ /* Actively shut down the clcsock before the peer closes it,
5219 ++ * to prevent the peer from entering the TIME_WAIT state.
5220 ++ */
5221 ++ if (smc->clcsock && smc->clcsock->sk)
5222 ++ rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
5223 + } else {
5224 + /* peer event has changed the state */
5225 + goto again;
5226 +diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
5227 +index 109d790eaebe2..cd625b672429f 100644
5228 +--- a/net/smc/smc_core.c
5229 ++++ b/net/smc/smc_core.c
5230 +@@ -1209,14 +1209,26 @@ static void smc_link_down_work(struct work_struct *work)
5231 + mutex_unlock(&lgr->llc_conf_mutex);
5232 + }
5233 +
5234 +-/* Determine vlan of internal TCP socket.
5235 +- * @vlan_id: address to store the determined vlan id into
5236 +- */
5237 ++static int smc_vlan_by_tcpsk_walk(struct net_device *lower_dev,
5238 ++ struct netdev_nested_priv *priv)
5239 ++{
5240 ++ unsigned short *vlan_id = (unsigned short *)priv->data;
5241 ++
5242 ++ if (is_vlan_dev(lower_dev)) {
5243 ++ *vlan_id = vlan_dev_vlan_id(lower_dev);
5244 ++ return 1;
5245 ++ }
5246 ++
5247 ++ return 0;
5248 ++}
5249 ++
5250 ++/* Determine vlan of internal TCP socket. */
5251 + int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
5252 + {
5253 + struct dst_entry *dst = sk_dst_get(clcsock->sk);
5254 ++ struct netdev_nested_priv priv;
5255 + struct net_device *ndev;
5256 +- int i, nest_lvl, rc = 0;
5257 ++ int rc = 0;
5258 +
5259 + ini->vlan_id = 0;
5260 + if (!dst) {
5261 +@@ -1234,20 +1246,9 @@ int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
5262 + goto out_rel;
5263 + }
5264 +
5265 ++ priv.data = (void *)&ini->vlan_id;
5266 + rtnl_lock();
5267 +- nest_lvl = ndev->lower_level;
5268 +- for (i = 0; i < nest_lvl; i++) {
5269 +- struct list_head *lower = &ndev->adj_list.lower;
5270 +-
5271 +- if (list_empty(lower))
5272 +- break;
5273 +- lower = lower->next;
5274 +- ndev = (struct net_device *)netdev_lower_get_next(ndev, &lower);
5275 +- if (is_vlan_dev(ndev)) {
5276 +- ini->vlan_id = vlan_dev_vlan_id(ndev);
5277 +- break;
5278 +- }
5279 +- }
5280 ++ netdev_walk_all_lower_dev(ndev, smc_vlan_by_tcpsk_walk, &priv);
5281 + rtnl_unlock();
5282 +
5283 + out_rel:
5284 +diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
5285 +index 32a51b20509c9..58d22d6b86ae6 100644
5286 +--- a/net/tls/tls_main.c
5287 ++++ b/net/tls/tls_main.c
5288 +@@ -61,7 +61,7 @@ static DEFINE_MUTEX(tcpv6_prot_mutex);
5289 + static const struct proto *saved_tcpv4_prot;
5290 + static DEFINE_MUTEX(tcpv4_prot_mutex);
5291 + static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
5292 +-static struct proto_ops tls_sw_proto_ops;
5293 ++static struct proto_ops tls_proto_ops[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
5294 + static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
5295 + const struct proto *base);
5296 +
5297 +@@ -71,6 +71,8 @@ void update_sk_prot(struct sock *sk, struct tls_context *ctx)
5298 +
5299 + WRITE_ONCE(sk->sk_prot,
5300 + &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf]);
5301 ++ WRITE_ONCE(sk->sk_socket->ops,
5302 ++ &tls_proto_ops[ip_ver][ctx->tx_conf][ctx->rx_conf]);
5303 + }
5304 +
5305 + int wait_on_pending_writer(struct sock *sk, long *timeo)
5306 +@@ -578,8 +580,6 @@ static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
5307 + if (tx) {
5308 + ctx->sk_write_space = sk->sk_write_space;
5309 + sk->sk_write_space = tls_write_space;
5310 +- } else {
5311 +- sk->sk_socket->ops = &tls_sw_proto_ops;
5312 + }
5313 + goto out;
5314 +
5315 +@@ -637,6 +637,39 @@ struct tls_context *tls_ctx_create(struct sock *sk)
5316 + return ctx;
5317 + }
5318 +
5319 ++static void build_proto_ops(struct proto_ops ops[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
5320 ++ const struct proto_ops *base)
5321 ++{
5322 ++ ops[TLS_BASE][TLS_BASE] = *base;
5323 ++
5324 ++ ops[TLS_SW ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];
5325 ++ ops[TLS_SW ][TLS_BASE].sendpage_locked = tls_sw_sendpage_locked;
5326 ++
5327 ++ ops[TLS_BASE][TLS_SW ] = ops[TLS_BASE][TLS_BASE];
5328 ++ ops[TLS_BASE][TLS_SW ].splice_read = tls_sw_splice_read;
5329 ++
5330 ++ ops[TLS_SW ][TLS_SW ] = ops[TLS_SW ][TLS_BASE];
5331 ++ ops[TLS_SW ][TLS_SW ].splice_read = tls_sw_splice_read;
5332 ++
5333 ++#ifdef CONFIG_TLS_DEVICE
5334 ++ ops[TLS_HW ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];
5335 ++ ops[TLS_HW ][TLS_BASE].sendpage_locked = NULL;
5336 ++
5337 ++ ops[TLS_HW ][TLS_SW ] = ops[TLS_BASE][TLS_SW ];
5338 ++ ops[TLS_HW ][TLS_SW ].sendpage_locked = NULL;
5339 ++
5340 ++ ops[TLS_BASE][TLS_HW ] = ops[TLS_BASE][TLS_SW ];
5341 ++
5342 ++ ops[TLS_SW ][TLS_HW ] = ops[TLS_SW ][TLS_SW ];
5343 ++
5344 ++ ops[TLS_HW ][TLS_HW ] = ops[TLS_HW ][TLS_SW ];
5345 ++ ops[TLS_HW ][TLS_HW ].sendpage_locked = NULL;
5346 ++#endif
5347 ++#ifdef CONFIG_TLS_TOE
5348 ++ ops[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
5349 ++#endif
5350 ++}
5351 ++
5352 + static void tls_build_proto(struct sock *sk)
5353 + {
5354 + int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
5355 +@@ -648,6 +681,8 @@ static void tls_build_proto(struct sock *sk)
5356 + mutex_lock(&tcpv6_prot_mutex);
5357 + if (likely(prot != saved_tcpv6_prot)) {
5358 + build_protos(tls_prots[TLSV6], prot);
5359 ++ build_proto_ops(tls_proto_ops[TLSV6],
5360 ++ sk->sk_socket->ops);
5361 + smp_store_release(&saved_tcpv6_prot, prot);
5362 + }
5363 + mutex_unlock(&tcpv6_prot_mutex);
5364 +@@ -658,6 +693,8 @@ static void tls_build_proto(struct sock *sk)
5365 + mutex_lock(&tcpv4_prot_mutex);
5366 + if (likely(prot != saved_tcpv4_prot)) {
5367 + build_protos(tls_prots[TLSV4], prot);
5368 ++ build_proto_ops(tls_proto_ops[TLSV4],
5369 ++ sk->sk_socket->ops);
5370 + smp_store_release(&saved_tcpv4_prot, prot);
5371 + }
5372 + mutex_unlock(&tcpv4_prot_mutex);
5373 +@@ -868,10 +905,6 @@ static int __init tls_register(void)
5374 + if (err)
5375 + return err;
5376 +
5377 +- tls_sw_proto_ops = inet_stream_ops;
5378 +- tls_sw_proto_ops.splice_read = tls_sw_splice_read;
5379 +- tls_sw_proto_ops.sendpage_locked = tls_sw_sendpage_locked;
5380 +-
5381 + tls_device_init();
5382 + tcp_register_ulp(&tcp_tls_ulp_ops);
5383 +
5384 +diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
5385 +index 14cce61160a58..122d5daed8b61 100644
5386 +--- a/net/tls/tls_sw.c
5387 ++++ b/net/tls/tls_sw.c
5388 +@@ -2007,21 +2007,18 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
5389 + if (!skb)
5390 + goto splice_read_end;
5391 +
5392 +- if (!ctx->decrypted) {
5393 +- err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);
5394 +-
5395 +- /* splice does not support reading control messages */
5396 +- if (ctx->control != TLS_RECORD_TYPE_DATA) {
5397 +- err = -EINVAL;
5398 +- goto splice_read_end;
5399 +- }
5400 ++ err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);
5401 ++ if (err < 0) {
5402 ++ tls_err_abort(sk, -EBADMSG);
5403 ++ goto splice_read_end;
5404 ++ }
5405 +
5406 +- if (err < 0) {
5407 +- tls_err_abort(sk, -EBADMSG);
5408 +- goto splice_read_end;
5409 +- }
5410 +- ctx->decrypted = 1;
5411 ++ /* splice does not support reading control messages */
5412 ++ if (ctx->control != TLS_RECORD_TYPE_DATA) {
5413 ++ err = -EINVAL;
5414 ++ goto splice_read_end;
5415 + }
5416 ++
5417 + rxm = strp_msg(skb);
5418 +
5419 + chunk = min_t(unsigned int, rxm->full_len, len);
5420 +diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
5421 +index 6cdb3db7507b1..fc61571a3ac73 100644
5422 +--- a/sound/hda/intel-dsp-config.c
5423 ++++ b/sound/hda/intel-dsp-config.c
5424 +@@ -298,6 +298,15 @@ static const struct config_entry config_table[] = {
5425 + },
5426 + #endif
5427 +
5428 ++/* JasperLake */
5429 ++#if IS_ENABLED(CONFIG_SND_SOC_SOF_JASPERLAKE)
5430 ++ {
5431 ++ .flags = FLAG_SOF,
5432 ++ .device = 0x4dc8,
5433 ++ .codec_hid = "ESSX8336",
5434 ++ },
5435 ++#endif
5436 ++
5437 + /* Tigerlake */
5438 + #if IS_ENABLED(CONFIG_SND_SOC_SOF_TIGERLAKE)
5439 + {
5440 +diff --git a/sound/pci/ctxfi/ctamixer.c b/sound/pci/ctxfi/ctamixer.c
5441 +index d4ff377eb3a34..6d636bdcaa5a3 100644
5442 +--- a/sound/pci/ctxfi/ctamixer.c
5443 ++++ b/sound/pci/ctxfi/ctamixer.c
5444 +@@ -23,16 +23,15 @@
5445 +
5446 + #define BLANK_SLOT 4094
5447 +
5448 +-static int amixer_master(struct rsc *rsc)
5449 ++static void amixer_master(struct rsc *rsc)
5450 + {
5451 + rsc->conj = 0;
5452 +- return rsc->idx = container_of(rsc, struct amixer, rsc)->idx[0];
5453 ++ rsc->idx = container_of(rsc, struct amixer, rsc)->idx[0];
5454 + }
5455 +
5456 +-static int amixer_next_conj(struct rsc *rsc)
5457 ++static void amixer_next_conj(struct rsc *rsc)
5458 + {
5459 + rsc->conj++;
5460 +- return container_of(rsc, struct amixer, rsc)->idx[rsc->conj];
5461 + }
5462 +
5463 + static int amixer_index(const struct rsc *rsc)
5464 +@@ -331,16 +330,15 @@ int amixer_mgr_destroy(struct amixer_mgr *amixer_mgr)
5465 +
5466 + /* SUM resource management */
5467 +
5468 +-static int sum_master(struct rsc *rsc)
5469 ++static void sum_master(struct rsc *rsc)
5470 + {
5471 + rsc->conj = 0;
5472 +- return rsc->idx = container_of(rsc, struct sum, rsc)->idx[0];
5473 ++ rsc->idx = container_of(rsc, struct sum, rsc)->idx[0];
5474 + }
5475 +
5476 +-static int sum_next_conj(struct rsc *rsc)
5477 ++static void sum_next_conj(struct rsc *rsc)
5478 + {
5479 + rsc->conj++;
5480 +- return container_of(rsc, struct sum, rsc)->idx[rsc->conj];
5481 + }
5482 +
5483 + static int sum_index(const struct rsc *rsc)
5484 +diff --git a/sound/pci/ctxfi/ctdaio.c b/sound/pci/ctxfi/ctdaio.c
5485 +index 4cb47b5a792c5..aae544dff8868 100644
5486 +--- a/sound/pci/ctxfi/ctdaio.c
5487 ++++ b/sound/pci/ctxfi/ctdaio.c
5488 +@@ -51,12 +51,12 @@ static const struct daio_rsc_idx idx_20k2[NUM_DAIOTYP] = {
5489 + [SPDIFIO] = {.left = 0x05, .right = 0x85},
5490 + };
5491 +
5492 +-static int daio_master(struct rsc *rsc)
5493 ++static void daio_master(struct rsc *rsc)
5494 + {
5495 + /* Actually, this is not the resource index of DAIO.
5496 + * For DAO, it is the input mapper index. And, for DAI,
5497 + * it is the output time-slot index. */
5498 +- return rsc->conj = rsc->idx;
5499 ++ rsc->conj = rsc->idx;
5500 + }
5501 +
5502 + static int daio_index(const struct rsc *rsc)
5503 +@@ -64,19 +64,19 @@ static int daio_index(const struct rsc *rsc)
5504 + return rsc->conj;
5505 + }
5506 +
5507 +-static int daio_out_next_conj(struct rsc *rsc)
5508 ++static void daio_out_next_conj(struct rsc *rsc)
5509 + {
5510 +- return rsc->conj += 2;
5511 ++ rsc->conj += 2;
5512 + }
5513 +
5514 +-static int daio_in_next_conj_20k1(struct rsc *rsc)
5515 ++static void daio_in_next_conj_20k1(struct rsc *rsc)
5516 + {
5517 +- return rsc->conj += 0x200;
5518 ++ rsc->conj += 0x200;
5519 + }
5520 +
5521 +-static int daio_in_next_conj_20k2(struct rsc *rsc)
5522 ++static void daio_in_next_conj_20k2(struct rsc *rsc)
5523 + {
5524 +- return rsc->conj += 0x100;
5525 ++ rsc->conj += 0x100;
5526 + }
5527 +
5528 + static const struct rsc_ops daio_out_rsc_ops = {
5529 +diff --git a/sound/pci/ctxfi/ctresource.c b/sound/pci/ctxfi/ctresource.c
5530 +index 61e51e35ba168..edf9d9ef9b848 100644
5531 +--- a/sound/pci/ctxfi/ctresource.c
5532 ++++ b/sound/pci/ctxfi/ctresource.c
5533 +@@ -109,18 +109,17 @@ static int audio_ring_slot(const struct rsc *rsc)
5534 + return (rsc->conj << 4) + offset_in_audio_slot_block[rsc->type];
5535 + }
5536 +
5537 +-static int rsc_next_conj(struct rsc *rsc)
5538 ++static void rsc_next_conj(struct rsc *rsc)
5539 + {
5540 + unsigned int i;
5541 + for (i = 0; (i < 8) && (!(rsc->msr & (0x1 << i))); )
5542 + i++;
5543 + rsc->conj += (AUDIO_SLOT_BLOCK_NUM >> i);
5544 +- return rsc->conj;
5545 + }
5546 +
5547 +-static int rsc_master(struct rsc *rsc)
5548 ++static void rsc_master(struct rsc *rsc)
5549 + {
5550 +- return rsc->conj = rsc->idx;
5551 ++ rsc->conj = rsc->idx;
5552 + }
5553 +
5554 + static const struct rsc_ops rsc_generic_ops = {
5555 +diff --git a/sound/pci/ctxfi/ctresource.h b/sound/pci/ctxfi/ctresource.h
5556 +index 93e47488a1c1c..92146054af582 100644
5557 +--- a/sound/pci/ctxfi/ctresource.h
5558 ++++ b/sound/pci/ctxfi/ctresource.h
5559 +@@ -39,8 +39,8 @@ struct rsc {
5560 + };
5561 +
5562 + struct rsc_ops {
5563 +- int (*master)(struct rsc *rsc); /* Move to master resource */
5564 +- int (*next_conj)(struct rsc *rsc); /* Move to next conjugate resource */
5565 ++ void (*master)(struct rsc *rsc); /* Move to master resource */
5566 ++ void (*next_conj)(struct rsc *rsc); /* Move to next conjugate resource */
5567 + int (*index)(const struct rsc *rsc); /* Return the index of resource */
5568 + /* Return the output slot number */
5569 + int (*output_slot)(const struct rsc *rsc);
5570 +diff --git a/sound/pci/ctxfi/ctsrc.c b/sound/pci/ctxfi/ctsrc.c
5571 +index 37c18ce84974a..7d2bda0c3d3de 100644
5572 +--- a/sound/pci/ctxfi/ctsrc.c
5573 ++++ b/sound/pci/ctxfi/ctsrc.c
5574 +@@ -590,16 +590,15 @@ int src_mgr_destroy(struct src_mgr *src_mgr)
5575 +
5576 + /* SRCIMP resource manager operations */
5577 +
5578 +-static int srcimp_master(struct rsc *rsc)
5579 ++static void srcimp_master(struct rsc *rsc)
5580 + {
5581 + rsc->conj = 0;
5582 +- return rsc->idx = container_of(rsc, struct srcimp, rsc)->idx[0];
5583 ++ rsc->idx = container_of(rsc, struct srcimp, rsc)->idx[0];
5584 + }
5585 +
5586 +-static int srcimp_next_conj(struct rsc *rsc)
5587 ++static void srcimp_next_conj(struct rsc *rsc)
5588 + {
5589 + rsc->conj++;
5590 +- return container_of(rsc, struct srcimp, rsc)->idx[rsc->conj];
5591 + }
5592 +
5593 + static int srcimp_index(const struct rsc *rsc)
5594 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5595 +index 2eb06351de1fb..b980fa617229e 100644
5596 +--- a/sound/pci/hda/patch_realtek.c
5597 ++++ b/sound/pci/hda/patch_realtek.c
5598 +@@ -6467,6 +6467,27 @@ static void alc256_fixup_tongfang_reset_persistent_settings(struct hda_codec *co
5599 + alc_write_coef_idx(codec, 0x45, 0x5089);
5600 + }
5601 +
5602 ++static const struct coef_fw alc233_fixup_no_audio_jack_coefs[] = {
5603 ++ WRITE_COEF(0x1a, 0x9003), WRITE_COEF(0x1b, 0x0e2b), WRITE_COEF(0x37, 0xfe06),
5604 ++ WRITE_COEF(0x38, 0x4981), WRITE_COEF(0x45, 0xd489), WRITE_COEF(0x46, 0x0074),
5605 ++ WRITE_COEF(0x49, 0x0149),
5606 ++ {}
5607 ++};
5608 ++
5609 ++static void alc233_fixup_no_audio_jack(struct hda_codec *codec,
5610 ++ const struct hda_fixup *fix,
5611 ++ int action)
5612 ++{
5613 ++ /*
5614 ++ * The audio jack input and output is not detected on the ASRock NUC Box
5615 ++ * 1100 series when cold booting without this fix. Warm rebooting from a
5616 ++ * certain other OS makes the audio functional, as COEF settings are
5617 ++ * preserved in this case. This fix sets these altered COEF values as
5618 ++ * the default.
5619 ++ */
5620 ++ alc_process_coef_fw(codec, alc233_fixup_no_audio_jack_coefs);
5621 ++}
5622 ++
5623 + enum {
5624 + ALC269_FIXUP_GPIO2,
5625 + ALC269_FIXUP_SONY_VAIO,
5626 +@@ -6685,6 +6706,7 @@ enum {
5627 + ALC287_FIXUP_13S_GEN2_SPEAKERS,
5628 + ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS,
5629 + ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
5630 ++ ALC233_FIXUP_NO_AUDIO_JACK,
5631 + };
5632 +
5633 + static const struct hda_fixup alc269_fixups[] = {
5634 +@@ -8399,6 +8421,10 @@ static const struct hda_fixup alc269_fixups[] = {
5635 + .chained = true,
5636 + .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC,
5637 + },
5638 ++ [ALC233_FIXUP_NO_AUDIO_JACK] = {
5639 ++ .type = HDA_FIXUP_FUNC,
5640 ++ .v.func = alc233_fixup_no_audio_jack,
5641 ++ },
5642 + };
5643 +
5644 + static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5645 +@@ -8578,6 +8604,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5646 + SND_PCI_QUIRK(0x103c, 0x8728, "HP EliteBook 840 G7", ALC285_FIXUP_HP_GPIO_LED),
5647 + SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
5648 + SND_PCI_QUIRK(0x103c, 0x8730, "HP ProBook 445 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
5649 ++ SND_PCI_QUIRK(0x103c, 0x8735, "HP ProBook 435 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
5650 + SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT),
5651 + SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED),
5652 + SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
5653 +@@ -8831,6 +8858,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5654 + SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
5655 + SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
5656 + SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
5657 ++ SND_PCI_QUIRK(0x1849, 0x1233, "ASRock NUC Box 1100", ALC233_FIXUP_NO_AUDIO_JACK),
5658 + SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
5659 + SND_PCI_QUIRK(0x1b35, 0x1235, "CZC B20", ALC269_FIXUP_CZC_B20),
5660 + SND_PCI_QUIRK(0x1b35, 0x1236, "CZC TMI", ALC269_FIXUP_CZC_TMI),
5661 +diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c
5662 +index d18ae5e3ee809..699b59cd389c0 100644
5663 +--- a/sound/soc/codecs/wcd934x.c
5664 ++++ b/sound/soc/codecs/wcd934x.c
5665 +@@ -1812,9 +1812,8 @@ static int wcd934x_hw_params(struct snd_pcm_substream *substream,
5666 + }
5667 +
5668 + wcd->dai[dai->id].sconfig.rate = params_rate(params);
5669 +- wcd934x_slim_set_hw_params(wcd, &wcd->dai[dai->id], substream->stream);
5670 +
5671 +- return 0;
5672 ++ return wcd934x_slim_set_hw_params(wcd, &wcd->dai[dai->id], substream->stream);
5673 + }
5674 +
5675 + static int wcd934x_hw_free(struct snd_pcm_substream *substream,
5676 +diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c
5677 +index 9766725c29166..84cf190aa01a6 100644
5678 +--- a/sound/soc/qcom/qdsp6/q6asm-dai.c
5679 ++++ b/sound/soc/qcom/qdsp6/q6asm-dai.c
5680 +@@ -269,9 +269,7 @@ static int q6asm_dai_prepare(struct snd_soc_component *component,
5681 +
5682 + if (ret < 0) {
5683 + dev_err(dev, "%s: q6asm_open_write failed\n", __func__);
5684 +- q6asm_audio_client_free(prtd->audio_client);
5685 +- prtd->audio_client = NULL;
5686 +- return -ENOMEM;
5687 ++ goto open_err;
5688 + }
5689 +
5690 + prtd->session_id = q6asm_get_session_id(prtd->audio_client);
5691 +@@ -279,7 +277,7 @@ static int q6asm_dai_prepare(struct snd_soc_component *component,
5692 + prtd->session_id, substream->stream);
5693 + if (ret) {
5694 + dev_err(dev, "%s: stream reg failed ret:%d\n", __func__, ret);
5695 +- return ret;
5696 ++ goto routing_err;
5697 + }
5698 +
5699 + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
5700 +@@ -301,10 +299,19 @@ static int q6asm_dai_prepare(struct snd_soc_component *component,
5701 + }
5702 + if (ret < 0)
5703 + dev_info(dev, "%s: CMD Format block failed\n", __func__);
5704 ++ else
5705 ++ prtd->state = Q6ASM_STREAM_RUNNING;
5706 +
5707 +- prtd->state = Q6ASM_STREAM_RUNNING;
5708 ++ return ret;
5709 +
5710 +- return 0;
5711 ++routing_err:
5712 ++ q6asm_cmd(prtd->audio_client, prtd->stream_id, CMD_CLOSE);
5713 ++open_err:
5714 ++ q6asm_unmap_memory_regions(substream->stream, prtd->audio_client);
5715 ++ q6asm_audio_client_free(prtd->audio_client);
5716 ++ prtd->audio_client = NULL;
5717 ++
5718 ++ return ret;
5719 + }
5720 +
5721 + static int q6asm_dai_trigger(struct snd_soc_component *component,
5722 +diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c
5723 +index 0a6b9433f6acf..934b3f282bccd 100644
5724 +--- a/sound/soc/qcom/qdsp6/q6routing.c
5725 ++++ b/sound/soc/qcom/qdsp6/q6routing.c
5726 +@@ -491,7 +491,11 @@ static int msm_routing_put_audio_mixer(struct snd_kcontrol *kcontrol,
5727 + session->port_id = be_id;
5728 + snd_soc_dapm_mixer_update_power(dapm, kcontrol, 1, update);
5729 + } else {
5730 +- session->port_id = -1;
5731 ++ if (session->port_id == be_id) {
5732 ++ session->port_id = -1;
5733 ++ return 0;
5734 ++ }
5735 ++
5736 + snd_soc_dapm_mixer_update_power(dapm, kcontrol, 0, update);
5737 + }
5738 +
5739 +diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
5740 +index 1030e11017b27..4d24ac255d253 100644
5741 +--- a/sound/soc/soc-topology.c
5742 ++++ b/sound/soc/soc-topology.c
5743 +@@ -2873,6 +2873,7 @@ EXPORT_SYMBOL_GPL(snd_soc_tplg_widget_remove_all);
5744 + /* remove dynamic controls from the component driver */
5745 + int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index)
5746 + {
5747 ++ struct snd_card *card = comp->card->snd_card;
5748 + struct snd_soc_dobj *dobj, *next_dobj;
5749 + int pass = SOC_TPLG_PASS_END;
5750 +
5751 +@@ -2880,6 +2881,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index)
5752 + while (pass >= SOC_TPLG_PASS_START) {
5753 +
5754 + /* remove mixer controls */
5755 ++ down_write(&card->controls_rwsem);
5756 + list_for_each_entry_safe(dobj, next_dobj, &comp->dobj_list,
5757 + list) {
5758 +
5759 +@@ -2923,6 +2925,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index)
5760 + break;
5761 + }
5762 + }
5763 ++ up_write(&card->controls_rwsem);
5764 + pass--;
5765 + }
5766 +