From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Wed, 01 Dec 2021 12:50:16
Message-Id: 1638363000.c8da7bf8b0ad28bb7c66f0670dd26e0f61fd1417.mpagano@gentoo
1 commit: c8da7bf8b0ad28bb7c66f0670dd26e0f61fd1417
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Dec 1 12:50:00 2021 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Dec 1 12:50:00 2021 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c8da7bf8
7
8 Linux patch 5.4.163
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1162_linux-5.4.163.patch | 4555 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 4559 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 645731c2..f9d7be72 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -691,6 +691,10 @@ Patch: 1161_linux-5.4.162.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.4.162
23
24 +Patch: 1162_linux-5.4.163.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.4.163
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1162_linux-5.4.163.patch b/1162_linux-5.4.163.patch
33 new file mode 100644
34 index 00000000..a8520b08
35 --- /dev/null
36 +++ b/1162_linux-5.4.163.patch
37 @@ -0,0 +1,4555 @@
38 +diff --git a/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt
39 +index 38dc56a577604..ecec514b31550 100644
40 +--- a/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt
41 ++++ b/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt
42 +@@ -43,19 +43,19 @@ group emmc_nb
43 +
44 + group pwm0
45 + - pin 11 (GPIO1-11)
46 +- - functions pwm, gpio
47 ++ - functions pwm, led, gpio
48 +
49 + group pwm1
50 + - pin 12
51 +- - functions pwm, gpio
52 ++ - functions pwm, led, gpio
53 +
54 + group pwm2
55 + - pin 13
56 +- - functions pwm, gpio
57 ++ - functions pwm, led, gpio
58 +
59 + group pwm3
60 + - pin 14
61 +- - functions pwm, gpio
62 ++ - functions pwm, led, gpio
63 +
64 + group pmic1
65 + - pin 7
66 +diff --git a/Documentation/networking/ipvs-sysctl.txt b/Documentation/networking/ipvs-sysctl.txt
67 +index 056898685d408..fc531c29a2e83 100644
68 +--- a/Documentation/networking/ipvs-sysctl.txt
69 ++++ b/Documentation/networking/ipvs-sysctl.txt
70 +@@ -30,8 +30,7 @@ conn_reuse_mode - INTEGER
71 +
72 + 0: disable any special handling on port reuse. The new
73 + connection will be delivered to the same real server that was
74 +- servicing the previous connection. This will effectively
75 +- disable expire_nodest_conn.
76 ++ servicing the previous connection.
77 +
78 + bit 1: enable rescheduling of new connections when it is safe.
79 + That is, whenever expire_nodest_conn and for TCP sockets, when
80 +diff --git a/Makefile b/Makefile
81 +index e8b05f7d3b238..91d77df0128b4 100644
82 +--- a/Makefile
83 ++++ b/Makefile
84 +@@ -1,7 +1,7 @@
85 + # SPDX-License-Identifier: GPL-2.0
86 + VERSION = 5
87 + PATCHLEVEL = 4
88 +-SUBLEVEL = 162
89 ++SUBLEVEL = 163
90 + EXTRAVERSION =
91 + NAME = Kleptomaniac Octopus
92 +
93 +diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
94 +index 9711170649b69..05d67f9769118 100644
95 +--- a/arch/arm/boot/dts/bcm5301x.dtsi
96 ++++ b/arch/arm/boot/dts/bcm5301x.dtsi
97 +@@ -242,6 +242,8 @@
98 +
99 + gpio-controller;
100 + #gpio-cells = <2>;
101 ++ interrupt-controller;
102 ++ #interrupt-cells = <2>;
103 + };
104 +
105 + pcie0: pcie@12000 {
106 +@@ -387,7 +389,7 @@
107 + i2c0: i2c@18009000 {
108 + compatible = "brcm,iproc-i2c";
109 + reg = <0x18009000 0x50>;
110 +- interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
111 ++ interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
112 + #address-cells = <1>;
113 + #size-cells = <0>;
114 + clock-frequency = <100000>;
115 +diff --git a/arch/arm/mach-socfpga/core.h b/arch/arm/mach-socfpga/core.h
116 +index fc2608b18a0d0..18f01190dcfd4 100644
117 +--- a/arch/arm/mach-socfpga/core.h
118 ++++ b/arch/arm/mach-socfpga/core.h
119 +@@ -33,7 +33,7 @@ extern void __iomem *sdr_ctl_base_addr;
120 + u32 socfpga_sdram_self_refresh(u32 sdr_base);
121 + extern unsigned int socfpga_sdram_self_refresh_sz;
122 +
123 +-extern char secondary_trampoline, secondary_trampoline_end;
124 ++extern char secondary_trampoline[], secondary_trampoline_end[];
125 +
126 + extern unsigned long socfpga_cpu1start_addr;
127 +
128 +diff --git a/arch/arm/mach-socfpga/platsmp.c b/arch/arm/mach-socfpga/platsmp.c
129 +index fbb80b883e5dd..201191cf68f32 100644
130 +--- a/arch/arm/mach-socfpga/platsmp.c
131 ++++ b/arch/arm/mach-socfpga/platsmp.c
132 +@@ -20,14 +20,14 @@
133 +
134 + static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
135 + {
136 +- int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
137 ++ int trampoline_size = secondary_trampoline_end - secondary_trampoline;
138 +
139 + if (socfpga_cpu1start_addr) {
140 + /* This will put CPU #1 into reset. */
141 + writel(RSTMGR_MPUMODRST_CPU1,
142 + rst_manager_base_addr + SOCFPGA_RSTMGR_MODMPURST);
143 +
144 +- memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
145 ++ memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);
146 +
147 + writel(__pa_symbol(secondary_startup),
148 + sys_manager_base_addr + (socfpga_cpu1start_addr & 0x000000ff));
149 +@@ -45,12 +45,12 @@ static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
150 +
151 + static int socfpga_a10_boot_secondary(unsigned int cpu, struct task_struct *idle)
152 + {
153 +- int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
154 ++ int trampoline_size = secondary_trampoline_end - secondary_trampoline;
155 +
156 + if (socfpga_cpu1start_addr) {
157 + writel(RSTMGR_MPUMODRST_CPU1, rst_manager_base_addr +
158 + SOCFPGA_A10_RSTMGR_MODMPURST);
159 +- memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
160 ++ memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);
161 +
162 + writel(__pa_symbol(secondary_startup),
163 + sys_manager_base_addr + (socfpga_cpu1start_addr & 0x00000fff));
164 +diff --git a/arch/arm64/boot/dts/marvell/armada-3720-db.dts b/arch/arm64/boot/dts/marvell/armada-3720-db.dts
165 +index f2cc00594d64a..3e5789f372069 100644
166 +--- a/arch/arm64/boot/dts/marvell/armada-3720-db.dts
167 ++++ b/arch/arm64/boot/dts/marvell/armada-3720-db.dts
168 +@@ -128,6 +128,9 @@
169 +
170 + /* CON15(V2.0)/CON17(V1.4) : PCIe / CON15(V2.0)/CON12(V1.4) :mini-PCIe */
171 + &pcie0 {
172 ++ pinctrl-names = "default";
173 ++ pinctrl-0 = <&pcie_reset_pins &pcie_clkreq_pins>;
174 ++ reset-gpios = <&gpiosb 3 GPIO_ACTIVE_LOW>;
175 + status = "okay";
176 + };
177 +
178 +diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
179 +index 6226e7e809807..a75bb2ea3506d 100644
180 +--- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
181 ++++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
182 +@@ -59,6 +59,7 @@
183 + phys = <&comphy1 0>;
184 + pinctrl-names = "default";
185 + pinctrl-0 = <&pcie_reset_pins &pcie_clkreq_pins>;
186 ++ reset-gpios = <&gpiosb 3 GPIO_ACTIVE_LOW>;
187 + };
188 +
189 + /* J6 */
190 +diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
191 +index de0eabff29353..16e73597bb78c 100644
192 +--- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
193 ++++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
194 +@@ -127,10 +127,6 @@
195 + };
196 + };
197 +
198 +-&pcie_reset_pins {
199 +- function = "gpio";
200 +-};
201 +-
202 + &pcie0 {
203 + pinctrl-names = "default";
204 + pinctrl-0 = <&pcie_reset_pins &pcie_clkreq_pins>;
205 +diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
206 +index c28611c1c251a..3d15e4ab3f53a 100644
207 +--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
208 ++++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
209 +@@ -318,7 +318,7 @@
210 +
211 + pcie_reset_pins: pcie-reset-pins {
212 + groups = "pcie1";
213 +- function = "pcie";
214 ++ function = "gpio";
215 + };
216 +
217 + pcie_clkreq_pins: pcie-clkreq-pins {
218 +diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
219 +index 9749818eed6d6..2811ecc1f3c71 100644
220 +--- a/arch/mips/Kconfig
221 ++++ b/arch/mips/Kconfig
222 +@@ -3059,7 +3059,7 @@ config STACKTRACE_SUPPORT
223 + config PGTABLE_LEVELS
224 + int
225 + default 4 if PAGE_SIZE_4KB && MIPS_VA_BITS_48
226 +- default 3 if 64BIT && !PAGE_SIZE_64KB
227 ++ default 3 if 64BIT && (!PAGE_SIZE_64KB || MIPS_VA_BITS_48)
228 + default 2
229 +
230 + config MIPS_AUTO_PFN_OFFSET
231 +diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
232 +index 164483b37d854..99cd24f2ea01b 100644
233 +--- a/arch/parisc/kernel/vmlinux.lds.S
234 ++++ b/arch/parisc/kernel/vmlinux.lds.S
235 +@@ -56,8 +56,6 @@ SECTIONS
236 + {
237 + . = KERNEL_BINARY_TEXT_START;
238 +
239 +- _stext = .; /* start of kernel text, includes init code & data */
240 +-
241 + __init_begin = .;
242 + HEAD_TEXT_SECTION
243 + MLONGCALL_DISCARD(INIT_TEXT_SECTION(8))
244 +@@ -81,6 +79,7 @@ SECTIONS
245 + /* freed after init ends here */
246 +
247 + _text = .; /* Text and read-only data */
248 ++ _stext = .;
249 + MLONGCALL_KEEP(INIT_TEXT_SECTION(8))
250 + .text ALIGN(PAGE_SIZE) : {
251 + TEXT_TEXT
252 +diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
253 +index 4a91b543a8540..6d34b69729854 100644
254 +--- a/arch/powerpc/kvm/book3s_hv_builtin.c
255 ++++ b/arch/powerpc/kvm/book3s_hv_builtin.c
256 +@@ -821,6 +821,7 @@ static void flush_guest_tlb(struct kvm *kvm)
257 + "r" (0) : "memory");
258 + }
259 + asm volatile("ptesync": : :"memory");
260 ++ // POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
261 + asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory");
262 + } else {
263 + for (set = 0; set < kvm->arch.tlb_sets; ++set) {
264 +@@ -831,7 +832,9 @@ static void flush_guest_tlb(struct kvm *kvm)
265 + rb += PPC_BIT(51); /* increment set number */
266 + }
267 + asm volatile("ptesync": : :"memory");
268 +- asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
269 ++ // POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
270 ++ if (cpu_has_feature(CPU_FTR_ARCH_300))
271 ++ asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
272 + }
273 + }
274 +
275 +diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
276 +index 9ebd01219812c..4438c00acb656 100644
277 +--- a/arch/s390/mm/pgtable.c
278 ++++ b/arch/s390/mm/pgtable.c
279 +@@ -970,6 +970,7 @@ EXPORT_SYMBOL(get_guest_storage_key);
280 + int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
281 + unsigned long *oldpte, unsigned long *oldpgste)
282 + {
283 ++ struct vm_area_struct *vma;
284 + unsigned long pgstev;
285 + spinlock_t *ptl;
286 + pgste_t pgste;
287 +@@ -979,6 +980,10 @@ int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
288 + WARN_ON_ONCE(orc > ESSA_MAX);
289 + if (unlikely(orc > ESSA_MAX))
290 + return -EINVAL;
291 ++
292 ++ vma = find_vma(mm, hva);
293 ++ if (!vma || hva < vma->vm_start || is_vm_hugetlb_page(vma))
294 ++ return -EFAULT;
295 + ptep = get_locked_pte(mm, hva, &ptl);
296 + if (unlikely(!ptep))
297 + return -EFAULT;
298 +@@ -1071,10 +1076,14 @@ EXPORT_SYMBOL(pgste_perform_essa);
299 + int set_pgste_bits(struct mm_struct *mm, unsigned long hva,
300 + unsigned long bits, unsigned long value)
301 + {
302 ++ struct vm_area_struct *vma;
303 + spinlock_t *ptl;
304 + pgste_t new;
305 + pte_t *ptep;
306 +
307 ++ vma = find_vma(mm, hva);
308 ++ if (!vma || hva < vma->vm_start || is_vm_hugetlb_page(vma))
309 ++ return -EFAULT;
310 + ptep = get_locked_pte(mm, hva, &ptl);
311 + if (unlikely(!ptep))
312 + return -EFAULT;
313 +@@ -1099,9 +1108,13 @@ EXPORT_SYMBOL(set_pgste_bits);
314 + */
315 + int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep)
316 + {
317 ++ struct vm_area_struct *vma;
318 + spinlock_t *ptl;
319 + pte_t *ptep;
320 +
321 ++ vma = find_vma(mm, hva);
322 ++ if (!vma || hva < vma->vm_start || is_vm_hugetlb_page(vma))
323 ++ return -EFAULT;
324 + ptep = get_locked_pte(mm, hva, &ptl);
325 + if (unlikely(!ptep))
326 + return -EFAULT;
327 +diff --git a/drivers/android/binder.c b/drivers/android/binder.c
328 +index 47f839bc0234f..1cdc7426bd033 100644
329 +--- a/drivers/android/binder.c
330 ++++ b/drivers/android/binder.c
331 +@@ -3095,7 +3095,7 @@ static void binder_transaction(struct binder_proc *proc,
332 + t->from = thread;
333 + else
334 + t->from = NULL;
335 +- t->sender_euid = proc->cred->euid;
336 ++ t->sender_euid = task_euid(proc->tsk);
337 + t->to_proc = target_proc;
338 + t->to_thread = target_thread;
339 + t->code = tr->code;
340 +diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
341 +index def41e1bd7364..baf10b73675e2 100644
342 +--- a/drivers/block/xen-blkfront.c
343 ++++ b/drivers/block/xen-blkfront.c
344 +@@ -80,6 +80,7 @@ enum blkif_state {
345 + BLKIF_STATE_DISCONNECTED,
346 + BLKIF_STATE_CONNECTED,
347 + BLKIF_STATE_SUSPENDED,
348 ++ BLKIF_STATE_ERROR,
349 + };
350 +
351 + struct grant {
352 +@@ -89,6 +90,7 @@ struct grant {
353 + };
354 +
355 + enum blk_req_status {
356 ++ REQ_PROCESSING,
357 + REQ_WAITING,
358 + REQ_DONE,
359 + REQ_ERROR,
360 +@@ -533,10 +535,10 @@ static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
361 +
362 + id = get_id_from_freelist(rinfo);
363 + rinfo->shadow[id].request = req;
364 +- rinfo->shadow[id].status = REQ_WAITING;
365 ++ rinfo->shadow[id].status = REQ_PROCESSING;
366 + rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID;
367 +
368 +- (*ring_req)->u.rw.id = id;
369 ++ rinfo->shadow[id].req.u.rw.id = id;
370 +
371 + return id;
372 + }
373 +@@ -544,11 +546,12 @@ static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
374 + static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
375 + {
376 + struct blkfront_info *info = rinfo->dev_info;
377 +- struct blkif_request *ring_req;
378 ++ struct blkif_request *ring_req, *final_ring_req;
379 + unsigned long id;
380 +
381 + /* Fill out a communications ring structure. */
382 +- id = blkif_ring_get_request(rinfo, req, &ring_req);
383 ++ id = blkif_ring_get_request(rinfo, req, &final_ring_req);
384 ++ ring_req = &rinfo->shadow[id].req;
385 +
386 + ring_req->operation = BLKIF_OP_DISCARD;
387 + ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
388 +@@ -559,8 +562,9 @@ static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_inf
389 + else
390 + ring_req->u.discard.flag = 0;
391 +
392 +- /* Keep a private copy so we can reissue requests when recovering. */
393 +- rinfo->shadow[id].req = *ring_req;
394 ++ /* Copy the request to the ring page. */
395 ++ *final_ring_req = *ring_req;
396 ++ rinfo->shadow[id].status = REQ_WAITING;
397 +
398 + return 0;
399 + }
400 +@@ -693,6 +697,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
401 + {
402 + struct blkfront_info *info = rinfo->dev_info;
403 + struct blkif_request *ring_req, *extra_ring_req = NULL;
404 ++ struct blkif_request *final_ring_req, *final_extra_ring_req = NULL;
405 + unsigned long id, extra_id = NO_ASSOCIATED_ID;
406 + bool require_extra_req = false;
407 + int i;
408 +@@ -737,7 +742,8 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
409 + }
410 +
411 + /* Fill out a communications ring structure. */
412 +- id = blkif_ring_get_request(rinfo, req, &ring_req);
413 ++ id = blkif_ring_get_request(rinfo, req, &final_ring_req);
414 ++ ring_req = &rinfo->shadow[id].req;
415 +
416 + num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
417 + num_grant = 0;
418 +@@ -788,7 +794,9 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
419 + ring_req->u.rw.nr_segments = num_grant;
420 + if (unlikely(require_extra_req)) {
421 + extra_id = blkif_ring_get_request(rinfo, req,
422 +- &extra_ring_req);
423 ++ &final_extra_ring_req);
424 ++ extra_ring_req = &rinfo->shadow[extra_id].req;
425 ++
426 + /*
427 + * Only the first request contains the scatter-gather
428 + * list.
429 +@@ -830,10 +838,13 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
430 + if (setup.segments)
431 + kunmap_atomic(setup.segments);
432 +
433 +- /* Keep a private copy so we can reissue requests when recovering. */
434 +- rinfo->shadow[id].req = *ring_req;
435 +- if (unlikely(require_extra_req))
436 +- rinfo->shadow[extra_id].req = *extra_ring_req;
437 ++ /* Copy request(s) to the ring page. */
438 ++ *final_ring_req = *ring_req;
439 ++ rinfo->shadow[id].status = REQ_WAITING;
440 ++ if (unlikely(require_extra_req)) {
441 ++ *final_extra_ring_req = *extra_ring_req;
442 ++ rinfo->shadow[extra_id].status = REQ_WAITING;
443 ++ }
444 +
445 + if (new_persistent_gnts)
446 + gnttab_free_grant_references(setup.gref_head);
447 +@@ -1407,8 +1418,8 @@ static enum blk_req_status blkif_rsp_to_req_status(int rsp)
448 + static int blkif_get_final_status(enum blk_req_status s1,
449 + enum blk_req_status s2)
450 + {
451 +- BUG_ON(s1 == REQ_WAITING);
452 +- BUG_ON(s2 == REQ_WAITING);
453 ++ BUG_ON(s1 < REQ_DONE);
454 ++ BUG_ON(s2 < REQ_DONE);
455 +
456 + if (s1 == REQ_ERROR || s2 == REQ_ERROR)
457 + return BLKIF_RSP_ERROR;
458 +@@ -1441,7 +1452,7 @@ static bool blkif_completion(unsigned long *id,
459 + s->status = blkif_rsp_to_req_status(bret->status);
460 +
461 + /* Wait the second response if not yet here. */
462 +- if (s2->status == REQ_WAITING)
463 ++ if (s2->status < REQ_DONE)
464 + return false;
465 +
466 + bret->status = blkif_get_final_status(s->status,
467 +@@ -1549,7 +1560,7 @@ static bool blkif_completion(unsigned long *id,
468 + static irqreturn_t blkif_interrupt(int irq, void *dev_id)
469 + {
470 + struct request *req;
471 +- struct blkif_response *bret;
472 ++ struct blkif_response bret;
473 + RING_IDX i, rp;
474 + unsigned long flags;
475 + struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
476 +@@ -1560,54 +1571,76 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
477 +
478 + spin_lock_irqsave(&rinfo->ring_lock, flags);
479 + again:
480 +- rp = rinfo->ring.sring->rsp_prod;
481 +- rmb(); /* Ensure we see queued responses up to 'rp'. */
482 ++ rp = READ_ONCE(rinfo->ring.sring->rsp_prod);
483 ++ virt_rmb(); /* Ensure we see queued responses up to 'rp'. */
484 ++ if (RING_RESPONSE_PROD_OVERFLOW(&rinfo->ring, rp)) {
485 ++ pr_alert("%s: illegal number of responses %u\n",
486 ++ info->gd->disk_name, rp - rinfo->ring.rsp_cons);
487 ++ goto err;
488 ++ }
489 +
490 + for (i = rinfo->ring.rsp_cons; i != rp; i++) {
491 + unsigned long id;
492 ++ unsigned int op;
493 ++
494 ++ RING_COPY_RESPONSE(&rinfo->ring, i, &bret);
495 ++ id = bret.id;
496 +
497 +- bret = RING_GET_RESPONSE(&rinfo->ring, i);
498 +- id = bret->id;
499 + /*
500 + * The backend has messed up and given us an id that we would
501 + * never have given to it (we stamp it up to BLK_RING_SIZE -
502 + * look in get_id_from_freelist.
503 + */
504 + if (id >= BLK_RING_SIZE(info)) {
505 +- WARN(1, "%s: response to %s has incorrect id (%ld)\n",
506 +- info->gd->disk_name, op_name(bret->operation), id);
507 +- /* We can't safely get the 'struct request' as
508 +- * the id is busted. */
509 +- continue;
510 ++ pr_alert("%s: response has incorrect id (%ld)\n",
511 ++ info->gd->disk_name, id);
512 ++ goto err;
513 + }
514 ++ if (rinfo->shadow[id].status != REQ_WAITING) {
515 ++ pr_alert("%s: response references no pending request\n",
516 ++ info->gd->disk_name);
517 ++ goto err;
518 ++ }
519 ++
520 ++ rinfo->shadow[id].status = REQ_PROCESSING;
521 + req = rinfo->shadow[id].request;
522 +
523 +- if (bret->operation != BLKIF_OP_DISCARD) {
524 ++ op = rinfo->shadow[id].req.operation;
525 ++ if (op == BLKIF_OP_INDIRECT)
526 ++ op = rinfo->shadow[id].req.u.indirect.indirect_op;
527 ++ if (bret.operation != op) {
528 ++ pr_alert("%s: response has wrong operation (%u instead of %u)\n",
529 ++ info->gd->disk_name, bret.operation, op);
530 ++ goto err;
531 ++ }
532 ++
533 ++ if (bret.operation != BLKIF_OP_DISCARD) {
534 + /*
535 + * We may need to wait for an extra response if the
536 + * I/O request is split in 2
537 + */
538 +- if (!blkif_completion(&id, rinfo, bret))
539 ++ if (!blkif_completion(&id, rinfo, &bret))
540 + continue;
541 + }
542 +
543 + if (add_id_to_freelist(rinfo, id)) {
544 + WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
545 +- info->gd->disk_name, op_name(bret->operation), id);
546 ++ info->gd->disk_name, op_name(bret.operation), id);
547 + continue;
548 + }
549 +
550 +- if (bret->status == BLKIF_RSP_OKAY)
551 ++ if (bret.status == BLKIF_RSP_OKAY)
552 + blkif_req(req)->error = BLK_STS_OK;
553 + else
554 + blkif_req(req)->error = BLK_STS_IOERR;
555 +
556 +- switch (bret->operation) {
557 ++ switch (bret.operation) {
558 + case BLKIF_OP_DISCARD:
559 +- if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
560 ++ if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
561 + struct request_queue *rq = info->rq;
562 +- printk(KERN_WARNING "blkfront: %s: %s op failed\n",
563 +- info->gd->disk_name, op_name(bret->operation));
564 ++
565 ++ pr_warn_ratelimited("blkfront: %s: %s op failed\n",
566 ++ info->gd->disk_name, op_name(bret.operation));
567 + blkif_req(req)->error = BLK_STS_NOTSUPP;
568 + info->feature_discard = 0;
569 + info->feature_secdiscard = 0;
570 +@@ -1617,15 +1650,15 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
571 + break;
572 + case BLKIF_OP_FLUSH_DISKCACHE:
573 + case BLKIF_OP_WRITE_BARRIER:
574 +- if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
575 +- printk(KERN_WARNING "blkfront: %s: %s op failed\n",
576 +- info->gd->disk_name, op_name(bret->operation));
577 ++ if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
578 ++ pr_warn_ratelimited("blkfront: %s: %s op failed\n",
579 ++ info->gd->disk_name, op_name(bret.operation));
580 + blkif_req(req)->error = BLK_STS_NOTSUPP;
581 + }
582 +- if (unlikely(bret->status == BLKIF_RSP_ERROR &&
583 ++ if (unlikely(bret.status == BLKIF_RSP_ERROR &&
584 + rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
585 +- printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
586 +- info->gd->disk_name, op_name(bret->operation));
587 ++ pr_warn_ratelimited("blkfront: %s: empty %s op failed\n",
588 ++ info->gd->disk_name, op_name(bret.operation));
589 + blkif_req(req)->error = BLK_STS_NOTSUPP;
590 + }
591 + if (unlikely(blkif_req(req)->error)) {
592 +@@ -1638,9 +1671,10 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
593 + /* fall through */
594 + case BLKIF_OP_READ:
595 + case BLKIF_OP_WRITE:
596 +- if (unlikely(bret->status != BLKIF_RSP_OKAY))
597 +- dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
598 +- "request: %x\n", bret->status);
599 ++ if (unlikely(bret.status != BLKIF_RSP_OKAY))
600 ++ dev_dbg_ratelimited(&info->xbdev->dev,
601 ++ "Bad return from blkdev data request: %#x\n",
602 ++ bret.status);
603 +
604 + break;
605 + default:
606 +@@ -1665,6 +1699,14 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
607 + spin_unlock_irqrestore(&rinfo->ring_lock, flags);
608 +
609 + return IRQ_HANDLED;
610 ++
611 ++ err:
612 ++ info->connected = BLKIF_STATE_ERROR;
613 ++
614 ++ spin_unlock_irqrestore(&rinfo->ring_lock, flags);
615 ++
616 ++ pr_alert("%s disabled for further use\n", info->gd->disk_name);
617 ++ return IRQ_HANDLED;
618 + }
619 +
620 +
621 +diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c
622 +index 041f8152272bf..177874adccf0d 100644
623 +--- a/drivers/firmware/arm_scmi/scmi_pm_domain.c
624 ++++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c
625 +@@ -106,9 +106,7 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
626 + scmi_pd_data->domains = domains;
627 + scmi_pd_data->num_domains = num_domains;
628 +
629 +- of_genpd_add_provider_onecell(np, scmi_pd_data);
630 +-
631 +- return 0;
632 ++ return of_genpd_add_provider_onecell(np, scmi_pd_data);
633 + }
634 +
635 + static const struct scmi_device_id scmi_id_table[] = {
636 +diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
637 +index 72d30d90b856c..0af246a5609ca 100644
638 +--- a/drivers/gpu/drm/vc4/vc4_bo.c
639 ++++ b/drivers/gpu/drm/vc4/vc4_bo.c
640 +@@ -389,7 +389,7 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
641 +
642 + bo = kzalloc(sizeof(*bo), GFP_KERNEL);
643 + if (!bo)
644 +- return ERR_PTR(-ENOMEM);
645 ++ return NULL;
646 +
647 + bo->madv = VC4_MADV_WILLNEED;
648 + refcount_set(&bo->usecnt, 0);
649 +diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
650 +index f6be2e70a4967..e011839f19f89 100644
651 +--- a/drivers/hid/wacom_wac.c
652 ++++ b/drivers/hid/wacom_wac.c
653 +@@ -2578,6 +2578,9 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
654 + return;
655 +
656 + switch (equivalent_usage) {
657 ++ case HID_DG_CONFIDENCE:
658 ++ wacom_wac->hid_data.confidence = value;
659 ++ break;
660 + case HID_GD_X:
661 + wacom_wac->hid_data.x = value;
662 + break;
663 +@@ -2610,7 +2613,8 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
664 + }
665 +
666 + if (usage->usage_index + 1 == field->report_count) {
667 +- if (equivalent_usage == wacom_wac->hid_data.last_slot_field)
668 ++ if (equivalent_usage == wacom_wac->hid_data.last_slot_field &&
669 ++ wacom_wac->hid_data.confidence)
670 + wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input);
671 + }
672 + }
673 +@@ -2625,6 +2629,8 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev,
674 +
675 + wacom_wac->is_invalid_bt_frame = false;
676 +
677 ++ hid_data->confidence = true;
678 ++
679 + for (i = 0; i < report->maxfield; i++) {
680 + struct hid_field *field = report->field[i];
681 + int j;
682 +diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
683 +index e3835407e8d23..8dea7cb298e69 100644
684 +--- a/drivers/hid/wacom_wac.h
685 ++++ b/drivers/hid/wacom_wac.h
686 +@@ -300,6 +300,7 @@ struct hid_data {
687 + bool tipswitch;
688 + bool barrelswitch;
689 + bool barrelswitch2;
690 ++ bool confidence;
691 + int x;
692 + int y;
693 + int pressure;
694 +diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
695 +index 06383b26712b6..56857ac0a0be2 100644
696 +--- a/drivers/media/cec/cec-adap.c
697 ++++ b/drivers/media/cec/cec-adap.c
698 +@@ -1191,6 +1191,7 @@ void cec_received_msg_ts(struct cec_adapter *adap,
699 + if (abort)
700 + dst->rx_status |= CEC_RX_STATUS_FEATURE_ABORT;
701 + msg->flags = dst->flags;
702 ++ msg->sequence = dst->sequence;
703 + /* Remove it from the wait_queue */
704 + list_del_init(&data->list);
705 +
706 +diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
707 +index cb54fa2120d72..deafcc56adee6 100644
708 +--- a/drivers/mmc/host/sdhci.c
709 ++++ b/drivers/mmc/host/sdhci.c
710 +@@ -749,7 +749,19 @@ static void sdhci_adma_table_pre(struct sdhci_host *host,
711 + len -= offset;
712 + }
713 +
714 +- BUG_ON(len > 65536);
715 ++ /*
716 ++ * The block layer forces a minimum segment size of PAGE_SIZE,
717 ++ * so 'len' can be too big here if PAGE_SIZE >= 64KiB. Write
718 ++ * multiple descriptors, noting that the ADMA table is sized
719 ++ * for 4KiB chunks anyway, so it will be big enough.
720 ++ */
721 ++ while (len > host->max_adma) {
722 ++ int n = 32 * 1024; /* 32KiB*/
723 ++
724 ++ __sdhci_adma_write_desc(host, &desc, addr, n, ADMA2_TRAN_VALID);
725 ++ addr += n;
726 ++ len -= n;
727 ++ }
728 +
729 + /* tran, valid */
730 + if (len)
731 +@@ -3568,6 +3580,7 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
732 + * descriptor for each segment, plus 1 for a nop end descriptor.
733 + */
734 + host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;
735 ++ host->max_adma = 65536;
736 +
737 + return host;
738 + }
739 +@@ -4221,10 +4234,12 @@ int sdhci_setup_host(struct sdhci_host *host)
740 + * be larger than 64 KiB though.
741 + */
742 + if (host->flags & SDHCI_USE_ADMA) {
743 +- if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
744 ++ if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) {
745 ++ host->max_adma = 65532; /* 32-bit alignment */
746 + mmc->max_seg_size = 65535;
747 +- else
748 ++ } else {
749 + mmc->max_seg_size = 65536;
750 ++ }
751 + } else {
752 + mmc->max_seg_size = mmc->max_req_size;
753 + }
754 +diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
755 +index 96a0a8f97f559..54f9d6720f132 100644
756 +--- a/drivers/mmc/host/sdhci.h
757 ++++ b/drivers/mmc/host/sdhci.h
758 +@@ -349,7 +349,8 @@ struct sdhci_adma2_64_desc {
759 +
760 + /*
761 + * Maximum segments assuming a 512KiB maximum requisition size and a minimum
762 +- * 4KiB page size.
763 ++ * 4KiB page size. Note this also allows enough for multiple descriptors in
764 ++ * case of PAGE_SIZE >= 64KiB.
765 + */
766 + #define SDHCI_MAX_SEGS 128
767 +
768 +@@ -547,6 +548,7 @@ struct sdhci_host {
769 + unsigned int blocks; /* remaining PIO blocks */
770 +
771 + int sg_count; /* Mapped sg entries */
772 ++ int max_adma; /* Max. length in ADMA descriptor */
773 +
774 + void *adma_table; /* ADMA descriptor table */
775 + void *align_buffer; /* Bounce buffer */
776 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
777 +index db2e9dd5681eb..ce6a4e1965e1d 100644
778 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
779 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
780 +@@ -644,9 +644,9 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
781 + roundup_size = ilog2(roundup_size);
782 +
783 + for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
784 +- tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
785 ++ tc_valid[i] = 1;
786 + tc_size[i] = roundup_size;
787 +- tc_offset[i] = rss_size * i;
788 ++ tc_offset[i] = (hdev->hw_tc_map & BIT(i)) ? rss_size * i : 0;
789 + }
790 +
791 + hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
792 +diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
793 +index ad1e796e5544a..4e0e1b02d615e 100644
794 +--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
795 ++++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
796 +@@ -719,12 +719,31 @@ static int iavf_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
797 + *
798 + * Change the ITR settings for a specific queue.
799 + **/
800 +-static void iavf_set_itr_per_queue(struct iavf_adapter *adapter,
801 +- struct ethtool_coalesce *ec, int queue)
802 ++static int iavf_set_itr_per_queue(struct iavf_adapter *adapter,
803 ++ struct ethtool_coalesce *ec, int queue)
804 + {
805 + struct iavf_ring *rx_ring = &adapter->rx_rings[queue];
806 + struct iavf_ring *tx_ring = &adapter->tx_rings[queue];
807 + struct iavf_q_vector *q_vector;
808 ++ u16 itr_setting;
809 ++
810 ++ itr_setting = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
811 ++
812 ++ if (ec->rx_coalesce_usecs != itr_setting &&
813 ++ ec->use_adaptive_rx_coalesce) {
814 ++ netif_info(adapter, drv, adapter->netdev,
815 ++ "Rx interrupt throttling cannot be changed if adaptive-rx is enabled\n");
816 ++ return -EINVAL;
817 ++ }
818 ++
819 ++ itr_setting = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
820 ++
821 ++ if (ec->tx_coalesce_usecs != itr_setting &&
822 ++ ec->use_adaptive_tx_coalesce) {
823 ++ netif_info(adapter, drv, adapter->netdev,
824 ++ "Tx interrupt throttling cannot be changed if adaptive-tx is enabled\n");
825 ++ return -EINVAL;
826 ++ }
827 +
828 + rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs);
829 + tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs);
830 +@@ -747,6 +766,7 @@ static void iavf_set_itr_per_queue(struct iavf_adapter *adapter,
831 + * the Tx and Rx ITR values based on the values we have entered
832 + * into the q_vector, no need to write the values now.
833 + */
834 ++ return 0;
835 + }
836 +
837 + /**
838 +@@ -788,9 +808,11 @@ static int __iavf_set_coalesce(struct net_device *netdev,
839 + */
840 + if (queue < 0) {
841 + for (i = 0; i < adapter->num_active_queues; i++)
842 +- iavf_set_itr_per_queue(adapter, ec, i);
843 ++ if (iavf_set_itr_per_queue(adapter, ec, i))
844 ++ return -EINVAL;
845 + } else if (queue < adapter->num_active_queues) {
846 +- iavf_set_itr_per_queue(adapter, ec, queue);
847 ++ if (iavf_set_itr_per_queue(adapter, ec, queue))
848 ++ return -EINVAL;
849 + } else {
850 + netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n",
851 + adapter->num_active_queues - 1);
852 +diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
853 +index 158feb0ab2739..c11244a9b7e69 100644
854 +--- a/drivers/net/ethernet/intel/igb/igb_main.c
855 ++++ b/drivers/net/ethernet/intel/igb/igb_main.c
856 +@@ -7752,7 +7752,7 @@ static int igb_poll(struct napi_struct *napi, int budget)
857 + if (likely(napi_complete_done(napi, work_done)))
858 + igb_ring_irq_enable(q_vector);
859 +
860 +- return min(work_done, budget - 1);
861 ++ return work_done;
862 + }
863 +
864 + /**
865 +diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
866 +index 6030c90d50ccb..bf7832b34a000 100644
867 +--- a/drivers/net/ethernet/mscc/ocelot.c
868 ++++ b/drivers/net/ethernet/mscc/ocelot.c
869 +@@ -1024,12 +1024,6 @@ static int ocelot_hwstamp_set(struct ocelot_port *port, struct ifreq *ifr)
870 + switch (cfg.rx_filter) {
871 + case HWTSTAMP_FILTER_NONE:
872 + break;
873 +- case HWTSTAMP_FILTER_ALL:
874 +- case HWTSTAMP_FILTER_SOME:
875 +- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
876 +- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
877 +- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
878 +- case HWTSTAMP_FILTER_NTP_ALL:
879 + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
880 + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
881 + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
882 +@@ -1189,7 +1183,10 @@ static int ocelot_get_ts_info(struct net_device *dev,
883 + SOF_TIMESTAMPING_RAW_HARDWARE;
884 + info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
885 + BIT(HWTSTAMP_TX_ONESTEP_SYNC);
886 +- info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
887 ++ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
888 ++ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
889 ++ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
890 ++ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
891 +
892 + return 0;
893 + }
894 +diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
895 +index 250f510b1d212..3dcb09f17b77f 100644
896 +--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
897 ++++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
898 +@@ -557,7 +557,6 @@ struct nfp_net_dp {
899 + * @exn_name: Name for Exception interrupt
900 + * @shared_handler: Handler for shared interrupts
901 + * @shared_name: Name for shared interrupt
902 +- * @me_freq_mhz: ME clock_freq (MHz)
903 + * @reconfig_lock: Protects @reconfig_posted, @reconfig_timer_active,
904 + * @reconfig_sync_present and HW reconfiguration request
905 + * regs/machinery from async requests (sync must take
906 +@@ -639,8 +638,6 @@ struct nfp_net {
907 + irq_handler_t shared_handler;
908 + char shared_name[IFNAMSIZ + 8];
909 +
910 +- u32 me_freq_mhz;
911 +-
912 + bool link_up;
913 + spinlock_t link_status_lock;
914 +
915 +diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
916 +index 2354dec994184..89e578e25ff8f 100644
917 +--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
918 ++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
919 +@@ -1269,7 +1269,7 @@ static int nfp_net_set_coalesce(struct net_device *netdev,
920 + * ME timestamp ticks. There are 16 ME clock cycles for each timestamp
921 + * count.
922 + */
923 +- factor = nn->me_freq_mhz / 16;
924 ++ factor = nn->tlv_caps.me_freq_mhz / 16;
925 +
926 + /* Each pair of (usecs, max_frames) fields specifies that interrupts
927 + * should be coalesced until
928 +diff --git a/drivers/net/phy/mdio-aspeed.c b/drivers/net/phy/mdio-aspeed.c
929 +index cad820568f751..966c3b4ad59d1 100644
930 +--- a/drivers/net/phy/mdio-aspeed.c
931 ++++ b/drivers/net/phy/mdio-aspeed.c
932 +@@ -61,6 +61,13 @@ static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum)
933 +
934 + iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL);
935 +
936 ++ rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl,
937 ++ !(ctrl & ASPEED_MDIO_CTRL_FIRE),
938 ++ ASPEED_MDIO_INTERVAL_US,
939 ++ ASPEED_MDIO_TIMEOUT_US);
940 ++ if (rc < 0)
941 ++ return rc;
942 ++
943 + rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_DATA, data,
944 + data & ASPEED_MDIO_DATA_IDLE,
945 + ASPEED_MDIO_INTERVAL_US,
946 +diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
947 +index 7d389c2cc9026..d6f44343213cc 100644
948 +--- a/drivers/net/xen-netfront.c
949 ++++ b/drivers/net/xen-netfront.c
950 +@@ -121,21 +121,17 @@ struct netfront_queue {
951 +
952 + /*
953 + * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
954 +- * are linked from tx_skb_freelist through skb_entry.link.
955 +- *
956 +- * NB. Freelist index entries are always going to be less than
957 +- * PAGE_OFFSET, whereas pointers to skbs will always be equal or
958 +- * greater than PAGE_OFFSET: we use this property to distinguish
959 +- * them.
960 ++ * are linked from tx_skb_freelist through tx_link.
961 + */
962 +- union skb_entry {
963 +- struct sk_buff *skb;
964 +- unsigned long link;
965 +- } tx_skbs[NET_TX_RING_SIZE];
966 ++ struct sk_buff *tx_skbs[NET_TX_RING_SIZE];
967 ++ unsigned short tx_link[NET_TX_RING_SIZE];
968 ++#define TX_LINK_NONE 0xffff
969 ++#define TX_PENDING 0xfffe
970 + grant_ref_t gref_tx_head;
971 + grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
972 + struct page *grant_tx_page[NET_TX_RING_SIZE];
973 + unsigned tx_skb_freelist;
974 ++ unsigned int tx_pend_queue;
975 +
976 + spinlock_t rx_lock ____cacheline_aligned_in_smp;
977 + struct xen_netif_rx_front_ring rx;
978 +@@ -161,6 +157,9 @@ struct netfront_info {
979 + struct netfront_stats __percpu *rx_stats;
980 + struct netfront_stats __percpu *tx_stats;
981 +
982 ++ /* Is device behaving sane? */
983 ++ bool broken;
984 ++
985 + atomic_t rx_gso_checksum_fixup;
986 + };
987 +
988 +@@ -169,33 +168,25 @@ struct netfront_rx_info {
989 + struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
990 + };
991 +
992 +-static void skb_entry_set_link(union skb_entry *list, unsigned short id)
993 +-{
994 +- list->link = id;
995 +-}
996 +-
997 +-static int skb_entry_is_link(const union skb_entry *list)
998 +-{
999 +- BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
1000 +- return (unsigned long)list->skb < PAGE_OFFSET;
1001 +-}
1002 +-
1003 + /*
1004 + * Access macros for acquiring freeing slots in tx_skbs[].
1005 + */
1006 +
1007 +-static void add_id_to_freelist(unsigned *head, union skb_entry *list,
1008 +- unsigned short id)
1009 ++static void add_id_to_list(unsigned *head, unsigned short *list,
1010 ++ unsigned short id)
1011 + {
1012 +- skb_entry_set_link(&list[id], *head);
1013 ++ list[id] = *head;
1014 + *head = id;
1015 + }
1016 +
1017 +-static unsigned short get_id_from_freelist(unsigned *head,
1018 +- union skb_entry *list)
1019 ++static unsigned short get_id_from_list(unsigned *head, unsigned short *list)
1020 + {
1021 + unsigned int id = *head;
1022 +- *head = list[id].link;
1023 ++
1024 ++ if (id != TX_LINK_NONE) {
1025 ++ *head = list[id];
1026 ++ list[id] = TX_LINK_NONE;
1027 ++ }
1028 + return id;
1029 + }
1030 +
1031 +@@ -351,7 +342,7 @@ static int xennet_open(struct net_device *dev)
1032 + unsigned int i = 0;
1033 + struct netfront_queue *queue = NULL;
1034 +
1035 +- if (!np->queues)
1036 ++ if (!np->queues || np->broken)
1037 + return -ENODEV;
1038 +
1039 + for (i = 0; i < num_queues; ++i) {
1040 +@@ -379,27 +370,47 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
1041 + unsigned short id;
1042 + struct sk_buff *skb;
1043 + bool more_to_do;
1044 ++ const struct device *dev = &queue->info->netdev->dev;
1045 +
1046 + BUG_ON(!netif_carrier_ok(queue->info->netdev));
1047 +
1048 + do {
1049 + prod = queue->tx.sring->rsp_prod;
1050 ++ if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) {
1051 ++ dev_alert(dev, "Illegal number of responses %u\n",
1052 ++ prod - queue->tx.rsp_cons);
1053 ++ goto err;
1054 ++ }
1055 + rmb(); /* Ensure we see responses up to 'rp'. */
1056 +
1057 + for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
1058 +- struct xen_netif_tx_response *txrsp;
1059 ++ struct xen_netif_tx_response txrsp;
1060 +
1061 +- txrsp = RING_GET_RESPONSE(&queue->tx, cons);
1062 +- if (txrsp->status == XEN_NETIF_RSP_NULL)
1063 ++ RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
1064 ++ if (txrsp.status == XEN_NETIF_RSP_NULL)
1065 + continue;
1066 +
1067 +- id = txrsp->id;
1068 +- skb = queue->tx_skbs[id].skb;
1069 ++ id = txrsp.id;
1070 ++ if (id >= RING_SIZE(&queue->tx)) {
1071 ++ dev_alert(dev,
1072 ++ "Response has incorrect id (%u)\n",
1073 ++ id);
1074 ++ goto err;
1075 ++ }
1076 ++ if (queue->tx_link[id] != TX_PENDING) {
1077 ++ dev_alert(dev,
1078 ++ "Response for inactive request\n");
1079 ++ goto err;
1080 ++ }
1081 ++
1082 ++ queue->tx_link[id] = TX_LINK_NONE;
1083 ++ skb = queue->tx_skbs[id];
1084 ++ queue->tx_skbs[id] = NULL;
1085 + if (unlikely(gnttab_query_foreign_access(
1086 + queue->grant_tx_ref[id]) != 0)) {
1087 +- pr_alert("%s: warning -- grant still in use by backend domain\n",
1088 +- __func__);
1089 +- BUG();
1090 ++ dev_alert(dev,
1091 ++ "Grant still in use by backend domain\n");
1092 ++ goto err;
1093 + }
1094 + gnttab_end_foreign_access_ref(
1095 + queue->grant_tx_ref[id], GNTMAP_readonly);
1096 +@@ -407,7 +418,7 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
1097 + &queue->gref_tx_head, queue->grant_tx_ref[id]);
1098 + queue->grant_tx_ref[id] = GRANT_INVALID_REF;
1099 + queue->grant_tx_page[id] = NULL;
1100 +- add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id);
1101 ++ add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
1102 + dev_kfree_skb_irq(skb);
1103 + }
1104 +
1105 +@@ -417,13 +428,20 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
1106 + } while (more_to_do);
1107 +
1108 + xennet_maybe_wake_tx(queue);
1109 ++
1110 ++ return;
1111 ++
1112 ++ err:
1113 ++ queue->info->broken = true;
1114 ++ dev_alert(dev, "Disabled for further use\n");
1115 + }
1116 +
1117 + struct xennet_gnttab_make_txreq {
1118 + struct netfront_queue *queue;
1119 + struct sk_buff *skb;
1120 + struct page *page;
1121 +- struct xen_netif_tx_request *tx; /* Last request */
1122 ++ struct xen_netif_tx_request *tx; /* Last request on ring page */
1123 ++ struct xen_netif_tx_request tx_local; /* Last request local copy*/
1124 + unsigned int size;
1125 + };
1126 +
1127 +@@ -439,7 +457,7 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
1128 + struct netfront_queue *queue = info->queue;
1129 + struct sk_buff *skb = info->skb;
1130 +
1131 +- id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
1132 ++ id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link);
1133 + tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
1134 + ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
1135 + WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
1136 +@@ -447,34 +465,37 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
1137 + gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
1138 + gfn, GNTMAP_readonly);
1139 +
1140 +- queue->tx_skbs[id].skb = skb;
1141 ++ queue->tx_skbs[id] = skb;
1142 + queue->grant_tx_page[id] = page;
1143 + queue->grant_tx_ref[id] = ref;
1144 +
1145 +- tx->id = id;
1146 +- tx->gref = ref;
1147 +- tx->offset = offset;
1148 +- tx->size = len;
1149 +- tx->flags = 0;
1150 ++ info->tx_local.id = id;
1151 ++ info->tx_local.gref = ref;
1152 ++ info->tx_local.offset = offset;
1153 ++ info->tx_local.size = len;
1154 ++ info->tx_local.flags = 0;
1155 ++
1156 ++ *tx = info->tx_local;
1157 ++
1158 ++ /*
1159 ++ * Put the request in the pending queue, it will be set to be pending
1160 ++ * when the producer index is about to be raised.
1161 ++ */
1162 ++ add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id);
1163 +
1164 + info->tx = tx;
1165 +- info->size += tx->size;
1166 ++ info->size += info->tx_local.size;
1167 + }
1168 +
1169 + static struct xen_netif_tx_request *xennet_make_first_txreq(
1170 +- struct netfront_queue *queue, struct sk_buff *skb,
1171 +- struct page *page, unsigned int offset, unsigned int len)
1172 ++ struct xennet_gnttab_make_txreq *info,
1173 ++ unsigned int offset, unsigned int len)
1174 + {
1175 +- struct xennet_gnttab_make_txreq info = {
1176 +- .queue = queue,
1177 +- .skb = skb,
1178 +- .page = page,
1179 +- .size = 0,
1180 +- };
1181 ++ info->size = 0;
1182 +
1183 +- gnttab_for_one_grant(page, offset, len, xennet_tx_setup_grant, &info);
1184 ++ gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, info);
1185 +
1186 +- return info.tx;
1187 ++ return info->tx;
1188 + }
1189 +
1190 + static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
1191 +@@ -487,35 +508,27 @@ static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
1192 + xennet_tx_setup_grant(gfn, offset, len, data);
1193 + }
1194 +
1195 +-static struct xen_netif_tx_request *xennet_make_txreqs(
1196 +- struct netfront_queue *queue, struct xen_netif_tx_request *tx,
1197 +- struct sk_buff *skb, struct page *page,
1198 ++static void xennet_make_txreqs(
1199 ++ struct xennet_gnttab_make_txreq *info,
1200 ++ struct page *page,
1201 + unsigned int offset, unsigned int len)
1202 + {
1203 +- struct xennet_gnttab_make_txreq info = {
1204 +- .queue = queue,
1205 +- .skb = skb,
1206 +- .tx = tx,
1207 +- };
1208 +-
1209 + /* Skip unused frames from start of page */
1210 + page += offset >> PAGE_SHIFT;
1211 + offset &= ~PAGE_MASK;
1212 +
1213 + while (len) {
1214 +- info.page = page;
1215 +- info.size = 0;
1216 ++ info->page = page;
1217 ++ info->size = 0;
1218 +
1219 + gnttab_foreach_grant_in_range(page, offset, len,
1220 + xennet_make_one_txreq,
1221 +- &info);
1222 ++ info);
1223 +
1224 + page++;
1225 + offset = 0;
1226 +- len -= info.size;
1227 ++ len -= info->size;
1228 + }
1229 +-
1230 +- return info.tx;
1231 + }
1232 +
1233 + /*
1234 +@@ -562,13 +575,22 @@ static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
1235 + return queue_idx;
1236 + }
1237 +
1238 ++static void xennet_mark_tx_pending(struct netfront_queue *queue)
1239 ++{
1240 ++ unsigned int i;
1241 ++
1242 ++ while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) !=
1243 ++ TX_LINK_NONE)
1244 ++ queue->tx_link[i] = TX_PENDING;
1245 ++}
1246 ++
1247 + #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
1248 +
1249 + static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
1250 + {
1251 + struct netfront_info *np = netdev_priv(dev);
1252 + struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
1253 +- struct xen_netif_tx_request *tx, *first_tx;
1254 ++ struct xen_netif_tx_request *first_tx;
1255 + unsigned int i;
1256 + int notify;
1257 + int slots;
1258 +@@ -577,6 +599,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
1259 + unsigned int len;
1260 + unsigned long flags;
1261 + struct netfront_queue *queue = NULL;
1262 ++ struct xennet_gnttab_make_txreq info = { };
1263 + unsigned int num_queues = dev->real_num_tx_queues;
1264 + u16 queue_index;
1265 + struct sk_buff *nskb;
1266 +@@ -584,6 +607,8 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
1267 + /* Drop the packet if no queues are set up */
1268 + if (num_queues < 1)
1269 + goto drop;
1270 ++ if (unlikely(np->broken))
1271 ++ goto drop;
1272 + /* Determine which queue to transmit this SKB on */
1273 + queue_index = skb_get_queue_mapping(skb);
1274 + queue = &np->queues[queue_index];
1275 +@@ -634,21 +659,24 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
1276 + }
1277 +
1278 + /* First request for the linear area. */
1279 +- first_tx = tx = xennet_make_first_txreq(queue, skb,
1280 +- page, offset, len);
1281 +- offset += tx->size;
1282 ++ info.queue = queue;
1283 ++ info.skb = skb;
1284 ++ info.page = page;
1285 ++ first_tx = xennet_make_first_txreq(&info, offset, len);
1286 ++ offset += info.tx_local.size;
1287 + if (offset == PAGE_SIZE) {
1288 + page++;
1289 + offset = 0;
1290 + }
1291 +- len -= tx->size;
1292 ++ len -= info.tx_local.size;
1293 +
1294 + if (skb->ip_summed == CHECKSUM_PARTIAL)
1295 + /* local packet? */
1296 +- tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
1297 ++ first_tx->flags |= XEN_NETTXF_csum_blank |
1298 ++ XEN_NETTXF_data_validated;
1299 + else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1300 + /* remote but checksummed. */
1301 +- tx->flags |= XEN_NETTXF_data_validated;
1302 ++ first_tx->flags |= XEN_NETTXF_data_validated;
1303 +
1304 + /* Optional extra info after the first request. */
1305 + if (skb_shinfo(skb)->gso_size) {
1306 +@@ -657,7 +685,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
1307 + gso = (struct xen_netif_extra_info *)
1308 + RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
1309 +
1310 +- tx->flags |= XEN_NETTXF_extra_info;
1311 ++ first_tx->flags |= XEN_NETTXF_extra_info;
1312 +
1313 + gso->u.gso.size = skb_shinfo(skb)->gso_size;
1314 + gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
1315 +@@ -671,12 +699,12 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
1316 + }
1317 +
1318 + /* Requests for the rest of the linear area. */
1319 +- tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
1320 ++ xennet_make_txreqs(&info, page, offset, len);
1321 +
1322 + /* Requests for all the frags. */
1323 + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1324 + skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1325 +- tx = xennet_make_txreqs(queue, tx, skb, skb_frag_page(frag),
1326 ++ xennet_make_txreqs(&info, skb_frag_page(frag),
1327 + skb_frag_off(frag),
1328 + skb_frag_size(frag));
1329 + }
1330 +@@ -684,6 +712,8 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
1331 + /* First request has the packet length. */
1332 + first_tx->size = skb->len;
1333 +
1334 ++ xennet_mark_tx_pending(queue);
1335 ++
1336 + RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
1337 + if (notify)
1338 + notify_remote_via_irq(queue->tx_irq);
1339 +@@ -741,7 +771,7 @@ static int xennet_get_extras(struct netfront_queue *queue,
1340 + RING_IDX rp)
1341 +
1342 + {
1343 +- struct xen_netif_extra_info *extra;
1344 ++ struct xen_netif_extra_info extra;
1345 + struct device *dev = &queue->info->netdev->dev;
1346 + RING_IDX cons = queue->rx.rsp_cons;
1347 + int err = 0;
1348 +@@ -757,24 +787,22 @@ static int xennet_get_extras(struct netfront_queue *queue,
1349 + break;
1350 + }
1351 +
1352 +- extra = (struct xen_netif_extra_info *)
1353 +- RING_GET_RESPONSE(&queue->rx, ++cons);
1354 ++ RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);
1355 +
1356 +- if (unlikely(!extra->type ||
1357 +- extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1358 ++ if (unlikely(!extra.type ||
1359 ++ extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1360 + if (net_ratelimit())
1361 + dev_warn(dev, "Invalid extra type: %d\n",
1362 +- extra->type);
1363 ++ extra.type);
1364 + err = -EINVAL;
1365 + } else {
1366 +- memcpy(&extras[extra->type - 1], extra,
1367 +- sizeof(*extra));
1368 ++ extras[extra.type - 1] = extra;
1369 + }
1370 +
1371 + skb = xennet_get_rx_skb(queue, cons);
1372 + ref = xennet_get_rx_ref(queue, cons);
1373 + xennet_move_rx_slot(queue, skb, ref);
1374 +- } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
1375 ++ } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
1376 +
1377 + queue->rx.rsp_cons = cons;
1378 + return err;
1379 +@@ -784,7 +812,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
1380 + struct netfront_rx_info *rinfo, RING_IDX rp,
1381 + struct sk_buff_head *list)
1382 + {
1383 +- struct xen_netif_rx_response *rx = &rinfo->rx;
1384 ++ struct xen_netif_rx_response *rx = &rinfo->rx, rx_local;
1385 + struct xen_netif_extra_info *extras = rinfo->extras;
1386 + struct device *dev = &queue->info->netdev->dev;
1387 + RING_IDX cons = queue->rx.rsp_cons;
1388 +@@ -842,7 +870,8 @@ next:
1389 + break;
1390 + }
1391 +
1392 +- rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
1393 ++ RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local);
1394 ++ rx = &rx_local;
1395 + skb = xennet_get_rx_skb(queue, cons + slots);
1396 + ref = xennet_get_rx_ref(queue, cons + slots);
1397 + slots++;
1398 +@@ -897,10 +926,11 @@ static int xennet_fill_frags(struct netfront_queue *queue,
1399 + struct sk_buff *nskb;
1400 +
1401 + while ((nskb = __skb_dequeue(list))) {
1402 +- struct xen_netif_rx_response *rx =
1403 +- RING_GET_RESPONSE(&queue->rx, ++cons);
1404 ++ struct xen_netif_rx_response rx;
1405 + skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
1406 +
1407 ++ RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);
1408 ++
1409 + if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
1410 + unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
1411 +
1412 +@@ -915,7 +945,7 @@ static int xennet_fill_frags(struct netfront_queue *queue,
1413 +
1414 + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1415 + skb_frag_page(nfrag),
1416 +- rx->offset, rx->status, PAGE_SIZE);
1417 ++ rx.offset, rx.status, PAGE_SIZE);
1418 +
1419 + skb_shinfo(nskb)->nr_frags = 0;
1420 + kfree_skb(nskb);
1421 +@@ -1008,12 +1038,19 @@ static int xennet_poll(struct napi_struct *napi, int budget)
1422 + skb_queue_head_init(&tmpq);
1423 +
1424 + rp = queue->rx.sring->rsp_prod;
1425 ++ if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) {
1426 ++ dev_alert(&dev->dev, "Illegal number of responses %u\n",
1427 ++ rp - queue->rx.rsp_cons);
1428 ++ queue->info->broken = true;
1429 ++ spin_unlock(&queue->rx_lock);
1430 ++ return 0;
1431 ++ }
1432 + rmb(); /* Ensure we see queued responses up to 'rp'. */
1433 +
1434 + i = queue->rx.rsp_cons;
1435 + work_done = 0;
1436 + while ((i != rp) && (work_done < budget)) {
1437 +- memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
1438 ++ RING_COPY_RESPONSE(&queue->rx, i, rx);
1439 + memset(extras, 0, sizeof(rinfo.extras));
1440 +
1441 + err = xennet_get_responses(queue, &rinfo, rp, &tmpq);
1442 +@@ -1135,17 +1172,18 @@ static void xennet_release_tx_bufs(struct netfront_queue *queue)
1443 +
1444 + for (i = 0; i < NET_TX_RING_SIZE; i++) {
1445 + /* Skip over entries which are actually freelist references */
1446 +- if (skb_entry_is_link(&queue->tx_skbs[i]))
1447 ++ if (!queue->tx_skbs[i])
1448 + continue;
1449 +
1450 +- skb = queue->tx_skbs[i].skb;
1451 ++ skb = queue->tx_skbs[i];
1452 ++ queue->tx_skbs[i] = NULL;
1453 + get_page(queue->grant_tx_page[i]);
1454 + gnttab_end_foreign_access(queue->grant_tx_ref[i],
1455 + GNTMAP_readonly,
1456 + (unsigned long)page_address(queue->grant_tx_page[i]));
1457 + queue->grant_tx_page[i] = NULL;
1458 + queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1459 +- add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i);
1460 ++ add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
1461 + dev_kfree_skb_irq(skb);
1462 + }
1463 + }
1464 +@@ -1225,6 +1263,9 @@ static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1465 + struct netfront_queue *queue = dev_id;
1466 + unsigned long flags;
1467 +
1468 ++ if (queue->info->broken)
1469 ++ return IRQ_HANDLED;
1470 ++
1471 + spin_lock_irqsave(&queue->tx_lock, flags);
1472 + xennet_tx_buf_gc(queue);
1473 + spin_unlock_irqrestore(&queue->tx_lock, flags);
1474 +@@ -1237,6 +1278,9 @@ static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1475 + struct netfront_queue *queue = dev_id;
1476 + struct net_device *dev = queue->info->netdev;
1477 +
1478 ++ if (queue->info->broken)
1479 ++ return IRQ_HANDLED;
1480 ++
1481 + if (likely(netif_carrier_ok(dev) &&
1482 + RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
1483 + napi_schedule(&queue->napi);
1484 +@@ -1258,6 +1302,10 @@ static void xennet_poll_controller(struct net_device *dev)
1485 + struct netfront_info *info = netdev_priv(dev);
1486 + unsigned int num_queues = dev->real_num_tx_queues;
1487 + unsigned int i;
1488 ++
1489 ++ if (info->broken)
1490 ++ return;
1491 ++
1492 + for (i = 0; i < num_queues; ++i)
1493 + xennet_interrupt(0, &info->queues[i]);
1494 + }
1495 +@@ -1627,13 +1675,15 @@ static int xennet_init_queue(struct netfront_queue *queue)
1496 + snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
1497 + devid, queue->id);
1498 +
1499 +- /* Initialise tx_skbs as a free chain containing every entry. */
1500 ++ /* Initialise tx_skb_freelist as a free chain containing every entry. */
1501 + queue->tx_skb_freelist = 0;
1502 ++ queue->tx_pend_queue = TX_LINK_NONE;
1503 + for (i = 0; i < NET_TX_RING_SIZE; i++) {
1504 +- skb_entry_set_link(&queue->tx_skbs[i], i+1);
1505 ++ queue->tx_link[i] = i + 1;
1506 + queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1507 + queue->grant_tx_page[i] = NULL;
1508 + }
1509 ++ queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;
1510 +
1511 + /* Clear out rx_skbs */
1512 + for (i = 0; i < NET_RX_RING_SIZE; i++) {
1513 +@@ -1838,6 +1888,9 @@ static int talk_to_netback(struct xenbus_device *dev,
1514 + if (info->queues)
1515 + xennet_destroy_queues(info);
1516 +
1517 ++ /* For the case of a reconnect reset the "broken" indicator. */
1518 ++ info->broken = false;
1519 ++
1520 + err = xennet_create_queues(info, &num_queues);
1521 + if (err < 0) {
1522 + xenbus_dev_fatal(dev, err, "creating queues");
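The xen-netfront hunks above switch from holding pointers into the shared RX ring (RING_GET_RESPONSE) to copying each response into a stack-local struct (RING_COPY_RESPONSE), so a malicious or buggy backend cannot change a response between the frontend's check and its use, and an implausible producer index marks the device broken instead of being trusted. A minimal userspace sketch of that copy-then-use pattern follows; the 256-entry ring layout and read_response() helper are made up for illustration and are not the Xen ring macros.

#include <stdint.h>

struct rsp {
    uint32_t offset;
    uint32_t status;
};

/* Shared area that the other end may rewrite at any moment. */
struct shared_ring {
    volatile struct rsp ring[256];
};

/*
 * Snapshot the slot into private memory first, then validate and use only
 * the private copy; re-reading the shared slot would let a concurrent
 * writer change the data between the check and the use.
 */
static struct rsp read_response(volatile struct shared_ring *s, uint32_t idx)
{
    volatile struct rsp *slot = &s->ring[idx & 255];
    struct rsp local;

    local.offset = slot->offset;    /* each field read exactly once */
    local.status = slot->status;
    return local;
}

int main(void)
{
    static struct shared_ring ring;
    struct rsp r = read_response(&ring, 7);

    return r.status != 0;           /* zeroed ring: status is 0 */
}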
1523 +diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
1524 +index 6ca17a0babae2..1c8d16b0245b1 100644
1525 +--- a/drivers/nvme/target/io-cmd-file.c
1526 ++++ b/drivers/nvme/target/io-cmd-file.c
1527 +@@ -8,6 +8,7 @@
1528 + #include <linux/uio.h>
1529 + #include <linux/falloc.h>
1530 + #include <linux/file.h>
1531 ++#include <linux/fs.h>
1532 + #include "nvmet.h"
1533 +
1534 + #define NVMET_MAX_MPOOL_BVEC 16
1535 +@@ -254,7 +255,8 @@ static void nvmet_file_execute_rw(struct nvmet_req *req)
1536 +
1537 + if (req->ns->buffered_io) {
1538 + if (likely(!req->f.mpool_alloc) &&
1539 +- nvmet_file_execute_io(req, IOCB_NOWAIT))
1540 ++ (req->ns->file->f_mode & FMODE_NOWAIT) &&
1541 ++ nvmet_file_execute_io(req, IOCB_NOWAIT))
1542 + return;
1543 + nvmet_file_submit_buffered_io(req);
1544 + } else
1545 +diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
1546 +index fac1985870765..4341c72446628 100644
1547 +--- a/drivers/nvme/target/tcp.c
1548 ++++ b/drivers/nvme/target/tcp.c
1549 +@@ -631,10 +631,11 @@ static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
1550 + static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd)
1551 + {
1552 + struct nvmet_tcp_queue *queue = cmd->queue;
1553 ++ int left = NVME_TCP_DIGEST_LENGTH - cmd->offset;
1554 + struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
1555 + struct kvec iov = {
1556 + .iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
1557 +- .iov_len = NVME_TCP_DIGEST_LENGTH - cmd->offset
1558 ++ .iov_len = left
1559 + };
1560 + int ret;
1561 +
1562 +@@ -643,6 +644,10 @@ static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd)
1563 + return ret;
1564 +
1565 + cmd->offset += ret;
1566 ++ left -= ret;
1567 ++
1568 ++ if (left)
1569 ++ return -EAGAIN;
1570 +
1571 + if (queue->nvme_sq.sqhd_disabled) {
1572 + cmd->queue->snd_cmd = NULL;
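The nvmet-tcp hunk makes the digest sender tolerate short sends: it tracks how many bytes are still outstanding and returns -EAGAIN until the whole trailer is on the wire, instead of assuming a single send pushed everything. A self-contained sketch of that bookkeeping follows; send_some() is a stand-in for any non-blocking send that may accept fewer bytes than requested, and the 4-byte digest value is arbitrary.

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

/* Stands in for a non-blocking send; may accept fewer bytes than asked. */
static int send_some(const void *buf, size_t len)
{
    (void)buf;
    return (int)(len < 2 ? len : 2);    /* pretend only 2 bytes fit per call */
}

/*
 * Push a fixed-size trailer (for example a 4-byte digest), resuming from
 * *offset on each call.  Report -EAGAIN while anything is still pending so
 * the caller re-queues the command instead of treating it as finished.
 */
static int send_trailer(const unsigned char *trailer, size_t trailer_len,
                        size_t *offset)
{
    size_t left = trailer_len - *offset;
    int ret = send_some(trailer + *offset, left);

    if (ret <= 0)
        return ret;

    *offset += (size_t)ret;
    if ((size_t)ret < left)
        return -EAGAIN;                 /* partial send, call again later */

    return 0;                           /* trailer fully transmitted */
}

int main(void)
{
    unsigned char digest[4] = { 0xde, 0xad, 0xbe, 0xef };
    size_t off = 0;
    int ret;

    while ((ret = send_trailer(digest, sizeof(digest), &off)) == -EAGAIN)
        ;                               /* would normally go back to the work list */

    printf("sent %zu bytes, ret=%d\n", off, ret);
    return ret != 0;
}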
1573 +diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
1574 +index 45794ba643d40..9e208294946cd 100644
1575 +--- a/drivers/pci/controller/pci-aardvark.c
1576 ++++ b/drivers/pci/controller/pci-aardvark.c
1577 +@@ -9,6 +9,7 @@
1578 + */
1579 +
1580 + #include <linux/delay.h>
1581 ++#include <linux/gpio/consumer.h>
1582 + #include <linux/interrupt.h>
1583 + #include <linux/irq.h>
1584 + #include <linux/irqdomain.h>
1585 +@@ -17,6 +18,7 @@
1586 + #include <linux/init.h>
1587 + #include <linux/platform_device.h>
1588 + #include <linux/of_address.h>
1589 ++#include <linux/of_gpio.h>
1590 + #include <linux/of_pci.h>
1591 +
1592 + #include "../pci.h"
1593 +@@ -25,21 +27,8 @@
1594 + /* PCIe core registers */
1595 + #define PCIE_CORE_DEV_ID_REG 0x0
1596 + #define PCIE_CORE_CMD_STATUS_REG 0x4
1597 +-#define PCIE_CORE_CMD_IO_ACCESS_EN BIT(0)
1598 +-#define PCIE_CORE_CMD_MEM_ACCESS_EN BIT(1)
1599 +-#define PCIE_CORE_CMD_MEM_IO_REQ_EN BIT(2)
1600 + #define PCIE_CORE_DEV_REV_REG 0x8
1601 + #define PCIE_CORE_PCIEXP_CAP 0xc0
1602 +-#define PCIE_CORE_DEV_CTRL_STATS_REG 0xc8
1603 +-#define PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE (0 << 4)
1604 +-#define PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT 5
1605 +-#define PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE (0 << 11)
1606 +-#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT 12
1607 +-#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ 0x2
1608 +-#define PCIE_CORE_LINK_CTRL_STAT_REG 0xd0
1609 +-#define PCIE_CORE_LINK_L0S_ENTRY BIT(0)
1610 +-#define PCIE_CORE_LINK_TRAINING BIT(5)
1611 +-#define PCIE_CORE_LINK_WIDTH_SHIFT 20
1612 + #define PCIE_CORE_ERR_CAPCTL_REG 0x118
1613 + #define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX BIT(5)
1614 + #define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6)
1615 +@@ -122,6 +111,46 @@
1616 + #define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C)
1617 + #define PCIE_MSI_DATA_MASK GENMASK(15, 0)
1618 +
1619 ++/* PCIe window configuration */
1620 ++#define OB_WIN_BASE_ADDR 0x4c00
1621 ++#define OB_WIN_BLOCK_SIZE 0x20
1622 ++#define OB_WIN_COUNT 8
1623 ++#define OB_WIN_REG_ADDR(win, offset) (OB_WIN_BASE_ADDR + \
1624 ++ OB_WIN_BLOCK_SIZE * (win) + \
1625 ++ (offset))
1626 ++#define OB_WIN_MATCH_LS(win) OB_WIN_REG_ADDR(win, 0x00)
1627 ++#define OB_WIN_ENABLE BIT(0)
1628 ++#define OB_WIN_MATCH_MS(win) OB_WIN_REG_ADDR(win, 0x04)
1629 ++#define OB_WIN_REMAP_LS(win) OB_WIN_REG_ADDR(win, 0x08)
1630 ++#define OB_WIN_REMAP_MS(win) OB_WIN_REG_ADDR(win, 0x0c)
1631 ++#define OB_WIN_MASK_LS(win) OB_WIN_REG_ADDR(win, 0x10)
1632 ++#define OB_WIN_MASK_MS(win) OB_WIN_REG_ADDR(win, 0x14)
1633 ++#define OB_WIN_ACTIONS(win) OB_WIN_REG_ADDR(win, 0x18)
1634 ++#define OB_WIN_DEFAULT_ACTIONS (OB_WIN_ACTIONS(OB_WIN_COUNT-1) + 0x4)
1635 ++#define OB_WIN_FUNC_NUM_MASK GENMASK(31, 24)
1636 ++#define OB_WIN_FUNC_NUM_SHIFT 24
1637 ++#define OB_WIN_FUNC_NUM_ENABLE BIT(23)
1638 ++#define OB_WIN_BUS_NUM_BITS_MASK GENMASK(22, 20)
1639 ++#define OB_WIN_BUS_NUM_BITS_SHIFT 20
1640 ++#define OB_WIN_MSG_CODE_ENABLE BIT(22)
1641 ++#define OB_WIN_MSG_CODE_MASK GENMASK(21, 14)
1642 ++#define OB_WIN_MSG_CODE_SHIFT 14
1643 ++#define OB_WIN_MSG_PAYLOAD_LEN BIT(12)
1644 ++#define OB_WIN_ATTR_ENABLE BIT(11)
1645 ++#define OB_WIN_ATTR_TC_MASK GENMASK(10, 8)
1646 ++#define OB_WIN_ATTR_TC_SHIFT 8
1647 ++#define OB_WIN_ATTR_RELAXED BIT(7)
1648 ++#define OB_WIN_ATTR_NOSNOOP BIT(6)
1649 ++#define OB_WIN_ATTR_POISON BIT(5)
1650 ++#define OB_WIN_ATTR_IDO BIT(4)
1651 ++#define OB_WIN_TYPE_MASK GENMASK(3, 0)
1652 ++#define OB_WIN_TYPE_SHIFT 0
1653 ++#define OB_WIN_TYPE_MEM 0x0
1654 ++#define OB_WIN_TYPE_IO 0x4
1655 ++#define OB_WIN_TYPE_CONFIG_TYPE0 0x8
1656 ++#define OB_WIN_TYPE_CONFIG_TYPE1 0x9
1657 ++#define OB_WIN_TYPE_MSG 0xc
1658 ++
1659 + /* LMI registers base address and register offsets */
1660 + #define LMI_BASE_ADDR 0x6000
1661 + #define CFG_REG (LMI_BASE_ADDR + 0x0)
1662 +@@ -237,6 +266,13 @@ struct advk_pcie {
1663 + struct platform_device *pdev;
1664 + void __iomem *base;
1665 + struct list_head resources;
1666 ++ struct {
1667 ++ phys_addr_t match;
1668 ++ phys_addr_t remap;
1669 ++ phys_addr_t mask;
1670 ++ u32 actions;
1671 ++ } wins[OB_WIN_COUNT];
1672 ++ u8 wins_count;
1673 + struct irq_domain *irq_domain;
1674 + struct irq_chip irq_chip;
1675 + raw_spinlock_t irq_lock;
1676 +@@ -249,7 +285,9 @@ struct advk_pcie {
1677 + struct mutex msi_used_lock;
1678 + u16 msi_msg;
1679 + int root_bus_nr;
1680 ++ int link_gen;
1681 + struct pci_bridge_emul bridge;
1682 ++ struct gpio_desc *reset_gpio;
1683 + };
1684 +
1685 + static inline void advk_writel(struct advk_pcie *pcie, u32 val, u64 reg)
1686 +@@ -309,20 +347,16 @@ static inline bool advk_pcie_link_training(struct advk_pcie *pcie)
1687 +
1688 + static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
1689 + {
1690 +- struct device *dev = &pcie->pdev->dev;
1691 + int retries;
1692 +
1693 + /* check if the link is up or not */
1694 + for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
1695 +- if (advk_pcie_link_up(pcie)) {
1696 +- dev_info(dev, "link up\n");
1697 ++ if (advk_pcie_link_up(pcie))
1698 + return 0;
1699 +- }
1700 +
1701 + usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
1702 + }
1703 +
1704 +- dev_err(dev, "link never came up\n");
1705 + return -ETIMEDOUT;
1706 + }
1707 +
1708 +@@ -337,9 +371,115 @@ static void advk_pcie_wait_for_retrain(struct advk_pcie *pcie)
1709 + }
1710 + }
1711 +
1712 ++static void advk_pcie_issue_perst(struct advk_pcie *pcie)
1713 ++{
1714 ++ if (!pcie->reset_gpio)
1715 ++ return;
1716 ++
1717 ++ /* 10ms delay is needed for some cards */
1718 ++ dev_info(&pcie->pdev->dev, "issuing PERST via reset GPIO for 10ms\n");
1719 ++ gpiod_set_value_cansleep(pcie->reset_gpio, 1);
1720 ++ usleep_range(10000, 11000);
1721 ++ gpiod_set_value_cansleep(pcie->reset_gpio, 0);
1722 ++}
1723 ++
1724 ++static void advk_pcie_train_link(struct advk_pcie *pcie)
1725 ++{
1726 ++ struct device *dev = &pcie->pdev->dev;
1727 ++ u32 reg;
1728 ++ int ret;
1729 ++
1730 ++ /*
1731 ++ * Setup PCIe rev / gen compliance based on device tree property
1732 ++ * 'max-link-speed' which also forces maximal link speed.
1733 ++ */
1734 ++ reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
1735 ++ reg &= ~PCIE_GEN_SEL_MSK;
1736 ++ if (pcie->link_gen == 3)
1737 ++ reg |= SPEED_GEN_3;
1738 ++ else if (pcie->link_gen == 2)
1739 ++ reg |= SPEED_GEN_2;
1740 ++ else
1741 ++ reg |= SPEED_GEN_1;
1742 ++ advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
1743 ++
1744 ++ /*
1745 ++ * Set maximal link speed value also into PCIe Link Control 2 register.
1746 ++ * Armada 3700 Functional Specification says that default value is based
1747 ++ * on SPEED_GEN but tests showed that default value is always 8.0 GT/s.
1748 ++ */
1749 ++ reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);
1750 ++ reg &= ~PCI_EXP_LNKCTL2_TLS;
1751 ++ if (pcie->link_gen == 3)
1752 ++ reg |= PCI_EXP_LNKCTL2_TLS_8_0GT;
1753 ++ else if (pcie->link_gen == 2)
1754 ++ reg |= PCI_EXP_LNKCTL2_TLS_5_0GT;
1755 ++ else
1756 ++ reg |= PCI_EXP_LNKCTL2_TLS_2_5GT;
1757 ++ advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);
1758 ++
1759 ++ /* Enable link training after selecting PCIe generation */
1760 ++ reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
1761 ++ reg |= LINK_TRAINING_EN;
1762 ++ advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
1763 ++
1764 ++ /*
1765 ++ * Reset PCIe card via PERST# signal. Some cards are not detected
1766 ++ * during link training when they are in some non-initial state.
1767 ++ */
1768 ++ advk_pcie_issue_perst(pcie);
1769 ++
1770 ++ /*
1771 ++ * PERST# signal could have been asserted by pinctrl subsystem before
1772 ++ * probe() callback has been called or issued explicitly by reset gpio
1773 ++ * function advk_pcie_issue_perst(), making the endpoint going into
1774 ++ * fundamental reset. As required by PCI Express spec (PCI Express
1775 ++ * Base Specification, REV. 4.0 PCI Express, February 19 2014, 6.6.1
1776 ++ * Conventional Reset) a delay for at least 100ms after such a reset
1777 ++ * before sending a Configuration Request to the device is needed.
1778 ++ * So wait until PCIe link is up. Function advk_pcie_wait_for_link()
1779 ++ * waits for link at least 900ms.
1780 ++ */
1781 ++ ret = advk_pcie_wait_for_link(pcie);
1782 ++ if (ret < 0)
1783 ++ dev_err(dev, "link never came up\n");
1784 ++ else
1785 ++ dev_info(dev, "link up\n");
1786 ++}
1787 ++
1788 ++/*
1789 ++ * Set PCIe address window register which could be used for memory
1790 ++ * mapping.
1791 ++ */
1792 ++static void advk_pcie_set_ob_win(struct advk_pcie *pcie, u8 win_num,
1793 ++ phys_addr_t match, phys_addr_t remap,
1794 ++ phys_addr_t mask, u32 actions)
1795 ++{
1796 ++ advk_writel(pcie, OB_WIN_ENABLE |
1797 ++ lower_32_bits(match), OB_WIN_MATCH_LS(win_num));
1798 ++ advk_writel(pcie, upper_32_bits(match), OB_WIN_MATCH_MS(win_num));
1799 ++ advk_writel(pcie, lower_32_bits(remap), OB_WIN_REMAP_LS(win_num));
1800 ++ advk_writel(pcie, upper_32_bits(remap), OB_WIN_REMAP_MS(win_num));
1801 ++ advk_writel(pcie, lower_32_bits(mask), OB_WIN_MASK_LS(win_num));
1802 ++ advk_writel(pcie, upper_32_bits(mask), OB_WIN_MASK_MS(win_num));
1803 ++ advk_writel(pcie, actions, OB_WIN_ACTIONS(win_num));
1804 ++}
1805 ++
1806 ++static void advk_pcie_disable_ob_win(struct advk_pcie *pcie, u8 win_num)
1807 ++{
1808 ++ advk_writel(pcie, 0, OB_WIN_MATCH_LS(win_num));
1809 ++ advk_writel(pcie, 0, OB_WIN_MATCH_MS(win_num));
1810 ++ advk_writel(pcie, 0, OB_WIN_REMAP_LS(win_num));
1811 ++ advk_writel(pcie, 0, OB_WIN_REMAP_MS(win_num));
1812 ++ advk_writel(pcie, 0, OB_WIN_MASK_LS(win_num));
1813 ++ advk_writel(pcie, 0, OB_WIN_MASK_MS(win_num));
1814 ++ advk_writel(pcie, 0, OB_WIN_ACTIONS(win_num));
1815 ++}
1816 ++
1817 + static void advk_pcie_setup_hw(struct advk_pcie *pcie)
1818 + {
1819 + u32 reg;
1820 ++ int i;
1821 +
1822 + /* Set to Direct mode */
1823 + reg = advk_readl(pcie, CTRL_CONFIG_REG);
1824 +@@ -362,6 +502,31 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
1825 + reg = (PCI_VENDOR_ID_MARVELL << 16) | PCI_VENDOR_ID_MARVELL;
1826 + advk_writel(pcie, reg, VENDOR_ID_REG);
1827 +
1828 ++ /*
1829 ++ * Change Class Code of PCI Bridge device to PCI Bridge (0x600400),
1830 ++ * because the default value is Mass storage controller (0x010400).
1831 ++ *
1832 ++ * Note that this Aardvark PCI Bridge does not have compliant Type 1
1833 ++ * Configuration Space and it even cannot be accessed via Aardvark's
1834 ++ * PCI config space access method. Something like config space is
1835 ++ * available in internal Aardvark registers starting at offset 0x0
1836 ++ * and is reported as Type 0. In range 0x10 - 0x34 it has totally
1837 ++ * different registers.
1838 ++ *
1839 ++ * Therefore driver uses emulation of PCI Bridge which emulates
1840 ++ * access to configuration space via internal Aardvark registers or
1841 ++ * emulated configuration buffer.
1842 ++ */
1843 ++ reg = advk_readl(pcie, PCIE_CORE_DEV_REV_REG);
1844 ++ reg &= ~0xffffff00;
1845 ++ reg |= (PCI_CLASS_BRIDGE_PCI << 8) << 8;
1846 ++ advk_writel(pcie, reg, PCIE_CORE_DEV_REV_REG);
1847 ++
1848 ++ /* Disable Root Bridge I/O space, memory space and bus mastering */
1849 ++ reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
1850 ++ reg &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
1851 ++ advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);
1852 ++
1853 + /* Set Advanced Error Capabilities and Control PF0 register */
1854 + reg = PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX |
1855 + PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN |
1856 +@@ -369,36 +534,27 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
1857 + PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV;
1858 + advk_writel(pcie, reg, PCIE_CORE_ERR_CAPCTL_REG);
1859 +
1860 +- /* Set PCIe Device Control and Status 1 PF0 register */
1861 +- reg = PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE |
1862 +- (7 << PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT) |
1863 +- PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE |
1864 +- (PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ <<
1865 +- PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT);
1866 +- advk_writel(pcie, reg, PCIE_CORE_DEV_CTRL_STATS_REG);
1867 ++ /* Set PCIe Device Control register */
1868 ++ reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);
1869 ++ reg &= ~PCI_EXP_DEVCTL_RELAX_EN;
1870 ++ reg &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
1871 ++ reg &= ~PCI_EXP_DEVCTL_PAYLOAD;
1872 ++ reg &= ~PCI_EXP_DEVCTL_READRQ;
1873 ++ reg |= PCI_EXP_DEVCTL_PAYLOAD_512B;
1874 ++ reg |= PCI_EXP_DEVCTL_READRQ_512B;
1875 ++ advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);
1876 +
1877 + /* Program PCIe Control 2 to disable strict ordering */
1878 + reg = PCIE_CORE_CTRL2_RESERVED |
1879 + PCIE_CORE_CTRL2_TD_ENABLE;
1880 + advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
1881 +
1882 +- /* Set GEN2 */
1883 +- reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
1884 +- reg &= ~PCIE_GEN_SEL_MSK;
1885 +- reg |= SPEED_GEN_2;
1886 +- advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
1887 +-
1888 + /* Set lane X1 */
1889 + reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
1890 + reg &= ~LANE_CNT_MSK;
1891 + reg |= LANE_COUNT_1;
1892 + advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
1893 +
1894 +- /* Enable link training */
1895 +- reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
1896 +- reg |= LINK_TRAINING_EN;
1897 +- advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
1898 +-
1899 + /* Enable MSI */
1900 + reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
1901 + reg |= PCIE_CORE_CTRL2_MSI_ENABLE;
1902 +@@ -423,27 +579,52 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
1903 + reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
1904 + advk_writel(pcie, reg, HOST_CTRL_INT_MASK_REG);
1905 +
1906 ++ /*
1907 ++ * Enable AXI address window location generation:
1908 ++ * When it is enabled, the default outbound window
1909 ++ * configurations (Default User Field: 0xD0074CFC)
1910 ++ * are used to transparent address translation for
1911 ++ * the outbound transactions. Thus, PCIe address
1912 ++ * windows are not required for transparent memory
1913 ++ * access when default outbound window configuration
1914 ++ * is set for memory access.
1915 ++ */
1916 + reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
1917 + reg |= PCIE_CORE_CTRL2_OB_WIN_ENABLE;
1918 + advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
1919 +
1920 +- /* Bypass the address window mapping for PIO */
1921 ++ /*
1922 ++ * Set memory access in Default User Field so it
1923 ++ * is not required to configure PCIe address for
1924 ++ * transparent memory access.
1925 ++ */
1926 ++ advk_writel(pcie, OB_WIN_TYPE_MEM, OB_WIN_DEFAULT_ACTIONS);
1927 ++
1928 ++ /*
1929 ++ * Bypass the address window mapping for PIO:
1930 ++ * Since PIO access already contains all required
1931 ++ * info over AXI interface by PIO registers, the
1932 ++ * address window is not required.
1933 ++ */
1934 + reg = advk_readl(pcie, PIO_CTRL);
1935 + reg |= PIO_CTRL_ADDR_WIN_DISABLE;
1936 + advk_writel(pcie, reg, PIO_CTRL);
1937 +
1938 +- /* Start link training */
1939 +- reg = advk_readl(pcie, PCIE_CORE_LINK_CTRL_STAT_REG);
1940 +- reg |= PCIE_CORE_LINK_TRAINING;
1941 +- advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG);
1942 ++ /*
1943 ++ * Configure PCIe address windows for non-memory or
1944 ++ * non-transparent access as by default PCIe uses
1945 ++ * transparent memory access.
1946 ++ */
1947 ++ for (i = 0; i < pcie->wins_count; i++)
1948 ++ advk_pcie_set_ob_win(pcie, i,
1949 ++ pcie->wins[i].match, pcie->wins[i].remap,
1950 ++ pcie->wins[i].mask, pcie->wins[i].actions);
1951 +
1952 +- advk_pcie_wait_for_link(pcie);
1953 ++ /* Disable remaining PCIe outbound windows */
1954 ++ for (i = pcie->wins_count; i < OB_WIN_COUNT; i++)
1955 ++ advk_pcie_disable_ob_win(pcie, i);
1956 +
1957 +- reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
1958 +- reg |= PCIE_CORE_CMD_MEM_ACCESS_EN |
1959 +- PCIE_CORE_CMD_IO_ACCESS_EN |
1960 +- PCIE_CORE_CMD_MEM_IO_REQ_EN;
1961 +- advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);
1962 ++ advk_pcie_train_link(pcie);
1963 + }
1964 +
1965 + static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
1966 +@@ -452,6 +633,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
1967 + u32 reg;
1968 + unsigned int status;
1969 + char *strcomp_status, *str_posted;
1970 ++ int ret;
1971 +
1972 + reg = advk_readl(pcie, PIO_STAT);
1973 + status = (reg & PIO_COMPLETION_STATUS_MASK) >>
1974 +@@ -476,6 +658,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
1975 + case PIO_COMPLETION_STATUS_OK:
1976 + if (reg & PIO_ERR_STATUS) {
1977 + strcomp_status = "COMP_ERR";
1978 ++ ret = -EFAULT;
1979 + break;
1980 + }
1981 + /* Get the read result */
1982 +@@ -483,9 +666,11 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
1983 + *val = advk_readl(pcie, PIO_RD_DATA);
1984 + /* No error */
1985 + strcomp_status = NULL;
1986 ++ ret = 0;
1987 + break;
1988 + case PIO_COMPLETION_STATUS_UR:
1989 + strcomp_status = "UR";
1990 ++ ret = -EOPNOTSUPP;
1991 + break;
1992 + case PIO_COMPLETION_STATUS_CRS:
1993 + if (allow_crs && val) {
1994 +@@ -503,6 +688,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
1995 + */
1996 + *val = CFG_RD_CRS_VAL;
1997 + strcomp_status = NULL;
1998 ++ ret = 0;
1999 + break;
2000 + }
2001 + /* PCIe r4.0, sec 2.3.2, says:
2002 +@@ -518,21 +704,24 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
2003 + * Request and taking appropriate action, e.g., complete the
2004 + * Request to the host as a failed transaction.
2005 + *
2006 +- * To simplify implementation do not re-issue the Configuration
2007 +- * Request and complete the Request as a failed transaction.
2008 ++ * So return -EAGAIN and caller (pci-aardvark.c driver) will
2009 ++ * re-issue request again up to the PIO_RETRY_CNT retries.
2010 + */
2011 + strcomp_status = "CRS";
2012 ++ ret = -EAGAIN;
2013 + break;
2014 + case PIO_COMPLETION_STATUS_CA:
2015 + strcomp_status = "CA";
2016 ++ ret = -ECANCELED;
2017 + break;
2018 + default:
2019 + strcomp_status = "Unknown";
2020 ++ ret = -EINVAL;
2021 + break;
2022 + }
2023 +
2024 + if (!strcomp_status)
2025 +- return 0;
2026 ++ return ret;
2027 +
2028 + if (reg & PIO_NON_POSTED_REQ)
2029 + str_posted = "Non-posted";
2030 +@@ -542,7 +731,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
2031 + dev_dbg(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
2032 + str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));
2033 +
2034 +- return -EFAULT;
2035 ++ return ret;
2036 + }
2037 +
2038 + static int advk_pcie_wait_pio(struct advk_pcie *pcie)
2039 +@@ -550,13 +739,13 @@ static int advk_pcie_wait_pio(struct advk_pcie *pcie)
2040 + struct device *dev = &pcie->pdev->dev;
2041 + int i;
2042 +
2043 +- for (i = 0; i < PIO_RETRY_CNT; i++) {
2044 ++ for (i = 1; i <= PIO_RETRY_CNT; i++) {
2045 + u32 start, isr;
2046 +
2047 + start = advk_readl(pcie, PIO_START);
2048 + isr = advk_readl(pcie, PIO_ISR);
2049 + if (!start && isr)
2050 +- return 0;
2051 ++ return i;
2052 + udelay(PIO_RETRY_DELAY);
2053 + }
2054 +
2055 +@@ -564,6 +753,64 @@ static int advk_pcie_wait_pio(struct advk_pcie *pcie)
2056 + return -ETIMEDOUT;
2057 + }
2058 +
2059 ++static pci_bridge_emul_read_status_t
2060 ++advk_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
2061 ++ int reg, u32 *value)
2062 ++{
2063 ++ struct advk_pcie *pcie = bridge->data;
2064 ++
2065 ++ switch (reg) {
2066 ++ case PCI_COMMAND:
2067 ++ *value = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
2068 ++ return PCI_BRIDGE_EMUL_HANDLED;
2069 ++
2070 ++ case PCI_INTERRUPT_LINE: {
2071 ++ /*
2072 ++ * From the whole 32bit register we support reading from HW only
2073 ++ * one bit: PCI_BRIDGE_CTL_BUS_RESET.
2074 ++ * Other bits are retrieved only from emulated config buffer.
2075 ++ */
2076 ++ __le32 *cfgspace = (__le32 *)&bridge->conf;
2077 ++ u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]);
2078 ++ if (advk_readl(pcie, PCIE_CORE_CTRL1_REG) & HOT_RESET_GEN)
2079 ++ val |= PCI_BRIDGE_CTL_BUS_RESET << 16;
2080 ++ else
2081 ++ val &= ~(PCI_BRIDGE_CTL_BUS_RESET << 16);
2082 ++ *value = val;
2083 ++ return PCI_BRIDGE_EMUL_HANDLED;
2084 ++ }
2085 ++
2086 ++ default:
2087 ++ return PCI_BRIDGE_EMUL_NOT_HANDLED;
2088 ++ }
2089 ++}
2090 ++
2091 ++static void
2092 ++advk_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
2093 ++ int reg, u32 old, u32 new, u32 mask)
2094 ++{
2095 ++ struct advk_pcie *pcie = bridge->data;
2096 ++
2097 ++ switch (reg) {
2098 ++ case PCI_COMMAND:
2099 ++ advk_writel(pcie, new, PCIE_CORE_CMD_STATUS_REG);
2100 ++ break;
2101 ++
2102 ++ case PCI_INTERRUPT_LINE:
2103 ++ if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
2104 ++ u32 val = advk_readl(pcie, PCIE_CORE_CTRL1_REG);
2105 ++ if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16))
2106 ++ val |= HOT_RESET_GEN;
2107 ++ else
2108 ++ val &= ~HOT_RESET_GEN;
2109 ++ advk_writel(pcie, val, PCIE_CORE_CTRL1_REG);
2110 ++ }
2111 ++ break;
2112 ++
2113 ++ default:
2114 ++ break;
2115 ++ }
2116 ++}
2117 +
2118 + static pci_bridge_emul_read_status_t
2119 + advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
2120 +@@ -665,6 +912,8 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
2121 + }
2122 +
2123 + static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
2124 ++ .read_base = advk_pci_bridge_emul_base_conf_read,
2125 ++ .write_base = advk_pci_bridge_emul_base_conf_write,
2126 + .read_pcie = advk_pci_bridge_emul_pcie_conf_read,
2127 + .write_pcie = advk_pci_bridge_emul_pcie_conf_write,
2128 + };
2129 +@@ -676,37 +925,33 @@ static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
2130 + static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
2131 + {
2132 + struct pci_bridge_emul *bridge = &pcie->bridge;
2133 +- int ret;
2134 +
2135 +- bridge->conf.vendor = advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff;
2136 +- bridge->conf.device = advk_readl(pcie, PCIE_CORE_DEV_ID_REG) >> 16;
2137 ++ bridge->conf.vendor =
2138 ++ cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff);
2139 ++ bridge->conf.device =
2140 ++ cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) >> 16);
2141 + bridge->conf.class_revision =
2142 +- advk_readl(pcie, PCIE_CORE_DEV_REV_REG) & 0xff;
2143 ++ cpu_to_le32(advk_readl(pcie, PCIE_CORE_DEV_REV_REG) & 0xff);
2144 +
2145 + /* Support 32 bits I/O addressing */
2146 + bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
2147 + bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;
2148 +
2149 + /* Support 64 bits memory pref */
2150 +- bridge->conf.pref_mem_base = PCI_PREF_RANGE_TYPE_64;
2151 +- bridge->conf.pref_mem_limit = PCI_PREF_RANGE_TYPE_64;
2152 ++ bridge->conf.pref_mem_base = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);
2153 ++ bridge->conf.pref_mem_limit = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);
2154 +
2155 + /* Support interrupt A for MSI feature */
2156 + bridge->conf.intpin = PCIE_CORE_INT_A_ASSERT_ENABLE;
2157 +
2158 ++ /* Indicates supports for Completion Retry Status */
2159 ++ bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
2160 ++
2161 + bridge->has_pcie = true;
2162 + bridge->data = pcie;
2163 + bridge->ops = &advk_pci_bridge_emul_ops;
2164 +
2165 +- /* PCIe config space can be initialized after pci_bridge_emul_init() */
2166 +- ret = pci_bridge_emul_init(bridge, 0);
2167 +- if (ret < 0)
2168 +- return ret;
2169 +-
2170 +- /* Indicates supports for Completion Retry Status */
2171 +- bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
2172 +-
2173 +- return 0;
2174 ++ return pci_bridge_emul_init(bridge, 0);
2175 + }
2176 +
2177 + static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
2178 +@@ -715,6 +960,13 @@ static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
2179 + if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0)
2180 + return false;
2181 +
2182 ++ /*
2183 ++ * If the link goes down after we check for link-up, nothing bad
2184 ++ * happens but the config access times out.
2185 ++ */
2186 ++ if (bus->number != pcie->root_bus_nr && !advk_pcie_link_up(pcie))
2187 ++ return false;
2188 ++
2189 + return true;
2190 + }
2191 +
2192 +@@ -751,6 +1003,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
2193 + int where, int size, u32 *val)
2194 + {
2195 + struct advk_pcie *pcie = bus->sysdata;
2196 ++ int retry_count;
2197 + bool allow_crs;
2198 + u32 reg;
2199 + int ret;
2200 +@@ -773,18 +1026,8 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
2201 + (le16_to_cpu(pcie->bridge.pcie_conf.rootctl) &
2202 + PCI_EXP_RTCTL_CRSSVE);
2203 +
2204 +- if (advk_pcie_pio_is_running(pcie)) {
2205 +- /*
2206 +- * If it is possible return Completion Retry Status so caller
2207 +- * tries to issue the request again instead of failing.
2208 +- */
2209 +- if (allow_crs) {
2210 +- *val = CFG_RD_CRS_VAL;
2211 +- return PCIBIOS_SUCCESSFUL;
2212 +- }
2213 +- *val = 0xffffffff;
2214 +- return PCIBIOS_SET_FAILED;
2215 +- }
2216 ++ if (advk_pcie_pio_is_running(pcie))
2217 ++ goto try_crs;
2218 +
2219 + /* Program the control register */
2220 + reg = advk_readl(pcie, PIO_CTRL);
2221 +@@ -803,30 +1046,24 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
2222 + /* Program the data strobe */
2223 + advk_writel(pcie, 0xf, PIO_WR_DATA_STRB);
2224 +
2225 +- /* Clear PIO DONE ISR and start the transfer */
2226 +- advk_writel(pcie, 1, PIO_ISR);
2227 +- advk_writel(pcie, 1, PIO_START);
2228 ++ retry_count = 0;
2229 ++ do {
2230 ++ /* Clear PIO DONE ISR and start the transfer */
2231 ++ advk_writel(pcie, 1, PIO_ISR);
2232 ++ advk_writel(pcie, 1, PIO_START);
2233 +
2234 +- ret = advk_pcie_wait_pio(pcie);
2235 +- if (ret < 0) {
2236 +- /*
2237 +- * If it is possible return Completion Retry Status so caller
2238 +- * tries to issue the request again instead of failing.
2239 +- */
2240 +- if (allow_crs) {
2241 +- *val = CFG_RD_CRS_VAL;
2242 +- return PCIBIOS_SUCCESSFUL;
2243 +- }
2244 +- *val = 0xffffffff;
2245 +- return PCIBIOS_SET_FAILED;
2246 +- }
2247 ++ ret = advk_pcie_wait_pio(pcie);
2248 ++ if (ret < 0)
2249 ++ goto try_crs;
2250 +
2251 +- /* Check PIO status and get the read result */
2252 +- ret = advk_pcie_check_pio_status(pcie, allow_crs, val);
2253 +- if (ret < 0) {
2254 +- *val = 0xffffffff;
2255 +- return PCIBIOS_SET_FAILED;
2256 +- }
2257 ++ retry_count += ret;
2258 ++
2259 ++ /* Check PIO status and get the read result */
2260 ++ ret = advk_pcie_check_pio_status(pcie, allow_crs, val);
2261 ++ } while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);
2262 ++
2263 ++ if (ret < 0)
2264 ++ goto fail;
2265 +
2266 + if (size == 1)
2267 + *val = (*val >> (8 * (where & 3))) & 0xff;
2268 +@@ -834,6 +1071,20 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
2269 + *val = (*val >> (8 * (where & 3))) & 0xffff;
2270 +
2271 + return PCIBIOS_SUCCESSFUL;
2272 ++
2273 ++try_crs:
2274 ++ /*
2275 ++ * If it is possible, return Completion Retry Status so that caller
2276 ++ * tries to issue the request again instead of failing.
2277 ++ */
2278 ++ if (allow_crs) {
2279 ++ *val = CFG_RD_CRS_VAL;
2280 ++ return PCIBIOS_SUCCESSFUL;
2281 ++ }
2282 ++
2283 ++fail:
2284 ++ *val = 0xffffffff;
2285 ++ return PCIBIOS_SET_FAILED;
2286 + }
2287 +
2288 + static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
2289 +@@ -842,6 +1093,7 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
2290 + struct advk_pcie *pcie = bus->sysdata;
2291 + u32 reg;
2292 + u32 data_strobe = 0x0;
2293 ++ int retry_count;
2294 + int offset;
2295 + int ret;
2296 +
2297 +@@ -883,19 +1135,22 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
2298 + /* Program the data strobe */
2299 + advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB);
2300 +
2301 +- /* Clear PIO DONE ISR and start the transfer */
2302 +- advk_writel(pcie, 1, PIO_ISR);
2303 +- advk_writel(pcie, 1, PIO_START);
2304 ++ retry_count = 0;
2305 ++ do {
2306 ++ /* Clear PIO DONE ISR and start the transfer */
2307 ++ advk_writel(pcie, 1, PIO_ISR);
2308 ++ advk_writel(pcie, 1, PIO_START);
2309 +
2310 +- ret = advk_pcie_wait_pio(pcie);
2311 +- if (ret < 0)
2312 +- return PCIBIOS_SET_FAILED;
2313 ++ ret = advk_pcie_wait_pio(pcie);
2314 ++ if (ret < 0)
2315 ++ return PCIBIOS_SET_FAILED;
2316 +
2317 +- ret = advk_pcie_check_pio_status(pcie, false, NULL);
2318 +- if (ret < 0)
2319 +- return PCIBIOS_SET_FAILED;
2320 ++ retry_count += ret;
2321 +
2322 +- return PCIBIOS_SUCCESSFUL;
2323 ++ ret = advk_pcie_check_pio_status(pcie, false, NULL);
2324 ++ } while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);
2325 ++
2326 ++ return ret < 0 ? PCIBIOS_SET_FAILED : PCIBIOS_SUCCESSFUL;
2327 + }
2328 +
2329 + static struct pci_ops advk_pcie_ops = {
2330 +@@ -1244,6 +1499,7 @@ static int advk_pcie_probe(struct platform_device *pdev)
2331 + struct advk_pcie *pcie;
2332 + struct resource *res;
2333 + struct pci_host_bridge *bridge;
2334 ++ struct resource_entry *entry;
2335 + int ret, irq;
2336 +
2337 + bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
2338 +@@ -1273,6 +1529,102 @@ static int advk_pcie_probe(struct platform_device *pdev)
2339 + return ret;
2340 + }
2341 +
2342 ++ resource_list_for_each_entry(entry, &pcie->resources) {
2343 ++ resource_size_t start = entry->res->start;
2344 ++ resource_size_t size = resource_size(entry->res);
2345 ++ unsigned long type = resource_type(entry->res);
2346 ++ u64 win_size;
2347 ++
2348 ++ /*
2349 ++ * Aardvark hardware allows to configure also PCIe window
2350 ++ * for config type 0 and type 1 mapping, but driver uses
2351 ++ * only PIO for issuing configuration transfers which does
2352 ++ * not use PCIe window configuration.
2353 ++ */
2354 ++ if (type != IORESOURCE_MEM && type != IORESOURCE_MEM_64 &&
2355 ++ type != IORESOURCE_IO)
2356 ++ continue;
2357 ++
2358 ++ /*
2359 ++ * Skip transparent memory resources. Default outbound access
2360 ++ * configuration is set to transparent memory access so it
2361 ++ * does not need window configuration.
2362 ++ */
2363 ++ if ((type == IORESOURCE_MEM || type == IORESOURCE_MEM_64) &&
2364 ++ entry->offset == 0)
2365 ++ continue;
2366 ++
2367 ++ /*
2368 ++ * The n-th PCIe window is configured by tuple (match, remap, mask)
2369 ++ * and an access to address A uses this window if A matches the
2370 ++ * match with given mask.
2371 ++ * So every PCIe window size must be a power of two and every start
2372 ++ * address must be aligned to window size. Minimal size is 64 KiB
2373 ++ * because lower 16 bits of mask must be zero. Remapped address
2374 ++ * may have set only bits from the mask.
2375 ++ */
2376 ++ while (pcie->wins_count < OB_WIN_COUNT && size > 0) {
2377 ++ /* Calculate the largest aligned window size */
2378 ++ win_size = (1ULL << (fls64(size)-1)) |
2379 ++ (start ? (1ULL << __ffs64(start)) : 0);
2380 ++ win_size = 1ULL << __ffs64(win_size);
2381 ++ if (win_size < 0x10000)
2382 ++ break;
2383 ++
2384 ++ dev_dbg(dev,
2385 ++ "Configuring PCIe window %d: [0x%llx-0x%llx] as %lu\n",
2386 ++ pcie->wins_count, (unsigned long long)start,
2387 ++ (unsigned long long)start + win_size, type);
2388 ++
2389 ++ if (type == IORESOURCE_IO) {
2390 ++ pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_IO;
2391 ++ pcie->wins[pcie->wins_count].match = pci_pio_to_address(start);
2392 ++ } else {
2393 ++ pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_MEM;
2394 ++ pcie->wins[pcie->wins_count].match = start;
2395 ++ }
2396 ++ pcie->wins[pcie->wins_count].remap = start - entry->offset;
2397 ++ pcie->wins[pcie->wins_count].mask = ~(win_size - 1);
2398 ++
2399 ++ if (pcie->wins[pcie->wins_count].remap & (win_size - 1))
2400 ++ break;
2401 ++
2402 ++ start += win_size;
2403 ++ size -= win_size;
2404 ++ pcie->wins_count++;
2405 ++ }
2406 ++
2407 ++ if (size > 0) {
2408 ++ dev_err(&pcie->pdev->dev,
2409 ++ "Invalid PCIe region [0x%llx-0x%llx]\n",
2410 ++ (unsigned long long)entry->res->start,
2411 ++ (unsigned long long)entry->res->end + 1);
2412 ++ return -EINVAL;
2413 ++ }
2414 ++ }
2415 ++
2416 ++ pcie->reset_gpio = devm_gpiod_get_from_of_node(dev, dev->of_node,
2417 ++ "reset-gpios", 0,
2418 ++ GPIOD_OUT_LOW,
2419 ++ "pcie1-reset");
2420 ++ ret = PTR_ERR_OR_ZERO(pcie->reset_gpio);
2421 ++ if (ret) {
2422 ++ if (ret == -ENOENT) {
2423 ++ pcie->reset_gpio = NULL;
2424 ++ } else {
2425 ++ if (ret != -EPROBE_DEFER)
2426 ++ dev_err(dev, "Failed to get reset-gpio: %i\n",
2427 ++ ret);
2428 ++ return ret;
2429 ++ }
2430 ++ }
2431 ++
2432 ++ ret = of_pci_get_max_link_speed(dev->of_node);
2433 ++ if (ret <= 0 || ret > 3)
2434 ++ pcie->link_gen = 3;
2435 ++ else
2436 ++ pcie->link_gen = ret;
2437 ++
2438 + advk_pcie_setup_hw(pcie);
2439 +
2440 + ret = advk_sw_pci_bridge_init(pcie);
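The probe-time loop added to pci-aardvark carves each PCIe resource into outbound windows where every window is a power of two in size and naturally aligned, because the hardware matches addresses with a (match, mask) pair and the mask's low 16 bits are fixed. A standalone sketch of the same arithmetic follows; it uses the GCC builtin __builtin_clzll in place of the kernel's fls64/__ffs64 helpers, and chunk_size()/carve_windows() are names invented for the example.

#include <stdint.h>
#include <stdio.h>

/* Largest power-of-two chunk that fits in size and is aligned to start. */
static uint64_t chunk_size(uint64_t start, uint64_t size)
{
    uint64_t fit   = 1ULL << (63 - __builtin_clzll(size));   /* <= size */
    uint64_t align = start ? (start & -start) : fit;          /* alignment of start */

    return fit < align ? fit : align;
}

/*
 * Split [start, start + size) into naturally aligned power-of-two windows,
 * the shape a (match, mask) address decoder can express.  The 64 KiB floor
 * mirrors a mask register whose low 16 bits are fixed at zero.
 */
static int carve_windows(uint64_t start, uint64_t size, int max_windows)
{
    int used = 0;

    while (used < max_windows && size > 0) {
        uint64_t win = chunk_size(start, size);

        if (win < 0x10000)          /* too small to express, give up */
            break;

        printf("window %d: base 0x%llx size 0x%llx mask 0x%llx\n",
               used, (unsigned long long)start, (unsigned long long)win,
               (unsigned long long)~(win - 1));

        start += win;
        size  -= win;
        used++;
    }

    return size ? -1 : used;        /* -1: region not fully coverable */
}

int main(void)
{
    return carve_windows(0x10000, 0x30000, 8) < 0;
}

For the example region this produces a 64 KiB window at 0x10000 followed by a 128 KiB window at 0x20000; a region that cannot be fully covered makes the function fail, which corresponds to the -EINVAL path the probe code takes for such resources.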
2441 +diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
2442 +index b3d63e319bb39..3026346ccb18c 100644
2443 +--- a/drivers/pci/pci-bridge-emul.c
2444 ++++ b/drivers/pci/pci-bridge-emul.c
2445 +@@ -21,8 +21,9 @@
2446 + #include "pci-bridge-emul.h"
2447 +
2448 + #define PCI_BRIDGE_CONF_END PCI_STD_HEADER_SIZEOF
2449 ++#define PCI_CAP_PCIE_SIZEOF (PCI_EXP_SLTSTA2 + 2)
2450 + #define PCI_CAP_PCIE_START PCI_BRIDGE_CONF_END
2451 +-#define PCI_CAP_PCIE_END (PCI_CAP_PCIE_START + PCI_EXP_SLTSTA2 + 2)
2452 ++#define PCI_CAP_PCIE_END (PCI_CAP_PCIE_START + PCI_CAP_PCIE_SIZEOF)
2453 +
2454 + struct pci_bridge_reg_behavior {
2455 + /* Read-only bits */
2456 +@@ -38,7 +39,8 @@ struct pci_bridge_reg_behavior {
2457 + u32 rsvd;
2458 + };
2459 +
2460 +-static const struct pci_bridge_reg_behavior pci_regs_behavior[] = {
2461 ++static const
2462 ++struct pci_bridge_reg_behavior pci_regs_behavior[PCI_STD_HEADER_SIZEOF / 4] = {
2463 + [PCI_VENDOR_ID / 4] = { .ro = ~0 },
2464 + [PCI_COMMAND / 4] = {
2465 + .rw = (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
2466 +@@ -173,7 +175,8 @@ static const struct pci_bridge_reg_behavior pci_regs_behavior[] = {
2467 + },
2468 + };
2469 +
2470 +-static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = {
2471 ++static const
2472 ++struct pci_bridge_reg_behavior pcie_cap_regs_behavior[PCI_CAP_PCIE_SIZEOF / 4] = {
2473 + [PCI_CAP_LIST_ID / 4] = {
2474 + /*
2475 + * Capability ID, Next Capability Pointer and
2476 +@@ -270,6 +273,8 @@ static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = {
2477 + int pci_bridge_emul_init(struct pci_bridge_emul *bridge,
2478 + unsigned int flags)
2479 + {
2480 ++ BUILD_BUG_ON(sizeof(bridge->conf) != PCI_BRIDGE_CONF_END);
2481 ++
2482 + bridge->conf.class_revision |= cpu_to_le32(PCI_CLASS_BRIDGE_PCI << 16);
2483 + bridge->conf.header_type = PCI_HEADER_TYPE_BRIDGE;
2484 + bridge->conf.cache_line_size = 0x10;
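The pci-bridge-emul change gives both register-behavior tables an explicit element count and adds a BUILD_BUG_ON on the emulated config-space size, so a stray designated initializer or a mis-sized struct fails at compile time rather than silently indexing past the table. A small illustration of why the explicit bound helps follows, using plain C11 static_assert instead of the kernel macro; the struct names are invented for the example.

#include <assert.h>

struct reg_behavior {
    unsigned int ro, rw, w1c;
};

/*
 * With the element count spelled out, an initializer index past the end is
 * a compile-time error instead of quietly growing the array.
 */
static const struct reg_behavior behavior[16] = {
    [0]  = { .ro = ~0u },
    [15] = { .rw = ~0u },
    /* [16] = { 0 },  would not compile: index exceeds the declared bounds */
};

/* 16 dword entries describe 64 bytes of registers; keep the two in sync. */
struct conf_space {
    unsigned char bytes[64];
};
static_assert(sizeof(struct conf_space) ==
              sizeof(behavior) / sizeof(behavior[0]) * 4,
              "behavior table and register block disagree on size");

int main(void)
{
    return behavior[0].ro == 0;     /* zero: table entry is as expected */
}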
2485 +diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
2486 +index 83e585c5a6132..f56add78d58ce 100644
2487 +--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
2488 ++++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
2489 +@@ -166,10 +166,14 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
2490 + PIN_GRP_GPIO("jtag", 20, 5, BIT(0), "jtag"),
2491 + PIN_GRP_GPIO("sdio0", 8, 3, BIT(1), "sdio"),
2492 + PIN_GRP_GPIO("emmc_nb", 27, 9, BIT(2), "emmc"),
2493 +- PIN_GRP_GPIO("pwm0", 11, 1, BIT(3), "pwm"),
2494 +- PIN_GRP_GPIO("pwm1", 12, 1, BIT(4), "pwm"),
2495 +- PIN_GRP_GPIO("pwm2", 13, 1, BIT(5), "pwm"),
2496 +- PIN_GRP_GPIO("pwm3", 14, 1, BIT(6), "pwm"),
2497 ++ PIN_GRP_GPIO_3("pwm0", 11, 1, BIT(3) | BIT(20), 0, BIT(20), BIT(3),
2498 ++ "pwm", "led"),
2499 ++ PIN_GRP_GPIO_3("pwm1", 12, 1, BIT(4) | BIT(21), 0, BIT(21), BIT(4),
2500 ++ "pwm", "led"),
2501 ++ PIN_GRP_GPIO_3("pwm2", 13, 1, BIT(5) | BIT(22), 0, BIT(22), BIT(5),
2502 ++ "pwm", "led"),
2503 ++ PIN_GRP_GPIO_3("pwm3", 14, 1, BIT(6) | BIT(23), 0, BIT(23), BIT(6),
2504 ++ "pwm", "led"),
2505 + PIN_GRP_GPIO("pmic1", 7, 1, BIT(7), "pmic"),
2506 + PIN_GRP_GPIO("pmic0", 6, 1, BIT(8), "pmic"),
2507 + PIN_GRP_GPIO("i2c2", 2, 2, BIT(9), "i2c"),
2508 +@@ -183,11 +187,6 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
2509 + PIN_GRP_EXTRA("uart2", 9, 2, BIT(1) | BIT(13) | BIT(14) | BIT(19),
2510 + BIT(1) | BIT(13) | BIT(14), BIT(1) | BIT(19),
2511 + 18, 2, "gpio", "uart"),
2512 +- PIN_GRP_GPIO_2("led0_od", 11, 1, BIT(20), BIT(20), 0, "led"),
2513 +- PIN_GRP_GPIO_2("led1_od", 12, 1, BIT(21), BIT(21), 0, "led"),
2514 +- PIN_GRP_GPIO_2("led2_od", 13, 1, BIT(22), BIT(22), 0, "led"),
2515 +- PIN_GRP_GPIO_2("led3_od", 14, 1, BIT(23), BIT(23), 0, "led"),
2516 +-
2517 + };
2518 +
2519 + static struct armada_37xx_pin_group armada_37xx_sb_groups[] = {
2520 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
2521 +index 3654cfc4376fa..97c1f242ef0a3 100644
2522 +--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
2523 ++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
2524 +@@ -3387,7 +3387,7 @@ _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
2525 +
2526 + shost_for_each_device(sdev, ioc->shost) {
2527 + sas_device_priv_data = sdev->hostdata;
2528 +- if (!sas_device_priv_data)
2529 ++ if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
2530 + continue;
2531 + if (sas_device_priv_data->sas_target->sas_address
2532 + != sas_address)
2533 +diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
2534 +index 16432d42a50aa..6faf1d6451b0c 100644
2535 +--- a/drivers/scsi/scsi_sysfs.c
2536 ++++ b/drivers/scsi/scsi_sysfs.c
2537 +@@ -796,7 +796,7 @@ store_state_field(struct device *dev, struct device_attribute *attr,
2538 +
2539 + mutex_lock(&sdev->state_mutex);
2540 + if (sdev->sdev_state == SDEV_RUNNING && state == SDEV_RUNNING) {
2541 +- ret = count;
2542 ++ ret = 0;
2543 + } else {
2544 + ret = scsi_device_set_state(sdev, state);
2545 + if (ret == 0 && state == SDEV_RUNNING)
2546 +diff --git a/drivers/staging/fbtft/fb_ssd1351.c b/drivers/staging/fbtft/fb_ssd1351.c
2547 +index cf263a58a1489..6fd549a424d53 100644
2548 +--- a/drivers/staging/fbtft/fb_ssd1351.c
2549 ++++ b/drivers/staging/fbtft/fb_ssd1351.c
2550 +@@ -187,7 +187,6 @@ static struct fbtft_display display = {
2551 + },
2552 + };
2553 +
2554 +-#ifdef CONFIG_FB_BACKLIGHT
2555 + static int update_onboard_backlight(struct backlight_device *bd)
2556 + {
2557 + struct fbtft_par *par = bl_get_data(bd);
2558 +@@ -231,9 +230,6 @@ static void register_onboard_backlight(struct fbtft_par *par)
2559 + if (!par->fbtftops.unregister_backlight)
2560 + par->fbtftops.unregister_backlight = fbtft_unregister_backlight;
2561 + }
2562 +-#else
2563 +-static void register_onboard_backlight(struct fbtft_par *par) { };
2564 +-#endif
2565 +
2566 + FBTFT_REGISTER_DRIVER(DRVNAME, "solomon,ssd1351", &display);
2567 +
2568 +diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
2569 +index bc53d68bfcaa3..771697508cec8 100644
2570 +--- a/drivers/staging/fbtft/fbtft-core.c
2571 ++++ b/drivers/staging/fbtft/fbtft-core.c
2572 +@@ -136,7 +136,6 @@ static int fbtft_request_gpios_dt(struct fbtft_par *par)
2573 + }
2574 + #endif
2575 +
2576 +-#ifdef CONFIG_FB_BACKLIGHT
2577 + static int fbtft_backlight_update_status(struct backlight_device *bd)
2578 + {
2579 + struct fbtft_par *par = bl_get_data(bd);
2580 +@@ -169,6 +168,7 @@ void fbtft_unregister_backlight(struct fbtft_par *par)
2581 + par->info->bl_dev = NULL;
2582 + }
2583 + }
2584 ++EXPORT_SYMBOL(fbtft_unregister_backlight);
2585 +
2586 + static const struct backlight_ops fbtft_bl_ops = {
2587 + .get_brightness = fbtft_backlight_get_brightness,
2588 +@@ -206,12 +206,7 @@ void fbtft_register_backlight(struct fbtft_par *par)
2589 + if (!par->fbtftops.unregister_backlight)
2590 + par->fbtftops.unregister_backlight = fbtft_unregister_backlight;
2591 + }
2592 +-#else
2593 +-void fbtft_register_backlight(struct fbtft_par *par) { };
2594 +-void fbtft_unregister_backlight(struct fbtft_par *par) { };
2595 +-#endif
2596 + EXPORT_SYMBOL(fbtft_register_backlight);
2597 +-EXPORT_SYMBOL(fbtft_unregister_backlight);
2598 +
2599 + static void fbtft_set_addr_win(struct fbtft_par *par, int xs, int ys, int xe,
2600 + int ye)
2601 +@@ -860,13 +855,11 @@ int fbtft_register_framebuffer(struct fb_info *fb_info)
2602 + fb_info->fix.smem_len >> 10, text1,
2603 + HZ / fb_info->fbdefio->delay, text2);
2604 +
2605 +-#ifdef CONFIG_FB_BACKLIGHT
2606 + /* Turn on backlight if available */
2607 + if (fb_info->bl_dev) {
2608 + fb_info->bl_dev->props.power = FB_BLANK_UNBLANK;
2609 + fb_info->bl_dev->ops->update_status(fb_info->bl_dev);
2610 + }
2611 +-#endif
2612 +
2613 + return 0;
2614 +
2615 +diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
2616 +index c702ee9691b1d..bcbf0c8cd4209 100644
2617 +--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
2618 ++++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
2619 +@@ -2559,13 +2559,14 @@ static void _rtl92e_pci_disconnect(struct pci_dev *pdev)
2620 + free_irq(dev->irq, dev);
2621 + priv->irq = 0;
2622 + }
2623 +- free_rtllib(dev);
2624 +
2625 + if (dev->mem_start != 0) {
2626 + iounmap((void __iomem *)dev->mem_start);
2627 + release_mem_region(pci_resource_start(pdev, 1),
2628 + pci_resource_len(pdev, 1));
2629 + }
2630 ++
2631 ++ free_rtllib(dev);
2632 + } else {
2633 + priv = rtllib_priv(dev);
2634 + }
2635 +diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
2636 +index 5ef08905fe05c..15da02aeee948 100644
2637 +--- a/drivers/tty/hvc/hvc_xen.c
2638 ++++ b/drivers/tty/hvc/hvc_xen.c
2639 +@@ -86,7 +86,11 @@ static int __write_console(struct xencons_info *xencons,
2640 + cons = intf->out_cons;
2641 + prod = intf->out_prod;
2642 + mb(); /* update queue values before going on */
2643 +- BUG_ON((prod - cons) > sizeof(intf->out));
2644 ++
2645 ++ if ((prod - cons) > sizeof(intf->out)) {
2646 ++ pr_err_once("xencons: Illegal ring page indices");
2647 ++ return -EINVAL;
2648 ++ }
2649 +
2650 + while ((sent < len) && ((prod - cons) < sizeof(intf->out)))
2651 + intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++];
2652 +@@ -114,7 +118,10 @@ static int domU_write_console(uint32_t vtermno, const char *data, int len)
2653 + */
2654 + while (len) {
2655 + int sent = __write_console(cons, data, len);
2656 +-
2657 ++
2658 ++ if (sent < 0)
2659 ++ return sent;
2660 ++
2661 + data += sent;
2662 + len -= sent;
2663 +
2664 +@@ -138,7 +145,11 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
2665 + cons = intf->in_cons;
2666 + prod = intf->in_prod;
2667 + mb(); /* get pointers before reading ring */
2668 +- BUG_ON((prod - cons) > sizeof(intf->in));
2669 ++
2670 ++ if ((prod - cons) > sizeof(intf->in)) {
2671 ++ pr_err_once("xencons: Illegal ring page indices");
2672 ++ return -EINVAL;
2673 ++ }
2674 +
2675 + while (cons != prod && recv < len)
2676 + buf[recv++] = intf->in[MASK_XENCONS_IDX(cons++, intf->in)];
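Both hvc_xen hunks replace a BUG_ON with a bounds check on the producer/consumer distance, so corrupt indices published by the other end become an error return instead of a crash. A compact sketch of that pattern for a power-of-two ring with free-running 32-bit indices follows; the console_ring layout and ring_read() helper are invented for the example, and the memory barriers of the original are only noted in a comment.

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 1024u                          /* must be a power of two */
#define RING_MASK(idx) ((idx) & (RING_SIZE - 1))

struct console_ring {
    volatile uint32_t cons, prod;                /* indices only ever grow */
    volatile char buf[RING_SIZE];
};

/* Drain available bytes; reject impossible indices instead of crashing. */
static int ring_read(struct console_ring *r, char *out, size_t len)
{
    uint32_t cons = r->cons;
    uint32_t prod = r->prod;                     /* real code issues a barrier here */
    size_t n = 0;

    if (prod - cons > RING_SIZE)
        return -EINVAL;                          /* other side published bogus indices */

    while (cons != prod && n < len)
        out[n++] = r->buf[RING_MASK(cons++)];

    r->cons = cons;                              /* publish what was consumed */
    return (int)n;
}

int main(void)
{
    static struct console_ring ring;
    char tmp[8];

    ring.prod = ring.cons + RING_SIZE + 1;       /* deliberately corrupt state */
    printf("ring_read: %d\n", ring_read(&ring, tmp, sizeof(tmp)));
    return 0;
}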
2677 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
2678 +index 303e8b3c1bdae..d7ab2e88631a0 100644
2679 +--- a/drivers/usb/core/hub.c
2680 ++++ b/drivers/usb/core/hub.c
2681 +@@ -4609,8 +4609,6 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
2682 + if (oldspeed == USB_SPEED_LOW)
2683 + delay = HUB_LONG_RESET_TIME;
2684 +
2685 +- mutex_lock(hcd->address0_mutex);
2686 +-
2687 + /* Reset the device; full speed may morph to high speed */
2688 + /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
2689 + retval = hub_port_reset(hub, port1, udev, delay, false);
2690 +@@ -4925,7 +4923,6 @@ fail:
2691 + hub_port_disable(hub, port1, 0);
2692 + update_devnum(udev, devnum); /* for disconnect processing */
2693 + }
2694 +- mutex_unlock(hcd->address0_mutex);
2695 + return retval;
2696 + }
2697 +
2698 +@@ -5015,6 +5012,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
2699 + struct usb_port *port_dev = hub->ports[port1 - 1];
2700 + struct usb_device *udev = port_dev->child;
2701 + static int unreliable_port = -1;
2702 ++ bool retry_locked;
2703 +
2704 + /* Disconnect any existing devices under this port */
2705 + if (udev) {
2706 +@@ -5070,7 +5068,11 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
2707 + unit_load = 100;
2708 +
2709 + status = 0;
2710 ++
2711 + for (i = 0; i < SET_CONFIG_TRIES; i++) {
2712 ++ usb_lock_port(port_dev);
2713 ++ mutex_lock(hcd->address0_mutex);
2714 ++ retry_locked = true;
2715 +
2716 + /* reallocate for each attempt, since references
2717 + * to the previous one can escape in various ways
2718 +@@ -5079,6 +5081,8 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
2719 + if (!udev) {
2720 + dev_err(&port_dev->dev,
2721 + "couldn't allocate usb_device\n");
2722 ++ mutex_unlock(hcd->address0_mutex);
2723 ++ usb_unlock_port(port_dev);
2724 + goto done;
2725 + }
2726 +
2727 +@@ -5100,12 +5104,14 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
2728 + }
2729 +
2730 + /* reset (non-USB 3.0 devices) and get descriptor */
2731 +- usb_lock_port(port_dev);
2732 + status = hub_port_init(hub, udev, port1, i);
2733 +- usb_unlock_port(port_dev);
2734 + if (status < 0)
2735 + goto loop;
2736 +
2737 ++ mutex_unlock(hcd->address0_mutex);
2738 ++ usb_unlock_port(port_dev);
2739 ++ retry_locked = false;
2740 ++
2741 + if (udev->quirks & USB_QUIRK_DELAY_INIT)
2742 + msleep(2000);
2743 +
2744 +@@ -5198,6 +5204,10 @@ loop:
2745 + usb_ep0_reinit(udev);
2746 + release_devnum(udev);
2747 + hub_free_dev(udev);
2748 ++ if (retry_locked) {
2749 ++ mutex_unlock(hcd->address0_mutex);
2750 ++ usb_unlock_port(port_dev);
2751 ++ }
2752 + usb_put_dev(udev);
2753 + if ((status == -ENOTCONN) || (status == -ENOTSUPP))
2754 + break;
2755 +@@ -5794,6 +5804,8 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
2756 + bos = udev->bos;
2757 + udev->bos = NULL;
2758 +
2759 ++ mutex_lock(hcd->address0_mutex);
2760 ++
2761 + for (i = 0; i < SET_CONFIG_TRIES; ++i) {
2762 +
2763 + /* ep0 maxpacket size may change; let the HCD know about it.
2764 +@@ -5803,6 +5815,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
2765 + if (ret >= 0 || ret == -ENOTCONN || ret == -ENODEV)
2766 + break;
2767 + }
2768 ++ mutex_unlock(hcd->address0_mutex);
2769 +
2770 + if (ret < 0)
2771 + goto re_enumerate;
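The hub.c changes move the address0 mutex and the port lock out of hub_port_init() and into each retry iteration, then use a retry_locked flag so the shared cleanup path only unlocks when that iteration still owns the locks. A rough userspace sketch of that lock-scope pattern follows; pthread mutexes and the assign_address()/configure_device() stubs stand in for the kernel locks and the real enumeration steps.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t port_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t addr0_lock = PTHREAD_MUTEX_INITIALIZER;

static int attempts;

/* Stand-ins for the locked address-assignment phase and the later
 * unlocked configuration phase; the second succeeds on the third try. */
static int assign_address(void) { return 0; }
static int configure_device(void) { return ++attempts >= 3 ? 0 : -1; }

static int enumerate_with_retries(int tries)
{
    int status = -1;

    for (int i = 0; i < tries; i++) {
        bool locked = true;

        /* Both locks cover only the address-assignment phase of one try. */
        pthread_mutex_lock(&port_lock);
        pthread_mutex_lock(&addr0_lock);

        status = assign_address();
        if (status < 0)
            goto next;                  /* failed while still holding the locks */

        pthread_mutex_unlock(&addr0_lock);
        pthread_mutex_unlock(&port_lock);
        locked = false;

        status = configure_device();    /* slow work runs unlocked */
        if (status == 0)
            return 0;

next:
        /* The shared retry path unlocks only if this try still owns the locks. */
        if (locked) {
            pthread_mutex_unlock(&addr0_lock);
            pthread_mutex_unlock(&port_lock);
        }
    }
    return status;
}

int main(void)
{
    printf("result=%d after %d attempts\n", enumerate_with_retries(5), attempts);
    return 0;
}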
2772 +diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
2773 +index e8b25dae09499..249e8e6aa9282 100644
2774 +--- a/drivers/usb/dwc2/gadget.c
2775 ++++ b/drivers/usb/dwc2/gadget.c
2776 +@@ -1198,6 +1198,8 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
2777 + }
2778 + ctrl |= DXEPCTL_CNAK;
2779 + } else {
2780 ++ hs_req->req.frame_number = hs_ep->target_frame;
2781 ++ hs_req->req.actual = 0;
2782 + dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
2783 + return;
2784 + }
2785 +@@ -2855,9 +2857,12 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
2786 +
2787 + do {
2788 + hs_req = get_ep_head(hs_ep);
2789 +- if (hs_req)
2790 ++ if (hs_req) {
2791 ++ hs_req->req.frame_number = hs_ep->target_frame;
2792 ++ hs_req->req.actual = 0;
2793 + dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req,
2794 + -ENODATA);
2795 ++ }
2796 + dwc2_gadget_incr_frame_num(hs_ep);
2797 + /* Update current frame number value. */
2798 + hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
2799 +@@ -2910,8 +2915,11 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
2800 +
2801 + while (dwc2_gadget_target_frame_elapsed(ep)) {
2802 + hs_req = get_ep_head(ep);
2803 +- if (hs_req)
2804 ++ if (hs_req) {
2805 ++ hs_req->req.frame_number = ep->target_frame;
2806 ++ hs_req->req.actual = 0;
2807 + dwc2_hsotg_complete_request(hsotg, ep, hs_req, -ENODATA);
2808 ++ }
2809 +
2810 + dwc2_gadget_incr_frame_num(ep);
2811 + /* Update current frame number value. */
2812 +@@ -3000,8 +3008,11 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
2813 +
2814 + while (dwc2_gadget_target_frame_elapsed(hs_ep)) {
2815 + hs_req = get_ep_head(hs_ep);
2816 +- if (hs_req)
2817 ++ if (hs_req) {
2818 ++ hs_req->req.frame_number = hs_ep->target_frame;
2819 ++ hs_req->req.actual = 0;
2820 + dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
2821 ++ }
2822 +
2823 + dwc2_gadget_incr_frame_num(hs_ep);
2824 + /* Update current frame number value. */
2825 +diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
2826 +index 68bbac64b7536..94af71e9856f2 100644
2827 +--- a/drivers/usb/dwc2/hcd_queue.c
2828 ++++ b/drivers/usb/dwc2/hcd_queue.c
2829 +@@ -59,7 +59,7 @@
2830 + #define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5))
2831 +
2832 + /* If we get a NAK, wait this long before retrying */
2833 +-#define DWC2_RETRY_WAIT_DELAY 1*1E6L
2834 ++#define DWC2_RETRY_WAIT_DELAY (1 * NSEC_PER_MSEC)
2835 +
2836 + /**
2837 + * dwc2_periodic_channel_available() - Checks that a channel is available for a
2838 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
2839 +index a1e9cbe518c74..74203ed5479fa 100644
2840 +--- a/drivers/usb/serial/option.c
2841 ++++ b/drivers/usb/serial/option.c
2842 +@@ -1267,6 +1267,8 @@ static const struct usb_device_id option_ids[] = {
2843 + .driver_info = NCTRL(2) },
2844 + { USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */
2845 + .driver_info = NCTRL(0) | ZLP },
2846 ++ { USB_DEVICE(TELIT_VENDOR_ID, 0x9200), /* Telit LE910S1 flashing device */
2847 ++ .driver_info = NCTRL(0) | ZLP },
2848 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
2849 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
2850 + .driver_info = RSVD(1) },
2851 +@@ -2094,6 +2096,9 @@ static const struct usb_device_id option_ids[] = {
2852 + { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
2853 + { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
2854 + { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
2855 ++ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
2856 ++ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */
2857 ++ .driver_info = RSVD(4) },
2858 + { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
2859 + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
2860 + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
2861 +diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c
2862 +index b498960ff72b5..5e661bae39972 100644
2863 +--- a/drivers/usb/typec/tcpm/fusb302.c
2864 ++++ b/drivers/usb/typec/tcpm/fusb302.c
2865 +@@ -669,25 +669,27 @@ static int tcpm_set_cc(struct tcpc_dev *dev, enum typec_cc_status cc)
2866 + ret = fusb302_i2c_mask_write(chip, FUSB_REG_MASK,
2867 + FUSB_REG_MASK_BC_LVL |
2868 + FUSB_REG_MASK_COMP_CHNG,
2869 +- FUSB_REG_MASK_COMP_CHNG);
2870 ++ FUSB_REG_MASK_BC_LVL);
2871 + if (ret < 0) {
2872 + fusb302_log(chip, "cannot set SRC interrupt, ret=%d",
2873 + ret);
2874 + goto done;
2875 + }
2876 + chip->intr_comp_chng = true;
2877 ++ chip->intr_bc_lvl = false;
2878 + break;
2879 + case TYPEC_CC_RD:
2880 + ret = fusb302_i2c_mask_write(chip, FUSB_REG_MASK,
2881 + FUSB_REG_MASK_BC_LVL |
2882 + FUSB_REG_MASK_COMP_CHNG,
2883 +- FUSB_REG_MASK_BC_LVL);
2884 ++ FUSB_REG_MASK_COMP_CHNG);
2885 + if (ret < 0) {
2886 + fusb302_log(chip, "cannot set SRC interrupt, ret=%d",
2887 + ret);
2888 + goto done;
2889 + }
2890 + chip->intr_bc_lvl = true;
2891 ++ chip->intr_comp_chng = false;
2892 + break;
2893 + default:
2894 + break;
2895 +diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
2896 +index f21f5bfbb78dc..2bf7cb01da9a3 100644
2897 +--- a/drivers/vhost/vsock.c
2898 ++++ b/drivers/vhost/vsock.c
2899 +@@ -491,7 +491,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
2900 + virtio_transport_free_pkt(pkt);
2901 +
2902 + len += sizeof(pkt->hdr);
2903 +- vhost_add_used(vq, head, len);
2904 ++ vhost_add_used(vq, head, 0);
2905 + total_len += len;
2906 + added = true;
2907 + } while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
2908 +diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
2909 +index 652894d619677..b911a91bce6b7 100644
2910 +--- a/drivers/xen/xenbus/xenbus_probe.c
2911 ++++ b/drivers/xen/xenbus/xenbus_probe.c
2912 +@@ -846,7 +846,7 @@ static struct notifier_block xenbus_resume_nb = {
2913 +
2914 + static int __init xenbus_init(void)
2915 + {
2916 +- int err = 0;
2917 ++ int err;
2918 + uint64_t v = 0;
2919 + xen_store_domain_type = XS_UNKNOWN;
2920 +
2921 +@@ -886,6 +886,29 @@ static int __init xenbus_init(void)
2922 + err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
2923 + if (err)
2924 + goto out_error;
2925 ++ /*
2926 ++ * Uninitialized hvm_params are zero and return no error.
2927 ++ * Although it is theoretically possible to have
2928 ++ * HVM_PARAM_STORE_PFN set to zero on purpose, in reality it is
2929 ++ * not zero when valid. If zero, it means that Xenstore hasn't
2930 ++ * been properly initialized. Instead of attempting to map a
2931 ++ * wrong guest physical address return error.
2932 ++ *
2933 ++ * Also recognize all bits set as an invalid value.
2934 ++ */
2935 ++ if (!v || !~v) {
2936 ++ err = -ENOENT;
2937 ++ goto out_error;
2938 ++ }
2939 ++ /* Avoid truncation on 32-bit. */
2940 ++#if BITS_PER_LONG == 32
2941 ++ if (v > ULONG_MAX) {
2942 ++ pr_err("%s: cannot handle HVM_PARAM_STORE_PFN=%llx > ULONG_MAX\n",
2943 ++ __func__, v);
2944 ++ err = -EINVAL;
2945 ++ goto out_error;
2946 ++ }
2947 ++#endif
2948 + xen_store_gfn = (unsigned long)v;
2949 + xen_store_interface =
2950 + xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
2951 +@@ -920,8 +943,10 @@ static int __init xenbus_init(void)
2952 + */
2953 + proc_create_mount_point("xen");
2954 + #endif
2955 ++ return 0;
2956 +
2957 + out_error:
2958 ++ xen_store_domain_type = XS_UNKNOWN;
2959 + return err;
2960 + }
2961 +
2962 +diff --git a/fs/cifs/file.c b/fs/cifs/file.c
2963 +index a9746af5a44db..03c85beecec10 100644
2964 +--- a/fs/cifs/file.c
2965 ++++ b/fs/cifs/file.c
2966 +@@ -2577,12 +2577,23 @@ int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2967 + tcon = tlink_tcon(smbfile->tlink);
2968 + if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2969 + server = tcon->ses->server;
2970 +- if (server->ops->flush)
2971 +- rc = server->ops->flush(xid, tcon, &smbfile->fid);
2972 +- else
2973 ++ if (server->ops->flush == NULL) {
2974 + rc = -ENOSYS;
2975 ++ goto strict_fsync_exit;
2976 ++ }
2977 ++
2978 ++ if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2979 ++ smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2980 ++ if (smbfile) {
2981 ++ rc = server->ops->flush(xid, tcon, &smbfile->fid);
2982 ++ cifsFileInfo_put(smbfile);
2983 ++ } else
2984 ++ cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2985 ++ } else
2986 ++ rc = server->ops->flush(xid, tcon, &smbfile->fid);
2987 + }
2988 +
2989 ++strict_fsync_exit:
2990 + free_xid(xid);
2991 + return rc;
2992 + }
2993 +@@ -2594,6 +2605,7 @@ int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2994 + struct cifs_tcon *tcon;
2995 + struct TCP_Server_Info *server;
2996 + struct cifsFileInfo *smbfile = file->private_data;
2997 ++ struct inode *inode = file_inode(file);
2998 + struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
2999 +
3000 + rc = file_write_and_wait_range(file, start, end);
3001 +@@ -2608,12 +2620,23 @@ int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
3002 + tcon = tlink_tcon(smbfile->tlink);
3003 + if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
3004 + server = tcon->ses->server;
3005 +- if (server->ops->flush)
3006 +- rc = server->ops->flush(xid, tcon, &smbfile->fid);
3007 +- else
3008 ++ if (server->ops->flush == NULL) {
3009 + rc = -ENOSYS;
3010 ++ goto fsync_exit;
3011 ++ }
3012 ++
3013 ++ if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
3014 ++ smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
3015 ++ if (smbfile) {
3016 ++ rc = server->ops->flush(xid, tcon, &smbfile->fid);
3017 ++ cifsFileInfo_put(smbfile);
3018 ++ } else
3019 ++ cifs_dbg(FYI, "ignore fsync for file not open for write\n");
3020 ++ } else
3021 ++ rc = server->ops->flush(xid, tcon, &smbfile->fid);
3022 + }
3023 +
3024 ++fsync_exit:
3025 + free_xid(xid);
3026 + return rc;
3027 + }
3028 +diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
3029 +index 4cb182c20eedd..0cd1d51dde06d 100644
3030 +--- a/fs/f2fs/node.c
3031 ++++ b/fs/f2fs/node.c
3032 +@@ -1385,6 +1385,7 @@ page_hit:
3033 + nid, nid_of_node(page), ino_of_node(page),
3034 + ofs_of_node(page), cpver_of_node(page),
3035 + next_blkaddr_of_node(page));
3036 ++ set_sbi_flag(sbi, SBI_NEED_FSCK);
3037 + err = -EINVAL;
3038 + out_err:
3039 + ClearPageUptodate(page);
3040 +diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
3041 +index fa4d2aba5a701..64d6c8c9f1ff2 100644
3042 +--- a/fs/fuse/dev.c
3043 ++++ b/fs/fuse/dev.c
3044 +@@ -839,17 +839,17 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
3045 + goto out_put_old;
3046 + }
3047 +
3048 ++ get_page(newpage);
3049 ++
3050 ++ if (!(buf->flags & PIPE_BUF_FLAG_LRU))
3051 ++ lru_cache_add_file(newpage);
3052 ++
3053 + /*
3054 + * Release while we have extra ref on stolen page. Otherwise
3055 + * anon_pipe_buf_release() might think the page can be reused.
3056 + */
3057 + pipe_buf_release(cs->pipe, buf);
3058 +
3059 +- get_page(newpage);
3060 +-
3061 +- if (!(buf->flags & PIPE_BUF_FLAG_LRU))
3062 +- lru_cache_add_file(newpage);
3063 +-
3064 + err = 0;
3065 + spin_lock(&cs->req->waitq.lock);
3066 + if (test_bit(FR_ABORTED, &cs->req->flags))
3067 +diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
3068 +index aed865a846296..2b78f7b8d5467 100644
3069 +--- a/fs/nfs/nfs42xdr.c
3070 ++++ b/fs/nfs/nfs42xdr.c
3071 +@@ -769,8 +769,7 @@ static int nfs4_xdr_dec_clone(struct rpc_rqst *rqstp,
3072 + status = decode_clone(xdr);
3073 + if (status)
3074 + goto out;
3075 +- status = decode_getfattr(xdr, res->dst_fattr, res->server);
3076 +-
3077 ++ decode_getfattr(xdr, res->dst_fattr, res->server);
3078 + out:
3079 + res->rpc_status = status;
3080 + return status;
3081 +diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
3082 +index 080ca9d5eccbb..b1102a31a1085 100644
3083 +--- a/fs/proc/vmcore.c
3084 ++++ b/fs/proc/vmcore.c
3085 +@@ -125,9 +125,13 @@ ssize_t read_from_oldmem(char *buf, size_t count,
3086 + nr_bytes = count;
3087 +
3088 + /* If pfn is not ram, return zeros for sparse dump files */
3089 +- if (pfn_is_ram(pfn) == 0)
3090 +- memset(buf, 0, nr_bytes);
3091 +- else {
3092 ++ if (pfn_is_ram(pfn) == 0) {
3093 ++ tmp = 0;
3094 ++ if (!userbuf)
3095 ++ memset(buf, 0, nr_bytes);
3096 ++ else if (clear_user(buf, nr_bytes))
3097 ++ tmp = -EFAULT;
3098 ++ } else {
3099 + if (encrypted)
3100 + tmp = copy_oldmem_page_encrypted(pfn, buf,
3101 + nr_bytes,
3102 +@@ -136,10 +140,10 @@ ssize_t read_from_oldmem(char *buf, size_t count,
3103 + else
3104 + tmp = copy_oldmem_page(pfn, buf, nr_bytes,
3105 + offset, userbuf);
3106 +-
3107 +- if (tmp < 0)
3108 +- return tmp;
3109 + }
3110 ++ if (tmp < 0)
3111 ++ return tmp;
3112 ++
3113 + *ppos += nr_bytes;
3114 + count -= nr_bytes;
3115 + buf += nr_bytes;
3116 +diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
3117 +index c309f43bde45e..f8c4d9f97819f 100644
3118 +--- a/include/linux/ipc_namespace.h
3119 ++++ b/include/linux/ipc_namespace.h
3120 +@@ -130,6 +130,16 @@ static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
3121 + return ns;
3122 + }
3123 +
3124 ++static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns)
3125 ++{
3126 ++ if (ns) {
3127 ++ if (refcount_inc_not_zero(&ns->count))
3128 ++ return ns;
3129 ++ }
3130 ++
3131 ++ return NULL;
3132 ++}
3133 ++
3134 + extern void put_ipc_ns(struct ipc_namespace *ns);
3135 + #else
3136 + static inline struct ipc_namespace *copy_ipcs(unsigned long flags,
3137 +@@ -146,6 +156,11 @@ static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
3138 + return ns;
3139 + }
3140 +
3141 ++static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns)
3142 ++{
3143 ++ return ns;
3144 ++}
3145 ++
3146 + static inline void put_ipc_ns(struct ipc_namespace *ns)
3147 + {
3148 + }
3149 +diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
3150 +index 4b1c3b664f517..36f3011ab6013 100644
3151 +--- a/include/linux/sched/task.h
3152 ++++ b/include/linux/sched/task.h
3153 +@@ -157,7 +157,7 @@ static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
3154 + * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
3155 + * subscriptions and synchronises with wait4(). Also used in procfs. Also
3156 + * pins the final release of task.io_context. Also protects ->cpuset and
3157 +- * ->cgroup.subsys[]. And ->vfork_done.
3158 ++ * ->cgroup.subsys[]. And ->vfork_done. And ->sysvshm.shm_clist.
3159 + *
3160 + * Nests both inside and outside of read_lock(&tasklist_lock).
3161 + * It must not be nested with write_lock_irq(&tasklist_lock),
3162 +diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
3163 +index bd0f1595bdc71..05ecaefeb6322 100644
3164 +--- a/include/net/ip6_fib.h
3165 ++++ b/include/net/ip6_fib.h
3166 +@@ -451,6 +451,7 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
3167 + struct fib6_config *cfg, gfp_t gfp_flags,
3168 + struct netlink_ext_ack *extack);
3169 + void fib6_nh_release(struct fib6_nh *fib6_nh);
3170 ++void fib6_nh_release_dsts(struct fib6_nh *fib6_nh);
3171 +
3172 + int call_fib6_entry_notifiers(struct net *net,
3173 + enum fib_event_type event_type,
3174 +diff --git a/include/net/ipv6_stubs.h b/include/net/ipv6_stubs.h
3175 +index 3e7d2c0e79ca1..af9e127779adf 100644
3176 +--- a/include/net/ipv6_stubs.h
3177 ++++ b/include/net/ipv6_stubs.h
3178 +@@ -47,6 +47,7 @@ struct ipv6_stub {
3179 + struct fib6_config *cfg, gfp_t gfp_flags,
3180 + struct netlink_ext_ack *extack);
3181 + void (*fib6_nh_release)(struct fib6_nh *fib6_nh);
3182 ++ void (*fib6_nh_release_dsts)(struct fib6_nh *fib6_nh);
3183 + void (*fib6_update_sernum)(struct net *net, struct fib6_info *rt);
3184 + int (*ip6_del_rt)(struct net *net, struct fib6_info *rt);
3185 + void (*fib6_rt_update)(struct net *net, struct fib6_info *rt,
3186 +diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
3187 +index 33979017b7824..004e49f748419 100644
3188 +--- a/include/net/nfc/nci_core.h
3189 ++++ b/include/net/nfc/nci_core.h
3190 +@@ -30,6 +30,7 @@ enum nci_flag {
3191 + NCI_UP,
3192 + NCI_DATA_EXCHANGE,
3193 + NCI_DATA_EXCHANGE_TO,
3194 ++ NCI_UNREG,
3195 + };
3196 +
3197 + /* NCI device states */
3198 +diff --git a/include/net/nl802154.h b/include/net/nl802154.h
3199 +index ddcee128f5d9a..145acb8f25095 100644
3200 +--- a/include/net/nl802154.h
3201 ++++ b/include/net/nl802154.h
3202 +@@ -19,6 +19,8 @@
3203 + *
3204 + */
3205 +
3206 ++#include <linux/types.h>
3207 ++
3208 + #define NL802154_GENL_NAME "nl802154"
3209 +
3210 + enum nl802154_commands {
3211 +@@ -150,10 +152,9 @@ enum nl802154_attrs {
3212 + };
3213 +
3214 + enum nl802154_iftype {
3215 +- /* for backwards compatibility TODO */
3216 +- NL802154_IFTYPE_UNSPEC = -1,
3217 ++ NL802154_IFTYPE_UNSPEC = (~(__u32)0),
3218 +
3219 +- NL802154_IFTYPE_NODE,
3220 ++ NL802154_IFTYPE_NODE = 0,
3221 + NL802154_IFTYPE_MONITOR,
3222 + NL802154_IFTYPE_COORD,
3223 +
3224 +diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
3225 +index 3f40501fc60b1..b39cdbc522ec7 100644
3226 +--- a/include/xen/interface/io/ring.h
3227 ++++ b/include/xen/interface/io/ring.h
3228 +@@ -1,21 +1,53 @@
3229 +-/* SPDX-License-Identifier: GPL-2.0 */
3230 + /******************************************************************************
3231 + * ring.h
3232 + *
3233 + * Shared producer-consumer ring macros.
3234 + *
3235 ++ * Permission is hereby granted, free of charge, to any person obtaining a copy
3236 ++ * of this software and associated documentation files (the "Software"), to
3237 ++ * deal in the Software without restriction, including without limitation the
3238 ++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
3239 ++ * sell copies of the Software, and to permit persons to whom the Software is
3240 ++ * furnished to do so, subject to the following conditions:
3241 ++ *
3242 ++ * The above copyright notice and this permission notice shall be included in
3243 ++ * all copies or substantial portions of the Software.
3244 ++ *
3245 ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
3246 ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
3247 ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
3248 ++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
3249 ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
3250 ++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
3251 ++ * DEALINGS IN THE SOFTWARE.
3252 ++ *
3253 + * Tim Deegan and Andrew Warfield November 2004.
3254 + */
3255 +
3256 + #ifndef __XEN_PUBLIC_IO_RING_H__
3257 + #define __XEN_PUBLIC_IO_RING_H__
3258 +
3259 ++/*
3260 ++ * When #include'ing this header, you need to provide the following
3261 ++ * declaration upfront:
3262 ++ * - standard integers types (uint8_t, uint16_t, etc)
3263 ++ * They are provided by stdint.h of the standard headers.
3264 ++ *
3265 ++ * In addition, if you intend to use the FLEX macros, you also need to
3266 ++ * provide the following, before invoking the FLEX macros:
3267 ++ * - size_t
3268 ++ * - memcpy
3269 ++ * - grant_ref_t
3270 ++ * These declarations are provided by string.h of the standard headers,
3271 ++ * and grant_table.h from the Xen public headers.
3272 ++ */
3273 ++
3274 + #include <xen/interface/grant_table.h>
3275 +
3276 + typedef unsigned int RING_IDX;
3277 +
3278 + /* Round a 32-bit unsigned constant down to the nearest power of two. */
3279 +-#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
3280 ++#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
3281 + #define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x))
3282 + #define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x))
3283 + #define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x))
3284 +@@ -27,82 +59,79 @@ typedef unsigned int RING_IDX;
3285 + * A ring contains as many entries as will fit, rounded down to the nearest
3286 + * power of two (so we can mask with (size-1) to loop around).
3287 + */
3288 +-#define __CONST_RING_SIZE(_s, _sz) \
3289 +- (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
3290 +- sizeof(((struct _s##_sring *)0)->ring[0])))
3291 +-
3292 ++#define __CONST_RING_SIZE(_s, _sz) \
3293 ++ (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
3294 ++ sizeof(((struct _s##_sring *)0)->ring[0])))
3295 + /*
3296 + * The same for passing in an actual pointer instead of a name tag.
3297 + */
3298 +-#define __RING_SIZE(_s, _sz) \
3299 +- (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
3300 ++#define __RING_SIZE(_s, _sz) \
3301 ++ (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
3302 +
3303 + /*
3304 + * Macros to make the correct C datatypes for a new kind of ring.
3305 + *
3306 + * To make a new ring datatype, you need to have two message structures,
3307 +- * let's say struct request, and struct response already defined.
3308 ++ * let's say request_t, and response_t already defined.
3309 + *
3310 + * In a header where you want the ring datatype declared, you then do:
3311 + *
3312 +- * DEFINE_RING_TYPES(mytag, struct request, struct response);
3313 ++ * DEFINE_RING_TYPES(mytag, request_t, response_t);
3314 + *
3315 + * These expand out to give you a set of types, as you can see below.
3316 + * The most important of these are:
3317 + *
3318 +- * struct mytag_sring - The shared ring.
3319 +- * struct mytag_front_ring - The 'front' half of the ring.
3320 +- * struct mytag_back_ring - The 'back' half of the ring.
3321 ++ * mytag_sring_t - The shared ring.
3322 ++ * mytag_front_ring_t - The 'front' half of the ring.
3323 ++ * mytag_back_ring_t - The 'back' half of the ring.
3324 + *
3325 + * To initialize a ring in your code you need to know the location and size
3326 + * of the shared memory area (PAGE_SIZE, for instance). To initialise
3327 + * the front half:
3328 + *
3329 +- * struct mytag_front_ring front_ring;
3330 +- * SHARED_RING_INIT((struct mytag_sring *)shared_page);
3331 +- * FRONT_RING_INIT(&front_ring, (struct mytag_sring *)shared_page,
3332 +- * PAGE_SIZE);
3333 ++ * mytag_front_ring_t front_ring;
3334 ++ * SHARED_RING_INIT((mytag_sring_t *)shared_page);
3335 ++ * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
3336 + *
3337 + * Initializing the back follows similarly (note that only the front
3338 + * initializes the shared ring):
3339 + *
3340 +- * struct mytag_back_ring back_ring;
3341 +- * BACK_RING_INIT(&back_ring, (struct mytag_sring *)shared_page,
3342 +- * PAGE_SIZE);
3343 ++ * mytag_back_ring_t back_ring;
3344 ++ * BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
3345 + */
3346 +
3347 +-#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
3348 +- \
3349 +-/* Shared ring entry */ \
3350 +-union __name##_sring_entry { \
3351 +- __req_t req; \
3352 +- __rsp_t rsp; \
3353 +-}; \
3354 +- \
3355 +-/* Shared ring page */ \
3356 +-struct __name##_sring { \
3357 +- RING_IDX req_prod, req_event; \
3358 +- RING_IDX rsp_prod, rsp_event; \
3359 +- uint8_t pad[48]; \
3360 +- union __name##_sring_entry ring[1]; /* variable-length */ \
3361 +-}; \
3362 +- \
3363 +-/* "Front" end's private variables */ \
3364 +-struct __name##_front_ring { \
3365 +- RING_IDX req_prod_pvt; \
3366 +- RING_IDX rsp_cons; \
3367 +- unsigned int nr_ents; \
3368 +- struct __name##_sring *sring; \
3369 +-}; \
3370 +- \
3371 +-/* "Back" end's private variables */ \
3372 +-struct __name##_back_ring { \
3373 +- RING_IDX rsp_prod_pvt; \
3374 +- RING_IDX req_cons; \
3375 +- unsigned int nr_ents; \
3376 +- struct __name##_sring *sring; \
3377 +-};
3378 +-
3379 ++#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
3380 ++ \
3381 ++/* Shared ring entry */ \
3382 ++union __name##_sring_entry { \
3383 ++ __req_t req; \
3384 ++ __rsp_t rsp; \
3385 ++}; \
3386 ++ \
3387 ++/* Shared ring page */ \
3388 ++struct __name##_sring { \
3389 ++ RING_IDX req_prod, req_event; \
3390 ++ RING_IDX rsp_prod, rsp_event; \
3391 ++ uint8_t __pad[48]; \
3392 ++ union __name##_sring_entry ring[1]; /* variable-length */ \
3393 ++}; \
3394 ++ \
3395 ++/* "Front" end's private variables */ \
3396 ++struct __name##_front_ring { \
3397 ++ RING_IDX req_prod_pvt; \
3398 ++ RING_IDX rsp_cons; \
3399 ++ unsigned int nr_ents; \
3400 ++ struct __name##_sring *sring; \
3401 ++}; \
3402 ++ \
3403 ++/* "Back" end's private variables */ \
3404 ++struct __name##_back_ring { \
3405 ++ RING_IDX rsp_prod_pvt; \
3406 ++ RING_IDX req_cons; \
3407 ++ unsigned int nr_ents; \
3408 ++ struct __name##_sring *sring; \
3409 ++}; \
3410 ++ \
3411 + /*
3412 + * Macros for manipulating rings.
3413 + *
3414 +@@ -119,105 +148,99 @@ struct __name##_back_ring { \
3415 + */
3416 +
3417 + /* Initialising empty rings */
3418 +-#define SHARED_RING_INIT(_s) do { \
3419 +- (_s)->req_prod = (_s)->rsp_prod = 0; \
3420 +- (_s)->req_event = (_s)->rsp_event = 1; \
3421 +- memset((_s)->pad, 0, sizeof((_s)->pad)); \
3422 ++#define SHARED_RING_INIT(_s) do { \
3423 ++ (_s)->req_prod = (_s)->rsp_prod = 0; \
3424 ++ (_s)->req_event = (_s)->rsp_event = 1; \
3425 ++ (void)memset((_s)->__pad, 0, sizeof((_s)->__pad)); \
3426 + } while(0)
3427 +
3428 +-#define FRONT_RING_INIT(_r, _s, __size) do { \
3429 +- (_r)->req_prod_pvt = 0; \
3430 +- (_r)->rsp_cons = 0; \
3431 +- (_r)->nr_ents = __RING_SIZE(_s, __size); \
3432 +- (_r)->sring = (_s); \
3433 ++#define FRONT_RING_ATTACH(_r, _s, _i, __size) do { \
3434 ++ (_r)->req_prod_pvt = (_i); \
3435 ++ (_r)->rsp_cons = (_i); \
3436 ++ (_r)->nr_ents = __RING_SIZE(_s, __size); \
3437 ++ (_r)->sring = (_s); \
3438 + } while (0)
3439 +
3440 +-#define BACK_RING_INIT(_r, _s, __size) do { \
3441 +- (_r)->rsp_prod_pvt = 0; \
3442 +- (_r)->req_cons = 0; \
3443 +- (_r)->nr_ents = __RING_SIZE(_s, __size); \
3444 +- (_r)->sring = (_s); \
3445 +-} while (0)
3446 ++#define FRONT_RING_INIT(_r, _s, __size) FRONT_RING_ATTACH(_r, _s, 0, __size)
3447 +
3448 +-/* Initialize to existing shared indexes -- for recovery */
3449 +-#define FRONT_RING_ATTACH(_r, _s, __size) do { \
3450 +- (_r)->sring = (_s); \
3451 +- (_r)->req_prod_pvt = (_s)->req_prod; \
3452 +- (_r)->rsp_cons = (_s)->rsp_prod; \
3453 +- (_r)->nr_ents = __RING_SIZE(_s, __size); \
3454 ++#define BACK_RING_ATTACH(_r, _s, _i, __size) do { \
3455 ++ (_r)->rsp_prod_pvt = (_i); \
3456 ++ (_r)->req_cons = (_i); \
3457 ++ (_r)->nr_ents = __RING_SIZE(_s, __size); \
3458 ++ (_r)->sring = (_s); \
3459 + } while (0)
3460 +
3461 +-#define BACK_RING_ATTACH(_r, _s, __size) do { \
3462 +- (_r)->sring = (_s); \
3463 +- (_r)->rsp_prod_pvt = (_s)->rsp_prod; \
3464 +- (_r)->req_cons = (_s)->req_prod; \
3465 +- (_r)->nr_ents = __RING_SIZE(_s, __size); \
3466 +-} while (0)
3467 ++#define BACK_RING_INIT(_r, _s, __size) BACK_RING_ATTACH(_r, _s, 0, __size)
3468 +
3469 + /* How big is this ring? */
3470 +-#define RING_SIZE(_r) \
3471 ++#define RING_SIZE(_r) \
3472 + ((_r)->nr_ents)
3473 +
3474 + /* Number of free requests (for use on front side only). */
3475 +-#define RING_FREE_REQUESTS(_r) \
3476 ++#define RING_FREE_REQUESTS(_r) \
3477 + (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))
3478 +
3479 + /* Test if there is an empty slot available on the front ring.
3480 + * (This is only meaningful from the front. )
3481 + */
3482 +-#define RING_FULL(_r) \
3483 ++#define RING_FULL(_r) \
3484 + (RING_FREE_REQUESTS(_r) == 0)
3485 +
3486 + /* Test if there are outstanding messages to be processed on a ring. */
3487 +-#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
3488 ++#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
3489 + ((_r)->sring->rsp_prod - (_r)->rsp_cons)
3490 +
3491 +-#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
3492 +- ({ \
3493 +- unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
3494 +- unsigned int rsp = RING_SIZE(_r) - \
3495 +- ((_r)->req_cons - (_r)->rsp_prod_pvt); \
3496 +- req < rsp ? req : rsp; \
3497 +- })
3498 ++#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \
3499 ++ unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
3500 ++ unsigned int rsp = RING_SIZE(_r) - \
3501 ++ ((_r)->req_cons - (_r)->rsp_prod_pvt); \
3502 ++ req < rsp ? req : rsp; \
3503 ++})
3504 +
3505 + /* Direct access to individual ring elements, by index. */
3506 +-#define RING_GET_REQUEST(_r, _idx) \
3507 ++#define RING_GET_REQUEST(_r, _idx) \
3508 + (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
3509 +
3510 ++#define RING_GET_RESPONSE(_r, _idx) \
3511 ++ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
3512 ++
3513 + /*
3514 +- * Get a local copy of a request.
3515 ++ * Get a local copy of a request/response.
3516 + *
3517 +- * Use this in preference to RING_GET_REQUEST() so all processing is
3518 ++ * Use this in preference to RING_GET_{REQUEST,RESPONSE}() so all processing is
3519 + * done on a local copy that cannot be modified by the other end.
3520 + *
3521 + * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
3522 +- * to be ineffective where _req is a struct which consists of only bitfields.
3523 ++ * to be ineffective where dest is a struct which consists of only bitfields.
3524 + */
3525 +-#define RING_COPY_REQUEST(_r, _idx, _req) do { \
3526 +- /* Use volatile to force the copy into _req. */ \
3527 +- *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \
3528 ++#define RING_COPY_(type, r, idx, dest) do { \
3529 ++ /* Use volatile to force the copy into dest. */ \
3530 ++ *(dest) = *(volatile typeof(dest))RING_GET_##type(r, idx); \
3531 + } while (0)
3532 +
3533 +-#define RING_GET_RESPONSE(_r, _idx) \
3534 +- (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
3535 ++#define RING_COPY_REQUEST(r, idx, req) RING_COPY_(REQUEST, r, idx, req)
3536 ++#define RING_COPY_RESPONSE(r, idx, rsp) RING_COPY_(RESPONSE, r, idx, rsp)
3537 +
3538 + /* Loop termination condition: Would the specified index overflow the ring? */
3539 +-#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
3540 ++#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
3541 + (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
3542 +
3543 + /* Ill-behaved frontend determination: Can there be this many requests? */
3544 +-#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
3545 ++#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
3546 + (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))
3547 +
3548 ++/* Ill-behaved backend determination: Can there be this many responses? */
3549 ++#define RING_RESPONSE_PROD_OVERFLOW(_r, _prod) \
3550 ++ (((_prod) - (_r)->rsp_cons) > RING_SIZE(_r))
3551 +
3552 +-#define RING_PUSH_REQUESTS(_r) do { \
3553 +- virt_wmb(); /* back sees requests /before/ updated producer index */ \
3554 +- (_r)->sring->req_prod = (_r)->req_prod_pvt; \
3555 ++#define RING_PUSH_REQUESTS(_r) do { \
3556 ++ virt_wmb(); /* back sees requests /before/ updated producer index */\
3557 ++ (_r)->sring->req_prod = (_r)->req_prod_pvt; \
3558 + } while (0)
3559 +
3560 +-#define RING_PUSH_RESPONSES(_r) do { \
3561 +- virt_wmb(); /* front sees responses /before/ updated producer index */ \
3562 +- (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
3563 ++#define RING_PUSH_RESPONSES(_r) do { \
3564 ++ virt_wmb(); /* front sees resps /before/ updated producer index */ \
3565 ++ (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
3566 + } while (0)
3567 +
3568 + /*
3569 +@@ -250,40 +273,40 @@ struct __name##_back_ring { \
3570 + * field appropriately.
3571 + */
3572 +
3573 +-#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
3574 +- RING_IDX __old = (_r)->sring->req_prod; \
3575 +- RING_IDX __new = (_r)->req_prod_pvt; \
3576 +- virt_wmb(); /* back sees requests /before/ updated producer index */ \
3577 +- (_r)->sring->req_prod = __new; \
3578 +- virt_mb(); /* back sees new requests /before/ we check req_event */ \
3579 +- (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
3580 +- (RING_IDX)(__new - __old)); \
3581 ++#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
3582 ++ RING_IDX __old = (_r)->sring->req_prod; \
3583 ++ RING_IDX __new = (_r)->req_prod_pvt; \
3584 ++ virt_wmb(); /* back sees requests /before/ updated producer index */\
3585 ++ (_r)->sring->req_prod = __new; \
3586 ++ virt_mb(); /* back sees new requests /before/ we check req_event */ \
3587 ++ (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
3588 ++ (RING_IDX)(__new - __old)); \
3589 + } while (0)
3590 +
3591 +-#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
3592 +- RING_IDX __old = (_r)->sring->rsp_prod; \
3593 +- RING_IDX __new = (_r)->rsp_prod_pvt; \
3594 +- virt_wmb(); /* front sees responses /before/ updated producer index */ \
3595 +- (_r)->sring->rsp_prod = __new; \
3596 +- virt_mb(); /* front sees new responses /before/ we check rsp_event */ \
3597 +- (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
3598 +- (RING_IDX)(__new - __old)); \
3599 ++#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
3600 ++ RING_IDX __old = (_r)->sring->rsp_prod; \
3601 ++ RING_IDX __new = (_r)->rsp_prod_pvt; \
3602 ++ virt_wmb(); /* front sees resps /before/ updated producer index */ \
3603 ++ (_r)->sring->rsp_prod = __new; \
3604 ++ virt_mb(); /* front sees new resps /before/ we check rsp_event */ \
3605 ++ (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
3606 ++ (RING_IDX)(__new - __old)); \
3607 + } while (0)
3608 +
3609 +-#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
3610 +- (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
3611 +- if (_work_to_do) break; \
3612 +- (_r)->sring->req_event = (_r)->req_cons + 1; \
3613 +- virt_mb(); \
3614 +- (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
3615 ++#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
3616 ++ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
3617 ++ if (_work_to_do) break; \
3618 ++ (_r)->sring->req_event = (_r)->req_cons + 1; \
3619 ++ virt_mb(); \
3620 ++ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
3621 + } while (0)
3622 +
3623 +-#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
3624 +- (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
3625 +- if (_work_to_do) break; \
3626 +- (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
3627 +- virt_mb(); \
3628 +- (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
3629 ++#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
3630 ++ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
3631 ++ if (_work_to_do) break; \
3632 ++ (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
3633 ++ virt_mb(); \
3634 ++ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
3635 + } while (0)
3636 +
3637 +
3638 +diff --git a/ipc/shm.c b/ipc/shm.c
3639 +index ce1ca9f7c6e97..984addb5aeb5e 100644
3640 +--- a/ipc/shm.c
3641 ++++ b/ipc/shm.c
3642 +@@ -62,9 +62,18 @@ struct shmid_kernel /* private to the kernel */
3643 + struct pid *shm_lprid;
3644 + struct user_struct *mlock_user;
3645 +
3646 +- /* The task created the shm object. NULL if the task is dead. */
3647 ++ /*
3648 ++ * The task created the shm object, for
3649 ++ * task_lock(shp->shm_creator)
3650 ++ */
3651 + struct task_struct *shm_creator;
3652 +- struct list_head shm_clist; /* list by creator */
3653 ++
3654 ++ /*
3655 ++ * List by creator. task_lock(->shm_creator) required for read/write.
3656 ++ * If list_empty(), then the creator is dead already.
3657 ++ */
3658 ++ struct list_head shm_clist;
3659 ++ struct ipc_namespace *ns;
3660 + } __randomize_layout;
3661 +
3662 + /* shm_mode upper byte flags */
3663 +@@ -115,6 +124,7 @@ static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
3664 + struct shmid_kernel *shp;
3665 +
3666 + shp = container_of(ipcp, struct shmid_kernel, shm_perm);
3667 ++ WARN_ON(ns != shp->ns);
3668 +
3669 + if (shp->shm_nattch) {
3670 + shp->shm_perm.mode |= SHM_DEST;
3671 +@@ -225,10 +235,43 @@ static void shm_rcu_free(struct rcu_head *head)
3672 + kvfree(shp);
3673 + }
3674 +
3675 +-static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
3676 ++/*
3677 ++ * It has to be called with shp locked.
3678 ++ * It must be called before ipc_rmid()
3679 ++ */
3680 ++static inline void shm_clist_rm(struct shmid_kernel *shp)
3681 + {
3682 +- list_del(&s->shm_clist);
3683 +- ipc_rmid(&shm_ids(ns), &s->shm_perm);
3684 ++ struct task_struct *creator;
3685 ++
3686 ++ /* ensure that shm_creator does not disappear */
3687 ++ rcu_read_lock();
3688 ++
3689 ++ /*
3690 ++ * A concurrent exit_shm may do a list_del_init() as well.
3691 ++ * Just do nothing if exit_shm already did the work
3692 ++ */
3693 ++ if (!list_empty(&shp->shm_clist)) {
3694 ++ /*
3695 ++ * shp->shm_creator is guaranteed to be valid *only*
3696 ++ * if shp->shm_clist is not empty.
3697 ++ */
3698 ++ creator = shp->shm_creator;
3699 ++
3700 ++ task_lock(creator);
3701 ++ /*
3702 ++ * list_del_init() is a nop if the entry was already removed
3703 ++ * from the list.
3704 ++ */
3705 ++ list_del_init(&shp->shm_clist);
3706 ++ task_unlock(creator);
3707 ++ }
3708 ++ rcu_read_unlock();
3709 ++}
3710 ++
3711 ++static inline void shm_rmid(struct shmid_kernel *s)
3712 ++{
3713 ++ shm_clist_rm(s);
3714 ++ ipc_rmid(&shm_ids(s->ns), &s->shm_perm);
3715 + }
3716 +
3717 +
3718 +@@ -283,7 +326,7 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
3719 + shm_file = shp->shm_file;
3720 + shp->shm_file = NULL;
3721 + ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
3722 +- shm_rmid(ns, shp);
3723 ++ shm_rmid(shp);
3724 + shm_unlock(shp);
3725 + if (!is_file_hugepages(shm_file))
3726 + shmem_lock(shm_file, 0, shp->mlock_user);
3727 +@@ -306,10 +349,10 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
3728 + *
3729 + * 2) sysctl kernel.shm_rmid_forced is set to 1.
3730 + */
3731 +-static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
3732 ++static bool shm_may_destroy(struct shmid_kernel *shp)
3733 + {
3734 + return (shp->shm_nattch == 0) &&
3735 +- (ns->shm_rmid_forced ||
3736 ++ (shp->ns->shm_rmid_forced ||
3737 + (shp->shm_perm.mode & SHM_DEST));
3738 + }
3739 +
3740 +@@ -340,7 +383,7 @@ static void shm_close(struct vm_area_struct *vma)
3741 + ipc_update_pid(&shp->shm_lprid, task_tgid(current));
3742 + shp->shm_dtim = ktime_get_real_seconds();
3743 + shp->shm_nattch--;
3744 +- if (shm_may_destroy(ns, shp))
3745 ++ if (shm_may_destroy(shp))
3746 + shm_destroy(ns, shp);
3747 + else
3748 + shm_unlock(shp);
3749 +@@ -361,10 +404,10 @@ static int shm_try_destroy_orphaned(int id, void *p, void *data)
3750 + *
3751 + * As shp->* are changed under rwsem, it's safe to skip shp locking.
3752 + */
3753 +- if (shp->shm_creator != NULL)
3754 ++ if (!list_empty(&shp->shm_clist))
3755 + return 0;
3756 +
3757 +- if (shm_may_destroy(ns, shp)) {
3758 ++ if (shm_may_destroy(shp)) {
3759 + shm_lock_by_ptr(shp);
3760 + shm_destroy(ns, shp);
3761 + }
3762 +@@ -382,48 +425,97 @@ void shm_destroy_orphaned(struct ipc_namespace *ns)
3763 + /* Locking assumes this will only be called with task == current */
3764 + void exit_shm(struct task_struct *task)
3765 + {
3766 +- struct ipc_namespace *ns = task->nsproxy->ipc_ns;
3767 +- struct shmid_kernel *shp, *n;
3768 ++ for (;;) {
3769 ++ struct shmid_kernel *shp;
3770 ++ struct ipc_namespace *ns;
3771 +
3772 +- if (list_empty(&task->sysvshm.shm_clist))
3773 +- return;
3774 ++ task_lock(task);
3775 ++
3776 ++ if (list_empty(&task->sysvshm.shm_clist)) {
3777 ++ task_unlock(task);
3778 ++ break;
3779 ++ }
3780 ++
3781 ++ shp = list_first_entry(&task->sysvshm.shm_clist, struct shmid_kernel,
3782 ++ shm_clist);
3783 +
3784 +- /*
3785 +- * If kernel.shm_rmid_forced is not set then only keep track of
3786 +- * which shmids are orphaned, so that a later set of the sysctl
3787 +- * can clean them up.
3788 +- */
3789 +- if (!ns->shm_rmid_forced) {
3790 +- down_read(&shm_ids(ns).rwsem);
3791 +- list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist)
3792 +- shp->shm_creator = NULL;
3793 + /*
3794 +- * Only under read lock but we are only called on current
3795 +- * so no entry on the list will be shared.
3796 ++ * 1) Get pointer to the ipc namespace. It is worth to say
3797 ++ * that this pointer is guaranteed to be valid because
3798 ++ * shp lifetime is always shorter than namespace lifetime
3799 ++ * in which shp lives.
3800 ++ * We taken task_lock it means that shp won't be freed.
3801 + */
3802 +- list_del(&task->sysvshm.shm_clist);
3803 +- up_read(&shm_ids(ns).rwsem);
3804 +- return;
3805 +- }
3806 ++ ns = shp->ns;
3807 +
3808 +- /*
3809 +- * Destroy all already created segments, that were not yet mapped,
3810 +- * and mark any mapped as orphan to cover the sysctl toggling.
3811 +- * Destroy is skipped if shm_may_destroy() returns false.
3812 +- */
3813 +- down_write(&shm_ids(ns).rwsem);
3814 +- list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) {
3815 +- shp->shm_creator = NULL;
3816 ++ /*
3817 ++ * 2) If kernel.shm_rmid_forced is not set then only keep track of
3818 ++ * which shmids are orphaned, so that a later set of the sysctl
3819 ++ * can clean them up.
3820 ++ */
3821 ++ if (!ns->shm_rmid_forced)
3822 ++ goto unlink_continue;
3823 +
3824 +- if (shm_may_destroy(ns, shp)) {
3825 +- shm_lock_by_ptr(shp);
3826 +- shm_destroy(ns, shp);
3827 ++ /*
3828 ++ * 3) get a reference to the namespace.
3829 ++ * The refcount could be already 0. If it is 0, then
3830 ++ * the shm objects will be free by free_ipc_work().
3831 ++ */
3832 ++ ns = get_ipc_ns_not_zero(ns);
3833 ++ if (!ns) {
3834 ++unlink_continue:
3835 ++ list_del_init(&shp->shm_clist);
3836 ++ task_unlock(task);
3837 ++ continue;
3838 + }
3839 +- }
3840 +
3841 +- /* Remove the list head from any segments still attached. */
3842 +- list_del(&task->sysvshm.shm_clist);
3843 +- up_write(&shm_ids(ns).rwsem);
3844 ++ /*
3845 ++ * 4) get a reference to shp.
3846 ++ * This cannot fail: shm_clist_rm() is called before
3847 ++ * ipc_rmid(), thus the refcount cannot be 0.
3848 ++ */
3849 ++ WARN_ON(!ipc_rcu_getref(&shp->shm_perm));
3850 ++
3851 ++ /*
3852 ++ * 5) unlink the shm segment from the list of segments
3853 ++ * created by current.
3854 ++ * This must be done last. After unlinking,
3855 ++ * only the refcounts obtained above prevent IPC_RMID
3856 ++ * from destroying the segment or the namespace.
3857 ++ */
3858 ++ list_del_init(&shp->shm_clist);
3859 ++
3860 ++ task_unlock(task);
3861 ++
3862 ++ /*
3863 ++ * 6) we have all references
3864 ++ * Thus lock & if needed destroy shp.
3865 ++ */
3866 ++ down_write(&shm_ids(ns).rwsem);
3867 ++ shm_lock_by_ptr(shp);
3868 ++ /*
3869 ++ * rcu_read_lock was implicitly taken in shm_lock_by_ptr, it's
3870 ++ * safe to call ipc_rcu_putref here
3871 ++ */
3872 ++ ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
3873 ++
3874 ++ if (ipc_valid_object(&shp->shm_perm)) {
3875 ++ if (shm_may_destroy(shp))
3876 ++ shm_destroy(ns, shp);
3877 ++ else
3878 ++ shm_unlock(shp);
3879 ++ } else {
3880 ++ /*
3881 ++ * Someone else deleted the shp from namespace
3882 ++ * idr/kht while we have waited.
3883 ++ * Just unlock and continue.
3884 ++ */
3885 ++ shm_unlock(shp);
3886 ++ }
3887 ++
3888 ++ up_write(&shm_ids(ns).rwsem);
3889 ++ put_ipc_ns(ns); /* paired with get_ipc_ns_not_zero */
3890 ++ }
3891 + }
3892 +
3893 + static vm_fault_t shm_fault(struct vm_fault *vmf)
3894 +@@ -680,7 +772,11 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
3895 + if (error < 0)
3896 + goto no_id;
3897 +
3898 ++ shp->ns = ns;
3899 ++
3900 ++ task_lock(current);
3901 + list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
3902 ++ task_unlock(current);
3903 +
3904 + /*
3905 + * shmid gets reported as "inode#" in /proc/pid/maps.
3906 +@@ -1575,7 +1671,8 @@ out_nattch:
3907 + down_write(&shm_ids(ns).rwsem);
3908 + shp = shm_lock(ns, shmid);
3909 + shp->shm_nattch--;
3910 +- if (shm_may_destroy(ns, shp))
3911 ++
3912 ++ if (shm_may_destroy(shp))
3913 + shm_destroy(ns, shp);
3914 + else
3915 + shm_unlock(shp);
3916 +diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
3917 +index 69c4cd472def3..6cafb2e910a11 100644
3918 +--- a/kernel/power/hibernate.c
3919 ++++ b/kernel/power/hibernate.c
3920 +@@ -676,7 +676,7 @@ static int load_image_and_restore(void)
3921 + goto Unlock;
3922 +
3923 + error = swsusp_read(&flags);
3924 +- swsusp_close(FMODE_READ);
3925 ++ swsusp_close(FMODE_READ | FMODE_EXCL);
3926 + if (!error)
3927 + hibernation_restore(flags & SF_PLATFORM_MODE);
3928 +
3929 +@@ -871,7 +871,7 @@ static int software_resume(void)
3930 + /* The snapshot device should not be opened while we're running */
3931 + if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
3932 + error = -EBUSY;
3933 +- swsusp_close(FMODE_READ);
3934 ++ swsusp_close(FMODE_READ | FMODE_EXCL);
3935 + goto Unlock;
3936 + }
3937 +
3938 +@@ -907,7 +907,7 @@ static int software_resume(void)
3939 + pm_pr_dbg("Hibernation image not present or could not be loaded.\n");
3940 + return error;
3941 + Close_Finish:
3942 +- swsusp_close(FMODE_READ);
3943 ++ swsusp_close(FMODE_READ | FMODE_EXCL);
3944 + goto Finish;
3945 + }
3946 +
3947 +diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
3948 +index 35e9a01b54800..1d514a1a31554 100644
3949 +--- a/kernel/trace/trace.h
3950 ++++ b/kernel/trace/trace.h
3951 +@@ -1423,14 +1423,26 @@ __event_trigger_test_discard(struct trace_event_file *file,
3952 + if (eflags & EVENT_FILE_FL_TRIGGER_COND)
3953 + *tt = event_triggers_call(file, entry, event);
3954 +
3955 +- if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
3956 +- (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
3957 +- !filter_match_preds(file->filter, entry))) {
3958 +- __trace_event_discard_commit(buffer, event);
3959 +- return true;
3960 +- }
3961 ++ if (likely(!(file->flags & (EVENT_FILE_FL_SOFT_DISABLED |
3962 ++ EVENT_FILE_FL_FILTERED |
3963 ++ EVENT_FILE_FL_PID_FILTER))))
3964 ++ return false;
3965 ++
3966 ++ if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
3967 ++ goto discard;
3968 ++
3969 ++ if (file->flags & EVENT_FILE_FL_FILTERED &&
3970 ++ !filter_match_preds(file->filter, entry))
3971 ++ goto discard;
3972 ++
3973 ++ if ((file->flags & EVENT_FILE_FL_PID_FILTER) &&
3974 ++ trace_event_ignore_this_pid(file))
3975 ++ goto discard;
3976 +
3977 + return false;
3978 ++ discard:
3979 ++ __trace_event_discard_commit(buffer, event);
3980 ++ return true;
3981 + }
3982 +
3983 + /**
3984 +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
3985 +index e31ee325dad16..4acc77e049e5f 100644
3986 +--- a/kernel/trace/trace_events.c
3987 ++++ b/kernel/trace/trace_events.c
3988 +@@ -2247,12 +2247,19 @@ static struct trace_event_file *
3989 + trace_create_new_event(struct trace_event_call *call,
3990 + struct trace_array *tr)
3991 + {
3992 ++ struct trace_pid_list *pid_list;
3993 + struct trace_event_file *file;
3994 +
3995 + file = kmem_cache_alloc(file_cachep, GFP_TRACE);
3996 + if (!file)
3997 + return NULL;
3998 +
3999 ++ pid_list = rcu_dereference_protected(tr->filtered_pids,
4000 ++ lockdep_is_held(&event_mutex));
4001 ++
4002 ++ if (pid_list)
4003 ++ file->flags |= EVENT_FILE_FL_PID_FILTER;
4004 ++
4005 + file->event_call = call;
4006 + file->tr = tr;
4007 + atomic_set(&file->sm_ref, 0);
4008 +diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
4009 +index b515db036becc..efb51a23a14f2 100644
4010 +--- a/kernel/trace/trace_uprobe.c
4011 ++++ b/kernel/trace/trace_uprobe.c
4012 +@@ -1299,6 +1299,7 @@ static int uprobe_perf_open(struct trace_event_call *call,
4013 + return 0;
4014 +
4015 + list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
4016 ++ tu = container_of(pos, struct trace_uprobe, tp);
4017 + err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
4018 + if (err) {
4019 + uprobe_perf_close(call, event);
4020 +diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
4021 +index cd7c0429cddf8..796d95797ab40 100644
4022 +--- a/net/8021q/vlan.c
4023 ++++ b/net/8021q/vlan.c
4024 +@@ -177,9 +177,6 @@ int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack)
4025 + if (err)
4026 + goto out_unregister_netdev;
4027 +
4028 +- /* Account for reference in struct vlan_dev_priv */
4029 +- dev_hold(real_dev);
4030 +-
4031 + vlan_stacked_transfer_operstate(real_dev, dev, vlan);
4032 + linkwatch_fire_event(dev); /* _MUST_ call rfc2863_policy() */
4033 +
4034 +diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
4035 +index 415a29d42cdf0..589615ec490bb 100644
4036 +--- a/net/8021q/vlan_dev.c
4037 ++++ b/net/8021q/vlan_dev.c
4038 +@@ -583,6 +583,9 @@ static int vlan_dev_init(struct net_device *dev)
4039 + if (!vlan->vlan_pcpu_stats)
4040 + return -ENOMEM;
4041 +
4042 ++ /* Get vlan's reference to real_dev */
4043 ++ dev_hold(real_dev);
4044 ++
4045 + return 0;
4046 + }
4047 +
4048 +diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
4049 +index 858bb10d8341e..4d69b3de980a6 100644
4050 +--- a/net/ipv4/nexthop.c
4051 ++++ b/net/ipv4/nexthop.c
4052 +@@ -839,15 +839,36 @@ static void remove_nexthop(struct net *net, struct nexthop *nh,
4053 + /* if any FIB entries reference this nexthop, any dst entries
4054 + * need to be regenerated
4055 + */
4056 +-static void nh_rt_cache_flush(struct net *net, struct nexthop *nh)
4057 ++static void nh_rt_cache_flush(struct net *net, struct nexthop *nh,
4058 ++ struct nexthop *replaced_nh)
4059 + {
4060 + struct fib6_info *f6i;
4061 ++ struct nh_group *nhg;
4062 ++ int i;
4063 +
4064 + if (!list_empty(&nh->fi_list))
4065 + rt_cache_flush(net);
4066 +
4067 + list_for_each_entry(f6i, &nh->f6i_list, nh_list)
4068 + ipv6_stub->fib6_update_sernum(net, f6i);
4069 ++
4070 ++ /* if an IPv6 group was replaced, we have to release all old
4071 ++ * dsts to make sure all refcounts are released
4072 ++ */
4073 ++ if (!replaced_nh->is_group)
4074 ++ return;
4075 ++
4076 ++ /* new dsts must use only the new nexthop group */
4077 ++ synchronize_net();
4078 ++
4079 ++ nhg = rtnl_dereference(replaced_nh->nh_grp);
4080 ++ for (i = 0; i < nhg->num_nh; i++) {
4081 ++ struct nh_grp_entry *nhge = &nhg->nh_entries[i];
4082 ++ struct nh_info *nhi = rtnl_dereference(nhge->nh->nh_info);
4083 ++
4084 ++ if (nhi->family == AF_INET6)
4085 ++ ipv6_stub->fib6_nh_release_dsts(&nhi->fib6_nh);
4086 ++ }
4087 + }
4088 +
4089 + static int replace_nexthop_grp(struct net *net, struct nexthop *old,
4090 +@@ -994,7 +1015,7 @@ static int replace_nexthop(struct net *net, struct nexthop *old,
4091 + err = replace_nexthop_single(net, old, new, extack);
4092 +
4093 + if (!err) {
4094 +- nh_rt_cache_flush(net, old);
4095 ++ nh_rt_cache_flush(net, old, new);
4096 +
4097 + __remove_nexthop(net, new, NULL);
4098 + nexthop_put(new);
4099 +@@ -1231,11 +1252,15 @@ static int nh_create_ipv6(struct net *net, struct nexthop *nh,
4100 + /* sets nh_dev if successful */
4101 + err = ipv6_stub->fib6_nh_init(net, fib6_nh, &fib6_cfg, GFP_KERNEL,
4102 + extack);
4103 +- if (err)
4104 ++ if (err) {
4105 ++ /* IPv6 is not enabled, don't call fib6_nh_release */
4106 ++ if (err == -EAFNOSUPPORT)
4107 ++ goto out;
4108 + ipv6_stub->fib6_nh_release(fib6_nh);
4109 +- else
4110 ++ } else {
4111 + nh->nh_flags = fib6_nh->fib_nh_flags;
4112 +-
4113 ++ }
4114 ++out:
4115 + return err;
4116 + }
4117 +
4118 +diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
4119 +index ee6c38a73325d..44be7a5a13911 100644
4120 +--- a/net/ipv4/tcp_cubic.c
4121 ++++ b/net/ipv4/tcp_cubic.c
4122 +@@ -341,8 +341,6 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
4123 + return;
4124 +
4125 + if (tcp_in_slow_start(tp)) {
4126 +- if (hystart && after(ack, ca->end_seq))
4127 +- bictcp_hystart_reset(sk);
4128 + acked = tcp_slow_start(tp, acked);
4129 + if (!acked)
4130 + return;
4131 +@@ -384,6 +382,9 @@ static void hystart_update(struct sock *sk, u32 delay)
4132 + if (ca->found & hystart_detect)
4133 + return;
4134 +
4135 ++ if (after(tp->snd_una, ca->end_seq))
4136 ++ bictcp_hystart_reset(sk);
4137 ++
4138 + if (hystart_detect & HYSTART_ACK_TRAIN) {
4139 + u32 now = bictcp_clock();
4140 +
4141 +diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
4142 +index 14ac1d9112877..942da168f18fb 100644
4143 +--- a/net/ipv6/af_inet6.c
4144 ++++ b/net/ipv6/af_inet6.c
4145 +@@ -955,6 +955,7 @@ static const struct ipv6_stub ipv6_stub_impl = {
4146 + .ip6_mtu_from_fib6 = ip6_mtu_from_fib6,
4147 + .fib6_nh_init = fib6_nh_init,
4148 + .fib6_nh_release = fib6_nh_release,
4149 ++ .fib6_nh_release_dsts = fib6_nh_release_dsts,
4150 + .fib6_update_sernum = fib6_update_sernum_stub,
4151 + .fib6_rt_update = fib6_rt_update,
4152 + .ip6_del_rt = ip6_del_rt,
4153 +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
4154 +index fc913f09606db..d847aa32628da 100644
4155 +--- a/net/ipv6/ip6_output.c
4156 ++++ b/net/ipv6/ip6_output.c
4157 +@@ -192,7 +192,7 @@ static int __ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff
4158 + #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
4159 + /* Policy lookup after SNAT yielded a new policy */
4160 + if (skb_dst(skb)->xfrm) {
4161 +- IPCB(skb)->flags |= IPSKB_REROUTED;
4162 ++ IP6CB(skb)->flags |= IP6SKB_REROUTED;
4163 + return dst_output(net, sk, skb);
4164 + }
4165 + #endif
4166 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
4167 +index daa876c6ae8db..f36db3dd97346 100644
4168 +--- a/net/ipv6/route.c
4169 ++++ b/net/ipv6/route.c
4170 +@@ -3585,6 +3585,25 @@ void fib6_nh_release(struct fib6_nh *fib6_nh)
4171 + fib_nh_common_release(&fib6_nh->nh_common);
4172 + }
4173 +
4174 ++void fib6_nh_release_dsts(struct fib6_nh *fib6_nh)
4175 ++{
4176 ++ int cpu;
4177 ++
4178 ++ if (!fib6_nh->rt6i_pcpu)
4179 ++ return;
4180 ++
4181 ++ for_each_possible_cpu(cpu) {
4182 ++ struct rt6_info *pcpu_rt, **ppcpu_rt;
4183 ++
4184 ++ ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
4185 ++ pcpu_rt = xchg(ppcpu_rt, NULL);
4186 ++ if (pcpu_rt) {
4187 ++ dst_dev_put(&pcpu_rt->dst);
4188 ++ dst_release(&pcpu_rt->dst);
4189 ++ }
4190 ++ }
4191 ++}
4192 ++
4193 + static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
4194 + gfp_t gfp_flags,
4195 + struct netlink_ext_ack *extack)
4196 +diff --git a/net/ncsi/ncsi-cmd.c b/net/ncsi/ncsi-cmd.c
4197 +index 0187e65176c05..114ef47db76d3 100644
4198 +--- a/net/ncsi/ncsi-cmd.c
4199 ++++ b/net/ncsi/ncsi-cmd.c
4200 +@@ -18,6 +18,8 @@
4201 + #include "internal.h"
4202 + #include "ncsi-pkt.h"
4203 +
4204 ++static const int padding_bytes = 26;
4205 ++
4206 + u32 ncsi_calculate_checksum(unsigned char *data, int len)
4207 + {
4208 + u32 checksum = 0;
4209 +@@ -213,12 +215,17 @@ static int ncsi_cmd_handler_oem(struct sk_buff *skb,
4210 + {
4211 + struct ncsi_cmd_oem_pkt *cmd;
4212 + unsigned int len;
4213 ++ int payload;
4214 ++ /* NC-SI spec DSP_0222_1.2.0, section 8.2.2.2
4215 ++ * requires payload to be padded with 0 to
4216 ++ * 32-bit boundary before the checksum field.
4217 ++ * Ensure the padding bytes are accounted for in
4218 ++ * skb allocation
4219 ++ */
4220 +
4221 ++ payload = ALIGN(nca->payload, 4);
4222 + len = sizeof(struct ncsi_cmd_pkt_hdr) + 4;
4223 +- if (nca->payload < 26)
4224 +- len += 26;
4225 +- else
4226 +- len += nca->payload;
4227 ++ len += max(payload, padding_bytes);
4228 +
4229 + cmd = skb_put_zero(skb, len);
4230 + memcpy(&cmd->mfr_id, nca->data, nca->payload);
4231 +@@ -272,6 +279,7 @@ static struct ncsi_request *ncsi_alloc_command(struct ncsi_cmd_arg *nca)
4232 + struct net_device *dev = nd->dev;
4233 + int hlen = LL_RESERVED_SPACE(dev);
4234 + int tlen = dev->needed_tailroom;
4235 ++ int payload;
4236 + int len = hlen + tlen;
4237 + struct sk_buff *skb;
4238 + struct ncsi_request *nr;
4239 +@@ -281,14 +289,14 @@ static struct ncsi_request *ncsi_alloc_command(struct ncsi_cmd_arg *nca)
4240 + return NULL;
4241 +
4242 + /* NCSI command packet has 16-bytes header, payload, 4 bytes checksum.
4243 ++ * Payload needs padding so that the checksum field following payload is
4244 ++ * aligned to 32-bit boundary.
4245 + * The packet needs padding if its payload is less than 26 bytes to
4246 + * meet 64 bytes minimal ethernet frame length.
4247 + */
4248 + len += sizeof(struct ncsi_cmd_pkt_hdr) + 4;
4249 +- if (nca->payload < 26)
4250 +- len += 26;
4251 +- else
4252 +- len += nca->payload;
4253 ++ payload = ALIGN(nca->payload, 4);
4254 ++ len += max(payload, padding_bytes);
4255 +
4256 + /* Allocate skb */
4257 + skb = alloc_skb(len, GFP_ATOMIC);
4258 +diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
4259 +index 89aa1fc334b19..ccd6af1440745 100644
4260 +--- a/net/netfilter/ipvs/ip_vs_core.c
4261 ++++ b/net/netfilter/ipvs/ip_vs_core.c
4262 +@@ -1982,7 +1982,6 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
4263 + struct ip_vs_proto_data *pd;
4264 + struct ip_vs_conn *cp;
4265 + int ret, pkts;
4266 +- int conn_reuse_mode;
4267 + struct sock *sk;
4268 +
4269 + /* Already marked as IPVS request or reply? */
4270 +@@ -2059,15 +2058,16 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
4271 + cp = INDIRECT_CALL_1(pp->conn_in_get, ip_vs_conn_in_get_proto,
4272 + ipvs, af, skb, &iph);
4273 +
4274 +- conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
4275 +- if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) {
4276 ++ if (!iph.fragoffs && is_new_conn(skb, &iph) && cp) {
4277 ++ int conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
4278 + bool old_ct = false, resched = false;
4279 +
4280 + if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
4281 + unlikely(!atomic_read(&cp->dest->weight))) {
4282 + resched = true;
4283 + old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
4284 +- } else if (is_new_conn_expected(cp, conn_reuse_mode)) {
4285 ++ } else if (conn_reuse_mode &&
4286 ++ is_new_conn_expected(cp, conn_reuse_mode)) {
4287 + old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
4288 + if (!atomic_read(&cp->n_control)) {
4289 + resched = true;
4290 +diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
4291 +index 1d0aa9e6044bf..b8ecb002e6238 100644
4292 +--- a/net/nfc/nci/core.c
4293 ++++ b/net/nfc/nci/core.c
4294 +@@ -473,6 +473,11 @@ static int nci_open_device(struct nci_dev *ndev)
4295 +
4296 + mutex_lock(&ndev->req_lock);
4297 +
4298 ++ if (test_bit(NCI_UNREG, &ndev->flags)) {
4299 ++ rc = -ENODEV;
4300 ++ goto done;
4301 ++ }
4302 ++
4303 + if (test_bit(NCI_UP, &ndev->flags)) {
4304 + rc = -EALREADY;
4305 + goto done;
4306 +@@ -536,6 +541,10 @@ done:
4307 + static int nci_close_device(struct nci_dev *ndev)
4308 + {
4309 + nci_req_cancel(ndev, ENODEV);
4310 ++
4311 ++ /* This mutex needs to be held as a barrier for
4312 ++ * caller nci_unregister_device
4313 ++ */
4314 + mutex_lock(&ndev->req_lock);
4315 +
4316 + if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
4317 +@@ -573,8 +582,8 @@ static int nci_close_device(struct nci_dev *ndev)
4318 + /* Flush cmd wq */
4319 + flush_workqueue(ndev->cmd_wq);
4320 +
4321 +- /* Clear flags */
4322 +- ndev->flags = 0;
4323 ++ /* Clear flags except NCI_UNREG */
4324 ++ ndev->flags &= BIT(NCI_UNREG);
4325 +
4326 + mutex_unlock(&ndev->req_lock);
4327 +
4328 +@@ -1256,6 +1265,12 @@ void nci_unregister_device(struct nci_dev *ndev)
4329 + {
4330 + struct nci_conn_info *conn_info, *n;
4331 +
4332 ++ /* This set_bit is not protected with specialized barrier,
4333 ++ * However, it is fine because the mutex_lock(&ndev->req_lock);
4334 ++ * in nci_close_device() will help to emit one.
4335 ++ */
4336 ++ set_bit(NCI_UNREG, &ndev->flags);
4337 ++
4338 + nci_close_device(ndev);
4339 +
4340 + destroy_workqueue(ndev->cmd_wq);
4341 +diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
4342 +index 6b0f09c5b195f..5e1493f8deba7 100644
4343 +--- a/net/smc/af_smc.c
4344 ++++ b/net/smc/af_smc.c
4345 +@@ -1658,8 +1658,10 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
4346 + static int smc_shutdown(struct socket *sock, int how)
4347 + {
4348 + struct sock *sk = sock->sk;
4349 ++ bool do_shutdown = true;
4350 + struct smc_sock *smc;
4351 + int rc = -EINVAL;
4352 ++ int old_state;
4353 + int rc1 = 0;
4354 +
4355 + smc = smc_sk(sk);
4356 +@@ -1686,7 +1688,11 @@ static int smc_shutdown(struct socket *sock, int how)
4357 + }
4358 + switch (how) {
4359 + case SHUT_RDWR: /* shutdown in both directions */
4360 ++ old_state = sk->sk_state;
4361 + rc = smc_close_active(smc);
4362 ++ if (old_state == SMC_ACTIVE &&
4363 ++ sk->sk_state == SMC_PEERCLOSEWAIT1)
4364 ++ do_shutdown = false;
4365 + break;
4366 + case SHUT_WR:
4367 + rc = smc_close_shutdown_write(smc);
4368 +@@ -1696,7 +1702,7 @@ static int smc_shutdown(struct socket *sock, int how)
4369 + /* nothing more to do because peer is not involved */
4370 + break;
4371 + }
4372 +- if (smc->clcsock)
4373 ++ if (do_shutdown && smc->clcsock)
4374 + rc1 = kernel_sock_shutdown(smc->clcsock, how);
4375 + /* map sock_shutdown_cmd constants to sk_shutdown value range */
4376 + sk->sk_shutdown |= how + 1;
4377 +diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
4378 +index fc06720b53c14..2eabf39dee74d 100644
4379 +--- a/net/smc/smc_close.c
4380 ++++ b/net/smc/smc_close.c
4381 +@@ -218,6 +218,12 @@ again:
4382 + if (rc)
4383 + break;
4384 + sk->sk_state = SMC_PEERCLOSEWAIT1;
4385 ++
4386 ++ /* actively shutdown clcsock before peer close it,
4387 ++ * prevent peer from entering TIME_WAIT state.
4388 ++ */
4389 ++ if (smc->clcsock && smc->clcsock->sk)
4390 ++ rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
4391 + } else {
4392 + /* peer event has changed the state */
4393 + goto again;
4394 +diff --git a/sound/pci/ctxfi/ctamixer.c b/sound/pci/ctxfi/ctamixer.c
4395 +index d4ff377eb3a34..6d636bdcaa5a3 100644
4396 +--- a/sound/pci/ctxfi/ctamixer.c
4397 ++++ b/sound/pci/ctxfi/ctamixer.c
4398 +@@ -23,16 +23,15 @@
4399 +
4400 + #define BLANK_SLOT 4094
4401 +
4402 +-static int amixer_master(struct rsc *rsc)
4403 ++static void amixer_master(struct rsc *rsc)
4404 + {
4405 + rsc->conj = 0;
4406 +- return rsc->idx = container_of(rsc, struct amixer, rsc)->idx[0];
4407 ++ rsc->idx = container_of(rsc, struct amixer, rsc)->idx[0];
4408 + }
4409 +
4410 +-static int amixer_next_conj(struct rsc *rsc)
4411 ++static void amixer_next_conj(struct rsc *rsc)
4412 + {
4413 + rsc->conj++;
4414 +- return container_of(rsc, struct amixer, rsc)->idx[rsc->conj];
4415 + }
4416 +
4417 + static int amixer_index(const struct rsc *rsc)
4418 +@@ -331,16 +330,15 @@ int amixer_mgr_destroy(struct amixer_mgr *amixer_mgr)
4419 +
4420 + /* SUM resource management */
4421 +
4422 +-static int sum_master(struct rsc *rsc)
4423 ++static void sum_master(struct rsc *rsc)
4424 + {
4425 + rsc->conj = 0;
4426 +- return rsc->idx = container_of(rsc, struct sum, rsc)->idx[0];
4427 ++ rsc->idx = container_of(rsc, struct sum, rsc)->idx[0];
4428 + }
4429 +
4430 +-static int sum_next_conj(struct rsc *rsc)
4431 ++static void sum_next_conj(struct rsc *rsc)
4432 + {
4433 + rsc->conj++;
4434 +- return container_of(rsc, struct sum, rsc)->idx[rsc->conj];
4435 + }
4436 +
4437 + static int sum_index(const struct rsc *rsc)
4438 +diff --git a/sound/pci/ctxfi/ctdaio.c b/sound/pci/ctxfi/ctdaio.c
4439 +index 27441d498968d..b5e1296af09ee 100644
4440 +--- a/sound/pci/ctxfi/ctdaio.c
4441 ++++ b/sound/pci/ctxfi/ctdaio.c
4442 +@@ -51,12 +51,12 @@ static struct daio_rsc_idx idx_20k2[NUM_DAIOTYP] = {
4443 + [SPDIFIO] = {.left = 0x05, .right = 0x85},
4444 + };
4445 +
4446 +-static int daio_master(struct rsc *rsc)
4447 ++static void daio_master(struct rsc *rsc)
4448 + {
4449 + /* Actually, this is not the resource index of DAIO.
4450 + * For DAO, it is the input mapper index. And, for DAI,
4451 + * it is the output time-slot index. */
4452 +- return rsc->conj = rsc->idx;
4453 ++ rsc->conj = rsc->idx;
4454 + }
4455 +
4456 + static int daio_index(const struct rsc *rsc)
4457 +@@ -64,19 +64,19 @@ static int daio_index(const struct rsc *rsc)
4458 + return rsc->conj;
4459 + }
4460 +
4461 +-static int daio_out_next_conj(struct rsc *rsc)
4462 ++static void daio_out_next_conj(struct rsc *rsc)
4463 + {
4464 +- return rsc->conj += 2;
4465 ++ rsc->conj += 2;
4466 + }
4467 +
4468 +-static int daio_in_next_conj_20k1(struct rsc *rsc)
4469 ++static void daio_in_next_conj_20k1(struct rsc *rsc)
4470 + {
4471 +- return rsc->conj += 0x200;
4472 ++ rsc->conj += 0x200;
4473 + }
4474 +
4475 +-static int daio_in_next_conj_20k2(struct rsc *rsc)
4476 ++static void daio_in_next_conj_20k2(struct rsc *rsc)
4477 + {
4478 +- return rsc->conj += 0x100;
4479 ++ rsc->conj += 0x100;
4480 + }
4481 +
4482 + static const struct rsc_ops daio_out_rsc_ops = {
4483 +diff --git a/sound/pci/ctxfi/ctresource.c b/sound/pci/ctxfi/ctresource.c
4484 +index 0bb5696e44b37..ec5f597b580ad 100644
4485 +--- a/sound/pci/ctxfi/ctresource.c
4486 ++++ b/sound/pci/ctxfi/ctresource.c
4487 +@@ -109,18 +109,17 @@ static int audio_ring_slot(const struct rsc *rsc)
4488 + return (rsc->conj << 4) + offset_in_audio_slot_block[rsc->type];
4489 + }
4490 +
4491 +-static int rsc_next_conj(struct rsc *rsc)
4492 ++static void rsc_next_conj(struct rsc *rsc)
4493 + {
4494 + unsigned int i;
4495 + for (i = 0; (i < 8) && (!(rsc->msr & (0x1 << i))); )
4496 + i++;
4497 + rsc->conj += (AUDIO_SLOT_BLOCK_NUM >> i);
4498 +- return rsc->conj;
4499 + }
4500 +
4501 +-static int rsc_master(struct rsc *rsc)
4502 ++static void rsc_master(struct rsc *rsc)
4503 + {
4504 +- return rsc->conj = rsc->idx;
4505 ++ rsc->conj = rsc->idx;
4506 + }
4507 +
4508 + static const struct rsc_ops rsc_generic_ops = {
4509 +diff --git a/sound/pci/ctxfi/ctresource.h b/sound/pci/ctxfi/ctresource.h
4510 +index 93e47488a1c1c..92146054af582 100644
4511 +--- a/sound/pci/ctxfi/ctresource.h
4512 ++++ b/sound/pci/ctxfi/ctresource.h
4513 +@@ -39,8 +39,8 @@ struct rsc {
4514 + };
4515 +
4516 + struct rsc_ops {
4517 +- int (*master)(struct rsc *rsc); /* Move to master resource */
4518 +- int (*next_conj)(struct rsc *rsc); /* Move to next conjugate resource */
4519 ++ void (*master)(struct rsc *rsc); /* Move to master resource */
4520 ++ void (*next_conj)(struct rsc *rsc); /* Move to next conjugate resource */
4521 + int (*index)(const struct rsc *rsc); /* Return the index of resource */
4522 + /* Return the output slot number */
4523 + int (*output_slot)(const struct rsc *rsc);
4524 +diff --git a/sound/pci/ctxfi/ctsrc.c b/sound/pci/ctxfi/ctsrc.c
4525 +index 37c18ce84974a..7d2bda0c3d3de 100644
4526 +--- a/sound/pci/ctxfi/ctsrc.c
4527 ++++ b/sound/pci/ctxfi/ctsrc.c
4528 +@@ -590,16 +590,15 @@ int src_mgr_destroy(struct src_mgr *src_mgr)
4529 +
4530 + /* SRCIMP resource manager operations */
4531 +
4532 +-static int srcimp_master(struct rsc *rsc)
4533 ++static void srcimp_master(struct rsc *rsc)
4534 + {
4535 + rsc->conj = 0;
4536 +- return rsc->idx = container_of(rsc, struct srcimp, rsc)->idx[0];
4537 ++ rsc->idx = container_of(rsc, struct srcimp, rsc)->idx[0];
4538 + }
4539 +
4540 +-static int srcimp_next_conj(struct rsc *rsc)
4541 ++static void srcimp_next_conj(struct rsc *rsc)
4542 + {
4543 + rsc->conj++;
4544 +- return container_of(rsc, struct srcimp, rsc)->idx[rsc->conj];
4545 + }
4546 +
4547 + static int srcimp_index(const struct rsc *rsc)
4548 +diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c
4549 +index 745cc9dd14f38..16f26dd2d59ed 100644
4550 +--- a/sound/soc/qcom/qdsp6/q6routing.c
4551 ++++ b/sound/soc/qcom/qdsp6/q6routing.c
4552 +@@ -443,7 +443,11 @@ static int msm_routing_put_audio_mixer(struct snd_kcontrol *kcontrol,
4553 + session->port_id = be_id;
4554 + snd_soc_dapm_mixer_update_power(dapm, kcontrol, 1, update);
4555 + } else {
4556 +- session->port_id = -1;
4557 ++ if (session->port_id == be_id) {
4558 ++ session->port_id = -1;
4559 ++ return 0;
4560 ++ }
4561 ++
4562 + snd_soc_dapm_mixer_update_power(dapm, kcontrol, 0, update);
4563 + }
4564 +
4565 +diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
4566 +index c367609433bfc..21f859e56b700 100644
4567 +--- a/sound/soc/soc-topology.c
4568 ++++ b/sound/soc/soc-topology.c
4569 +@@ -2777,6 +2777,7 @@ EXPORT_SYMBOL_GPL(snd_soc_tplg_widget_remove_all);
4570 + /* remove dynamic controls from the component driver */
4571 + int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index)
4572 + {
4573 ++ struct snd_card *card = comp->card->snd_card;
4574 + struct snd_soc_dobj *dobj, *next_dobj;
4575 + int pass = SOC_TPLG_PASS_END;
4576 +
4577 +@@ -2784,6 +2785,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index)
4578 + while (pass >= SOC_TPLG_PASS_START) {
4579 +
4580 + /* remove mixer controls */
4581 ++ down_write(&card->controls_rwsem);
4582 + list_for_each_entry_safe(dobj, next_dobj, &comp->dobj_list,
4583 + list) {
4584 +
4585 +@@ -2827,6 +2829,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index)
4586 + break;
4587 + }
4588 + }
4589 ++ up_write(&card->controls_rwsem);
4590 + pass--;
4591 + }
4592 +