
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.19 commit in: /
Date: Tue, 20 Sep 2022 12:00:25
Message-Id: 1663675209.a15baa1cdaa74379d95243035410d3a16ea473ff.mpagano@gentoo
1 commit: a15baa1cdaa74379d95243035410d3a16ea473ff
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Tue Sep 20 12:00:09 2022 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Tue Sep 20 12:00:09 2022 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=a15baa1c
7
8 Linux patch 5.19.10
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1009_linux-5.19.10.patch | 1743 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 1747 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 341e7dca..e710df97 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -79,6 +79,10 @@ Patch: 1008_linux-5.19.9.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.19.9
23
24 +Patch: 1009_linux-5.19.10.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.19.10
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1009_linux-5.19.10.patch b/1009_linux-5.19.10.patch
33 new file mode 100644
34 index 00000000..ded561b4
35 --- /dev/null
36 +++ b/1009_linux-5.19.10.patch
37 @@ -0,0 +1,1743 @@
38 +diff --git a/Documentation/devicetree/bindings/iio/gyroscope/bosch,bmg160.yaml b/Documentation/devicetree/bindings/iio/gyroscope/bosch,bmg160.yaml
39 +index b6bbc312a7cf7..1414ba9977c16 100644
40 +--- a/Documentation/devicetree/bindings/iio/gyroscope/bosch,bmg160.yaml
41 ++++ b/Documentation/devicetree/bindings/iio/gyroscope/bosch,bmg160.yaml
42 +@@ -24,8 +24,10 @@ properties:
43 +
44 + interrupts:
45 + minItems: 1
46 ++ maxItems: 2
47 + description:
48 + Should be configured with type IRQ_TYPE_EDGE_RISING.
49 ++ If two interrupts are provided, expected order is INT1 and INT2.
50 +
51 + required:
52 + - compatible
53 +diff --git a/Documentation/input/joydev/joystick.rst b/Documentation/input/joydev/joystick.rst
54 +index f615906a0821b..6d721396717a2 100644
55 +--- a/Documentation/input/joydev/joystick.rst
56 ++++ b/Documentation/input/joydev/joystick.rst
57 +@@ -517,6 +517,7 @@ All I-Force devices are supported by the iforce module. This includes:
58 + * AVB Mag Turbo Force
59 + * AVB Top Shot Pegasus
60 + * AVB Top Shot Force Feedback Racing Wheel
61 ++* Boeder Force Feedback Wheel
62 + * Logitech WingMan Force
63 + * Logitech WingMan Force Wheel
64 + * Guillemot Race Leader Force Feedback
65 +diff --git a/Makefile b/Makefile
66 +index 1f27c4bd09e67..33a9b6b547c47 100644
67 +--- a/Makefile
68 ++++ b/Makefile
69 +@@ -1,7 +1,7 @@
70 + # SPDX-License-Identifier: GPL-2.0
71 + VERSION = 5
72 + PATCHLEVEL = 19
73 +-SUBLEVEL = 9
74 ++SUBLEVEL = 10
75 + EXTRAVERSION =
76 + NAME = Superb Owl
77 +
78 +diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
79 +index 62b5b07fa4e1c..ca64bf5f5b038 100644
80 +--- a/arch/loongarch/Kconfig
81 ++++ b/arch/loongarch/Kconfig
82 +@@ -36,6 +36,7 @@ config LOONGARCH
83 + select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPTION
84 + select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPTION
85 + select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION
86 ++ select ARCH_KEEP_MEMBLOCK
87 + select ARCH_MIGHT_HAVE_PC_PARPORT
88 + select ARCH_MIGHT_HAVE_PC_SERIO
89 + select ARCH_SPARSEMEM_ENABLE
90 +diff --git a/arch/loongarch/include/asm/acpi.h b/arch/loongarch/include/asm/acpi.h
91 +index 62044cd5b7bc5..825c2519b9d1f 100644
92 +--- a/arch/loongarch/include/asm/acpi.h
93 ++++ b/arch/loongarch/include/asm/acpi.h
94 +@@ -15,7 +15,7 @@ extern int acpi_pci_disabled;
95 + extern int acpi_noirq;
96 +
97 + #define acpi_os_ioremap acpi_os_ioremap
98 +-void __init __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
99 ++void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
100 +
101 + static inline void disable_acpi(void)
102 + {
103 +diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c
104 +index bb729ee8a2370..796a24055a942 100644
105 +--- a/arch/loongarch/kernel/acpi.c
106 ++++ b/arch/loongarch/kernel/acpi.c
107 +@@ -113,7 +113,7 @@ void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
108 + early_memunmap(map, size);
109 + }
110 +
111 +-void __init __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
112 ++void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
113 + {
114 + if (!memblock_is_memory(phys))
115 + return ioremap(phys, size);
116 +diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c
117 +index 7094a68c9b832..3c3fbff0b8f86 100644
118 +--- a/arch/loongarch/mm/init.c
119 ++++ b/arch/loongarch/mm/init.c
120 +@@ -131,18 +131,6 @@ int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params)
121 + return ret;
122 + }
123 +
124 +-#ifdef CONFIG_NUMA
125 +-int memory_add_physaddr_to_nid(u64 start)
126 +-{
127 +- int nid;
128 +-
129 +- nid = pa_to_nid(start);
130 +- return nid;
131 +-}
132 +-EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
133 +-#endif
134 +-
135 +-#ifdef CONFIG_MEMORY_HOTREMOVE
136 + void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
137 + {
138 + unsigned long start_pfn = start >> PAGE_SHIFT;
139 +@@ -154,6 +142,16 @@ void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
140 + page += vmem_altmap_offset(altmap);
141 + __remove_pages(start_pfn, nr_pages, altmap);
142 + }
143 ++
144 ++#ifdef CONFIG_NUMA
145 ++int memory_add_physaddr_to_nid(u64 start)
146 ++{
147 ++ int nid;
148 ++
149 ++ nid = pa_to_nid(start);
150 ++ return nid;
151 ++}
152 ++EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
153 + #endif
154 + #endif
155 +
156 +diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
157 +index 356226c7ebbdc..aa1ba803659cd 100644
158 +--- a/arch/x86/kvm/mmu/mmu.c
159 ++++ b/arch/x86/kvm/mmu/mmu.c
160 +@@ -5907,47 +5907,18 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
161 + const struct kvm_memory_slot *memslot,
162 + int start_level)
163 + {
164 +- bool flush = false;
165 +-
166 + if (kvm_memslots_have_rmaps(kvm)) {
167 + write_lock(&kvm->mmu_lock);
168 +- flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
169 +- start_level, KVM_MAX_HUGEPAGE_LEVEL,
170 +- false);
171 ++ slot_handle_level(kvm, memslot, slot_rmap_write_protect,
172 ++ start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
173 + write_unlock(&kvm->mmu_lock);
174 + }
175 +
176 + if (is_tdp_mmu_enabled(kvm)) {
177 + read_lock(&kvm->mmu_lock);
178 +- flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, start_level);
179 ++ kvm_tdp_mmu_wrprot_slot(kvm, memslot, start_level);
180 + read_unlock(&kvm->mmu_lock);
181 + }
182 +-
183 +- /*
184 +- * Flush TLBs if any SPTEs had to be write-protected to ensure that
185 +- * guest writes are reflected in the dirty bitmap before the memslot
186 +- * update completes, i.e. before enabling dirty logging is visible to
187 +- * userspace.
188 +- *
189 +- * Perform the TLB flush outside the mmu_lock to reduce the amount of
190 +- * time the lock is held. However, this does mean that another CPU can
191 +- * now grab mmu_lock and encounter a write-protected SPTE while CPUs
192 +- * still have a writable mapping for the associated GFN in their TLB.
193 +- *
194 +- * This is safe but requires KVM to be careful when making decisions
195 +- * based on the write-protection status of an SPTE. Specifically, KVM
196 +- * also write-protects SPTEs to monitor changes to guest page tables
197 +- * during shadow paging, and must guarantee no CPUs can write to those
198 +- * page before the lock is dropped. As mentioned in the previous
199 +- * paragraph, a write-protected SPTE is no guarantee that CPU cannot
200 +- * perform writes. So to determine if a TLB flush is truly required, KVM
201 +- * will clear a separate software-only bit (MMU-writable) and skip the
202 +- * flush if-and-only-if this bit was already clear.
203 +- *
204 +- * See is_writable_pte() for more details.
205 +- */
206 +- if (flush)
207 +- kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
208 + }
209 +
210 + /* Must be called with the mmu_lock held in write-mode. */
211 +@@ -6070,32 +6041,30 @@ void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
212 + void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
213 + const struct kvm_memory_slot *memslot)
214 + {
215 +- bool flush = false;
216 +-
217 + if (kvm_memslots_have_rmaps(kvm)) {
218 + write_lock(&kvm->mmu_lock);
219 + /*
220 + * Clear dirty bits only on 4k SPTEs since the legacy MMU only
221 + * support dirty logging at a 4k granularity.
222 + */
223 +- flush = slot_handle_level_4k(kvm, memslot, __rmap_clear_dirty, false);
224 ++ slot_handle_level_4k(kvm, memslot, __rmap_clear_dirty, false);
225 + write_unlock(&kvm->mmu_lock);
226 + }
227 +
228 + if (is_tdp_mmu_enabled(kvm)) {
229 + read_lock(&kvm->mmu_lock);
230 +- flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
231 ++ kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
232 + read_unlock(&kvm->mmu_lock);
233 + }
234 +
235 + /*
236 ++ * The caller will flush the TLBs after this function returns.
237 ++ *
238 + * It's also safe to flush TLBs out of mmu lock here as currently this
239 + * function is only used for dirty logging, in which case flushing TLB
240 + * out of mmu lock also guarantees no dirty pages will be lost in
241 + * dirty_bitmap.
242 + */
243 +- if (flush)
244 +- kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
245 + }
246 +
247 + void kvm_mmu_zap_all(struct kvm *kvm)
248 +diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
249 +index f80dbb628df57..e09bdcf1e47c5 100644
250 +--- a/arch/x86/kvm/mmu/spte.h
251 ++++ b/arch/x86/kvm/mmu/spte.h
252 +@@ -326,7 +326,7 @@ static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check,
253 + }
254 +
255 + /*
256 +- * An shadow-present leaf SPTE may be non-writable for 3 possible reasons:
257 ++ * A shadow-present leaf SPTE may be non-writable for 4 possible reasons:
258 + *
259 + * 1. To intercept writes for dirty logging. KVM write-protects huge pages
260 + * so that they can be split be split down into the dirty logging
261 +@@ -344,8 +344,13 @@ static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check,
262 + * read-only memslot or guest memory backed by a read-only VMA. Writes to
263 + * such pages are disallowed entirely.
264 + *
265 +- * To keep track of why a given SPTE is write-protected, KVM uses 2
266 +- * software-only bits in the SPTE:
267 ++ * 4. To emulate the Accessed bit for SPTEs without A/D bits. Note, in this
268 ++ * case, the SPTE is access-protected, not just write-protected!
269 ++ *
270 ++ * For cases #1 and #4, KVM can safely make such SPTEs writable without taking
271 ++ * mmu_lock as capturing the Accessed/Dirty state doesn't require taking it.
272 ++ * To differentiate #1 and #4 from #2 and #3, KVM uses two software-only bits
273 ++ * in the SPTE:
274 + *
275 + * shadow_mmu_writable_mask, aka MMU-writable -
276 + * Cleared on SPTEs that KVM is currently write-protecting for shadow paging
277 +@@ -374,7 +379,8 @@ static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check,
278 + * shadow page tables between vCPUs. Write-protecting an SPTE for dirty logging
279 + * (which does not clear the MMU-writable bit), does not flush TLBs before
280 + * dropping the lock, as it only needs to synchronize guest writes with the
281 +- * dirty bitmap.
282 ++ * dirty bitmap. Similarly, making the SPTE inaccessible (and non-writable) for
283 ++ * access-tracking via the clear_young() MMU notifier also does not flush TLBs.
284 + *
285 + * So, there is the problem: clearing the MMU-writable bit can encounter a
286 + * write-protected SPTE while CPUs still have writable mappings for that SPTE
287 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
288 +index 55de0d1981e52..5b36866528568 100644
289 +--- a/arch/x86/kvm/x86.c
290 ++++ b/arch/x86/kvm/x86.c
291 +@@ -12265,6 +12265,50 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
292 + } else {
293 + kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_4K);
294 + }
295 ++
296 ++ /*
297 ++ * Unconditionally flush the TLBs after enabling dirty logging.
298 ++ * A flush is almost always going to be necessary (see below),
299 ++ * and unconditionally flushing allows the helpers to omit
300 ++ * the subtly complex checks when removing write access.
301 ++ *
302 ++ * Do the flush outside of mmu_lock to reduce the amount of
303 ++ * time mmu_lock is held. Flushing after dropping mmu_lock is
304 ++ * safe as KVM only needs to guarantee the slot is fully
305 ++ * write-protected before returning to userspace, i.e. before
306 ++ * userspace can consume the dirty status.
307 ++ *
308 ++ * Flushing outside of mmu_lock requires KVM to be careful when
309 ++ * making decisions based on writable status of an SPTE, e.g. a
310 ++ * !writable SPTE doesn't guarantee a CPU can't perform writes.
311 ++ *
312 ++ * Specifically, KVM also write-protects guest page tables to
313 ++ * monitor changes when using shadow paging, and must guarantee
314 ++ * no CPUs can write to those page before mmu_lock is dropped.
315 ++ * Because CPUs may have stale TLB entries at this point, a
316 ++ * !writable SPTE doesn't guarantee CPUs can't perform writes.
317 ++ *
318 ++ * KVM also allows making SPTES writable outside of mmu_lock,
319 ++ * e.g. to allow dirty logging without taking mmu_lock.
320 ++ *
321 ++ * To handle these scenarios, KVM uses a separate software-only
322 ++ * bit (MMU-writable) to track if a SPTE is !writable due to
323 ++ * a guest page table being write-protected (KVM clears the
324 ++ * MMU-writable flag when write-protecting for shadow paging).
325 ++ *
326 ++ * The use of MMU-writable is also the primary motivation for
327 ++ * the unconditional flush. Because KVM must guarantee that a
328 ++ * CPU doesn't contain stale, writable TLB entries for a
329 ++ * !MMU-writable SPTE, KVM must flush if it encounters any
330 ++ * MMU-writable SPTE regardless of whether the actual hardware
331 ++ * writable bit was set. I.e. KVM is almost guaranteed to need
332 ++ * to flush, while unconditionally flushing allows the "remove
333 ++ * write access" helpers to ignore MMU-writable entirely.
334 ++ *
335 ++ * See is_writable_pte() for more details (the case involving
336 ++ * access-tracked SPTEs is particularly relevant).
337 ++ */
338 ++ kvm_arch_flush_remote_tlbs_memslot(kvm, new);
339 + }
340 + }
341 +
342 +diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
343 +index c2d4947844250..510cdec375c4d 100644
344 +--- a/drivers/acpi/resource.c
345 ++++ b/drivers/acpi/resource.c
346 +@@ -416,6 +416,16 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
347 + {
348 + int i;
349 +
350 ++#ifdef CONFIG_X86
351 ++ /*
352 ++ * IRQ override isn't needed on modern AMD Zen systems and
353 ++ * this override breaks active low IRQs on AMD Ryzen 6000 and
354 ++ * newer systems. Skip it.
355 ++ */
356 ++ if (boot_cpu_has(X86_FEATURE_ZEN))
357 ++ return false;
358 ++#endif
359 ++
360 + for (i = 0; i < ARRAY_SIZE(skip_override_table); i++) {
361 + const struct irq_override_cmp *entry = &skip_override_table[i];
362 +
363 +diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c
364 +index f118ad9bcd33d..0e95351d47d49 100644
365 +--- a/drivers/gpio/gpio-104-dio-48e.c
366 ++++ b/drivers/gpio/gpio-104-dio-48e.c
367 +@@ -271,6 +271,7 @@ static void dio48e_irq_mask(struct irq_data *data)
368 + dio48egpio->irq_mask &= ~BIT(0);
369 + else
370 + dio48egpio->irq_mask &= ~BIT(1);
371 ++ gpiochip_disable_irq(chip, offset);
372 +
373 + if (!dio48egpio->irq_mask)
374 + /* disable interrupts */
375 +@@ -298,6 +299,7 @@ static void dio48e_irq_unmask(struct irq_data *data)
376 + iowrite8(0x00, dio48egpio->base + 0xB);
377 + }
378 +
379 ++ gpiochip_enable_irq(chip, offset);
380 + if (offset == 19)
381 + dio48egpio->irq_mask |= BIT(0);
382 + else
383 +@@ -320,12 +322,14 @@ static int dio48e_irq_set_type(struct irq_data *data, unsigned int flow_type)
384 + return 0;
385 + }
386 +
387 +-static struct irq_chip dio48e_irqchip = {
388 ++static const struct irq_chip dio48e_irqchip = {
389 + .name = "104-dio-48e",
390 + .irq_ack = dio48e_irq_ack,
391 + .irq_mask = dio48e_irq_mask,
392 + .irq_unmask = dio48e_irq_unmask,
393 +- .irq_set_type = dio48e_irq_set_type
394 ++ .irq_set_type = dio48e_irq_set_type,
395 ++ .flags = IRQCHIP_IMMUTABLE,
396 ++ GPIOCHIP_IRQ_RESOURCE_HELPERS,
397 + };
398 +
399 + static irqreturn_t dio48e_irq_handler(int irq, void *dev_id)
400 +@@ -414,7 +418,7 @@ static int dio48e_probe(struct device *dev, unsigned int id)
401 + dio48egpio->chip.set_multiple = dio48e_gpio_set_multiple;
402 +
403 + girq = &dio48egpio->chip.irq;
404 +- girq->chip = &dio48e_irqchip;
405 ++ gpio_irq_chip_set_chip(girq, &dio48e_irqchip);
406 + /* This will let us handle the parent IRQ in the driver */
407 + girq->parent_handler = NULL;
408 + girq->num_parents = 0;
409 +diff --git a/drivers/gpio/gpio-104-idio-16.c b/drivers/gpio/gpio-104-idio-16.c
410 +index 45f7ad8573e19..a8b7c8eafac5a 100644
411 +--- a/drivers/gpio/gpio-104-idio-16.c
412 ++++ b/drivers/gpio/gpio-104-idio-16.c
413 +@@ -150,10 +150,11 @@ static void idio_16_irq_mask(struct irq_data *data)
414 + {
415 + struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
416 + struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip);
417 +- const unsigned long mask = BIT(irqd_to_hwirq(data));
418 ++ const unsigned long offset = irqd_to_hwirq(data);
419 + unsigned long flags;
420 +
421 +- idio16gpio->irq_mask &= ~mask;
422 ++ idio16gpio->irq_mask &= ~BIT(offset);
423 ++ gpiochip_disable_irq(chip, offset);
424 +
425 + if (!idio16gpio->irq_mask) {
426 + raw_spin_lock_irqsave(&idio16gpio->lock, flags);
427 +@@ -168,11 +169,12 @@ static void idio_16_irq_unmask(struct irq_data *data)
428 + {
429 + struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
430 + struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip);
431 +- const unsigned long mask = BIT(irqd_to_hwirq(data));
432 ++ const unsigned long offset = irqd_to_hwirq(data);
433 + const unsigned long prev_irq_mask = idio16gpio->irq_mask;
434 + unsigned long flags;
435 +
436 +- idio16gpio->irq_mask |= mask;
437 ++ gpiochip_enable_irq(chip, offset);
438 ++ idio16gpio->irq_mask |= BIT(offset);
439 +
440 + if (!prev_irq_mask) {
441 + raw_spin_lock_irqsave(&idio16gpio->lock, flags);
442 +@@ -193,12 +195,14 @@ static int idio_16_irq_set_type(struct irq_data *data, unsigned int flow_type)
443 + return 0;
444 + }
445 +
446 +-static struct irq_chip idio_16_irqchip = {
447 ++static const struct irq_chip idio_16_irqchip = {
448 + .name = "104-idio-16",
449 + .irq_ack = idio_16_irq_ack,
450 + .irq_mask = idio_16_irq_mask,
451 + .irq_unmask = idio_16_irq_unmask,
452 +- .irq_set_type = idio_16_irq_set_type
453 ++ .irq_set_type = idio_16_irq_set_type,
454 ++ .flags = IRQCHIP_IMMUTABLE,
455 ++ GPIOCHIP_IRQ_RESOURCE_HELPERS,
456 + };
457 +
458 + static irqreturn_t idio_16_irq_handler(int irq, void *dev_id)
459 +@@ -275,7 +279,7 @@ static int idio_16_probe(struct device *dev, unsigned int id)
460 + idio16gpio->out_state = 0xFFFF;
461 +
462 + girq = &idio16gpio->chip.irq;
463 +- girq->chip = &idio_16_irqchip;
464 ++ gpio_irq_chip_set_chip(girq, &idio_16_irqchip);
465 + /* This will let us handle the parent IRQ in the driver */
466 + girq->parent_handler = NULL;
467 + girq->num_parents = 0;
468 +diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
469 +index 8943cea927642..a2e505a7545cd 100644
470 +--- a/drivers/gpio/gpio-mockup.c
471 ++++ b/drivers/gpio/gpio-mockup.c
472 +@@ -373,6 +373,13 @@ static void gpio_mockup_debugfs_setup(struct device *dev,
473 + }
474 + }
475 +
476 ++static void gpio_mockup_debugfs_cleanup(void *data)
477 ++{
478 ++ struct gpio_mockup_chip *chip = data;
479 ++
480 ++ debugfs_remove_recursive(chip->dbg_dir);
481 ++}
482 ++
483 + static void gpio_mockup_dispose_mappings(void *data)
484 + {
485 + struct gpio_mockup_chip *chip = data;
486 +@@ -455,7 +462,7 @@ static int gpio_mockup_probe(struct platform_device *pdev)
487 +
488 + gpio_mockup_debugfs_setup(dev, chip);
489 +
490 +- return 0;
491 ++ return devm_add_action_or_reset(dev, gpio_mockup_debugfs_cleanup, chip);
492 + }
493 +
494 + static const struct of_device_id gpio_mockup_of_match[] = {
495 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
496 +index ecada5eadfe35..e325150879df7 100644
497 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
498 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
499 +@@ -66,10 +66,15 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
500 + return true;
501 + case CHIP_SIENNA_CICHLID:
502 + if (strnstr(atom_ctx->vbios_version, "D603",
503 ++ sizeof(atom_ctx->vbios_version))) {
504 ++ if (strnstr(atom_ctx->vbios_version, "D603GLXE",
505 + sizeof(atom_ctx->vbios_version)))
506 +- return true;
507 +- else
508 ++ return false;
509 ++ else
510 ++ return true;
511 ++ } else {
512 + return false;
513 ++ }
514 + default:
515 + return false;
516 + }
517 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
518 +index 2b00f8fe15a89..b19bf0c3f3737 100644
519 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
520 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
521 +@@ -2372,7 +2372,7 @@ static int psp_load_smu_fw(struct psp_context *psp)
522 + static bool fw_load_skip_check(struct psp_context *psp,
523 + struct amdgpu_firmware_info *ucode)
524 + {
525 +- if (!ucode->fw)
526 ++ if (!ucode->fw || !ucode->ucode_size)
527 + return true;
528 +
529 + if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
530 +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
531 +index 9cde13b07dd26..d9a5209aa8433 100644
532 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
533 ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
534 +@@ -382,11 +382,27 @@ static int smu_v13_0_7_append_powerplay_table(struct smu_context *smu)
535 + return 0;
536 + }
537 +
538 ++static int smu_v13_0_7_get_pptable_from_pmfw(struct smu_context *smu,
539 ++ void **table,
540 ++ uint32_t *size)
541 ++{
542 ++ struct smu_table_context *smu_table = &smu->smu_table;
543 ++ void *combo_pptable = smu_table->combo_pptable;
544 ++ int ret = 0;
545 ++
546 ++ ret = smu_cmn_get_combo_pptable(smu);
547 ++ if (ret)
548 ++ return ret;
549 ++
550 ++ *table = combo_pptable;
551 ++ *size = sizeof(struct smu_13_0_7_powerplay_table);
552 ++
553 ++ return 0;
554 ++}
555 +
556 + static int smu_v13_0_7_setup_pptable(struct smu_context *smu)
557 + {
558 + struct smu_table_context *smu_table = &smu->smu_table;
559 +- void *combo_pptable = smu_table->combo_pptable;
560 + struct amdgpu_device *adev = smu->adev;
561 + int ret = 0;
562 +
563 +@@ -395,18 +411,11 @@ static int smu_v13_0_7_setup_pptable(struct smu_context *smu)
564 + * be used directly by driver. To get the raw pptable, we need to
565 + * rely on the combo pptable(and its revelant SMU message).
566 + */
567 +- if (adev->scpm_enabled) {
568 +- ret = smu_cmn_get_combo_pptable(smu);
569 +- if (ret)
570 +- return ret;
571 +-
572 +- smu->smu_table.power_play_table = combo_pptable;
573 +- smu->smu_table.power_play_table_size = sizeof(struct smu_13_0_7_powerplay_table);
574 +- } else {
575 +- ret = smu_v13_0_setup_pptable(smu);
576 +- if (ret)
577 +- return ret;
578 +- }
579 ++ ret = smu_v13_0_7_get_pptable_from_pmfw(smu,
580 ++ &smu_table->power_play_table,
581 ++ &smu_table->power_play_table_size);
582 ++ if (ret)
583 ++ return ret;
584 +
585 + ret = smu_v13_0_7_store_powerplay_table(smu);
586 + if (ret)
587 +diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
588 +index a92ffde53f0b3..db2f847c8535f 100644
589 +--- a/drivers/gpu/drm/msm/msm_rd.c
590 ++++ b/drivers/gpu/drm/msm/msm_rd.c
591 +@@ -196,6 +196,9 @@ static int rd_open(struct inode *inode, struct file *file)
592 + file->private_data = rd;
593 + rd->open = true;
594 +
595 ++ /* Reset fifo to clear any previously unread data: */
596 ++ rd->fifo.head = rd->fifo.tail = 0;
597 ++
598 + /* the parsing tools need to know gpu-id to know which
599 + * register database to load.
600 + *
601 +diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.h b/drivers/hid/intel-ish-hid/ishtp-hid.h
602 +index 6a5cc11aefd89..35dddc5015b37 100644
603 +--- a/drivers/hid/intel-ish-hid/ishtp-hid.h
604 ++++ b/drivers/hid/intel-ish-hid/ishtp-hid.h
605 +@@ -105,7 +105,7 @@ struct report_list {
606 + * @multi_packet_cnt: Count of fragmented packet count
607 + *
608 + * This structure is used to store completion flags and per client data like
609 +- * like report description, number of HID devices etc.
610 ++ * report description, number of HID devices etc.
611 + */
612 + struct ishtp_cl_data {
613 + /* completion flags */
614 +diff --git a/drivers/hid/intel-ish-hid/ishtp/client.c b/drivers/hid/intel-ish-hid/ishtp/client.c
615 +index 405e0d5212cc8..df0a825694f52 100644
616 +--- a/drivers/hid/intel-ish-hid/ishtp/client.c
617 ++++ b/drivers/hid/intel-ish-hid/ishtp/client.c
618 +@@ -626,13 +626,14 @@ static void ishtp_cl_read_complete(struct ishtp_cl_rb *rb)
619 + }
620 +
621 + /**
622 +- * ipc_tx_callback() - IPC tx callback function
623 ++ * ipc_tx_send() - IPC tx send function
624 + * @prm: Pointer to client device instance
625 + *
626 +- * Send message over IPC either first time or on callback on previous message
627 +- * completion
628 ++ * Send message over IPC. Message will be split into fragments
629 ++ * if message size is bigger than IPC FIFO size, and all
630 ++ * fragments will be sent one by one.
631 + */
632 +-static void ipc_tx_callback(void *prm)
633 ++static void ipc_tx_send(void *prm)
634 + {
635 + struct ishtp_cl *cl = prm;
636 + struct ishtp_cl_tx_ring *cl_msg;
637 +@@ -677,32 +678,41 @@ static void ipc_tx_callback(void *prm)
638 + list);
639 + rem = cl_msg->send_buf.size - cl->tx_offs;
640 +
641 +- ishtp_hdr.host_addr = cl->host_client_id;
642 +- ishtp_hdr.fw_addr = cl->fw_client_id;
643 +- ishtp_hdr.reserved = 0;
644 +- pmsg = cl_msg->send_buf.data + cl->tx_offs;
645 ++ while (rem > 0) {
646 ++ ishtp_hdr.host_addr = cl->host_client_id;
647 ++ ishtp_hdr.fw_addr = cl->fw_client_id;
648 ++ ishtp_hdr.reserved = 0;
649 ++ pmsg = cl_msg->send_buf.data + cl->tx_offs;
650 ++
651 ++ if (rem <= dev->mtu) {
652 ++ /* Last fragment or only one packet */
653 ++ ishtp_hdr.length = rem;
654 ++ ishtp_hdr.msg_complete = 1;
655 ++ /* Submit to IPC queue with no callback */
656 ++ ishtp_write_message(dev, &ishtp_hdr, pmsg);
657 ++ cl->tx_offs = 0;
658 ++ cl->sending = 0;
659 +
660 +- if (rem <= dev->mtu) {
661 +- ishtp_hdr.length = rem;
662 +- ishtp_hdr.msg_complete = 1;
663 +- cl->sending = 0;
664 +- list_del_init(&cl_msg->list); /* Must be before write */
665 +- spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
666 +- /* Submit to IPC queue with no callback */
667 +- ishtp_write_message(dev, &ishtp_hdr, pmsg);
668 +- spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
669 +- list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
670 +- ++cl->tx_ring_free_size;
671 +- spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
672 +- tx_free_flags);
673 +- } else {
674 +- /* Send IPC fragment */
675 +- spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
676 +- cl->tx_offs += dev->mtu;
677 +- ishtp_hdr.length = dev->mtu;
678 +- ishtp_hdr.msg_complete = 0;
679 +- ishtp_send_msg(dev, &ishtp_hdr, pmsg, ipc_tx_callback, cl);
680 ++ break;
681 ++ } else {
682 ++ /* Send ipc fragment */
683 ++ ishtp_hdr.length = dev->mtu;
684 ++ ishtp_hdr.msg_complete = 0;
685 ++ /* All fregments submitted to IPC queue with no callback */
686 ++ ishtp_write_message(dev, &ishtp_hdr, pmsg);
687 ++ cl->tx_offs += dev->mtu;
688 ++ rem = cl_msg->send_buf.size - cl->tx_offs;
689 ++ }
690 + }
691 ++
692 ++ list_del_init(&cl_msg->list);
693 ++ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
694 ++
695 ++ spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
696 ++ list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
697 ++ ++cl->tx_ring_free_size;
698 ++ spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
699 ++ tx_free_flags);
700 + }
701 +
702 + /**
703 +@@ -720,7 +730,7 @@ static void ishtp_cl_send_msg_ipc(struct ishtp_device *dev,
704 + return;
705 +
706 + cl->tx_offs = 0;
707 +- ipc_tx_callback(cl);
708 ++ ipc_tx_send(cl);
709 + ++cl->send_msg_cnt_ipc;
710 + }
711 +
712 +diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
713 +index d003ad864ee44..a6e5d350a94ce 100644
714 +--- a/drivers/infiniband/hw/irdma/uk.c
715 ++++ b/drivers/infiniband/hw/irdma/uk.c
716 +@@ -497,7 +497,8 @@ int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
717 + FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
718 + i = 0;
719 + } else {
720 +- qp->wqe_ops.iw_set_fragment(wqe, 0, op_info->sg_list,
721 ++ qp->wqe_ops.iw_set_fragment(wqe, 0,
722 ++ frag_cnt ? op_info->sg_list : NULL,
723 + qp->swqe_polarity);
724 + i = 1;
725 + }
726 +diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
727 +index 08371a80fdc26..be189e0525de6 100644
728 +--- a/drivers/infiniband/hw/mlx5/cq.c
729 ++++ b/drivers/infiniband/hw/mlx5/cq.c
730 +@@ -523,6 +523,10 @@ repoll:
731 + "Requestor" : "Responder", cq->mcq.cqn);
732 + mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n",
733 + err_cqe->syndrome, err_cqe->vendor_err_synd);
734 ++ if (wc->status != IB_WC_WR_FLUSH_ERR &&
735 ++ (*cur_qp)->type == MLX5_IB_QPT_REG_UMR)
736 ++ dev->umrc.state = MLX5_UMR_STATE_RECOVER;
737 ++
738 + if (opcode == MLX5_CQE_REQ_ERR) {
739 + wq = &(*cur_qp)->sq;
740 + wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
741 +diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
742 +index 63c89a72cc352..bb13164124fdb 100644
743 +--- a/drivers/infiniband/hw/mlx5/main.c
744 ++++ b/drivers/infiniband/hw/mlx5/main.c
745 +@@ -4336,7 +4336,7 @@ static int mlx5r_probe(struct auxiliary_device *adev,
746 + dev->mdev = mdev;
747 + dev->num_ports = num_ports;
748 +
749 +- if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_init_enabled(mdev))
750 ++ if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_get_roce_state(mdev))
751 + profile = &raw_eth_profile;
752 + else
753 + profile = &pf_profile;
754 +diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
755 +index 998b67509a533..c2cca032a6ed4 100644
756 +--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
757 ++++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
758 +@@ -717,13 +717,24 @@ struct mlx5_ib_umr_context {
759 + struct completion done;
760 + };
761 +
762 ++enum {
763 ++ MLX5_UMR_STATE_UNINIT,
764 ++ MLX5_UMR_STATE_ACTIVE,
765 ++ MLX5_UMR_STATE_RECOVER,
766 ++ MLX5_UMR_STATE_ERR,
767 ++};
768 ++
769 + struct umr_common {
770 + struct ib_pd *pd;
771 + struct ib_cq *cq;
772 + struct ib_qp *qp;
773 +- /* control access to UMR QP
774 ++ /* Protects from UMR QP overflow
775 + */
776 + struct semaphore sem;
777 ++ /* Protects from using UMR while the UMR is not active
778 ++ */
779 ++ struct mutex lock;
780 ++ unsigned int state;
781 + };
782 +
783 + struct mlx5_cache_ent {
784 +diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
785 +index 3a48364c09181..d5105b5c9979b 100644
786 +--- a/drivers/infiniband/hw/mlx5/umr.c
787 ++++ b/drivers/infiniband/hw/mlx5/umr.c
788 +@@ -176,6 +176,8 @@ int mlx5r_umr_resource_init(struct mlx5_ib_dev *dev)
789 + dev->umrc.pd = pd;
790 +
791 + sema_init(&dev->umrc.sem, MAX_UMR_WR);
792 ++ mutex_init(&dev->umrc.lock);
793 ++ dev->umrc.state = MLX5_UMR_STATE_ACTIVE;
794 +
795 + return 0;
796 +
797 +@@ -190,11 +192,38 @@ destroy_pd:
798 +
799 + void mlx5r_umr_resource_cleanup(struct mlx5_ib_dev *dev)
800 + {
801 ++ if (dev->umrc.state == MLX5_UMR_STATE_UNINIT)
802 ++ return;
803 + ib_destroy_qp(dev->umrc.qp);
804 + ib_free_cq(dev->umrc.cq);
805 + ib_dealloc_pd(dev->umrc.pd);
806 + }
807 +
808 ++static int mlx5r_umr_recover(struct mlx5_ib_dev *dev)
809 ++{
810 ++ struct umr_common *umrc = &dev->umrc;
811 ++ struct ib_qp_attr attr;
812 ++ int err;
813 ++
814 ++ attr.qp_state = IB_QPS_RESET;
815 ++ err = ib_modify_qp(umrc->qp, &attr, IB_QP_STATE);
816 ++ if (err) {
817 ++ mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
818 ++ goto err;
819 ++ }
820 ++
821 ++ err = mlx5r_umr_qp_rst2rts(dev, umrc->qp);
822 ++ if (err)
823 ++ goto err;
824 ++
825 ++ umrc->state = MLX5_UMR_STATE_ACTIVE;
826 ++ return 0;
827 ++
828 ++err:
829 ++ umrc->state = MLX5_UMR_STATE_ERR;
830 ++ return err;
831 ++}
832 ++
833 + static int mlx5r_umr_post_send(struct ib_qp *ibqp, u32 mkey, struct ib_cqe *cqe,
834 + struct mlx5r_umr_wqe *wqe, bool with_data)
835 + {
836 +@@ -231,7 +260,7 @@ static int mlx5r_umr_post_send(struct ib_qp *ibqp, u32 mkey, struct ib_cqe *cqe,
837 +
838 + id.ib_cqe = cqe;
839 + mlx5r_finish_wqe(qp, ctrl, seg, size, cur_edge, idx, id.wr_id, 0,
840 +- MLX5_FENCE_MODE_NONE, MLX5_OPCODE_UMR);
841 ++ MLX5_FENCE_MODE_INITIATOR_SMALL, MLX5_OPCODE_UMR);
842 +
843 + mlx5r_ring_db(qp, 1, ctrl);
844 +
845 +@@ -270,17 +299,49 @@ static int mlx5r_umr_post_send_wait(struct mlx5_ib_dev *dev, u32 mkey,
846 + mlx5r_umr_init_context(&umr_context);
847 +
848 + down(&umrc->sem);
849 +- err = mlx5r_umr_post_send(umrc->qp, mkey, &umr_context.cqe, wqe,
850 +- with_data);
851 +- if (err)
852 +- mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
853 +- else {
854 +- wait_for_completion(&umr_context.done);
855 +- if (umr_context.status != IB_WC_SUCCESS) {
856 +- mlx5_ib_warn(dev, "reg umr failed (%u)\n",
857 +- umr_context.status);
858 ++ while (true) {
859 ++ mutex_lock(&umrc->lock);
860 ++ if (umrc->state == MLX5_UMR_STATE_ERR) {
861 ++ mutex_unlock(&umrc->lock);
862 + err = -EFAULT;
863 ++ break;
864 ++ }
865 ++
866 ++ if (umrc->state == MLX5_UMR_STATE_RECOVER) {
867 ++ mutex_unlock(&umrc->lock);
868 ++ usleep_range(3000, 5000);
869 ++ continue;
870 ++ }
871 ++
872 ++ err = mlx5r_umr_post_send(umrc->qp, mkey, &umr_context.cqe, wqe,
873 ++ with_data);
874 ++ mutex_unlock(&umrc->lock);
875 ++ if (err) {
876 ++ mlx5_ib_warn(dev, "UMR post send failed, err %d\n",
877 ++ err);
878 ++ break;
879 + }
880 ++
881 ++ wait_for_completion(&umr_context.done);
882 ++
883 ++ if (umr_context.status == IB_WC_SUCCESS)
884 ++ break;
885 ++
886 ++ if (umr_context.status == IB_WC_WR_FLUSH_ERR)
887 ++ continue;
888 ++
889 ++ WARN_ON_ONCE(1);
890 ++ mlx5_ib_warn(dev,
891 ++ "reg umr failed (%u). Trying to recover and resubmit the flushed WQEs\n",
892 ++ umr_context.status);
893 ++ mutex_lock(&umrc->lock);
894 ++ err = mlx5r_umr_recover(dev);
895 ++ mutex_unlock(&umrc->lock);
896 ++ if (err)
897 ++ mlx5_ib_warn(dev, "couldn't recover UMR, err %d\n",
898 ++ err);
899 ++ err = -EFAULT;
900 ++ break;
901 + }
902 + up(&umrc->sem);
903 + return err;
904 +diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c
905 +index b2a68bc9f0b4d..b86de1312512b 100644
906 +--- a/drivers/input/joystick/iforce/iforce-main.c
907 ++++ b/drivers/input/joystick/iforce/iforce-main.c
908 +@@ -50,6 +50,7 @@ static struct iforce_device iforce_device[] = {
909 + { 0x046d, 0xc291, "Logitech WingMan Formula Force", btn_wheel, abs_wheel, ff_iforce },
910 + { 0x05ef, 0x020a, "AVB Top Shot Pegasus", btn_joystick_avb, abs_avb_pegasus, ff_iforce },
911 + { 0x05ef, 0x8884, "AVB Mag Turbo Force", btn_wheel, abs_wheel, ff_iforce },
912 ++ { 0x05ef, 0x8886, "Boeder Force Feedback Wheel", btn_wheel, abs_wheel, ff_iforce },
913 + { 0x05ef, 0x8888, "AVB Top Shot Force Feedback Racing Wheel", btn_wheel, abs_wheel, ff_iforce }, //?
914 + { 0x061c, 0xc0a4, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, //?
915 + { 0x061c, 0xc084, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce },
916 +diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
917 +index aa45a9fee6a01..3020ddc1ca48b 100644
918 +--- a/drivers/input/touchscreen/goodix.c
919 ++++ b/drivers/input/touchscreen/goodix.c
920 +@@ -95,6 +95,7 @@ static const struct goodix_chip_data gt9x_chip_data = {
921 +
922 + static const struct goodix_chip_id goodix_chip_ids[] = {
923 + { .id = "1151", .data = &gt1x_chip_data },
924 ++ { .id = "1158", .data = &gt1x_chip_data },
925 + { .id = "5663", .data = &gt1x_chip_data },
926 + { .id = "5688", .data = &gt1x_chip_data },
927 + { .id = "917S", .data = &gt1x_chip_data },
928 +@@ -1514,6 +1515,7 @@ MODULE_DEVICE_TABLE(acpi, goodix_acpi_match);
929 + #ifdef CONFIG_OF
930 + static const struct of_device_id goodix_of_match[] = {
931 + { .compatible = "goodix,gt1151" },
932 ++ { .compatible = "goodix,gt1158" },
933 + { .compatible = "goodix,gt5663" },
934 + { .compatible = "goodix,gt5688" },
935 + { .compatible = "goodix,gt911" },
936 +diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
937 +index 40ac3a78d90ef..c0464959cbcdb 100644
938 +--- a/drivers/iommu/intel/iommu.c
939 ++++ b/drivers/iommu/intel/iommu.c
940 +@@ -168,38 +168,6 @@ static phys_addr_t root_entry_uctp(struct root_entry *re)
941 + return re->hi & VTD_PAGE_MASK;
942 + }
943 +
944 +-static inline void context_clear_pasid_enable(struct context_entry *context)
945 +-{
946 +- context->lo &= ~(1ULL << 11);
947 +-}
948 +-
949 +-static inline bool context_pasid_enabled(struct context_entry *context)
950 +-{
951 +- return !!(context->lo & (1ULL << 11));
952 +-}
953 +-
954 +-static inline void context_set_copied(struct context_entry *context)
955 +-{
956 +- context->hi |= (1ull << 3);
957 +-}
958 +-
959 +-static inline bool context_copied(struct context_entry *context)
960 +-{
961 +- return !!(context->hi & (1ULL << 3));
962 +-}
963 +-
964 +-static inline bool __context_present(struct context_entry *context)
965 +-{
966 +- return (context->lo & 1);
967 +-}
968 +-
969 +-bool context_present(struct context_entry *context)
970 +-{
971 +- return context_pasid_enabled(context) ?
972 +- __context_present(context) :
973 +- __context_present(context) && !context_copied(context);
974 +-}
975 +-
976 + static inline void context_set_present(struct context_entry *context)
977 + {
978 + context->lo |= 1;
979 +@@ -247,6 +215,26 @@ static inline void context_clear_entry(struct context_entry *context)
980 + context->hi = 0;
981 + }
982 +
983 ++static inline bool context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
984 ++{
985 ++ if (!iommu->copied_tables)
986 ++ return false;
987 ++
988 ++ return test_bit(((long)bus << 8) | devfn, iommu->copied_tables);
989 ++}
990 ++
991 ++static inline void
992 ++set_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
993 ++{
994 ++ set_bit(((long)bus << 8) | devfn, iommu->copied_tables);
995 ++}
996 ++
997 ++static inline void
998 ++clear_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
999 ++{
1000 ++ clear_bit(((long)bus << 8) | devfn, iommu->copied_tables);
1001 ++}
1002 ++
1003 + /*
1004 + * This domain is a statically identity mapping domain.
1005 + * 1. This domain creats a static 1:1 mapping to all usable memory.
1006 +@@ -644,6 +632,13 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
1007 + struct context_entry *context;
1008 + u64 *entry;
1009 +
1010 ++ /*
1011 ++ * Except that the caller requested to allocate a new entry,
1012 ++ * returning a copied context entry makes no sense.
1013 ++ */
1014 ++ if (!alloc && context_copied(iommu, bus, devfn))
1015 ++ return NULL;
1016 ++
1017 + entry = &root->lo;
1018 + if (sm_supported(iommu)) {
1019 + if (devfn >= 0x80) {
1020 +@@ -1770,6 +1765,11 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
1021 + iommu->domain_ids = NULL;
1022 + }
1023 +
1024 ++ if (iommu->copied_tables) {
1025 ++ bitmap_free(iommu->copied_tables);
1026 ++ iommu->copied_tables = NULL;
1027 ++ }
1028 ++
1029 + g_iommus[iommu->seq_id] = NULL;
1030 +
1031 + /* free context mapping */
1032 +@@ -1978,7 +1978,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
1033 + goto out_unlock;
1034 +
1035 + ret = 0;
1036 +- if (context_present(context))
1037 ++ if (context_present(context) && !context_copied(iommu, bus, devfn))
1038 + goto out_unlock;
1039 +
1040 + /*
1041 +@@ -1990,7 +1990,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
1042 + * in-flight DMA will exist, and we don't need to worry anymore
1043 + * hereafter.
1044 + */
1045 +- if (context_copied(context)) {
1046 ++ if (context_copied(iommu, bus, devfn)) {
1047 + u16 did_old = context_domain_id(context);
1048 +
1049 + if (did_old < cap_ndoms(iommu->cap)) {
1050 +@@ -2001,6 +2001,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
1051 + iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
1052 + DMA_TLB_DSI_FLUSH);
1053 + }
1054 ++
1055 ++ clear_context_copied(iommu, bus, devfn);
1056 + }
1057 +
1058 + context_clear_entry(context);
1059 +@@ -2783,32 +2785,14 @@ static int copy_context_table(struct intel_iommu *iommu,
1060 + /* Now copy the context entry */
1061 + memcpy(&ce, old_ce + idx, sizeof(ce));
1062 +
1063 +- if (!__context_present(&ce))
1064 ++ if (!context_present(&ce))
1065 + continue;
1066 +
1067 + did = context_domain_id(&ce);
1068 + if (did >= 0 && did < cap_ndoms(iommu->cap))
1069 + set_bit(did, iommu->domain_ids);
1070 +
1071 +- /*
1072 +- * We need a marker for copied context entries. This
1073 +- * marker needs to work for the old format as well as
1074 +- * for extended context entries.
1075 +- *
1076 +- * Bit 67 of the context entry is used. In the old
1077 +- * format this bit is available to software, in the
1078 +- * extended format it is the PGE bit, but PGE is ignored
1079 +- * by HW if PASIDs are disabled (and thus still
1080 +- * available).
1081 +- *
1082 +- * So disable PASIDs first and then mark the entry
1083 +- * copied. This means that we don't copy PASID
1084 +- * translations from the old kernel, but this is fine as
1085 +- * faults there are not fatal.
1086 +- */
1087 +- context_clear_pasid_enable(&ce);
1088 +- context_set_copied(&ce);
1089 +-
1090 ++ set_context_copied(iommu, bus, devfn);
1091 + new_ce[idx] = ce;
1092 + }
1093 +
1094 +@@ -2835,8 +2819,8 @@ static int copy_translation_tables(struct intel_iommu *iommu)
1095 + bool new_ext, ext;
1096 +
1097 + rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
1098 +- ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
1099 +- new_ext = !!ecap_ecs(iommu->ecap);
1100 ++ ext = !!(rtaddr_reg & DMA_RTADDR_SMT);
1101 ++ new_ext = !!sm_supported(iommu);
1102 +
1103 + /*
1104 + * The RTT bit can only be changed when translation is disabled,
1105 +@@ -2847,6 +2831,10 @@ static int copy_translation_tables(struct intel_iommu *iommu)
1106 + if (new_ext != ext)
1107 + return -EINVAL;
1108 +
1109 ++ iommu->copied_tables = bitmap_zalloc(BIT_ULL(16), GFP_KERNEL);
1110 ++ if (!iommu->copied_tables)
1111 ++ return -ENOMEM;
1112 ++
1113 + old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
1114 + if (!old_rt_phys)
1115 + return -EINVAL;
1116 +diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
1117 +index c28f8cc00d1cf..a9cc85882b315 100644
1118 +--- a/drivers/net/ethernet/broadcom/tg3.c
1119 ++++ b/drivers/net/ethernet/broadcom/tg3.c
1120 +@@ -18076,16 +18076,20 @@ static void tg3_shutdown(struct pci_dev *pdev)
1121 + struct net_device *dev = pci_get_drvdata(pdev);
1122 + struct tg3 *tp = netdev_priv(dev);
1123 +
1124 ++ tg3_reset_task_cancel(tp);
1125 ++
1126 + rtnl_lock();
1127 ++
1128 + netif_device_detach(dev);
1129 +
1130 + if (netif_running(dev))
1131 + dev_close(dev);
1132 +
1133 +- if (system_state == SYSTEM_POWER_OFF)
1134 +- tg3_power_down(tp);
1135 ++ tg3_power_down(tp);
1136 +
1137 + rtnl_unlock();
1138 ++
1139 ++ pci_disable_device(pdev);
1140 + }
1141 +
1142 + /**
1143 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
1144 +index cfb8bedba5124..079fa44ada71e 100644
1145 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
1146 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
1147 +@@ -289,6 +289,10 @@ int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id)
1148 + sw_owner_id[i]);
1149 + }
1150 +
1151 ++ if (MLX5_CAP_GEN_2_MAX(dev, sw_vhca_id_valid) &&
1152 ++ dev->priv.sw_vhca_id > 0)
1153 ++ MLX5_SET(init_hca_in, in, sw_vhca_id, dev->priv.sw_vhca_id);
1154 ++
1155 + return mlx5_cmd_exec_in(dev, init_hca, in);
1156 + }
1157 +
1158 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
1159 +index 616207c3b187a..6c8bb74bd8fc6 100644
1160 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
1161 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
1162 +@@ -90,6 +90,8 @@ module_param_named(prof_sel, prof_sel, uint, 0444);
1163 + MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
1164 +
1165 + static u32 sw_owner_id[4];
1166 ++#define MAX_SW_VHCA_ID (BIT(__mlx5_bit_sz(cmd_hca_cap_2, sw_vhca_id)) - 1)
1167 ++static DEFINE_IDA(sw_vhca_ida);
1168 +
1169 + enum {
1170 + MLX5_ATOMIC_REQ_MODE_BE = 0x0,
1171 +@@ -499,6 +501,49 @@ static int max_uc_list_get_devlink_param(struct mlx5_core_dev *dev)
1172 + return err;
1173 + }
1174 +
1175 ++bool mlx5_is_roce_on(struct mlx5_core_dev *dev)
1176 ++{
1177 ++ struct devlink *devlink = priv_to_devlink(dev);
1178 ++ union devlink_param_value val;
1179 ++ int err;
1180 ++
1181 ++ err = devlink_param_driverinit_value_get(devlink,
1182 ++ DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
1183 ++ &val);
1184 ++
1185 ++ if (!err)
1186 ++ return val.vbool;
1187 ++
1188 ++ mlx5_core_dbg(dev, "Failed to get param. err = %d\n", err);
1189 ++ return MLX5_CAP_GEN(dev, roce);
1190 ++}
1191 ++EXPORT_SYMBOL(mlx5_is_roce_on);
1192 ++
1193 ++static int handle_hca_cap_2(struct mlx5_core_dev *dev, void *set_ctx)
1194 ++{
1195 ++ void *set_hca_cap;
1196 ++ int err;
1197 ++
1198 ++ if (!MLX5_CAP_GEN_MAX(dev, hca_cap_2))
1199 ++ return 0;
1200 ++
1201 ++ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL_2);
1202 ++ if (err)
1203 ++ return err;
1204 ++
1205 ++ if (!MLX5_CAP_GEN_2_MAX(dev, sw_vhca_id_valid) ||
1206 ++ !(dev->priv.sw_vhca_id > 0))
1207 ++ return 0;
1208 ++
1209 ++ set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
1210 ++ capability);
1211 ++ memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_GENERAL_2]->cur,
1212 ++ MLX5_ST_SZ_BYTES(cmd_hca_cap_2));
1213 ++ MLX5_SET(cmd_hca_cap_2, set_hca_cap, sw_vhca_id_valid, 1);
1214 ++
1215 ++ return set_caps(dev, set_ctx, MLX5_CAP_GENERAL_2);
1216 ++}
1217 ++
1218 + static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
1219 + {
1220 + struct mlx5_profile *prof = &dev->profile;
1221 +@@ -577,7 +622,8 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
1222 + MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix));
1223 +
1224 + if (MLX5_CAP_GEN(dev, roce_rw_supported))
1225 +- MLX5_SET(cmd_hca_cap, set_hca_cap, roce, mlx5_is_roce_init_enabled(dev));
1226 ++ MLX5_SET(cmd_hca_cap, set_hca_cap, roce,
1227 ++ mlx5_is_roce_on(dev));
1228 +
1229 + max_uc_list = max_uc_list_get_devlink_param(dev);
1230 + if (max_uc_list > 0)
1231 +@@ -603,7 +649,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
1232 + */
1233 + static bool is_roce_fw_disabled(struct mlx5_core_dev *dev)
1234 + {
1235 +- return (MLX5_CAP_GEN(dev, roce_rw_supported) && !mlx5_is_roce_init_enabled(dev)) ||
1236 ++ return (MLX5_CAP_GEN(dev, roce_rw_supported) && !mlx5_is_roce_on(dev)) ||
1237 + (!MLX5_CAP_GEN(dev, roce_rw_supported) && !MLX5_CAP_GEN(dev, roce));
1238 + }
1239 +
1240 +@@ -669,6 +715,13 @@ static int set_hca_cap(struct mlx5_core_dev *dev)
1241 + goto out;
1242 + }
1243 +
1244 ++ memset(set_ctx, 0, set_sz);
1245 ++ err = handle_hca_cap_2(dev, set_ctx);
1246 ++ if (err) {
1247 ++ mlx5_core_err(dev, "handle_hca_cap_2 failed\n");
1248 ++ goto out;
1249 ++ }
1250 ++
1251 + out:
1252 + kfree(set_ctx);
1253 + return err;
1254 +@@ -1512,6 +1565,18 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
1255 + if (err)
1256 + goto err_hca_caps;
1257 +
1258 ++ /* The conjunction of sw_vhca_id with sw_owner_id will be a global
1259 ++ * unique id per function which uses mlx5_core.
1260 ++ * Those values are supplied to FW as part of the init HCA command to
1261 ++ * be used by both driver and FW when it's applicable.
1262 ++ */
1263 ++ dev->priv.sw_vhca_id = ida_alloc_range(&sw_vhca_ida, 1,
1264 ++ MAX_SW_VHCA_ID,
1265 ++ GFP_KERNEL);
1266 ++ if (dev->priv.sw_vhca_id < 0)
1267 ++ mlx5_core_err(dev, "failed to allocate sw_vhca_id, err=%d\n",
1268 ++ dev->priv.sw_vhca_id);
1269 ++
1270 + return 0;
1271 +
1272 + err_hca_caps:
1273 +@@ -1537,6 +1602,9 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
1274 + {
1275 + struct mlx5_priv *priv = &dev->priv;
1276 +
1277 ++ if (priv->sw_vhca_id > 0)
1278 ++ ida_free(&sw_vhca_ida, dev->priv.sw_vhca_id);
1279 ++
1280 + mlx5_hca_caps_free(dev);
1281 + mlx5_adev_cleanup(dev);
1282 + mlx5_pagealloc_cleanup(dev);
1283 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
1284 +index ac020cb780727..d5c3173250309 100644
1285 +--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
1286 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
1287 +@@ -1086,9 +1086,17 @@ int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
1288 + goto free;
1289 +
1290 + MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
1291 +- MLX5_SET(modify_nic_vport_context_in, in,
1292 +- nic_vport_context.affiliated_vhca_id,
1293 +- MLX5_CAP_GEN(master_mdev, vhca_id));
1294 ++ if (MLX5_CAP_GEN_2(master_mdev, sw_vhca_id_valid)) {
1295 ++ MLX5_SET(modify_nic_vport_context_in, in,
1296 ++ nic_vport_context.vhca_id_type, VHCA_ID_TYPE_SW);
1297 ++ MLX5_SET(modify_nic_vport_context_in, in,
1298 ++ nic_vport_context.affiliated_vhca_id,
1299 ++ MLX5_CAP_GEN_2(master_mdev, sw_vhca_id));
1300 ++ } else {
1301 ++ MLX5_SET(modify_nic_vport_context_in, in,
1302 ++ nic_vport_context.affiliated_vhca_id,
1303 ++ MLX5_CAP_GEN(master_mdev, vhca_id));
1304 ++ }
1305 + MLX5_SET(modify_nic_vport_context_in, in,
1306 + nic_vport_context.affiliation_criteria,
1307 + MLX5_CAP_GEN(port_mdev, affiliate_nic_vport_criteria));
1308 +diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
1309 +index 1e1f40f628a02..c69b87d3837da 100644
1310 +--- a/drivers/net/ieee802154/cc2520.c
1311 ++++ b/drivers/net/ieee802154/cc2520.c
1312 +@@ -504,6 +504,7 @@ cc2520_tx(struct ieee802154_hw *hw, struct sk_buff *skb)
1313 + goto err_tx;
1314 +
1315 + if (status & CC2520_STATUS_TX_UNDERFLOW) {
1316 ++ rc = -EINVAL;
1317 + dev_err(&priv->spi->dev, "cc2520 tx underflow exception\n");
1318 + goto err_tx;
1319 + }
1320 +diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
1321 +index 2de09ad5bac03..e11f70911acc1 100644
1322 +--- a/drivers/net/usb/cdc_ether.c
1323 ++++ b/drivers/net/usb/cdc_ether.c
1324 +@@ -777,6 +777,13 @@ static const struct usb_device_id products[] = {
1325 + },
1326 + #endif
1327 +
1328 ++/* Lenovo ThinkPad OneLink+ Dock (based on Realtek RTL8153) */
1329 ++{
1330 ++ USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3054, USB_CLASS_COMM,
1331 ++ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
1332 ++ .driver_info = 0,
1333 ++},
1334 ++
1335 + /* ThinkPad USB-C Dock (based on Realtek RTL8153) */
1336 + {
1337 + USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3062, USB_CLASS_COMM,
1338 +diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
1339 +index d142ac8fcf6e2..688905ea0a6d3 100644
1340 +--- a/drivers/net/usb/r8152.c
1341 ++++ b/drivers/net/usb/r8152.c
1342 +@@ -770,6 +770,7 @@ enum rtl8152_flags {
1343 + RX_EPROTO,
1344 + };
1345 +
1346 ++#define DEVICE_ID_THINKPAD_ONELINK_PLUS_DOCK 0x3054
1347 + #define DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2 0x3082
1348 + #define DEVICE_ID_THINKPAD_USB_C_DONGLE 0x720c
1349 + #define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2 0xa387
1350 +@@ -9581,6 +9582,7 @@ static bool rtl8152_supports_lenovo_macpassthru(struct usb_device *udev)
1351 +
1352 + if (vendor_id == VENDOR_ID_LENOVO) {
1353 + switch (product_id) {
1354 ++ case DEVICE_ID_THINKPAD_ONELINK_PLUS_DOCK:
1355 + case DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2:
1356 + case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2:
1357 + case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN3:
1358 +@@ -9828,6 +9830,7 @@ static const struct usb_device_id rtl8152_table[] = {
1359 + REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927),
1360 + REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101),
1361 + REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f),
1362 ++ REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3054),
1363 + REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062),
1364 + REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3069),
1365 + REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3082),
1366 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
1367 +index 73d9fcba3b1c0..9f6614f7dbeb1 100644
1368 +--- a/drivers/nvme/host/pci.c
1369 ++++ b/drivers/nvme/host/pci.c
1370 +@@ -3517,6 +3517,8 @@ static const struct pci_device_id nvme_id_table[] = {
1371 + .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
1372 + { PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */
1373 + .driver_data = NVME_QUIRK_BOGUS_NID, },
1374 ++ { PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */
1375 ++ .driver_data = NVME_QUIRK_BOGUS_NID, },
1376 + { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
1377 + .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
1378 + { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
1379 +diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
1380 +index dc3b4dc8fe08b..a3694a32f6d52 100644
1381 +--- a/drivers/nvme/target/tcp.c
1382 ++++ b/drivers/nvme/target/tcp.c
1383 +@@ -1506,6 +1506,9 @@ static void nvmet_tcp_state_change(struct sock *sk)
1384 + goto done;
1385 +
1386 + switch (sk->sk_state) {
1387 ++ case TCP_FIN_WAIT2:
1388 ++ case TCP_LAST_ACK:
1389 ++ break;
1390 + case TCP_FIN_WAIT1:
1391 + case TCP_CLOSE_WAIT:
1392 + case TCP_CLOSE:
1393 +diff --git a/drivers/peci/cpu.c b/drivers/peci/cpu.c
1394 +index 68eb61c65d345..de4a7b3e5966e 100644
1395 +--- a/drivers/peci/cpu.c
1396 ++++ b/drivers/peci/cpu.c
1397 +@@ -188,8 +188,6 @@ static void adev_release(struct device *dev)
1398 + {
1399 + struct auxiliary_device *adev = to_auxiliary_dev(dev);
1400 +
1401 +- auxiliary_device_uninit(adev);
1402 +-
1403 + kfree(adev->name);
1404 + kfree(adev);
1405 + }
1406 +@@ -234,6 +232,7 @@ static void unregister_adev(void *_adev)
1407 + struct auxiliary_device *adev = _adev;
1408 +
1409 + auxiliary_device_delete(adev);
1410 ++ auxiliary_device_uninit(adev);
1411 + }
1412 +
1413 + static int devm_adev_add(struct device *dev, int idx)
1414 +diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
1415 +index 513de1f54e2d7..933b96e243b84 100644
1416 +--- a/drivers/perf/arm_pmu_platform.c
1417 ++++ b/drivers/perf/arm_pmu_platform.c
1418 +@@ -117,7 +117,7 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
1419 +
1420 + if (num_irqs == 1) {
1421 + int irq = platform_get_irq(pdev, 0);
1422 +- if (irq && irq_is_percpu_devid(irq))
1423 ++ if ((irq > 0) && irq_is_percpu_devid(irq))
1424 + return pmu_parse_percpu_irq(pmu, irq);
1425 + }
1426 +
1427 +diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
1428 +index ce2bd88feeaa8..08019c6ccc9ca 100644
1429 +--- a/drivers/platform/surface/surface_aggregator_registry.c
1430 ++++ b/drivers/platform/surface/surface_aggregator_registry.c
1431 +@@ -556,6 +556,9 @@ static const struct acpi_device_id ssam_platform_hub_match[] = {
1432 + /* Surface Laptop Go 1 */
1433 + { "MSHW0118", (unsigned long)ssam_node_group_slg1 },
1434 +
1435 ++ /* Surface Laptop Go 2 */
1436 ++ { "MSHW0290", (unsigned long)ssam_node_group_slg1 },
1437 ++
1438 + /* Surface Laptop Studio */
1439 + { "MSHW0123", (unsigned long)ssam_node_group_sls },
1440 +
1441 +diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
1442 +index 9c6943e401a6c..0fbcaffabbfc7 100644
1443 +--- a/drivers/platform/x86/acer-wmi.c
1444 ++++ b/drivers/platform/x86/acer-wmi.c
1445 +@@ -99,6 +99,7 @@ static const struct key_entry acer_wmi_keymap[] __initconst = {
1446 + {KE_KEY, 0x22, {KEY_PROG2} }, /* Arcade */
1447 + {KE_KEY, 0x23, {KEY_PROG3} }, /* P_Key */
1448 + {KE_KEY, 0x24, {KEY_PROG4} }, /* Social networking_Key */
1449 ++ {KE_KEY, 0x27, {KEY_HELP} },
1450 + {KE_KEY, 0x29, {KEY_PROG3} }, /* P_Key for TM8372 */
1451 + {KE_IGNORE, 0x41, {KEY_MUTE} },
1452 + {KE_IGNORE, 0x42, {KEY_PREVIOUSSONG} },
1453 +@@ -112,7 +113,13 @@ static const struct key_entry acer_wmi_keymap[] __initconst = {
1454 + {KE_IGNORE, 0x48, {KEY_VOLUMEUP} },
1455 + {KE_IGNORE, 0x49, {KEY_VOLUMEDOWN} },
1456 + {KE_IGNORE, 0x4a, {KEY_VOLUMEDOWN} },
1457 +- {KE_IGNORE, 0x61, {KEY_SWITCHVIDEOMODE} },
1458 ++ /*
1459 ++ * 0x61 is KEY_SWITCHVIDEOMODE. Usually this is a duplicate input event
1460 ++ * with the "Video Bus" input device events. But sometimes it is not
1461 ++ * a dup. Map it to KEY_UNKNOWN instead of using KE_IGNORE so that
1462 ++ * udev/hwdb can override it on systems where it is not a dup.
1463 ++ */
1464 ++ {KE_KEY, 0x61, {KEY_UNKNOWN} },
1465 + {KE_IGNORE, 0x62, {KEY_BRIGHTNESSUP} },
1466 + {KE_IGNORE, 0x63, {KEY_BRIGHTNESSDOWN} },
1467 + {KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */
1468 +diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
1469 +index 62ce198a34631..a0f31624aee97 100644
1470 +--- a/drivers/platform/x86/asus-wmi.c
1471 ++++ b/drivers/platform/x86/asus-wmi.c
1472 +@@ -107,7 +107,7 @@ module_param(fnlock_default, bool, 0444);
1473 + #define WMI_EVENT_MASK 0xFFFF
1474 +
1475 + #define FAN_CURVE_POINTS 8
1476 +-#define FAN_CURVE_BUF_LEN (FAN_CURVE_POINTS * 2)
1477 ++#define FAN_CURVE_BUF_LEN 32
1478 + #define FAN_CURVE_DEV_CPU 0x00
1479 + #define FAN_CURVE_DEV_GPU 0x01
1480 + /* Mask to determine if setting temperature or percentage */
1481 +@@ -2208,8 +2208,10 @@ static int fan_curve_get_factory_default(struct asus_wmi *asus, u32 fan_dev)
1482 + curves = &asus->custom_fan_curves[fan_idx];
1483 + err = asus_wmi_evaluate_method_buf(asus->dsts_id, fan_dev, mode, buf,
1484 + FAN_CURVE_BUF_LEN);
1485 +- if (err)
1486 ++ if (err) {
1487 ++ pr_warn("%s (0x%08x) failed: %d\n", __func__, fan_dev, err);
1488 + return err;
1489 ++ }
1490 +
1491 + fan_curve_copy_from_buf(curves, buf);
1492 + curves->device_id = fan_dev;
1493 +@@ -2227,9 +2229,6 @@ static int fan_curve_check_present(struct asus_wmi *asus, bool *available,
1494 +
1495 + err = fan_curve_get_factory_default(asus, fan_dev);
1496 + if (err) {
1497 +- pr_debug("fan_curve_get_factory_default(0x%08x) failed: %d\n",
1498 +- fan_dev, err);
1499 +- /* Don't cause probe to fail on devices without fan-curves */
1500 + return 0;
1501 + }
1502 +
1503 +diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
1504 +index 4051c8cd0cd8a..23ab3b048d9be 100644
1505 +--- a/drivers/usb/storage/unusual_uas.h
1506 ++++ b/drivers/usb/storage/unusual_uas.h
1507 +@@ -62,6 +62,13 @@ UNUSUAL_DEV(0x0984, 0x0301, 0x0128, 0x0128,
1508 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1509 + US_FL_IGNORE_UAS),
1510 +
1511 ++/* Reported-by: Tom Hu <huxiaoying@×××××××.cn> */
1512 ++UNUSUAL_DEV(0x0b05, 0x1932, 0x0000, 0x9999,
1513 ++ "ASUS",
1514 ++ "External HDD",
1515 ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1516 ++ US_FL_IGNORE_UAS),
1517 ++
1518 + /* Reported-by: David Webb <djw@××××××.uk> */
1519 + UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
1520 + "Seagate",
1521 +diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
1522 +index 5fcf89faa31ab..d72626d71258f 100644
1523 +--- a/include/linux/intel-iommu.h
1524 ++++ b/include/linux/intel-iommu.h
1525 +@@ -196,7 +196,6 @@
1526 + #define ecap_dis(e) (((e) >> 27) & 0x1)
1527 + #define ecap_nest(e) (((e) >> 26) & 0x1)
1528 + #define ecap_mts(e) (((e) >> 25) & 0x1)
1529 +-#define ecap_ecs(e) (((e) >> 24) & 0x1)
1530 + #define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16)
1531 + #define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
1532 + #define ecap_coherent(e) ((e) & 0x1)
1533 +@@ -264,7 +263,6 @@
1534 + #define DMA_GSTS_CFIS (((u32)1) << 23)
1535 +
1536 + /* DMA_RTADDR_REG */
1537 +-#define DMA_RTADDR_RTT (((u64)1) << 11)
1538 + #define DMA_RTADDR_SMT (((u64)1) << 10)
1539 +
1540 + /* CCMD_REG */
1541 +@@ -579,6 +577,7 @@ struct intel_iommu {
1542 +
1543 + #ifdef CONFIG_INTEL_IOMMU
1544 + unsigned long *domain_ids; /* bitmap of domains */
1545 ++ unsigned long *copied_tables; /* bitmap of copied tables */
1546 + spinlock_t lock; /* protect context, domain ids */
1547 + struct root_entry *root_entry; /* virtual address */
1548 +
1549 +@@ -692,6 +691,11 @@ static inline int nr_pte_to_next_page(struct dma_pte *pte)
1550 + (struct dma_pte *)ALIGN((unsigned long)pte, VTD_PAGE_SIZE) - pte;
1551 + }
1552 +
1553 ++static inline bool context_present(struct context_entry *context)
1554 ++{
1555 ++ return (context->lo & 1);
1556 ++}
1557 ++
1558 + extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
1559 +
1560 + extern int dmar_enable_qi(struct intel_iommu *iommu);
1561 +@@ -776,7 +780,6 @@ static inline void intel_iommu_debugfs_init(void) {}
1562 + #endif /* CONFIG_INTEL_IOMMU_DEBUGFS */
1563 +
1564 + extern const struct attribute_group *intel_iommu_groups[];
1565 +-bool context_present(struct context_entry *context);
1566 + struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
1567 + u8 devfn, int alloc);
1568 +
1569 +diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
1570 +index b0b4ac92354a2..b3ea245faa515 100644
1571 +--- a/include/linux/mlx5/driver.h
1572 ++++ b/include/linux/mlx5/driver.h
1573 +@@ -606,6 +606,7 @@ struct mlx5_priv {
1574 + spinlock_t ctx_lock;
1575 + struct mlx5_adev **adev;
1576 + int adev_idx;
1577 ++ int sw_vhca_id;
1578 + struct mlx5_events *events;
1579 +
1580 + struct mlx5_flow_steering *steering;
1581 +@@ -1274,16 +1275,17 @@ enum {
1582 + MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
1583 + };
1584 +
1585 +-static inline bool mlx5_is_roce_init_enabled(struct mlx5_core_dev *dev)
1586 ++bool mlx5_is_roce_on(struct mlx5_core_dev *dev);
1587 ++
1588 ++static inline bool mlx5_get_roce_state(struct mlx5_core_dev *dev)
1589 + {
1590 +- struct devlink *devlink = priv_to_devlink(dev);
1591 +- union devlink_param_value val;
1592 +- int err;
1593 +-
1594 +- err = devlink_param_driverinit_value_get(devlink,
1595 +- DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
1596 +- &val);
1597 +- return err ? MLX5_CAP_GEN(dev, roce) : val.vbool;
1598 ++ if (MLX5_CAP_GEN(dev, roce_rw_supported))
1599 ++ return MLX5_CAP_GEN(dev, roce);
1600 ++
1601 ++ /* If RoCE cap is read-only in FW, get RoCE state from devlink
1602 ++ * in order to support RoCE enable/disable feature
1603 ++ */
1604 ++ return mlx5_is_roce_on(dev);
1605 + }
1606 +
1607 + #endif /* MLX5_DRIVER_H */
1608 +diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
1609 +index fd7d083a34d33..6d57e5ec9718d 100644
1610 +--- a/include/linux/mlx5/mlx5_ifc.h
1611 ++++ b/include/linux/mlx5/mlx5_ifc.h
1612 +@@ -1804,7 +1804,14 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
1613 + u8 max_reformat_remove_size[0x8];
1614 + u8 max_reformat_remove_offset[0x8];
1615 +
1616 +- u8 reserved_at_c0[0x740];
1617 ++ u8 reserved_at_c0[0x160];
1618 ++
1619 ++ u8 reserved_at_220[0x1];
1620 ++ u8 sw_vhca_id_valid[0x1];
1621 ++ u8 sw_vhca_id[0xe];
1622 ++ u8 reserved_at_230[0x10];
1623 ++
1624 ++ u8 reserved_at_240[0x5c0];
1625 + };
1626 +
1627 + enum mlx5_ifc_flow_destination_type {
1628 +@@ -3715,6 +3722,11 @@ struct mlx5_ifc_rmpc_bits {
1629 + struct mlx5_ifc_wq_bits wq;
1630 + };
1631 +
1632 ++enum {
1633 ++ VHCA_ID_TYPE_HW = 0,
1634 ++ VHCA_ID_TYPE_SW = 1,
1635 ++};
1636 ++
1637 + struct mlx5_ifc_nic_vport_context_bits {
1638 + u8 reserved_at_0[0x5];
1639 + u8 min_wqe_inline_mode[0x3];
1640 +@@ -3731,8 +3743,8 @@ struct mlx5_ifc_nic_vport_context_bits {
1641 + u8 event_on_mc_address_change[0x1];
1642 + u8 event_on_uc_address_change[0x1];
1643 +
1644 +- u8 reserved_at_40[0xc];
1645 +-
1646 ++ u8 vhca_id_type[0x1];
1647 ++ u8 reserved_at_41[0xb];
1648 + u8 affiliation_criteria[0x4];
1649 + u8 affiliated_vhca_id[0x10];
1650 +
1651 +@@ -7189,7 +7201,12 @@ struct mlx5_ifc_init_hca_in_bits {
1652 + u8 reserved_at_20[0x10];
1653 + u8 op_mod[0x10];
1654 +
1655 +- u8 reserved_at_40[0x40];
1656 ++ u8 reserved_at_40[0x20];
1657 ++
1658 ++ u8 reserved_at_60[0x2];
1659 ++ u8 sw_vhca_id[0xe];
1660 ++ u8 reserved_at_70[0x10];
1661 ++
1662 + u8 sw_owner_id[4][0x20];
1663 + };
1664 +
1665 +diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
1666 +index cbdf0e2bc5ae0..d0fb74b0db1d5 100644
1667 +--- a/net/bluetooth/mgmt.c
1668 ++++ b/net/bluetooth/mgmt.c
1669 +@@ -4420,6 +4420,22 @@ static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
1670 + MGMT_STATUS_NOT_SUPPORTED);
1671 + }
1672 +
1673 ++static u32 get_params_flags(struct hci_dev *hdev,
1674 ++ struct hci_conn_params *params)
1675 ++{
1676 ++ u32 flags = hdev->conn_flags;
1677 ++
1678 ++ /* Devices using RPAs can only be programmed in the acceptlist if
1679 ++ * LL Privacy has been enabled, otherwise they cannot mark
1680 ++ * HCI_CONN_FLAG_REMOTE_WAKEUP.
1681 ++ */
1682 ++ if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
1683 ++ hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
1684 ++ flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
1685 ++
1686 ++ return flags;
1687 ++}
1688 ++
1689 + static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
1690 + u16 data_len)
1691 + {
1692 +@@ -4451,10 +4467,10 @@ static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
1693 + } else {
1694 + params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
1695 + le_addr_type(cp->addr.type));
1696 +-
1697 + if (!params)
1698 + goto done;
1699 +
1700 ++ supported_flags = get_params_flags(hdev, params);
1701 + current_flags = params->flags;
1702 + }
1703 +
1704 +@@ -4523,38 +4539,35 @@ static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
1705 + bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
1706 + &cp->addr.bdaddr, cp->addr.type);
1707 + }
1708 +- } else {
1709 +- params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
1710 +- le_addr_type(cp->addr.type));
1711 +- if (params) {
1712 +- /* Devices using RPAs can only be programmed in the
1713 +- * acceptlist LL Privacy has been enable otherwise they
1714 +- * cannot mark HCI_CONN_FLAG_REMOTE_WAKEUP.
1715 +- */
1716 +- if ((current_flags & HCI_CONN_FLAG_REMOTE_WAKEUP) &&
1717 +- !use_ll_privacy(hdev) &&
1718 +- hci_find_irk_by_addr(hdev, &params->addr,
1719 +- params->addr_type)) {
1720 +- bt_dev_warn(hdev,
1721 +- "Cannot set wakeable for RPA");
1722 +- goto unlock;
1723 +- }
1724 +
1725 +- params->flags = current_flags;
1726 +- status = MGMT_STATUS_SUCCESS;
1727 ++ goto unlock;
1728 ++ }
1729 +
1730 +- /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
1731 +- * has been set.
1732 +- */
1733 +- if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
1734 +- hci_update_passive_scan(hdev);
1735 +- } else {
1736 +- bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
1737 +- &cp->addr.bdaddr,
1738 +- le_addr_type(cp->addr.type));
1739 +- }
1740 ++ params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
1741 ++ le_addr_type(cp->addr.type));
1742 ++ if (!params) {
1743 ++ bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
1744 ++ &cp->addr.bdaddr, le_addr_type(cp->addr.type));
1745 ++ goto unlock;
1746 ++ }
1747 ++
1748 ++ supported_flags = get_params_flags(hdev, params);
1749 ++
1750 ++ if ((supported_flags | current_flags) != supported_flags) {
1751 ++ bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
1752 ++ current_flags, supported_flags);
1753 ++ goto unlock;
1754 + }
1755 +
1756 ++ params->flags = current_flags;
1757 ++ status = MGMT_STATUS_SUCCESS;
1758 ++
1759 ++ /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
1760 ++ * has been set.
1761 ++ */
1762 ++ if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
1763 ++ hci_update_passive_scan(hdev);
1764 ++
1765 + unlock:
1766 + hci_dev_unlock(hdev);
1767 +
1768 +diff --git a/net/dsa/tag_hellcreek.c b/net/dsa/tag_hellcreek.c
1769 +index eb204ad36eeec..846588c0070a5 100644
1770 +--- a/net/dsa/tag_hellcreek.c
1771 ++++ b/net/dsa/tag_hellcreek.c
1772 +@@ -45,7 +45,7 @@ static struct sk_buff *hellcreek_rcv(struct sk_buff *skb,
1773 +
1774 + skb->dev = dsa_master_find_slave(dev, 0, port);
1775 + if (!skb->dev) {
1776 +- netdev_warn(dev, "Failed to get source port: %d\n", port);
1777 ++ netdev_warn_once(dev, "Failed to get source port: %d\n", port);
1778 + return NULL;
1779 + }
1780 +