From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.10 commit in: /
Date: Sun, 12 Sep 2021 14:38:05
Message-Id: 1631457470.e28c47d5ba4a96b33ace817627a332757b3e1606.mpagano@gentoo
1 commit: e28c47d5ba4a96b33ace817627a332757b3e1606
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Sun Sep 12 14:37:50 2021 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Sun Sep 12 14:37:50 2021 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e28c47d5ba4a96b33ace817627a332757b3e1606
7
8 Linux patch 5.10.64
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1063_linux-5.10.64.patch | 1262 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 1266 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 0e463ca..8ddd447 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -295,6 +295,10 @@ Patch: 1062_linux-5.10.63.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.10.63
23
24 +Patch: 1063_linux-5.10.64.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.10.64
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1063_linux-5.10.64.patch b/1063_linux-5.10.64.patch
33 new file mode 100644
34 index 0000000..e05fdf6
35 --- /dev/null
36 +++ b/1063_linux-5.10.64.patch
37 @@ -0,0 +1,1262 @@
38 +diff --git a/Makefile b/Makefile
39 +index b2d326f4dea68..982aa1876aa04 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 5
45 + PATCHLEVEL = 10
46 +-SUBLEVEL = 63
47 ++SUBLEVEL = 64
48 + EXTRAVERSION =
49 + NAME = Dare mighty things
50 +
51 +diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c
52 +index 6a98a76516214..2da6139b0977f 100644
53 +--- a/arch/x86/events/amd/iommu.c
54 ++++ b/arch/x86/events/amd/iommu.c
55 +@@ -18,8 +18,6 @@
56 + #include "../perf_event.h"
57 + #include "iommu.h"
58 +
59 +-#define COUNTER_SHIFT 16
60 +-
61 + /* iommu pmu conf masks */
62 + #define GET_CSOURCE(x) ((x)->conf & 0xFFULL)
63 + #define GET_DEVID(x) (((x)->conf >> 8) & 0xFFFFULL)
64 +@@ -285,22 +283,31 @@ static void perf_iommu_start(struct perf_event *event, int flags)
65 + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
66 + hwc->state = 0;
67 +
68 ++ /*
69 ++ * To account for power-gating, which prevents write to
70 ++ * the counter, we need to enable the counter
71 ++ * before setting up counter register.
72 ++ */
73 ++ perf_iommu_enable_event(event);
74 ++
75 + if (flags & PERF_EF_RELOAD) {
76 +- u64 prev_raw_count = local64_read(&hwc->prev_count);
77 ++ u64 count = 0;
78 + struct amd_iommu *iommu = perf_event_2_iommu(event);
79 +
80 ++ /*
81 ++ * Since the IOMMU PMU only support counting mode,
82 ++ * the counter always start with value zero.
83 ++ */
84 + amd_iommu_pc_set_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
85 +- IOMMU_PC_COUNTER_REG, &prev_raw_count);
86 ++ IOMMU_PC_COUNTER_REG, &count);
87 + }
88 +
89 +- perf_iommu_enable_event(event);
90 + perf_event_update_userpage(event);
91 +-
92 + }
93 +
94 + static void perf_iommu_read(struct perf_event *event)
95 + {
96 +- u64 count, prev, delta;
97 ++ u64 count;
98 + struct hw_perf_event *hwc = &event->hw;
99 + struct amd_iommu *iommu = perf_event_2_iommu(event);
100 +
101 +@@ -311,14 +318,11 @@ static void perf_iommu_read(struct perf_event *event)
102 + /* IOMMU pc counter register is only 48 bits */
103 + count &= GENMASK_ULL(47, 0);
104 +
105 +- prev = local64_read(&hwc->prev_count);
106 +- if (local64_cmpxchg(&hwc->prev_count, prev, count) != prev)
107 +- return;
108 +-
109 +- /* Handle 48-bit counter overflow */
110 +- delta = (count << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
111 +- delta >>= COUNTER_SHIFT;
112 +- local64_add(delta, &event->count);
113 ++ /*
114 ++ * Since the counter always start with value zero,
115 ++ * simply just accumulate the count for the event.
116 ++ */
117 ++ local64_add(count, &event->count);
118 + }
119 +
120 + static void perf_iommu_stop(struct perf_event *event, int flags)
121 +@@ -328,15 +332,16 @@ static void perf_iommu_stop(struct perf_event *event, int flags)
122 + if (hwc->state & PERF_HES_UPTODATE)
123 + return;
124 +
125 ++ /*
126 ++ * To account for power-gating, in which reading the counter would
127 ++ * return zero, we need to read the register before disabling.
128 ++ */
129 ++ perf_iommu_read(event);
130 ++ hwc->state |= PERF_HES_UPTODATE;
131 ++
132 + perf_iommu_disable_event(event);
133 + WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
134 + hwc->state |= PERF_HES_STOPPED;
135 +-
136 +- if (hwc->state & PERF_HES_UPTODATE)
137 +- return;
138 +-
139 +- perf_iommu_read(event);
140 +- hwc->state |= PERF_HES_UPTODATE;
141 + }
142 +
143 + static int perf_iommu_add(struct perf_event *event, int flags)
144 +diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
145 +index b29657b76e3fa..798a6f73f8946 100644
146 +--- a/arch/x86/kernel/reboot.c
147 ++++ b/arch/x86/kernel/reboot.c
148 +@@ -388,10 +388,11 @@ static const struct dmi_system_id reboot_dmi_table[] __initconst = {
149 + },
150 + { /* Handle problems with rebooting on the OptiPlex 990. */
151 + .callback = set_pci_reboot,
152 +- .ident = "Dell OptiPlex 990",
153 ++ .ident = "Dell OptiPlex 990 BIOS A0x",
154 + .matches = {
155 + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
156 + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"),
157 ++ DMI_MATCH(DMI_BIOS_VERSION, "A0"),
158 + },
159 + },
160 + { /* Handle problems with rebooting on Dell 300's */
161 +diff --git a/block/blk-core.c b/block/blk-core.c
162 +index 2d53e2ff48ff8..fbc39756f37de 100644
163 +--- a/block/blk-core.c
164 ++++ b/block/blk-core.c
165 +@@ -121,7 +121,6 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
166 + rq->internal_tag = BLK_MQ_NO_TAG;
167 + rq->start_time_ns = ktime_get_ns();
168 + rq->part = NULL;
169 +- refcount_set(&rq->ref, 1);
170 + blk_crypto_rq_set_defaults(rq);
171 + }
172 + EXPORT_SYMBOL(blk_rq_init);
173 +diff --git a/block/blk-flush.c b/block/blk-flush.c
174 +index 7ee7e5e8905d5..70f1d02135ed6 100644
175 +--- a/block/blk-flush.c
176 ++++ b/block/blk-flush.c
177 +@@ -263,6 +263,11 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
178 + spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
179 + }
180 +
181 ++bool is_flush_rq(struct request *rq)
182 ++{
183 ++ return rq->end_io == flush_end_io;
184 ++}
185 ++
186 + /**
187 + * blk_kick_flush - consider issuing flush request
188 + * @q: request_queue being kicked
189 +@@ -330,6 +335,14 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
190 + flush_rq->rq_flags |= RQF_FLUSH_SEQ;
191 + flush_rq->rq_disk = first_rq->rq_disk;
192 + flush_rq->end_io = flush_end_io;
193 ++ /*
194 ++ * Order WRITE ->end_io and WRITE rq->ref, and its pair is the one
195 ++ * implied in refcount_inc_not_zero() called from
196 ++ * blk_mq_find_and_get_req(), which orders WRITE/READ flush_rq->ref
197 ++ * and READ flush_rq->end_io
198 ++ */
199 ++ smp_wmb();
200 ++ refcount_set(&flush_rq->ref, 1);
201 +
202 + blk_flush_queue_rq(flush_rq, false);
203 + }
204 +diff --git a/block/blk-mq.c b/block/blk-mq.c
205 +index 044d0e3a15ad7..9e3fedbaa644b 100644
206 +--- a/block/blk-mq.c
207 ++++ b/block/blk-mq.c
208 +@@ -929,7 +929,7 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
209 +
210 + void blk_mq_put_rq_ref(struct request *rq)
211 + {
212 +- if (is_flush_rq(rq, rq->mq_hctx))
213 ++ if (is_flush_rq(rq))
214 + rq->end_io(rq, 0);
215 + else if (refcount_dec_and_test(&rq->ref))
216 + __blk_mq_free_request(rq);
217 +@@ -2589,16 +2589,49 @@ static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
218 + &hctx->cpuhp_dead);
219 + }
220 +
221 ++/*
222 ++ * Before freeing hw queue, clearing the flush request reference in
223 ++ * tags->rqs[] for avoiding potential UAF.
224 ++ */
225 ++static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
226 ++ unsigned int queue_depth, struct request *flush_rq)
227 ++{
228 ++ int i;
229 ++ unsigned long flags;
230 ++
231 ++ /* The hw queue may not be mapped yet */
232 ++ if (!tags)
233 ++ return;
234 ++
235 ++ WARN_ON_ONCE(refcount_read(&flush_rq->ref) != 0);
236 ++
237 ++ for (i = 0; i < queue_depth; i++)
238 ++ cmpxchg(&tags->rqs[i], flush_rq, NULL);
239 ++
240 ++ /*
241 ++ * Wait until all pending iteration is done.
242 ++ *
243 ++ * Request reference is cleared and it is guaranteed to be observed
244 ++ * after the ->lock is released.
245 ++ */
246 ++ spin_lock_irqsave(&tags->lock, flags);
247 ++ spin_unlock_irqrestore(&tags->lock, flags);
248 ++}
249 ++
250 + /* hctx->ctxs will be freed in queue's release handler */
251 + static void blk_mq_exit_hctx(struct request_queue *q,
252 + struct blk_mq_tag_set *set,
253 + struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
254 + {
255 ++ struct request *flush_rq = hctx->fq->flush_rq;
256 ++
257 + if (blk_mq_hw_queue_mapped(hctx))
258 + blk_mq_tag_idle(hctx);
259 +
260 ++ blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
261 ++ set->queue_depth, flush_rq);
262 + if (set->ops->exit_request)
263 +- set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
264 ++ set->ops->exit_request(set, flush_rq, hctx_idx);
265 +
266 + if (set->ops->exit_hctx)
267 + set->ops->exit_hctx(hctx, hctx_idx);
268 +diff --git a/block/blk.h b/block/blk.h
269 +index dfab98465db9a..ecfd523c68d00 100644
270 +--- a/block/blk.h
271 ++++ b/block/blk.h
272 +@@ -44,11 +44,7 @@ static inline void __blk_get_queue(struct request_queue *q)
273 + kobject_get(&q->kobj);
274 + }
275 +
276 +-static inline bool
277 +-is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx)
278 +-{
279 +- return hctx->fq->flush_rq == req;
280 +-}
281 ++bool is_flush_rq(struct request *req);
282 +
283 + struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
284 + gfp_t flags);
285 +diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
286 +index e690a1b09e98b..30be18bac8063 100644
287 +--- a/drivers/net/ethernet/realtek/r8169_main.c
288 ++++ b/drivers/net/ethernet/realtek/r8169_main.c
289 +@@ -3547,6 +3547,7 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
290 + rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
291 +
292 + rtl_pcie_state_l2l3_disable(tp);
293 ++ rtl_hw_aspm_clkreq_enable(tp, true);
294 + }
295 +
296 + DECLARE_RTL_COND(rtl_mac_ocp_e00e_cond)
297 +diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
298 +index 6bd3a389d389c..650ffb93796f1 100644
299 +--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
300 ++++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
301 +@@ -942,10 +942,8 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
302 + wmb();
303 + lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
304 +
305 +- if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
306 +- netdev_info(ndev, "%s -> netif_stop_queue\n", __func__);
307 ++ if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
308 + netif_stop_queue(ndev);
309 +- }
310 +
311 + return NETDEV_TX_OK;
312 + }
313 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
314 +index cd2401d4764f2..a91c944961caa 100644
315 +--- a/drivers/pci/quirks.c
316 ++++ b/drivers/pci/quirks.c
317 +@@ -3246,12 +3246,12 @@ static void fixup_mpss_256(struct pci_dev *dev)
318 + {
319 + dev->pcie_mpss = 1; /* 256 bytes */
320 + }
321 +-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
322 +- PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256);
323 +-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
324 +- PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
325 +-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
326 +- PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
327 ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
328 ++ PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256);
329 ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
330 ++ PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
331 ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
332 ++ PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
333 +
334 + /*
335 + * Intel 5000 and 5100 Memory controllers have an erratum with read completion
336 +diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
337 +index ad0549dac7d79..c37468887fd2a 100644
338 +--- a/drivers/tty/serial/8250/8250_omap.c
339 ++++ b/drivers/tty/serial/8250/8250_omap.c
340 +@@ -538,6 +538,11 @@ static void omap_8250_pm(struct uart_port *port, unsigned int state,
341 + static void omap_serial_fill_features_erratas(struct uart_8250_port *up,
342 + struct omap8250_priv *priv)
343 + {
344 ++ const struct soc_device_attribute k3_soc_devices[] = {
345 ++ { .family = "AM65X", },
346 ++ { .family = "J721E", .revision = "SR1.0" },
347 ++ { /* sentinel */ }
348 ++ };
349 + u32 mvr, scheme;
350 + u16 revision, major, minor;
351 +
352 +@@ -585,6 +590,14 @@ static void omap_serial_fill_features_erratas(struct uart_8250_port *up,
353 + default:
354 + break;
355 + }
356 ++
357 ++ /*
358 ++ * AM65x SR1.0, AM65x SR2.0 and J721e SR1.0 don't
359 ++ * don't have RHR_IT_DIS bit in IER2 register. So drop to flag
360 ++ * to enable errata workaround.
361 ++ */
362 ++ if (soc_device_match(k3_soc_devices))
363 ++ priv->habit &= ~UART_HAS_RHR_IT_DIS;
364 + }
365 +
366 + static void omap8250_uart_qos_work(struct work_struct *work)
367 +@@ -1208,12 +1221,6 @@ static int omap8250_no_handle_irq(struct uart_port *port)
368 + return 0;
369 + }
370 +
371 +-static const struct soc_device_attribute k3_soc_devices[] = {
372 +- { .family = "AM65X", },
373 +- { .family = "J721E", .revision = "SR1.0" },
374 +- { /* sentinel */ }
375 +-};
376 +-
377 + static struct omap8250_dma_params am654_dma = {
378 + .rx_size = SZ_2K,
379 + .rx_trigger = 1,
380 +@@ -1419,13 +1426,6 @@ static int omap8250_probe(struct platform_device *pdev)
381 + up.dma->rxconf.src_maxburst = RX_TRIGGER;
382 + up.dma->txconf.dst_maxburst = TX_TRIGGER;
383 + }
384 +-
385 +- /*
386 +- * AM65x SR1.0, AM65x SR2.0 and J721e SR1.0 don't
387 +- * don't have RHR_IT_DIS bit in IER2 register
388 +- */
389 +- if (soc_device_match(k3_soc_devices))
390 +- priv->habit &= ~UART_HAS_RHR_IT_DIS;
391 + }
392 + #endif
393 + ret = serial8250_register_8250_port(&up);
394 +diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
395 +index f3f112b08c9b1..57ee72fead45a 100644
396 +--- a/drivers/usb/gadget/udc/tegra-xudc.c
397 ++++ b/drivers/usb/gadget/udc/tegra-xudc.c
398 +@@ -1610,7 +1610,7 @@ static void tegra_xudc_ep_context_setup(struct tegra_xudc_ep *ep)
399 + u16 maxpacket, maxburst = 0, esit = 0;
400 + u32 val;
401 +
402 +- maxpacket = usb_endpoint_maxp(desc) & 0x7ff;
403 ++ maxpacket = usb_endpoint_maxp(desc);
404 + if (xudc->gadget.speed == USB_SPEED_SUPER) {
405 + if (!usb_endpoint_xfer_control(desc))
406 + maxburst = comp_desc->bMaxBurst;
407 +@@ -1621,7 +1621,7 @@ static void tegra_xudc_ep_context_setup(struct tegra_xudc_ep *ep)
408 + (usb_endpoint_xfer_int(desc) ||
409 + usb_endpoint_xfer_isoc(desc))) {
410 + if (xudc->gadget.speed == USB_SPEED_HIGH) {
411 +- maxburst = (usb_endpoint_maxp(desc) >> 11) & 0x3;
412 ++ maxburst = usb_endpoint_maxp_mult(desc) - 1;
413 + if (maxburst == 0x3) {
414 + dev_warn(xudc->dev,
415 + "invalid endpoint maxburst\n");
416 +diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
417 +index 2c0fda57869e4..dc832ddf7033f 100644
418 +--- a/drivers/usb/host/xhci-debugfs.c
419 ++++ b/drivers/usb/host/xhci-debugfs.c
420 +@@ -198,12 +198,13 @@ static void xhci_ring_dump_segment(struct seq_file *s,
421 + int i;
422 + dma_addr_t dma;
423 + union xhci_trb *trb;
424 ++ char str[XHCI_MSG_MAX];
425 +
426 + for (i = 0; i < TRBS_PER_SEGMENT; i++) {
427 + trb = &seg->trbs[i];
428 + dma = seg->dma + i * sizeof(*trb);
429 + seq_printf(s, "%pad: %s\n", &dma,
430 +- xhci_decode_trb(le32_to_cpu(trb->generic.field[0]),
431 ++ xhci_decode_trb(str, XHCI_MSG_MAX, le32_to_cpu(trb->generic.field[0]),
432 + le32_to_cpu(trb->generic.field[1]),
433 + le32_to_cpu(trb->generic.field[2]),
434 + le32_to_cpu(trb->generic.field[3])));
435 +@@ -260,11 +261,13 @@ static int xhci_slot_context_show(struct seq_file *s, void *unused)
436 + struct xhci_slot_ctx *slot_ctx;
437 + struct xhci_slot_priv *priv = s->private;
438 + struct xhci_virt_device *dev = priv->dev;
439 ++ char str[XHCI_MSG_MAX];
440 +
441 + xhci = hcd_to_xhci(bus_to_hcd(dev->udev->bus));
442 + slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
443 + seq_printf(s, "%pad: %s\n", &dev->out_ctx->dma,
444 +- xhci_decode_slot_context(le32_to_cpu(slot_ctx->dev_info),
445 ++ xhci_decode_slot_context(str,
446 ++ le32_to_cpu(slot_ctx->dev_info),
447 + le32_to_cpu(slot_ctx->dev_info2),
448 + le32_to_cpu(slot_ctx->tt_info),
449 + le32_to_cpu(slot_ctx->dev_state)));
450 +@@ -280,6 +283,7 @@ static int xhci_endpoint_context_show(struct seq_file *s, void *unused)
451 + struct xhci_ep_ctx *ep_ctx;
452 + struct xhci_slot_priv *priv = s->private;
453 + struct xhci_virt_device *dev = priv->dev;
454 ++ char str[XHCI_MSG_MAX];
455 +
456 + xhci = hcd_to_xhci(bus_to_hcd(dev->udev->bus));
457 +
458 +@@ -287,7 +291,8 @@ static int xhci_endpoint_context_show(struct seq_file *s, void *unused)
459 + ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
460 + dma = dev->out_ctx->dma + (ep_index + 1) * CTX_SIZE(xhci->hcc_params);
461 + seq_printf(s, "%pad: %s\n", &dma,
462 +- xhci_decode_ep_context(le32_to_cpu(ep_ctx->ep_info),
463 ++ xhci_decode_ep_context(str,
464 ++ le32_to_cpu(ep_ctx->ep_info),
465 + le32_to_cpu(ep_ctx->ep_info2),
466 + le64_to_cpu(ep_ctx->deq),
467 + le32_to_cpu(ep_ctx->tx_info)));
468 +@@ -341,9 +346,10 @@ static int xhci_portsc_show(struct seq_file *s, void *unused)
469 + {
470 + struct xhci_port *port = s->private;
471 + u32 portsc;
472 ++ char str[XHCI_MSG_MAX];
473 +
474 + portsc = readl(port->addr);
475 +- seq_printf(s, "%s\n", xhci_decode_portsc(portsc));
476 ++ seq_printf(s, "%s\n", xhci_decode_portsc(str, portsc));
477 +
478 + return 0;
479 + }
480 +diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
481 +index 1bc4fe7b8c756..9888ba7d85b6a 100644
482 +--- a/drivers/usb/host/xhci-rcar.c
483 ++++ b/drivers/usb/host/xhci-rcar.c
484 +@@ -134,6 +134,13 @@ static int xhci_rcar_download_firmware(struct usb_hcd *hcd)
485 + const struct soc_device_attribute *attr;
486 + const char *firmware_name;
487 +
488 ++ /*
489 ++ * According to the datasheet, "Upon the completion of FW Download,
490 ++ * there is no need to write or reload FW".
491 ++ */
492 ++ if (readl(regs + RCAR_USB3_DL_CTRL) & RCAR_USB3_DL_CTRL_FW_SUCCESS)
493 ++ return 0;
494 ++
495 + attr = soc_device_match(rcar_quirks_match);
496 + if (attr)
497 + quirks = (uintptr_t)attr->data;
498 +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
499 +index 53059ee957ad5..dc2068e3bedb7 100644
500 +--- a/drivers/usb/host/xhci-ring.c
501 ++++ b/drivers/usb/host/xhci-ring.c
502 +@@ -1005,6 +1005,7 @@ void xhci_stop_endpoint_command_watchdog(struct timer_list *t)
503 + struct xhci_hcd *xhci = ep->xhci;
504 + unsigned long flags;
505 + u32 usbsts;
506 ++ char str[XHCI_MSG_MAX];
507 +
508 + spin_lock_irqsave(&xhci->lock, flags);
509 +
510 +@@ -1018,7 +1019,7 @@ void xhci_stop_endpoint_command_watchdog(struct timer_list *t)
511 + usbsts = readl(&xhci->op_regs->status);
512 +
513 + xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
514 +- xhci_warn(xhci, "USBSTS:%s\n", xhci_decode_usbsts(usbsts));
515 ++ xhci_warn(xhci, "USBSTS:%s\n", xhci_decode_usbsts(str, usbsts));
516 +
517 + ep->ep_state &= ~EP_STOP_CMD_PENDING;
518 +
519 +diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
520 +index 627abd236dbe1..a5da020772977 100644
521 +--- a/drivers/usb/host/xhci-trace.h
522 ++++ b/drivers/usb/host/xhci-trace.h
523 +@@ -25,8 +25,6 @@
524 + #include "xhci.h"
525 + #include "xhci-dbgcap.h"
526 +
527 +-#define XHCI_MSG_MAX 500
528 +-
529 + DECLARE_EVENT_CLASS(xhci_log_msg,
530 + TP_PROTO(struct va_format *vaf),
531 + TP_ARGS(vaf),
532 +@@ -122,6 +120,7 @@ DECLARE_EVENT_CLASS(xhci_log_trb,
533 + __field(u32, field1)
534 + __field(u32, field2)
535 + __field(u32, field3)
536 ++ __dynamic_array(char, str, XHCI_MSG_MAX)
537 + ),
538 + TP_fast_assign(
539 + __entry->type = ring->type;
540 +@@ -131,7 +130,7 @@ DECLARE_EVENT_CLASS(xhci_log_trb,
541 + __entry->field3 = le32_to_cpu(trb->field[3]);
542 + ),
543 + TP_printk("%s: %s", xhci_ring_type_string(__entry->type),
544 +- xhci_decode_trb(__entry->field0, __entry->field1,
545 ++ xhci_decode_trb(__get_str(str), XHCI_MSG_MAX, __entry->field0, __entry->field1,
546 + __entry->field2, __entry->field3)
547 + )
548 + );
549 +@@ -323,6 +322,7 @@ DECLARE_EVENT_CLASS(xhci_log_ep_ctx,
550 + __field(u32, info2)
551 + __field(u64, deq)
552 + __field(u32, tx_info)
553 ++ __dynamic_array(char, str, XHCI_MSG_MAX)
554 + ),
555 + TP_fast_assign(
556 + __entry->info = le32_to_cpu(ctx->ep_info);
557 +@@ -330,8 +330,8 @@ DECLARE_EVENT_CLASS(xhci_log_ep_ctx,
558 + __entry->deq = le64_to_cpu(ctx->deq);
559 + __entry->tx_info = le32_to_cpu(ctx->tx_info);
560 + ),
561 +- TP_printk("%s", xhci_decode_ep_context(__entry->info,
562 +- __entry->info2, __entry->deq, __entry->tx_info)
563 ++ TP_printk("%s", xhci_decode_ep_context(__get_str(str),
564 ++ __entry->info, __entry->info2, __entry->deq, __entry->tx_info)
565 + )
566 + );
567 +
568 +@@ -368,6 +368,7 @@ DECLARE_EVENT_CLASS(xhci_log_slot_ctx,
569 + __field(u32, info2)
570 + __field(u32, tt_info)
571 + __field(u32, state)
572 ++ __dynamic_array(char, str, XHCI_MSG_MAX)
573 + ),
574 + TP_fast_assign(
575 + __entry->info = le32_to_cpu(ctx->dev_info);
576 +@@ -375,9 +376,9 @@ DECLARE_EVENT_CLASS(xhci_log_slot_ctx,
577 + __entry->tt_info = le64_to_cpu(ctx->tt_info);
578 + __entry->state = le32_to_cpu(ctx->dev_state);
579 + ),
580 +- TP_printk("%s", xhci_decode_slot_context(__entry->info,
581 +- __entry->info2, __entry->tt_info,
582 +- __entry->state)
583 ++ TP_printk("%s", xhci_decode_slot_context(__get_str(str),
584 ++ __entry->info, __entry->info2,
585 ++ __entry->tt_info, __entry->state)
586 + )
587 + );
588 +
589 +@@ -432,12 +433,13 @@ DECLARE_EVENT_CLASS(xhci_log_ctrl_ctx,
590 + TP_STRUCT__entry(
591 + __field(u32, drop)
592 + __field(u32, add)
593 ++ __dynamic_array(char, str, XHCI_MSG_MAX)
594 + ),
595 + TP_fast_assign(
596 + __entry->drop = le32_to_cpu(ctrl_ctx->drop_flags);
597 + __entry->add = le32_to_cpu(ctrl_ctx->add_flags);
598 + ),
599 +- TP_printk("%s", xhci_decode_ctrl_ctx(__entry->drop, __entry->add)
600 ++ TP_printk("%s", xhci_decode_ctrl_ctx(__get_str(str), __entry->drop, __entry->add)
601 + )
602 + );
603 +
604 +@@ -523,6 +525,7 @@ DECLARE_EVENT_CLASS(xhci_log_portsc,
605 + TP_STRUCT__entry(
606 + __field(u32, portnum)
607 + __field(u32, portsc)
608 ++ __dynamic_array(char, str, XHCI_MSG_MAX)
609 + ),
610 + TP_fast_assign(
611 + __entry->portnum = portnum;
612 +@@ -530,7 +533,7 @@ DECLARE_EVENT_CLASS(xhci_log_portsc,
613 + ),
614 + TP_printk("port-%d: %s",
615 + __entry->portnum,
616 +- xhci_decode_portsc(__entry->portsc)
617 ++ xhci_decode_portsc(__get_str(str), __entry->portsc)
618 + )
619 + );
620 +
621 +@@ -555,13 +558,14 @@ DECLARE_EVENT_CLASS(xhci_log_doorbell,
622 + TP_STRUCT__entry(
623 + __field(u32, slot)
624 + __field(u32, doorbell)
625 ++ __dynamic_array(char, str, XHCI_MSG_MAX)
626 + ),
627 + TP_fast_assign(
628 + __entry->slot = slot;
629 + __entry->doorbell = doorbell;
630 + ),
631 + TP_printk("Ring doorbell for %s",
632 +- xhci_decode_doorbell(__entry->slot, __entry->doorbell)
633 ++ xhci_decode_doorbell(__get_str(str), __entry->slot, __entry->doorbell)
634 + )
635 + );
636 +
637 +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
638 +index c1865a121100c..1c97c8d81154d 100644
639 +--- a/drivers/usb/host/xhci.h
640 ++++ b/drivers/usb/host/xhci.h
641 +@@ -22,6 +22,9 @@
642 + #include "xhci-ext-caps.h"
643 + #include "pci-quirks.h"
644 +
645 ++/* max buffer size for trace and debug messages */
646 ++#define XHCI_MSG_MAX 500
647 ++
648 + /* xHCI PCI Configuration Registers */
649 + #define XHCI_SBRN_OFFSET (0x60)
650 +
651 +@@ -2223,15 +2226,14 @@ static inline char *xhci_slot_state_string(u32 state)
652 + }
653 + }
654 +
655 +-static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
656 +- u32 field3)
657 ++static inline const char *xhci_decode_trb(char *str, size_t size,
658 ++ u32 field0, u32 field1, u32 field2, u32 field3)
659 + {
660 +- static char str[256];
661 + int type = TRB_FIELD_TO_TYPE(field3);
662 +
663 + switch (type) {
664 + case TRB_LINK:
665 +- sprintf(str,
666 ++ snprintf(str, size,
667 + "LINK %08x%08x intr %d type '%s' flags %c:%c:%c:%c",
668 + field1, field0, GET_INTR_TARGET(field2),
669 + xhci_trb_type_string(type),
670 +@@ -2248,7 +2250,7 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
671 + case TRB_HC_EVENT:
672 + case TRB_DEV_NOTE:
673 + case TRB_MFINDEX_WRAP:
674 +- sprintf(str,
675 ++ snprintf(str, size,
676 + "TRB %08x%08x status '%s' len %d slot %d ep %d type '%s' flags %c:%c",
677 + field1, field0,
678 + xhci_trb_comp_code_string(GET_COMP_CODE(field2)),
679 +@@ -2261,7 +2263,8 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
680 +
681 + break;
682 + case TRB_SETUP:
683 +- sprintf(str, "bRequestType %02x bRequest %02x wValue %02x%02x wIndex %02x%02x wLength %d length %d TD size %d intr %d type '%s' flags %c:%c:%c",
684 ++ snprintf(str, size,
685 ++ "bRequestType %02x bRequest %02x wValue %02x%02x wIndex %02x%02x wLength %d length %d TD size %d intr %d type '%s' flags %c:%c:%c",
686 + field0 & 0xff,
687 + (field0 & 0xff00) >> 8,
688 + (field0 & 0xff000000) >> 24,
689 +@@ -2278,7 +2281,8 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
690 + field3 & TRB_CYCLE ? 'C' : 'c');
691 + break;
692 + case TRB_DATA:
693 +- sprintf(str, "Buffer %08x%08x length %d TD size %d intr %d type '%s' flags %c:%c:%c:%c:%c:%c:%c",
694 ++ snprintf(str, size,
695 ++ "Buffer %08x%08x length %d TD size %d intr %d type '%s' flags %c:%c:%c:%c:%c:%c:%c",
696 + field1, field0, TRB_LEN(field2), GET_TD_SIZE(field2),
697 + GET_INTR_TARGET(field2),
698 + xhci_trb_type_string(type),
699 +@@ -2291,7 +2295,8 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
700 + field3 & TRB_CYCLE ? 'C' : 'c');
701 + break;
702 + case TRB_STATUS:
703 +- sprintf(str, "Buffer %08x%08x length %d TD size %d intr %d type '%s' flags %c:%c:%c:%c",
704 ++ snprintf(str, size,
705 ++ "Buffer %08x%08x length %d TD size %d intr %d type '%s' flags %c:%c:%c:%c",
706 + field1, field0, TRB_LEN(field2), GET_TD_SIZE(field2),
707 + GET_INTR_TARGET(field2),
708 + xhci_trb_type_string(type),
709 +@@ -2304,7 +2309,7 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
710 + case TRB_ISOC:
711 + case TRB_EVENT_DATA:
712 + case TRB_TR_NOOP:
713 +- sprintf(str,
714 ++ snprintf(str, size,
715 + "Buffer %08x%08x length %d TD size %d intr %d type '%s' flags %c:%c:%c:%c:%c:%c:%c:%c",
716 + field1, field0, TRB_LEN(field2), GET_TD_SIZE(field2),
717 + GET_INTR_TARGET(field2),
718 +@@ -2321,21 +2326,21 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
719 +
720 + case TRB_CMD_NOOP:
721 + case TRB_ENABLE_SLOT:
722 +- sprintf(str,
723 ++ snprintf(str, size,
724 + "%s: flags %c",
725 + xhci_trb_type_string(type),
726 + field3 & TRB_CYCLE ? 'C' : 'c');
727 + break;
728 + case TRB_DISABLE_SLOT:
729 + case TRB_NEG_BANDWIDTH:
730 +- sprintf(str,
731 ++ snprintf(str, size,
732 + "%s: slot %d flags %c",
733 + xhci_trb_type_string(type),
734 + TRB_TO_SLOT_ID(field3),
735 + field3 & TRB_CYCLE ? 'C' : 'c');
736 + break;
737 + case TRB_ADDR_DEV:
738 +- sprintf(str,
739 ++ snprintf(str, size,
740 + "%s: ctx %08x%08x slot %d flags %c:%c",
741 + xhci_trb_type_string(type),
742 + field1, field0,
743 +@@ -2344,7 +2349,7 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
744 + field3 & TRB_CYCLE ? 'C' : 'c');
745 + break;
746 + case TRB_CONFIG_EP:
747 +- sprintf(str,
748 ++ snprintf(str, size,
749 + "%s: ctx %08x%08x slot %d flags %c:%c",
750 + xhci_trb_type_string(type),
751 + field1, field0,
752 +@@ -2353,7 +2358,7 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
753 + field3 & TRB_CYCLE ? 'C' : 'c');
754 + break;
755 + case TRB_EVAL_CONTEXT:
756 +- sprintf(str,
757 ++ snprintf(str, size,
758 + "%s: ctx %08x%08x slot %d flags %c",
759 + xhci_trb_type_string(type),
760 + field1, field0,
761 +@@ -2361,7 +2366,7 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
762 + field3 & TRB_CYCLE ? 'C' : 'c');
763 + break;
764 + case TRB_RESET_EP:
765 +- sprintf(str,
766 ++ snprintf(str, size,
767 + "%s: ctx %08x%08x slot %d ep %d flags %c:%c",
768 + xhci_trb_type_string(type),
769 + field1, field0,
770 +@@ -2382,7 +2387,7 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
771 + field3 & TRB_CYCLE ? 'C' : 'c');
772 + break;
773 + case TRB_SET_DEQ:
774 +- sprintf(str,
775 ++ snprintf(str, size,
776 + "%s: deq %08x%08x stream %d slot %d ep %d flags %c",
777 + xhci_trb_type_string(type),
778 + field1, field0,
779 +@@ -2393,14 +2398,14 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
780 + field3 & TRB_CYCLE ? 'C' : 'c');
781 + break;
782 + case TRB_RESET_DEV:
783 +- sprintf(str,
784 ++ snprintf(str, size,
785 + "%s: slot %d flags %c",
786 + xhci_trb_type_string(type),
787 + TRB_TO_SLOT_ID(field3),
788 + field3 & TRB_CYCLE ? 'C' : 'c');
789 + break;
790 + case TRB_FORCE_EVENT:
791 +- sprintf(str,
792 ++ snprintf(str, size,
793 + "%s: event %08x%08x vf intr %d vf id %d flags %c",
794 + xhci_trb_type_string(type),
795 + field1, field0,
796 +@@ -2409,14 +2414,14 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
797 + field3 & TRB_CYCLE ? 'C' : 'c');
798 + break;
799 + case TRB_SET_LT:
800 +- sprintf(str,
801 ++ snprintf(str, size,
802 + "%s: belt %d flags %c",
803 + xhci_trb_type_string(type),
804 + TRB_TO_BELT(field3),
805 + field3 & TRB_CYCLE ? 'C' : 'c');
806 + break;
807 + case TRB_GET_BW:
808 +- sprintf(str,
809 ++ snprintf(str, size,
810 + "%s: ctx %08x%08x slot %d speed %d flags %c",
811 + xhci_trb_type_string(type),
812 + field1, field0,
813 +@@ -2425,7 +2430,7 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
814 + field3 & TRB_CYCLE ? 'C' : 'c');
815 + break;
816 + case TRB_FORCE_HEADER:
817 +- sprintf(str,
818 ++ snprintf(str, size,
819 + "%s: info %08x%08x%08x pkt type %d roothub port %d flags %c",
820 + xhci_trb_type_string(type),
821 + field2, field1, field0 & 0xffffffe0,
822 +@@ -2434,7 +2439,7 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
823 + field3 & TRB_CYCLE ? 'C' : 'c');
824 + break;
825 + default:
826 +- sprintf(str,
827 ++ snprintf(str, size,
828 + "type '%s' -> raw %08x %08x %08x %08x",
829 + xhci_trb_type_string(type),
830 + field0, field1, field2, field3);
831 +@@ -2443,10 +2448,9 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
832 + return str;
833 + }
834 +
835 +-static inline const char *xhci_decode_ctrl_ctx(unsigned long drop,
836 +- unsigned long add)
837 ++static inline const char *xhci_decode_ctrl_ctx(char *str,
838 ++ unsigned long drop, unsigned long add)
839 + {
840 +- static char str[1024];
841 + unsigned int bit;
842 + int ret = 0;
843 +
844 +@@ -2472,10 +2476,9 @@ static inline const char *xhci_decode_ctrl_ctx(unsigned long drop,
845 + return str;
846 + }
847 +
848 +-static inline const char *xhci_decode_slot_context(u32 info, u32 info2,
849 +- u32 tt_info, u32 state)
850 ++static inline const char *xhci_decode_slot_context(char *str,
851 ++ u32 info, u32 info2, u32 tt_info, u32 state)
852 + {
853 +- static char str[1024];
854 + u32 speed;
855 + u32 hub;
856 + u32 mtt;
857 +@@ -2559,9 +2562,8 @@ static inline const char *xhci_portsc_link_state_string(u32 portsc)
858 + return "Unknown";
859 + }
860 +
861 +-static inline const char *xhci_decode_portsc(u32 portsc)
862 ++static inline const char *xhci_decode_portsc(char *str, u32 portsc)
863 + {
864 +- static char str[256];
865 + int ret;
866 +
867 + ret = sprintf(str, "%s %s %s Link:%s PortSpeed:%d ",
868 +@@ -2605,9 +2607,8 @@ static inline const char *xhci_decode_portsc(u32 portsc)
869 + return str;
870 + }
871 +
872 +-static inline const char *xhci_decode_usbsts(u32 usbsts)
873 ++static inline const char *xhci_decode_usbsts(char *str, u32 usbsts)
874 + {
875 +- static char str[256];
876 + int ret = 0;
877 +
878 + if (usbsts == ~(u32)0)
879 +@@ -2634,9 +2635,8 @@ static inline const char *xhci_decode_usbsts(u32 usbsts)
880 + return str;
881 + }
882 +
883 +-static inline const char *xhci_decode_doorbell(u32 slot, u32 doorbell)
884 ++static inline const char *xhci_decode_doorbell(char *str, u32 slot, u32 doorbell)
885 + {
886 +- static char str[256];
887 + u8 ep;
888 + u16 stream;
889 + int ret;
890 +@@ -2703,10 +2703,9 @@ static inline const char *xhci_ep_type_string(u8 type)
891 + }
892 + }
893 +
894 +-static inline const char *xhci_decode_ep_context(u32 info, u32 info2, u64 deq,
895 +- u32 tx_info)
896 ++static inline const char *xhci_decode_ep_context(char *str, u32 info,
897 ++ u32 info2, u64 deq, u32 tx_info)
898 + {
899 +- static char str[1024];
900 + int ret;
901 +
902 + u32 esit;
903 +diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c
904 +index b3b4599375668..3d328dfdbb5ed 100644
905 +--- a/drivers/usb/mtu3/mtu3_core.c
906 ++++ b/drivers/usb/mtu3/mtu3_core.c
907 +@@ -227,11 +227,13 @@ void mtu3_set_speed(struct mtu3 *mtu, enum usb_device_speed speed)
908 + mtu3_setbits(mbase, U3D_POWER_MANAGEMENT, HS_ENABLE);
909 + break;
910 + case USB_SPEED_SUPER:
911 ++ mtu3_setbits(mbase, U3D_POWER_MANAGEMENT, HS_ENABLE);
912 + mtu3_clrbits(mtu->ippc_base, SSUSB_U3_CTRL(0),
913 + SSUSB_U3_PORT_SSP_SPEED);
914 + break;
915 + case USB_SPEED_SUPER_PLUS:
916 +- mtu3_setbits(mtu->ippc_base, SSUSB_U3_CTRL(0),
917 ++ mtu3_setbits(mbase, U3D_POWER_MANAGEMENT, HS_ENABLE);
918 ++ mtu3_setbits(mtu->ippc_base, SSUSB_U3_CTRL(0),
919 + SSUSB_U3_PORT_SSP_SPEED);
920 + break;
921 + default:
922 +diff --git a/drivers/usb/mtu3/mtu3_gadget.c b/drivers/usb/mtu3/mtu3_gadget.c
923 +index 38f17d66d5bc1..0b3aa7c65857a 100644
924 +--- a/drivers/usb/mtu3/mtu3_gadget.c
925 ++++ b/drivers/usb/mtu3/mtu3_gadget.c
926 +@@ -64,14 +64,12 @@ static int mtu3_ep_enable(struct mtu3_ep *mep)
927 + u32 interval = 0;
928 + u32 mult = 0;
929 + u32 burst = 0;
930 +- int max_packet;
931 + int ret;
932 +
933 + desc = mep->desc;
934 + comp_desc = mep->comp_desc;
935 + mep->type = usb_endpoint_type(desc);
936 +- max_packet = usb_endpoint_maxp(desc);
937 +- mep->maxp = max_packet & GENMASK(10, 0);
938 ++ mep->maxp = usb_endpoint_maxp(desc);
939 +
940 + switch (mtu->g.speed) {
941 + case USB_SPEED_SUPER:
942 +@@ -92,7 +90,7 @@ static int mtu3_ep_enable(struct mtu3_ep *mep)
943 + usb_endpoint_xfer_int(desc)) {
944 + interval = desc->bInterval;
945 + interval = clamp_val(interval, 1, 16) - 1;
946 +- burst = (max_packet & GENMASK(12, 11)) >> 11;
947 ++ mult = usb_endpoint_maxp_mult(desc) - 1;
948 + }
949 + break;
950 + default:
951 +diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
952 +index b418a0d4adb89..c713d98b4a203 100644
953 +--- a/drivers/usb/serial/mos7720.c
954 ++++ b/drivers/usb/serial/mos7720.c
955 +@@ -226,8 +226,10 @@ static int read_mos_reg(struct usb_serial *serial, unsigned int serial_portnum,
956 + int status;
957 +
958 + buf = kmalloc(1, GFP_KERNEL);
959 +- if (!buf)
960 ++ if (!buf) {
961 ++ *data = 0;
962 + return -ENOMEM;
963 ++ }
964 +
965 + status = usb_control_msg(usbdev, pipe, request, requesttype, value,
966 + index, buf, 1, MOS_WDR_TIMEOUT);
967 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
968 +index 2d01b2bbb7465..0a1239819fd2a 100644
969 +--- a/include/linux/skbuff.h
970 ++++ b/include/linux/skbuff.h
971 +@@ -4608,7 +4608,7 @@ static inline void skb_reset_redirect(struct sk_buff *skb)
972 + #endif
973 + }
974 +
975 +-#ifdef CONFIG_KCOV
976 ++#if IS_ENABLED(CONFIG_KCOV) && IS_ENABLED(CONFIG_SKB_EXTENSIONS)
977 + static inline void skb_set_kcov_handle(struct sk_buff *skb,
978 + const u64 kcov_handle)
979 + {
980 +@@ -4636,7 +4636,7 @@ static inline u64 skb_get_kcov_handle(struct sk_buff *skb)
981 + static inline void skb_set_kcov_handle(struct sk_buff *skb,
982 + const u64 kcov_handle) { }
983 + static inline u64 skb_get_kcov_handle(struct sk_buff *skb) { return 0; }
984 +-#endif /* CONFIG_KCOV */
985 ++#endif /* CONFIG_KCOV && CONFIG_SKB_EXTENSIONS */
986 +
987 + #endif /* __KERNEL__ */
988 + #endif /* _LINUX_SKBUFF_H */
989 +diff --git a/include/uapi/linux/termios.h b/include/uapi/linux/termios.h
990 +index 33961d4e4de0d..e6da9d4433d11 100644
991 +--- a/include/uapi/linux/termios.h
992 ++++ b/include/uapi/linux/termios.h
993 +@@ -5,19 +5,4 @@
994 + #include <linux/types.h>
995 + #include <asm/termios.h>
996 +
997 +-#define NFF 5
998 +-
999 +-struct termiox
1000 +-{
1001 +- __u16 x_hflag;
1002 +- __u16 x_cflag;
1003 +- __u16 x_rflag[NFF];
1004 +- __u16 x_sflag;
1005 +-};
1006 +-
1007 +-#define RTSXOFF 0x0001 /* RTS flow control on input */
1008 +-#define CTSXON 0x0002 /* CTS flow control on output */
1009 +-#define DTRXOFF 0x0004 /* DTR flow control on input */
1010 +-#define DSRXON 0x0008 /* DCD flow control on output */
1011 +-
1012 + #endif
1013 +diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
1014 +index ffccc13d685bd..bf174798afcb9 100644
1015 +--- a/lib/Kconfig.debug
1016 ++++ b/lib/Kconfig.debug
1017 +@@ -1869,7 +1869,7 @@ config KCOV
1018 + depends on CC_HAS_SANCOV_TRACE_PC || GCC_PLUGINS
1019 + select DEBUG_FS
1020 + select GCC_PLUGIN_SANCOV if !CC_HAS_SANCOV_TRACE_PC
1021 +- select SKB_EXTENSIONS
1022 ++ select SKB_EXTENSIONS if NET
1023 + help
1024 + KCOV exposes kernel code coverage information in a form suitable
1025 + for coverage-guided fuzzing (randomized testing).
1026 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
1027 +index 0166558d3d647..e8e0f1cec8b04 100644
1028 +--- a/mm/page_alloc.c
1029 ++++ b/mm/page_alloc.c
1030 +@@ -996,7 +996,7 @@ static inline void __free_one_page(struct page *page,
1031 + struct page *buddy;
1032 + bool to_tail;
1033 +
1034 +- max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
1035 ++ max_order = min_t(unsigned int, MAX_ORDER - 1, pageblock_order);
1036 +
1037 + VM_BUG_ON(!zone_is_initialized(zone));
1038 + VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
1039 +@@ -1009,7 +1009,7 @@ static inline void __free_one_page(struct page *page,
1040 + VM_BUG_ON_PAGE(bad_range(zone, page), page);
1041 +
1042 + continue_merging:
1043 +- while (order < max_order - 1) {
1044 ++ while (order < max_order) {
1045 + if (compaction_capture(capc, page, order, migratetype)) {
1046 + __mod_zone_freepage_state(zone, -(1 << order),
1047 + migratetype);
1048 +@@ -1035,7 +1035,7 @@ continue_merging:
1049 + pfn = combined_pfn;
1050 + order++;
1051 + }
1052 +- if (max_order < MAX_ORDER) {
1053 ++ if (order < MAX_ORDER - 1) {
1054 + /* If we are here, it means order is >= pageblock_order.
1055 + * We want to prevent merge between freepages on isolate
1056 + * pageblock and normal pageblock. Without this, pageblock
1057 +@@ -1056,7 +1056,7 @@ continue_merging:
1058 + is_migrate_isolate(buddy_mt)))
1059 + goto done_merging;
1060 + }
1061 +- max_order++;
1062 ++ max_order = order + 1;
1063 + goto continue_merging;
1064 + }
1065 +
1066 +diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
1067 +index 00576bae183d3..0c321996c6eb0 100644
1068 +--- a/net/ipv4/igmp.c
1069 ++++ b/net/ipv4/igmp.c
1070 +@@ -2720,6 +2720,7 @@ int ip_check_mc_rcu(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u
1071 + rv = 1;
1072 + } else if (im) {
1073 + if (src_addr) {
1074 ++ spin_lock_bh(&im->lock);
1075 + for (psf = im->sources; psf; psf = psf->sf_next) {
1076 + if (psf->sf_inaddr == src_addr)
1077 + break;
1078 +@@ -2730,6 +2731,7 @@ int ip_check_mc_rcu(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u
1079 + im->sfcount[MCAST_EXCLUDE];
1080 + else
1081 + rv = im->sfcount[MCAST_EXCLUDE] != 0;
1082 ++ spin_unlock_bh(&im->lock);
1083 + } else
1084 + rv = 1; /* unspecified source; tentatively allow */
1085 + }
1086 +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
1087 +index e34d05cc57549..2b5f97e1d40b9 100644
1088 +--- a/net/netfilter/nf_tables_api.c
1089 ++++ b/net/netfilter/nf_tables_api.c
1090 +@@ -4115,6 +4115,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
1091 + struct nft_table *table;
1092 + struct nft_set *set;
1093 + struct nft_ctx ctx;
1094 ++ size_t alloc_size;
1095 + char *name;
1096 + u64 size;
1097 + u64 timeout;
1098 +@@ -4263,8 +4264,10 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
1099 + size = 0;
1100 + if (ops->privsize != NULL)
1101 + size = ops->privsize(nla, &desc);
1102 +-
1103 +- set = kvzalloc(sizeof(*set) + size + udlen, GFP_KERNEL);
1104 ++ alloc_size = sizeof(*set) + size + udlen;
1105 ++ if (alloc_size < size)
1106 ++ return -ENOMEM;
1107 ++ set = kvzalloc(alloc_size, GFP_KERNEL);
1108 + if (!set)
1109 + return -ENOMEM;
1110 +
1111 +@@ -4277,15 +4280,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
1112 + err = nf_tables_set_alloc_name(&ctx, set, name);
1113 + kfree(name);
1114 + if (err < 0)
1115 +- goto err_set_alloc_name;
1116 +-
1117 +- if (nla[NFTA_SET_EXPR]) {
1118 +- expr = nft_set_elem_expr_alloc(&ctx, set, nla[NFTA_SET_EXPR]);
1119 +- if (IS_ERR(expr)) {
1120 +- err = PTR_ERR(expr);
1121 +- goto err_set_alloc_name;
1122 +- }
1123 +- }
1124 ++ goto err_set_name;
1125 +
1126 + udata = NULL;
1127 + if (udlen) {
1128 +@@ -4296,21 +4291,19 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
1129 + INIT_LIST_HEAD(&set->bindings);
1130 + set->table = table;
1131 + write_pnet(&set->net, net);
1132 +- set->ops = ops;
1133 ++ set->ops = ops;
1134 + set->ktype = ktype;
1135 +- set->klen = desc.klen;
1136 ++ set->klen = desc.klen;
1137 + set->dtype = dtype;
1138 + set->objtype = objtype;
1139 +- set->dlen = desc.dlen;
1140 +- set->expr = expr;
1141 ++ set->dlen = desc.dlen;
1142 + set->flags = flags;
1143 +- set->size = desc.size;
1144 ++ set->size = desc.size;
1145 + set->policy = policy;
1146 +- set->udlen = udlen;
1147 +- set->udata = udata;
1148 ++ set->udlen = udlen;
1149 ++ set->udata = udata;
1150 + set->timeout = timeout;
1151 + set->gc_int = gc_int;
1152 +- set->handle = nf_tables_alloc_handle(table);
1153 +
1154 + set->field_count = desc.field_count;
1155 + for (i = 0; i < desc.field_count; i++)
1156 +@@ -4320,20 +4313,32 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
1157 + if (err < 0)
1158 + goto err_set_init;
1159 +
1160 ++ if (nla[NFTA_SET_EXPR]) {
1161 ++ expr = nft_set_elem_expr_alloc(&ctx, set, nla[NFTA_SET_EXPR]);
1162 ++ if (IS_ERR(expr)) {
1163 ++ err = PTR_ERR(expr);
1164 ++ goto err_set_expr_alloc;
1165 ++ }
1166 ++
1167 ++ set->expr = expr;
1168 ++ }
1169 ++
1170 ++ set->handle = nf_tables_alloc_handle(table);
1171 ++
1172 + err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
1173 + if (err < 0)
1174 +- goto err_set_trans;
1175 ++ goto err_set_expr_alloc;
1176 +
1177 + list_add_tail_rcu(&set->list, &table->sets);
1178 + table->use++;
1179 + return 0;
1180 +
1181 +-err_set_trans:
1182 ++err_set_expr_alloc:
1183 ++ if (set->expr)
1184 ++ nft_expr_destroy(&ctx, set->expr);
1185 ++
1186 + ops->destroy(set);
1187 + err_set_init:
1188 +- if (expr)
1189 +- nft_expr_destroy(&ctx, expr);
1190 +-err_set_alloc_name:
1191 + kfree(set->name);
1192 + err_set_name:
1193 + kvfree(set);
1194 +@@ -5145,6 +5150,24 @@ static void nf_tables_set_elem_destroy(const struct nft_ctx *ctx,
1195 + kfree(elem);
1196 + }
1197 +
1198 ++static int nft_set_elem_expr_setup(struct nft_ctx *ctx,
1199 ++ const struct nft_set_ext *ext,
1200 ++ struct nft_expr *expr)
1201 ++{
1202 ++ struct nft_expr *elem_expr = nft_set_ext_expr(ext);
1203 ++ int err;
1204 ++
1205 ++ if (expr == NULL)
1206 ++ return 0;
1207 ++
1208 ++ err = nft_expr_clone(elem_expr, expr);
1209 ++ if (err < 0)
1210 ++ return -ENOMEM;
1211 ++
1212 ++ nft_expr_destroy(ctx, expr);
1213 ++ return 0;
1214 ++}
1215 ++
1216 + static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
1217 + const struct nlattr *attr, u32 nlmsg_flags)
1218 + {
1219 +@@ -5347,15 +5370,17 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
1220 + *nft_set_ext_obj(ext) = obj;
1221 + obj->use++;
1222 + }
1223 +- if (expr) {
1224 +- memcpy(nft_set_ext_expr(ext), expr, expr->ops->size);
1225 +- kfree(expr);
1226 +- expr = NULL;
1227 +- }
1228 ++
1229 ++ err = nft_set_elem_expr_setup(ctx, ext, expr);
1230 ++ if (err < 0)
1231 ++ goto err_elem_expr;
1232 ++ expr = NULL;
1233 +
1234 + trans = nft_trans_elem_alloc(ctx, NFT_MSG_NEWSETELEM, set);
1235 +- if (trans == NULL)
1236 +- goto err_trans;
1237 ++ if (trans == NULL) {
1238 ++ err = -ENOMEM;
1239 ++ goto err_elem_expr;
1240 ++ }
1241 +
1242 + ext->genmask = nft_genmask_cur(ctx->net) | NFT_SET_ELEM_BUSY_MASK;
1243 + err = set->ops->insert(ctx->net, set, &elem, &ext2);
1244 +@@ -5399,7 +5424,7 @@ err_set_full:
1245 + set->ops->remove(ctx->net, set, &elem);
1246 + err_element_clash:
1247 + kfree(trans);
1248 +-err_trans:
1249 ++err_elem_expr:
1250 + if (obj)
1251 + obj->use--;
1252 +
1253 +diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
1254 +index d7083bcb20e8c..858c8d4d659a8 100644
1255 +--- a/net/netfilter/nft_set_hash.c
1256 ++++ b/net/netfilter/nft_set_hash.c
1257 +@@ -604,7 +604,7 @@ static u64 nft_hash_privsize(const struct nlattr * const nla[],
1258 + const struct nft_set_desc *desc)
1259 + {
1260 + return sizeof(struct nft_hash) +
1261 +- nft_hash_buckets(desc->size) * sizeof(struct hlist_head);
1262 ++ (u64)nft_hash_buckets(desc->size) * sizeof(struct hlist_head);
1263 + }
1264 +
1265 + static int nft_hash_init(const struct nft_set *set,
1266 +@@ -644,8 +644,8 @@ static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features,
1267 + return false;
1268 +
1269 + est->size = sizeof(struct nft_hash) +
1270 +- nft_hash_buckets(desc->size) * sizeof(struct hlist_head) +
1271 +- desc->size * sizeof(struct nft_hash_elem);
1272 ++ (u64)nft_hash_buckets(desc->size) * sizeof(struct hlist_head) +
1273 ++ (u64)desc->size * sizeof(struct nft_hash_elem);
1274 + est->lookup = NFT_SET_CLASS_O_1;
1275 + est->space = NFT_SET_CLASS_O_N;
1276 +
1277 +@@ -662,8 +662,8 @@ static bool nft_hash_fast_estimate(const struct nft_set_desc *desc, u32 features
1278 + return false;
1279 +
1280 + est->size = sizeof(struct nft_hash) +
1281 +- nft_hash_buckets(desc->size) * sizeof(struct hlist_head) +
1282 +- desc->size * sizeof(struct nft_hash_elem);
1283 ++ (u64)nft_hash_buckets(desc->size) * sizeof(struct hlist_head) +
1284 ++ (u64)desc->size * sizeof(struct nft_hash_elem);
1285 + est->lookup = NFT_SET_CLASS_O_1;
1286 + est->space = NFT_SET_CLASS_O_N;
1287 +
1288 +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
1289 +index 33d185b62a767..a45b27a2ed4ec 100644
1290 +--- a/sound/usb/quirks.c
1291 ++++ b/sound/usb/quirks.c
1292 +@@ -1896,6 +1896,7 @@ static const struct registration_quirk registration_quirks[] = {
1293 + REG_QUIRK_ENTRY(0x0951, 0x16ed, 2), /* Kingston HyperX Cloud Alpha S */
1294 + REG_QUIRK_ENTRY(0x0951, 0x16ea, 2), /* Kingston HyperX Cloud Flight S */
1295 + REG_QUIRK_ENTRY(0x0ecb, 0x1f46, 2), /* JBL Quantum 600 */
1296 ++ REG_QUIRK_ENTRY(0x0ecb, 0x1f47, 2), /* JBL Quantum 800 */
1297 + REG_QUIRK_ENTRY(0x0ecb, 0x2039, 2), /* JBL Quantum 400 */
1298 + REG_QUIRK_ENTRY(0x0ecb, 0x203c, 2), /* JBL Quantum 600 */
1299 + REG_QUIRK_ENTRY(0x0ecb, 0x203e, 2), /* JBL Quantum 800 */