From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.19 commit in: /
Date: Fri, 23 Sep 2022 12:38:37
Message-Id: 1663936703.b3e6664fbe92c3787a56b3f0f64a08c2ff24f6ee.mpagano@gentoo

commit: b3e6664fbe92c3787a56b3f0f64a08c2ff24f6ee
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Sep 23 12:38:23 2022 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Sep 23 12:38:23 2022 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b3e6664f

Linux patch 5.19.11

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1010_linux-5.19.11.patch | 1231 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1235 insertions(+)

diff --git a/0000_README b/0000_README
index e710df97..d3eec191 100644
--- a/0000_README
+++ b/0000_README
@@ -83,6 +83,10 @@ Patch: 1009_linux-5.19.10.patch
From: http://www.kernel.org
Desc: Linux 5.19.10

+Patch: 1010_linux-5.19.11.patch
+From: http://www.kernel.org
+Desc: Linux 5.19.11
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1010_linux-5.19.11.patch b/1010_linux-5.19.11.patch
new file mode 100644
index 00000000..a5ff5cbf
--- /dev/null
+++ b/1010_linux-5.19.11.patch
@@ -0,0 +1,1231 @@
+diff --git a/Documentation/devicetree/bindings/interrupt-controller/apple,aic.yaml b/Documentation/devicetree/bindings/interrupt-controller/apple,aic.yaml
+index 85c85b694217c..e18107eafe7cc 100644
+--- a/Documentation/devicetree/bindings/interrupt-controller/apple,aic.yaml
++++ b/Documentation/devicetree/bindings/interrupt-controller/apple,aic.yaml
+@@ -96,7 +96,7 @@ properties:
+ Documentation/devicetree/bindings/arm/cpus.yaml).
+
+ required:
+- - fiq-index
++ - apple,fiq-index
+ - cpus
+
+ required:
+diff --git a/Makefile b/Makefile
+index 33a9b6b547c47..01463a22926d5 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 19
+-SUBLEVEL = 10
++SUBLEVEL = 11
+ EXTRAVERSION =
+ NAME = Superb Owl
+
+diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
+index cd2b3fe156724..c68c3581483ac 100644
+--- a/arch/parisc/Kconfig
++++ b/arch/parisc/Kconfig
+@@ -225,8 +225,18 @@ config MLONGCALLS
+ Enabling this option will probably slow down your kernel.
+
+ config 64BIT
+- def_bool "$(ARCH)" = "parisc64"
++ def_bool y if "$(ARCH)" = "parisc64"
++ bool "64-bit kernel" if "$(ARCH)" = "parisc"
+ depends on PA8X00
++ help
++ Enable this if you want to support 64bit kernel on PA-RISC platform.
++
++ At the moment, only people willing to use more than 2GB of RAM,
++ or having a 64bit-only capable PA-RISC machine should say Y here.
++
++ Since there is no 64bit userland on PA-RISC, there is no point to
++ enable this option otherwise. The 64bit kernel is significantly bigger
++ and slower than the 32bit one.
+
+ choice
+ prompt "Kernel page size"
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 27fb1357ad4b8..cc6fbcb6d2521 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -338,7 +338,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
+
+ while (!blk_try_enter_queue(q, pm)) {
+ if (flags & BLK_MQ_REQ_NOWAIT)
+- return -EBUSY;
++ return -EAGAIN;
+
+ /*
+ * read pair of barrier in blk_freeze_queue_start(), we need to
+@@ -368,7 +368,7 @@ int __bio_queue_enter(struct request_queue *q, struct bio *bio)
+ if (test_bit(GD_DEAD, &disk->state))
+ goto dead;
+ bio_wouldblock_error(bio);
+- return -EBUSY;
++ return -EAGAIN;
+ }
+
+ /*
+diff --git a/block/blk-lib.c b/block/blk-lib.c
+index 09b7e1200c0f4..20e42144065b8 100644
+--- a/block/blk-lib.c
++++ b/block/blk-lib.c
+@@ -311,6 +311,11 @@ int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
+ struct blk_plug plug;
+ int ret = 0;
+
++ /* make sure that "len << SECTOR_SHIFT" doesn't overflow */
++ if (max_sectors > UINT_MAX >> SECTOR_SHIFT)
++ max_sectors = UINT_MAX >> SECTOR_SHIFT;
++ max_sectors &= ~bs_mask;
++
+ if (max_sectors == 0)
+ return -EOPNOTSUPP;
+ if ((sector | nr_sects) & bs_mask)
+@@ -324,10 +329,10 @@ int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
+
+ bio = blk_next_bio(bio, bdev, 0, REQ_OP_SECURE_ERASE, gfp);
+ bio->bi_iter.bi_sector = sector;
+- bio->bi_iter.bi_size = len;
++ bio->bi_iter.bi_size = len << SECTOR_SHIFT;
+
+- sector += len << SECTOR_SHIFT;
+- nr_sects -= len << SECTOR_SHIFT;
++ sector += len;
++ nr_sects -= len;
+ if (!nr_sects) {
+ ret = submit_bio_wait(bio);
+ bio_put(bio);
+diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
+index a964e25ea6206..763256efddc2b 100644
+--- a/drivers/gpio/gpio-mpc8xxx.c
++++ b/drivers/gpio/gpio-mpc8xxx.c
+@@ -172,6 +172,7 @@ static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
+
+ switch (flow_type) {
+ case IRQ_TYPE_EDGE_FALLING:
++ case IRQ_TYPE_LEVEL_LOW:
+ raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+ gc->write_reg(mpc8xxx_gc->regs + GPIO_ICR,
+ gc->read_reg(mpc8xxx_gc->regs + GPIO_ICR)
+diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
+index e342a6dc4c6c1..bb953f6478647 100644
+--- a/drivers/gpio/gpio-rockchip.c
++++ b/drivers/gpio/gpio-rockchip.c
+@@ -418,11 +418,11 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
+ goto out;
+ } else {
+ bank->toggle_edge_mode |= mask;
+- level |= mask;
++ level &= ~mask;
+
+ /*
+ * Determine gpio state. If 1 next interrupt should be
+- * falling otherwise rising.
++ * low otherwise high.
+ */
+ data = readl(bank->reg_base + bank->gpio_regs->ext_port);
+ if (data & mask)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 67d4a3c13ed19..929f8b75bfaee 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2391,8 +2391,16 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
+ }
+ adev->ip_blocks[i].status.sw = true;
+
+- /* need to do gmc hw init early so we can allocate gpu mem */
+- if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
++ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
++ /* need to do common hw init early so everything is set up for gmc */
++ r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
++ if (r) {
++ DRM_ERROR("hw_init %d failed %d\n", i, r);
++ goto init_failed;
++ }
++ adev->ip_blocks[i].status.hw = true;
++ } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
++ /* need to do gmc hw init early so we can allocate gpu mem */
+ /* Try to reserve bad pages early */
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_exchange_data(adev);
+@@ -3078,8 +3086,8 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
+ int i, r;
+
+ static enum amd_ip_block_type ip_order[] = {
+- AMD_IP_BLOCK_TYPE_GMC,
+ AMD_IP_BLOCK_TYPE_COMMON,
++ AMD_IP_BLOCK_TYPE_GMC,
+ AMD_IP_BLOCK_TYPE_PSP,
+ AMD_IP_BLOCK_TYPE_IH,
+ };
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+index f49db13b3fbee..0debdbcf46310 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+@@ -380,6 +380,7 @@ static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev,
+ WREG32_PCIE(smnPCIE_LC_CNTL, data);
+ }
+
++#ifdef CONFIG_PCIEASPM
+ static void nbio_v2_3_program_ltr(struct amdgpu_device *adev)
+ {
+ uint32_t def, data;
+@@ -401,9 +402,11 @@ static void nbio_v2_3_program_ltr(struct amdgpu_device *adev)
+ if (def != data)
+ WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
+ }
++#endif
+
+ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
+ {
++#ifdef CONFIG_PCIEASPM
+ uint32_t def, data;
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
+@@ -459,7 +462,10 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL6, data);
+
+- nbio_v2_3_program_ltr(adev);
++ /* Don't bother about LTR if LTR is not enabled
++ * in the path */
++ if (adev->pdev->ltr_path)
++ nbio_v2_3_program_ltr(adev);
+
+ def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3);
+ data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
+@@ -483,6 +489,7 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
+ data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL3, data);
++#endif
+ }
+
+ static void nbio_v2_3_apply_lc_spc_mode_wa(struct amdgpu_device *adev)
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+index f7f6ddebd3e49..37615a77287bc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+@@ -282,6 +282,7 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
+ mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
+ }
+
++#ifdef CONFIG_PCIEASPM
+ static void nbio_v6_1_program_ltr(struct amdgpu_device *adev)
+ {
+ uint32_t def, data;
+@@ -303,9 +304,11 @@ static void nbio_v6_1_program_ltr(struct amdgpu_device *adev)
+ if (def != data)
+ WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
+ }
++#endif
+
+ static void nbio_v6_1_program_aspm(struct amdgpu_device *adev)
+ {
++#ifdef CONFIG_PCIEASPM
+ uint32_t def, data;
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
+@@ -361,7 +364,10 @@ static void nbio_v6_1_program_aspm(struct amdgpu_device *adev)
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL6, data);
+
+- nbio_v6_1_program_ltr(adev);
++ /* Don't bother about LTR if LTR is not enabled
++ * in the path */
++ if (adev->pdev->ltr_path)
++ nbio_v6_1_program_ltr(adev);
+
+ def = data = RREG32_PCIE(smnRCC_BIF_STRAP3);
+ data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
+@@ -385,6 +391,7 @@ static void nbio_v6_1_program_aspm(struct amdgpu_device *adev)
+ data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL3, data);
++#endif
+ }
+
+ const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+index 11848d1e238b6..19455a7259391 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+@@ -673,6 +673,7 @@ struct amdgpu_nbio_ras nbio_v7_4_ras = {
+ };
+
+
++#ifdef CONFIG_PCIEASPM
+ static void nbio_v7_4_program_ltr(struct amdgpu_device *adev)
+ {
+ uint32_t def, data;
+@@ -694,9 +695,11 @@ static void nbio_v7_4_program_ltr(struct amdgpu_device *adev)
+ if (def != data)
+ WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
+ }
++#endif
+
+ static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
+ {
++#ifdef CONFIG_PCIEASPM
+ uint32_t def, data;
+
+ if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(7, 4, 4))
+@@ -755,7 +758,10 @@ static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL6, data);
+
+- nbio_v7_4_program_ltr(adev);
++ /* Don't bother about LTR if LTR is not enabled
++ * in the path */
++ if (adev->pdev->ltr_path)
++ nbio_v7_4_program_ltr(adev);
+
+ def = data = RREG32_PCIE(smnRCC_BIF_STRAP3);
+ data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
+@@ -779,6 +785,7 @@ static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
+ data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL3, data);
++#endif
+ }
+
+ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+index 65181efba50ec..56424f75dd2cc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+@@ -1504,6 +1504,11 @@ static int sdma_v4_0_start(struct amdgpu_device *adev)
+ WREG32_SDMA(i, mmSDMA0_CNTL, temp);
+
+ if (!amdgpu_sriov_vf(adev)) {
++ ring = &adev->sdma.instance[i].ring;
++ adev->nbio.funcs->sdma_doorbell_range(adev, i,
++ ring->use_doorbell, ring->doorbell_index,
++ adev->doorbell_index.sdma_doorbell_range);
++
+ /* unhalt engine */
+ temp = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
+ temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index fde6154f20096..183024d7c184e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -1211,25 +1211,6 @@ static int soc15_common_sw_fini(void *handle)
+ return 0;
+ }
+
+-static void soc15_doorbell_range_init(struct amdgpu_device *adev)
+-{
+- int i;
+- struct amdgpu_ring *ring;
+-
+- /* sdma/ih doorbell range are programed by hypervisor */
+- if (!amdgpu_sriov_vf(adev)) {
+- for (i = 0; i < adev->sdma.num_instances; i++) {
+- ring = &adev->sdma.instance[i].ring;
+- adev->nbio.funcs->sdma_doorbell_range(adev, i,
+- ring->use_doorbell, ring->doorbell_index,
+- adev->doorbell_index.sdma_doorbell_range);
+- }
+-
+- adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
+- adev->irq.ih.doorbell_index);
+- }
+-}
+-
+ static int soc15_common_hw_init(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+@@ -1249,12 +1230,6 @@ static int soc15_common_hw_init(void *handle)
+
+ /* enable the doorbell aperture */
+ soc15_enable_doorbell_aperture(adev, true);
+- /* HW doorbell routing policy: doorbell writing not
+- * in SDMA/IH/MM/ACV range will be routed to CP. So
+- * we need to init SDMA/IH/MM/ACV doorbell range prior
+- * to CP ip block init and ring test.
+- */
+- soc15_doorbell_range_init(adev);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+index 03b7066471f9a..1e83db0c5438d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+@@ -289,6 +289,10 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
+ }
+ }
+
++ if (!amdgpu_sriov_vf(adev))
++ adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
++ adev->irq.ih.doorbell_index);
++
+ pci_set_master(adev->pdev);
+
+ /* enable interrupts */
+diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+index 2022ffbb8dba5..59dfca093155c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
++++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+@@ -340,6 +340,10 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
+ }
+ }
+
++ if (!amdgpu_sriov_vf(adev))
++ adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
++ adev->irq.ih.doorbell_index);
++
+ pci_set_master(adev->pdev);
+
+ /* enable interrupts */
+diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
+index 19bf717fd4cb6..5508ebb9eb434 100644
+--- a/drivers/gpu/drm/i915/display/icl_dsi.c
++++ b/drivers/gpu/drm/i915/display/icl_dsi.c
+@@ -1629,6 +1629,8 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
+ /* FIXME: initialize from VBT */
+ vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
+
++ vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay;
++
+ ret = intel_dsc_compute_params(crtc_state);
+ if (ret)
+ return ret;
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index 41aaa6c98114f..fe8b6b72970a2 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -1379,6 +1379,7 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
+ * DP_DSC_RC_BUF_SIZE for this.
+ */
+ vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
++ vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay;
+
+ /*
+ * Slice Height of 8 works for all currently available panels. So start
+diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
+index 43e1bbc1e3035..ca530f0733e0e 100644
+--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
++++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
+@@ -460,7 +460,6 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
+ u8 i = 0;
+
+ vdsc_cfg->pic_width = pipe_config->hw.adjusted_mode.crtc_hdisplay;
+- vdsc_cfg->pic_height = pipe_config->hw.adjusted_mode.crtc_vdisplay;
+ vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width,
+ pipe_config->dsc.slice_count);
+
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+index 9feda105f9131..a7acffbf15d1f 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+@@ -235,6 +235,14 @@ struct intel_guc {
+ * @shift: Right shift value for the gpm timestamp
+ */
+ u32 shift;
++
++ /**
++ * @last_stat_jiffies: jiffies at last actual stats collection time
++ * We use this timestamp to ensure we don't oversample the
++ * stats because runtime power management events can trigger
++ * stats collection at much higher rates than required.
++ */
++ unsigned long last_stat_jiffies;
+ } timestamp;
+
+ #ifdef CONFIG_DRM_I915_SELFTEST
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+index 26a051ef119df..d7e4681d7297c 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+@@ -1365,6 +1365,8 @@ static void __update_guc_busyness_stats(struct intel_guc *guc)
+ unsigned long flags;
+ ktime_t unused;
+
++ guc->timestamp.last_stat_jiffies = jiffies;
++
+ spin_lock_irqsave(&guc->timestamp.lock, flags);
+
+ guc_update_pm_timestamp(guc, &unused);
+@@ -1436,7 +1438,23 @@ void intel_guc_busyness_park(struct intel_gt *gt)
+ if (!guc_submission_initialized(guc))
+ return;
+
+- cancel_delayed_work(&guc->timestamp.work);
++ /*
++ * There is a race with suspend flow where the worker runs after suspend
++ * and causes an unclaimed register access warning. Cancel the worker
++ * synchronously here.
++ */
++ cancel_delayed_work_sync(&guc->timestamp.work);
++
++ /*
++ * Before parking, we should sample engine busyness stats if we need to.
++ * We can skip it if we are less than half a ping from the last time we
++ * sampled the busyness stats.
++ */
++ if (guc->timestamp.last_stat_jiffies &&
++ !time_after(jiffies, guc->timestamp.last_stat_jiffies +
++ (guc->timestamp.ping_delay / 2)))
++ return;
++
+ __update_guc_busyness_stats(guc);
+ }
+
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 4f5a51bb9e1e4..e77956ae88a4b 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -1849,14 +1849,14 @@
+
+ #define GT0_PERF_LIMIT_REASONS _MMIO(0x1381a8)
+ #define GT0_PERF_LIMIT_REASONS_MASK 0xde3
+-#define PROCHOT_MASK REG_BIT(1)
+-#define THERMAL_LIMIT_MASK REG_BIT(2)
+-#define RATL_MASK REG_BIT(6)
+-#define VR_THERMALERT_MASK REG_BIT(7)
+-#define VR_TDC_MASK REG_BIT(8)
+-#define POWER_LIMIT_4_MASK REG_BIT(9)
+-#define POWER_LIMIT_1_MASK REG_BIT(11)
+-#define POWER_LIMIT_2_MASK REG_BIT(12)
++#define PROCHOT_MASK REG_BIT(0)
++#define THERMAL_LIMIT_MASK REG_BIT(1)
++#define RATL_MASK REG_BIT(5)
++#define VR_THERMALERT_MASK REG_BIT(6)
++#define VR_TDC_MASK REG_BIT(7)
++#define POWER_LIMIT_4_MASK REG_BIT(8)
++#define POWER_LIMIT_1_MASK REG_BIT(10)
++#define POWER_LIMIT_2_MASK REG_BIT(11)
+
+ #define CHV_CLK_CTL1 _MMIO(0x101100)
+ #define VLV_CLK_CTL2 _MMIO(0x101104)
+diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
+index 16460b169ed21..2a32729a74b51 100644
+--- a/drivers/gpu/drm/i915/i915_vma.c
++++ b/drivers/gpu/drm/i915/i915_vma.c
+@@ -1870,12 +1870,13 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
+ enum dma_resv_usage usage;
+ int idx;
+
+- obj->read_domains = 0;
+ if (flags & EXEC_OBJECT_WRITE) {
+ usage = DMA_RESV_USAGE_WRITE;
+ obj->write_domain = I915_GEM_DOMAIN_RENDER;
++ obj->read_domains = 0;
+ } else {
+ usage = DMA_RESV_USAGE_READ;
++ obj->write_domain = 0;
+ }
+
+ dma_fence_array_for_each(curr, idx, fence)
+diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
+index 8640a8a8a4691..44aa526294439 100644
+--- a/drivers/gpu/drm/meson/meson_plane.c
++++ b/drivers/gpu/drm/meson/meson_plane.c
+@@ -168,7 +168,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
+
+ /* Enable OSD and BLK0, set max global alpha */
+ priv->viu.osd1_ctrl_stat = OSD_ENABLE |
+- (0xFF << OSD_GLOBAL_ALPHA_SHIFT) |
++ (0x100 << OSD_GLOBAL_ALPHA_SHIFT) |
+ OSD_BLK0_ENABLE;
+
+ priv->viu.osd1_ctrl_stat2 = readl(priv->io_base +
+diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
+index bb7e109534de1..d4b907889a21d 100644
+--- a/drivers/gpu/drm/meson/meson_viu.c
++++ b/drivers/gpu/drm/meson/meson_viu.c
+@@ -94,7 +94,7 @@ static void meson_viu_set_g12a_osd1_matrix(struct meson_drm *priv,
+ priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF11_12));
+ writel(((m[9] & 0x1fff) << 16) | (m[10] & 0x1fff),
+ priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF20_21));
+- writel((m[11] & 0x1fff) << 16,
++ writel((m[11] & 0x1fff),
+ priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF22));
+
+ writel(((m[18] & 0xfff) << 16) | (m[19] & 0xfff),
+diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
+index a189982601a48..e8040defe6073 100644
+--- a/drivers/gpu/drm/panel/panel-edp.c
++++ b/drivers/gpu/drm/panel/panel-edp.c
+@@ -1270,7 +1270,8 @@ static const struct panel_desc innolux_n116bca_ea1 = {
+ },
+ .delay = {
+ .hpd_absent = 200,
+- .prepare_to_enable = 80,
++ .enable = 80,
++ .disable = 50,
+ .unprepare = 500,
+ },
+ };
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+index d6e831576cd2b..88271f04615b0 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+@@ -1436,11 +1436,15 @@ static void rk3568_set_intf_mux(struct vop2_video_port *vp, int id,
+ die &= ~RK3568_SYS_DSP_INFACE_EN_HDMI_MUX;
+ die |= RK3568_SYS_DSP_INFACE_EN_HDMI |
+ FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_HDMI_MUX, vp->id);
++ dip &= ~RK3568_DSP_IF_POL__HDMI_PIN_POL;
++ dip |= FIELD_PREP(RK3568_DSP_IF_POL__HDMI_PIN_POL, polflags);
+ break;
+ case ROCKCHIP_VOP2_EP_EDP0:
+ die &= ~RK3568_SYS_DSP_INFACE_EN_EDP_MUX;
+ die |= RK3568_SYS_DSP_INFACE_EN_EDP |
+ FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_EDP_MUX, vp->id);
++ dip &= ~RK3568_DSP_IF_POL__EDP_PIN_POL;
++ dip |= FIELD_PREP(RK3568_DSP_IF_POL__EDP_PIN_POL, polflags);
+ break;
+ case ROCKCHIP_VOP2_EP_MIPI0:
+ die &= ~RK3568_SYS_DSP_INFACE_EN_MIPI0_MUX;
+diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
+index fc8c1420c0b69..64b14ac4c7b02 100644
+--- a/drivers/iommu/intel/dmar.c
++++ b/drivers/iommu/intel/dmar.c
+@@ -2368,13 +2368,6 @@ static int dmar_device_hotplug(acpi_handle handle, bool insert)
+ if (!dmar_in_use())
+ return 0;
+
+- /*
+- * It's unlikely that any I/O board is hot added before the IOMMU
+- * subsystem is initialized.
+- */
+- if (IS_ENABLED(CONFIG_INTEL_IOMMU) && !intel_iommu_enabled)
+- return -EOPNOTSUPP;
+-
+ if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
+ tmp = handle;
+ } else {
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index c0464959cbcdb..861a239d905a4 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -3133,7 +3133,13 @@ static int __init init_dmars(void)
+
+ #ifdef CONFIG_INTEL_IOMMU_SVM
+ if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
++ /*
++ * Call dmar_alloc_hwirq() with dmar_global_lock held,
++ * could cause possible lock race condition.
++ */
++ up_write(&dmar_global_lock);
+ ret = intel_svm_enable_prq(iommu);
++ down_write(&dmar_global_lock);
+ if (ret)
+ goto free_iommu;
+ }
+@@ -4039,6 +4045,7 @@ int __init intel_iommu_init(void)
+ force_on = (!intel_iommu_tboot_noforce && tboot_force_iommu()) ||
+ platform_optin_force_iommu();
+
++ down_write(&dmar_global_lock);
+ if (dmar_table_init()) {
+ if (force_on)
+ panic("tboot: Failed to initialize DMAR table\n");
+@@ -4051,6 +4058,16 @@ int __init intel_iommu_init(void)
+ goto out_free_dmar;
+ }
+
++ up_write(&dmar_global_lock);
++
++ /*
++ * The bus notifier takes the dmar_global_lock, so lockdep will
++ * complain later when we register it under the lock.
++ */
++ dmar_register_bus_notifier();
++
++ down_write(&dmar_global_lock);
++
+ if (!no_iommu)
+ intel_iommu_debugfs_init();
+
+@@ -4098,9 +4115,11 @@ int __init intel_iommu_init(void)
+ pr_err("Initialization failed\n");
+ goto out_free_dmar;
+ }
++ up_write(&dmar_global_lock);
+
+ init_iommu_pm_ops();
+
++ down_read(&dmar_global_lock);
+ for_each_active_iommu(iommu, drhd) {
+ /*
+ * The flush queue implementation does not perform
+@@ -4118,11 +4137,13 @@ int __init intel_iommu_init(void)
+ "%s", iommu->name);
+ iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
+ }
++ up_read(&dmar_global_lock);
+
+ bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
+ if (si_domain && !hw_pass_through)
+ register_memory_notifier(&intel_iommu_memory_nb);
+
++ down_read(&dmar_global_lock);
+ if (probe_acpi_namespace_devices())
+ pr_warn("ACPI name space devices didn't probe correctly\n");
+
+@@ -4133,15 +4154,17 @@ int __init intel_iommu_init(void)
+
+ iommu_disable_protect_mem_regions(iommu);
+ }
++ up_read(&dmar_global_lock);
+
+- intel_iommu_enabled = 1;
+- dmar_register_bus_notifier();
+ pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
+
++ intel_iommu_enabled = 1;
++
+ return 0;
+
+ out_free_dmar:
+ intel_iommu_free_dmars();
++ up_write(&dmar_global_lock);
+ return ret;
+ }
+
+diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
+index 520ed965bb7a4..583ca847a39cb 100644
+--- a/drivers/of/fdt.c
++++ b/drivers/of/fdt.c
+@@ -314,7 +314,7 @@ static int unflatten_dt_nodes(const void *blob,
+ for (offset = 0;
+ offset >= 0 && depth >= initial_depth;
+ offset = fdt_next_node(blob, offset, &depth)) {
+- if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH))
++ if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH - 1))
+ continue;
+
+ if (!IS_ENABLED(CONFIG_OF_KOBJ) &&
+diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
+index f69ab90b5e22d..6052f264bbb0a 100644
+--- a/drivers/parisc/ccio-dma.c
++++ b/drivers/parisc/ccio-dma.c
+@@ -1546,6 +1546,7 @@ static int __init ccio_probe(struct parisc_device *dev)
+ }
+ ccio_ioc_init(ioc);
+ if (ccio_init_resources(ioc)) {
++ iounmap(ioc->ioc_regs);
+ kfree(ioc);
+ return -ENOMEM;
+ }
+diff --git a/drivers/pinctrl/qcom/pinctrl-sc8180x.c b/drivers/pinctrl/qcom/pinctrl-sc8180x.c
+index 6bec7f1431348..704a99d2f93ce 100644
+--- a/drivers/pinctrl/qcom/pinctrl-sc8180x.c
++++ b/drivers/pinctrl/qcom/pinctrl-sc8180x.c
+@@ -530,10 +530,10 @@ DECLARE_MSM_GPIO_PINS(187);
+ DECLARE_MSM_GPIO_PINS(188);
+ DECLARE_MSM_GPIO_PINS(189);
+
+-static const unsigned int sdc2_clk_pins[] = { 190 };
+-static const unsigned int sdc2_cmd_pins[] = { 191 };
+-static const unsigned int sdc2_data_pins[] = { 192 };
+-static const unsigned int ufs_reset_pins[] = { 193 };
++static const unsigned int ufs_reset_pins[] = { 190 };
++static const unsigned int sdc2_clk_pins[] = { 191 };
++static const unsigned int sdc2_cmd_pins[] = { 192 };
++static const unsigned int sdc2_data_pins[] = { 193 };
+
+ enum sc8180x_functions {
+ msm_mux_adsp_ext,
+@@ -1582,7 +1582,7 @@ static const int sc8180x_acpi_reserved_gpios[] = {
+ static const struct msm_gpio_wakeirq_map sc8180x_pdc_map[] = {
+ { 3, 31 }, { 5, 32 }, { 8, 33 }, { 9, 34 }, { 10, 100 }, { 12, 104 },
+ { 24, 37 }, { 26, 38 }, { 27, 41 }, { 28, 42 }, { 30, 39 }, { 36, 43 },
+- { 37, 43 }, { 38, 45 }, { 39, 118 }, { 39, 125 }, { 41, 47 },
++ { 37, 44 }, { 38, 45 }, { 39, 118 }, { 39, 125 }, { 41, 47 },
+ { 42, 48 }, { 46, 50 }, { 47, 49 }, { 48, 51 }, { 49, 53 }, { 50, 52 },
+ { 51, 116 }, { 51, 123 }, { 53, 54 }, { 54, 55 }, { 55, 56 },
+ { 56, 57 }, { 58, 58 }, { 60, 60 }, { 68, 62 }, { 70, 63 }, { 76, 86 },
+diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
+index 21054fcacd345..18088f6f44b23 100644
+--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
++++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
+@@ -98,7 +98,7 @@ MODULE_DEVICE_TABLE(of, a100_r_pinctrl_match);
+ static struct platform_driver a100_r_pinctrl_driver = {
+ .probe = a100_r_pinctrl_probe,
+ .driver = {
+- .name = "sun50iw10p1-r-pinctrl",
++ .name = "sun50i-a100-r-pinctrl",
+ .of_match_table = a100_r_pinctrl_match,
+ },
+ };
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 386bb523c69ea..bdc3efdb12219 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -707,9 +707,6 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
+ int length = 0;
+ int total_read;
+
+- smb_msg->msg_control = NULL;
+- smb_msg->msg_controllen = 0;
+-
+ for (total_read = 0; msg_data_left(smb_msg); total_read += length) {
+ try_to_freeze();
+
+@@ -765,7 +762,7 @@ int
+ cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
+ unsigned int to_read)
+ {
+- struct msghdr smb_msg;
++ struct msghdr smb_msg = {};
+ struct kvec iov = {.iov_base = buf, .iov_len = to_read};
+ iov_iter_kvec(&smb_msg.msg_iter, READ, &iov, 1, to_read);
+
+@@ -775,15 +772,13 @@ cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
+ ssize_t
+ cifs_discard_from_socket(struct TCP_Server_Info *server, size_t to_read)
+ {
+- struct msghdr smb_msg;
++ struct msghdr smb_msg = {};
+
+ /*
+ * iov_iter_discard already sets smb_msg.type and count and iov_offset
+ * and cifs_readv_from_socket sets msg_control and msg_controllen
+ * so little to initialize in struct msghdr
+ */
+- smb_msg.msg_name = NULL;
+- smb_msg.msg_namelen = 0;
+ iov_iter_discard(&smb_msg.msg_iter, READ, to_read);
+
+ return cifs_readv_from_socket(server, &smb_msg);
+@@ -793,7 +788,7 @@ int
+ cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page,
+ unsigned int page_offset, unsigned int to_read)
+ {
+- struct msghdr smb_msg;
++ struct msghdr smb_msg = {};
+ struct bio_vec bv = {
+ .bv_page = page, .bv_len = to_read, .bv_offset = page_offset};
+ iov_iter_bvec(&smb_msg.msg_iter, READ, &bv, 1, to_read);
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 0f03c0bfdf280..02dd591acabb3 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -3327,6 +3327,9 @@ static ssize_t __cifs_writev(
+
+ ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from)
+ {
++ struct file *file = iocb->ki_filp;
++
++ cifs_revalidate_mapping(file->f_inode);
+ return __cifs_writev(iocb, from, true);
+ }
+
+diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
+index bfc9bd55870a0..8adc0f2a59518 100644
+--- a/fs/cifs/transport.c
++++ b/fs/cifs/transport.c
+@@ -196,10 +196,6 @@ smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
+
+ *sent = 0;
+
+- smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
+- smb_msg->msg_namelen = sizeof(struct sockaddr);
+- smb_msg->msg_control = NULL;
+- smb_msg->msg_controllen = 0;
+ if (server->noblocksnd)
+ smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
+ else
+@@ -311,7 +307,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+ sigset_t mask, oldmask;
+ size_t total_len = 0, sent, size;
+ struct socket *ssocket = server->ssocket;
+- struct msghdr smb_msg;
++ struct msghdr smb_msg = {};
+ __be32 rfc1002_marker;
+
+ if (cifs_rdma_enabled(server)) {
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index 8f8cd6e2d4dbc..597e3ce3f148a 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -604,6 +604,31 @@ static inline gfp_t nfs_io_gfp_mask(void)
+ return GFP_KERNEL;
+ }
+
++/*
++ * Special version of should_remove_suid() that ignores capabilities.
++ */
++static inline int nfs_should_remove_suid(const struct inode *inode)
++{
++ umode_t mode = inode->i_mode;
++ int kill = 0;
++
++ /* suid always must be killed */
++ if (unlikely(mode & S_ISUID))
++ kill = ATTR_KILL_SUID;
++
++ /*
++ * sgid without any exec bits is just a mandatory locking mark; leave
++ * it alone. If some exec bits are set, it's a real sgid; kill it.
++ */
++ if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
++ kill |= ATTR_KILL_SGID;
++
++ if (unlikely(kill && S_ISREG(mode)))
++ return kill;
++
++ return 0;
++}
++
+ /* unlink.c */
+ extern struct rpc_task *
+ nfs_async_rename(struct inode *old_dir, struct inode *new_dir,
+diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
+index 068c45b3bc1ab..6dab9e4083729 100644
+--- a/fs/nfs/nfs42proc.c
++++ b/fs/nfs/nfs42proc.c
+@@ -78,10 +78,15 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
+
+ status = nfs4_call_sync(server->client, server, msg,
+ &args.seq_args, &res.seq_res, 0);
+- if (status == 0)
++ if (status == 0) {
++ if (nfs_should_remove_suid(inode)) {
++ spin_lock(&inode->i_lock);
++ nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE);
++ spin_unlock(&inode->i_lock);
++ }
+ status = nfs_post_op_update_inode_force_wcc(inode,
+ res.falloc_fattr);
+-
++ }
+ if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE])
+ trace_nfs4_fallocate(inode, &args, status);
+ else
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index 6ab5eeb000dc0..5e4bacb77bfc7 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -1051,22 +1051,31 @@ static void nfs_fill_super(struct super_block *sb, struct nfs_fs_context *ctx)
+ if (ctx->bsize)
+ sb->s_blocksize = nfs_block_size(ctx->bsize, &sb->s_blocksize_bits);
+
+- if (server->nfs_client->rpc_ops->version != 2) {
+- /* The VFS shouldn't apply the umask to mode bits. We will do
+- * so ourselves when necessary.
++ switch (server->nfs_client->rpc_ops->version) {
++ case 2:
++ sb->s_time_gran = 1000;
++ sb->s_time_min = 0;
++ sb->s_time_max = U32_MAX;
++ break;
++ case 3:
++ /*
++ * The VFS shouldn't apply the umask to mode bits.
++ * We will do so ourselves when necessary.
+ */
+ sb->s_flags |= SB_POSIXACL;
+ sb->s_time_gran = 1;
+- sb->s_export_op = &nfs_export_ops;
+- } else
+- sb->s_time_gran = 1000;
+-
+- if (server->nfs_client->rpc_ops->version != 4) {
+ sb->s_time_min = 0;
+ sb->s_time_max = U32_MAX;
+- } else {
++ sb->s_export_op = &nfs_export_ops;
++ break;
++ case 4:
++ sb->s_flags |= SB_POSIXACL;
++ sb->s_time_gran = 1;
+ sb->s_time_min = S64_MIN;
+ sb->s_time_max = S64_MAX;
++ if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
++ sb->s_export_op = &nfs_export_ops;
++ break;
+ }
+
+ sb->s_magic = NFS_SUPER_MAGIC;
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index 5d7e1c2061842..4212473c69ee9 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -1497,31 +1497,6 @@ void nfs_commit_prepare(struct rpc_task *task, void *calldata)
+ NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
+ }
+
+-/*
+- * Special version of should_remove_suid() that ignores capabilities.
+- */
+-static int nfs_should_remove_suid(const struct inode *inode)
+-{
+- umode_t mode = inode->i_mode;
+- int kill = 0;
+-
+- /* suid always must be killed */
+- if (unlikely(mode & S_ISUID))
+- kill = ATTR_KILL_SUID;
+-
+- /*
+- * sgid without any exec bits is just a mandatory locking mark; leave
+- * it alone. If some exec bits are set, it's a real sgid; kill it.
+- */
+- if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
+- kill |= ATTR_KILL_SGID;
+-
+- if (unlikely(kill && S_ISREG(mode)))
+- return kill;
+-
+- return 0;
+-}
+-
+ static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
+ struct nfs_fattr *fattr)
+ {
+diff --git a/include/linux/dmar.h b/include/linux/dmar.h
+index f3a3d95df5325..cbd714a198a0a 100644
+--- a/include/linux/dmar.h
++++ b/include/linux/dmar.h
+@@ -69,7 +69,6 @@ struct dmar_pci_notify_info {
+
+ extern struct rw_semaphore dmar_global_lock;
+ extern struct list_head dmar_drhd_units;
+-extern int intel_iommu_enabled;
+
+ #define for_each_drhd_unit(drhd) \
+ list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \
+@@ -93,8 +92,7 @@ extern int intel_iommu_enabled;
+ static inline bool dmar_rcu_check(void)
+ {
+ return rwsem_is_locked(&dmar_global_lock) ||
+- system_state == SYSTEM_BOOTING ||
+- (IS_ENABLED(CONFIG_INTEL_IOMMU) && !intel_iommu_enabled);
++ system_state == SYSTEM_BOOTING;
+ }
+
+ #define dmar_rcu_dereference(p) rcu_dereference_check((p), dmar_rcu_check())
+diff --git a/include/linux/of_device.h b/include/linux/of_device.h
+index 1d7992a02e36e..1a803e4335d30 100644
+--- a/include/linux/of_device.h
++++ b/include/linux/of_device.h
+@@ -101,8 +101,9 @@ static inline struct device_node *of_cpu_device_node_get(int cpu)
+ }
+
+ static inline int of_dma_configure_id(struct device *dev,
+- struct device_node *np,
+- bool force_dma)
++ struct device_node *np,
++ bool force_dma,
++ const u32 *id)
+ {
+ return 0;
+ }
+diff --git a/include/net/xfrm.h b/include/net/xfrm.h
+index c39d910d4b454..9ca397eed1638 100644
+--- a/include/net/xfrm.h
++++ b/include/net/xfrm.h
+@@ -1195,6 +1195,8 @@ int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk);
+
+ static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
+ {
++ if (!sk_fullsock(osk))
++ return 0;
+ sk->sk_policy[0] = NULL;
+ sk->sk_policy[1] = NULL;
+ if (unlikely(osk->sk_policy[0] || osk->sk_policy[1]))
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index 48833d0edd089..602da2cfd57c8 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -5061,7 +5061,8 @@ done:
+ req_set_fail(req);
+ __io_req_complete(req, issue_flags, ret, 0);
+ /* put file to avoid an attempt to IOPOLL the req */
+- io_put_file(req->file);
++ if (!(req->flags & REQ_F_FIXED_FILE))
++ io_put_file(req->file);
+ req->file = NULL;
+ return 0;
+ }
+diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
+index afc6c0e9c966e..f93983910b5e1 100644
+--- a/kernel/cgroup/cgroup-v1.c
++++ b/kernel/cgroup/cgroup-v1.c
+@@ -59,6 +59,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
+ int retval = 0;
+
+ mutex_lock(&cgroup_mutex);
++ cpus_read_lock();
+ percpu_down_write(&cgroup_threadgroup_rwsem);
+ for_each_root(root) {
+ struct cgroup *from_cgrp;
+@@ -72,6 +73,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
+ break;
+ }
+ percpu_up_write(&cgroup_threadgroup_rwsem);
++ cpus_read_unlock();
+ mutex_unlock(&cgroup_mutex);
+
+ return retval;
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index da8b3cc67234d..028eb28c7882d 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -1704,7 +1704,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
+ tcp_hdr(skb)->source, tcp_hdr(skb)->dest,
+ arg->uid);
+ security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4));
+- rt = ip_route_output_key(net, &fl4);
++ rt = ip_route_output_flow(net, &fl4, sk);
+ if (IS_ERR(rt))
+ return;
+
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 586c102ce152d..9fd92e263d0a3 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -819,6 +819,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
+ ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
+ inet_twsk(sk)->tw_priority : sk->sk_priority;
+ transmit_time = tcp_transmit_time(sk);
++ xfrm_sk_clone_policy(ctl_sk, sk);
+ }
+ ip_send_unicast_reply(ctl_sk,
+ skb, &TCP_SKB_CB(skb)->header.h4.opt,
+@@ -827,6 +828,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
+ transmit_time);
+
+ ctl_sk->sk_mark = 0;
++ xfrm_sk_free_policy(ctl_sk);
+ sock_net_set(ctl_sk, &init_net);
+ __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
+ __TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index be09941fe6d9a..5eabe746cfa76 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -952,7 +952,10 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
+ * Underlying function will use this to retrieve the network
+ * namespace
+ */
+- dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL);
++ if (sk && sk->sk_state != TCP_TIME_WAIT)
++ dst = ip6_dst_lookup_flow(net, sk, &fl6, NULL); /*sk's xfrm_policy can be referred*/
++ else
++ dst = ip6_dst_lookup_flow(net, ctl_sk, &fl6, NULL);
+ if (!IS_ERR(dst)) {
+ skb_dst_set(buff, dst);
+ ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL,
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index c1a01947530f0..db8c0de1de422 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -2858,6 +2858,9 @@ int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt,
+
+ task = rpc_call_null_helper(clnt, xprt, NULL, RPC_TASK_ASYNC,
+ &rpc_cb_add_xprt_call_ops, data);
++ if (IS_ERR(task))
++ return PTR_ERR(task);
++
+ data->xps->xps_nunique_destaddr_xprts++;
+ rpc_put_task(task);
+ success:
+diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
+index 53b024cea3b3e..5ecafffe7ce59 100644
+--- a/net/sunrpc/xprt.c
++++ b/net/sunrpc/xprt.c
+@@ -1179,11 +1179,8 @@ xprt_request_dequeue_receive_locked(struct rpc_task *task)
+ {
+ struct rpc_rqst *req = task->tk_rqstp;
+
+- if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
++ if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
+ xprt_request_rb_remove(req->rq_xprt, req);
+- xdr_free_bvec(&req->rq_rcv_buf);
+- req->rq_private_buf.bvec = NULL;
+- }
+ }
+
+ /**
+@@ -1221,6 +1218,8 @@ void xprt_complete_rqst(struct rpc_task *task, int copied)
+
+ xprt->stat.recvs++;
+
++ xdr_free_bvec(&req->rq_rcv_buf);
++ req->rq_private_buf.bvec = NULL;
+ req->rq_private_buf.len = copied;
+ /* Ensure all writes are done before we update */
+ /* req->rq_reply_bytes_recvd */
+@@ -1453,6 +1452,7 @@ xprt_request_dequeue_xprt(struct rpc_task *task)
+ xprt_request_dequeue_transmit_locked(task);
+ xprt_request_dequeue_receive_locked(task);
+ spin_unlock(&xprt->queue_lock);
++ xdr_free_bvec(&req->rq_rcv_buf);
+ }
+ }
+
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index 61df4d33c48ff..7f340f18599c9 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -209,6 +209,7 @@ struct sigmatel_spec {
+
+ /* beep widgets */
+ hda_nid_t anabeep_nid;
++ bool beep_power_on;
+
+ /* SPDIF-out mux */
+ const char * const *spdif_labels;
+@@ -4443,6 +4444,28 @@ static int stac_suspend(struct hda_codec *codec)
+
+ return 0;
+ }
++
++static int stac_check_power_status(struct hda_codec *codec, hda_nid_t nid)
++{
++#ifdef CONFIG_SND_HDA_INPUT_BEEP
++ struct sigmatel_spec *spec = codec->spec;
++#endif
++ int ret = snd_hda_gen_check_power_status(codec, nid);
++
++#ifdef CONFIG_SND_HDA_INPUT_BEEP
++ if (nid == spec->gen.beep_nid && codec->beep) {
++ if (codec->beep->enabled != spec->beep_power_on) {
++ spec->beep_power_on = codec->beep->enabled;
++ if (spec->beep_power_on)
++ snd_hda_power_up_pm(codec);
++ else
++ snd_hda_power_down_pm(codec);
++ }
++ ret |= spec->beep_power_on;
++ }
++#endif
++ return ret;
++}
+ #else
+ #define stac_suspend NULL
+ #endif /* CONFIG_PM */
+@@ -4455,6 +4478,7 @@ static const struct hda_codec_ops stac_patch_ops = {
+ .unsol_event = snd_hda_jack_unsol_event,
+ #ifdef CONFIG_PM
+ .suspend = stac_suspend,
++ .check_power_status = stac_check_power_status,
+ #endif
+ };
+
+diff --git a/tools/include/uapi/asm/errno.h b/tools/include/uapi/asm/errno.h
+index d30439b4b8ab4..869379f91fe48 100644
+--- a/tools/include/uapi/asm/errno.h
++++ b/tools/include/uapi/asm/errno.h
+@@ -9,8 +9,8 @@
+ #include "../../../arch/alpha/include/uapi/asm/errno.h"
+ #elif defined(__mips__)
+ #include "../../../arch/mips/include/uapi/asm/errno.h"
+-#elif defined(__xtensa__)
+-#include "../../../arch/xtensa/include/uapi/asm/errno.h"
++#elif defined(__hppa__)
++#include "../../../arch/parisc/include/uapi/asm/errno.h"
+ #else
+ #include <asm-generic/errno.h>
+ #endif