Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.15 commit in: /
Date: Tue, 11 Jan 2022 15:34:52
Message-Id: 1641915270.0635af2e31a42cb9599e7469bb3bb2ee79bb002c.mpagano@gentoo
1 commit: 0635af2e31a42cb9599e7469bb3bb2ee79bb002c
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Tue Jan 11 15:34:30 2022 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Tue Jan 11 15:34:30 2022 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0635af2e
7
8 Linux patch 5.15.14
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1013_linux-5.15.14.patch | 2967 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 2971 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 037403d7..4ce455a9 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -95,6 +95,10 @@ Patch: 1012_linux-5.15.13.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.15.13
23
24 +Patch: 1013_linux-5.15.14.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.15.14
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1013_linux-5.15.14.patch b/1013_linux-5.15.14.patch
33 new file mode 100644
34 index 00000000..01a9faf2
35 --- /dev/null
36 +++ b/1013_linux-5.15.14.patch
37 @@ -0,0 +1,2967 @@
38 +diff --git a/Makefile b/Makefile
39 +index 0964b940b8890..a469670e7675a 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 5
45 + PATCHLEVEL = 15
46 +-SUBLEVEL = 13
47 ++SUBLEVEL = 14
48 + EXTRAVERSION =
49 + NAME = Trick or Treat
50 +
51 +diff --git a/arch/arm/boot/dts/bcm2711.dtsi b/arch/arm/boot/dts/bcm2711.dtsi
52 +index 9e01dbca4a011..dff18fc9a9065 100644
53 +--- a/arch/arm/boot/dts/bcm2711.dtsi
54 ++++ b/arch/arm/boot/dts/bcm2711.dtsi
55 +@@ -582,6 +582,8 @@
56 + <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
57 + <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
58 +
59 ++ gpio-ranges = <&gpio 0 0 58>;
60 ++
61 + gpclk0_gpio49: gpclk0_gpio49 {
62 + pin-gpclk {
63 + pins = "gpio49";
64 +diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
65 +index a3e06b6809476..c113661a6668f 100644
66 +--- a/arch/arm/boot/dts/bcm283x.dtsi
67 ++++ b/arch/arm/boot/dts/bcm283x.dtsi
68 +@@ -126,6 +126,8 @@
69 + interrupt-controller;
70 + #interrupt-cells = <2>;
71 +
72 ++ gpio-ranges = <&gpio 0 0 54>;
73 ++
74 + /* Defines common pin muxing groups
75 + *
76 + * While each pin can have its mux selected
77 +diff --git a/arch/x86/kvm/debugfs.c b/arch/x86/kvm/debugfs.c
78 +index 54a83a7445384..f33c804a922ac 100644
79 +--- a/arch/x86/kvm/debugfs.c
80 ++++ b/arch/x86/kvm/debugfs.c
81 +@@ -95,6 +95,9 @@ static int kvm_mmu_rmaps_stat_show(struct seq_file *m, void *v)
82 + unsigned int *log[KVM_NR_PAGE_SIZES], *cur;
83 + int i, j, k, l, ret;
84 +
85 ++ if (!kvm_memslots_have_rmaps(kvm))
86 ++ return 0;
87 ++
88 + ret = -ENOMEM;
89 + memset(log, 0, sizeof(log));
90 + for (i = 0; i < KVM_NR_PAGE_SIZES; i++) {
91 +diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
92 +index 304accde365c8..6c010d4efa4ae 100644
93 +--- a/drivers/auxdisplay/charlcd.c
94 ++++ b/drivers/auxdisplay/charlcd.c
95 +@@ -578,6 +578,9 @@ static int charlcd_init(struct charlcd *lcd)
96 + * Since charlcd_init_display() needs to write data, we have to
98 + * mark the LCD initialized just before.
98 + */
99 ++ if (WARN_ON(!lcd->ops->init_display))
100 ++ return -EINVAL;
101 ++
102 + ret = lcd->ops->init_display(lcd);
103 + if (ret)
104 + return ret;
105 +diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
106 +index 83345bfac246f..6cf50ee0b77c5 100644
107 +--- a/drivers/edac/i10nm_base.c
108 ++++ b/drivers/edac/i10nm_base.c
109 +@@ -358,6 +358,9 @@ static int i10nm_get_hbm_munits(void)
110 +
111 + mbase = ioremap(base + off, I10NM_HBM_IMC_MMIO_SIZE);
112 + if (!mbase) {
113 ++ pci_dev_put(d->imc[lmc].mdev);
114 ++ d->imc[lmc].mdev = NULL;
115 ++
116 + i10nm_printk(KERN_ERR, "Failed to ioremap for hbm mc 0x%llx\n",
117 + base + off);
118 + return -ENOMEM;
119 +@@ -368,6 +371,12 @@ static int i10nm_get_hbm_munits(void)
120 +
121 + mcmtr = I10NM_GET_MCMTR(&d->imc[lmc], 0);
122 + if (!I10NM_IS_HBM_IMC(mcmtr)) {
123 ++ iounmap(d->imc[lmc].mbase);
124 ++ d->imc[lmc].mbase = NULL;
125 ++ d->imc[lmc].hbm_mc = false;
126 ++ pci_dev_put(d->imc[lmc].mdev);
127 ++ d->imc[lmc].mdev = NULL;
128 ++
129 + i10nm_printk(KERN_ERR, "This isn't an hbm mc!\n");
130 + return -ENODEV;
131 + }
132 +diff --git a/drivers/gpio/gpio-aspeed-sgpio.c b/drivers/gpio/gpio-aspeed-sgpio.c
133 +index 3d6ef37a7702a..b3a9b8488f11d 100644
134 +--- a/drivers/gpio/gpio-aspeed-sgpio.c
135 ++++ b/drivers/gpio/gpio-aspeed-sgpio.c
136 +@@ -395,7 +395,7 @@ static void aspeed_sgpio_irq_handler(struct irq_desc *desc)
137 + reg = ioread32(bank_reg(data, bank, reg_irq_status));
138 +
139 + for_each_set_bit(p, &reg, 32)
140 +- generic_handle_domain_irq(gc->irq.domain, i * 32 + p * 2);
141 ++ generic_handle_domain_irq(gc->irq.domain, (i * 32 + p) * 2);
142 + }
143 +
144 + chained_irq_exit(ic, desc);
145 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
146 +index 289c7dc053634..f428f94b43c0a 100644
147 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
148 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
149 +@@ -1069,6 +1069,7 @@ struct amdgpu_device {
150 + bool runpm;
151 + bool in_runpm;
152 + bool has_pr3;
153 ++ bool is_fw_fb;
154 +
155 + bool pm_sysfs_en;
156 + bool ucode_sysfs_en;
157 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
158 +index ae6ab93c868b8..7444484a12bf8 100644
159 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
160 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
161 +@@ -384,7 +384,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
162 + struct amdgpu_vm_bo_base *bo_base;
163 + int r;
164 +
165 +- if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
166 ++ if (!bo->tbo.resource || bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
167 + return;
168 +
169 + r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
170 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
171 +index f18240f873878..41677f99c67b1 100644
172 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
173 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
174 +@@ -38,6 +38,7 @@
175 + #include <drm/drm_probe_helper.h>
176 + #include <linux/mmu_notifier.h>
177 + #include <linux/suspend.h>
178 ++#include <linux/fb.h>
179 +
180 + #include "amdgpu.h"
181 + #include "amdgpu_irq.h"
182 +@@ -1246,6 +1247,26 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
183 +
184 + static const struct drm_driver amdgpu_kms_driver;
185 +
186 ++static bool amdgpu_is_fw_framebuffer(resource_size_t base,
187 ++ resource_size_t size)
188 ++{
189 ++ bool found = false;
190 ++#if IS_REACHABLE(CONFIG_FB)
191 ++ struct apertures_struct *a;
192 ++
193 ++ a = alloc_apertures(1);
194 ++ if (!a)
195 ++ return false;
196 ++
197 ++ a->ranges[0].base = base;
198 ++ a->ranges[0].size = size;
199 ++
200 ++ found = is_firmware_framebuffer(a);
201 ++ kfree(a);
202 ++#endif
203 ++ return found;
204 ++}
205 ++
206 + static int amdgpu_pci_probe(struct pci_dev *pdev,
207 + const struct pci_device_id *ent)
208 + {
209 +@@ -1254,6 +1275,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
210 + unsigned long flags = ent->driver_data;
211 + int ret, retry = 0;
212 + bool supports_atomic = false;
213 ++ bool is_fw_fb;
214 ++ resource_size_t base, size;
215 +
216 + if (amdgpu_virtual_display ||
217 + amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK))
218 +@@ -1310,6 +1333,10 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
219 + }
220 + #endif
221 +
222 ++ base = pci_resource_start(pdev, 0);
223 ++ size = pci_resource_len(pdev, 0);
224 ++ is_fw_fb = amdgpu_is_fw_framebuffer(base, size);
225 ++
226 + /* Get rid of things like offb */
227 + ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &amdgpu_kms_driver);
228 + if (ret)
229 +@@ -1322,6 +1349,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
230 + adev->dev = &pdev->dev;
231 + adev->pdev = pdev;
232 + ddev = adev_to_drm(adev);
233 ++ adev->is_fw_fb = is_fw_fb;
234 +
235 + if (!supports_atomic)
236 + ddev->driver_features &= ~DRIVER_ATOMIC;
237 +@@ -1498,7 +1526,10 @@ static int amdgpu_pmops_suspend(struct device *dev)
238 + adev->in_s3 = true;
239 + r = amdgpu_device_suspend(drm_dev, true);
240 + adev->in_s3 = false;
241 +-
242 ++ if (r)
243 ++ return r;
244 ++ if (!adev->in_s0ix)
245 ++ r = amdgpu_asic_reset(adev);
246 + return r;
247 + }
248 +
249 +@@ -1575,12 +1606,27 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
250 + if (amdgpu_device_supports_px(drm_dev))
251 + drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
252 +
253 ++ /*
254 ++ * By setting mp1_state as PP_MP1_STATE_UNLOAD, MP1 will do some
255 ++ * proper cleanups and put itself into a state ready for PNP. That
256 ++ * can address some random resume failures observed on BOCO capable
257 ++ * platforms.
258 ++ * TODO: this may also be needed for PX capable platforms.
259 ++ */
260 ++ if (amdgpu_device_supports_boco(drm_dev))
261 ++ adev->mp1_state = PP_MP1_STATE_UNLOAD;
262 ++
263 + ret = amdgpu_device_suspend(drm_dev, false);
264 + if (ret) {
265 + adev->in_runpm = false;
266 ++ if (amdgpu_device_supports_boco(drm_dev))
267 ++ adev->mp1_state = PP_MP1_STATE_NONE;
268 + return ret;
269 + }
270 +
271 ++ if (amdgpu_device_supports_boco(drm_dev))
272 ++ adev->mp1_state = PP_MP1_STATE_NONE;
273 ++
274 + if (amdgpu_device_supports_px(drm_dev)) {
275 + /* Only need to handle PCI state in the driver for ATPX
276 + * PCI core handles it for _PR3.
277 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
278 +index 8d682befe0d68..14499f0de32dc 100644
279 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
280 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
281 +@@ -552,9 +552,6 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
282 + if (!ring || !ring->fence_drv.initialized)
283 + continue;
284 +
285 +- if (!ring->no_scheduler)
286 +- drm_sched_stop(&ring->sched, NULL);
287 +-
288 + /* You can't wait for HW to signal if it's gone */
289 + if (!drm_dev_is_unplugged(&adev->ddev))
290 + r = amdgpu_fence_wait_empty(ring);
291 +@@ -614,11 +611,6 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
292 + if (!ring || !ring->fence_drv.initialized)
293 + continue;
294 +
295 +- if (!ring->no_scheduler) {
296 +- drm_sched_resubmit_jobs(&ring->sched);
297 +- drm_sched_start(&ring->sched, true);
298 +- }
299 +-
300 + /* enable the interrupt */
301 + if (ring->fence_drv.irq_src)
302 + amdgpu_irq_get(adev, ring->fence_drv.irq_src,
303 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
304 +index 7e45640fbee02..09a2fe8390591 100644
305 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
306 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
307 +@@ -206,6 +206,12 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
308 + adev->runpm = true;
309 + break;
310 + }
311 ++ /* XXX: disable runtime pm if we are the primary adapter
312 ++ * to avoid displays being re-enabled after DPMS.
313 ++ * This needs to be sorted out and fixed properly.
314 ++ */
315 ++ if (adev->is_fw_fb)
316 ++ adev->runpm = false;
317 + if (adev->runpm)
318 + dev_info(adev->dev, "Using BACO for runtime pm\n");
319 + }
320 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
321 +index 34001a30d449a..10e613ec7d24f 100644
322 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
323 ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
324 +@@ -78,6 +78,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
325 + .get_clock = dcn10_get_clock,
326 + .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
327 + .calc_vupdate_position = dcn10_calc_vupdate_position,
328 ++ .power_down = dce110_power_down,
329 + .set_backlight_level = dce110_set_backlight_level,
330 + .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
331 + .set_pipe = dce110_set_pipe,
332 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
333 +index 6d8f26dada722..0fe570717ba01 100644
334 +--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
335 ++++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
336 +@@ -352,6 +352,14 @@ static const struct dce110_clk_src_regs clk_src_regs[] = {
337 + clk_src_regs(3, D),
338 + clk_src_regs(4, E)
339 + };
340 +/* pll_id is remapped in dmub; in the driver it is the logical instance */
341 ++static const struct dce110_clk_src_regs clk_src_regs_b0[] = {
342 ++ clk_src_regs(0, A),
343 ++ clk_src_regs(1, B),
344 ++ clk_src_regs(2, F),
345 ++ clk_src_regs(3, G),
346 ++ clk_src_regs(4, E)
347 ++};
348 +
349 + static const struct dce110_clk_src_shift cs_shift = {
350 + CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
351 +@@ -2019,14 +2027,27 @@ static bool dcn31_resource_construct(
352 + dcn30_clock_source_create(ctx, ctx->dc_bios,
353 + CLOCK_SOURCE_COMBO_PHY_PLL1,
354 + &clk_src_regs[1], false);
355 +- pool->base.clock_sources[DCN31_CLK_SRC_PLL2] =
356 ++ /* move phypllx_pixclk_resync to dmub next */
357 ++ if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
358 ++ pool->base.clock_sources[DCN31_CLK_SRC_PLL2] =
359 ++ dcn30_clock_source_create(ctx, ctx->dc_bios,
360 ++ CLOCK_SOURCE_COMBO_PHY_PLL2,
361 ++ &clk_src_regs_b0[2], false);
362 ++ pool->base.clock_sources[DCN31_CLK_SRC_PLL3] =
363 ++ dcn30_clock_source_create(ctx, ctx->dc_bios,
364 ++ CLOCK_SOURCE_COMBO_PHY_PLL3,
365 ++ &clk_src_regs_b0[3], false);
366 ++ } else {
367 ++ pool->base.clock_sources[DCN31_CLK_SRC_PLL2] =
368 + dcn30_clock_source_create(ctx, ctx->dc_bios,
369 + CLOCK_SOURCE_COMBO_PHY_PLL2,
370 + &clk_src_regs[2], false);
371 +- pool->base.clock_sources[DCN31_CLK_SRC_PLL3] =
372 ++ pool->base.clock_sources[DCN31_CLK_SRC_PLL3] =
373 + dcn30_clock_source_create(ctx, ctx->dc_bios,
374 + CLOCK_SOURCE_COMBO_PHY_PLL3,
375 + &clk_src_regs[3], false);
376 ++ }
377 ++
378 + pool->base.clock_sources[DCN31_CLK_SRC_PLL4] =
379 + dcn30_clock_source_create(ctx, ctx->dc_bios,
380 + CLOCK_SOURCE_COMBO_PHY_PLL4,
381 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
382 +index 93571c9769967..cc4bed675588c 100644
383 +--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
384 ++++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
385 +@@ -39,4 +39,35 @@ struct resource_pool *dcn31_create_resource_pool(
386 + const struct dc_init_data *init_data,
387 + struct dc *dc);
388 +
389 ++/* temp: B0 specific before switch to dcn313 headers */
390 ++#ifndef regPHYPLLF_PIXCLK_RESYNC_CNTL
391 ++#define regPHYPLLF_PIXCLK_RESYNC_CNTL 0x007e
392 ++#define regPHYPLLF_PIXCLK_RESYNC_CNTL_BASE_IDX 1
393 ++#define regPHYPLLG_PIXCLK_RESYNC_CNTL 0x005f
394 ++#define regPHYPLLG_PIXCLK_RESYNC_CNTL_BASE_IDX 1
395 ++
396 ++//PHYPLLF_PIXCLK_RESYNC_CNTL
397 ++#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
398 ++#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1
399 ++#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
400 ++#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_ENABLE__SHIFT 0x8
401 ++#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
402 ++#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
403 ++#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L
404 ++#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
405 ++#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_ENABLE_MASK 0x00000100L
406 ++#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L
407 ++
408 ++//PHYPLLG_PIXCLK_RESYNC_CNTL
409 ++#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
410 ++#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1
411 ++#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
412 ++#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_ENABLE__SHIFT 0x8
413 ++#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
414 ++#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
415 ++#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L
416 ++#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
417 ++#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_ENABLE_MASK 0x00000100L
418 ++#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L
419 ++#endif
420 + #endif /* _DCN31_RESOURCE_H_ */
421 +diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
422 +index 04863a7971155..6dc83cfad9d84 100644
423 +--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
424 ++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
425 +@@ -1386,8 +1386,14 @@ static int smu_disable_dpms(struct smu_context *smu)
426 + {
427 + struct amdgpu_device *adev = smu->adev;
428 + int ret = 0;
429 ++ /*
430 ++ * TODO: (adev->in_suspend && !adev->in_s0ix) is added to pair with
431 ++ * the workaround which always resets the asic in suspend.
432 ++ * It's likely that the workaround will be dropped in the future;
433 ++ * the change here should then be dropped together with it.
434 ++ */
435 + bool use_baco = !smu->is_apu &&
436 +- ((amdgpu_in_reset(adev) &&
437 ++ (((amdgpu_in_reset(adev) || (adev->in_suspend && !adev->in_s0ix)) &&
438 + (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
439 + ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));
440 +
441 +@@ -1536,9 +1542,7 @@ static int smu_suspend(void *handle)
442 +
443 + smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
444 +
445 +- /* skip CGPG when in S0ix */
446 +- if (smu->is_apu && !adev->in_s0ix)
447 +- smu_set_gfx_cgpg(&adev->smu, false);
448 ++ smu_set_gfx_cgpg(&adev->smu, false);
449 +
450 + return 0;
451 + }
452 +@@ -1569,8 +1573,7 @@ static int smu_resume(void *handle)
453 + return ret;
454 + }
455 +
456 +- if (smu->is_apu)
457 +- smu_set_gfx_cgpg(&adev->smu, true);
458 ++ smu_set_gfx_cgpg(&adev->smu, true);
459 +
460 + smu->disable_uclk_switch = 0;
461 +
462 +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
463 +index 43028f2cd28b5..9c91e79c955fb 100644
464 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
465 ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
466 +@@ -120,7 +120,8 @@ int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
467 +
468 + int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
469 + {
470 +- if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
471 ++ /* Until now, SMU12 is only implemented for the Renoir series, so we needn't do an APU check here. */
472 ++ if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) || smu->adev->in_s0ix)
473 + return 0;
474 +
475 + return smu_cmn_send_smc_msg_with_param(smu,
476 +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
477 +index 5019903db492a..c9cfeb094750d 100644
478 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
479 ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
480 +@@ -1619,7 +1619,7 @@ static int aldebaran_allow_xgmi_power_down(struct smu_context *smu, bool en)
481 + {
482 + return smu_cmn_send_smc_msg_with_param(smu,
483 + SMU_MSG_GmiPwrDnControl,
484 +- en ? 1 : 0,
485 ++ en ? 0 : 1,
486 + NULL);
487 + }
488 +
489 +diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
490 +index 53b8da6dbb23f..db26cc36e13fe 100644
491 +--- a/drivers/i2c/busses/i2c-mpc.c
492 ++++ b/drivers/i2c/busses/i2c-mpc.c
493 +@@ -492,7 +492,7 @@ static void mpc_i2c_finish(struct mpc_i2c *i2c, int rc)
494 +
495 + static void mpc_i2c_do_action(struct mpc_i2c *i2c)
496 + {
497 +- struct i2c_msg *msg = &i2c->msgs[i2c->curr_msg];
498 ++ struct i2c_msg *msg = NULL;
499 + int dir = 0;
500 + int recv_len = 0;
501 + u8 byte;
502 +@@ -501,10 +501,13 @@ static void mpc_i2c_do_action(struct mpc_i2c *i2c)
503 +
504 + i2c->cntl_bits &= ~(CCR_RSTA | CCR_MTX | CCR_TXAK);
505 +
506 +- if (msg->flags & I2C_M_RD)
507 +- dir = 1;
508 +- if (msg->flags & I2C_M_RECV_LEN)
509 +- recv_len = 1;
510 ++ if (i2c->action != MPC_I2C_ACTION_STOP) {
511 ++ msg = &i2c->msgs[i2c->curr_msg];
512 ++ if (msg->flags & I2C_M_RD)
513 ++ dir = 1;
514 ++ if (msg->flags & I2C_M_RECV_LEN)
515 ++ recv_len = 1;
516 ++ }
517 +
518 + switch (i2c->action) {
519 + case MPC_I2C_ACTION_RESTART:
520 +@@ -581,7 +584,7 @@ static void mpc_i2c_do_action(struct mpc_i2c *i2c)
521 + break;
522 + }
523 +
524 +- if (msg->len == i2c->byte_posn) {
525 ++ if (msg && msg->len == i2c->byte_posn) {
526 + i2c->curr_msg++;
527 + i2c->byte_posn = 0;
528 +
529 +diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
530 +index 54964fbe3f033..cfbef70e8ba70 100644
531 +--- a/drivers/i2c/i2c-core-base.c
532 ++++ b/drivers/i2c/i2c-core-base.c
533 +@@ -466,14 +466,12 @@ static int i2c_smbus_host_notify_to_irq(const struct i2c_client *client)
534 + static int i2c_device_probe(struct device *dev)
535 + {
536 + struct i2c_client *client = i2c_verify_client(dev);
537 +- struct i2c_adapter *adap;
538 + struct i2c_driver *driver;
539 + int status;
540 +
541 + if (!client)
542 + return 0;
543 +
544 +- adap = client->adapter;
545 + client->irq = client->init_irq;
546 +
547 + if (!client->irq) {
548 +@@ -539,14 +537,6 @@ static int i2c_device_probe(struct device *dev)
549 +
550 + dev_dbg(dev, "probe\n");
551 +
552 +- if (adap->bus_regulator) {
553 +- status = regulator_enable(adap->bus_regulator);
554 +- if (status < 0) {
555 +- dev_err(&adap->dev, "Failed to enable bus regulator\n");
556 +- goto err_clear_wakeup_irq;
557 +- }
558 +- }
559 +-
560 + status = of_clk_set_defaults(dev->of_node, false);
561 + if (status < 0)
562 + goto err_clear_wakeup_irq;
563 +@@ -604,10 +594,8 @@ put_sync_adapter:
564 + static void i2c_device_remove(struct device *dev)
565 + {
566 + struct i2c_client *client = to_i2c_client(dev);
567 +- struct i2c_adapter *adap;
568 + struct i2c_driver *driver;
569 +
570 +- adap = client->adapter;
571 + driver = to_i2c_driver(dev->driver);
572 + if (driver->remove) {
573 + int status;
574 +@@ -622,8 +610,6 @@ static void i2c_device_remove(struct device *dev)
575 + devres_release_group(&client->dev, client->devres_group_id);
576 +
577 + dev_pm_domain_detach(&client->dev, true);
578 +- if (!pm_runtime_status_suspended(&client->dev) && adap->bus_regulator)
579 +- regulator_disable(adap->bus_regulator);
580 +
581 + dev_pm_clear_wake_irq(&client->dev);
582 + device_init_wakeup(&client->dev, false);
583 +@@ -633,86 +619,6 @@ static void i2c_device_remove(struct device *dev)
584 + pm_runtime_put(&client->adapter->dev);
585 + }
586 +
587 +-#ifdef CONFIG_PM_SLEEP
588 +-static int i2c_resume_early(struct device *dev)
589 +-{
590 +- struct i2c_client *client = i2c_verify_client(dev);
591 +- int err;
592 +-
593 +- if (!client)
594 +- return 0;
595 +-
596 +- if (pm_runtime_status_suspended(&client->dev) &&
597 +- client->adapter->bus_regulator) {
598 +- err = regulator_enable(client->adapter->bus_regulator);
599 +- if (err)
600 +- return err;
601 +- }
602 +-
603 +- return pm_generic_resume_early(&client->dev);
604 +-}
605 +-
606 +-static int i2c_suspend_late(struct device *dev)
607 +-{
608 +- struct i2c_client *client = i2c_verify_client(dev);
609 +- int err;
610 +-
611 +- if (!client)
612 +- return 0;
613 +-
614 +- err = pm_generic_suspend_late(&client->dev);
615 +- if (err)
616 +- return err;
617 +-
618 +- if (!pm_runtime_status_suspended(&client->dev) &&
619 +- client->adapter->bus_regulator)
620 +- return regulator_disable(client->adapter->bus_regulator);
621 +-
622 +- return 0;
623 +-}
624 +-#endif
625 +-
626 +-#ifdef CONFIG_PM
627 +-static int i2c_runtime_resume(struct device *dev)
628 +-{
629 +- struct i2c_client *client = i2c_verify_client(dev);
630 +- int err;
631 +-
632 +- if (!client)
633 +- return 0;
634 +-
635 +- if (client->adapter->bus_regulator) {
636 +- err = regulator_enable(client->adapter->bus_regulator);
637 +- if (err)
638 +- return err;
639 +- }
640 +-
641 +- return pm_generic_runtime_resume(&client->dev);
642 +-}
643 +-
644 +-static int i2c_runtime_suspend(struct device *dev)
645 +-{
646 +- struct i2c_client *client = i2c_verify_client(dev);
647 +- int err;
648 +-
649 +- if (!client)
650 +- return 0;
651 +-
652 +- err = pm_generic_runtime_suspend(&client->dev);
653 +- if (err)
654 +- return err;
655 +-
656 +- if (client->adapter->bus_regulator)
657 +- return regulator_disable(client->adapter->bus_regulator);
658 +- return 0;
659 +-}
660 +-#endif
661 +-
662 +-static const struct dev_pm_ops i2c_device_pm = {
663 +- SET_LATE_SYSTEM_SLEEP_PM_OPS(i2c_suspend_late, i2c_resume_early)
664 +- SET_RUNTIME_PM_OPS(i2c_runtime_suspend, i2c_runtime_resume, NULL)
665 +-};
666 +-
667 + static void i2c_device_shutdown(struct device *dev)
668 + {
669 + struct i2c_client *client = i2c_verify_client(dev);
670 +@@ -772,7 +678,6 @@ struct bus_type i2c_bus_type = {
671 + .probe = i2c_device_probe,
672 + .remove = i2c_device_remove,
673 + .shutdown = i2c_device_shutdown,
674 +- .pm = &i2c_device_pm,
675 + };
676 + EXPORT_SYMBOL_GPL(i2c_bus_type);
677 +
678 +diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
679 +index b8d715c68ca44..11a0806469162 100644
680 +--- a/drivers/infiniband/core/uverbs_marshall.c
681 ++++ b/drivers/infiniband/core/uverbs_marshall.c
682 +@@ -66,7 +66,7 @@ void ib_copy_ah_attr_to_user(struct ib_device *device,
683 + struct rdma_ah_attr *src = ah_attr;
684 + struct rdma_ah_attr conv_ah;
685 +
686 +- memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
687 ++ memset(&dst->grh, 0, sizeof(dst->grh));
688 +
689 + if ((ah_attr->type == RDMA_AH_ATTR_TYPE_OPA) &&
690 + (rdma_ah_get_dlid(ah_attr) > be16_to_cpu(IB_LID_PERMISSIVE)) &&
691 +diff --git a/drivers/infiniband/core/uverbs_uapi.c b/drivers/infiniband/core/uverbs_uapi.c
692 +index 2f2c7646fce17..a02916a3a79ce 100644
693 +--- a/drivers/infiniband/core/uverbs_uapi.c
694 ++++ b/drivers/infiniband/core/uverbs_uapi.c
695 +@@ -447,6 +447,9 @@ static int uapi_finalize(struct uverbs_api *uapi)
696 + uapi->num_write_ex = max_write_ex + 1;
697 + data = kmalloc_array(uapi->num_write + uapi->num_write_ex,
698 + sizeof(*uapi->write_methods), GFP_KERNEL);
699 ++ if (!data)
700 ++ return -ENOMEM;
701 ++
702 + for (i = 0; i != uapi->num_write + uapi->num_write_ex; i++)
703 + data[i] = &uapi->notsupp_method;
704 + uapi->write_methods = data;
705 +diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
706 +index 6204ae2caef58..bf20a388eabe1 100644
707 +--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
708 ++++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
709 +@@ -641,6 +641,7 @@ struct mlx5_ib_mr {
710 +
711 + /* User MR data */
712 + struct mlx5_cache_ent *cache_ent;
713 ++ struct ib_umem *umem;
714 +
715 + /* This is zero'd when the MR is allocated */
716 + union {
717 +@@ -652,7 +653,7 @@ struct mlx5_ib_mr {
718 + struct list_head list;
719 + };
720 +
721 +- /* Used only by kernel MRs */
722 ++ /* Used only by kernel MRs (umem == NULL) */
723 + struct {
724 + void *descs;
725 + void *descs_alloc;
726 +@@ -674,9 +675,8 @@ struct mlx5_ib_mr {
727 + int data_length;
728 + };
729 +
730 +- /* Used only by User MRs */
731 ++ /* Used only by User MRs (umem != NULL) */
732 + struct {
733 +- struct ib_umem *umem;
734 + unsigned int page_shift;
735 + /* Current access_flags */
736 + int access_flags;
737 +diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
738 +index 69b2ce4c292ae..22e2f4d79743d 100644
739 +--- a/drivers/infiniband/hw/mlx5/mr.c
740 ++++ b/drivers/infiniband/hw/mlx5/mr.c
741 +@@ -1911,18 +1911,19 @@ err:
742 + return ret;
743 + }
744 +
745 +-static void mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
746 ++static void
747 ++mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
748 + {
749 +- struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
750 +- int size = mr->max_descs * mr->desc_size;
751 +-
752 +- if (!mr->descs)
753 +- return;
754 ++ if (!mr->umem && mr->descs) {
755 ++ struct ib_device *device = mr->ibmr.device;
756 ++ int size = mr->max_descs * mr->desc_size;
757 ++ struct mlx5_ib_dev *dev = to_mdev(device);
758 +
759 +- dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
760 +- DMA_TO_DEVICE);
761 +- kfree(mr->descs_alloc);
762 +- mr->descs = NULL;
763 ++ dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
764 ++ DMA_TO_DEVICE);
765 ++ kfree(mr->descs_alloc);
766 ++ mr->descs = NULL;
767 ++ }
768 + }
769 +
770 + int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
771 +@@ -1998,8 +1999,7 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
772 + if (mr->cache_ent) {
773 + mlx5_mr_cache_free(dev, mr);
774 + } else {
775 +- if (!udata)
776 +- mlx5_free_priv_descs(mr);
777 ++ mlx5_free_priv_descs(mr);
778 + kfree(mr);
779 + }
780 + return 0;
781 +@@ -2086,6 +2086,7 @@ static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd,
782 + if (err)
783 + goto err_free_in;
784 +
785 ++ mr->umem = NULL;
786 + kfree(in);
787 +
788 + return mr;
789 +@@ -2212,6 +2213,7 @@ static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd,
790 + }
791 +
792 + mr->ibmr.device = pd->device;
793 ++ mr->umem = NULL;
794 +
795 + switch (mr_type) {
796 + case IB_MR_TYPE_MEM_REG:
797 +diff --git a/drivers/input/touchscreen/zinitix.c b/drivers/input/touchscreen/zinitix.c
798 +index b8d901099378d..1e70b8d2a8d79 100644
799 +--- a/drivers/input/touchscreen/zinitix.c
800 ++++ b/drivers/input/touchscreen/zinitix.c
801 +@@ -488,6 +488,15 @@ static int zinitix_ts_probe(struct i2c_client *client)
802 + return error;
803 + }
804 +
805 ++ error = devm_request_threaded_irq(&client->dev, client->irq,
806 ++ NULL, zinitix_ts_irq_handler,
807 ++ IRQF_ONESHOT | IRQF_NO_AUTOEN,
808 ++ client->name, bt541);
809 ++ if (error) {
810 ++ dev_err(&client->dev, "Failed to request IRQ: %d\n", error);
811 ++ return error;
812 ++ }
813 ++
814 + error = zinitix_init_input_dev(bt541);
815 + if (error) {
816 + dev_err(&client->dev,
817 +@@ -513,15 +522,6 @@ static int zinitix_ts_probe(struct i2c_client *client)
818 + return -EINVAL;
819 + }
820 +
821 +- error = devm_request_threaded_irq(&client->dev, client->irq,
822 +- NULL, zinitix_ts_irq_handler,
823 +- IRQF_ONESHOT | IRQF_NO_AUTOEN,
824 +- client->name, bt541);
825 +- if (error) {
826 +- dev_err(&client->dev, "Failed to request IRQ: %d\n", error);
827 +- return error;
828 +- }
829 +-
830 + return 0;
831 + }
832 +
833 +diff --git a/drivers/isdn/mISDN/core.c b/drivers/isdn/mISDN/core.c
834 +index 55891e4204460..a41b4b2645941 100644
835 +--- a/drivers/isdn/mISDN/core.c
836 ++++ b/drivers/isdn/mISDN/core.c
837 +@@ -381,7 +381,7 @@ mISDNInit(void)
838 + err = mISDN_inittimer(&debug);
839 + if (err)
840 + goto error2;
841 +- err = l1_init(&debug);
842 ++ err = Isdnl1_Init(&debug);
843 + if (err)
844 + goto error3;
845 + err = Isdnl2_Init(&debug);
846 +@@ -395,7 +395,7 @@ mISDNInit(void)
847 + error5:
848 + Isdnl2_cleanup();
849 + error4:
850 +- l1_cleanup();
851 ++ Isdnl1_cleanup();
852 + error3:
853 + mISDN_timer_cleanup();
854 + error2:
855 +@@ -408,7 +408,7 @@ static void mISDN_cleanup(void)
856 + {
857 + misdn_sock_cleanup();
858 + Isdnl2_cleanup();
859 +- l1_cleanup();
860 ++ Isdnl1_cleanup();
861 + mISDN_timer_cleanup();
862 + class_unregister(&mISDN_class);
863 +
864 +diff --git a/drivers/isdn/mISDN/core.h b/drivers/isdn/mISDN/core.h
865 +index 23b44d3033279..42599f49c189d 100644
866 +--- a/drivers/isdn/mISDN/core.h
867 ++++ b/drivers/isdn/mISDN/core.h
868 +@@ -60,8 +60,8 @@ struct Bprotocol *get_Bprotocol4id(u_int);
869 + extern int mISDN_inittimer(u_int *);
870 + extern void mISDN_timer_cleanup(void);
871 +
872 +-extern int l1_init(u_int *);
873 +-extern void l1_cleanup(void);
874 ++extern int Isdnl1_Init(u_int *);
875 ++extern void Isdnl1_cleanup(void);
876 + extern int Isdnl2_Init(u_int *);
877 + extern void Isdnl2_cleanup(void);
878 +
879 +diff --git a/drivers/isdn/mISDN/layer1.c b/drivers/isdn/mISDN/layer1.c
880 +index 98a3bc6c17009..7b31c25a550e3 100644
881 +--- a/drivers/isdn/mISDN/layer1.c
882 ++++ b/drivers/isdn/mISDN/layer1.c
883 +@@ -398,7 +398,7 @@ create_l1(struct dchannel *dch, dchannel_l1callback *dcb) {
884 + EXPORT_SYMBOL(create_l1);
885 +
886 + int
887 +-l1_init(u_int *deb)
888 ++Isdnl1_Init(u_int *deb)
889 + {
890 + debug = deb;
891 + l1fsm_s.state_count = L1S_STATE_COUNT;
892 +@@ -409,7 +409,7 @@ l1_init(u_int *deb)
893 + }
894 +
895 + void
896 +-l1_cleanup(void)
897 ++Isdnl1_cleanup(void)
898 + {
899 + mISDN_FsmFree(&l1fsm_s);
900 + }
901 +diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
902 +index 6ba12f0f0f036..9fa4794936426 100644
903 +--- a/drivers/md/raid1.c
904 ++++ b/drivers/md/raid1.c
905 +@@ -1496,12 +1496,13 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
906 + if (!r1_bio->bios[i])
907 + continue;
908 +
909 +- if (first_clone && test_bit(WriteMostly, &rdev->flags)) {
910 ++ if (first_clone) {
911 + /* do behind I/O ?
912 + * Not if there are too many, or cannot
913 + * allocate memory, or a reader on WriteMostly
914 + * is waiting for behind writes to flush */
915 + if (bitmap &&
916 ++ test_bit(WriteMostly, &rdev->flags) &&
917 + (atomic_read(&bitmap->behind_writes)
918 + < mddev->bitmap_info.max_write_behind) &&
919 + !waitqueue_active(&bitmap->behind_wait)) {
920 +diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
921 +index 0e43000614abd..8f08e0bae3004 100644
922 +--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
923 ++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
924 +@@ -1288,26 +1288,22 @@ static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
925 +
926 + static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
927 + {
928 +- struct ena_tx_buffer *tx_info = NULL;
929 ++ struct ena_tx_buffer *tx_info;
930 +
931 +- if (likely(req_id < tx_ring->ring_size)) {
932 +- tx_info = &tx_ring->tx_buffer_info[req_id];
933 +- if (likely(tx_info->skb))
934 +- return 0;
935 +- }
936 ++ tx_info = &tx_ring->tx_buffer_info[req_id];
937 ++ if (likely(tx_info->skb))
938 ++ return 0;
939 +
940 + return handle_invalid_req_id(tx_ring, req_id, tx_info, false);
941 + }
942 +
943 + static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id)
944 + {
945 +- struct ena_tx_buffer *tx_info = NULL;
946 ++ struct ena_tx_buffer *tx_info;
947 +
948 +- if (likely(req_id < xdp_ring->ring_size)) {
949 +- tx_info = &xdp_ring->tx_buffer_info[req_id];
950 +- if (likely(tx_info->xdpf))
951 +- return 0;
952 +- }
953 ++ tx_info = &xdp_ring->tx_buffer_info[req_id];
954 ++ if (likely(tx_info->xdpf))
955 ++ return 0;
956 +
957 + return handle_invalid_req_id(xdp_ring, req_id, tx_info, true);
958 + }
959 +@@ -1332,9 +1328,14 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
960 +
961 + rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
962 + &req_id);
963 +- if (rc)
964 ++ if (rc) {
965 ++ if (unlikely(rc == -EINVAL))
966 ++ handle_invalid_req_id(tx_ring, req_id, NULL,
967 ++ false);
968 + break;
969 ++ }
970 +
971 ++ /* validate that the request id points to a valid skb */
972 + rc = validate_tx_req_id(tx_ring, req_id);
973 + if (rc)
974 + break;
975 +@@ -1427,6 +1428,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
976 + u16 *next_to_clean)
977 + {
978 + struct ena_rx_buffer *rx_info;
979 ++ struct ena_adapter *adapter;
980 + u16 len, req_id, buf = 0;
981 + struct sk_buff *skb;
982 + void *page_addr;
983 +@@ -1439,8 +1441,14 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
984 + rx_info = &rx_ring->rx_buffer_info[req_id];
985 +
986 + if (unlikely(!rx_info->page)) {
987 +- netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
988 +- "Page is NULL\n");
989 ++ adapter = rx_ring->adapter;
990 ++ netif_err(adapter, rx_err, rx_ring->netdev,
991 ++ "Page is NULL. qid %u req_id %u\n", rx_ring->qid, req_id);
992 ++ ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, &rx_ring->syncp);
993 ++ adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
994 ++ /* Make sure reset reason is set before triggering the reset */
995 ++ smp_mb__before_atomic();
996 ++ set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
997 + return NULL;
998 + }
999 +
1000 +@@ -1896,9 +1904,14 @@ static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
1001 +
1002 + rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq,
1003 + &req_id);
1004 +- if (rc)
1005 ++ if (rc) {
1006 ++ if (unlikely(rc == -EINVAL))
1007 ++ handle_invalid_req_id(xdp_ring, req_id, NULL,
1008 ++ true);
1009 + break;
1010 ++ }
1011 +
1012 ++ /* validate that the request id points to a valid xdp_frame */
1013 + rc = validate_xdp_req_id(xdp_ring, req_id);
1014 + if (rc)
1015 + break;
1016 +@@ -4013,10 +4026,6 @@ static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev,
1017 + max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num);
1018 + /* 1 IRQ for mgmnt and 1 IRQs for each IO direction */
1019 + max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1);
1020 +- if (unlikely(!max_num_io_queues)) {
1021 +- dev_err(&pdev->dev, "The device doesn't have io queues\n");
1022 +- return -EFAULT;
1023 +- }
1024 +
1025 + return max_num_io_queues;
1026 + }
1027 +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
1028 +index 24122ccda614c..72f8751784c31 100644
1029 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
1030 ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
1031 +@@ -365,6 +365,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
1032 + if (!buff->is_eop) {
1033 + buff_ = buff;
1034 + do {
1035 ++ if (buff_->next >= self->size) {
1036 ++ err = -EIO;
1037 ++ goto err_exit;
1038 ++ }
1039 + next_ = buff_->next,
1040 + buff_ = &self->buff_ring[next_];
1041 + is_rsc_completed =
1042 +@@ -388,6 +392,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
1043 + (buff->is_lro && buff->is_cso_err)) {
1044 + buff_ = buff;
1045 + do {
1046 ++ if (buff_->next >= self->size) {
1047 ++ err = -EIO;
1048 ++ goto err_exit;
1049 ++ }
1050 + next_ = buff_->next,
1051 + buff_ = &self->buff_ring[next_];
1052 +
1053 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
1054 +index 76d0b809d1340..cc1cefdd4cdac 100644
1055 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
1056 ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
1057 +@@ -99,6 +99,24 @@ MODULE_LICENSE("GPL v2");
1058 +
1059 + static struct workqueue_struct *i40e_wq;
1060 +
1061 ++static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f,
1062 ++ struct net_device *netdev, int delta)
1063 ++{
1064 ++ struct netdev_hw_addr *ha;
1065 ++
1066 ++ if (!f || !netdev)
1067 ++ return;
1068 ++
1069 ++ netdev_for_each_mc_addr(ha, netdev) {
1070 ++ if (ether_addr_equal(ha->addr, f->macaddr)) {
1071 ++ ha->refcount += delta;
1072 ++ if (ha->refcount <= 0)
1073 ++ ha->refcount = 1;
1074 ++ break;
1075 ++ }
1076 ++ }
1077 ++}
1078 ++
1079 + /**
1080 + * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
1081 + * @hw: pointer to the HW structure
1082 +@@ -2036,6 +2054,7 @@ static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
1083 + hlist_for_each_entry_safe(new, h, from, hlist) {
1084 + /* We can simply free the wrapper structure */
1085 + hlist_del(&new->hlist);
1086 ++ netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
1087 + kfree(new);
1088 + }
1089 + }
1090 +@@ -2383,6 +2402,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1091 + &tmp_add_list,
1092 + &tmp_del_list,
1093 + vlan_filters);
1094 ++
1095 ++ hlist_for_each_entry(new, &tmp_add_list, hlist)
1096 ++ netdev_hw_addr_refcnt(new->f, vsi->netdev, 1);
1097 ++
1098 + if (retval)
1099 + goto err_no_memory_locked;
1100 +
1101 +@@ -2515,6 +2538,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1102 + if (new->f->state == I40E_FILTER_NEW)
1103 + new->f->state = new->state;
1104 + hlist_del(&new->hlist);
1105 ++ netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
1106 + kfree(new);
1107 + }
1108 + spin_unlock_bh(&vsi->mac_filter_hash_lock);
1109 +@@ -8716,6 +8740,27 @@ int i40e_open(struct net_device *netdev)
1110 + return 0;
1111 + }
1112 +
1113 ++/**
1114 ++ * i40e_netif_set_realnum_tx_rx_queues - Update number of tx/rx queues
1115 ++ * @vsi: vsi structure
1116 ++ *
1117 ++ * This updates netdev's number of tx/rx queues
1118 ++ *
1119 ++ * Returns status of setting tx/rx queues
1120 ++ **/
1121 ++static int i40e_netif_set_realnum_tx_rx_queues(struct i40e_vsi *vsi)
1122 ++{
1123 ++ int ret;
1124 ++
1125 ++ ret = netif_set_real_num_rx_queues(vsi->netdev,
1126 ++ vsi->num_queue_pairs);
1127 ++ if (ret)
1128 ++ return ret;
1129 ++
1130 ++ return netif_set_real_num_tx_queues(vsi->netdev,
1131 ++ vsi->num_queue_pairs);
1132 ++}
1133 ++
1134 + /**
1135 + * i40e_vsi_open -
1136 + * @vsi: the VSI to open
1137 +@@ -8752,13 +8797,7 @@ int i40e_vsi_open(struct i40e_vsi *vsi)
1138 + goto err_setup_rx;
1139 +
1140 + /* Notify the stack of the actual queue counts. */
1141 +- err = netif_set_real_num_tx_queues(vsi->netdev,
1142 +- vsi->num_queue_pairs);
1143 +- if (err)
1144 +- goto err_set_queues;
1145 +-
1146 +- err = netif_set_real_num_rx_queues(vsi->netdev,
1147 +- vsi->num_queue_pairs);
1148 ++ err = i40e_netif_set_realnum_tx_rx_queues(vsi);
1149 + if (err)
1150 + goto err_set_queues;
1151 +
1152 +@@ -14149,6 +14188,9 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
1153 + case I40E_VSI_MAIN:
1154 + case I40E_VSI_VMDQ2:
1155 + ret = i40e_config_netdev(vsi);
1156 ++ if (ret)
1157 ++ goto err_netdev;
1158 ++ ret = i40e_netif_set_realnum_tx_rx_queues(vsi);
1159 + if (ret)
1160 + goto err_netdev;
1161 + ret = register_netdev(vsi->netdev);
1162 +@@ -15451,8 +15493,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1163 +
1164 + if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
1165 + hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
1166 +- dev_info(&pdev->dev,
1167 +- "The driver for the device detected a newer version of the NVM image v%u.%u than expected v%u.%u. Please install the most recent version of the network driver.\n",
1168 ++ dev_dbg(&pdev->dev,
1169 ++ "The driver for the device detected a newer version of the NVM image v%u.%u than v%u.%u.\n",
1170 + hw->aq.api_maj_ver,
1171 + hw->aq.api_min_ver,
1172 + I40E_FW_API_VERSION_MAJOR,
1173 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
1174 +index 2ea4deb8fc44c..048f1678ab8ac 100644
1175 +--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
1176 ++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
1177 +@@ -1877,17 +1877,19 @@ sriov_configure_out:
1178 + /***********************virtual channel routines******************/
1179 +
1180 + /**
1181 +- * i40e_vc_send_msg_to_vf
1182 ++ * i40e_vc_send_msg_to_vf_ex
1183 + * @vf: pointer to the VF info
1184 + * @v_opcode: virtual channel opcode
1185 + * @v_retval: virtual channel return value
1186 + * @msg: pointer to the msg buffer
1187 + * @msglen: msg length
1188 ++ * @is_quiet: true to suppress printing of unsuccessful return values, false otherwise
1189 + *
1190 + * send msg to VF
1191 + **/
1192 +-static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
1193 +- u32 v_retval, u8 *msg, u16 msglen)
1194 ++static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode,
1195 ++ u32 v_retval, u8 *msg, u16 msglen,
1196 ++ bool is_quiet)
1197 + {
1198 + struct i40e_pf *pf;
1199 + struct i40e_hw *hw;
1200 +@@ -1903,7 +1905,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
1201 + abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1202 +
1203 + /* single place to detect unsuccessful return values */
1204 +- if (v_retval) {
1205 ++ if (v_retval && !is_quiet) {
1206 + vf->num_invalid_msgs++;
1207 + dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
1208 + vf->vf_id, v_opcode, v_retval);
1209 +@@ -1933,6 +1935,23 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
1210 + return 0;
1211 + }
1212 +
1213 ++/**
1214 ++ * i40e_vc_send_msg_to_vf
1215 ++ * @vf: pointer to the VF info
1216 ++ * @v_opcode: virtual channel opcode
1217 ++ * @v_retval: virtual channel return value
1218 ++ * @msg: pointer to the msg buffer
1219 ++ * @msglen: msg length
1220 ++ *
1221 ++ * send msg to VF
1222 ++ **/
1223 ++static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
1224 ++ u32 v_retval, u8 *msg, u16 msglen)
1225 ++{
1226 ++ return i40e_vc_send_msg_to_vf_ex(vf, v_opcode, v_retval,
1227 ++ msg, msglen, false);
1228 ++}
1229 ++
1230 + /**
1231 + * i40e_vc_send_resp_to_vf
1232 + * @vf: pointer to the VF info
1233 +@@ -2695,6 +2714,7 @@ error_param:
1234 + * i40e_check_vf_permission
1235 + * @vf: pointer to the VF info
1236 + * @al: MAC address list from virtchnl
1237 ++ * @is_quiet: set true to print the msg without opcode info, false otherwise
1238 + *
1239 + * Check that the given list of MAC addresses is allowed. Will return -EPERM
1240 + * if any address in the list is not valid. Checks the following conditions:
1241 +@@ -2709,13 +2729,15 @@ error_param:
1242 + * addresses might not be accurate.
1243 + **/
1244 + static inline int i40e_check_vf_permission(struct i40e_vf *vf,
1245 +- struct virtchnl_ether_addr_list *al)
1246 ++ struct virtchnl_ether_addr_list *al,
1247 ++ bool *is_quiet)
1248 + {
1249 + struct i40e_pf *pf = vf->pf;
1250 + struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
1251 + int mac2add_cnt = 0;
1252 + int i;
1253 +
1254 ++ *is_quiet = false;
1255 + for (i = 0; i < al->num_elements; i++) {
1256 + struct i40e_mac_filter *f;
1257 + u8 *addr = al->list[i].addr;
1258 +@@ -2739,6 +2761,7 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
1259 + !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
1260 + dev_err(&pf->pdev->dev,
1261 + "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
1262 ++ *is_quiet = true;
1263 + return -EPERM;
1264 + }
1265 +
1266 +@@ -2775,6 +2798,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
1267 + (struct virtchnl_ether_addr_list *)msg;
1268 + struct i40e_pf *pf = vf->pf;
1269 + struct i40e_vsi *vsi = NULL;
1270 ++ bool is_quiet = false;
1271 + i40e_status ret = 0;
1272 + int i;
1273 +
1274 +@@ -2791,7 +2815,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
1275 + */
1276 + spin_lock_bh(&vsi->mac_filter_hash_lock);
1277 +
1278 +- ret = i40e_check_vf_permission(vf, al);
1279 ++ ret = i40e_check_vf_permission(vf, al, &is_quiet);
1280 + if (ret) {
1281 + spin_unlock_bh(&vsi->mac_filter_hash_lock);
1282 + goto error_param;
1283 +@@ -2829,8 +2853,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
1284 +
1285 + error_param:
1286 + /* send the response to the VF */
1287 +- return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
1288 +- ret);
1289 ++ return i40e_vc_send_msg_to_vf_ex(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
1290 ++ ret, NULL, 0, is_quiet);
1291 + }
1292 +
1293 + /**
1294 +diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
1295 +index 4f3b025daa14f..6502c8056a8ee 100644
1296 +--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
1297 ++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
1298 +@@ -2652,8 +2652,11 @@ static int iavf_validate_ch_config(struct iavf_adapter *adapter,
1299 + total_max_rate += tx_rate;
1300 + num_qps += mqprio_qopt->qopt.count[i];
1301 + }
1302 +- if (num_qps > IAVF_MAX_REQ_QUEUES)
1303 ++ if (num_qps > adapter->num_active_queues) {
1304 ++ dev_err(&adapter->pdev->dev,
1305 ++ "Cannot support requested number of queues\n");
1306 + return -EINVAL;
1307 ++ }
1308 +
1309 + ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
1310 + return ret;
1311 +diff --git a/drivers/net/ethernet/sfc/falcon/rx.c b/drivers/net/ethernet/sfc/falcon/rx.c
1312 +index 11a6aee852e92..0c6cc21913693 100644
1313 +--- a/drivers/net/ethernet/sfc/falcon/rx.c
1314 ++++ b/drivers/net/ethernet/sfc/falcon/rx.c
1315 +@@ -110,6 +110,8 @@ static struct page *ef4_reuse_page(struct ef4_rx_queue *rx_queue)
1316 + struct ef4_rx_page_state *state;
1317 + unsigned index;
1318 +
1319 ++ if (unlikely(!rx_queue->page_ring))
1320 ++ return NULL;
1321 + index = rx_queue->page_remove & rx_queue->page_ptr_mask;
1322 + page = rx_queue->page_ring[index];
1323 + if (page == NULL)
1324 +@@ -293,6 +295,9 @@ static void ef4_recycle_rx_pages(struct ef4_channel *channel,
1325 + {
1326 + struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);
1327 +
1328 ++ if (unlikely(!rx_queue->page_ring))
1329 ++ return;
1330 ++
1331 + do {
1332 + ef4_recycle_rx_page(channel, rx_buf);
1333 + rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
1334 +diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
1335 +index 0983abc0cc5f0..633ca77a26fd1 100644
1336 +--- a/drivers/net/ethernet/sfc/rx_common.c
1337 ++++ b/drivers/net/ethernet/sfc/rx_common.c
1338 +@@ -45,6 +45,8 @@ static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
1339 + unsigned int index;
1340 + struct page *page;
1341 +
1342 ++ if (unlikely(!rx_queue->page_ring))
1343 ++ return NULL;
1344 + index = rx_queue->page_remove & rx_queue->page_ptr_mask;
1345 + page = rx_queue->page_ring[index];
1346 + if (page == NULL)
1347 +@@ -114,6 +116,9 @@ void efx_recycle_rx_pages(struct efx_channel *channel,
1348 + {
1349 + struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
1350 +
1351 ++ if (unlikely(!rx_queue->page_ring))
1352 ++ return;
1353 ++
1354 + do {
1355 + efx_recycle_rx_page(channel, rx_buf);
1356 + rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
1357 +diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
1358 +index 23ee0b14cbfa1..2f5e7b31032aa 100644
1359 +--- a/drivers/net/ieee802154/atusb.c
1360 ++++ b/drivers/net/ieee802154/atusb.c
1361 +@@ -93,7 +93,9 @@ static int atusb_control_msg(struct atusb *atusb, unsigned int pipe,
1362 +
1363 + ret = usb_control_msg(usb_dev, pipe, request, requesttype,
1364 + value, index, data, size, timeout);
1365 +- if (ret < 0) {
1366 ++ if (ret < size) {
1367 ++ ret = ret < 0 ? ret : -ENODATA;
1368 ++
1369 + atusb->err = ret;
1370 + dev_err(&usb_dev->dev,
1371 + "%s: req 0x%02x val 0x%x idx 0x%x, error %d\n",
1372 +@@ -861,9 +863,9 @@ static int atusb_get_and_show_build(struct atusb *atusb)
1373 + if (!build)
1374 + return -ENOMEM;
1375 +
1376 +- ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
1377 +- ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0,
1378 +- build, ATUSB_BUILD_SIZE, 1000);
1379 ++ /* We cannot call atusb_control_msg() here, since this request may read variable-length data */
1380 ++ ret = usb_control_msg(atusb->usb_dev, usb_rcvctrlpipe(usb_dev, 0), ATUSB_BUILD,
1381 ++ ATUSB_REQ_FROM_DEV, 0, 0, build, ATUSB_BUILD_SIZE, 1000);
1382 + if (ret >= 0) {
1383 + build[ret] = 0;
1384 + dev_info(&usb_dev->dev, "Firmware: build %s\n", build);
1385 +diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
1386 +index c08e0857e8a70..d467a9f3bb44d 100644
1387 +--- a/drivers/net/usb/r8152.c
1388 ++++ b/drivers/net/usb/r8152.c
1389 +@@ -9638,9 +9638,12 @@ static int rtl8152_probe(struct usb_interface *intf,
1390 + netdev->hw_features &= ~NETIF_F_RXCSUM;
1391 + }
1392 +
1393 +- if (udev->parent &&
1394 +- le16_to_cpu(udev->parent->descriptor.idVendor) == VENDOR_ID_LENOVO) {
1395 +- tp->lenovo_macpassthru = 1;
1396 ++ if (le16_to_cpu(udev->descriptor.idVendor) == VENDOR_ID_LENOVO) {
1397 ++ switch (le16_to_cpu(udev->descriptor.idProduct)) {
1398 ++ case DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2:
1399 ++ case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2:
1400 ++ tp->lenovo_macpassthru = 1;
1401 ++ }
1402 + }
1403 +
1404 + if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial &&
1405 +diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
1406 +index 85a8b96e39a65..bedd36ab5cf01 100644
1407 +--- a/drivers/net/usb/rndis_host.c
1408 ++++ b/drivers/net/usb/rndis_host.c
1409 +@@ -608,6 +608,11 @@ static const struct usb_device_id products [] = {
1410 + USB_DEVICE_AND_INTERFACE_INFO(0x1630, 0x0042,
1411 + USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
1412 + .driver_info = (unsigned long) &rndis_poll_status_info,
1413 ++}, {
1414 ++ /* Hytera Communications DMR radios' "Radio to PC Network" */
1415 ++ USB_VENDOR_AND_INTERFACE_INFO(0x238b,
1416 ++ USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
1417 ++ .driver_info = (unsigned long)&rndis_info,
1418 + }, {
1419 + /* RNDIS is MSFT's un-official variant of CDC ACM */
1420 + USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
1421 +diff --git a/drivers/power/reset/ltc2952-poweroff.c b/drivers/power/reset/ltc2952-poweroff.c
1422 +index 8688c8ba8894c..81be33c041d6b 100644
1423 +--- a/drivers/power/reset/ltc2952-poweroff.c
1424 ++++ b/drivers/power/reset/ltc2952-poweroff.c
1425 +@@ -161,8 +161,8 @@ static void ltc2952_poweroff_kill(void)
1426 +
1427 + static void ltc2952_poweroff_default(struct ltc2952_poweroff *data)
1428 + {
1429 +- data->wde_interval = 300L * 1E6L;
1430 +- data->trigger_delay = ktime_set(2, 500L*1E6L);
1431 ++ data->wde_interval = 300L * NSEC_PER_MSEC;
1432 ++ data->trigger_delay = ktime_set(2, 500L * NSEC_PER_MSEC);
1433 +
1434 + hrtimer_init(&data->timer_trigger, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1435 + data->timer_trigger.function = ltc2952_poweroff_timer_trigger;
1436 +diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c
1437 +index 945c3257ca931..fe814805c68b5 100644
1438 +--- a/drivers/power/supply/bq25890_charger.c
1439 ++++ b/drivers/power/supply/bq25890_charger.c
1440 +@@ -581,12 +581,12 @@ static irqreturn_t __bq25890_handle_irq(struct bq25890_device *bq)
1441 +
1442 + if (!new_state.online && bq->state.online) { /* power removed */
1443 + /* disable ADC */
1444 +- ret = bq25890_field_write(bq, F_CONV_START, 0);
1445 ++ ret = bq25890_field_write(bq, F_CONV_RATE, 0);
1446 + if (ret < 0)
1447 + goto error;
1448 + } else if (new_state.online && !bq->state.online) { /* power inserted */
1449 + /* enable ADC, to have control of charge current/voltage */
1450 +- ret = bq25890_field_write(bq, F_CONV_START, 1);
1451 ++ ret = bq25890_field_write(bq, F_CONV_RATE, 1);
1452 + if (ret < 0)
1453 + goto error;
1454 + }
1455 +diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
1456 +index 0c2132c7f5d40..a6e9afa5a1cff 100644
1457 +--- a/drivers/power/supply/power_supply_core.c
1458 ++++ b/drivers/power/supply/power_supply_core.c
1459 +@@ -853,6 +853,10 @@ power_supply_find_ocv2cap_table(struct power_supply_battery_info *info,
1460 + return NULL;
1461 +
1462 + for (i = 0; i < POWER_SUPPLY_OCV_TEMP_MAX; i++) {
1463 ++ /* Out of capacity tables */
1464 ++ if (!info->ocv_table[i])
1465 ++ break;
1466 ++
1467 + temp_diff = abs(info->ocv_temp[i] - temp);
1468 +
1469 + if (temp_diff < best_temp_diff) {
1470 +diff --git a/drivers/reset/reset-rzg2l-usbphy-ctrl.c b/drivers/reset/reset-rzg2l-usbphy-ctrl.c
1471 +index e0704fd2b5336..1e83150388506 100644
1472 +--- a/drivers/reset/reset-rzg2l-usbphy-ctrl.c
1473 ++++ b/drivers/reset/reset-rzg2l-usbphy-ctrl.c
1474 +@@ -137,7 +137,12 @@ static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev)
1475 + dev_set_drvdata(dev, priv);
1476 +
1477 + pm_runtime_enable(&pdev->dev);
1478 +- pm_runtime_resume_and_get(&pdev->dev);
1479 ++ error = pm_runtime_resume_and_get(&pdev->dev);
1480 ++ if (error < 0) {
1481 ++ pm_runtime_disable(&pdev->dev);
1482 ++ reset_control_assert(priv->rstc);
1483 ++ return dev_err_probe(&pdev->dev, error, "pm_runtime_resume_and_get failed");
1484 ++ }
1485 +
1486 + /* put pll and phy into reset state */
1487 + spin_lock_irqsave(&priv->lock, flags);
1488 +diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
1489 +index 5bc91d34df634..cbc263ec9d661 100644
1490 +--- a/drivers/scsi/libiscsi.c
1491 ++++ b/drivers/scsi/libiscsi.c
1492 +@@ -3101,6 +3101,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
1493 + {
1494 + struct iscsi_conn *conn = cls_conn->dd_data;
1495 + struct iscsi_session *session = conn->session;
1496 ++ char *tmp_persistent_address = conn->persistent_address;
1497 ++ char *tmp_local_ipaddr = conn->local_ipaddr;
1498 +
1499 + del_timer_sync(&conn->transport_timer);
1500 +
1501 +@@ -3122,8 +3124,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
1502 + spin_lock_bh(&session->frwd_lock);
1503 + free_pages((unsigned long) conn->data,
1504 + get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
1505 +- kfree(conn->persistent_address);
1506 +- kfree(conn->local_ipaddr);
1507 + /* regular RX path uses back_lock */
1508 + spin_lock_bh(&session->back_lock);
1509 + kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
1510 +@@ -3135,6 +3135,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
1511 + mutex_unlock(&session->eh_mutex);
1512 +
1513 + iscsi_destroy_conn(cls_conn);
1514 ++ kfree(tmp_persistent_address);
1515 ++ kfree(tmp_local_ipaddr);
1516 + }
1517 + EXPORT_SYMBOL_GPL(iscsi_conn_teardown);
1518 +
1519 +diff --git a/drivers/usb/mtu3/mtu3_gadget.c b/drivers/usb/mtu3/mtu3_gadget.c
1520 +index 0b21da4ee1836..9977600616d7e 100644
1521 +--- a/drivers/usb/mtu3/mtu3_gadget.c
1522 ++++ b/drivers/usb/mtu3/mtu3_gadget.c
1523 +@@ -77,7 +77,7 @@ static int mtu3_ep_enable(struct mtu3_ep *mep)
1524 + if (usb_endpoint_xfer_int(desc) ||
1525 + usb_endpoint_xfer_isoc(desc)) {
1526 + interval = desc->bInterval;
1527 +- interval = clamp_val(interval, 1, 16) - 1;
1528 ++ interval = clamp_val(interval, 1, 16);
1529 + if (usb_endpoint_xfer_isoc(desc) && comp_desc)
1530 + mult = comp_desc->bmAttributes;
1531 + }
1532 +@@ -89,7 +89,7 @@ static int mtu3_ep_enable(struct mtu3_ep *mep)
1533 + if (usb_endpoint_xfer_isoc(desc) ||
1534 + usb_endpoint_xfer_int(desc)) {
1535 + interval = desc->bInterval;
1536 +- interval = clamp_val(interval, 1, 16) - 1;
1537 ++ interval = clamp_val(interval, 1, 16);
1538 + mult = usb_endpoint_maxp_mult(desc) - 1;
1539 + }
1540 + break;
1541 +diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
1542 +index 7420d2c16e47e..7bd5e2a4a9da5 100644
1543 +--- a/drivers/video/fbdev/core/fbmem.c
1544 ++++ b/drivers/video/fbdev/core/fbmem.c
1545 +@@ -1759,6 +1759,53 @@ int remove_conflicting_framebuffers(struct apertures_struct *a,
1546 + }
1547 + EXPORT_SYMBOL(remove_conflicting_framebuffers);
1548 +
1549 ++/**
1550 ++ * is_firmware_framebuffer - detect if firmware-configured framebuffer matches
1551 ++ * @a: memory range, users of which are to be checked
1552 ++ *
1553 ++ * This function checks framebuffer devices (initialized by firmware/bootloader)
1554 ++ * which use memory range described by @a. If @a matchesm the function returns
1555 ++ * true, otherwise false.
1556 ++ */
1557 ++bool is_firmware_framebuffer(struct apertures_struct *a)
1558 ++{
1559 ++ bool do_free = false;
1560 ++ bool found = false;
1561 ++ int i;
1562 ++
1563 ++ if (!a) {
1564 ++ a = alloc_apertures(1);
1565 ++ if (!a)
1566 ++ return false;
1567 ++
1568 ++ a->ranges[0].base = 0;
1569 ++ a->ranges[0].size = ~0;
1570 ++ do_free = true;
1571 ++ }
1572 ++
1573 ++ mutex_lock(&registration_lock);
1574 ++ /* check all firmware fbs and report whether any base addr overlaps */
1575 ++ for_each_registered_fb(i) {
1576 ++ struct apertures_struct *gen_aper;
1577 ++
1578 ++ if (!(registered_fb[i]->flags & FBINFO_MISC_FIRMWARE))
1579 ++ continue;
1580 ++
1581 ++ gen_aper = registered_fb[i]->apertures;
1582 ++ if (fb_do_apertures_overlap(gen_aper, a)) {
1583 ++ found = true;
1584 ++ break;
1585 ++ }
1586 ++ }
1587 ++ mutex_unlock(&registration_lock);
1588 ++
1589 ++ if (do_free)
1590 ++ kfree(a);
1591 ++
1592 ++ return found;
1593 ++}
1594 ++EXPORT_SYMBOL(is_firmware_framebuffer);
1595 ++
1596 + /**
1597 + * remove_conflicting_pci_framebuffers - remove firmware-configured framebuffers for PCI devices
1598 + * @pdev: PCI device
1599 +diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
1600 +index 0c795dc093efa..09269f478df9c 100644
1601 +--- a/fs/xfs/xfs_ioctl.c
1602 ++++ b/fs/xfs/xfs_ioctl.c
1603 +@@ -687,7 +687,8 @@ xfs_ioc_space(
1604 +
1605 + if (bf->l_start > XFS_ISIZE(ip)) {
1606 + error = xfs_alloc_file_space(ip, XFS_ISIZE(ip),
1607 +- bf->l_start - XFS_ISIZE(ip), 0);
1608 ++ bf->l_start - XFS_ISIZE(ip),
1609 ++ XFS_BMAPI_PREALLOC);
1610 + if (error)
1611 + goto out_unlock;
1612 + }
1613 +diff --git a/include/linux/fb.h b/include/linux/fb.h
1614 +index 5950f8f5dc74d..02f362c661c80 100644
1615 +--- a/include/linux/fb.h
1616 ++++ b/include/linux/fb.h
1617 +@@ -610,6 +610,7 @@ extern int remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
1618 + const char *name);
1619 + extern int remove_conflicting_framebuffers(struct apertures_struct *a,
1620 + const char *name, bool primary);
1621 ++extern bool is_firmware_framebuffer(struct apertures_struct *a);
1622 + extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);
1623 + extern int fb_show_logo(struct fb_info *fb_info, int rotate);
1624 + extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size);
1625 +diff --git a/include/linux/fscache.h b/include/linux/fscache.h
1626 +index a4dab59986137..3b2282c157f79 100644
1627 +--- a/include/linux/fscache.h
1628 ++++ b/include/linux/fscache.h
1629 +@@ -167,7 +167,7 @@ struct fscache_cookie {
1630 +
1631 + static inline bool fscache_cookie_enabled(struct fscache_cookie *cookie)
1632 + {
1633 +- return test_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
1634 ++ return fscache_cookie_valid(cookie) && test_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
1635 + }
1636 +
1637 + /*
1638 +diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
1639 +index d314a180ab93d..3ae61ce2eabd0 100644
1640 +--- a/include/net/sctp/sctp.h
1641 ++++ b/include/net/sctp/sctp.h
1642 +@@ -112,8 +112,7 @@ struct sctp_transport *sctp_transport_get_next(struct net *net,
1643 + struct rhashtable_iter *iter);
1644 + struct sctp_transport *sctp_transport_get_idx(struct net *net,
1645 + struct rhashtable_iter *iter, int pos);
1646 +-int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
1647 +- struct net *net,
1648 ++int sctp_transport_lookup_process(sctp_callback_t cb, struct net *net,
1649 + const union sctp_addr *laddr,
1650 + const union sctp_addr *paddr, void *p);
1651 + int sctp_transport_traverse_process(sctp_callback_t cb, sctp_callback_t cb_done,
1652 +diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h
1653 +index bfbeabc17a9df..6e36e854b5124 100644
1654 +--- a/kernel/cgroup/cgroup-internal.h
1655 ++++ b/kernel/cgroup/cgroup-internal.h
1656 +@@ -65,6 +65,25 @@ static inline struct cgroup_fs_context *cgroup_fc2context(struct fs_context *fc)
1657 + return container_of(kfc, struct cgroup_fs_context, kfc);
1658 + }
1659 +
1660 ++struct cgroup_pidlist;
1661 ++
1662 ++struct cgroup_file_ctx {
1663 ++ struct cgroup_namespace *ns;
1664 ++
1665 ++ struct {
1666 ++ void *trigger;
1667 ++ } psi;
1668 ++
1669 ++ struct {
1670 ++ bool started;
1671 ++ struct css_task_iter iter;
1672 ++ } procs;
1673 ++
1674 ++ struct {
1675 ++ struct cgroup_pidlist *pidlist;
1676 ++ } procs1;
1677 ++};
1678 ++
1679 + /*
1680 + * A cgroup can be associated with multiple css_sets as different tasks may
1681 + * belong to different cgroups on different hierarchies. In the other
1682 +diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
1683 +index 35b9203283447..9537443de22dd 100644
1684 +--- a/kernel/cgroup/cgroup-v1.c
1685 ++++ b/kernel/cgroup/cgroup-v1.c
1686 +@@ -397,6 +397,7 @@ static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
1687 + * next pid to display, if any
1688 + */
1689 + struct kernfs_open_file *of = s->private;
1690 ++ struct cgroup_file_ctx *ctx = of->priv;
1691 + struct cgroup *cgrp = seq_css(s)->cgroup;
1692 + struct cgroup_pidlist *l;
1693 + enum cgroup_filetype type = seq_cft(s)->private;
1694 +@@ -406,25 +407,24 @@ static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
1695 + mutex_lock(&cgrp->pidlist_mutex);
1696 +
1697 + /*
1698 +- * !NULL @of->priv indicates that this isn't the first start()
1699 +- * after open. If the matching pidlist is around, we can use that.
1700 +- * Look for it. Note that @of->priv can't be used directly. It
1701 +- * could already have been destroyed.
1702 ++ * !NULL @ctx->procs1.pidlist indicates that this isn't the first
1703 ++ * start() after open. If the matching pidlist is around, we can use
1704 ++ * that. Look for it. Note that @ctx->procs1.pidlist can't be used
1705 ++ * directly. It could already have been destroyed.
1706 + */
1707 +- if (of->priv)
1708 +- of->priv = cgroup_pidlist_find(cgrp, type);
1709 ++ if (ctx->procs1.pidlist)
1710 ++ ctx->procs1.pidlist = cgroup_pidlist_find(cgrp, type);
1711 +
1712 + /*
1713 + * Either this is the first start() after open or the matching
1714 + * pidlist has been destroyed in between. Create a new one.
1715 + */
1716 +- if (!of->priv) {
1717 +- ret = pidlist_array_load(cgrp, type,
1718 +- (struct cgroup_pidlist **)&of->priv);
1719 ++ if (!ctx->procs1.pidlist) {
1720 ++ ret = pidlist_array_load(cgrp, type, &ctx->procs1.pidlist);
1721 + if (ret)
1722 + return ERR_PTR(ret);
1723 + }
1724 +- l = of->priv;
1725 ++ l = ctx->procs1.pidlist;
1726 +
1727 + if (pid) {
1728 + int end = l->length;
1729 +@@ -452,7 +452,8 @@ static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
1730 + static void cgroup_pidlist_stop(struct seq_file *s, void *v)
1731 + {
1732 + struct kernfs_open_file *of = s->private;
1733 +- struct cgroup_pidlist *l = of->priv;
1734 ++ struct cgroup_file_ctx *ctx = of->priv;
1735 ++ struct cgroup_pidlist *l = ctx->procs1.pidlist;
1736 +
1737 + if (l)
1738 + mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
1739 +@@ -463,7 +464,8 @@ static void cgroup_pidlist_stop(struct seq_file *s, void *v)
1740 + static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
1741 + {
1742 + struct kernfs_open_file *of = s->private;
1743 +- struct cgroup_pidlist *l = of->priv;
1744 ++ struct cgroup_file_ctx *ctx = of->priv;
1745 ++ struct cgroup_pidlist *l = ctx->procs1.pidlist;
1746 + pid_t *p = v;
1747 + pid_t *end = l->list + l->length;
1748 + /*
1749 +@@ -507,10 +509,11 @@ static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of,
1750 + goto out_unlock;
1751 +
1752 + /*
1753 +- * Even if we're attaching all tasks in the thread group, we only
1754 +- * need to check permissions on one of them.
1755 ++ * Even if we're attaching all tasks in the thread group, we only need
1756 ++ * to check permissions on one of them. Check permissions using the
1757 ++ * credentials from file open to protect against inherited fd attacks.
1758 + */
1759 +- cred = current_cred();
1760 ++ cred = of->file->f_cred;
1761 + tcred = get_task_cred(task);
1762 + if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
1763 + !uid_eq(cred->euid, tcred->uid) &&
1764 +diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
1765 +index d6ea872b23aad..bb1a78ff14374 100644
1766 +--- a/kernel/cgroup/cgroup.c
1767 ++++ b/kernel/cgroup/cgroup.c
1768 +@@ -3630,6 +3630,7 @@ static int cgroup_cpu_pressure_show(struct seq_file *seq, void *v)
1769 + static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf,
1770 + size_t nbytes, enum psi_res res)
1771 + {
1772 ++ struct cgroup_file_ctx *ctx = of->priv;
1773 + struct psi_trigger *new;
1774 + struct cgroup *cgrp;
1775 + struct psi_group *psi;
1776 +@@ -3648,7 +3649,7 @@ static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf,
1777 + return PTR_ERR(new);
1778 + }
1779 +
1780 +- psi_trigger_replace(&of->priv, new);
1781 ++ psi_trigger_replace(&ctx->psi.trigger, new);
1782 +
1783 + cgroup_put(cgrp);
1784 +
1785 +@@ -3679,12 +3680,16 @@ static ssize_t cgroup_cpu_pressure_write(struct kernfs_open_file *of,
1786 + static __poll_t cgroup_pressure_poll(struct kernfs_open_file *of,
1787 + poll_table *pt)
1788 + {
1789 +- return psi_trigger_poll(&of->priv, of->file, pt);
1790 ++ struct cgroup_file_ctx *ctx = of->priv;
1791 ++
1792 ++ return psi_trigger_poll(&ctx->psi.trigger, of->file, pt);
1793 + }
1794 +
1795 + static void cgroup_pressure_release(struct kernfs_open_file *of)
1796 + {
1797 +- psi_trigger_replace(&of->priv, NULL);
1798 ++ struct cgroup_file_ctx *ctx = of->priv;
1799 ++
1800 ++ psi_trigger_replace(&ctx->psi.trigger, NULL);
1801 + }
1802 +
1803 + bool cgroup_psi_enabled(void)
1804 +@@ -3811,24 +3816,43 @@ static ssize_t cgroup_kill_write(struct kernfs_open_file *of, char *buf,
1805 + static int cgroup_file_open(struct kernfs_open_file *of)
1806 + {
1807 + struct cftype *cft = of_cft(of);
1808 ++ struct cgroup_file_ctx *ctx;
1809 ++ int ret;
1810 +
1811 +- if (cft->open)
1812 +- return cft->open(of);
1813 +- return 0;
1814 ++ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1815 ++ if (!ctx)
1816 ++ return -ENOMEM;
1817 ++
1818 ++ ctx->ns = current->nsproxy->cgroup_ns;
1819 ++ get_cgroup_ns(ctx->ns);
1820 ++ of->priv = ctx;
1821 ++
1822 ++ if (!cft->open)
1823 ++ return 0;
1824 ++
1825 ++ ret = cft->open(of);
1826 ++ if (ret) {
1827 ++ put_cgroup_ns(ctx->ns);
1828 ++ kfree(ctx);
1829 ++ }
1830 ++ return ret;
1831 + }
1832 +
1833 + static void cgroup_file_release(struct kernfs_open_file *of)
1834 + {
1835 + struct cftype *cft = of_cft(of);
1836 ++ struct cgroup_file_ctx *ctx = of->priv;
1837 +
1838 + if (cft->release)
1839 + cft->release(of);
1840 ++ put_cgroup_ns(ctx->ns);
1841 ++ kfree(ctx);
1842 + }
1843 +
1844 + static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
1845 + size_t nbytes, loff_t off)
1846 + {
1847 +- struct cgroup_namespace *ns = current->nsproxy->cgroup_ns;
1848 ++ struct cgroup_file_ctx *ctx = of->priv;
1849 + struct cgroup *cgrp = of->kn->parent->priv;
1850 + struct cftype *cft = of_cft(of);
1851 + struct cgroup_subsys_state *css;
1852 +@@ -3845,7 +3869,7 @@ static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
1853 + */
1854 + if ((cgrp->root->flags & CGRP_ROOT_NS_DELEGATE) &&
1855 + !(cft->flags & CFTYPE_NS_DELEGATABLE) &&
1856 +- ns != &init_cgroup_ns && ns->root_cset->dfl_cgrp == cgrp)
1857 ++ ctx->ns != &init_cgroup_ns && ctx->ns->root_cset->dfl_cgrp == cgrp)
1858 + return -EPERM;
1859 +
1860 + if (cft->write)
1861 +@@ -4751,21 +4775,21 @@ void css_task_iter_end(struct css_task_iter *it)
1862 +
1863 + static void cgroup_procs_release(struct kernfs_open_file *of)
1864 + {
1865 +- if (of->priv) {
1866 +- css_task_iter_end(of->priv);
1867 +- kfree(of->priv);
1868 +- }
1869 ++ struct cgroup_file_ctx *ctx = of->priv;
1870 ++
1871 ++ if (ctx->procs.started)
1872 ++ css_task_iter_end(&ctx->procs.iter);
1873 + }
1874 +
1875 + static void *cgroup_procs_next(struct seq_file *s, void *v, loff_t *pos)
1876 + {
1877 + struct kernfs_open_file *of = s->private;
1878 +- struct css_task_iter *it = of->priv;
1879 ++ struct cgroup_file_ctx *ctx = of->priv;
1880 +
1881 + if (pos)
1882 + (*pos)++;
1883 +
1884 +- return css_task_iter_next(it);
1885 ++ return css_task_iter_next(&ctx->procs.iter);
1886 + }
1887 +
1888 + static void *__cgroup_procs_start(struct seq_file *s, loff_t *pos,
1889 +@@ -4773,21 +4797,18 @@ static void *__cgroup_procs_start(struct seq_file *s, loff_t *pos,
1890 + {
1891 + struct kernfs_open_file *of = s->private;
1892 + struct cgroup *cgrp = seq_css(s)->cgroup;
1893 +- struct css_task_iter *it = of->priv;
1894 ++ struct cgroup_file_ctx *ctx = of->priv;
1895 ++ struct css_task_iter *it = &ctx->procs.iter;
1896 +
1897 + /*
1898 + * When a seq_file is seeked, it's always traversed sequentially
1899 + * from position 0, so we can simply keep iterating on !0 *pos.
1900 + */
1901 +- if (!it) {
1902 ++ if (!ctx->procs.started) {
1903 + if (WARN_ON_ONCE((*pos)))
1904 + return ERR_PTR(-EINVAL);
1905 +-
1906 +- it = kzalloc(sizeof(*it), GFP_KERNEL);
1907 +- if (!it)
1908 +- return ERR_PTR(-ENOMEM);
1909 +- of->priv = it;
1910 + css_task_iter_start(&cgrp->self, iter_flags, it);
1911 ++ ctx->procs.started = true;
1912 + } else if (!(*pos)) {
1913 + css_task_iter_end(it);
1914 + css_task_iter_start(&cgrp->self, iter_flags, it);
1915 +@@ -4838,9 +4859,9 @@ static int cgroup_may_write(const struct cgroup *cgrp, struct super_block *sb)
1916 +
1917 + static int cgroup_procs_write_permission(struct cgroup *src_cgrp,
1918 + struct cgroup *dst_cgrp,
1919 +- struct super_block *sb)
1920 ++ struct super_block *sb,
1921 ++ struct cgroup_namespace *ns)
1922 + {
1923 +- struct cgroup_namespace *ns = current->nsproxy->cgroup_ns;
1924 + struct cgroup *com_cgrp = src_cgrp;
1925 + int ret;
1926 +
1927 +@@ -4869,11 +4890,12 @@ static int cgroup_procs_write_permission(struct cgroup *src_cgrp,
1928 +
1929 + static int cgroup_attach_permissions(struct cgroup *src_cgrp,
1930 + struct cgroup *dst_cgrp,
1931 +- struct super_block *sb, bool threadgroup)
1932 ++ struct super_block *sb, bool threadgroup,
1933 ++ struct cgroup_namespace *ns)
1934 + {
1935 + int ret = 0;
1936 +
1937 +- ret = cgroup_procs_write_permission(src_cgrp, dst_cgrp, sb);
1938 ++ ret = cgroup_procs_write_permission(src_cgrp, dst_cgrp, sb, ns);
1939 + if (ret)
1940 + return ret;
1941 +
1942 +@@ -4890,8 +4912,10 @@ static int cgroup_attach_permissions(struct cgroup *src_cgrp,
1943 + static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
1944 + bool threadgroup)
1945 + {
1946 ++ struct cgroup_file_ctx *ctx = of->priv;
1947 + struct cgroup *src_cgrp, *dst_cgrp;
1948 + struct task_struct *task;
1949 ++ const struct cred *saved_cred;
1950 + ssize_t ret;
1951 + bool locked;
1952 +
1953 +@@ -4909,9 +4933,16 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
1954 + src_cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
1955 + spin_unlock_irq(&css_set_lock);
1956 +
1957 +- /* process and thread migrations follow same delegation rule */
1958 ++ /*
1959 ++ * Process and thread migrations follow the same delegation rule. Check
1960 ++ * permissions using the credentials from file open to protect against
1961 ++ * inherited fd attacks.
1962 ++ */
1963 ++ saved_cred = override_creds(of->file->f_cred);
1964 + ret = cgroup_attach_permissions(src_cgrp, dst_cgrp,
1965 +- of->file->f_path.dentry->d_sb, threadgroup);
1966 ++ of->file->f_path.dentry->d_sb,
1967 ++ threadgroup, ctx->ns);
1968 ++ revert_creds(saved_cred);
1969 + if (ret)
1970 + goto out_finish;
1971 +
1972 +@@ -6127,7 +6158,8 @@ static int cgroup_css_set_fork(struct kernel_clone_args *kargs)
1973 + goto err;
1974 +
1975 + ret = cgroup_attach_permissions(cset->dfl_cgrp, dst_cgrp, sb,
1976 +- !(kargs->flags & CLONE_THREAD));
1977 ++ !(kargs->flags & CLONE_THREAD),
1978 ++ current->nsproxy->cgroup_ns);
1979 + if (ret)
1980 + goto err;
1981 +
1982 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
1983 +index 18db461f77cdf..ce05ba0412889 100644
1984 +--- a/kernel/trace/trace.c
1985 ++++ b/kernel/trace/trace.c
1986 +@@ -3231,7 +3231,7 @@ struct trace_buffer_struct {
1987 + char buffer[4][TRACE_BUF_SIZE];
1988 + };
1989 +
1990 +-static struct trace_buffer_struct *trace_percpu_buffer;
1991 ++static struct trace_buffer_struct __percpu *trace_percpu_buffer;
1992 +
1993 + /*
1994 + * This allows for lockless recording. If we're nested too deeply, then
1995 +@@ -3241,7 +3241,7 @@ static char *get_trace_buf(void)
1996 + {
1997 + struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
1998 +
1999 +- if (!buffer || buffer->nesting >= 4)
2000 ++ if (!trace_percpu_buffer || buffer->nesting >= 4)
2001 + return NULL;
2002 +
2003 + buffer->nesting++;
2004 +@@ -3260,7 +3260,7 @@ static void put_trace_buf(void)
2005 +
2006 + static int alloc_percpu_trace_buffer(void)
2007 + {
2008 +- struct trace_buffer_struct *buffers;
2009 ++ struct trace_buffer_struct __percpu *buffers;
2010 +
2011 + if (trace_percpu_buffer)
2012 + return 0;
2013 +diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
2014 +index a3b6658ed7899..6e3419beca098 100644
2015 +--- a/net/batman-adv/multicast.c
2016 ++++ b/net/batman-adv/multicast.c
2017 +@@ -1339,6 +1339,7 @@ batadv_mcast_forw_rtr_node_get(struct batadv_priv *bat_priv,
2018 + * @bat_priv: the bat priv with all the soft interface information
2019 + * @skb: The multicast packet to check
2020 + * @orig: an originator to be set to forward the skb to
2021 ++ * @is_routable: stores whether the destination is routable
2022 + *
2023 + * Return: the forwarding mode as enum batadv_forw_mode and in case of
2024 + * BATADV_FORW_SINGLE set the orig to the single originator the skb
2025 +@@ -1346,17 +1347,16 @@ batadv_mcast_forw_rtr_node_get(struct batadv_priv *bat_priv,
2026 + */
2027 + enum batadv_forw_mode
2028 + batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
2029 +- struct batadv_orig_node **orig)
2030 ++ struct batadv_orig_node **orig, int *is_routable)
2031 + {
2032 + int ret, tt_count, ip_count, unsnoop_count, total_count;
2033 + bool is_unsnoopable = false;
2034 + unsigned int mcast_fanout;
2035 + struct ethhdr *ethhdr;
2036 +- int is_routable = 0;
2037 + int rtr_count = 0;
2038 +
2039 + ret = batadv_mcast_forw_mode_check(bat_priv, skb, &is_unsnoopable,
2040 +- &is_routable);
2041 ++ is_routable);
2042 + if (ret == -ENOMEM)
2043 + return BATADV_FORW_NONE;
2044 + else if (ret < 0)
2045 +@@ -1369,7 +1369,7 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
2046 + ip_count = batadv_mcast_forw_want_all_ip_count(bat_priv, ethhdr);
2047 + unsnoop_count = !is_unsnoopable ? 0 :
2048 + atomic_read(&bat_priv->mcast.num_want_all_unsnoopables);
2049 +- rtr_count = batadv_mcast_forw_rtr_count(bat_priv, is_routable);
2050 ++ rtr_count = batadv_mcast_forw_rtr_count(bat_priv, *is_routable);
2051 +
2052 + total_count = tt_count + ip_count + unsnoop_count + rtr_count;
2053 +
2054 +@@ -1689,6 +1689,7 @@ batadv_mcast_forw_want_rtr(struct batadv_priv *bat_priv,
2055 + * @bat_priv: the bat priv with all the soft interface information
2056 + * @skb: the multicast packet to transmit
2057 + * @vid: the vlan identifier
2058 ++ * @is_routable: stores whether the destination is routable
2059 + *
2060 + * Sends copies of a frame with multicast destination to any node that signaled
2061 + * interest in it, that is either via the translation table or the according
2062 +@@ -1701,7 +1702,7 @@ batadv_mcast_forw_want_rtr(struct batadv_priv *bat_priv,
2063 + * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
2064 + */
2065 + int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
2066 +- unsigned short vid)
2067 ++ unsigned short vid, int is_routable)
2068 + {
2069 + int ret;
2070 +
2071 +@@ -1717,12 +1718,16 @@ int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
2072 + return ret;
2073 + }
2074 +
2075 ++ if (!is_routable)
2076 ++ goto skip_mc_router;
2077 ++
2078 + ret = batadv_mcast_forw_want_rtr(bat_priv, skb, vid);
2079 + if (ret != NET_XMIT_SUCCESS) {
2080 + kfree_skb(skb);
2081 + return ret;
2082 + }
2083 +
2084 ++skip_mc_router:
2085 + consume_skb(skb);
2086 + return ret;
2087 + }
2088 +diff --git a/net/batman-adv/multicast.h b/net/batman-adv/multicast.h
2089 +index 9fee5da083113..8aec818d0bf63 100644
2090 +--- a/net/batman-adv/multicast.h
2091 ++++ b/net/batman-adv/multicast.h
2092 +@@ -43,7 +43,8 @@ enum batadv_forw_mode {
2093 +
2094 + enum batadv_forw_mode
2095 + batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
2096 +- struct batadv_orig_node **mcast_single_orig);
2097 ++ struct batadv_orig_node **mcast_single_orig,
2098 ++ int *is_routable);
2099 +
2100 + int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
2101 + struct sk_buff *skb,
2102 +@@ -51,7 +52,7 @@ int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
2103 + struct batadv_orig_node *orig_node);
2104 +
2105 + int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
2106 +- unsigned short vid);
2107 ++ unsigned short vid, int is_routable);
2108 +
2109 + void batadv_mcast_init(struct batadv_priv *bat_priv);
2110 +
2111 +@@ -68,7 +69,8 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig_node);
2112 +
2113 + static inline enum batadv_forw_mode
2114 + batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
2115 +- struct batadv_orig_node **mcast_single_orig)
2116 ++ struct batadv_orig_node **mcast_single_orig,
2117 ++ int *is_routable)
2118 + {
2119 + return BATADV_FORW_ALL;
2120 + }
2121 +@@ -85,7 +87,7 @@ batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
2122 +
2123 + static inline int
2124 + batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
2125 +- unsigned short vid)
2126 ++ unsigned short vid, int is_routable)
2127 + {
2128 + kfree_skb(skb);
2129 + return NET_XMIT_DROP;
2130 +diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
2131 +index 0604b02795731..6ab28b509d4bb 100644
2132 +--- a/net/batman-adv/soft-interface.c
2133 ++++ b/net/batman-adv/soft-interface.c
2134 +@@ -198,6 +198,7 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
2135 + int gw_mode;
2136 + enum batadv_forw_mode forw_mode = BATADV_FORW_SINGLE;
2137 + struct batadv_orig_node *mcast_single_orig = NULL;
2138 ++ int mcast_is_routable = 0;
2139 + int network_offset = ETH_HLEN;
2140 + __be16 proto;
2141 +
2142 +@@ -300,7 +301,8 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
2143 + send:
2144 + if (do_bcast && !is_broadcast_ether_addr(ethhdr->h_dest)) {
2145 + forw_mode = batadv_mcast_forw_mode(bat_priv, skb,
2146 +- &mcast_single_orig);
2147 ++ &mcast_single_orig,
2148 ++ &mcast_is_routable);
2149 + if (forw_mode == BATADV_FORW_NONE)
2150 + goto dropped;
2151 +
2152 +@@ -359,7 +361,8 @@ send:
2153 + ret = batadv_mcast_forw_send_orig(bat_priv, skb, vid,
2154 + mcast_single_orig);
2155 + } else if (forw_mode == BATADV_FORW_SOME) {
2156 +- ret = batadv_mcast_forw_send(bat_priv, skb, vid);
2157 ++ ret = batadv_mcast_forw_send(bat_priv, skb, vid,
2158 ++ mcast_is_routable);
2159 + } else {
2160 + if (batadv_dat_snoop_outgoing_arp_request(bat_priv,
2161 + skb))
2162 +diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
2163 +index 2820aca2173a8..9ccd64e8a666a 100644
2164 +--- a/net/core/lwtunnel.c
2165 ++++ b/net/core/lwtunnel.c
2166 +@@ -197,6 +197,10 @@ int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining,
2167 + nla_entype = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
2168 +
2169 + if (nla_entype) {
2170 ++ if (nla_len(nla_entype) < sizeof(u16)) {
2171 ++ NL_SET_ERR_MSG(extack, "Invalid RTA_ENCAP_TYPE");
2172 ++ return -EINVAL;
2173 ++ }
2174 + encap_type = nla_get_u16(nla_entype);
2175 +
2176 + if (lwtunnel_valid_encap_type(encap_type,
2177 +diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
2178 +index fde7797b58069..92c29ab3d0428 100644
2179 +--- a/net/ipv4/fib_semantics.c
2180 ++++ b/net/ipv4/fib_semantics.c
2181 +@@ -662,6 +662,19 @@ static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining,
2182 + return nhs;
2183 + }
2184 +
2185 ++static int fib_gw_from_attr(__be32 *gw, struct nlattr *nla,
2186 ++ struct netlink_ext_ack *extack)
2187 ++{
2188 ++ if (nla_len(nla) < sizeof(*gw)) {
2189 ++ NL_SET_ERR_MSG(extack, "Invalid IPv4 address in RTA_GATEWAY");
2190 ++ return -EINVAL;
2191 ++ }
2192 ++
2193 ++ *gw = nla_get_in_addr(nla);
2194 ++
2195 ++ return 0;
2196 ++}
2197 ++
2198 + /* only called when fib_nh is integrated into fib_info */
2199 + static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
2200 + int remaining, struct fib_config *cfg,
2201 +@@ -704,7 +717,11 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
2202 + return -EINVAL;
2203 + }
2204 + if (nla) {
2205 +- fib_cfg.fc_gw4 = nla_get_in_addr(nla);
2206 ++ ret = fib_gw_from_attr(&fib_cfg.fc_gw4, nla,
2207 ++ extack);
2208 ++ if (ret)
2209 ++ goto errout;
2210 ++
2211 + if (fib_cfg.fc_gw4)
2212 + fib_cfg.fc_gw_family = AF_INET;
2213 + } else if (nlav) {
2214 +@@ -714,10 +731,18 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
2215 + }
2216 +
2217 + nla = nla_find(attrs, attrlen, RTA_FLOW);
2218 +- if (nla)
2219 ++ if (nla) {
2220 ++ if (nla_len(nla) < sizeof(u32)) {
2221 ++ NL_SET_ERR_MSG(extack, "Invalid RTA_FLOW");
2222 ++ return -EINVAL;
2223 ++ }
2224 + fib_cfg.fc_flow = nla_get_u32(nla);
2225 ++ }
2226 +
2227 + fib_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
2228 ++ /* RTA_ENCAP_TYPE length checked in
2229 ++ * lwtunnel_valid_encap_type_attr
2230 ++ */
2231 + nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
2232 + if (nla)
2233 + fib_cfg.fc_encap_type = nla_get_u16(nla);
2234 +@@ -902,6 +927,7 @@ int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi,
2235 + attrlen = rtnh_attrlen(rtnh);
2236 + if (attrlen > 0) {
2237 + struct nlattr *nla, *nlav, *attrs = rtnh_attrs(rtnh);
2238 ++ int err;
2239 +
2240 + nla = nla_find(attrs, attrlen, RTA_GATEWAY);
2241 + nlav = nla_find(attrs, attrlen, RTA_VIA);
2242 +@@ -912,12 +938,17 @@ int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi,
2243 + }
2244 +
2245 + if (nla) {
2246 ++ __be32 gw;
2247 ++
2248 ++ err = fib_gw_from_attr(&gw, nla, extack);
2249 ++ if (err)
2250 ++ return err;
2251 ++
2252 + if (nh->fib_nh_gw_family != AF_INET ||
2253 +- nla_get_in_addr(nla) != nh->fib_nh_gw4)
2254 ++ gw != nh->fib_nh_gw4)
2255 + return 1;
2256 + } else if (nlav) {
2257 + struct fib_config cfg2;
2258 +- int err;
2259 +
2260 + err = fib_gw_from_via(&cfg2, nlav, extack);
2261 + if (err)
2262 +@@ -940,8 +971,14 @@ int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi,
2263 +
2264 + #ifdef CONFIG_IP_ROUTE_CLASSID
2265 + nla = nla_find(attrs, attrlen, RTA_FLOW);
2266 +- if (nla && nla_get_u32(nla) != nh->nh_tclassid)
2267 +- return 1;
2268 ++ if (nla) {
2269 ++ if (nla_len(nla) < sizeof(u32)) {
2270 ++ NL_SET_ERR_MSG(extack, "Invalid RTA_FLOW");
2271 ++ return -EINVAL;
2272 ++ }
2273 ++ if (nla_get_u32(nla) != nh->nh_tclassid)
2274 ++ return 1;
2275 ++ }
2276 + #endif
2277 + }
2278 +
2279 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
2280 +index be07e3d2b77bc..835b9d6e4e686 100644
2281 +--- a/net/ipv4/udp.c
2282 ++++ b/net/ipv4/udp.c
2283 +@@ -3076,7 +3076,7 @@ int udp4_seq_show(struct seq_file *seq, void *v)
2284 + {
2285 + seq_setwidth(seq, 127);
2286 + if (v == SEQ_START_TOKEN)
2287 +- seq_puts(seq, " sl local_address rem_address st tx_queue "
2288 ++ seq_puts(seq, " sl local_address rem_address st tx_queue "
2289 + "rx_queue tr tm->when retrnsmt uid timeout "
2290 + "inode ref pointer drops");
2291 + else {
2292 +diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
2293 +index 1d8e3ffa225d8..42c37ec832f15 100644
2294 +--- a/net/ipv6/ip6_vti.c
2295 ++++ b/net/ipv6/ip6_vti.c
2296 +@@ -808,6 +808,8 @@ vti6_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data
2297 + struct net *net = dev_net(dev);
2298 + struct vti6_net *ip6n = net_generic(net, vti6_net_id);
2299 +
2300 ++ memset(&p1, 0, sizeof(p1));
2301 ++
2302 + switch (cmd) {
2303 + case SIOCGETTUNNEL:
2304 + if (dev == ip6n->fb_tnl_dev) {
2305 +diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
2306 +index 60f1e4f5be5aa..c51d5ce3711c2 100644
2307 +--- a/net/ipv6/raw.c
2308 ++++ b/net/ipv6/raw.c
2309 +@@ -1020,6 +1020,9 @@ static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
2310 + struct raw6_sock *rp = raw6_sk(sk);
2311 + int val;
2312 +
2313 ++ if (optlen < sizeof(val))
2314 ++ return -EINVAL;
2315 ++
2316 + if (copy_from_sockptr(&val, optval, sizeof(val)))
2317 + return -EFAULT;
2318 +
2319 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
2320 +index 79cb5e5a4948b..0632382a5427b 100644
2321 +--- a/net/ipv6/route.c
2322 ++++ b/net/ipv6/route.c
2323 +@@ -5224,6 +5224,19 @@ out:
2324 + return should_notify;
2325 + }
2326 +
2327 ++static int fib6_gw_from_attr(struct in6_addr *gw, struct nlattr *nla,
2328 ++ struct netlink_ext_ack *extack)
2329 ++{
2330 ++ if (nla_len(nla) < sizeof(*gw)) {
2331 ++ NL_SET_ERR_MSG(extack, "Invalid IPv6 address in RTA_GATEWAY");
2332 ++ return -EINVAL;
2333 ++ }
2334 ++
2335 ++ *gw = nla_get_in6_addr(nla);
2336 ++
2337 ++ return 0;
2338 ++}
2339 ++
2340 + static int ip6_route_multipath_add(struct fib6_config *cfg,
2341 + struct netlink_ext_ack *extack)
2342 + {
2343 +@@ -5264,10 +5277,18 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
2344 +
2345 + nla = nla_find(attrs, attrlen, RTA_GATEWAY);
2346 + if (nla) {
2347 +- r_cfg.fc_gateway = nla_get_in6_addr(nla);
2348 ++ err = fib6_gw_from_attr(&r_cfg.fc_gateway, nla,
2349 ++ extack);
2350 ++ if (err)
2351 ++ goto cleanup;
2352 ++
2353 + r_cfg.fc_flags |= RTF_GATEWAY;
2354 + }
2355 + r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
2356 ++
2357 ++ /* RTA_ENCAP_TYPE length checked in
2358 ++ * lwtunnel_valid_encap_type_attr
2359 ++ */
2360 + nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
2361 + if (nla)
2362 + r_cfg.fc_encap_type = nla_get_u16(nla);
2363 +@@ -5434,7 +5455,13 @@ static int ip6_route_multipath_del(struct fib6_config *cfg,
2364 +
2365 + nla = nla_find(attrs, attrlen, RTA_GATEWAY);
2366 + if (nla) {
2367 +- nla_memcpy(&r_cfg.fc_gateway, nla, 16);
2368 ++ err = fib6_gw_from_attr(&r_cfg.fc_gateway, nla,
2369 ++ extack);
2370 ++ if (err) {
2371 ++ last_err = err;
2372 ++ goto next_rtnh;
2373 ++ }
2374 ++
2375 + r_cfg.fc_flags |= RTF_GATEWAY;
2376 + }
2377 + }
2378 +@@ -5442,6 +5469,7 @@ static int ip6_route_multipath_del(struct fib6_config *cfg,
2379 + if (err)
2380 + last_err = err;
2381 +
2382 ++next_rtnh:
2383 + rtnh = rtnh_next(rtnh, &remaining);
2384 + }
2385 +
2386 +diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
2387 +index 159af6c3ffb05..e43804c9387ee 100644
2388 +--- a/net/mac80211/ieee80211_i.h
2389 ++++ b/net/mac80211/ieee80211_i.h
2390 +@@ -648,6 +648,26 @@ struct mesh_csa_settings {
2391 + struct cfg80211_csa_settings settings;
2392 + };
2393 +
2394 ++/**
2395 ++ * struct mesh_table
2396 ++ *
2397 ++ * @known_gates: list of known mesh gates and their mpaths by the station. The
2398 ++ * gate's mpath may or may not be resolved and active.
2399 ++ * @gates_lock: protects updates to known_gates
2400 ++ * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
2401 ++ * @walk_head: linked list containing all mesh_path objects
2402 ++ * @walk_lock: lock protecting walk_head
2403 ++ * @entries: number of entries in the table
2404 ++ */
2405 ++struct mesh_table {
2406 ++ struct hlist_head known_gates;
2407 ++ spinlock_t gates_lock;
2408 ++ struct rhashtable rhead;
2409 ++ struct hlist_head walk_head;
2410 ++ spinlock_t walk_lock;
2411 ++ atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
2412 ++};
2413 ++
2414 + struct ieee80211_if_mesh {
2415 + struct timer_list housekeeping_timer;
2416 + struct timer_list mesh_path_timer;
2417 +@@ -722,8 +742,8 @@ struct ieee80211_if_mesh {
2418 + /* offset from skb->data while building IE */
2419 + int meshconf_offset;
2420 +
2421 +- struct mesh_table *mesh_paths;
2422 +- struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */
2423 ++ struct mesh_table mesh_paths;
2424 ++ struct mesh_table mpp_paths; /* Store paths for MPP&MAP */
2425 + int mesh_paths_generation;
2426 + int mpp_paths_generation;
2427 + };
2428 +diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
2429 +index 77080b4f87b8a..b2b717a78114f 100644
2430 +--- a/net/mac80211/mesh.h
2431 ++++ b/net/mac80211/mesh.h
2432 +@@ -127,26 +127,6 @@ struct mesh_path {
2433 + u32 path_change_count;
2434 + };
2435 +
2436 +-/**
2437 +- * struct mesh_table
2438 +- *
2439 +- * @known_gates: list of known mesh gates and their mpaths by the station. The
2440 +- * gate's mpath may or may not be resolved and active.
2441 +- * @gates_lock: protects updates to known_gates
2442 +- * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
2443 +- * @walk_head: linked list containing all mesh_path objects
2444 +- * @walk_lock: lock protecting walk_head
2445 +- * @entries: number of entries in the table
2446 +- */
2447 +-struct mesh_table {
2448 +- struct hlist_head known_gates;
2449 +- spinlock_t gates_lock;
2450 +- struct rhashtable rhead;
2451 +- struct hlist_head walk_head;
2452 +- spinlock_t walk_lock;
2453 +- atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
2454 +-};
2455 +-
2456 + /* Recent multicast cache */
2457 + /* RMC_BUCKETS must be a power of 2, maximum 256 */
2458 + #define RMC_BUCKETS 256
2459 +@@ -308,7 +288,7 @@ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
2460 + void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta);
2461 + void mesh_path_flush_pending(struct mesh_path *mpath);
2462 + void mesh_path_tx_pending(struct mesh_path *mpath);
2463 +-int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata);
2464 ++void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata);
2465 + void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata);
2466 + int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr);
2467 + void mesh_path_timer(struct timer_list *t);
2468 +diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
2469 +index 7cab1cf09bf1a..acc1c299f1ae5 100644
2470 +--- a/net/mac80211/mesh_pathtbl.c
2471 ++++ b/net/mac80211/mesh_pathtbl.c
2472 +@@ -47,32 +47,24 @@ static void mesh_path_rht_free(void *ptr, void *tblptr)
2473 + mesh_path_free_rcu(tbl, mpath);
2474 + }
2475 +
2476 +-static struct mesh_table *mesh_table_alloc(void)
2477 ++static void mesh_table_init(struct mesh_table *tbl)
2478 + {
2479 +- struct mesh_table *newtbl;
2480 ++ INIT_HLIST_HEAD(&tbl->known_gates);
2481 ++ INIT_HLIST_HEAD(&tbl->walk_head);
2482 ++ atomic_set(&tbl->entries, 0);
2483 ++ spin_lock_init(&tbl->gates_lock);
2484 ++ spin_lock_init(&tbl->walk_lock);
2485 +
2486 +- newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
2487 +- if (!newtbl)
2488 +- return NULL;
2489 +-
2490 +- INIT_HLIST_HEAD(&newtbl->known_gates);
2491 +- INIT_HLIST_HEAD(&newtbl->walk_head);
2492 +- atomic_set(&newtbl->entries, 0);
2493 +- spin_lock_init(&newtbl->gates_lock);
2494 +- spin_lock_init(&newtbl->walk_lock);
2495 +- if (rhashtable_init(&newtbl->rhead, &mesh_rht_params)) {
2496 +- kfree(newtbl);
2497 +- return NULL;
2498 +- }
2499 +-
2500 +- return newtbl;
2501 ++ /* rhashtable_init() may fail only in case of wrong
2502 ++ * mesh_rht_params
2503 ++ */
2504 ++ WARN_ON(rhashtable_init(&tbl->rhead, &mesh_rht_params));
2505 + }
2506 +
2507 + static void mesh_table_free(struct mesh_table *tbl)
2508 + {
2509 + rhashtable_free_and_destroy(&tbl->rhead,
2510 + mesh_path_rht_free, tbl);
2511 +- kfree(tbl);
2512 + }
2513 +
2514 + /**
2515 +@@ -238,13 +230,13 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
2516 + struct mesh_path *
2517 + mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
2518 + {
2519 +- return mpath_lookup(sdata->u.mesh.mesh_paths, dst, sdata);
2520 ++ return mpath_lookup(&sdata->u.mesh.mesh_paths, dst, sdata);
2521 + }
2522 +
2523 + struct mesh_path *
2524 + mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
2525 + {
2526 +- return mpath_lookup(sdata->u.mesh.mpp_paths, dst, sdata);
2527 ++ return mpath_lookup(&sdata->u.mesh.mpp_paths, dst, sdata);
2528 + }
2529 +
2530 + static struct mesh_path *
2531 +@@ -281,7 +273,7 @@ __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
2532 + struct mesh_path *
2533 + mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
2534 + {
2535 +- return __mesh_path_lookup_by_idx(sdata->u.mesh.mesh_paths, idx);
2536 ++ return __mesh_path_lookup_by_idx(&sdata->u.mesh.mesh_paths, idx);
2537 + }
2538 +
2539 + /**
2540 +@@ -296,7 +288,7 @@ mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
2541 + struct mesh_path *
2542 + mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
2543 + {
2544 +- return __mesh_path_lookup_by_idx(sdata->u.mesh.mpp_paths, idx);
2545 ++ return __mesh_path_lookup_by_idx(&sdata->u.mesh.mpp_paths, idx);
2546 + }
2547 +
2548 + /**
2549 +@@ -309,7 +301,7 @@ int mesh_path_add_gate(struct mesh_path *mpath)
2550 + int err;
2551 +
2552 + rcu_read_lock();
2553 +- tbl = mpath->sdata->u.mesh.mesh_paths;
2554 ++ tbl = &mpath->sdata->u.mesh.mesh_paths;
2555 +
2556 + spin_lock_bh(&mpath->state_lock);
2557 + if (mpath->is_gate) {
2558 +@@ -418,7 +410,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
2559 + if (!new_mpath)
2560 + return ERR_PTR(-ENOMEM);
2561 +
2562 +- tbl = sdata->u.mesh.mesh_paths;
2563 ++ tbl = &sdata->u.mesh.mesh_paths;
2564 + spin_lock_bh(&tbl->walk_lock);
2565 + mpath = rhashtable_lookup_get_insert_fast(&tbl->rhead,
2566 + &new_mpath->rhash,
2567 +@@ -460,7 +452,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
2568 + return -ENOMEM;
2569 +
2570 + memcpy(new_mpath->mpp, mpp, ETH_ALEN);
2571 +- tbl = sdata->u.mesh.mpp_paths;
2572 ++ tbl = &sdata->u.mesh.mpp_paths;
2573 +
2574 + spin_lock_bh(&tbl->walk_lock);
2575 + ret = rhashtable_lookup_insert_fast(&tbl->rhead,
2576 +@@ -489,7 +481,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
2577 + void mesh_plink_broken(struct sta_info *sta)
2578 + {
2579 + struct ieee80211_sub_if_data *sdata = sta->sdata;
2580 +- struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
2581 ++ struct mesh_table *tbl = &sdata->u.mesh.mesh_paths;
2582 + static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
2583 + struct mesh_path *mpath;
2584 +
2585 +@@ -548,7 +540,7 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
2586 + void mesh_path_flush_by_nexthop(struct sta_info *sta)
2587 + {
2588 + struct ieee80211_sub_if_data *sdata = sta->sdata;
2589 +- struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
2590 ++ struct mesh_table *tbl = &sdata->u.mesh.mesh_paths;
2591 + struct mesh_path *mpath;
2592 + struct hlist_node *n;
2593 +
2594 +@@ -563,7 +555,7 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
2595 + static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
2596 + const u8 *proxy)
2597 + {
2598 +- struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
2599 ++ struct mesh_table *tbl = &sdata->u.mesh.mpp_paths;
2600 + struct mesh_path *mpath;
2601 + struct hlist_node *n;
2602 +
2603 +@@ -597,8 +589,8 @@ static void table_flush_by_iface(struct mesh_table *tbl)
2604 + */
2605 + void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
2606 + {
2607 +- table_flush_by_iface(sdata->u.mesh.mesh_paths);
2608 +- table_flush_by_iface(sdata->u.mesh.mpp_paths);
2609 ++ table_flush_by_iface(&sdata->u.mesh.mesh_paths);
2610 ++ table_flush_by_iface(&sdata->u.mesh.mpp_paths);
2611 + }
2612 +
2613 + /**
2614 +@@ -644,7 +636,7 @@ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
2615 + /* flush relevant mpp entries first */
2616 + mpp_flush_by_proxy(sdata, addr);
2617 +
2618 +- err = table_path_del(sdata->u.mesh.mesh_paths, sdata, addr);
2619 ++ err = table_path_del(&sdata->u.mesh.mesh_paths, sdata, addr);
2620 + sdata->u.mesh.mesh_paths_generation++;
2621 + return err;
2622 + }
2623 +@@ -682,7 +674,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
2624 + struct mesh_path *gate;
2625 + bool copy = false;
2626 +
2627 +- tbl = sdata->u.mesh.mesh_paths;
2628 ++ tbl = &sdata->u.mesh.mesh_paths;
2629 +
2630 + rcu_read_lock();
2631 + hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
2632 +@@ -762,29 +754,10 @@ void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
2633 + mesh_path_tx_pending(mpath);
2634 + }
2635 +
2636 +-int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
2637 ++void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
2638 + {
2639 +- struct mesh_table *tbl_path, *tbl_mpp;
2640 +- int ret;
2641 +-
2642 +- tbl_path = mesh_table_alloc();
2643 +- if (!tbl_path)
2644 +- return -ENOMEM;
2645 +-
2646 +- tbl_mpp = mesh_table_alloc();
2647 +- if (!tbl_mpp) {
2648 +- ret = -ENOMEM;
2649 +- goto free_path;
2650 +- }
2651 +-
2652 +- sdata->u.mesh.mesh_paths = tbl_path;
2653 +- sdata->u.mesh.mpp_paths = tbl_mpp;
2654 +-
2655 +- return 0;
2656 +-
2657 +-free_path:
2658 +- mesh_table_free(tbl_path);
2659 +- return ret;
2660 ++ mesh_table_init(&sdata->u.mesh.mesh_paths);
2661 ++ mesh_table_init(&sdata->u.mesh.mpp_paths);
2662 + }
2663 +
2664 + static
2665 +@@ -806,12 +779,12 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
2666 +
2667 + void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
2668 + {
2669 +- mesh_path_tbl_expire(sdata, sdata->u.mesh.mesh_paths);
2670 +- mesh_path_tbl_expire(sdata, sdata->u.mesh.mpp_paths);
2671 ++ mesh_path_tbl_expire(sdata, &sdata->u.mesh.mesh_paths);
2672 ++ mesh_path_tbl_expire(sdata, &sdata->u.mesh.mpp_paths);
2673 + }
2674 +
2675 + void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata)
2676 + {
2677 +- mesh_table_free(sdata->u.mesh.mesh_paths);
2678 +- mesh_table_free(sdata->u.mesh.mpp_paths);
2679 ++ mesh_table_free(&sdata->u.mesh.mesh_paths);
2680 ++ mesh_table_free(&sdata->u.mesh.mpp_paths);
2681 + }
2682 +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
2683 +index dd42d83dbe33e..89c648b035b9a 100644
2684 +--- a/net/mac80211/mlme.c
2685 ++++ b/net/mac80211/mlme.c
2686 +@@ -5216,7 +5216,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
2687 + */
2688 + if (new_sta) {
2689 + u32 rates = 0, basic_rates = 0;
2690 +- bool have_higher_than_11mbit;
2691 ++ bool have_higher_than_11mbit = false;
2692 + int min_rate = INT_MAX, min_rate_index = -1;
2693 + const struct cfg80211_bss_ies *ies;
2694 + int shift = ieee80211_vif_get_shift(&sdata->vif);
2695 +diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
2696 +index 6d16e1ab1a8ab..eef0e3f2f25b0 100644
2697 +--- a/net/netrom/af_netrom.c
2698 ++++ b/net/netrom/af_netrom.c
2699 +@@ -306,7 +306,7 @@ static int nr_setsockopt(struct socket *sock, int level, int optname,
2700 + if (optlen < sizeof(unsigned int))
2701 + return -EINVAL;
2702 +
2703 +- if (copy_from_sockptr(&opt, optval, sizeof(unsigned int)))
2704 ++ if (copy_from_sockptr(&opt, optval, sizeof(unsigned long)))
2705 + return -EFAULT;
2706 +
2707 + switch (optname) {
2708 +diff --git a/net/phonet/pep.c b/net/phonet/pep.c
2709 +index 72018e5e4d8ef..65d463ad87707 100644
2710 +--- a/net/phonet/pep.c
2711 ++++ b/net/phonet/pep.c
2712 +@@ -868,6 +868,7 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
2713 +
2714 + err = pep_accept_conn(newsk, skb);
2715 + if (err) {
2716 ++ __sock_put(sk);
2717 + sock_put(newsk);
2718 + newsk = NULL;
2719 + goto drop;
2720 +diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
2721 +index 58a9d42b52b8f..aea435b0aeb34 100644
2722 +--- a/net/sched/sch_qfq.c
2723 ++++ b/net/sched/sch_qfq.c
2724 +@@ -1422,10 +1422,8 @@ static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
2725 + if (err < 0)
2726 + return err;
2727 +
2728 +- if (qdisc_dev(sch)->tx_queue_len + 1 > QFQ_MAX_AGG_CLASSES)
2729 +- max_classes = QFQ_MAX_AGG_CLASSES;
2730 +- else
2731 +- max_classes = qdisc_dev(sch)->tx_queue_len + 1;
2732 ++ max_classes = min_t(u64, (u64)qdisc_dev(sch)->tx_queue_len + 1,
2733 ++ QFQ_MAX_AGG_CLASSES);
2734 + /* max_cl_shift = floor(log_2(max_classes)) */
2735 + max_cl_shift = __fls(max_classes);
2736 + q->max_agg_classes = 1<<max_cl_shift;
2737 +diff --git a/net/sctp/diag.c b/net/sctp/diag.c
2738 +index a7d6231715013..034e2c74497df 100644
2739 +--- a/net/sctp/diag.c
2740 ++++ b/net/sctp/diag.c
2741 +@@ -245,48 +245,44 @@ static size_t inet_assoc_attr_size(struct sctp_association *asoc)
2742 + + 64;
2743 + }
2744 +
2745 +-static int sctp_tsp_dump_one(struct sctp_transport *tsp, void *p)
2746 ++static int sctp_sock_dump_one(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
2747 + {
2748 + struct sctp_association *assoc = tsp->asoc;
2749 +- struct sock *sk = tsp->asoc->base.sk;
2750 + struct sctp_comm_param *commp = p;
2751 +- struct sk_buff *in_skb = commp->skb;
2752 ++ struct sock *sk = ep->base.sk;
2753 + const struct inet_diag_req_v2 *req = commp->r;
2754 +- const struct nlmsghdr *nlh = commp->nlh;
2755 +- struct net *net = sock_net(in_skb->sk);
2756 ++ struct sk_buff *skb = commp->skb;
2757 + struct sk_buff *rep;
2758 + int err;
2759 +
2760 + err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
2761 + if (err)
2762 +- goto out;
2763 ++ return err;
2764 +
2765 +- err = -ENOMEM;
2766 + rep = nlmsg_new(inet_assoc_attr_size(assoc), GFP_KERNEL);
2767 + if (!rep)
2768 +- goto out;
2769 ++ return -ENOMEM;
2770 +
2771 + lock_sock(sk);
2772 +- if (sk != assoc->base.sk) {
2773 +- release_sock(sk);
2774 +- sk = assoc->base.sk;
2775 +- lock_sock(sk);
2776 ++ if (ep != assoc->ep) {
2777 ++ err = -EAGAIN;
2778 ++ goto out;
2779 + }
2780 +- err = inet_sctp_diag_fill(sk, assoc, rep, req,
2781 +- sk_user_ns(NETLINK_CB(in_skb).sk),
2782 +- NETLINK_CB(in_skb).portid,
2783 +- nlh->nlmsg_seq, 0, nlh,
2784 +- commp->net_admin);
2785 +- release_sock(sk);
2786 ++
2787 ++ err = inet_sctp_diag_fill(sk, assoc, rep, req, sk_user_ns(NETLINK_CB(skb).sk),
2788 ++ NETLINK_CB(skb).portid, commp->nlh->nlmsg_seq, 0,
2789 ++ commp->nlh, commp->net_admin);
2790 + if (err < 0) {
2791 + WARN_ON(err == -EMSGSIZE);
2792 +- kfree_skb(rep);
2793 + goto out;
2794 + }
2795 ++ release_sock(sk);
2796 +
2797 +- err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid);
2798 ++ return nlmsg_unicast(sock_net(skb->sk)->diag_nlsk, rep, NETLINK_CB(skb).portid);
2799 +
2800 + out:
2801 ++ release_sock(sk);
2802 ++ kfree_skb(rep);
2803 + return err;
2804 + }
2805 +
2806 +@@ -429,15 +425,15 @@ static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
2807 + static int sctp_diag_dump_one(struct netlink_callback *cb,
2808 + const struct inet_diag_req_v2 *req)
2809 + {
2810 +- struct sk_buff *in_skb = cb->skb;
2811 +- struct net *net = sock_net(in_skb->sk);
2812 ++ struct sk_buff *skb = cb->skb;
2813 ++ struct net *net = sock_net(skb->sk);
2814 + const struct nlmsghdr *nlh = cb->nlh;
2815 + union sctp_addr laddr, paddr;
2816 + struct sctp_comm_param commp = {
2817 +- .skb = in_skb,
2818 ++ .skb = skb,
2819 + .r = req,
2820 + .nlh = nlh,
2821 +- .net_admin = netlink_net_capable(in_skb, CAP_NET_ADMIN),
2822 ++ .net_admin = netlink_net_capable(skb, CAP_NET_ADMIN),
2823 + };
2824 +
2825 + if (req->sdiag_family == AF_INET) {
2826 +@@ -460,7 +456,7 @@ static int sctp_diag_dump_one(struct netlink_callback *cb,
2827 + paddr.v6.sin6_family = AF_INET6;
2828 + }
2829 +
2830 +- return sctp_transport_lookup_process(sctp_tsp_dump_one,
2831 ++ return sctp_transport_lookup_process(sctp_sock_dump_one,
2832 + net, &laddr, &paddr, &commp);
2833 + }
2834 +
2835 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
2836 +index d2215d24634e8..6b3c32264cbc8 100644
2837 +--- a/net/sctp/socket.c
2838 ++++ b/net/sctp/socket.c
2839 +@@ -5317,23 +5317,31 @@ int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
2840 + }
2841 + EXPORT_SYMBOL_GPL(sctp_for_each_endpoint);
2842 +
2843 +-int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
2844 +- struct net *net,
2845 ++int sctp_transport_lookup_process(sctp_callback_t cb, struct net *net,
2846 + const union sctp_addr *laddr,
2847 + const union sctp_addr *paddr, void *p)
2848 + {
2849 + struct sctp_transport *transport;
2850 +- int err;
2851 ++ struct sctp_endpoint *ep;
2852 ++ int err = -ENOENT;
2853 +
2854 + rcu_read_lock();
2855 + transport = sctp_addrs_lookup_transport(net, laddr, paddr);
2856 ++ if (!transport) {
2857 ++ rcu_read_unlock();
2858 ++ return err;
2859 ++ }
2860 ++ ep = transport->asoc->ep;
2861 ++ if (!sctp_endpoint_hold(ep)) { /* asoc can be peeled off */
2862 ++ sctp_transport_put(transport);
2863 ++ rcu_read_unlock();
2864 ++ return err;
2865 ++ }
2866 + rcu_read_unlock();
2867 +- if (!transport)
2868 +- return -ENOENT;
2869 +
2870 +- err = cb(transport, p);
2871 ++ err = cb(ep, transport, p);
2872 ++ sctp_endpoint_put(ep);
2873 + sctp_transport_put(transport);
2874 +-
2875 + return err;
2876 + }
2877 + EXPORT_SYMBOL_GPL(sctp_transport_lookup_process);
2878 +diff --git a/net/tipc/socket.c b/net/tipc/socket.c
2879 +index ad570c2450be8..3e63c83e641c5 100644
2880 +--- a/net/tipc/socket.c
2881 ++++ b/net/tipc/socket.c
2882 +@@ -1461,6 +1461,8 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
2883 + msg_set_syn(hdr, 1);
2884 + }
2885 +
2886 ++ memset(&skaddr, 0, sizeof(skaddr));
2887 ++
2888 + /* Determine destination */
2889 + if (atype == TIPC_SERVICE_RANGE) {
2890 + return tipc_sendmcast(sock, ua, m, dlen, timeout);
2891 +diff --git a/samples/ftrace/ftrace-direct-modify.c b/samples/ftrace/ftrace-direct-modify.c
2892 +index 5b9a09957c6e0..89e6bf27cd9f6 100644
2893 +--- a/samples/ftrace/ftrace-direct-modify.c
2894 ++++ b/samples/ftrace/ftrace-direct-modify.c
2895 +@@ -3,6 +3,9 @@
2896 + #include <linux/kthread.h>
2897 + #include <linux/ftrace.h>
2898 +
2899 ++extern void my_direct_func1(void);
2900 ++extern void my_direct_func2(void);
2901 ++
2902 + void my_direct_func1(void)
2903 + {
2904 + trace_printk("my direct func1\n");
2905 +diff --git a/samples/ftrace/ftrace-direct-too.c b/samples/ftrace/ftrace-direct-too.c
2906 +index 3f0079c9bd6fa..11b99325f3dbf 100644
2907 +--- a/samples/ftrace/ftrace-direct-too.c
2908 ++++ b/samples/ftrace/ftrace-direct-too.c
2909 +@@ -4,6 +4,9 @@
2910 + #include <linux/mm.h> /* for handle_mm_fault() */
2911 + #include <linux/ftrace.h>
2912 +
2913 ++extern void my_direct_func(struct vm_area_struct *vma,
2914 ++ unsigned long address, unsigned int flags);
2915 ++
2916 + void my_direct_func(struct vm_area_struct *vma,
2917 + unsigned long address, unsigned int flags)
2918 + {
2919 +diff --git a/samples/ftrace/ftrace-direct.c b/samples/ftrace/ftrace-direct.c
2920 +index a2729d1ef17f5..642c50b5f7166 100644
2921 +--- a/samples/ftrace/ftrace-direct.c
2922 ++++ b/samples/ftrace/ftrace-direct.c
2923 +@@ -4,6 +4,8 @@
2924 + #include <linux/sched.h> /* for wake_up_process() */
2925 + #include <linux/ftrace.h>
2926 +
2927 ++extern void my_direct_func(struct task_struct *p);
2928 ++
2929 + void my_direct_func(struct task_struct *p)
2930 + {
2931 + trace_printk("waking up %s-%d\n", p->comm, p->pid);
2932 +diff --git a/tools/testing/selftests/net/udpgro_fwd.sh b/tools/testing/selftests/net/udpgro_fwd.sh
2933 +index 3ea73013d9568..6f05e06f67613 100755
2934 +--- a/tools/testing/selftests/net/udpgro_fwd.sh
2935 ++++ b/tools/testing/selftests/net/udpgro_fwd.sh
2936 +@@ -193,7 +193,8 @@ for family in 4 6; do
2937 + SUFFIX="64 nodad"
2938 + VXDEV=vxlan6
2939 + IPT=ip6tables
2940 +- PING="ping6"
2941 ++ # Use ping6 on systems where ping doesn't handle IPv6
2942 ++ ping -w 1 -c 1 ::1 > /dev/null 2>&1 || PING="ping6"
2943 + fi
2944 +
2945 + echo "IPv$family"
2946 +diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
2947 +index 60aa1a4fc69b6..81690f1737c80 100644
2948 +--- a/tools/testing/selftests/vm/userfaultfd.c
2949 ++++ b/tools/testing/selftests/vm/userfaultfd.c
2950 +@@ -86,7 +86,7 @@ static bool test_uffdio_minor = false;
2951 +
2952 + static bool map_shared;
2953 + static int shm_fd;
2954 +-static int huge_fd;
2955 ++static int huge_fd = -1; /* only used for hugetlb_shared test */
2956 + static char *huge_fd_off0;
2957 + static unsigned long long *count_verify;
2958 + static int uffd = -1;
2959 +@@ -222,6 +222,9 @@ static void noop_alias_mapping(__u64 *start, size_t len, unsigned long offset)
2960 +
2961 + static void hugetlb_release_pages(char *rel_area)
2962 + {
2963 ++ if (huge_fd == -1)
2964 ++ return;
2965 ++
2966 + if (fallocate(huge_fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
2967 + rel_area == huge_fd_off0 ? 0 : nr_pages * page_size,
2968 + nr_pages * page_size))
2969 +@@ -234,16 +237,17 @@ static void hugetlb_allocate_area(void **alloc_area)
2970 + char **alloc_area_alias;
2971 +
2972 + *alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
2973 +- (map_shared ? MAP_SHARED : MAP_PRIVATE) |
2974 +- MAP_HUGETLB,
2975 +- huge_fd, *alloc_area == area_src ? 0 :
2976 +- nr_pages * page_size);
2977 ++ map_shared ? MAP_SHARED :
2978 ++ MAP_PRIVATE | MAP_HUGETLB |
2979 ++ (*alloc_area == area_src ? 0 : MAP_NORESERVE),
2980 ++ huge_fd,
2981 ++ *alloc_area == area_src ? 0 : nr_pages * page_size);
2982 + if (*alloc_area == MAP_FAILED)
2983 + err("mmap of hugetlbfs file failed");
2984 +
2985 + if (map_shared) {
2986 + area_alias = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
2987 +- MAP_SHARED | MAP_HUGETLB,
2988 ++ MAP_SHARED,
2989 + huge_fd, *alloc_area == area_src ? 0 :
2990 + nr_pages * page_size);
2991 + if (area_alias == MAP_FAILED)
2992 +diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c
2993 +index 65c141ebfbbde..5b45e6986aeab 100644
2994 +--- a/tools/testing/selftests/x86/test_vsyscall.c
2995 ++++ b/tools/testing/selftests/x86/test_vsyscall.c
2996 +@@ -497,7 +497,7 @@ static int test_process_vm_readv(void)
2997 + }
2998 +
2999 + if (vsyscall_map_r) {
3000 +- if (!memcmp(buf, (const void *)0xffffffffff600000, 4096)) {
3001 ++ if (!memcmp(buf, remote.iov_base, sizeof(buf))) {
3002 + printf("[OK]\tIt worked and read correct data\n");
3003 + } else {
3004 + printf("[FAIL]\tIt worked but returned incorrect data\n");