
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.10 commit in: /
Date: Mon, 05 Sep 2022 12:04:24
Message-Id: 1662379436.e733b0241620ba620ba58ee2a86aebd811b73cc1.mpagano@gentoo
1 commit: e733b0241620ba620ba58ee2a86aebd811b73cc1
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Mon Sep 5 12:03:56 2022 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Mon Sep 5 12:03:56 2022 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e733b024
7
8 Linux patch 5.10.141
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1140_linux-5.10.141.patch | 1181 +++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 1185 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index f5306e89..1da294a6 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -603,6 +603,10 @@ Patch: 1139_linux-5.10.140.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.10.140
23
24 +Patch: 1140_linux-5.10.141.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.10.141
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1140_linux-5.10.141.patch b/1140_linux-5.10.141.patch
33 new file mode 100644
34 index 00000000..c5adbeb2
35 --- /dev/null
36 +++ b/1140_linux-5.10.141.patch
37 @@ -0,0 +1,1181 @@
38 +diff --git a/Makefile b/Makefile
39 +index a80179d2c0057..d2833d29d65f5 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 5
45 + PATCHLEVEL = 10
46 +-SUBLEVEL = 140
47 ++SUBLEVEL = 141
48 + EXTRAVERSION =
49 + NAME = Dare mighty things
50 +
51 +diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
52 +index f0bc4dc3e9bf0..6511d15ace45e 100644
53 +--- a/arch/s390/hypfs/hypfs_diag.c
54 ++++ b/arch/s390/hypfs/hypfs_diag.c
55 +@@ -437,7 +437,7 @@ __init int hypfs_diag_init(void)
56 + int rc;
57 +
58 + if (diag204_probe()) {
59 +- pr_err("The hardware system does not support hypfs\n");
60 ++ pr_info("The hardware system does not support hypfs\n");
61 + return -ENODATA;
62 + }
63 +
64 +diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
65 +index 5c97f48cea91d..ee919bfc81867 100644
66 +--- a/arch/s390/hypfs/inode.c
67 ++++ b/arch/s390/hypfs/inode.c
68 +@@ -496,9 +496,9 @@ fail_hypfs_sprp_exit:
69 + hypfs_vm_exit();
70 + fail_hypfs_diag_exit:
71 + hypfs_diag_exit();
72 ++ pr_err("Initialization of hypfs failed with rc=%i\n", rc);
73 + fail_dbfs_exit:
74 + hypfs_dbfs_exit();
75 +- pr_err("Initialization of hypfs failed with rc=%i\n", rc);
76 + return rc;
77 + }
78 + device_initcall(hypfs_init)
79 +diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
80 +index ed517fad0d035..1866374356c84 100644
81 +--- a/arch/s390/mm/fault.c
82 ++++ b/arch/s390/mm/fault.c
83 +@@ -429,7 +429,9 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
84 + flags = FAULT_FLAG_DEFAULT;
85 + if (user_mode(regs))
86 + flags |= FAULT_FLAG_USER;
87 +- if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
88 ++ if ((trans_exc_code & store_indication) == 0x400)
89 ++ access = VM_WRITE;
90 ++ if (access == VM_WRITE)
91 + flags |= FAULT_FLAG_WRITE;
92 + mmap_read_lock(mm);
93 +
94 +diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
95 +index 0acd99329923c..07f5030073bbc 100644
96 +--- a/arch/x86/include/asm/nospec-branch.h
97 ++++ b/arch/x86/include/asm/nospec-branch.h
98 +@@ -35,33 +35,56 @@
99 + #define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */
100 +
101 + /*
102 ++ * Common helper for __FILL_RETURN_BUFFER and __FILL_ONE_RETURN.
103 ++ */
104 ++#define __FILL_RETURN_SLOT \
105 ++ ANNOTATE_INTRA_FUNCTION_CALL; \
106 ++ call 772f; \
107 ++ int3; \
108 ++772:
109 ++
110 ++/*
111 ++ * Stuff the entire RSB.
112 ++ *
113 + * Google experimented with loop-unrolling and this turned out to be
114 + * the optimal version — two calls, each with their own speculation
115 + * trap should their return address end up getting used, in a loop.
116 + */
117 +-#define __FILL_RETURN_BUFFER(reg, nr, sp) \
118 +- mov $(nr/2), reg; \
119 +-771: \
120 +- ANNOTATE_INTRA_FUNCTION_CALL; \
121 +- call 772f; \
122 +-773: /* speculation trap */ \
123 +- UNWIND_HINT_EMPTY; \
124 +- pause; \
125 +- lfence; \
126 +- jmp 773b; \
127 +-772: \
128 +- ANNOTATE_INTRA_FUNCTION_CALL; \
129 +- call 774f; \
130 +-775: /* speculation trap */ \
131 +- UNWIND_HINT_EMPTY; \
132 +- pause; \
133 +- lfence; \
134 +- jmp 775b; \
135 +-774: \
136 +- add $(BITS_PER_LONG/8) * 2, sp; \
137 +- dec reg; \
138 +- jnz 771b; \
139 +- /* barrier for jnz misprediction */ \
140 ++#ifdef CONFIG_X86_64
141 ++#define __FILL_RETURN_BUFFER(reg, nr) \
142 ++ mov $(nr/2), reg; \
143 ++771: \
144 ++ __FILL_RETURN_SLOT \
145 ++ __FILL_RETURN_SLOT \
146 ++ add $(BITS_PER_LONG/8) * 2, %_ASM_SP; \
147 ++ dec reg; \
148 ++ jnz 771b; \
149 ++ /* barrier for jnz misprediction */ \
150 ++ lfence;
151 ++#else
152 ++/*
153 ++ * i386 doesn't unconditionally have LFENCE, as such it can't
154 ++ * do a loop.
155 ++ */
156 ++#define __FILL_RETURN_BUFFER(reg, nr) \
157 ++ .rept nr; \
158 ++ __FILL_RETURN_SLOT; \
159 ++ .endr; \
160 ++ add $(BITS_PER_LONG/8) * nr, %_ASM_SP;
161 ++#endif
162 ++
163 ++/*
164 ++ * Stuff a single RSB slot.
165 ++ *
166 ++ * To mitigate Post-Barrier RSB speculation, one CALL instruction must be
167 ++ * forced to retire before letting a RET instruction execute.
168 ++ *
169 ++ * On PBRSB-vulnerable CPUs, it is not safe for a RET to be executed
170 ++ * before this point.
171 ++ */
172 ++#define __FILL_ONE_RETURN \
173 ++ __FILL_RETURN_SLOT \
174 ++ add $(BITS_PER_LONG/8), %_ASM_SP; \
175 + lfence;
176 +
177 + #ifdef __ASSEMBLY__
178 +@@ -120,28 +143,15 @@
179 + #endif
180 + .endm
181 +
182 +-.macro ISSUE_UNBALANCED_RET_GUARD
183 +- ANNOTATE_INTRA_FUNCTION_CALL
184 +- call .Lunbalanced_ret_guard_\@
185 +- int3
186 +-.Lunbalanced_ret_guard_\@:
187 +- add $(BITS_PER_LONG/8), %_ASM_SP
188 +- lfence
189 +-.endm
190 +-
191 + /*
192 + * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
193 + * monstrosity above, manually.
194 + */
195 +-.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2
196 +-.ifb \ftr2
197 +- ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr
198 +-.else
199 +- ALTERNATIVE_2 "jmp .Lskip_rsb_\@", "", \ftr, "jmp .Lunbalanced_\@", \ftr2
200 +-.endif
201 +- __FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)
202 +-.Lunbalanced_\@:
203 +- ISSUE_UNBALANCED_RET_GUARD
204 ++.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2=ALT_NOT(X86_FEATURE_ALWAYS)
205 ++ ALTERNATIVE_2 "jmp .Lskip_rsb_\@", \
206 ++ __stringify(__FILL_RETURN_BUFFER(\reg,\nr)), \ftr, \
207 ++ __stringify(__FILL_ONE_RETURN), \ftr2
208 ++
209 + .Lskip_rsb_\@:
210 + .endm
211 +
212 +diff --git a/drivers/android/binder.c b/drivers/android/binder.c
213 +index 366b124057081..a5d5247c4f3e8 100644
214 +--- a/drivers/android/binder.c
215 ++++ b/drivers/android/binder.c
216 +@@ -6069,6 +6069,7 @@ const struct file_operations binder_fops = {
217 + .open = binder_open,
218 + .flush = binder_flush,
219 + .release = binder_release,
220 ++ .may_pollfree = true,
221 + };
222 +
223 + static int __init init_binder_device(const char *name)
224 +diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
225 +index 2e3b76519b49d..b624f3d8f0e64 100644
226 +--- a/drivers/dma-buf/udmabuf.c
227 ++++ b/drivers/dma-buf/udmabuf.c
228 +@@ -327,7 +327,23 @@ static struct miscdevice udmabuf_misc = {
229 +
230 + static int __init udmabuf_dev_init(void)
231 + {
232 +- return misc_register(&udmabuf_misc);
233 ++ int ret;
234 ++
235 ++ ret = misc_register(&udmabuf_misc);
236 ++ if (ret < 0) {
237 ++ pr_err("Could not initialize udmabuf device\n");
238 ++ return ret;
239 ++ }
240 ++
241 ++ ret = dma_coerce_mask_and_coherent(udmabuf_misc.this_device,
242 ++ DMA_BIT_MASK(64));
243 ++ if (ret < 0) {
244 ++ pr_err("Could not setup DMA mask for udmabuf device\n");
245 ++ misc_deregister(&udmabuf_misc);
246 ++ return ret;
247 ++ }
248 ++
249 ++ return 0;
250 + }
251 +
252 + static void __exit udmabuf_dev_exit(void)
253 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
254 +index d949d6c52f24b..ff5555353eb4f 100644
255 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
256 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
257 +@@ -283,7 +283,7 @@ enum amdgpu_kiq_irq {
258 + AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
259 + AMDGPU_CP_KIQ_IRQ_LAST
260 + };
261 +-
262 ++#define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */
263 + #define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
264 + #define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
265 + #define MAX_KIQ_REG_TRY 80 /* 20 -> 80 */
266 +diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
267 +index 150fa5258fb6f..2aa9242c58ab9 100644
268 +--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
269 ++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
270 +@@ -371,6 +371,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
271 + uint32_t seq;
272 + uint16_t queried_pasid;
273 + bool ret;
274 ++ u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
275 + struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
276 + struct amdgpu_kiq *kiq = &adev->gfx.kiq;
277 +
278 +@@ -389,7 +390,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
279 +
280 + amdgpu_ring_commit(ring);
281 + spin_unlock(&adev->gfx.kiq.ring_lock);
282 +- r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
283 ++ r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
284 + if (r < 1) {
285 + dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
286 + return -ETIME;
287 +diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
288 +index 3a864041968f6..1673bf3bae55a 100644
289 +--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
290 ++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
291 +@@ -839,6 +839,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
292 + uint32_t seq;
293 + uint16_t queried_pasid;
294 + bool ret;
295 ++ u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
296 + struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
297 + struct amdgpu_kiq *kiq = &adev->gfx.kiq;
298 +
299 +@@ -878,7 +879,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
300 +
301 + amdgpu_ring_commit(ring);
302 + spin_unlock(&adev->gfx.kiq.ring_lock);
303 +- r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
304 ++ r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
305 + if (r < 1) {
306 + dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
307 + up_read(&adev->reset_sem);
308 +diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
309 +index bae3a146b2cc2..89cc852cb27c5 100644
310 +--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
311 ++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
312 +@@ -546,9 +546,11 @@ static void dce112_get_pix_clk_dividers_helper (
313 + switch (pix_clk_params->color_depth) {
314 + case COLOR_DEPTH_101010:
315 + actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 5) >> 2;
316 ++ actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10;
317 + break;
318 + case COLOR_DEPTH_121212:
319 + actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 6) >> 2;
320 ++ actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10;
321 + break;
322 + case COLOR_DEPTH_161616:
323 + actual_pixel_clock_100hz = actual_pixel_clock_100hz * 2;
324 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
325 +index 3fcd408e91032..855682590c1bb 100644
326 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
327 ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
328 +@@ -125,6 +125,12 @@ struct mpcc *mpc1_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
329 + while (tmp_mpcc != NULL) {
330 + if (tmp_mpcc->dpp_id == dpp_id)
331 + return tmp_mpcc;
332 ++
333 ++ /* avoid circular linked list */
334 ++ ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot);
335 ++ if (tmp_mpcc == tmp_mpcc->mpcc_bot)
336 ++ break;
337 ++
338 + tmp_mpcc = tmp_mpcc->mpcc_bot;
339 + }
340 + return NULL;
341 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
342 +index 800be2693faca..963d72f96dca3 100644
343 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
344 ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
345 +@@ -464,6 +464,11 @@ void optc1_enable_optc_clock(struct timing_generator *optc, bool enable)
346 + OTG_CLOCK_ON, 1,
347 + 1, 1000);
348 + } else {
349 ++
350 ++ //last chance to clear underflow, otherwise, it will always there due to clock is off.
351 ++ if (optc->funcs->is_optc_underflow_occurred(optc) == true)
352 ++ optc->funcs->clear_optc_underflow(optc);
353 ++
354 + REG_UPDATE_2(OTG_CLOCK_CONTROL,
355 + OTG_CLOCK_GATE_DIS, 0,
356 + OTG_CLOCK_EN, 0);
357 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
358 +index 99cc095dc33c7..a701ea56c0aa0 100644
359 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
360 ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
361 +@@ -533,6 +533,12 @@ struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
362 + while (tmp_mpcc != NULL) {
363 + if (tmp_mpcc->dpp_id == 0xf || tmp_mpcc->dpp_id == dpp_id)
364 + return tmp_mpcc;
365 ++
366 ++ /* avoid circular linked list */
367 ++ ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot);
368 ++ if (tmp_mpcc == tmp_mpcc->mpcc_bot)
369 ++ break;
370 ++
371 + tmp_mpcc = tmp_mpcc->mpcc_bot;
372 + }
373 + return NULL;
374 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
375 +index af462fe4260de..b0fd8859bd2f2 100644
376 +--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
377 ++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
378 +@@ -86,7 +86,7 @@ bool hubp3_program_surface_flip_and_addr(
379 + VMID, address->vmid);
380 +
381 + if (address->type == PLN_ADDR_TYPE_GRPH_STEREO) {
382 +- REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0x1);
383 ++ REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0);
384 + REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, 0x1);
385 +
386 + } else {
387 +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
388 +index 8556c229ff598..49d7fa1d08427 100644
389 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
390 ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
391 +@@ -2759,6 +2759,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
392 + .dump_pptable = sienna_cichlid_dump_pptable,
393 + .init_microcode = smu_v11_0_init_microcode,
394 + .load_microcode = smu_v11_0_load_microcode,
395 ++ .fini_microcode = smu_v11_0_fini_microcode,
396 + .init_smc_tables = sienna_cichlid_init_smc_tables,
397 + .fini_smc_tables = smu_v11_0_fini_smc_tables,
398 + .init_power = smu_v11_0_init_power,
399 +diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
400 +index a3b151b29bd71..fc616db4231bb 100644
401 +--- a/drivers/hid/hid-steam.c
402 ++++ b/drivers/hid/hid-steam.c
403 +@@ -134,6 +134,11 @@ static int steam_recv_report(struct steam_device *steam,
404 + int ret;
405 +
406 + r = steam->hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0];
407 ++ if (!r) {
408 ++ hid_err(steam->hdev, "No HID_FEATURE_REPORT submitted - nothing to read\n");
409 ++ return -EINVAL;
410 ++ }
411 ++
412 + if (hid_report_len(r) < 64)
413 + return -EINVAL;
414 +
415 +@@ -165,6 +170,11 @@ static int steam_send_report(struct steam_device *steam,
416 + int ret;
417 +
418 + r = steam->hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0];
419 ++ if (!r) {
420 ++ hid_err(steam->hdev, "No HID_FEATURE_REPORT submitted - nothing to read\n");
421 ++ return -EINVAL;
422 ++ }
423 ++
424 + if (hid_report_len(r) < 64)
425 + return -EINVAL;
426 +
427 +diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
428 +index 2eee5e31c2b7e..fade7fcf6a146 100644
429 +--- a/drivers/hid/hidraw.c
430 ++++ b/drivers/hid/hidraw.c
431 +@@ -346,10 +346,13 @@ static int hidraw_release(struct inode * inode, struct file * file)
432 + unsigned int minor = iminor(inode);
433 + struct hidraw_list *list = file->private_data;
434 + unsigned long flags;
435 ++ int i;
436 +
437 + mutex_lock(&minors_lock);
438 +
439 + spin_lock_irqsave(&hidraw_table[minor]->list_lock, flags);
440 ++ for (i = list->tail; i < list->head; i++)
441 ++ kfree(list->buffer[i].value);
442 + list_del(&list->node);
443 + spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags);
444 + kfree(list);
445 +diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
446 +index fccd1798445d5..d22ce328a2797 100644
447 +--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
448 ++++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
449 +@@ -2610,6 +2610,7 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
450 + del_timer_sync(&hdw->encoder_run_timer);
451 + del_timer_sync(&hdw->encoder_wait_timer);
452 + flush_work(&hdw->workpoll);
453 ++ v4l2_device_unregister(&hdw->v4l2_dev);
454 + usb_free_urb(hdw->ctl_read_urb);
455 + usb_free_urb(hdw->ctl_write_urb);
456 + kfree(hdw->ctl_read_buffer);
457 +diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
458 +index f5c965da95013..d71c113f428f6 100644
459 +--- a/drivers/mmc/host/mtk-sd.c
460 ++++ b/drivers/mmc/host/mtk-sd.c
461 +@@ -2293,6 +2293,9 @@ static void msdc_cqe_disable(struct mmc_host *mmc, bool recovery)
462 + /* disable busy check */
463 + sdr_clr_bits(host->base + MSDC_PATCH_BIT1, MSDC_PB1_BUSY_CHECK_SEL);
464 +
465 ++ val = readl(host->base + MSDC_INT);
466 ++ writel(val, host->base + MSDC_INT);
467 ++
468 + if (recovery) {
469 + sdr_set_field(host->base + MSDC_DMA_CTRL,
470 + MSDC_DMA_CTRL_STOP, 1);
471 +@@ -2693,11 +2696,14 @@ static int __maybe_unused msdc_suspend(struct device *dev)
472 + {
473 + struct mmc_host *mmc = dev_get_drvdata(dev);
474 + int ret;
475 ++ u32 val;
476 +
477 + if (mmc->caps2 & MMC_CAP2_CQE) {
478 + ret = cqhci_suspend(mmc);
479 + if (ret)
480 + return ret;
481 ++ val = readl(((struct msdc_host *)mmc_priv(mmc))->base + MSDC_INT);
482 ++ writel(val, ((struct msdc_host *)mmc_priv(mmc))->base + MSDC_INT);
483 + }
484 +
485 + return pm_runtime_force_suspend(dev);
486 +diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
487 +index 5ae81f2df45f7..3779b264dbec3 100644
488 +--- a/drivers/pci/pcie/portdrv_core.c
489 ++++ b/drivers/pci/pcie/portdrv_core.c
490 +@@ -222,8 +222,15 @@ static int get_port_device_capability(struct pci_dev *dev)
491 +
492 + #ifdef CONFIG_PCIEAER
493 + if (dev->aer_cap && pci_aer_available() &&
494 +- (pcie_ports_native || host->native_aer))
495 ++ (pcie_ports_native || host->native_aer)) {
496 + services |= PCIE_PORT_SERVICE_AER;
497 ++
498 ++ /*
499 ++ * Disable AER on this port in case it's been enabled by the
500 ++ * BIOS (the AER service driver will enable it when necessary).
501 ++ */
502 ++ pci_disable_pcie_error_reporting(dev);
503 ++ }
504 + #endif
505 +
506 + /*
507 +diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c
508 +index 0642555289e06..c12d46e283598 100644
509 +--- a/drivers/video/fbdev/pm2fb.c
510 ++++ b/drivers/video/fbdev/pm2fb.c
511 +@@ -616,6 +616,11 @@ static int pm2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
512 + return -EINVAL;
513 + }
514 +
515 ++ if (!var->pixclock) {
516 ++ DPRINTK("pixclock is zero\n");
517 ++ return -EINVAL;
518 ++ }
519 ++
520 + if (PICOS2KHZ(var->pixclock) > PM2_MAX_PIXCLOCK) {
521 + DPRINTK("pixclock too high (%ldKHz)\n",
522 + PICOS2KHZ(var->pixclock));
523 +diff --git a/fs/io_uring.c b/fs/io_uring.c
524 +index a952288b2ab8e..9654b60a06a58 100644
525 +--- a/fs/io_uring.c
526 ++++ b/fs/io_uring.c
527 +@@ -5198,6 +5198,11 @@ static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
528 + struct io_ring_ctx *ctx = req->ctx;
529 + bool cancel = false;
530 +
531 ++ if (req->file->f_op->may_pollfree) {
532 ++ spin_lock_irq(&ctx->completion_lock);
533 ++ return -EOPNOTSUPP;
534 ++ }
535 ++
536 + INIT_HLIST_NODE(&req->hash_node);
537 + io_init_poll_iocb(poll, mask, wake_func);
538 + poll->file = req->file;
539 +diff --git a/fs/signalfd.c b/fs/signalfd.c
540 +index b94fb5f81797a..41dc597b78cc6 100644
541 +--- a/fs/signalfd.c
542 ++++ b/fs/signalfd.c
543 +@@ -248,6 +248,7 @@ static const struct file_operations signalfd_fops = {
544 + .poll = signalfd_poll,
545 + .read = signalfd_read,
546 + .llseek = noop_llseek,
547 ++ .may_pollfree = true,
548 + };
549 +
550 + static int do_signalfd4(int ufd, sigset_t *mask, int flags)
551 +diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
552 +index db23e455eb91d..bc41ec0c483d0 100644
553 +--- a/fs/xfs/xfs_filestream.c
554 ++++ b/fs/xfs/xfs_filestream.c
555 +@@ -128,11 +128,12 @@ xfs_filestream_pick_ag(
556 + if (!pag->pagf_init) {
557 + err = xfs_alloc_pagf_init(mp, NULL, ag, trylock);
558 + if (err) {
559 +- xfs_perag_put(pag);
560 +- if (err != -EAGAIN)
561 ++ if (err != -EAGAIN) {
562 ++ xfs_perag_put(pag);
563 + return err;
564 ++ }
565 + /* Couldn't lock the AGF, skip this AG. */
566 +- continue;
567 ++ goto next_ag;
568 + }
569 + }
570 +
571 +diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
572 +index ef1d5bb88b93a..775f833146e30 100644
573 +--- a/fs/xfs/xfs_fsops.c
574 ++++ b/fs/xfs/xfs_fsops.c
575 +@@ -376,46 +376,36 @@ xfs_reserve_blocks(
576 + * If the request is larger than the current reservation, reserve the
577 + * blocks before we update the reserve counters. Sample m_fdblocks and
578 + * perform a partial reservation if the request exceeds free space.
579 ++ *
580 ++ * The code below estimates how many blocks it can request from
581 ++ * fdblocks to stash in the reserve pool. This is a classic TOCTOU
582 ++ * race since fdblocks updates are not always coordinated via
583 ++ * m_sb_lock. Set the reserve size even if there's not enough free
584 ++ * space to fill it because mod_fdblocks will refill an undersized
585 ++ * reserve when it can.
586 + */
587 +- error = -ENOSPC;
588 +- do {
589 +- free = percpu_counter_sum(&mp->m_fdblocks) -
590 +- mp->m_alloc_set_aside;
591 +- if (free <= 0)
592 +- break;
593 +-
594 +- delta = request - mp->m_resblks;
595 +- lcounter = free - delta;
596 +- if (lcounter < 0)
597 +- /* We can't satisfy the request, just get what we can */
598 +- fdblks_delta = free;
599 +- else
600 +- fdblks_delta = delta;
601 +-
602 ++ free = percpu_counter_sum(&mp->m_fdblocks) -
603 ++ xfs_fdblocks_unavailable(mp);
604 ++ delta = request - mp->m_resblks;
605 ++ mp->m_resblks = request;
606 ++ if (delta > 0 && free > 0) {
607 + /*
608 + * We'll either succeed in getting space from the free block
609 +- * count or we'll get an ENOSPC. If we get a ENOSPC, it means
610 +- * things changed while we were calculating fdblks_delta and so
611 +- * we should try again to see if there is anything left to
612 +- * reserve.
613 ++ * count or we'll get an ENOSPC. Don't set the reserved flag
614 ++ * here - we don't want to reserve the extra reserve blocks
615 ++ * from the reserve.
616 + *
617 +- * Don't set the reserved flag here - we don't want to reserve
618 +- * the extra reserve blocks from the reserve.....
619 ++ * The desired reserve size can change after we drop the lock.
620 ++ * Use mod_fdblocks to put the space into the reserve or into
621 ++ * fdblocks as appropriate.
622 + */
623 ++ fdblks_delta = min(free, delta);
624 + spin_unlock(&mp->m_sb_lock);
625 + error = xfs_mod_fdblocks(mp, -fdblks_delta, 0);
626 ++ if (!error)
627 ++ xfs_mod_fdblocks(mp, fdblks_delta, 0);
628 + spin_lock(&mp->m_sb_lock);
629 +- } while (error == -ENOSPC);
630 +-
631 +- /*
632 +- * Update the reserve counters if blocks have been successfully
633 +- * allocated.
634 +- */
635 +- if (!error && fdblks_delta) {
636 +- mp->m_resblks += fdblks_delta;
637 +- mp->m_resblks_avail += fdblks_delta;
638 + }
639 +-
640 + out:
641 + if (outval) {
642 + outval->resblks = mp->m_resblks;
643 +diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
644 +index dfa429b77ee28..3a6bc9dc11b5c 100644
645 +--- a/fs/xfs/xfs_mount.h
646 ++++ b/fs/xfs/xfs_mount.h
647 +@@ -406,6 +406,14 @@ extern int xfs_initialize_perag(xfs_mount_t *mp, xfs_agnumber_t agcount,
648 + xfs_agnumber_t *maxagi);
649 + extern void xfs_unmountfs(xfs_mount_t *);
650 +
651 ++/* Accessor added for 5.10.y backport */
652 ++static inline uint64_t
653 ++xfs_fdblocks_unavailable(
654 ++ struct xfs_mount *mp)
655 ++{
656 ++ return mp->m_alloc_set_aside;
657 ++}
658 ++
659 + extern int xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta,
660 + bool reserved);
661 + extern int xfs_mod_frextents(struct xfs_mount *mp, int64_t delta);
662 +diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c
663 +index fe45b0c3970c1..288ea38c43ad0 100644
664 +--- a/fs/xfs/xfs_trans_dquot.c
665 ++++ b/fs/xfs/xfs_trans_dquot.c
666 +@@ -615,7 +615,6 @@ xfs_dqresv_check(
667 + return QUOTA_NL_ISOFTLONGWARN;
668 + }
669 +
670 +- res->warnings++;
671 + return QUOTA_NL_ISOFTWARN;
672 + }
673 +
674 +diff --git a/include/linux/fs.h b/include/linux/fs.h
675 +index 42d246a942283..c8f887641878f 100644
676 +--- a/include/linux/fs.h
677 ++++ b/include/linux/fs.h
678 +@@ -1859,6 +1859,7 @@ struct file_operations {
679 + struct file *file_out, loff_t pos_out,
680 + loff_t len, unsigned int remap_flags);
681 + int (*fadvise)(struct file *, loff_t, loff_t, int);
682 ++ bool may_pollfree;
683 + } __randomize_layout;
684 +
685 + struct inode_operations {
686 +diff --git a/include/linux/rmap.h b/include/linux/rmap.h
687 +index 8d04e7deedc66..297744ea4dd0c 100644
688 +--- a/include/linux/rmap.h
689 ++++ b/include/linux/rmap.h
690 +@@ -39,12 +39,15 @@ struct anon_vma {
691 + atomic_t refcount;
692 +
693 + /*
694 +- * Count of child anon_vmas and VMAs which points to this anon_vma.
695 ++ * Count of child anon_vmas. Equals to the count of all anon_vmas that
696 ++ * have ->parent pointing to this one, including itself.
697 + *
698 + * This counter is used for making decision about reusing anon_vma
699 + * instead of forking new one. See comments in function anon_vma_clone.
700 + */
701 +- unsigned degree;
702 ++ unsigned long num_children;
703 ++ /* Count of VMAs whose ->anon_vma pointer points to this object. */
704 ++ unsigned long num_active_vmas;
705 +
706 + struct anon_vma *parent; /* Parent of this anon_vma */
707 +
708 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
709 +index acbf1875ad506..61fc053a4a4ef 100644
710 +--- a/include/linux/skbuff.h
711 ++++ b/include/linux/skbuff.h
712 +@@ -2222,6 +2222,14 @@ static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
713 +
714 + #endif /* NET_SKBUFF_DATA_USES_OFFSET */
715 +
716 ++static inline void skb_assert_len(struct sk_buff *skb)
717 ++{
718 ++#ifdef CONFIG_DEBUG_NET
719 ++ if (WARN_ONCE(!skb->len, "%s\n", __func__))
720 ++ DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
721 ++#endif /* CONFIG_DEBUG_NET */
722 ++}
723 ++
724 + /*
725 + * Add data to an sk_buff
726 + */
727 +diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
728 +index 822c048934e3f..1138dd3071dbd 100644
729 +--- a/include/linux/skmsg.h
730 ++++ b/include/linux/skmsg.h
731 +@@ -281,7 +281,8 @@ static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start)
732 +
733 + static inline struct sk_psock *sk_psock(const struct sock *sk)
734 + {
735 +- return rcu_dereference_sk_user_data(sk);
736 ++ return __rcu_dereference_sk_user_data_with_flags(sk,
737 ++ SK_USER_DATA_PSOCK);
738 + }
739 +
740 + static inline void sk_psock_queue_msg(struct sk_psock *psock,
741 +diff --git a/include/net/sock.h b/include/net/sock.h
742 +index d31c2b9107e54..d53fb64374767 100644
743 +--- a/include/net/sock.h
744 ++++ b/include/net/sock.h
745 +@@ -527,14 +527,26 @@ enum sk_pacing {
746 + SK_PACING_FQ = 2,
747 + };
748 +
749 +-/* Pointer stored in sk_user_data might not be suitable for copying
750 +- * when cloning the socket. For instance, it can point to a reference
751 +- * counted object. sk_user_data bottom bit is set if pointer must not
752 +- * be copied.
753 ++/* flag bits in sk_user_data
754 ++ *
755 ++ * - SK_USER_DATA_NOCOPY: Pointer stored in sk_user_data might
756 ++ * not be suitable for copying when cloning the socket. For instance,
757 ++ * it can point to a reference counted object. sk_user_data bottom
758 ++ * bit is set if pointer must not be copied.
759 ++ *
760 ++ * - SK_USER_DATA_BPF: Mark whether sk_user_data field is
761 ++ * managed/owned by a BPF reuseport array. This bit should be set
762 ++ * when sk_user_data's sk is added to the bpf's reuseport_array.
763 ++ *
764 ++ * - SK_USER_DATA_PSOCK: Mark whether pointer stored in
765 ++ * sk_user_data points to psock type. This bit should be set
766 ++ * when sk_user_data is assigned to a psock object.
767 + */
768 + #define SK_USER_DATA_NOCOPY 1UL
769 +-#define SK_USER_DATA_BPF 2UL /* Managed by BPF */
770 +-#define SK_USER_DATA_PTRMASK ~(SK_USER_DATA_NOCOPY | SK_USER_DATA_BPF)
771 ++#define SK_USER_DATA_BPF 2UL
772 ++#define SK_USER_DATA_PSOCK 4UL
773 ++#define SK_USER_DATA_PTRMASK ~(SK_USER_DATA_NOCOPY | SK_USER_DATA_BPF |\
774 ++ SK_USER_DATA_PSOCK)
775 +
776 + /**
777 + * sk_user_data_is_nocopy - Test if sk_user_data pointer must not be copied
778 +@@ -547,24 +559,40 @@ static inline bool sk_user_data_is_nocopy(const struct sock *sk)
779 +
780 + #define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))
781 +
782 ++/**
783 ++ * __rcu_dereference_sk_user_data_with_flags - return the pointer
784 ++ * only if argument flags all has been set in sk_user_data. Otherwise
785 ++ * return NULL
786 ++ *
787 ++ * @sk: socket
788 ++ * @flags: flag bits
789 ++ */
790 ++static inline void *
791 ++__rcu_dereference_sk_user_data_with_flags(const struct sock *sk,
792 ++ uintptr_t flags)
793 ++{
794 ++ uintptr_t sk_user_data = (uintptr_t)rcu_dereference(__sk_user_data(sk));
795 ++
796 ++ WARN_ON_ONCE(flags & SK_USER_DATA_PTRMASK);
797 ++
798 ++ if ((sk_user_data & flags) == flags)
799 ++ return (void *)(sk_user_data & SK_USER_DATA_PTRMASK);
800 ++ return NULL;
801 ++}
802 ++
803 + #define rcu_dereference_sk_user_data(sk) \
804 ++ __rcu_dereference_sk_user_data_with_flags(sk, 0)
805 ++#define __rcu_assign_sk_user_data_with_flags(sk, ptr, flags) \
806 + ({ \
807 +- void *__tmp = rcu_dereference(__sk_user_data((sk))); \
808 +- (void *)((uintptr_t)__tmp & SK_USER_DATA_PTRMASK); \
809 +-})
810 +-#define rcu_assign_sk_user_data(sk, ptr) \
811 +-({ \
812 +- uintptr_t __tmp = (uintptr_t)(ptr); \
813 +- WARN_ON_ONCE(__tmp & ~SK_USER_DATA_PTRMASK); \
814 +- rcu_assign_pointer(__sk_user_data((sk)), __tmp); \
815 +-})
816 +-#define rcu_assign_sk_user_data_nocopy(sk, ptr) \
817 +-({ \
818 +- uintptr_t __tmp = (uintptr_t)(ptr); \
819 +- WARN_ON_ONCE(__tmp & ~SK_USER_DATA_PTRMASK); \
820 ++ uintptr_t __tmp1 = (uintptr_t)(ptr), \
821 ++ __tmp2 = (uintptr_t)(flags); \
822 ++ WARN_ON_ONCE(__tmp1 & ~SK_USER_DATA_PTRMASK); \
823 ++ WARN_ON_ONCE(__tmp2 & SK_USER_DATA_PTRMASK); \
824 + rcu_assign_pointer(__sk_user_data((sk)), \
825 +- __tmp | SK_USER_DATA_NOCOPY); \
826 ++ __tmp1 | __tmp2); \
827 + })
828 ++#define rcu_assign_sk_user_data(sk, ptr) \
829 ++ __rcu_assign_sk_user_data_with_flags(sk, ptr, 0)
830 +
831 + /*
832 + * SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK
833 +diff --git a/kernel/kprobes.c b/kernel/kprobes.c
834 +index a397042e46607..a93407da0ae10 100644
835 +--- a/kernel/kprobes.c
836 ++++ b/kernel/kprobes.c
837 +@@ -1786,11 +1786,12 @@ static struct kprobe *__disable_kprobe(struct kprobe *p)
838 + /* Try to disarm and disable this/parent probe */
839 + if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
840 + /*
841 +- * If kprobes_all_disarmed is set, orig_p
842 +- * should have already been disarmed, so
843 +- * skip unneed disarming process.
844 ++ * Don't be lazy here. Even if 'kprobes_all_disarmed'
845 ++ * is false, 'orig_p' might not have been armed yet.
846 ++ * Note arm_all_kprobes() __tries__ to arm all kprobes
847 ++ * on the best effort basis.
848 + */
849 +- if (!kprobes_all_disarmed) {
850 ++ if (!kprobes_all_disarmed && !kprobe_disabled(orig_p)) {
851 + ret = disarm_kprobe(orig_p, true);
852 + if (ret) {
853 + p->flags &= ~KPROBE_FLAG_DISABLED;
854 +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
855 +index a63713dcd05d5..d868df6f13c86 100644
856 +--- a/kernel/trace/ftrace.c
857 ++++ b/kernel/trace/ftrace.c
858 +@@ -2899,6 +2899,16 @@ int ftrace_startup(struct ftrace_ops *ops, int command)
859 +
860 + ftrace_startup_enable(command);
861 +
862 ++ /*
863 ++ * If ftrace is in an undefined state, we just remove ops from list
864 ++ * to prevent the NULL pointer, instead of totally rolling it back and
865 ++ * free trampoline, because those actions could cause further damage.
866 ++ */
867 ++ if (unlikely(ftrace_disabled)) {
868 ++ __unregister_ftrace_function(ops);
869 ++ return -ENODEV;
870 ++ }
871 ++
872 + ops->flags &= ~FTRACE_OPS_FL_ADDING;
873 +
874 + return 0;
875 +diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
876 +index 2082af43d51fb..0717a0dcefed1 100644
877 +--- a/lib/crypto/Kconfig
878 ++++ b/lib/crypto/Kconfig
879 +@@ -33,7 +33,6 @@ config CRYPTO_ARCH_HAVE_LIB_CHACHA
880 +
881 + config CRYPTO_LIB_CHACHA_GENERIC
882 + tristate
883 +- select XOR_BLOCKS
884 + help
885 + This symbol can be depended upon by arch implementations of the
886 + ChaCha library interface that require the generic code as a
887 +diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
888 +index 2919f16981404..c6f6dee087460 100644
889 +--- a/lib/vdso/gettimeofday.c
890 ++++ b/lib/vdso/gettimeofday.c
891 +@@ -46,8 +46,8 @@ static inline bool vdso_cycles_ok(u64 cycles)
892 + #endif
893 +
894 + #ifdef CONFIG_TIME_NS
895 +-static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
896 +- struct __kernel_timespec *ts)
897 ++static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
898 ++ struct __kernel_timespec *ts)
899 + {
900 + const struct vdso_data *vd = __arch_get_timens_vdso_data();
901 + const struct timens_offset *offs = &vdns->offset[clk];
902 +@@ -97,8 +97,8 @@ static __always_inline const struct vdso_data *__arch_get_timens_vdso_data(void)
903 + return NULL;
904 + }
905 +
906 +-static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
907 +- struct __kernel_timespec *ts)
908 ++static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
909 ++ struct __kernel_timespec *ts)
910 + {
911 + return -EINVAL;
912 + }
913 +@@ -159,8 +159,8 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
914 + }
915 +
916 + #ifdef CONFIG_TIME_NS
917 +-static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
918 +- struct __kernel_timespec *ts)
919 ++static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
920 ++ struct __kernel_timespec *ts)
921 + {
922 + const struct vdso_data *vd = __arch_get_timens_vdso_data();
923 + const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
924 +@@ -188,8 +188,8 @@ static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
925 + return 0;
926 + }
927 + #else
928 +-static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
929 +- struct __kernel_timespec *ts)
930 ++static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
931 ++ struct __kernel_timespec *ts)
932 + {
933 + return -1;
934 + }
935 +diff --git a/mm/mmap.c b/mm/mmap.c
936 +index a1ee93f55cebb..b69c9711bb269 100644
937 +--- a/mm/mmap.c
938 ++++ b/mm/mmap.c
939 +@@ -2669,6 +2669,18 @@ static void unmap_region(struct mm_struct *mm,
940 + tlb_gather_mmu(&tlb, mm, start, end);
941 + update_hiwater_rss(mm);
942 + unmap_vmas(&tlb, vma, start, end);
943 ++
944 ++ /*
945 ++ * Ensure we have no stale TLB entries by the time this mapping is
946 ++ * removed from the rmap.
947 ++ * Note that we don't have to worry about nested flushes here because
948 ++ * we're holding the mm semaphore for removing the mapping - so any
949 ++ * concurrent flush in this region has to be coming through the rmap,
950 ++ * and we synchronize against that using the rmap lock.
951 ++ */
952 ++ if ((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) != 0)
953 ++ tlb_flush_mmu(&tlb);
954 ++
955 + free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
956 + next ? next->vm_start : USER_PGTABLES_CEILING);
957 + tlb_finish_mmu(&tlb, start, end);
958 +diff --git a/mm/rmap.c b/mm/rmap.c
959 +index 44ad7bf2e5631..e6f840be18906 100644
960 +--- a/mm/rmap.c
961 ++++ b/mm/rmap.c
962 +@@ -89,7 +89,8 @@ static inline struct anon_vma *anon_vma_alloc(void)
963 + anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
964 + if (anon_vma) {
965 + atomic_set(&anon_vma->refcount, 1);
966 +- anon_vma->degree = 1; /* Reference for first vma */
967 ++ anon_vma->num_children = 0;
968 ++ anon_vma->num_active_vmas = 0;
969 + anon_vma->parent = anon_vma;
970 + /*
971 + * Initialise the anon_vma root to point to itself. If called
972 +@@ -197,6 +198,7 @@ int __anon_vma_prepare(struct vm_area_struct *vma)
973 + anon_vma = anon_vma_alloc();
974 + if (unlikely(!anon_vma))
975 + goto out_enomem_free_avc;
976 ++ anon_vma->num_children++; /* self-parent link for new root */
977 + allocated = anon_vma;
978 + }
979 +
980 +@@ -206,8 +208,7 @@ int __anon_vma_prepare(struct vm_area_struct *vma)
981 + if (likely(!vma->anon_vma)) {
982 + vma->anon_vma = anon_vma;
983 + anon_vma_chain_link(vma, avc, anon_vma);
984 +- /* vma reference or self-parent link for new root */
985 +- anon_vma->degree++;
986 ++ anon_vma->num_active_vmas++;
987 + allocated = NULL;
988 + avc = NULL;
989 + }
990 +@@ -292,19 +293,19 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
991 + anon_vma_chain_link(dst, avc, anon_vma);
992 +
993 + /*
994 +- * Reuse existing anon_vma if its degree lower than two,
995 +- * that means it has no vma and only one anon_vma child.
996 ++ * Reuse existing anon_vma if it has no vma and only one
997 ++ * anon_vma child.
998 + *
999 +- * Do not chose parent anon_vma, otherwise first child
1000 +- * will always reuse it. Root anon_vma is never reused:
1001 ++ * Root anon_vma is never reused:
1002 + * it has self-parent reference and at least one child.
1003 + */
1004 + if (!dst->anon_vma && src->anon_vma &&
1005 +- anon_vma != src->anon_vma && anon_vma->degree < 2)
1006 ++ anon_vma->num_children < 2 &&
1007 ++ anon_vma->num_active_vmas == 0)
1008 + dst->anon_vma = anon_vma;
1009 + }
1010 + if (dst->anon_vma)
1011 +- dst->anon_vma->degree++;
1012 ++ dst->anon_vma->num_active_vmas++;
1013 + unlock_anon_vma_root(root);
1014 + return 0;
1015 +
1016 +@@ -354,6 +355,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
1017 + anon_vma = anon_vma_alloc();
1018 + if (!anon_vma)
1019 + goto out_error;
1020 ++ anon_vma->num_active_vmas++;
1021 + avc = anon_vma_chain_alloc(GFP_KERNEL);
1022 + if (!avc)
1023 + goto out_error_free_anon_vma;
1024 +@@ -374,7 +376,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
1025 + vma->anon_vma = anon_vma;
1026 + anon_vma_lock_write(anon_vma);
1027 + anon_vma_chain_link(vma, avc, anon_vma);
1028 +- anon_vma->parent->degree++;
1029 ++ anon_vma->parent->num_children++;
1030 + anon_vma_unlock_write(anon_vma);
1031 +
1032 + return 0;
1033 +@@ -406,7 +408,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
1034 + * to free them outside the lock.
1035 + */
1036 + if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
1037 +- anon_vma->parent->degree--;
1038 ++ anon_vma->parent->num_children--;
1039 + continue;
1040 + }
1041 +
1042 +@@ -414,7 +416,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
1043 + anon_vma_chain_free(avc);
1044 + }
1045 + if (vma->anon_vma)
1046 +- vma->anon_vma->degree--;
1047 ++ vma->anon_vma->num_active_vmas--;
1048 + unlock_anon_vma_root(root);
1049 +
1050 + /*
1051 +@@ -425,7 +427,8 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
1052 + list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
1053 + struct anon_vma *anon_vma = avc->anon_vma;
1054 +
1055 +- VM_WARN_ON(anon_vma->degree);
1056 ++ VM_WARN_ON(anon_vma->num_children);
1057 ++ VM_WARN_ON(anon_vma->num_active_vmas);
1058 + put_anon_vma(anon_vma);
1059 +
1060 + list_del(&avc->same_vma);
1061 +diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
1062 +index 88980015ba813..0c38af2ff2097 100644
1063 +--- a/net/bluetooth/l2cap_core.c
1064 ++++ b/net/bluetooth/l2cap_core.c
1065 +@@ -1988,11 +1988,11 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1066 + src_match = !bacmp(&c->src, src);
1067 + dst_match = !bacmp(&c->dst, dst);
1068 + if (src_match && dst_match) {
1069 +- c = l2cap_chan_hold_unless_zero(c);
1070 +- if (c) {
1071 +- read_unlock(&chan_list_lock);
1072 +- return c;
1073 +- }
1074 ++ if (!l2cap_chan_hold_unless_zero(c))
1075 ++ continue;
1076 ++
1077 ++ read_unlock(&chan_list_lock);
1078 ++ return c;
1079 + }
1080 +
1081 + /* Closest match */
1082 +diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
1083 +index f8b231bbbe381..2983e926fe3cc 100644
1084 +--- a/net/bpf/test_run.c
1085 ++++ b/net/bpf/test_run.c
1086 +@@ -441,6 +441,9 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
1087 + {
1088 + struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
1089 +
1090 ++ if (!skb->len)
1091 ++ return -EINVAL;
1092 ++
1093 + if (!__skb)
1094 + return 0;
1095 +
1096 +diff --git a/net/core/dev.c b/net/core/dev.c
1097 +index 8355cc5e11a98..34b5aab42b912 100644
1098 +--- a/net/core/dev.c
1099 ++++ b/net/core/dev.c
1100 +@@ -4097,6 +4097,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
1101 + bool again = false;
1102 +
1103 + skb_reset_mac_header(skb);
1104 ++ skb_assert_len(skb);
1105 +
1106 + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
1107 + __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
1108 +diff --git a/net/core/neighbour.c b/net/core/neighbour.c
1109 +index 52a1c8725337b..434c5aab83ea2 100644
1110 +--- a/net/core/neighbour.c
1111 ++++ b/net/core/neighbour.c
1112 +@@ -280,11 +280,26 @@ static int neigh_del_timer(struct neighbour *n)
1113 + return 0;
1114 + }
1115 +
1116 +-static void pneigh_queue_purge(struct sk_buff_head *list)
1117 ++static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net)
1118 + {
1119 ++ struct sk_buff_head tmp;
1120 ++ unsigned long flags;
1121 + struct sk_buff *skb;
1122 +
1123 +- while ((skb = skb_dequeue(list)) != NULL) {
1124 ++ skb_queue_head_init(&tmp);
1125 ++ spin_lock_irqsave(&list->lock, flags);
1126 ++ skb = skb_peek(list);
1127 ++ while (skb != NULL) {
1128 ++ struct sk_buff *skb_next = skb_peek_next(skb, list);
1129 ++ if (net == NULL || net_eq(dev_net(skb->dev), net)) {
1130 ++ __skb_unlink(skb, list);
1131 ++ __skb_queue_tail(&tmp, skb);
1132 ++ }
1133 ++ skb = skb_next;
1134 ++ }
1135 ++ spin_unlock_irqrestore(&list->lock, flags);
1136 ++
1137 ++ while ((skb = __skb_dequeue(&tmp))) {
1138 + dev_put(skb->dev);
1139 + kfree_skb(skb);
1140 + }
1141 +@@ -358,9 +373,9 @@ static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
1142 + write_lock_bh(&tbl->lock);
1143 + neigh_flush_dev(tbl, dev, skip_perm);
1144 + pneigh_ifdown_and_unlock(tbl, dev);
1145 +-
1146 +- del_timer_sync(&tbl->proxy_timer);
1147 +- pneigh_queue_purge(&tbl->proxy_queue);
1148 ++ pneigh_queue_purge(&tbl->proxy_queue, dev_net(dev));
1149 ++ if (skb_queue_empty_lockless(&tbl->proxy_queue))
1150 ++ del_timer_sync(&tbl->proxy_timer);
1151 + return 0;
1152 + }
1153 +
1154 +@@ -1743,7 +1758,7 @@ int neigh_table_clear(int index, struct neigh_table *tbl)
1155 + /* It is not clean... Fix it to unload IPv6 module safely */
1156 + cancel_delayed_work_sync(&tbl->gc_work);
1157 + del_timer_sync(&tbl->proxy_timer);
1158 +- pneigh_queue_purge(&tbl->proxy_queue);
1159 ++ pneigh_queue_purge(&tbl->proxy_queue, NULL);
1160 + neigh_ifdown(tbl, NULL);
1161 + if (atomic_read(&tbl->entries))
1162 + pr_crit("neighbour leakage\n");
1163 +diff --git a/net/core/skmsg.c b/net/core/skmsg.c
1164 +index 545181a1ae043..bb4fbc60b272e 100644
1165 +--- a/net/core/skmsg.c
1166 ++++ b/net/core/skmsg.c
1167 +@@ -612,7 +612,9 @@ struct sk_psock *sk_psock_init(struct sock *sk, int node)
1168 + sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
1169 + refcount_set(&psock->refcnt, 1);
1170 +
1171 +- rcu_assign_sk_user_data_nocopy(sk, psock);
1172 ++ __rcu_assign_sk_user_data_with_flags(sk, psock,
1173 ++ SK_USER_DATA_NOCOPY |
1174 ++ SK_USER_DATA_PSOCK);
1175 + sock_hold(sk);
1176 +
1177 + out:
1178 +diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
1179 +index 6bafd3876aff3..8bf70ce03f951 100644
1180 +--- a/net/netfilter/Kconfig
1181 ++++ b/net/netfilter/Kconfig
1182 +@@ -118,7 +118,6 @@ config NF_CONNTRACK_ZONES
1183 +
1184 + config NF_CONNTRACK_PROCFS
1185 + bool "Supply CT list in procfs (OBSOLETE)"
1186 +- default y
1187 + depends on PROC_FS
1188 + help
1189 + This option enables for the list of known conntrack entries
1190 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
1191 +index 5ee600d108a0a..b70b06e312bd0 100644
1192 +--- a/net/packet/af_packet.c
1193 ++++ b/net/packet/af_packet.c
1194 +@@ -2986,8 +2986,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
1195 + if (err)
1196 + goto out_free;
1197 +
1198 +- if (sock->type == SOCK_RAW &&
1199 +- !dev_validate_header(dev, skb->data, len)) {
1200 ++ if ((sock->type == SOCK_RAW &&
1201 ++ !dev_validate_header(dev, skb->data, len)) || !skb->len) {
1202 + err = -EINVAL;
1203 + goto out_free;
1204 + }
1205 +diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
1206 +index 12a87be0fb446..42154b6df6529 100644
1207 +--- a/scripts/Makefile.modpost
1208 ++++ b/scripts/Makefile.modpost
1209 +@@ -87,8 +87,7 @@ obj := $(KBUILD_EXTMOD)
1210 + src := $(obj)
1211 +
1212 + # Include the module's Makefile to find KBUILD_EXTRA_SYMBOLS
1213 +-include $(if $(wildcard $(KBUILD_EXTMOD)/Kbuild), \
1214 +- $(KBUILD_EXTMOD)/Kbuild, $(KBUILD_EXTMOD)/Makefile)
1215 ++include $(if $(wildcard $(src)/Kbuild), $(src)/Kbuild, $(src)/Makefile)
1216 +
1217 + # modpost option for external modules
1218 + MODPOST += -e