Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Tue, 27 Nov 2018 16:22:25
Message-Id: 1543335664.94abc16a9aa825808fddff118dad207d3546878b.mpagano@gentoo
1 commit: 94abc16a9aa825808fddff118dad207d3546878b
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Tue Nov 27 16:21:04 2018 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Tue Nov 27 16:21:04 2018 +0000
6 URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=94abc16a9aa825808fddff118dad207d3546878b
7
8 proj/linux-patches: Linux patch 4.9.141
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1140_linux-4.9.141.patch | 1369 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 1373 insertions(+)
15
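For reference, a minimal sketch of how an entry in this patch set is typically applied on top of a vanilla kernel tree; the directory and patch paths below are illustrative assumptions, not part of this commit:

  cd /usr/src/linux-4.9.140                                   # vanilla 4.9.140 source tree (assumed path)
  patch -p1 < /path/to/linux-patches/1140_linux-4.9.141.patch # patch uses a/ b/ prefixes, so -p1
  grep -E '^(VERSION|PATCHLEVEL|SUBLEVEL)' Makefile           # SUBLEVEL should now read 141
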
16 diff --git a/0000_README b/0000_README
17 index 316dfbb..2838e5f 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -603,6 +603,10 @@ Patch: 1139_linux-4.9.140.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.9.140
23
24 +Patch: 1140_linux-4.9.141.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.9.141
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1140_linux-4.9.141.patch b/1140_linux-4.9.141.patch
33 new file mode 100644
34 index 0000000..2cccc46
35 --- /dev/null
36 +++ b/1140_linux-4.9.141.patch
37 @@ -0,0 +1,1369 @@
38 +diff --git a/Makefile b/Makefile
39 +index a9aed2326233..8eba73521a7f 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,6 +1,6 @@
43 + VERSION = 4
44 + PATCHLEVEL = 9
45 +-SUBLEVEL = 140
46 ++SUBLEVEL = 141
47 + EXTRAVERSION =
48 + NAME = Roaring Lionus
49 +
50 +diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
51 +index 0d551576eb57..4724b8f0b625 100644
52 +--- a/arch/arm64/include/asm/percpu.h
53 ++++ b/arch/arm64/include/asm/percpu.h
54 +@@ -92,6 +92,7 @@ static inline unsigned long __percpu_##op(void *ptr, \
55 + : [val] "Ir" (val)); \
56 + break; \
57 + default: \
58 ++ ret = 0; \
59 + BUILD_BUG(); \
60 + } \
61 + \
62 +@@ -121,6 +122,7 @@ static inline unsigned long __percpu_read(void *ptr, int size)
63 + ret = ACCESS_ONCE(*(u64 *)ptr);
64 + break;
65 + default:
66 ++ ret = 0;
67 + BUILD_BUG();
68 + }
69 +
70 +@@ -190,6 +192,7 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
71 + : [val] "r" (val));
72 + break;
73 + default:
74 ++ ret = 0;
75 + BUILD_BUG();
76 + }
77 +
78 +diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
79 +index 6cc947896c77..ca7c3c34f94b 100644
80 +--- a/arch/s390/kernel/vdso32/Makefile
81 ++++ b/arch/s390/kernel/vdso32/Makefile
82 +@@ -32,7 +32,7 @@ UBSAN_SANITIZE := n
83 + $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
84 +
85 + # link rule for the .so file, .lds has to be first
86 +-$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32)
87 ++$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) FORCE
88 + $(call if_changed,vdso32ld)
89 +
90 + # strip rule for the .so file
91 +@@ -41,12 +41,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
92 + $(call if_changed,objcopy)
93 +
94 + # assembly rules for the .S files
95 +-$(obj-vdso32): %.o: %.S
96 ++$(obj-vdso32): %.o: %.S FORCE
97 + $(call if_changed_dep,vdso32as)
98 +
99 + # actual build commands
100 + quiet_cmd_vdso32ld = VDSO32L $@
101 +- cmd_vdso32ld = $(CC) $(c_flags) -Wl,-T $^ -o $@
102 ++ cmd_vdso32ld = $(CC) $(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@
103 + quiet_cmd_vdso32as = VDSO32A $@
104 + cmd_vdso32as = $(CC) $(a_flags) -c -o $@ $<
105 +
106 +diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
107 +index 2d54c18089eb..84af2b6b64c4 100644
108 +--- a/arch/s390/kernel/vdso64/Makefile
109 ++++ b/arch/s390/kernel/vdso64/Makefile
110 +@@ -32,7 +32,7 @@ UBSAN_SANITIZE := n
111 + $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
112 +
113 + # link rule for the .so file, .lds has to be first
114 +-$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64)
115 ++$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) FORCE
116 + $(call if_changed,vdso64ld)
117 +
118 + # strip rule for the .so file
119 +@@ -41,12 +41,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
120 + $(call if_changed,objcopy)
121 +
122 + # assembly rules for the .S files
123 +-$(obj-vdso64): %.o: %.S
124 ++$(obj-vdso64): %.o: %.S FORCE
125 + $(call if_changed_dep,vdso64as)
126 +
127 + # actual build commands
128 + quiet_cmd_vdso64ld = VDSO64L $@
129 +- cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@
130 ++ cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@
131 + quiet_cmd_vdso64as = VDSO64A $@
132 + cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<
133 +
134 +diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c
135 +index 0dac2640c3a7..e73a1165d261 100644
136 +--- a/arch/s390/numa/numa.c
137 ++++ b/arch/s390/numa/numa.c
138 +@@ -53,6 +53,7 @@ int __node_distance(int a, int b)
139 + {
140 + return mode->distance ? mode->distance(a, b) : 0;
141 + }
142 ++EXPORT_SYMBOL(__node_distance);
143 +
144 + int numa_debug_enabled;
145 +
146 +diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
147 +index 23025d645160..0a99d4515065 100644
148 +--- a/arch/um/os-Linux/skas/process.c
149 ++++ b/arch/um/os-Linux/skas/process.c
150 +@@ -578,6 +578,11 @@ int start_idle_thread(void *stack, jmp_buf *switch_buf)
151 + fatal_sigsegv();
152 + }
153 + longjmp(*switch_buf, 1);
154 ++
155 ++ /* unreachable */
156 ++ printk(UM_KERN_ERR "impossible long jump!");
157 ++ fatal_sigsegv();
158 ++ return 0;
159 + }
160 +
161 + void initial_thread_cb_skas(void (*proc)(void *), void *arg)
162 +diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
163 +index 03250e1f1103..d92eacaef231 100644
164 +--- a/drivers/acpi/acpi_platform.c
165 ++++ b/drivers/acpi/acpi_platform.c
166 +@@ -30,6 +30,7 @@ static const struct acpi_device_id forbidden_id_list[] = {
167 + {"PNP0200", 0}, /* AT DMA Controller */
168 + {"ACPI0009", 0}, /* IOxAPIC */
169 + {"ACPI000A", 0}, /* IOAPIC */
170 ++ {"SMB0001", 0}, /* ACPI SMBUS virtual device */
171 + {"", 0},
172 + };
173 +
174 +diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
175 +index ce8fc680785b..396e358c2cee 100644
176 +--- a/drivers/acpi/acpi_watchdog.c
177 ++++ b/drivers/acpi/acpi_watchdog.c
178 +@@ -17,18 +17,77 @@
179 +
180 + #include "internal.h"
181 +
182 ++#ifdef CONFIG_RTC_MC146818_LIB
183 ++#include <linux/mc146818rtc.h>
184 ++
185 ++/*
186 ++ * There are several systems where the WDAT table is accessing RTC SRAM to
187 ++ * store persistent information. This does not work well with the Linux RTC
188 ++ * driver so on those systems we skip WDAT driver and prefer iTCO_wdt
189 ++ * instead.
190 ++ *
191 ++ * See also https://bugzilla.kernel.org/show_bug.cgi?id=199033.
192 ++ */
193 ++static bool acpi_watchdog_uses_rtc(const struct acpi_table_wdat *wdat)
194 ++{
195 ++ const struct acpi_wdat_entry *entries;
196 ++ int i;
197 ++
198 ++ entries = (struct acpi_wdat_entry *)(wdat + 1);
199 ++ for (i = 0; i < wdat->entries; i++) {
200 ++ const struct acpi_generic_address *gas;
201 ++
202 ++ gas = &entries[i].register_region;
203 ++ if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
204 ++ switch (gas->address) {
205 ++ case RTC_PORT(0):
206 ++ case RTC_PORT(1):
207 ++ case RTC_PORT(2):
208 ++ case RTC_PORT(3):
209 ++ return true;
210 ++ }
211 ++ }
212 ++ }
213 ++
214 ++ return false;
215 ++}
216 ++#else
217 ++static bool acpi_watchdog_uses_rtc(const struct acpi_table_wdat *wdat)
218 ++{
219 ++ return false;
220 ++}
221 ++#endif
222 ++
223 ++static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void)
224 ++{
225 ++ const struct acpi_table_wdat *wdat = NULL;
226 ++ acpi_status status;
227 ++
228 ++ if (acpi_disabled)
229 ++ return NULL;
230 ++
231 ++ status = acpi_get_table(ACPI_SIG_WDAT, 0,
232 ++ (struct acpi_table_header **)&wdat);
233 ++ if (ACPI_FAILURE(status)) {
234 ++ /* It is fine if there is no WDAT */
235 ++ return NULL;
236 ++ }
237 ++
238 ++ if (acpi_watchdog_uses_rtc(wdat)) {
239 ++ pr_info("Skipping WDAT on this system because it uses RTC SRAM\n");
240 ++ return NULL;
241 ++ }
242 ++
243 ++ return wdat;
244 ++}
245 ++
246 + /**
247 + * Returns true if this system should prefer ACPI based watchdog instead of
248 + * the native one (which are typically the same hardware).
249 + */
250 + bool acpi_has_watchdog(void)
251 + {
252 +- struct acpi_table_header hdr;
253 +-
254 +- if (acpi_disabled)
255 +- return false;
256 +-
257 +- return ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_WDAT, 0, &hdr));
258 ++ return !!acpi_watchdog_get_wdat();
259 + }
260 + EXPORT_SYMBOL_GPL(acpi_has_watchdog);
261 +
262 +@@ -41,12 +100,10 @@ void __init acpi_watchdog_init(void)
263 + struct platform_device *pdev;
264 + struct resource *resources;
265 + size_t nresources = 0;
266 +- acpi_status status;
267 + int i;
268 +
269 +- status = acpi_get_table(ACPI_SIG_WDAT, 0,
270 +- (struct acpi_table_header **)&wdat);
271 +- if (ACPI_FAILURE(status)) {
272 ++ wdat = acpi_watchdog_get_wdat();
273 ++ if (!wdat) {
274 + /* It is fine if there is no WDAT */
275 + return;
276 + }
277 +diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
278 +index b7c0b69a02f5..d64a53d3270a 100644
279 +--- a/drivers/block/zram/zram_drv.c
280 ++++ b/drivers/block/zram/zram_drv.c
281 +@@ -1223,6 +1223,11 @@ static struct attribute_group zram_disk_attr_group = {
282 + .attrs = zram_disk_attrs,
283 + };
284 +
285 ++static const struct attribute_group *zram_disk_attr_groups[] = {
286 ++ &zram_disk_attr_group,
287 ++ NULL,
288 ++};
289 ++
290 + /*
291 + * Allocate and initialize new zram device. the function returns
292 + * '>= 0' device_id upon success, and negative value otherwise.
293 +@@ -1303,24 +1308,15 @@ static int zram_add(void)
294 + zram->disk->queue->limits.discard_zeroes_data = 0;
295 + queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);
296 +
297 ++ disk_to_dev(zram->disk)->groups = zram_disk_attr_groups;
298 + add_disk(zram->disk);
299 +
300 +- ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
301 +- &zram_disk_attr_group);
302 +- if (ret < 0) {
303 +- pr_err("Error creating sysfs group for device %d\n",
304 +- device_id);
305 +- goto out_free_disk;
306 +- }
307 + strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
308 + zram->meta = NULL;
309 +
310 + pr_info("Added device: %s\n", zram->disk->disk_name);
311 + return device_id;
312 +
313 +-out_free_disk:
314 +- del_gendisk(zram->disk);
315 +- put_disk(zram->disk);
316 + out_free_queue:
317 + blk_cleanup_queue(queue);
318 + out_free_idr:
319 +@@ -1348,16 +1344,6 @@ static int zram_remove(struct zram *zram)
320 + zram->claim = true;
321 + mutex_unlock(&bdev->bd_mutex);
322 +
323 +- /*
324 +- * Remove sysfs first, so no one will perform a disksize
325 +- * store while we destroy the devices. This also helps during
326 +- * hot_remove -- zram_reset_device() is the last holder of
327 +- * ->init_lock, no later/concurrent disksize_store() or any
328 +- * other sysfs handlers are possible.
329 +- */
330 +- sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
331 +- &zram_disk_attr_group);
332 +-
333 + /* Make sure all the pending I/O are finished */
334 + fsync_bdev(bdev);
335 + zram_reset_device(zram);
336 +diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
337 +index 4a9493a4159f..3cc9bff9d99d 100644
338 +--- a/drivers/bluetooth/Kconfig
339 ++++ b/drivers/bluetooth/Kconfig
340 +@@ -125,7 +125,6 @@ config BT_HCIUART_LL
341 + config BT_HCIUART_3WIRE
342 + bool "Three-wire UART (H5) protocol support"
343 + depends on BT_HCIUART
344 +- depends on BT_HCIUART_SERDEV
345 + help
346 + The HCI Three-wire UART Transport Layer makes it possible to
347 + user the Bluetooth HCI over a serial port interface. The HCI
348 +diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
349 +index 20724abd38bd..7df6b5b1e7ee 100644
350 +--- a/drivers/clk/clk-fixed-factor.c
351 ++++ b/drivers/clk/clk-fixed-factor.c
352 +@@ -210,6 +210,7 @@ static int of_fixed_factor_clk_remove(struct platform_device *pdev)
353 + {
354 + struct clk *clk = platform_get_drvdata(pdev);
355 +
356 ++ of_clk_del_provider(pdev->dev.of_node);
357 + clk_unregister_fixed_factor(clk);
358 +
359 + return 0;
360 +diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
361 +index b5c46b3f8764..6d6475c32ee5 100644
362 +--- a/drivers/clk/clk-fixed-rate.c
363 ++++ b/drivers/clk/clk-fixed-rate.c
364 +@@ -200,6 +200,7 @@ static int of_fixed_clk_remove(struct platform_device *pdev)
365 + {
366 + struct clk *clk = platform_get_drvdata(pdev);
367 +
368 ++ of_clk_del_provider(pdev->dev.of_node);
369 + clk_unregister_fixed_rate(clk);
370 +
371 + return 0;
372 +diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
373 +index 07fb667e258f..13c09a740840 100644
374 +--- a/drivers/clk/samsung/clk-exynos5420.c
375 ++++ b/drivers/clk/samsung/clk-exynos5420.c
376 +@@ -280,6 +280,7 @@ static const struct samsung_clk_reg_dump exynos5420_set_clksrc[] = {
377 + { .offset = GATE_BUS_TOP, .value = 0xffffffff, },
378 + { .offset = GATE_BUS_DISP1, .value = 0xffffffff, },
379 + { .offset = GATE_IP_PERIC, .value = 0xffffffff, },
380 ++ { .offset = GATE_IP_PERIS, .value = 0xffffffff, },
381 + };
382 +
383 + static int exynos5420_clk_suspend(void)
384 +diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
385 +index 83d2f43b5a2f..c93dcfedc219 100644
386 +--- a/drivers/gpu/drm/drm_edid.c
387 ++++ b/drivers/gpu/drm/drm_edid.c
388 +@@ -116,6 +116,9 @@ static const struct edid_quirk {
389 + /* SDC panel of Lenovo B50-80 reports 8 bpc, but is a 6 bpc panel */
390 + { "SDC", 0x3652, EDID_QUIRK_FORCE_6BPC },
391 +
392 ++ /* BOE model 0x0771 reports 8 bpc, but is a 6 bpc panel */
393 ++ { "BOE", 0x0771, EDID_QUIRK_FORCE_6BPC },
394 ++
395 + /* Belinea 10 15 55 */
396 + { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
397 + { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
398 +diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
399 +index 7f8ff39ed44b..d02ee5304217 100644
400 +--- a/drivers/hid/uhid.c
401 ++++ b/drivers/hid/uhid.c
402 +@@ -12,6 +12,7 @@
403 +
404 + #include <linux/atomic.h>
405 + #include <linux/compat.h>
406 ++#include <linux/cred.h>
407 + #include <linux/device.h>
408 + #include <linux/fs.h>
409 + #include <linux/hid.h>
410 +@@ -24,6 +25,7 @@
411 + #include <linux/spinlock.h>
412 + #include <linux/uhid.h>
413 + #include <linux/wait.h>
414 ++#include <linux/uaccess.h>
415 +
416 + #define UHID_NAME "uhid"
417 + #define UHID_BUFSIZE 32
418 +@@ -721,6 +723,17 @@ static ssize_t uhid_char_write(struct file *file, const char __user *buffer,
419 +
420 + switch (uhid->input_buf.type) {
421 + case UHID_CREATE:
422 ++ /*
423 ++ * 'struct uhid_create_req' contains a __user pointer which is
424 ++ * copied from, so it's unsafe to allow this with elevated
425 ++ * privileges (e.g. from a setuid binary) or via kernel_write().
426 ++ */
427 ++ if (file->f_cred != current_cred() || uaccess_kernel()) {
428 ++ pr_err_once("UHID_CREATE from different security context by process %d (%s), this is not allowed.\n",
429 ++ task_tgid_vnr(current), current->comm);
430 ++ ret = -EACCES;
431 ++ goto unlock;
432 ++ }
433 + ret = uhid_dev_create(uhid, &uhid->input_buf);
434 + break;
435 + case UHID_CREATE2:
436 +diff --git a/drivers/hwmon/ibmpowernv.c b/drivers/hwmon/ibmpowernv.c
437 +index 6d2e6605751c..18b3c8f258bf 100644
438 +--- a/drivers/hwmon/ibmpowernv.c
439 ++++ b/drivers/hwmon/ibmpowernv.c
440 +@@ -114,7 +114,7 @@ static ssize_t show_label(struct device *dev, struct device_attribute *devattr,
441 + return sprintf(buf, "%s\n", sdata->label);
442 + }
443 +
444 +-static int __init get_logical_cpu(int hwcpu)
445 ++static int get_logical_cpu(int hwcpu)
446 + {
447 + int cpu;
448 +
449 +@@ -125,9 +125,8 @@ static int __init get_logical_cpu(int hwcpu)
450 + return -ENOENT;
451 + }
452 +
453 +-static void __init make_sensor_label(struct device_node *np,
454 +- struct sensor_data *sdata,
455 +- const char *label)
456 ++static void make_sensor_label(struct device_node *np,
457 ++ struct sensor_data *sdata, const char *label)
458 + {
459 + u32 id;
460 + size_t n;
461 +diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
462 +index 567d86835f00..1fda2873375f 100644
463 +--- a/drivers/media/v4l2-core/v4l2-event.c
464 ++++ b/drivers/media/v4l2-core/v4l2-event.c
465 +@@ -197,6 +197,22 @@ int v4l2_event_pending(struct v4l2_fh *fh)
466 + }
467 + EXPORT_SYMBOL_GPL(v4l2_event_pending);
468 +
469 ++static void __v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev)
470 ++{
471 ++ struct v4l2_fh *fh = sev->fh;
472 ++ unsigned int i;
473 ++
474 ++ lockdep_assert_held(&fh->subscribe_lock);
475 ++ assert_spin_locked(&fh->vdev->fh_lock);
476 ++
477 ++ /* Remove any pending events for this subscription */
478 ++ for (i = 0; i < sev->in_use; i++) {
479 ++ list_del(&sev->events[sev_pos(sev, i)].list);
480 ++ fh->navailable--;
481 ++ }
482 ++ list_del(&sev->list);
483 ++}
484 ++
485 + int v4l2_event_subscribe(struct v4l2_fh *fh,
486 + const struct v4l2_event_subscription *sub, unsigned elems,
487 + const struct v4l2_subscribed_event_ops *ops)
488 +@@ -228,27 +244,23 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
489 +
490 + spin_lock_irqsave(&fh->vdev->fh_lock, flags);
491 + found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
492 ++ if (!found_ev)
493 ++ list_add(&sev->list, &fh->subscribed);
494 + spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
495 +
496 + if (found_ev) {
497 + /* Already listening */
498 + kfree(sev);
499 +- goto out_unlock;
500 +- }
501 +-
502 +- if (sev->ops && sev->ops->add) {
503 ++ } else if (sev->ops && sev->ops->add) {
504 + ret = sev->ops->add(sev, elems);
505 + if (ret) {
506 ++ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
507 ++ __v4l2_event_unsubscribe(sev);
508 ++ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
509 + kfree(sev);
510 +- goto out_unlock;
511 + }
512 + }
513 +
514 +- spin_lock_irqsave(&fh->vdev->fh_lock, flags);
515 +- list_add(&sev->list, &fh->subscribed);
516 +- spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
517 +-
518 +-out_unlock:
519 + mutex_unlock(&fh->subscribe_lock);
520 +
521 + return ret;
522 +@@ -283,7 +295,6 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
523 + {
524 + struct v4l2_subscribed_event *sev;
525 + unsigned long flags;
526 +- int i;
527 +
528 + if (sub->type == V4L2_EVENT_ALL) {
529 + v4l2_event_unsubscribe_all(fh);
530 +@@ -295,14 +306,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
531 + spin_lock_irqsave(&fh->vdev->fh_lock, flags);
532 +
533 + sev = v4l2_event_subscribed(fh, sub->type, sub->id);
534 +- if (sev != NULL) {
535 +- /* Remove any pending events for this subscription */
536 +- for (i = 0; i < sev->in_use; i++) {
537 +- list_del(&sev->events[sev_pos(sev, i)].list);
538 +- fh->navailable--;
539 +- }
540 +- list_del(&sev->list);
541 +- }
542 ++ if (sev != NULL)
543 ++ __v4l2_event_unsubscribe(sev);
544 +
545 + spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
546 +
547 +diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
548 +index f7ca1fab4808..4df4a1f402be 100644
549 +--- a/drivers/media/v4l2-core/videobuf2-core.c
550 ++++ b/drivers/media/v4l2-core/videobuf2-core.c
551 +@@ -914,12 +914,9 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
552 + dprintk(4, "done processing on buffer %d, state: %d\n",
553 + vb->index, state);
554 +
555 +- if (state != VB2_BUF_STATE_QUEUED &&
556 +- state != VB2_BUF_STATE_REQUEUEING) {
557 +- /* sync buffers */
558 +- for (plane = 0; plane < vb->num_planes; ++plane)
559 +- call_void_memop(vb, finish, vb->planes[plane].mem_priv);
560 +- }
561 ++ /* sync buffers */
562 ++ for (plane = 0; plane < vb->num_planes; ++plane)
563 ++ call_void_memop(vb, finish, vb->planes[plane].mem_priv);
564 +
565 + spin_lock_irqsave(&q->done_lock, flags);
566 + if (state == VB2_BUF_STATE_QUEUED ||
567 +diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
568 +index 0516ecda54d3..3a6e4ec21c87 100644
569 +--- a/drivers/misc/atmel-ssc.c
570 ++++ b/drivers/misc/atmel-ssc.c
571 +@@ -130,7 +130,7 @@ static const struct of_device_id atmel_ssc_dt_ids[] = {
572 + MODULE_DEVICE_TABLE(of, atmel_ssc_dt_ids);
573 + #endif
574 +
575 +-static inline const struct atmel_ssc_platform_data * __init
576 ++static inline const struct atmel_ssc_platform_data *
577 + atmel_ssc_get_driver_data(struct platform_device *pdev)
578 + {
579 + if (pdev->dev.of_node) {
580 +diff --git a/drivers/misc/sgi-gru/grukdump.c b/drivers/misc/sgi-gru/grukdump.c
581 +index 313da3150262..1540a7785e14 100644
582 +--- a/drivers/misc/sgi-gru/grukdump.c
583 ++++ b/drivers/misc/sgi-gru/grukdump.c
584 +@@ -27,6 +27,9 @@
585 + #include <linux/delay.h>
586 + #include <linux/bitops.h>
587 + #include <asm/uv/uv_hub.h>
588 ++
589 ++#include <linux/nospec.h>
590 ++
591 + #include "gru.h"
592 + #include "grutables.h"
593 + #include "gruhandles.h"
594 +@@ -196,6 +199,7 @@ int gru_dump_chiplet_request(unsigned long arg)
595 + /* Currently, only dump by gid is implemented */
596 + if (req.gid >= gru_max_gids)
597 + return -EINVAL;
598 ++ req.gid = array_index_nospec(req.gid, gru_max_gids);
599 +
600 + gru = GID_TO_GRU(req.gid);
601 + ubuf = req.buf;
602 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
603 +index b2c08e4d2a9b..bae7b7f9b1cf 100644
604 +--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
605 ++++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
606 +@@ -132,6 +132,9 @@ struct qed_spq_entry {
607 + enum spq_mode comp_mode;
608 + struct qed_spq_comp_cb comp_cb;
609 + struct qed_spq_comp_done comp_done; /* SPQ_MODE_EBLOCK */
610 ++
611 ++ /* Posted entry for unlimited list entry in EBLOCK mode */
612 ++ struct qed_spq_entry *post_ent;
613 + };
614 +
615 + struct qed_eq {
616 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
617 +index 2888eb0628f8..ac69ff3f7c5c 100644
618 +--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
619 ++++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
620 +@@ -56,7 +56,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
621 +
622 + case QED_SPQ_MODE_BLOCK:
623 + if (!p_data->p_comp_data)
624 +- return -EINVAL;
625 ++ goto err;
626 +
627 + p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
628 + break;
629 +@@ -71,7 +71,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
630 + default:
631 + DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
632 + p_ent->comp_mode);
633 +- return -EINVAL;
634 ++ goto err;
635 + }
636 +
637 + DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
638 +@@ -85,6 +85,18 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
639 + memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));
640 +
641 + return 0;
642 ++
643 ++err:
644 ++ /* qed_spq_get_entry() can either get an entry from the free_pool,
645 ++ * or, if no entries are left, allocate a new entry and add it to
646 ++ * the unlimited_pending list.
647 ++ */
648 ++ if (p_ent->queue == &p_hwfn->p_spq->unlimited_pending)
649 ++ kfree(p_ent);
650 ++ else
651 ++ qed_spq_return_entry(p_hwfn, p_ent);
652 ++
653 ++ return -EINVAL;
654 + }
655 +
656 + static enum tunnel_clss qed_tunn_get_clss_type(u8 type)
657 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
658 +index 9fbaf9429fd0..80c8c7f0d932 100644
659 +--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
660 ++++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
661 +@@ -595,6 +595,8 @@ static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
662 + /* EBLOCK responsible to free the allocated p_ent */
663 + if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
664 + kfree(p_ent);
665 ++ else
666 ++ p_ent->post_ent = p_en2;
667 +
668 + p_ent = p_en2;
669 + }
670 +@@ -678,6 +680,25 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
671 + SPQ_HIGH_PRI_RESERVE_DEFAULT);
672 + }
673 +
674 ++/* Avoid overriding of SPQ entries when getting out-of-order completions, by
675 ++ * marking the completions in a bitmap and increasing the chain consumer only
676 ++ * for the first successive completed entries.
677 ++ */
678 ++static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo)
679 ++{
680 ++ u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
681 ++ struct qed_spq *p_spq = p_hwfn->p_spq;
682 ++
683 ++ __set_bit(pos, p_spq->p_comp_bitmap);
684 ++ while (test_bit(p_spq->comp_bitmap_idx,
685 ++ p_spq->p_comp_bitmap)) {
686 ++ __clear_bit(p_spq->comp_bitmap_idx,
687 ++ p_spq->p_comp_bitmap);
688 ++ p_spq->comp_bitmap_idx++;
689 ++ qed_chain_return_produced(&p_spq->chain);
690 ++ }
691 ++}
692 ++
693 + int qed_spq_post(struct qed_hwfn *p_hwfn,
694 + struct qed_spq_entry *p_ent, u8 *fw_return_code)
695 + {
696 +@@ -728,11 +749,12 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
697 + rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
698 +
699 + if (p_ent->queue == &p_spq->unlimited_pending) {
700 +- /* This is an allocated p_ent which does not need to
701 +- * return to pool.
702 +- */
703 ++ struct qed_spq_entry *p_post_ent = p_ent->post_ent;
704 ++
705 + kfree(p_ent);
706 +- return rc;
707 ++
708 ++ /* Return the entry which was actually posted */
709 ++ p_ent = p_post_ent;
710 + }
711 +
712 + if (rc)
713 +@@ -746,7 +768,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
714 + spq_post_fail2:
715 + spin_lock_bh(&p_spq->lock);
716 + list_del(&p_ent->list);
717 +- qed_chain_return_produced(&p_spq->chain);
718 ++ qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo);
719 +
720 + spq_post_fail:
721 + /* return to the free pool */
722 +@@ -778,25 +800,8 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
723 + spin_lock_bh(&p_spq->lock);
724 + list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
725 + if (p_ent->elem.hdr.echo == echo) {
726 +- u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
727 +-
728 + list_del(&p_ent->list);
729 +-
730 +- /* Avoid overriding of SPQ entries when getting
731 +- * out-of-order completions, by marking the completions
732 +- * in a bitmap and increasing the chain consumer only
733 +- * for the first successive completed entries.
734 +- */
735 +- __set_bit(pos, p_spq->p_comp_bitmap);
736 +-
737 +- while (test_bit(p_spq->comp_bitmap_idx,
738 +- p_spq->p_comp_bitmap)) {
739 +- __clear_bit(p_spq->comp_bitmap_idx,
740 +- p_spq->p_comp_bitmap);
741 +- p_spq->comp_bitmap_idx++;
742 +- qed_chain_return_produced(&p_spq->chain);
743 +- }
744 +-
745 ++ qed_spq_comp_bmap_update(p_hwfn, echo);
746 + p_spq->comp_count++;
747 + found = p_ent;
748 + break;
749 +@@ -835,11 +840,9 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
750 + QED_MSG_SPQ,
751 + "Got a completion without a callback function\n");
752 +
753 +- if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
754 +- (found->queue == &p_spq->unlimited_pending))
755 ++ if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
756 + /* EBLOCK is responsible for returning its own entry into the
757 +- * free list, unless it originally added the entry into the
758 +- * unlimited pending list.
759 ++ * free list.
760 + */
761 + qed_spq_return_entry(p_hwfn, found);
762 +
763 +diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
764 +index 2acdb0d6ea89..a0533e4e52d7 100644
765 +--- a/drivers/platform/x86/acerhdf.c
766 ++++ b/drivers/platform/x86/acerhdf.c
767 +@@ -233,6 +233,7 @@ static const struct bios_settings bios_tbl[] = {
768 + {"Gateway", "LT31", "v1.3201", 0x55, 0x58, {0x9e, 0x00}, 0},
769 + {"Gateway", "LT31", "v1.3302", 0x55, 0x58, {0x9e, 0x00}, 0},
770 + {"Gateway", "LT31", "v1.3303t", 0x55, 0x58, {0x9e, 0x00}, 0},
771 ++ {"Gateway", "LT31", "v1.3307", 0x55, 0x58, {0x9e, 0x00}, 0},
772 + /* Packard Bell */
773 + {"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x00}, 0},
774 + {"Packard Bell", "DOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00}, 0},
775 +diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c b/drivers/platform/x86/intel_telemetry_debugfs.c
776 +index ef29f18b1951..4069433a0ec6 100644
777 +--- a/drivers/platform/x86/intel_telemetry_debugfs.c
778 ++++ b/drivers/platform/x86/intel_telemetry_debugfs.c
779 +@@ -953,12 +953,16 @@ static int __init telemetry_debugfs_init(void)
780 + debugfs_conf = (struct telemetry_debugfs_conf *)id->driver_data;
781 +
782 + err = telemetry_pltconfig_valid();
783 +- if (err < 0)
784 ++ if (err < 0) {
785 ++ pr_info("Invalid pltconfig, ensure IPC1 device is enabled in BIOS\n");
786 + return -ENODEV;
787 ++ }
788 +
789 + err = telemetry_debugfs_check_evts();
790 +- if (err < 0)
791 ++ if (err < 0) {
792 ++ pr_info("telemetry_debugfs_check_evts failed\n");
793 + return -EINVAL;
794 ++ }
795 +
796 +
797 + #ifdef CONFIG_PM_SLEEP
798 +diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
799 +index efefe075557f..6e6ba1baf9c4 100644
800 +--- a/drivers/s390/net/qeth_l3_main.c
801 ++++ b/drivers/s390/net/qeth_l3_main.c
802 +@@ -363,9 +363,6 @@ static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
803 +
804 + QETH_CARD_TEXT(card, 4, "clearip");
805 +
806 +- if (recover && card->options.sniffer)
807 +- return;
808 +-
809 + spin_lock_bh(&card->ip_lock);
810 +
811 + hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
812 +@@ -823,6 +820,8 @@ static int qeth_l3_register_addr_entry(struct qeth_card *card,
813 + int rc = 0;
814 + int cnt = 3;
815 +
816 ++ if (card->options.sniffer)
817 ++ return 0;
818 +
819 + if (addr->proto == QETH_PROT_IPV4) {
820 + QETH_CARD_TEXT(card, 2, "setaddr4");
821 +@@ -858,6 +857,9 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
822 + {
823 + int rc = 0;
824 +
825 ++ if (card->options.sniffer)
826 ++ return 0;
827 ++
828 + if (addr->proto == QETH_PROT_IPV4) {
829 + QETH_CARD_TEXT(card, 2, "deladdr4");
830 + QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int));
831 +diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
832 +index cfbfef08c94a..e6b20716e8e0 100644
833 +--- a/drivers/uio/uio.c
834 ++++ b/drivers/uio/uio.c
835 +@@ -850,6 +850,8 @@ int __uio_register_device(struct module *owner,
836 + if (ret)
837 + goto err_uio_dev_add_attributes;
838 +
839 ++ info->uio_dev = idev;
840 ++
841 + if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
842 + /*
843 + * Note that we deliberately don't use devm_request_irq
844 +@@ -861,11 +863,12 @@ int __uio_register_device(struct module *owner,
845 + */
846 + ret = request_irq(info->irq, uio_interrupt,
847 + info->irq_flags, info->name, idev);
848 +- if (ret)
849 ++ if (ret) {
850 ++ info->uio_dev = NULL;
851 + goto err_request_irq;
852 ++ }
853 + }
854 +
855 +- info->uio_dev = idev;
856 + return 0;
857 +
858 + err_request_irq:
859 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
860 +index dbe44e890c99..cd4f96354fa8 100644
861 +--- a/drivers/usb/class/cdc-acm.c
862 ++++ b/drivers/usb/class/cdc-acm.c
863 +@@ -1659,6 +1659,9 @@ static const struct usb_device_id acm_ids[] = {
864 + { USB_DEVICE(0x0572, 0x1328), /* Shiro / Aztech USB MODEM UM-3100 */
865 + .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
866 + },
867 ++ { USB_DEVICE(0x0572, 0x1349), /* Hiro (Conexant) USB MODEM H50228 */
868 ++ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
869 ++ },
870 + { USB_DEVICE(0x20df, 0x0001), /* Simtec Electronics Entropy Key */
871 + .driver_info = QUIRK_CONTROL_LINE_STATE, },
872 + { USB_DEVICE(0x2184, 0x001c) }, /* GW Instek AFG-2225 */
873 +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
874 +index 37a5e07b3488..1e8f68960014 100644
875 +--- a/drivers/usb/core/quirks.c
876 ++++ b/drivers/usb/core/quirks.c
877 +@@ -243,6 +243,9 @@ static const struct usb_device_id usb_quirk_list[] = {
878 + { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT |
879 + USB_QUIRK_DELAY_CTRL_MSG },
880 +
881 ++ /* Corsair K70 LUX RGB */
882 ++ { USB_DEVICE(0x1b1c, 0x1b33), .driver_info = USB_QUIRK_DELAY_INIT },
883 ++
884 + /* Corsair K70 LUX */
885 + { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT },
886 +
887 +@@ -263,6 +266,11 @@ static const struct usb_device_id usb_quirk_list[] = {
888 + { USB_DEVICE(0x2040, 0x7200), .driver_info =
889 + USB_QUIRK_CONFIG_INTF_STRINGS },
890 +
891 ++ /* Raydium Touchscreen */
892 ++ { USB_DEVICE(0x2386, 0x3114), .driver_info = USB_QUIRK_NO_LPM },
893 ++
894 ++ { USB_DEVICE(0x2386, 0x3119), .driver_info = USB_QUIRK_NO_LPM },
895 ++
896 + /* DJI CineSSD */
897 + { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
898 +
899 +diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
900 +index da5ff401a354..2d3c656e0bff 100644
901 +--- a/drivers/usb/misc/appledisplay.c
902 ++++ b/drivers/usb/misc/appledisplay.c
903 +@@ -63,6 +63,7 @@ static const struct usb_device_id appledisplay_table[] = {
904 + { APPLEDISPLAY_DEVICE(0x9219) },
905 + { APPLEDISPLAY_DEVICE(0x921c) },
906 + { APPLEDISPLAY_DEVICE(0x921d) },
907 ++ { APPLEDISPLAY_DEVICE(0x9222) },
908 + { APPLEDISPLAY_DEVICE(0x9236) },
909 +
910 + /* Terminating entry */
911 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
912 +index 18d05323ca53..57d375c68e46 100644
913 +--- a/fs/btrfs/disk-io.c
914 ++++ b/fs/btrfs/disk-io.c
915 +@@ -4491,6 +4491,7 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
916 + static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
917 + struct extent_io_tree *pinned_extents)
918 + {
919 ++ struct btrfs_fs_info *fs_info = root->fs_info;
920 + struct extent_io_tree *unpin;
921 + u64 start;
922 + u64 end;
923 +@@ -4500,21 +4501,31 @@ static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
924 + unpin = pinned_extents;
925 + again:
926 + while (1) {
927 ++ /*
928 ++ * The btrfs_finish_extent_commit() may get the same range as
929 ++ * ours between find_first_extent_bit and clear_extent_dirty.
930 ++ * Hence, hold the unused_bg_unpin_mutex to avoid double unpin
931 ++ * the same extent range.
932 ++ */
933 ++ mutex_lock(&fs_info->unused_bg_unpin_mutex);
934 + ret = find_first_extent_bit(unpin, 0, &start, &end,
935 + EXTENT_DIRTY, NULL);
936 +- if (ret)
937 ++ if (ret) {
938 ++ mutex_unlock(&fs_info->unused_bg_unpin_mutex);
939 + break;
940 ++ }
941 +
942 + clear_extent_dirty(unpin, start, end);
943 + btrfs_error_unpin_extent_range(root, start, end);
944 ++ mutex_unlock(&fs_info->unused_bg_unpin_mutex);
945 + cond_resched();
946 + }
947 +
948 + if (loop) {
949 +- if (unpin == &root->fs_info->freed_extents[0])
950 +- unpin = &root->fs_info->freed_extents[1];
951 ++ if (unpin == &fs_info->freed_extents[0])
952 ++ unpin = &fs_info->freed_extents[1];
953 + else
954 +- unpin = &root->fs_info->freed_extents[0];
955 ++ unpin = &fs_info->freed_extents[0];
956 + loop = false;
957 + goto again;
958 + }
959 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
960 +index 163b61a92b59..a775307f3b6b 100644
961 +--- a/fs/btrfs/extent-tree.c
962 ++++ b/fs/btrfs/extent-tree.c
963 +@@ -11140,6 +11140,15 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
964 + return ret;
965 + }
966 +
967 ++/*
968 ++ * Trim the whole filesystem by:
969 ++ * 1) trimming the free space in each block group
970 ++ * 2) trimming the unallocated space on each device
971 ++ *
972 ++ * This will also continue trimming even if a block group or device encounters
973 ++ * an error. The return value will be the last error, or 0 if nothing bad
974 ++ * happens.
975 ++ */
976 + int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
977 + {
978 + struct btrfs_fs_info *fs_info = root->fs_info;
979 +@@ -11150,18 +11159,14 @@ int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
980 + u64 start;
981 + u64 end;
982 + u64 trimmed = 0;
983 +- u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
984 ++ u64 bg_failed = 0;
985 ++ u64 dev_failed = 0;
986 ++ int bg_ret = 0;
987 ++ int dev_ret = 0;
988 + int ret = 0;
989 +
990 +- /*
991 +- * try to trim all FS space, our block group may start from non-zero.
992 +- */
993 +- if (range->len == total_bytes)
994 +- cache = btrfs_lookup_first_block_group(fs_info, range->start);
995 +- else
996 +- cache = btrfs_lookup_block_group(fs_info, range->start);
997 +-
998 +- while (cache) {
999 ++ cache = btrfs_lookup_first_block_group(fs_info, range->start);
1000 ++ for (; cache; cache = next_block_group(fs_info->tree_root, cache)) {
1001 + if (cache->key.objectid >= (range->start + range->len)) {
1002 + btrfs_put_block_group(cache);
1003 + break;
1004 +@@ -11175,13 +11180,15 @@ int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
1005 + if (!block_group_cache_done(cache)) {
1006 + ret = cache_block_group(cache, 0);
1007 + if (ret) {
1008 +- btrfs_put_block_group(cache);
1009 +- break;
1010 ++ bg_failed++;
1011 ++ bg_ret = ret;
1012 ++ continue;
1013 + }
1014 + ret = wait_block_group_cache_done(cache);
1015 + if (ret) {
1016 +- btrfs_put_block_group(cache);
1017 +- break;
1018 ++ bg_failed++;
1019 ++ bg_ret = ret;
1020 ++ continue;
1021 + }
1022 + }
1023 + ret = btrfs_trim_block_group(cache,
1024 +@@ -11192,28 +11199,40 @@ int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
1025 +
1026 + trimmed += group_trimmed;
1027 + if (ret) {
1028 +- btrfs_put_block_group(cache);
1029 +- break;
1030 ++ bg_failed++;
1031 ++ bg_ret = ret;
1032 ++ continue;
1033 + }
1034 + }
1035 +-
1036 +- cache = next_block_group(fs_info->tree_root, cache);
1037 + }
1038 +
1039 +- mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1040 +- devices = &root->fs_info->fs_devices->devices;
1041 ++ if (bg_failed)
1042 ++ btrfs_warn(fs_info,
1043 ++ "failed to trim %llu block group(s), last error %d",
1044 ++ bg_failed, bg_ret);
1045 ++ mutex_lock(&fs_info->fs_devices->device_list_mutex);
1046 ++ devices = &fs_info->fs_devices->devices;
1047 + list_for_each_entry(device, devices, dev_list) {
1048 + ret = btrfs_trim_free_extents(device, range->minlen,
1049 + &group_trimmed);
1050 +- if (ret)
1051 ++ if (ret) {
1052 ++ dev_failed++;
1053 ++ dev_ret = ret;
1054 + break;
1055 ++ }
1056 +
1057 + trimmed += group_trimmed;
1058 + }
1059 + mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1060 +
1061 ++ if (dev_failed)
1062 ++ btrfs_warn(fs_info,
1063 ++ "failed to trim %llu device(s), last error %d",
1064 ++ dev_failed, dev_ret);
1065 + range->len = trimmed;
1066 +- return ret;
1067 ++ if (bg_ret)
1068 ++ return bg_ret;
1069 ++ return dev_ret;
1070 + }
1071 +
1072 + /*
1073 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
1074 +index 96ad2778405b..242584a0d3b5 100644
1075 +--- a/fs/btrfs/ioctl.c
1076 ++++ b/fs/btrfs/ioctl.c
1077 +@@ -380,7 +380,6 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
1078 + struct fstrim_range range;
1079 + u64 minlen = ULLONG_MAX;
1080 + u64 num_devices = 0;
1081 +- u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
1082 + int ret;
1083 +
1084 + if (!capable(CAP_SYS_ADMIN))
1085 +@@ -404,11 +403,15 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
1086 + return -EOPNOTSUPP;
1087 + if (copy_from_user(&range, arg, sizeof(range)))
1088 + return -EFAULT;
1089 +- if (range.start > total_bytes ||
1090 +- range.len < fs_info->sb->s_blocksize)
1091 ++
1092 ++ /*
1093 ++ * NOTE: Don't truncate the range using super->total_bytes. Bytenr of
1094 ++ * block group is in the logical address space, which can be any
1095 ++ * sectorsize aligned bytenr in the range [0, U64_MAX].
1096 ++ */
1097 ++ if (range.len < fs_info->sb->s_blocksize)
1098 + return -EINVAL;
1099 +
1100 +- range.len = min(range.len, total_bytes - range.start);
1101 + range.minlen = max(range.minlen, minlen);
1102 + ret = btrfs_trim_fs(fs_info->tree_root, &range);
1103 + if (ret < 0)
1104 +diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
1105 +index 87658f63b374..be84d49f2406 100644
1106 +--- a/fs/cifs/cifsfs.c
1107 ++++ b/fs/cifs/cifsfs.c
1108 +@@ -927,8 +927,8 @@ static int cifs_clone_file_range(struct file *src_file, loff_t off,
1109 + struct inode *src_inode = file_inode(src_file);
1110 + struct inode *target_inode = file_inode(dst_file);
1111 + struct cifsFileInfo *smb_file_src = src_file->private_data;
1112 +- struct cifsFileInfo *smb_file_target = dst_file->private_data;
1113 +- struct cifs_tcon *target_tcon = tlink_tcon(smb_file_target->tlink);
1114 ++ struct cifsFileInfo *smb_file_target;
1115 ++ struct cifs_tcon *target_tcon;
1116 + unsigned int xid;
1117 + int rc;
1118 +
1119 +@@ -942,6 +942,9 @@ static int cifs_clone_file_range(struct file *src_file, loff_t off,
1120 + goto out;
1121 + }
1122 +
1123 ++ smb_file_target = dst_file->private_data;
1124 ++ target_tcon = tlink_tcon(smb_file_target->tlink);
1125 ++
1126 + /*
1127 + * Note: cifs case is easier than btrfs since server responsible for
1128 + * checks for proper open modes and file type and if it wants
1129 +diff --git a/fs/exofs/super.c b/fs/exofs/super.c
1130 +index 1076a4233b39..0c48138486dc 100644
1131 +--- a/fs/exofs/super.c
1132 ++++ b/fs/exofs/super.c
1133 +@@ -100,6 +100,7 @@ static int parse_options(char *options, struct exofs_mountopt *opts)
1134 + token = match_token(p, tokens, args);
1135 + switch (token) {
1136 + case Opt_name:
1137 ++ kfree(opts->dev_name);
1138 + opts->dev_name = match_strdup(&args[0]);
1139 + if (unlikely(!opts->dev_name)) {
1140 + EXOFS_ERR("Error allocating dev_name");
1141 +@@ -868,8 +869,10 @@ static struct dentry *exofs_mount(struct file_system_type *type,
1142 + int ret;
1143 +
1144 + ret = parse_options(data, &opts);
1145 +- if (ret)
1146 ++ if (ret) {
1147 ++ kfree(opts.dev_name);
1148 + return ERR_PTR(ret);
1149 ++ }
1150 +
1151 + if (!opts.dev_name)
1152 + opts.dev_name = dev_name;
1153 +diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
1154 +index 832824994aae..073126707270 100644
1155 +--- a/fs/gfs2/rgrp.c
1156 ++++ b/fs/gfs2/rgrp.c
1157 +@@ -715,6 +715,7 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
1158 + spin_lock(&gl->gl_lockref.lock);
1159 + gl->gl_object = NULL;
1160 + spin_unlock(&gl->gl_lockref.lock);
1161 ++ gfs2_rgrp_brelse(rgd);
1162 + gfs2_glock_add_to_lru(gl);
1163 + gfs2_glock_put(gl);
1164 + }
1165 +@@ -1125,7 +1126,7 @@ static u32 count_unlinked(struct gfs2_rgrpd *rgd)
1166 + * @rgd: the struct gfs2_rgrpd describing the RG to read in
1167 + *
1168 + * Read in all of a Resource Group's header and bitmap blocks.
1169 +- * Caller must eventually call gfs2_rgrp_relse() to free the bitmaps.
1170 ++ * Caller must eventually call gfs2_rgrp_brelse() to free the bitmaps.
1171 + *
1172 + * Returns: errno
1173 + */
1174 +diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c
1175 +index 2a6f3c67cb3f..2e713673df42 100644
1176 +--- a/fs/hfs/brec.c
1177 ++++ b/fs/hfs/brec.c
1178 +@@ -424,6 +424,10 @@ skip:
1179 + if (new_node) {
1180 + __be32 cnid;
1181 +
1182 ++ if (!new_node->parent) {
1183 ++ hfs_btree_inc_height(tree);
1184 ++ new_node->parent = tree->root;
1185 ++ }
1186 + fd->bnode = hfs_bnode_find(tree, new_node->parent);
1187 + /* create index key and entry */
1188 + hfs_bnode_read_key(new_node, fd->search_key, 14);
1189 +diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c
1190 +index 754fdf8c6356..1002a0c08319 100644
1191 +--- a/fs/hfsplus/brec.c
1192 ++++ b/fs/hfsplus/brec.c
1193 +@@ -427,6 +427,10 @@ skip:
1194 + if (new_node) {
1195 + __be32 cnid;
1196 +
1197 ++ if (!new_node->parent) {
1198 ++ hfs_btree_inc_height(tree);
1199 ++ new_node->parent = tree->root;
1200 ++ }
1201 + fd->bnode = hfs_bnode_find(tree, new_node->parent);
1202 + /* create index key and entry */
1203 + hfs_bnode_read_key(new_node, fd->search_key, 14);
1204 +diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
1205 +index 06a9fae202a7..9e313fc7fdc7 100644
1206 +--- a/fs/reiserfs/xattr.c
1207 ++++ b/fs/reiserfs/xattr.c
1208 +@@ -184,6 +184,7 @@ struct reiserfs_dentry_buf {
1209 + struct dir_context ctx;
1210 + struct dentry *xadir;
1211 + int count;
1212 ++ int err;
1213 + struct dentry *dentries[8];
1214 + };
1215 +
1216 +@@ -206,6 +207,7 @@ fill_with_dentries(struct dir_context *ctx, const char *name, int namelen,
1217 +
1218 + dentry = lookup_one_len(name, dbuf->xadir, namelen);
1219 + if (IS_ERR(dentry)) {
1220 ++ dbuf->err = PTR_ERR(dentry);
1221 + return PTR_ERR(dentry);
1222 + } else if (d_really_is_negative(dentry)) {
1223 + /* A directory entry exists, but no file? */
1224 +@@ -214,6 +216,7 @@ fill_with_dentries(struct dir_context *ctx, const char *name, int namelen,
1225 + "not found for file %pd.\n",
1226 + dentry, dbuf->xadir);
1227 + dput(dentry);
1228 ++ dbuf->err = -EIO;
1229 + return -EIO;
1230 + }
1231 +
1232 +@@ -261,6 +264,10 @@ static int reiserfs_for_each_xattr(struct inode *inode,
1233 + err = reiserfs_readdir_inode(d_inode(dir), &buf.ctx);
1234 + if (err)
1235 + break;
1236 ++ if (buf.err) {
1237 ++ err = buf.err;
1238 ++ break;
1239 ++ }
1240 + if (!buf.count)
1241 + break;
1242 + for (i = 0; !err && i < buf.count && buf.dentries[i]; i++) {
1243 +diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h
1244 +index 8d0248525957..9f34204978e4 100644
1245 +--- a/include/linux/netfilter/ipset/ip_set_comment.h
1246 ++++ b/include/linux/netfilter/ipset/ip_set_comment.h
1247 +@@ -41,11 +41,11 @@ ip_set_init_comment(struct ip_set_comment *comment,
1248 + rcu_assign_pointer(comment->c, c);
1249 + }
1250 +
1251 +-/* Used only when dumping a set, protected by rcu_read_lock_bh() */
1252 ++/* Used only when dumping a set, protected by rcu_read_lock() */
1253 + static inline int
1254 + ip_set_put_comment(struct sk_buff *skb, struct ip_set_comment *comment)
1255 + {
1256 +- struct ip_set_comment_rcu *c = rcu_dereference_bh(comment->c);
1257 ++ struct ip_set_comment_rcu *c = rcu_dereference(comment->c);
1258 +
1259 + if (!c)
1260 + return 0;
1261 +diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
1262 +index f30c187ed785..9442423979c1 100644
1263 +--- a/include/linux/uaccess.h
1264 ++++ b/include/linux/uaccess.h
1265 +@@ -2,6 +2,9 @@
1266 + #define __LINUX_UACCESS_H__
1267 +
1268 + #include <linux/sched.h>
1269 ++
1270 ++#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)
1271 ++
1272 + #include <asm/uaccess.h>
1273 +
1274 + static __always_inline void pagefault_disabled_inc(void)
1275 +diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
1276 +index 2c7b60edea04..1faeef0c30b9 100644
1277 +--- a/lib/raid6/test/Makefile
1278 ++++ b/lib/raid6/test/Makefile
1279 +@@ -26,7 +26,7 @@ ifeq ($(ARCH),arm)
1280 + CFLAGS += -I../../../arch/arm/include -mfpu=neon
1281 + HAS_NEON = yes
1282 + endif
1283 +-ifeq ($(ARCH),arm64)
1284 ++ifeq ($(ARCH),aarch64)
1285 + CFLAGS += -I../../../arch/arm64/include
1286 + HAS_NEON = yes
1287 + endif
1288 +@@ -40,7 +40,7 @@ ifeq ($(IS_X86),yes)
1289 + gcc -c -x assembler - >&/dev/null && \
1290 + rm ./-.o && echo -DCONFIG_AS_AVX512=1)
1291 + else ifeq ($(HAS_NEON),yes)
1292 +- OBJS += neon.o neon1.o neon2.o neon4.o neon8.o
1293 ++ OBJS += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
1294 + CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1
1295 + else
1296 + HAS_ALTIVEC := $(shell printf '\#include <altivec.h>\nvector int a;\n' |\
1297 +diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
1298 +index 98ea28dc03f9..68acf94fae72 100644
1299 +--- a/net/ceph/messenger.c
1300 ++++ b/net/ceph/messenger.c
1301 +@@ -588,9 +588,15 @@ static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
1302 + int ret;
1303 + struct kvec iov;
1304 +
1305 +- /* sendpage cannot properly handle pages with page_count == 0,
1306 +- * we need to fallback to sendmsg if that's the case */
1307 +- if (page_count(page) >= 1)
1308 ++ /*
1309 ++ * sendpage cannot properly handle pages with page_count == 0,
1310 ++ * we need to fall back to sendmsg if that's the case.
1311 ++ *
1312 ++ * Same goes for slab pages: skb_can_coalesce() allows
1313 ++ * coalescing neighboring slab objects into a single frag which
1314 ++ * triggers one of hardened usercopy checks.
1315 ++ */
1316 ++ if (page_count(page) >= 1 && !PageSlab(page))
1317 + return __ceph_tcp_sendpage(sock, page, offset, size, more);
1318 +
1319 + iov.iov_base = kmap(page) + offset;
1320 +diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c
1321 +index 9a14c237830f..b259a5814965 100644
1322 +--- a/net/netfilter/ipset/ip_set_hash_netportnet.c
1323 ++++ b/net/netfilter/ipset/ip_set_hash_netportnet.c
1324 +@@ -213,13 +213,13 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
1325 +
1326 + if (tb[IPSET_ATTR_CIDR]) {
1327 + e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
1328 +- if (!e.cidr[0] || e.cidr[0] > HOST_MASK)
1329 ++ if (e.cidr[0] > HOST_MASK)
1330 + return -IPSET_ERR_INVALID_CIDR;
1331 + }
1332 +
1333 + if (tb[IPSET_ATTR_CIDR2]) {
1334 + e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
1335 +- if (!e.cidr[1] || e.cidr[1] > HOST_MASK)
1336 ++ if (e.cidr[1] > HOST_MASK)
1337 + return -IPSET_ERR_INVALID_CIDR;
1338 + }
1339 +
1340 +@@ -492,13 +492,13 @@ hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
1341 +
1342 + if (tb[IPSET_ATTR_CIDR]) {
1343 + e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
1344 +- if (!e.cidr[0] || e.cidr[0] > HOST_MASK)
1345 ++ if (e.cidr[0] > HOST_MASK)
1346 + return -IPSET_ERR_INVALID_CIDR;
1347 + }
1348 +
1349 + if (tb[IPSET_ATTR_CIDR2]) {
1350 + e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
1351 +- if (!e.cidr[1] || e.cidr[1] > HOST_MASK)
1352 ++ if (e.cidr[1] > HOST_MASK)
1353 + return -IPSET_ERR_INVALID_CIDR;
1354 + }
1355 +
1356 +diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
1357 +index bb5d6a058fb7..921c9bd7e1e7 100644
1358 +--- a/net/netfilter/xt_IDLETIMER.c
1359 ++++ b/net/netfilter/xt_IDLETIMER.c
1360 +@@ -116,6 +116,22 @@ static void idletimer_tg_expired(unsigned long data)
1361 + schedule_work(&timer->work);
1362 + }
1363 +
1364 ++static int idletimer_check_sysfs_name(const char *name, unsigned int size)
1365 ++{
1366 ++ int ret;
1367 ++
1368 ++ ret = xt_check_proc_name(name, size);
1369 ++ if (ret < 0)
1370 ++ return ret;
1371 ++
1372 ++ if (!strcmp(name, "power") ||
1373 ++ !strcmp(name, "subsystem") ||
1374 ++ !strcmp(name, "uevent"))
1375 ++ return -EINVAL;
1376 ++
1377 ++ return 0;
1378 ++}
1379 ++
1380 + static int idletimer_tg_create(struct idletimer_tg_info *info)
1381 + {
1382 + int ret;
1383 +@@ -126,6 +142,10 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
1384 + goto out;
1385 + }
1386 +
1387 ++ ret = idletimer_check_sysfs_name(info->label, sizeof(info->label));
1388 ++ if (ret < 0)
1389 ++ goto out_free_timer;
1390 ++
1391 + sysfs_attr_init(&info->timer->attr.attr);
1392 + info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL);
1393 + if (!info->timer->attr.attr.name) {
1394 +diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
1395 +index 1b38fc486351..69846c6574ef 100644
1396 +--- a/net/sunrpc/xdr.c
1397 ++++ b/net/sunrpc/xdr.c
1398 +@@ -512,7 +512,7 @@ EXPORT_SYMBOL_GPL(xdr_commit_encode);
1399 + static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
1400 + size_t nbytes)
1401 + {
1402 +- static __be32 *p;
1403 ++ __be32 *p;
1404 + int space_left;
1405 + int frag1bytes, frag2bytes;
1406 +