Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.15 commit in: /
Date: Sun, 21 Nov 2021 20:38:00
Message-Id: 1637527063.8a93dc84dd9b3cf2370f6f7a5e44269d62358733.mpagano@gentoo
commit:     8a93dc84dd9b3cf2370f6f7a5e44269d62358733
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Nov 21 20:37:43 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Nov 21 20:37:43 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=8a93dc84

Linux patch 5.15.4

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |   4 +
 1003_linux-5.15.4.patch | 873 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 877 insertions(+)
diff --git a/0000_README b/0000_README
index 44bc743a..da91b24d 100644
--- a/0000_README
+++ b/0000_README
@@ -55,6 +55,10 @@ Patch: 1002_linux-5.15.3.patch
 From: http://www.kernel.org
 Desc: Linux 5.15.3
 
+Patch: 1003_linux-5.15.4.patch
+From: http://www.kernel.org
+Desc: Linux 5.15.4
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.
 
diff --git a/1003_linux-5.15.4.patch b/1003_linux-5.15.4.patch
new file mode 100644
index 00000000..e7131c52
--- /dev/null
+++ b/1003_linux-5.15.4.patch
@@ -0,0 +1,873 @@
+diff --git a/Makefile b/Makefile
+index 79eeb13c09730..759e68a02cf01 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 15
+-SUBLEVEL = 3
++SUBLEVEL = 4
+ EXTRAVERSION =
+ NAME = Trick or Treat
+
+diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
+index 2716e58b498bb..437c8d31f3907 100644
+--- a/arch/parisc/kernel/entry.S
++++ b/arch/parisc/kernel/entry.S
+@@ -1835,7 +1835,7 @@ syscall_restore:
+ 
+ 	/* Are we being ptraced? */
+ 	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
+-	ldi	_TIF_SYSCALL_TRACE_MASK,%r2
++	ldi	_TIF_SINGLESTEP|_TIF_BLOCKSTEP,%r2
+ 	and,COND(=)	%r19,%r2,%r0
+ 	b,n	syscall_restore_rfi
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 3de93d4bf16ba..c48e2b5729c5d 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3242,9 +3242,9 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
+ 		     "xor %1, %1\n"
+ 		     "2:\n"
+ 		     _ASM_EXTABLE_UA(1b, 2b)
+-		     : "+r" (st_preempted),
+-		       "+&r" (err)
+-		     : "m" (st->preempted));
++		     : "+q" (st_preempted),
++		       "+&r" (err),
++		       "+m" (st->preempted));
+ 	if (err)
+ 		goto out;
+
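Aside: the x86.c hunk above tightens the inline-asm constraints. st->preempted becomes a read-write memory operand ("+m") instead of a plain input, so the compiler knows the xchgb modifies it, and st_preempted gets "q" because xchgb needs a register with a byte subregister. A minimal standalone sketch of those constraint semantics (my own illustration, not kernel code; builds with GCC or Clang on x86):

static inline unsigned char xchg_byte(unsigned char *mem, unsigned char val)
{
	/*
	 * "+q": val must sit in a byte-addressable register (al, bl, ...),
	 * which xchgb requires; "+m": the asm both reads and writes *mem,
	 * so the compiler cannot cache the old value across the statement.
	 */
	asm volatile("xchgb %0, %1" : "+q" (val), "+m" (*mem));
	return val;
}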
+diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
+index 1cfafa254e3d4..7a33a6d985f89 100644
+--- a/drivers/acpi/glue.c
++++ b/drivers/acpi/glue.c
+@@ -340,28 +340,3 @@ void acpi_device_notify_remove(struct device *dev)
+ 
+ 	acpi_unbind_one(dev);
+ }
+-
+-int acpi_dev_turn_off_if_unused(struct device *dev, void *not_used)
+-{
+-	struct acpi_device *adev = to_acpi_device(dev);
+-
+-	/*
+-	 * Skip device objects with device IDs, because they may be in use even
+-	 * if they are not companions of any physical device objects.
+-	 */
+-	if (adev->pnp.type.hardware_id)
+-		return 0;
+-
+-	mutex_lock(&adev->physical_node_lock);
+-
+-	/*
+-	 * Device objects without device IDs are not in use if they have no
+-	 * corresponding physical device objects.
+-	 */
+-	if (list_empty(&adev->physical_node_list))
+-		acpi_device_set_power(adev, ACPI_STATE_D3_COLD);
+-
+-	mutex_unlock(&adev->physical_node_lock);
+-
+-	return 0;
+-}
+diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
+index 8fbdc172864b0..d91b560e88674 100644
+--- a/drivers/acpi/internal.h
++++ b/drivers/acpi/internal.h
+@@ -117,7 +117,6 @@ bool acpi_device_is_battery(struct acpi_device *adev);
+ bool acpi_device_is_first_physical_node(struct acpi_device *adev,
+ 					const struct device *dev);
+ int acpi_bus_register_early_device(int type);
+-int acpi_dev_turn_off_if_unused(struct device *dev, void *not_used);
+ 
+ /* --------------------------------------------------------------------------
+                         Device Matching and Notification
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index 770b82483d74d..5b54c80b9d32a 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -2559,12 +2559,6 @@ int __init acpi_scan_init(void)
+ 		}
+ 	}
+ 
+-	/*
+-	 * Make sure that power management resources are not blocked by ACPI
+-	 * device objects with no users.
+-	 */
+-	bus_for_each_dev(&acpi_bus_type, NULL, NULL, acpi_dev_turn_off_if_unused);
+-
+ 	acpi_turn_off_unused_power_resources();
+ 
+ 	acpi_scan_initialized = true;
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 7bf4686af774e..dfc72a1f6500d 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -272,19 +272,6 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
+ 	blk_mq_unfreeze_queue(lo->lo_queue);
+ }
+ 
+-/**
+- * loop_validate_block_size() - validates the passed in block size
+- * @bsize: size to validate
+- */
+-static int
+-loop_validate_block_size(unsigned short bsize)
+-{
+-	if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
+-		return -EINVAL;
+-
+-	return 0;
+-}
+-
+ /**
+  * loop_set_size() - sets device size and notifies userspace
+  * @lo: struct loop_device to set the size for
+@@ -1236,7 +1223,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
+ 	}
+ 
+ 	if (config->block_size) {
+-		error = loop_validate_block_size(config->block_size);
++		error = blk_validate_block_size(config->block_size);
+ 		if (error)
+ 			goto out_unlock;
+ 	}
+@@ -1759,7 +1746,7 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
+ 	if (lo->lo_state != Lo_bound)
+ 		return -ENXIO;
+ 
+-	err = loop_validate_block_size(arg);
++	err = blk_validate_block_size(arg);
+ 	if (err)
+ 		return err;
+
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 60d2fce59a71d..79d0db542da3b 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -433,6 +433,10 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x0bda, 0xb009), .driver_info = BTUSB_REALTEK },
+ 	{ USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK },
+ 
++	/* Additional Realtek 8761B Bluetooth devices */
++	{ USB_DEVICE(0x2357, 0x0604), .driver_info = BTUSB_REALTEK |
++						     BTUSB_WIDEBAND_SPEECH },
++
+ 	/* Additional Realtek 8761BU Bluetooth devices */
+ 	{ USB_DEVICE(0x0b05, 0x190e), .driver_info = BTUSB_REALTEK |
+ 						     BTUSB_WIDEBAND_SPEECH },
+diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
+index 6ae4269118af3..cea777ae7fb92 100644
+--- a/drivers/gpu/drm/Kconfig
++++ b/drivers/gpu/drm/Kconfig
+@@ -102,8 +102,9 @@ config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
+ 
+ config DRM_FBDEV_EMULATION
+ 	bool "Enable legacy fbdev support for your modesetting driver"
+-	depends on DRM_KMS_HELPER
+-	depends on FB=y || FB=DRM_KMS_HELPER
++	depends on DRM
++	depends on FB
++	select DRM_KMS_HELPER
+ 	select FB_CFB_FILLRECT
+ 	select FB_CFB_COPYAREA
+ 	select FB_CFB_IMAGEBLIT
+diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
+index ea6371eb9b257..e2dedfa9072db 100644
+--- a/drivers/pci/msi.c
++++ b/drivers/pci/msi.c
+@@ -477,6 +477,9 @@ msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity *affd)
+ 		goto out;
+ 
+ 	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
++	/* Lies, damned lies, and MSIs */
++	if (dev->dev_flags & PCI_DEV_FLAGS_HAS_MSI_MASKING)
++		control |= PCI_MSI_FLAGS_MASKBIT;
+ 
+ 	entry->msi_attrib.is_msix = 0;
+ 	entry->msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT);
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 6c957124f84d6..208fa03acdda0 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -5796,3 +5796,9 @@ static void apex_pci_fixup_class(struct pci_dev *pdev)
+ }
+ DECLARE_PCI_FIXUP_CLASS_HEADER(0x1ac1, 0x089a,
+ 			       PCI_CLASS_NOT_DEFINED, 8, apex_pci_fixup_class);
++
++static void nvidia_ion_ahci_fixup(struct pci_dev *pdev)
++{
++	pdev->dev_flags |= PCI_DEV_FLAGS_HAS_MSI_MASKING;
++}
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0ab8, nvidia_ion_ahci_fixup);
+diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
+index 6379f26a335f6..9233f7e744544 100644
+--- a/drivers/thermal/thermal_of.c
++++ b/drivers/thermal/thermal_of.c
+@@ -89,7 +89,7 @@ static int of_thermal_get_temp(struct thermal_zone_device *tz,
+ {
+ 	struct __thermal_zone *data = tz->devdata;
+ 
+-	if (!data->ops->get_temp)
++	if (!data->ops || !data->ops->get_temp)
+ 		return -EINVAL;
+ 
+ 	return data->ops->get_temp(data->sensor_data, temp);
+@@ -186,6 +186,9 @@ static int of_thermal_set_emul_temp(struct thermal_zone_device *tz,
+ {
+ 	struct __thermal_zone *data = tz->devdata;
+ 
++	if (!data->ops || !data->ops->set_emul_temp)
++		return -EINVAL;
++
+ 	return data->ops->set_emul_temp(data->sensor_data, temp);
+ }
+ 
+@@ -194,7 +197,7 @@ static int of_thermal_get_trend(struct thermal_zone_device *tz, int trip,
+ {
+ 	struct __thermal_zone *data = tz->devdata;
+ 
+-	if (!data->ops->get_trend)
++	if (!data->ops || !data->ops->get_trend)
+ 		return -EINVAL;
+ 
+ 	return data->ops->get_trend(data->sensor_data, trip, trend);
+@@ -301,7 +304,7 @@ static int of_thermal_set_trip_temp(struct thermal_zone_device *tz, int trip,
+ 	if (trip >= data->ntrips || trip < 0)
+ 		return -EDOM;
+ 
+-	if (data->ops->set_trip_temp) {
++	if (data->ops && data->ops->set_trip_temp) {
+ 		int ret;
+ 
+ 		ret = data->ops->set_trip_temp(data->sensor_data, trip, temp);
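Aside: all four thermal_of.c hunks apply the same defensive pattern — the ops table can be absent at call time, so both it and the individual callback are checked before the indirect call. A self-contained userspace sketch of that optional-ops pattern (hypothetical names, not the kernel API):

#include <errno.h>
#include <stddef.h>

struct sensor_ops {
	int (*get_temp)(void *data, int *temp);	/* optional callback */
};

struct zone {
	const struct sensor_ops *ops;	/* may be NULL with no provider */
	void *sensor_data;
};

static int zone_get_temp(struct zone *z, int *temp)
{
	if (!z->ops || !z->ops->get_temp)
		return -EINVAL;	/* nothing registered to answer */
	return z->ops->get_temp(z->sensor_data, temp);
}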
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index a3b830b8410a8..a53ebc52bd51f 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -902,6 +902,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
+ 	spin_unlock(&cluster->refill_lock);
+ 
+ 	btrfs_clear_treelog_bg(block_group);
++	btrfs_clear_data_reloc_bg(block_group);
+ 
+ 	path = btrfs_alloc_path();
+ 	if (!path) {
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index c0cebcf745cef..ae06ad5593535 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -1017,6 +1017,13 @@ struct btrfs_fs_info {
+ 	spinlock_t treelog_bg_lock;
+ 	u64 treelog_bg;
+ 
++	/*
++	 * Start of the dedicated data relocation block group, protected by
++	 * relocation_bg_lock.
++	 */
++	spinlock_t relocation_bg_lock;
++	u64 data_reloc_bg;
++
+ #ifdef CONFIG_BTRFS_FS_REF_VERIFY
+ 	spinlock_t ref_verify_lock;
+ 	struct rb_root block_tree;
+@@ -3842,6 +3849,11 @@ static inline bool btrfs_is_zoned(const struct btrfs_fs_info *fs_info)
+ 	return fs_info->zoned != 0;
+ }
+ 
++static inline bool btrfs_is_data_reloc_root(const struct btrfs_root *root)
++{
++	return root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID;
++}
++
+ /*
+  * We use page status Private2 to indicate there is an ordered extent with
+  * unfinished IO.
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 6965ed0813462..e00c4c1f622f3 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -1500,7 +1500,7 @@ static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev)
+ 		goto fail;
+ 
+ 	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID &&
+-	    root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
++	    !btrfs_is_data_reloc_root(root)) {
+ 		set_bit(BTRFS_ROOT_SHAREABLE, &root->state);
+ 		btrfs_check_and_init_root_item(&root->root_item);
+ 	}
+@@ -2883,6 +2883,7 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
+ 	spin_lock_init(&fs_info->buffer_lock);
+ 	spin_lock_init(&fs_info->unused_bgs_lock);
+ 	spin_lock_init(&fs_info->treelog_bg_lock);
++	spin_lock_init(&fs_info->relocation_bg_lock);
+ 	rwlock_init(&fs_info->tree_mod_log_lock);
+ 	mutex_init(&fs_info->unused_bg_unpin_mutex);
+ 	mutex_init(&fs_info->reclaim_bgs_lock);
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 0ab456cb4bf80..87c23c5c0f26d 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -2376,7 +2376,7 @@ int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset,
+ 
+ out:
+ 	btrfs_free_path(path);
+-	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
++	if (btrfs_is_data_reloc_root(root))
+ 		WARN_ON(ret > 0);
+ 	return ret;
+ }
+@@ -3495,6 +3495,9 @@ struct find_free_extent_ctl {
+ 	/* Allocation is called for tree-log */
+ 	bool for_treelog;
+ 
++	/* Allocation is called for data relocation */
++	bool for_data_reloc;
++
+ 	/* RAID index, converted from flags */
+ 	int index;
+ 
+@@ -3756,6 +3759,7 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
+ 	u64 avail;
+ 	u64 bytenr = block_group->start;
+ 	u64 log_bytenr;
++	u64 data_reloc_bytenr;
+ 	int ret = 0;
+ 	bool skip;
+ 
+@@ -3773,13 +3777,31 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
+ 	if (skip)
+ 		return 1;
+ 
++	/*
++	 * Do not allow non-relocation blocks in the dedicated relocation block
++	 * group, and vice versa.
++	 */
++	spin_lock(&fs_info->relocation_bg_lock);
++	data_reloc_bytenr = fs_info->data_reloc_bg;
++	if (data_reloc_bytenr &&
++	    ((ffe_ctl->for_data_reloc && bytenr != data_reloc_bytenr) ||
++	     (!ffe_ctl->for_data_reloc && bytenr == data_reloc_bytenr)))
++		skip = true;
++	spin_unlock(&fs_info->relocation_bg_lock);
++	if (skip)
++		return 1;
++
+ 	spin_lock(&space_info->lock);
+ 	spin_lock(&block_group->lock);
+ 	spin_lock(&fs_info->treelog_bg_lock);
++	spin_lock(&fs_info->relocation_bg_lock);
+ 
+ 	ASSERT(!ffe_ctl->for_treelog ||
+ 	       block_group->start == fs_info->treelog_bg ||
+ 	       fs_info->treelog_bg == 0);
++	ASSERT(!ffe_ctl->for_data_reloc ||
++	       block_group->start == fs_info->data_reloc_bg ||
++	       fs_info->data_reloc_bg == 0);
+ 
+ 	if (block_group->ro) {
+ 		ret = 1;
+@@ -3796,6 +3818,16 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
+ 		goto out;
+ 	}
+ 
++	/*
++	 * Do not allow currently used block group to be the data relocation
++	 * dedicated block group.
++	 */
++	if (ffe_ctl->for_data_reloc && !fs_info->data_reloc_bg &&
++	    (block_group->used || block_group->reserved)) {
++		ret = 1;
++		goto out;
++	}
++
+ 	avail = block_group->length - block_group->alloc_offset;
+ 	if (avail < num_bytes) {
+ 		if (ffe_ctl->max_extent_size < avail) {
+@@ -3813,6 +3845,9 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
+ 	if (ffe_ctl->for_treelog && !fs_info->treelog_bg)
+ 		fs_info->treelog_bg = block_group->start;
+ 
++	if (ffe_ctl->for_data_reloc && !fs_info->data_reloc_bg)
++		fs_info->data_reloc_bg = block_group->start;
++
+ 	ffe_ctl->found_offset = start + block_group->alloc_offset;
+ 	block_group->alloc_offset += num_bytes;
+ 	spin_lock(&ctl->tree_lock);
+@@ -3829,6 +3864,9 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
+ out:
+ 	if (ret && ffe_ctl->for_treelog)
+ 		fs_info->treelog_bg = 0;
++	if (ret && ffe_ctl->for_data_reloc)
++		fs_info->data_reloc_bg = 0;
++	spin_unlock(&fs_info->relocation_bg_lock);
+ 	spin_unlock(&fs_info->treelog_bg_lock);
+ 	spin_unlock(&block_group->lock);
+ 	spin_unlock(&space_info->lock);
+@@ -4085,6 +4123,12 @@ static int prepare_allocation(struct btrfs_fs_info *fs_info,
+ 			ffe_ctl->hint_byte = fs_info->treelog_bg;
+ 			spin_unlock(&fs_info->treelog_bg_lock);
+ 		}
++		if (ffe_ctl->for_data_reloc) {
++			spin_lock(&fs_info->relocation_bg_lock);
++			if (fs_info->data_reloc_bg)
++				ffe_ctl->hint_byte = fs_info->data_reloc_bg;
++			spin_unlock(&fs_info->relocation_bg_lock);
++		}
+ 		return 0;
+ 	default:
+ 		BUG();
+@@ -4129,6 +4173,8 @@ static noinline int find_free_extent(struct btrfs_root *root,
+ 	struct btrfs_space_info *space_info;
+ 	bool full_search = false;
+ 	bool for_treelog = (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
++	bool for_data_reloc = (btrfs_is_data_reloc_root(root) &&
++			       flags & BTRFS_BLOCK_GROUP_DATA);
+ 
+ 	WARN_ON(num_bytes < fs_info->sectorsize);
+ 
+@@ -4143,6 +4189,7 @@ static noinline int find_free_extent(struct btrfs_root *root,
+ 	ffe_ctl.found_offset = 0;
+ 	ffe_ctl.hint_byte = hint_byte_orig;
+ 	ffe_ctl.for_treelog = for_treelog;
++	ffe_ctl.for_data_reloc = for_data_reloc;
+ 	ffe_ctl.policy = BTRFS_EXTENT_ALLOC_CLUSTERED;
+ 
+ 	/* For clustered allocation */
+@@ -4220,6 +4267,8 @@ search:
+ 		if (unlikely(block_group->ro)) {
+ 			if (for_treelog)
+ 				btrfs_clear_treelog_bg(block_group);
++			if (ffe_ctl.for_data_reloc)
++				btrfs_clear_data_reloc_bg(block_group);
+ 			continue;
+ 		}
+ 
+@@ -4408,6 +4457,7 @@ int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
+ 	u64 flags;
+ 	int ret;
+ 	bool for_treelog = (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
++	bool for_data_reloc = (btrfs_is_data_reloc_root(root) && is_data);
+ 
+ 	flags = get_alloc_profile_by_root(root, is_data);
+ again:
+@@ -4431,8 +4481,8 @@ again:
+ 
+ 	sinfo = btrfs_find_space_info(fs_info, flags);
+ 	btrfs_err(fs_info,
+-		  "allocation failed flags %llu, wanted %llu tree-log %d",
+-		  flags, num_bytes, for_treelog);
++		  "allocation failed flags %llu, wanted %llu tree-log %d, relocation: %d",
++		  flags, num_bytes, for_treelog, for_data_reloc);
+ 	if (sinfo)
+ 		btrfs_dump_space_info(fs_info, sinfo,
+ 				      num_bytes, 1);
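Aside: once fs_info->data_reloc_bg is set, the skip test added to do_allocation_zoned() reduces to an exclusive-or — skip whenever "this allocation is for data relocation" disagrees with "this block group is the dedicated relocation group". A small standalone sketch of that reduction (my own illustration, assuming data_reloc_bytenr is nonzero):

#include <assert.h>
#include <stdbool.h>

/* Equivalent to: (for_reloc && bytenr != reloc_bg) ||
 *                (!for_reloc && bytenr == reloc_bg)  */
static bool should_skip(bool for_reloc, bool is_reloc_bg)
{
	return for_reloc != is_reloc_bg;
}

int main(void)
{
	assert(should_skip(true, false));	/* reloc alloc, ordinary bg */
	assert(should_skip(false, true));	/* ordinary alloc, reloc bg */
	assert(!should_skip(true, true));
	assert(!should_skip(false, false));
	return 0;
}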
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index aaddd72253481..a40fb9c74dda3 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -5120,6 +5120,9 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
+ int extent_writepages(struct address_space *mapping,
+ 		      struct writeback_control *wbc)
+ {
++	struct inode *inode = mapping->host;
++	const bool data_reloc = btrfs_is_data_reloc_root(BTRFS_I(inode)->root);
++	const bool zoned = btrfs_is_zoned(BTRFS_I(inode)->root->fs_info);
+ 	int ret = 0;
+ 	struct extent_page_data epd = {
+ 		.bio_ctrl = { 0 },
+@@ -5127,7 +5130,15 @@ int extent_writepages(struct address_space *mapping,
+ 		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
+ 	};
+ 
++	/*
++	 * Allow only a single thread to do the reloc work in zoned mode to
++	 * protect the write pointer updates.
++	 */
++	if (data_reloc && zoned)
++		btrfs_inode_lock(inode, 0);
+ 	ret = extent_write_cache_pages(mapping, wbc, &epd);
++	if (data_reloc && zoned)
++		btrfs_inode_unlock(inode, 0);
+ 	ASSERT(ret <= 0);
+ 	if (ret < 0) {
+ 		end_write_bio(&epd, ret);
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 7c096ab9bb5eb..61b4651f008d4 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -1151,7 +1151,7 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
+ 	 * fails during the stage where it updates the bytenr of file extent
+ 	 * items.
+ 	 */
+-	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
++	if (btrfs_is_data_reloc_root(root))
+ 		min_alloc_size = num_bytes;
+ 	else
+ 		min_alloc_size = fs_info->sectorsize;
+@@ -1187,8 +1187,7 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
+ 		if (ret)
+ 			goto out_drop_extent_cache;
+ 
+-		if (root->root_key.objectid ==
+-		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
++		if (btrfs_is_data_reloc_root(root)) {
+ 			ret = btrfs_reloc_clone_csums(inode, start,
+ 						      cur_alloc_size);
+ 			/*
+@@ -1504,8 +1503,7 @@ static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
+ 			   int *page_started, unsigned long *nr_written)
+ {
+ 	const bool is_space_ino = btrfs_is_free_space_inode(inode);
+-	const bool is_reloc_ino = (inode->root->root_key.objectid ==
+-				   BTRFS_DATA_RELOC_TREE_OBJECTID);
++	const bool is_reloc_ino = btrfs_is_data_reloc_root(inode->root);
+ 	const u64 range_bytes = end + 1 - start;
+ 	struct extent_io_tree *io_tree = &inode->io_tree;
+ 	u64 range_start = start;
+@@ -1867,8 +1865,7 @@ out_check:
+ 			btrfs_dec_nocow_writers(fs_info, disk_bytenr);
+ 		nocow = false;
+ 
+-		if (root->root_key.objectid ==
+-		    BTRFS_DATA_RELOC_TREE_OBJECTID)
++		if (btrfs_is_data_reloc_root(root))
+ 			/*
+ 			 * Error handled later, as we must prevent
+ 			 * extent_clear_unlock_delalloc() in error handler
+@@ -1948,7 +1945,15 @@ int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page
+ 	const bool zoned = btrfs_is_zoned(inode->root->fs_info);
+ 
+ 	if (should_nocow(inode, start, end)) {
+-		ASSERT(!zoned);
++		/*
++		 * Normally on a zoned device we're only doing COW writes, but
++		 * in case of relocation on a zoned filesystem we have taken
++		 * precautions to ensure we're only writing sequentially. It's
++		 * safe to use run_delalloc_nocow() here, like for regular
++		 * preallocated inodes.
++		 */
++		ASSERT(!zoned ||
++		       (zoned && btrfs_is_data_reloc_root(inode->root)));
+ 		ret = run_delalloc_nocow(inode, locked_page, start, end,
+ 					 page_started, nr_written);
+ 	} else if (!inode_can_compress(inode) ||
+@@ -2207,7 +2212,7 @@ void btrfs_clear_delalloc_extent(struct inode *vfs_inode,
+ 	if (btrfs_is_testing(fs_info))
+ 		return;
+ 
+-	if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID &&
++	if (!btrfs_is_data_reloc_root(root) &&
+ 	    do_list && !(state->state & EXTENT_NORESERVE) &&
+ 	    (*bits & EXTENT_CLEAR_DATA_RESV))
+ 		btrfs_free_reserved_data_space_noquota(fs_info, len);
+@@ -2532,7 +2537,7 @@ blk_status_t btrfs_submit_data_bio(struct inode *inode, struct bio *bio,
+ 		goto mapit;
+ 	} else if (async && !skip_sum) {
+ 		/* csum items have already been cloned */
+-		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
++		if (btrfs_is_data_reloc_root(root))
+ 			goto mapit;
+ 		/* we're doing a write, do the async checksumming */
+ 		ret = btrfs_wq_submit_bio(inode, bio, mirror_num, bio_flags,
+@@ -3304,7 +3309,7 @@ unsigned int btrfs_verify_data_csum(struct btrfs_io_bio *io_bio, u32 bio_offset,
+ 	u64 file_offset = pg_off + page_offset(page);
+ 	int ret;
+ 
+-	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
++	if (btrfs_is_data_reloc_root(root) &&
+ 	    test_range_bit(io_tree, file_offset,
+ 			   file_offset + sectorsize - 1,
+ 			   EXTENT_NODATASUM, 1, NULL)) {
+@@ -4005,7 +4010,7 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
+ 	 * without delay
+ 	 */
+ 	if (!btrfs_is_free_space_inode(inode)
+-	    && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
++	    && !btrfs_is_data_reloc_root(root)
+ 	    && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
+ 		btrfs_update_root_times(trans, root);
+ 
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index 914d403b4415d..d81bee621d373 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -2852,31 +2852,6 @@ static noinline_for_stack int prealloc_file_extent_cluster(
+ 	if (ret)
+ 		return ret;
+ 
+-	/*
+-	 * On a zoned filesystem, we cannot preallocate the file region.
+-	 * Instead, we dirty and fiemap_write the region.
+-	 */
+-	if (btrfs_is_zoned(inode->root->fs_info)) {
+-		struct btrfs_root *root = inode->root;
+-		struct btrfs_trans_handle *trans;
+-
+-		end = cluster->end - offset + 1;
+-		trans = btrfs_start_transaction(root, 1);
+-		if (IS_ERR(trans))
+-			return PTR_ERR(trans);
+-
+-		inode->vfs_inode.i_ctime = current_time(&inode->vfs_inode);
+-		i_size_write(&inode->vfs_inode, end);
+-		ret = btrfs_update_inode(trans, root, inode);
+-		if (ret) {
+-			btrfs_abort_transaction(trans, ret);
+-			btrfs_end_transaction(trans);
+-			return ret;
+-		}
+-
+-		return btrfs_end_transaction(trans);
+-	}
+-
+ 	btrfs_inode_lock(&inode->vfs_inode, 0);
+ 	for (nr = 0; nr < cluster->nr; nr++) {
+ 		start = cluster->boundary[nr] - offset;
+@@ -3084,7 +3059,6 @@ release_page:
+ static int relocate_file_extent_cluster(struct inode *inode,
+ 					struct file_extent_cluster *cluster)
+ {
+-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ 	u64 offset = BTRFS_I(inode)->index_cnt;
+ 	unsigned long index;
+ 	unsigned long last_index;
+@@ -3114,8 +3088,6 @@ static int relocate_file_extent_cluster(struct inode *inode,
+ 	for (index = (cluster->start - offset) >> PAGE_SHIFT;
+ 	     index <= last_index && !ret; index++)
+ 		ret = relocate_one_page(inode, ra, cluster, &cluster_nr, index);
+-	if (btrfs_is_zoned(fs_info) && !ret)
+-		ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
+ 	if (ret == 0)
+ 		WARN_ON(cluster_nr != cluster->nr);
+ out:
+@@ -3770,12 +3742,8 @@ static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
+ 	struct btrfs_path *path;
+ 	struct btrfs_inode_item *item;
+ 	struct extent_buffer *leaf;
+-	u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC;
+ 	int ret;
+ 
+-	if (btrfs_is_zoned(trans->fs_info))
+-		flags &= ~BTRFS_INODE_PREALLOC;
+-
+ 	path = btrfs_alloc_path();
+ 	if (!path)
+ 		return -ENOMEM;
+@@ -3790,7 +3758,8 @@ static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
+ 	btrfs_set_inode_generation(leaf, item, 1);
+ 	btrfs_set_inode_size(leaf, item, 0);
+ 	btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
+-	btrfs_set_inode_flags(leaf, item, flags);
++	btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
++					  BTRFS_INODE_PREALLOC);
+ 	btrfs_mark_buffer_dirty(leaf);
+ out:
+ 	btrfs_free_path(path);
+@@ -4386,8 +4355,7 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
+ 	if (!rc)
+ 		return 0;
+ 
+-	BUG_ON(rc->stage == UPDATE_DATA_PTRS &&
+-	       root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID);
++	BUG_ON(rc->stage == UPDATE_DATA_PTRS && btrfs_is_data_reloc_root(root));
+ 
+ 	level = btrfs_header_level(buf);
+ 	if (btrfs_header_generation(buf) <=
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index 47af1ab3bf120..5672c24a2d582 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -1304,6 +1304,17 @@ bool btrfs_use_zone_append(struct btrfs_inode *inode, u64 start)
+ 	if (!is_data_inode(&inode->vfs_inode))
+ 		return false;
+ 
++	/*
++	 * Using REQ_OP_ZONE_APPEND for relocation can break assumptions on
++	 * the extent layout the relocation code has.
++	 * Furthermore we have set aside our own block group from which only
++	 * the relocation "process" can allocate, and make sure only one
++	 * process at a time can add pages to an extent that gets relocated,
++	 * so it's safe to use regular REQ_OP_WRITE for this special case.
++	 */
++	if (btrfs_is_data_reloc_root(inode->root))
++		return false;
++
+ 	cache = btrfs_lookup_block_group(fs_info, start);
+ 	ASSERT(cache);
+ 	if (!cache)
+@@ -1530,3 +1541,13 @@ struct btrfs_device *btrfs_zoned_get_device(struct btrfs_fs_info *fs_info,
+ 
+ 	return device;
+ }
++
++void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg)
++{
++	struct btrfs_fs_info *fs_info = bg->fs_info;
++
++	spin_lock(&fs_info->relocation_bg_lock);
++	if (fs_info->data_reloc_bg == bg->start)
++		fs_info->data_reloc_bg = 0;
++	spin_unlock(&fs_info->relocation_bg_lock);
++}
+diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h
+index 4b299705bb12b..70b3be517599f 100644
+--- a/fs/btrfs/zoned.h
++++ b/fs/btrfs/zoned.h
+@@ -66,6 +66,7 @@ int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
+ 				  u64 physical_start, u64 physical_pos);
+ struct btrfs_device *btrfs_zoned_get_device(struct btrfs_fs_info *fs_info,
+ 					    u64 logical, u64 length);
++void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg);
+ #else /* CONFIG_BLK_DEV_ZONED */
+ static inline int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
+ 				     struct blk_zone *zone)
+@@ -199,6 +200,8 @@ static inline struct btrfs_device *btrfs_zoned_get_device(
+ 	return ERR_PTR(-EOPNOTSUPP);
+ }
+ 
++static inline void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg) { }
++
+ #endif
+ 
+ static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 683aee3654200..0a9fdcbbab83d 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -235,6 +235,14 @@ struct request {
+ 	void *end_io_data;
+ };
++
++static inline int blk_validate_block_size(unsigned int bsize)
++{
++	if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
++		return -EINVAL;
++
++	return 0;
++}
++
+ static inline bool blk_op_is_passthrough(unsigned int op)
+ {
+ 	op &= REQ_OP_MASK;
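Aside: the helper added above centralizes the check that loop.c carried earlier in this patch as loop_validate_block_size(): a block size is valid only if it is a power of two between 512 and PAGE_SIZE. A standalone userspace sketch of the same predicate (PAGE_SIZE, EINVAL, and is_power_of_2 redefined locally for illustration; in the kernel all three already exist):

#include <assert.h>

#define PAGE_SIZE 4096	/* typical; the real value is per-architecture */
#define EINVAL 22

static int is_power_of_2(unsigned int n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

static int blk_validate_block_size(unsigned int bsize)
{
	if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
		return -EINVAL;
	return 0;
}

int main(void)
{
	assert(blk_validate_block_size(512) == 0);
	assert(blk_validate_block_size(4096) == 0);
	assert(blk_validate_block_size(1000) != 0);	/* not a power of two */
	assert(blk_validate_block_size(256) != 0);	/* below 512 */
	assert(blk_validate_block_size(8192) != 0);	/* above PAGE_SIZE */
	return 0;
}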
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index cd8aa6fce2041..152a4d74f87f0 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -233,6 +233,8 @@ enum pci_dev_flags {
+ 	PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
+ 	/* Don't use Relaxed Ordering for TLPs directed at this device */
+ 	PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11),
++	/* Device does honor MSI masking despite saying otherwise */
++	PCI_DEV_FLAGS_HAS_MSI_MASKING = (__force pci_dev_flags_t) (1 << 12),
+ };
+ 
+ enum pci_irq_reroute_variant {
+diff --git a/include/linux/string.h b/include/linux/string.h
+index 5e96d656be7ae..d68097b4f600b 100644
+--- a/include/linux/string.h
++++ b/include/linux/string.h
+@@ -262,23 +262,8 @@ void __write_overflow(void) __compiletime_error("detected write beyond size of o
+ #include <linux/fortify-string.h>
+ #endif
+ 
+-/**
+- * memcpy_and_pad - Copy one buffer to another with padding
+- * @dest: Where to copy to
+- * @dest_len: The destination buffer size
+- * @src: Where to copy from
+- * @count: The number of bytes to copy
+- * @pad: Character to use for padding if space is left in destination.
+- */
+-static inline void memcpy_and_pad(void *dest, size_t dest_len,
+-				  const void *src, size_t count, int pad)
+-{
+-	if (dest_len > count) {
+-		memcpy(dest, src, count);
+-		memset(dest + count, pad, dest_len - count);
+-	} else
+-		memcpy(dest, src, dest_len);
+-}
++void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
++		    int pad);
+ 
+ /**
+  * str_has_prefix - Test if a string has a given prefix
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index f23ca260307f0..7162b600e7eaa 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -7154,7 +7154,6 @@ void perf_output_sample(struct perf_output_handle *handle,
+ static u64 perf_virt_to_phys(u64 virt)
+ {
+ 	u64 phys_addr = 0;
+-	struct page *p = NULL;
+ 
+ 	if (!virt)
+ 		return 0;
+@@ -7173,14 +7172,15 @@ static u64 perf_virt_to_phys(u64 virt)
+ 		 * If failed, leave phys_addr as 0.
+ 		 */
+ 		if (current->mm != NULL) {
++			struct page *p;
++
+ 			pagefault_disable();
+-			if (get_user_page_fast_only(virt, 0, &p))
++			if (get_user_page_fast_only(virt, 0, &p)) {
+ 				phys_addr = page_to_phys(p) + virt % PAGE_SIZE;
++				put_page(p);
++			}
+ 			pagefault_enable();
+ 		}
+-
+-		if (p)
+-			put_page(p);
+ 	}
+ 
+ 	return phys_addr;
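Aside: the perf hunk narrows struct page *p to the block that uses it and pairs put_page() with the successful get_user_page_fast_only() call, so a page reference is dropped only on the path that actually took one. A self-contained userspace sketch of that pairing (toy types; lookup_page() is a hypothetical stand-in for the real GUP helper):

#include <stdio.h>

struct page { int refcount; };

static struct page global_page = { .refcount = 1 };

/* On success takes a reference and sets *p; on failure leaves *p
 * untouched, just like get_user_page_fast_only(). */
static int lookup_page(int ok, struct page **p)
{
	if (!ok)
		return 0;
	global_page.refcount++;
	*p = &global_page;
	return 1;
}

static void put_page(struct page *p) { p->refcount--; }

int main(void)
{
	struct page *p;	/* no "= NULL" needed with the fixed pattern */

	if (lookup_page(1, &p)) {
		/* ... use p ... */
		put_page(p);	/* drop the ref on the success path only */
	}
	printf("refcount = %d\n", global_page.refcount);	/* back to 1 */
	return 0;
}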
+diff --git a/lib/string_helpers.c b/lib/string_helpers.c
+index 3806a52ce697a..2ddc10bd9add6 100644
+--- a/lib/string_helpers.c
++++ b/lib/string_helpers.c
+@@ -696,3 +696,23 @@ void kfree_strarray(char **array, size_t n)
+ 	kfree(array);
+ }
+ EXPORT_SYMBOL_GPL(kfree_strarray);
++
++/**
++ * memcpy_and_pad - Copy one buffer to another with padding
++ * @dest: Where to copy to
++ * @dest_len: The destination buffer size
++ * @src: Where to copy from
++ * @count: The number of bytes to copy
++ * @pad: Character to use for padding if space is left in destination.
++ */
++void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
++		    int pad)
++{
++	if (dest_len > count) {
++		memcpy(dest, src, count);
++		memset(dest + count, pad, dest_len - count);
++	} else {
++		memcpy(dest, src, dest_len);
++	}
++}
++EXPORT_SYMBOL(memcpy_and_pad);
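Aside: the patch moves memcpy_and_pad() out of line, from a static inline in string.h to an exported definition here, with an unchanged contract: copy up to dest_len bytes and pad any remainder, so the destination never keeps stale bytes. A userspace sketch of typical use, filling a fixed-width field (local copy of the helper; portable C needs the char * cast that the kernel's void-pointer arithmetic avoids):

#include <stdio.h>
#include <string.h>

static void memcpy_and_pad(void *dest, size_t dest_len,
			   const void *src, size_t count, int pad)
{
	if (dest_len > count) {
		memcpy(dest, src, count);
		memset((char *)dest + count, pad, dest_len - count);
	} else {
		memcpy(dest, src, dest_len);
	}
}

int main(void)
{
	char serial[8];

	memcpy_and_pad(serial, sizeof(serial), "AB12", 4, ' ');
	printf("[%.8s]\n", serial);	/* prints [AB12    ] */
	return 0;
}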
+diff --git a/security/Kconfig b/security/Kconfig
+index 0ced7fd33e4d0..fe6c0395fa025 100644
+--- a/security/Kconfig
++++ b/security/Kconfig
+@@ -191,6 +191,9 @@ config HARDENED_USERCOPY_PAGESPAN
+ config FORTIFY_SOURCE
+ 	bool "Harden common str/mem functions against buffer overflows"
+ 	depends on ARCH_HAS_FORTIFY_SOURCE
++	# https://bugs.llvm.org/show_bug.cgi?id=50322
++	# https://bugs.llvm.org/show_bug.cgi?id=41459
++	depends on !CC_IS_CLANG
+ 	help
+ 	  Detect overflows of buffers in common string and memory functions
+ 	  where the compiler can determine and validate the buffer sizes.
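Aside: FORTIFY_SOURCE works by letting the compiler compare a copy length against the size it can prove for the destination; the Kconfig change disables it under clang because of the two LLVM bugs cited in the hunk. A tiny sketch of the underlying primitive (my own illustration; builds with GCC or Clang):

#include <stdio.h>

static char buf[8];

int main(void)
{
	/* Mode 0 reports the maximum size of the enclosing object; the
	 * fortified str/mem wrappers compare this against the requested
	 * length and trap on provable overflows. */
	printf("known size: %zu\n", __builtin_object_size(buf, 0)); /* 8 */
	return 0;
}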