Gentoo Archives: gentoo-commits

From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Sat, 30 Jan 2021 13:37:05
Message-Id: 1612013794.887c511ea6efa5499c8faad2860c5ff1a30231e1.alicef@gentoo
commit: 887c511ea6efa5499c8faad2860c5ff1a30231e1
Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Sat Jan 30 13:36:19 2021 +0000
Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Sat Jan 30 13:36:34 2021 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=887c511e

Linux patch 5.4.94

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

0000_README | 4 +
1093_linux-5.4.94.patch | 1065 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1069 insertions(+)

diff --git a/0000_README b/0000_README
index e33ed0d..daa3ba2 100644
--- a/0000_README
+++ b/0000_README
@@ -415,6 +415,10 @@ Patch: 1092_linux-5.4.93.patch
From: http://www.kernel.org
Desc: Linux 5.4.93

+Patch: 1093_linux-5.4.94.patch
+From: http://www.kernel.org
+Desc: Linux 5.4.94
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1093_linux-5.4.94.patch b/1093_linux-5.4.94.patch
new file mode 100644
index 0000000..e14ba30
--- /dev/null
+++ b/1093_linux-5.4.94.patch
@@ -0,0 +1,1065 @@
+diff --git a/Documentation/admin-guide/device-mapper/dm-integrity.rst b/Documentation/admin-guide/device-mapper/dm-integrity.rst
+index a30aa91b5fbe9..3463883844c0b 100644
+--- a/Documentation/admin-guide/device-mapper/dm-integrity.rst
++++ b/Documentation/admin-guide/device-mapper/dm-integrity.rst
+@@ -177,6 +177,12 @@ bitmap_flush_interval:number
+ The bitmap flush interval in milliseconds. The metadata buffers
+ are synchronized when this interval expires.
+
++legacy_recalculate
++ Allow recalculating of volumes with HMAC keys. This is disabled by
++ default for security reasons - an attacker could modify the volume,
++ set recalc_sector to zero, and the kernel would not detect the
++ modification.
++
+
+ The journal mode (D/J), buffer_sectors, journal_watermark, commit_time can
+ be changed when reloading the target (load an inactive table and swap the
+diff --git a/Makefile b/Makefile
+index f8462f8d8a151..ad1b8dc6e462a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 93
++SUBLEVEL = 94
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
+index 08df42e4db96c..51d867cf146c1 100644
+--- a/arch/arm64/include/asm/memory.h
++++ b/arch/arm64/include/asm/memory.h
+@@ -178,7 +178,6 @@ extern u64 vabits_actual;
+ #include <linux/bitops.h>
+ #include <linux/mmdebug.h>
+
+-extern s64 physvirt_offset;
+ extern s64 memstart_addr;
+ /* PHYS_OFFSET - the physical address of the start of memory. */
+ #define PHYS_OFFSET ({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })
+@@ -254,7 +253,7 @@ static inline const void *__tag_set(const void *addr, u8 tag)
+ */
+ #define __is_lm_address(addr) (!(((u64)addr) & BIT(vabits_actual - 1)))
+
+-#define __lm_to_phys(addr) (((addr) + physvirt_offset))
++#define __lm_to_phys(addr) (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
+ #define __kimg_to_phys(addr) ((addr) - kimage_voffset)
+
+ #define __virt_to_phys_nodebug(x) ({ \
+@@ -272,7 +271,7 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x);
+ #define __phys_addr_symbol(x) __pa_symbol_nodebug(x)
+ #endif /* CONFIG_DEBUG_VIRTUAL */
+
+-#define __phys_to_virt(x) ((unsigned long)((x) - physvirt_offset))
++#define __phys_to_virt(x) ((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
+ #define __phys_to_kimg(x) ((unsigned long)((x) + kimage_voffset))
+
+ /*
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index 69dfc340e71b1..8c420f916fe2e 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -23,6 +23,8 @@
+ #define VMALLOC_START (MODULES_END)
+ #define VMALLOC_END (- PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
+
++#define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
++
+ #define FIRST_USER_ADDRESS 0UL
+
+ #ifndef __ASSEMBLY__
+@@ -33,8 +35,6 @@
+ #include <linux/mm_types.h>
+ #include <linux/sched.h>
+
+-extern struct page *vmemmap;
+-
+ extern void __pte_error(const char *file, int line, unsigned long val);
+ extern void __pmd_error(const char *file, int line, unsigned long val);
+ extern void __pud_error(const char *file, int line, unsigned long val);
+diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
+index 45c00a54909c9..602bd19630ff8 100644
+--- a/arch/arm64/mm/init.c
++++ b/arch/arm64/mm/init.c
+@@ -50,12 +50,6 @@
+ s64 memstart_addr __ro_after_init = -1;
+ EXPORT_SYMBOL(memstart_addr);
+
+-s64 physvirt_offset __ro_after_init;
+-EXPORT_SYMBOL(physvirt_offset);
+-
+-struct page *vmemmap __ro_after_init;
+-EXPORT_SYMBOL(vmemmap);
+-
+ phys_addr_t arm64_dma_phys_limit __ro_after_init;
+
+ #ifdef CONFIG_KEXEC_CORE
+@@ -321,20 +315,6 @@ void __init arm64_memblock_init(void)
+ memstart_addr = round_down(memblock_start_of_DRAM(),
+ ARM64_MEMSTART_ALIGN);
+
+- physvirt_offset = PHYS_OFFSET - PAGE_OFFSET;
+-
+- vmemmap = ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT));
+-
+- /*
+- * If we are running with a 52-bit kernel VA config on a system that
+- * does not support it, we have to offset our vmemmap and physvirt_offset
+- * s.t. we avoid the 52-bit portion of the direct linear map
+- */
+- if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52)) {
+- vmemmap += (_PAGE_OFFSET(48) - _PAGE_OFFSET(52)) >> PAGE_SHIFT;
+- physvirt_offset = PHYS_OFFSET - _PAGE_OFFSET(48);
+- }
+-
+ /*
+ * Remove the memory that we will not be able to cover with the
+ * linear mapping. Take care not to clip the kernel which may be
+@@ -349,6 +329,16 @@ void __init arm64_memblock_init(void)
+ memblock_remove(0, memstart_addr);
+ }
+
++ /*
++ * If we are running with a 52-bit kernel VA config on a system that
++ * does not support it, we have to place the available physical
++ * memory in the 48-bit addressable part of the linear region, i.e.,
++ * we have to move it upward. Since memstart_addr represents the
++ * physical address of PAGE_OFFSET, we have to *subtract* from it.
++ */
++ if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52))
++ memstart_addr -= _PAGE_OFFSET(48) - _PAGE_OFFSET(52);
++
+ /*
+ * Apply the memory limit if it was set. Since the kernel may be loaded
+ * high up in memory, add back the kernel region that must be accessible
+diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
+index 3985d6e1c17dc..89a053b1d2799 100644
+--- a/drivers/gpio/gpio-mvebu.c
++++ b/drivers/gpio/gpio-mvebu.c
+@@ -657,9 +657,8 @@ static void mvebu_pwm_get_state(struct pwm_chip *chip,
+
+ spin_lock_irqsave(&mvpwm->lock, flags);
+
+- val = (unsigned long long)
+- readl_relaxed(mvebu_pwmreg_blink_on_duration(mvpwm));
+- val *= NSEC_PER_SEC;
++ u = readl_relaxed(mvebu_pwmreg_blink_on_duration(mvpwm));
++ val = (unsigned long long) u * NSEC_PER_SEC;
+ do_div(val, mvpwm->clk_rate);
+ if (val > UINT_MAX)
+ state->duty_cycle = UINT_MAX;
+@@ -668,21 +667,17 @@ static void mvebu_pwm_get_state(struct pwm_chip *chip,
+ else
+ state->duty_cycle = 1;
+
+- val = (unsigned long long)
+- readl_relaxed(mvebu_pwmreg_blink_off_duration(mvpwm));
++ val = (unsigned long long) u; /* on duration */
++ /* period = on + off duration */
++ val += readl_relaxed(mvebu_pwmreg_blink_off_duration(mvpwm));
+ val *= NSEC_PER_SEC;
+ do_div(val, mvpwm->clk_rate);
+- if (val < state->duty_cycle) {
++ if (val > UINT_MAX)
++ state->period = UINT_MAX;
++ else if (val)
++ state->period = val;
++ else
+ state->period = 1;
+- } else {
+- val -= state->duty_cycle;
+- if (val > UINT_MAX)
+- state->period = UINT_MAX;
+- else if (val)
+- state->period = val;
+- else
+- state->period = 1;
+- }
+
+ regmap_read(mvchip->regs, GPIO_BLINK_EN_OFF + mvchip->offset, &u);
+ if (u)
+diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
+index 9e852b4bbf92b..73dafa60080f1 100644
+--- a/drivers/hid/wacom_sys.c
++++ b/drivers/hid/wacom_sys.c
+@@ -147,9 +147,9 @@ static int wacom_wac_pen_serial_enforce(struct hid_device *hdev,
+ }
+
+ if (flush)
+- wacom_wac_queue_flush(hdev, &wacom_wac->pen_fifo);
++ wacom_wac_queue_flush(hdev, wacom_wac->pen_fifo);
+ else if (insert)
+- wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo,
++ wacom_wac_queue_insert(hdev, wacom_wac->pen_fifo,
+ raw_data, report_size);
+
+ return insert && !flush;
+@@ -1280,7 +1280,7 @@ static void wacom_devm_kfifo_release(struct device *dev, void *res)
+ static int wacom_devm_kfifo_alloc(struct wacom *wacom)
+ {
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+- struct kfifo_rec_ptr_2 *pen_fifo = &wacom_wac->pen_fifo;
++ struct kfifo_rec_ptr_2 *pen_fifo;
+ int error;
+
+ pen_fifo = devres_alloc(wacom_devm_kfifo_release,
+@@ -1297,6 +1297,7 @@ static int wacom_devm_kfifo_alloc(struct wacom *wacom)
+ }
+
+ devres_add(&wacom->hdev->dev, pen_fifo);
++ wacom_wac->pen_fifo = pen_fifo;
+
+ return 0;
+ }
+diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
+index da612b6e9c779..195910dd2154e 100644
+--- a/drivers/hid/wacom_wac.h
++++ b/drivers/hid/wacom_wac.h
+@@ -342,7 +342,7 @@ struct wacom_wac {
+ struct input_dev *pen_input;
+ struct input_dev *touch_input;
+ struct input_dev *pad_input;
+- struct kfifo_rec_ptr_2 pen_fifo;
++ struct kfifo_rec_ptr_2 *pen_fifo;
+ int pid;
+ int num_contacts_left;
+ u8 bt_features;
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 57f66f2ad98dc..c967c2cdba870 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -254,6 +254,7 @@ struct dm_integrity_c {
+ bool journal_uptodate;
+ bool just_formatted;
+ bool recalculate_flag;
++ bool legacy_recalculate;
+
+ struct alg_spec internal_hash_alg;
+ struct alg_spec journal_crypt_alg;
+@@ -381,6 +382,14 @@ static int dm_integrity_failed(struct dm_integrity_c *ic)
+ return READ_ONCE(ic->failed);
+ }
+
++static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic)
++{
++ if ((ic->internal_hash_alg.key || ic->journal_mac_alg.key) &&
++ !ic->legacy_recalculate)
++ return true;
++ return false;
++}
++
+ static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
+ unsigned j, unsigned char seq)
+ {
+@@ -2998,6 +3007,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
+ arg_count += !!ic->internal_hash_alg.alg_string;
+ arg_count += !!ic->journal_crypt_alg.alg_string;
+ arg_count += !!ic->journal_mac_alg.alg_string;
++ arg_count += ic->legacy_recalculate;
+ DMEMIT("%s %llu %u %c %u", ic->dev->name, (unsigned long long)ic->start,
+ ic->tag_size, ic->mode, arg_count);
+ if (ic->meta_dev)
+@@ -3017,6 +3027,8 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
+ DMEMIT(" sectors_per_bit:%llu", (unsigned long long)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
+ DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval));
+ }
++ if (ic->legacy_recalculate)
++ DMEMIT(" legacy_recalculate");
+
+ #define EMIT_ALG(a, n) \
+ do { \
+@@ -3625,7 +3637,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ unsigned extra_args;
+ struct dm_arg_set as;
+ static const struct dm_arg _args[] = {
+- {0, 15, "Invalid number of feature args"},
++ {0, 14, "Invalid number of feature args"},
+ };
+ unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
+ bool should_write_sb;
+@@ -3769,6 +3781,8 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ goto bad;
+ } else if (!strcmp(opt_string, "recalculate")) {
+ ic->recalculate_flag = true;
++ } else if (!strcmp(opt_string, "legacy_recalculate")) {
++ ic->legacy_recalculate = true;
+ } else {
+ r = -EINVAL;
+ ti->error = "Invalid argument";
+@@ -4067,6 +4081,14 @@ try_smaller_buffer:
+ }
+ }
+
++ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
++ le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors &&
++ dm_integrity_disable_recalculate(ic)) {
++ ti->error = "Recalculating with HMAC is disabled for security reasons - if you really need it, use the argument \"legacy_recalculate\"";
++ r = -EOPNOTSUPP;
++ goto bad;
++ }
++
+ ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
+ 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL);
+ if (IS_ERR(ic->bufio)) {
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index be06b26d6ca03..7adecfd0c1e99 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -490,8 +490,8 @@ build_preauth_ctxt(struct smb2_preauth_neg_context *pneg_ctxt)
+ pneg_ctxt->ContextType = SMB2_PREAUTH_INTEGRITY_CAPABILITIES;
+ pneg_ctxt->DataLength = cpu_to_le16(38);
+ pneg_ctxt->HashAlgorithmCount = cpu_to_le16(1);
+- pneg_ctxt->SaltLength = cpu_to_le16(SMB311_SALT_SIZE);
+- get_random_bytes(pneg_ctxt->Salt, SMB311_SALT_SIZE);
++ pneg_ctxt->SaltLength = cpu_to_le16(SMB311_LINUX_CLIENT_SALT_SIZE);
++ get_random_bytes(pneg_ctxt->Salt, SMB311_LINUX_CLIENT_SALT_SIZE);
+ pneg_ctxt->HashAlgorithms = SMB2_PREAUTH_INTEGRITY_SHA512;
+ }
+
+@@ -617,6 +617,9 @@ static void decode_preauth_context(struct smb2_preauth_neg_context *ctxt)
+ if (len < MIN_PREAUTH_CTXT_DATA_LEN) {
+ printk_once(KERN_WARNING "server sent bad preauth context\n");
+ return;
++ } else if (len < MIN_PREAUTH_CTXT_DATA_LEN + le16_to_cpu(ctxt->SaltLength)) {
++ pr_warn_once("server sent invalid SaltLength\n");
++ return;
+ }
+ if (le16_to_cpu(ctxt->HashAlgorithmCount) != 1)
+ printk_once(KERN_WARNING "illegal SMB3 hash algorithm count\n");
+diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
+index f264e1d36fe16..2482978f09486 100644
+--- a/fs/cifs/smb2pdu.h
++++ b/fs/cifs/smb2pdu.h
+@@ -271,12 +271,20 @@ struct smb2_neg_context {
+ /* Followed by array of data */
+ } __packed;
+
+-#define SMB311_SALT_SIZE 32
++#define SMB311_LINUX_CLIENT_SALT_SIZE 32
+ /* Hash Algorithm Types */
+ #define SMB2_PREAUTH_INTEGRITY_SHA512 cpu_to_le16(0x0001)
+ #define SMB2_PREAUTH_HASH_SIZE 64
+
+-#define MIN_PREAUTH_CTXT_DATA_LEN (SMB311_SALT_SIZE + 6)
++/*
++ * SaltLength that the server send can be zero, so the only three required
++ * fields (all __le16) end up six bytes total, so the minimum context data len
++ * in the response is six bytes which accounts for
++ *
++ * HashAlgorithmCount, SaltLength, and 1 HashAlgorithm.
++ */
++#define MIN_PREAUTH_CTXT_DATA_LEN 6
++
+ struct smb2_preauth_neg_context {
+ __le16 ContextType; /* 1 */
+ __le16 DataLength;
+@@ -284,7 +292,7 @@ struct smb2_preauth_neg_context {
+ __le16 HashAlgorithmCount; /* 1 */
+ __le16 SaltLength;
+ __le16 HashAlgorithms; /* HashAlgorithms[0] since only one defined */
+- __u8 Salt[SMB311_SALT_SIZE];
++ __u8 Salt[SMB311_LINUX_CLIENT_SALT_SIZE];
+ } __packed;
+
+ /* Encryption Algorithms Ciphers */
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 3bac525f0439d..539d95bd364d4 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -5209,7 +5209,7 @@ static int other_inode_match(struct inode * inode, unsigned long ino,
+ (inode->i_state & I_DIRTY_TIME)) {
+ struct ext4_inode_info *ei = EXT4_I(inode);
+
+- inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED);
++ inode->i_state &= ~I_DIRTY_TIME;
+ spin_unlock(&inode->i_lock);
+
+ spin_lock(&ei->i_raw_lock);
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index 5f6400ba82c00..a2cf2db0d3def 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -1238,7 +1238,7 @@ static bool inode_dirtied_after(struct inode *inode, unsigned long t)
+ */
+ static int move_expired_inodes(struct list_head *delaying_queue,
+ struct list_head *dispatch_queue,
+- int flags, unsigned long dirtied_before)
++ unsigned long dirtied_before)
+ {
+ LIST_HEAD(tmp);
+ struct list_head *pos, *node;
+@@ -1254,8 +1254,6 @@ static int move_expired_inodes(struct list_head *delaying_queue,
+ list_move(&inode->i_io_list, &tmp);
+ moved++;
+ spin_lock(&inode->i_lock);
+- if (flags & EXPIRE_DIRTY_ATIME)
+- inode->i_state |= I_DIRTY_TIME_EXPIRED;
+ inode->i_state |= I_SYNC_QUEUED;
+ spin_unlock(&inode->i_lock);
+ if (sb_is_blkdev_sb(inode->i_sb))
+@@ -1303,11 +1301,11 @@ static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work,
+
+ assert_spin_locked(&wb->list_lock);
+ list_splice_init(&wb->b_more_io, &wb->b_io);
+- moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, dirtied_before);
++ moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, dirtied_before);
+ if (!work->for_sync)
+ time_expire_jif = jiffies - dirtytime_expire_interval * HZ;
+ moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
+- EXPIRE_DIRTY_ATIME, time_expire_jif);
++ time_expire_jif);
+ if (moved)
+ wb_io_lists_populated(wb);
+ trace_writeback_queue_io(wb, work, dirtied_before, moved);
+@@ -1475,26 +1473,26 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
+ ret = err;
+ }
+
++ /*
++ * If the inode has dirty timestamps and we need to write them, call
++ * mark_inode_dirty_sync() to notify the filesystem about it and to
++ * change I_DIRTY_TIME into I_DIRTY_SYNC.
++ */
++ if ((inode->i_state & I_DIRTY_TIME) &&
++ (wbc->sync_mode == WB_SYNC_ALL || wbc->for_sync ||
++ time_after(jiffies, inode->dirtied_time_when +
++ dirtytime_expire_interval * HZ))) {
++ trace_writeback_lazytime(inode);
++ mark_inode_dirty_sync(inode);
++ }
++
+ /*
+ * Some filesystems may redirty the inode during the writeback
+ * due to delalloc, clear dirty metadata flags right before
+ * write_inode()
+ */
+ spin_lock(&inode->i_lock);
+-
+ dirty = inode->i_state & I_DIRTY;
+- if (inode->i_state & I_DIRTY_TIME) {
+- if ((dirty & I_DIRTY_INODE) ||
+- wbc->sync_mode == WB_SYNC_ALL ||
+- unlikely(inode->i_state & I_DIRTY_TIME_EXPIRED) ||
+- unlikely(time_after(jiffies,
+- (inode->dirtied_time_when +
+- dirtytime_expire_interval * HZ)))) {
+- dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED;
+- trace_writeback_lazytime(inode);
+- }
+- } else
+- inode->i_state &= ~I_DIRTY_TIME_EXPIRED;
+ inode->i_state &= ~dirty;
+
+ /*
+@@ -1515,8 +1513,6 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
+
+ spin_unlock(&inode->i_lock);
+
+- if (dirty & I_DIRTY_TIME)
+- mark_inode_dirty_sync(inode);
+ /* Don't write the inode if only I_DIRTY_PAGES was set */
+ if (dirty & ~I_DIRTY_PAGES) {
+ int err = write_inode(inode, wbc);
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 4127ea027a14d..478df7e10767a 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -2226,7 +2226,8 @@ restart:
+ /* Ensure we clear previously set non-block flag */
+ req->rw.ki_flags &= ~IOCB_NOWAIT;
+
+- if (req->fs != current->fs && current->fs != old_fs_struct) {
++ if ((req->fs && req->fs != current->fs) ||
++ (!req->fs && current->fs != old_fs_struct)) {
+ task_lock(current);
+ if (req->fs)
+ current->fs = req->fs;
+@@ -2351,7 +2352,7 @@ out:
+ mmput(cur_mm);
+ }
+ revert_creds(old_cred);
+- if (old_fs_struct) {
++ if (old_fs_struct != current->fs) {
+ task_lock(current);
+ current->fs = old_fs_struct;
+ task_unlock(current);
+diff --git a/fs/xfs/libxfs/xfs_trans_inode.c b/fs/xfs/libxfs/xfs_trans_inode.c
+index 6c7354abd0aea..0ba7368b9a5f0 100644
+--- a/fs/xfs/libxfs/xfs_trans_inode.c
++++ b/fs/xfs/libxfs/xfs_trans_inode.c
+@@ -100,9 +100,9 @@ xfs_trans_log_inode(
+ * to log the timestamps, or will clear already cleared fields in the
+ * worst case.
+ */
+- if (inode->i_state & (I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED)) {
++ if (inode->i_state & I_DIRTY_TIME) {
+ spin_lock(&inode->i_lock);
+- inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED);
++ inode->i_state &= ~I_DIRTY_TIME;
+ spin_unlock(&inode->i_lock);
+ }
+
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 4c82683e034a7..ef118b8ba6993 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2161,7 +2161,6 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
+ #define I_DIO_WAKEUP (1 << __I_DIO_WAKEUP)
+ #define I_LINKABLE (1 << 10)
+ #define I_DIRTY_TIME (1 << 11)
+-#define I_DIRTY_TIME_EXPIRED (1 << 12)
+ #define I_WB_SWITCH (1 << 13)
+ #define I_OVL_INUSE (1 << 14)
+ #define I_CREATING (1 << 15)
+diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
+index a8af22e469ce5..011e8faa608b9 100644
+--- a/include/trace/events/writeback.h
++++ b/include/trace/events/writeback.h
+@@ -20,7 +20,6 @@
+ {I_CLEAR, "I_CLEAR"}, \
+ {I_SYNC, "I_SYNC"}, \
+ {I_DIRTY_TIME, "I_DIRTY_TIME"}, \
+- {I_DIRTY_TIME_EXPIRED, "I_DIRTY_TIME_EXPIRED"}, \
+ {I_REFERENCED, "I_REFERENCED"} \
+ )
+
+diff --git a/kernel/futex.c b/kernel/futex.c
+index b6dec5f79370c..042c2707e9131 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -857,6 +857,29 @@ static struct futex_pi_state *alloc_pi_state(void)
+ return pi_state;
+ }
+
++static void pi_state_update_owner(struct futex_pi_state *pi_state,
++ struct task_struct *new_owner)
++{
++ struct task_struct *old_owner = pi_state->owner;
++
++ lockdep_assert_held(&pi_state->pi_mutex.wait_lock);
++
++ if (old_owner) {
++ raw_spin_lock(&old_owner->pi_lock);
++ WARN_ON(list_empty(&pi_state->list));
++ list_del_init(&pi_state->list);
++ raw_spin_unlock(&old_owner->pi_lock);
++ }
++
++ if (new_owner) {
++ raw_spin_lock(&new_owner->pi_lock);
++ WARN_ON(!list_empty(&pi_state->list));
++ list_add(&pi_state->list, &new_owner->pi_state_list);
++ pi_state->owner = new_owner;
++ raw_spin_unlock(&new_owner->pi_lock);
++ }
++}
++
+ static void get_pi_state(struct futex_pi_state *pi_state)
+ {
+ WARN_ON_ONCE(!refcount_inc_not_zero(&pi_state->refcount));
+@@ -879,17 +902,11 @@ static void put_pi_state(struct futex_pi_state *pi_state)
+ * and has cleaned up the pi_state already
+ */
+ if (pi_state->owner) {
+- struct task_struct *owner;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&pi_state->pi_mutex.wait_lock, flags);
+- owner = pi_state->owner;
+- if (owner) {
+- raw_spin_lock(&owner->pi_lock);
+- list_del_init(&pi_state->list);
+- raw_spin_unlock(&owner->pi_lock);
+- }
+- rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner);
++ pi_state_update_owner(pi_state, NULL);
++ rt_mutex_proxy_unlock(&pi_state->pi_mutex);
+ raw_spin_unlock_irqrestore(&pi_state->pi_mutex.wait_lock, flags);
+ }
+
+@@ -1035,7 +1052,8 @@ static inline void exit_pi_state_list(struct task_struct *curr) { }
+ * FUTEX_OWNER_DIED bit. See [4]
+ *
+ * [10] There is no transient state which leaves owner and user space
+- * TID out of sync.
++ * TID out of sync. Except one error case where the kernel is denied
++ * write access to the user address, see fixup_pi_state_owner().
+ *
+ *
+ * Serialization and lifetime rules:
+@@ -1614,26 +1632,15 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
+ ret = -EINVAL;
+ }
+
+- if (ret)
+- goto out_unlock;
+-
+- /*
+- * This is a point of no return; once we modify the uval there is no
+- * going back and subsequent operations must not fail.
+- */
+-
+- raw_spin_lock(&pi_state->owner->pi_lock);
+- WARN_ON(list_empty(&pi_state->list));
+- list_del_init(&pi_state->list);
+- raw_spin_unlock(&pi_state->owner->pi_lock);
+-
+- raw_spin_lock(&new_owner->pi_lock);
+- WARN_ON(!list_empty(&pi_state->list));
+- list_add(&pi_state->list, &new_owner->pi_state_list);
+- pi_state->owner = new_owner;
+- raw_spin_unlock(&new_owner->pi_lock);
+-
+- postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
++ if (!ret) {
++ /*
++ * This is a point of no return; once we modified the uval
++ * there is no going back and subsequent operations must
++ * not fail.
++ */
++ pi_state_update_owner(pi_state, new_owner);
++ postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
++ }
+
+ out_unlock:
+ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+@@ -2456,18 +2463,13 @@ static void unqueue_me_pi(struct futex_q *q)
+ spin_unlock(q->lock_ptr);
+ }
+
+-static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+- struct task_struct *argowner)
++static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
++ struct task_struct *argowner)
+ {
++ u32 uval, uninitialized_var(curval), newval, newtid;
+ struct futex_pi_state *pi_state = q->pi_state;
+- u32 uval, uninitialized_var(curval), newval;
+ struct task_struct *oldowner, *newowner;
+- u32 newtid;
+- int ret, err = 0;
+-
+- lockdep_assert_held(q->lock_ptr);
+-
+- raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
++ int err = 0;
+
+ oldowner = pi_state->owner;
+
+@@ -2501,14 +2503,12 @@ retry:
+ * We raced against a concurrent self; things are
+ * already fixed up. Nothing to do.
+ */
+- ret = 0;
+- goto out_unlock;
++ return 0;
+ }
+
+ if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
+- /* We got the lock after all, nothing to fix. */
+- ret = 0;
+- goto out_unlock;
++ /* We got the lock. pi_state is correct. Tell caller. */
++ return 1;
+ }
+
+ /*
+@@ -2535,8 +2535,7 @@ retry:
+ * We raced against a concurrent self; things are
+ * already fixed up. Nothing to do.
+ */
+- ret = 0;
+- goto out_unlock;
++ return 1;
+ }
+ newowner = argowner;
+ }
+@@ -2566,22 +2565,9 @@ retry:
+ * We fixed up user space. Now we need to fix the pi_state
+ * itself.
+ */
+- if (pi_state->owner != NULL) {
+- raw_spin_lock(&pi_state->owner->pi_lock);
+- WARN_ON(list_empty(&pi_state->list));
+- list_del_init(&pi_state->list);
+- raw_spin_unlock(&pi_state->owner->pi_lock);
+- }
++ pi_state_update_owner(pi_state, newowner);
+
+- pi_state->owner = newowner;
+-
+- raw_spin_lock(&newowner->pi_lock);
+- WARN_ON(!list_empty(&pi_state->list));
+- list_add(&pi_state->list, &newowner->pi_state_list);
+- raw_spin_unlock(&newowner->pi_lock);
+- raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+-
+- return 0;
++ return argowner == current;
+
+ /*
+ * In order to reschedule or handle a page fault, we need to drop the
+@@ -2602,17 +2588,16 @@ handle_err:
+
+ switch (err) {
+ case -EFAULT:
+- ret = fault_in_user_writeable(uaddr);
++ err = fault_in_user_writeable(uaddr);
+ break;
+
+ case -EAGAIN:
+ cond_resched();
+- ret = 0;
++ err = 0;
+ break;
+
+ default:
+ WARN_ON_ONCE(1);
+- ret = err;
+ break;
+ }
+
+@@ -2622,17 +2607,44 @@ handle_err:
+ /*
+ * Check if someone else fixed it for us:
+ */
+- if (pi_state->owner != oldowner) {
+- ret = 0;
+- goto out_unlock;
+- }
++ if (pi_state->owner != oldowner)
++ return argowner == current;
+
+- if (ret)
+- goto out_unlock;
++ /* Retry if err was -EAGAIN or the fault in succeeded */
++ if (!err)
++ goto retry;
+
+- goto retry;
++ /*
++ * fault_in_user_writeable() failed so user state is immutable. At
++ * best we can make the kernel state consistent but user state will
++ * be most likely hosed and any subsequent unlock operation will be
++ * rejected due to PI futex rule [10].
++ *
++ * Ensure that the rtmutex owner is also the pi_state owner despite
++ * the user space value claiming something different. There is no
++ * point in unlocking the rtmutex if current is the owner as it
++ * would need to wait until the next waiter has taken the rtmutex
++ * to guarantee consistent state. Keep it simple. Userspace asked
++ * for this wreckaged state.
++ *
++ * The rtmutex has an owner - either current or some other
++ * task. See the EAGAIN loop above.
++ */
++ pi_state_update_owner(pi_state, rt_mutex_owner(&pi_state->pi_mutex));
+
+-out_unlock:
++ return err;
++}
++
++static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
++ struct task_struct *argowner)
++{
++ struct futex_pi_state *pi_state = q->pi_state;
++ int ret;
++
++ lockdep_assert_held(q->lock_ptr);
++
++ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
++ ret = __fixup_pi_state_owner(uaddr, q, argowner);
+ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+ return ret;
+ }
+@@ -2656,8 +2668,6 @@ static long futex_wait_restart(struct restart_block *restart);
+ */
+ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
+ {
+- int ret = 0;
+-
+ if (locked) {
+ /*
+ * Got the lock. We might not be the anticipated owner if we
+@@ -2668,8 +2678,8 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
+ * stable state, anything else needs more attention.
+ */
+ if (q->pi_state->owner != current)
+- ret = fixup_pi_state_owner(uaddr, q, current);
+- goto out;
++ return fixup_pi_state_owner(uaddr, q, current);
++ return 1;
+ }
+
+ /*
+@@ -2680,24 +2690,17 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
+ * Another speculative read; pi_state->owner == current is unstable
+ * but needs our attention.
+ */
+- if (q->pi_state->owner == current) {
+- ret = fixup_pi_state_owner(uaddr, q, NULL);
+- goto out;
+- }
++ if (q->pi_state->owner == current)
++ return fixup_pi_state_owner(uaddr, q, NULL);
+
+ /*
+ * Paranoia check. If we did not take the lock, then we should not be
+- * the owner of the rt_mutex.
++ * the owner of the rt_mutex. Warn and establish consistent state.
+ */
+- if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) {
+- printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
+- "pi-state %p\n", ret,
+- q->pi_state->pi_mutex.owner,
+- q->pi_state->owner);
+- }
++ if (WARN_ON_ONCE(rt_mutex_owner(&q->pi_state->pi_mutex) == current))
++ return fixup_pi_state_owner(uaddr, q, current);
+
+-out:
+- return ret ? ret : locked;
++ return 0;
+ }
+
+ /**
+@@ -2909,7 +2912,6 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
+ ktime_t *time, int trylock)
+ {
+ struct hrtimer_sleeper timeout, *to;
+- struct futex_pi_state *pi_state = NULL;
+ struct task_struct *exiting = NULL;
+ struct rt_mutex_waiter rt_waiter;
+ struct futex_hash_bucket *hb;
+@@ -3046,23 +3048,9 @@ no_block:
+ if (res)
+ ret = (res < 0) ? res : 0;
+
+- /*
+- * If fixup_owner() faulted and was unable to handle the fault, unlock
+- * it and return the fault to userspace.
+- */
+- if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) {
+- pi_state = q.pi_state;
+- get_pi_state(pi_state);
+- }
+-
+ /* Unqueue and drop the lock */
+ unqueue_me_pi(&q);
+
+- if (pi_state) {
+- rt_mutex_futex_unlock(&pi_state->pi_mutex);
+- put_pi_state(pi_state);
+- }
+-
+ goto out_put_key;
+
+ out_unlock_put_key:
+@@ -3328,7 +3316,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ u32 __user *uaddr2)
+ {
+ struct hrtimer_sleeper timeout, *to;
+- struct futex_pi_state *pi_state = NULL;
+ struct rt_mutex_waiter rt_waiter;
+ struct futex_hash_bucket *hb;
+ union futex_key key2 = FUTEX_KEY_INIT;
+@@ -3406,16 +3393,17 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ if (q.pi_state && (q.pi_state->owner != current)) {
+ spin_lock(q.lock_ptr);
+ ret = fixup_pi_state_owner(uaddr2, &q, current);
+- if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
+- pi_state = q.pi_state;
+- get_pi_state(pi_state);
+- }
+ /*
+ * Drop the reference to the pi state which
+ * the requeue_pi() code acquired for us.
+ */
+ put_pi_state(q.pi_state);
+ spin_unlock(q.lock_ptr);
++ /*
++ * Adjust the return value. It's either -EFAULT or
++ * success (1) but the caller expects 0 for success.
++ */
++ ret = ret < 0 ? ret : 0;
+ }
+ } else {
+ struct rt_mutex *pi_mutex;
+@@ -3446,25 +3434,10 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ if (res)
+ ret = (res < 0) ? res : 0;
+
+- /*
+- * If fixup_pi_state_owner() faulted and was unable to handle
+- * the fault, unlock the rt_mutex and return the fault to
+- * userspace.
+- */
+- if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
+- pi_state = q.pi_state;
+- get_pi_state(pi_state);
+- }
+-
+ /* Unqueue and drop the lock. */
+ unqueue_me_pi(&q);
+ }
+
+- if (pi_state) {
+- rt_mutex_futex_unlock(&pi_state->pi_mutex);
+- put_pi_state(pi_state);
+- }
+-
+ if (ret == -EINTR) {
+ /*
+ * We've already been requeued, but cannot restart by calling
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 2874bf5561620..734698aec5f9e 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1718,8 +1718,7 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
+ * possible because it belongs to the pi_state which is about to be freed
+ * and it is not longer visible to other tasks.
+ */
+-void rt_mutex_proxy_unlock(struct rt_mutex *lock,
+- struct task_struct *proxy_owner)
++void rt_mutex_proxy_unlock(struct rt_mutex *lock)
+ {
+ debug_rt_mutex_proxy_unlock(lock);
+ rt_mutex_set_owner(lock, NULL);
+diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
+index d1d62f942be22..ca6fb489007b6 100644
+--- a/kernel/locking/rtmutex_common.h
++++ b/kernel/locking/rtmutex_common.h
+@@ -133,8 +133,7 @@ enum rtmutex_chainwalk {
+ extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
+ extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
+ struct task_struct *proxy_owner);
+-extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
+- struct task_struct *proxy_owner);
++extern void rt_mutex_proxy_unlock(struct rt_mutex *lock);
+ extern void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
+ extern int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
+ struct rt_mutex_waiter *waiter,
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 077877ed54f73..7283741666538 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -4448,6 +4448,8 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
+
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ return;
++ /* prevent another thread from changing buffer sizes */
++ mutex_lock(&buffer->mutex);
+
+ atomic_inc(&buffer->resize_disabled);
+ atomic_inc(&cpu_buffer->record_disabled);
+@@ -4471,6 +4473,8 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
+
+ atomic_dec(&cpu_buffer->record_disabled);
+ atomic_dec(&buffer->resize_disabled);
++
++ mutex_unlock(&buffer->mutex);
+ }
+ EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
+
+diff --git a/mm/slub.c b/mm/slub.c
+index 8b3ef45a0f103..e622e8f4c2ac4 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -5819,10 +5819,8 @@ static int sysfs_slab_add(struct kmem_cache *s)
+
+ s->kobj.kset = kset;
+ err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
+- if (err) {
+- kobject_put(&s->kobj);
++ if (err)
+ goto out;
+- }
+
+ err = sysfs_create_group(&s->kobj, &slab_attr_group);
+ if (err)
+diff --git a/tools/build/Makefile b/tools/build/Makefile
+index 727050c40f096..8a55378e8b7ce 100644
+--- a/tools/build/Makefile
++++ b/tools/build/Makefile
+@@ -15,10 +15,6 @@ endef
+ $(call allow-override,CC,$(CROSS_COMPILE)gcc)
+ $(call allow-override,LD,$(CROSS_COMPILE)ld)
+
+-HOSTCC ?= gcc
+-HOSTLD ?= ld
+-HOSTAR ?= ar
+-
+ export HOSTCC HOSTLD HOSTAR
+
+ ifeq ($(V),1)
+diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
+index f591c4d1b6fe2..9ae4a10438ee3 100644
+--- a/tools/objtool/Makefile
++++ b/tools/objtool/Makefile
+@@ -3,15 +3,6 @@ include ../scripts/Makefile.include
+ include ../scripts/Makefile.arch
+
+ # always use the host compiler
+-ifneq ($(LLVM),)
+-HOSTAR ?= llvm-ar
+-HOSTCC ?= clang
+-HOSTLD ?= ld.lld
+-else
+-HOSTAR ?= ar
+-HOSTCC ?= gcc
+-HOSTLD ?= ld
+-endif
+ AR = $(HOSTAR)
+ CC = $(HOSTCC)
+ LD = $(HOSTLD)
+diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
+index 902c792f326a5..961f5e4fd6566 100644
+--- a/tools/perf/Makefile.perf
++++ b/tools/perf/Makefile.perf
+@@ -163,10 +163,6 @@ endef
+
+ LD += $(EXTRA_LDFLAGS)
+
+-HOSTCC ?= gcc
+-HOSTLD ?= ld
+-HOSTAR ?= ar
+-
+ PKG_CONFIG = $(CROSS_COMPILE)pkg-config
+ LLVM_CONFIG ?= llvm-config
+
+diff --git a/tools/power/acpi/Makefile.config b/tools/power/acpi/Makefile.config
+index 54a2857c2510a..331f6d30f4726 100644
+--- a/tools/power/acpi/Makefile.config
++++ b/tools/power/acpi/Makefile.config
+@@ -54,7 +54,6 @@ INSTALL_SCRIPT = ${INSTALL_PROGRAM}
+ CROSS = #/usr/i386-linux-uclibc/usr/bin/i386-uclibc-
+ CROSS_COMPILE ?= $(CROSS)
+ LD = $(CC)
+-HOSTCC = gcc
+
+ # check if compiler option is supported
+ cc-supports = ${shell if $(CC) ${1} -S -o /dev/null -x c /dev/null > /dev/null 2>&1; then echo "$(1)"; fi;}
+diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
+index 6d2f3a1b22493..812fc97bb1a97 100644
+--- a/tools/scripts/Makefile.include
++++ b/tools/scripts/Makefile.include
+@@ -59,6 +59,16 @@ $(call allow-override,LD,$(CROSS_COMPILE)ld)
+ $(call allow-override,CXX,$(CROSS_COMPILE)g++)
+ $(call allow-override,STRIP,$(CROSS_COMPILE)strip)
+
++ifneq ($(LLVM),)
++HOSTAR ?= llvm-ar
++HOSTCC ?= clang
++HOSTLD ?= ld.lld
++else
++HOSTAR ?= ar
++HOSTCC ?= gcc
++HOSTLD ?= ld
++endif
++
+ ifeq ($(CC_NO_CLANG), 1)
+ EXTRA_WARNINGS += -Wstrict-aliasing=3
+ endif