Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Sun, 11 Jul 2021 14:47:41
Message-Id: 1626014845.794b5f0c81f864a96d1db09cabd2cefa1ebf7d6c.mpagano@gentoo
commit: 794b5f0c81f864a96d1db09cabd2cefa1ebf7d6c
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Jul 11 14:47:25 2021 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Jul 11 14:47:25 2021 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=794b5f0c

Linux patch 4.9.275

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |   4 +
 1274_linux-4.9.275.patch | 423 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 427 insertions(+)

diff --git a/0000_README b/0000_README
index 5d3fb76..c18617b 100644
--- a/0000_README
+++ b/0000_README
@@ -1139,6 +1139,10 @@ Patch: 1273_linux-4.9.274.patch
From: http://www.kernel.org
Desc: Linux 4.9.274

+Patch: 1274_linux-4.9.275.patch
+From: http://www.kernel.org
+Desc: Linux 4.9.275
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1274_linux-4.9.275.patch b/1274_linux-4.9.275.patch
new file mode 100644
index 0000000..8b636d6
--- /dev/null
+++ b/1274_linux-4.9.275.patch
@@ -0,0 +1,423 @@
+diff --git a/Makefile b/Makefile
+index 3002dfee32314..dfd253648758c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 274
++SUBLEVEL = 275
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
+index a2e6a81669e78..94b7798bdea4e 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
++++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
+@@ -447,7 +447,7 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
+ struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
+ int i;
+
+- if (!ttm_dma)
++ if (!ttm_dma || !ttm_dma->dma_address)
+ return;
+
+ /* Don't waste time looping if the object is coherent */
+@@ -467,7 +467,7 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
+ struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
+ int i;
+
+- if (!ttm_dma)
++ if (!ttm_dma || !ttm_dma->dma_address)
+ return;
+
+ /* Don't waste time looping if the object is coherent */
+diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
+index 67a73ea0a615e..5e51a39a0c27e 100644
+--- a/drivers/scsi/sr.c
++++ b/drivers/scsi/sr.c
+@@ -216,6 +216,8 @@ static unsigned int sr_get_events(struct scsi_device *sdev)
+ return DISK_EVENT_EJECT_REQUEST;
+ else if (med->media_event_code == 2)
+ return DISK_EVENT_MEDIA_CHANGE;
++ else if (med->media_event_code == 3)
++ return DISK_EVENT_EJECT_REQUEST;
+ return 0;
+ }
+
+diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
+index ea307f40cab19..c6e6b7470cbf6 100644
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -533,6 +533,9 @@ static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
+ }
+
+ info->eoi_time = 0;
++
++ /* is_active hasn't been reset yet, do it now. */
++ smp_store_release(&info->is_active, 0);
+ do_unmask(info, EVT_MASK_REASON_EOI_PENDING);
+ }
+
+@@ -1778,10 +1781,22 @@ static void lateeoi_ack_dynirq(struct irq_data *data)
+ struct irq_info *info = info_for_irq(data->irq);
+ evtchn_port_t evtchn = info ? info->evtchn : 0;
+
+- if (VALID_EVTCHN(evtchn)) {
+- do_mask(info, EVT_MASK_REASON_EOI_PENDING);
+- ack_dynirq(data);
+- }
++ if (!VALID_EVTCHN(evtchn))
++ return;
++
++ do_mask(info, EVT_MASK_REASON_EOI_PENDING);
++
++ if (unlikely(irqd_is_setaffinity_pending(data)) &&
++ likely(!irqd_irq_disabled(data))) {
++ do_mask(info, EVT_MASK_REASON_TEMPORARY);
++
++ clear_evtchn(evtchn);
++
++ irq_move_masked_irq(data);
++
++ do_unmask(info, EVT_MASK_REASON_TEMPORARY);
++ } else
++ clear_evtchn(evtchn);
+ }
+
+ static void lateeoi_mask_ack_dynirq(struct irq_data *data)
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index 8dd365c654780..6417bc845db56 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -451,17 +451,6 @@ static inline int hstate_index(struct hstate *h)
+ return h - hstates;
+ }
+
+-pgoff_t __basepage_index(struct page *page);
+-
+-/* Return page->index in PAGE_SIZE units */
+-static inline pgoff_t basepage_index(struct page *page)
+-{
+- if (!PageCompound(page))
+- return page->index;
+-
+- return __basepage_index(page);
+-}
+-
+ extern int dissolve_free_huge_pages(unsigned long start_pfn,
+ unsigned long end_pfn);
+ static inline bool hugepage_migration_supported(struct hstate *h)
+@@ -529,10 +518,6 @@ static inline unsigned int pages_per_huge_page(struct hstate *h)
+ #define hstate_index_to_shift(index) 0
+ #define hstate_index(h) 0
+
+-static inline pgoff_t basepage_index(struct page *page)
+-{
+- return page->index;
+-}
+ #define dissolve_free_huge_pages(s, e) 0
+ #define hugepage_migration_supported(h) false
+
+diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
+index 451a811f48f26..d1fb3bbff37ad 100644
+--- a/include/linux/mmdebug.h
++++ b/include/linux/mmdebug.h
+@@ -36,10 +36,22 @@ void dump_mm(const struct mm_struct *mm);
+ BUG(); \
+ } \
+ } while (0)
+-#define VM_WARN_ON(cond) WARN_ON(cond)
+-#define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond)
+-#define VM_WARN_ONCE(cond, format...) WARN_ONCE(cond, format)
+-#define VM_WARN(cond, format...) WARN(cond, format)
++#define VM_WARN_ON_ONCE_PAGE(cond, page) ({ \
++ static bool __section(".data.once") __warned; \
++ int __ret_warn_once = !!(cond); \
++ \
++ if (unlikely(__ret_warn_once && !__warned)) { \
++ dump_page(page, "VM_WARN_ON_ONCE_PAGE(" __stringify(cond)")");\
++ __warned = true; \
++ WARN_ON(1); \
++ } \
++ unlikely(__ret_warn_once); \
++})
++
++#define VM_WARN_ON(cond) (void)WARN_ON(cond)
++#define VM_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond)
++#define VM_WARN_ONCE(cond, format...) (void)WARN_ONCE(cond, format)
++#define VM_WARN(cond, format...) (void)WARN(cond, format)
+ #else
+ #define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
+ #define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond)
+@@ -47,6 +59,7 @@ void dump_mm(const struct mm_struct *mm);
+ #define VM_BUG_ON_MM(cond, mm) VM_BUG_ON(cond)
+ #define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond)
+ #define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond)
++#define VM_WARN_ON_ONCE_PAGE(cond, page) BUILD_BUG_ON_INVALID(cond)
+ #define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
+ #define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond)
+ #endif
+diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
+index 35f4c4d9c4054..8672291633ddf 100644
+--- a/include/linux/pagemap.h
++++ b/include/linux/pagemap.h
+@@ -374,7 +374,7 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
+ }
+
+ /*
+- * Get index of the page with in radix-tree
++ * Get index of the page within radix-tree (but not for hugetlb pages).
+ * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
+ */
+ static inline pgoff_t page_to_index(struct page *page)
+@@ -393,15 +393,16 @@ static inline pgoff_t page_to_index(struct page *page)
+ return pgoff;
+ }
+
++extern pgoff_t hugetlb_basepage_index(struct page *page);
++
+ /*
+- * Get the offset in PAGE_SIZE.
+- * (TODO: hugepage should have ->index in PAGE_SIZE)
++ * Get the offset in PAGE_SIZE (even for hugetlb pages).
++ * (TODO: hugetlb pages should have ->index in PAGE_SIZE)
+ */
+ static inline pgoff_t page_to_pgoff(struct page *page)
+ {
+- if (unlikely(PageHeadHuge(page)))
+- return page->index << compound_order(page);
+-
++ if (unlikely(PageHuge(page)))
++ return hugetlb_basepage_index(page);
+ return page_to_index(page);
+ }
+
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 324fb85c89049..b3823736af6f9 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -717,7 +717,7 @@ again:
+
+ key->both.offset |= FUT_OFF_INODE; /* inode-based key */
+ key->shared.i_seq = get_inode_sequence_number(inode);
+- key->shared.pgoff = basepage_index(tail);
++ key->shared.pgoff = page_to_pgoff(tail);
+ rcu_read_unlock();
+ }
+
+diff --git a/kernel/kthread.c b/kernel/kthread.c
+index 60f54c5a07a46..52b89c582189b 100644
+--- a/kernel/kthread.c
++++ b/kernel/kthread.c
+@@ -952,8 +952,38 @@ void kthread_flush_work(struct kthread_work *work)
+ EXPORT_SYMBOL_GPL(kthread_flush_work);
+
+ /*
+- * This function removes the work from the worker queue. Also it makes sure
+- * that it won't get queued later via the delayed work's timer.
++ * Make sure that the timer is neither set nor running and could
++ * not manipulate the work list_head any longer.
++ *
++ * The function is called under worker->lock. The lock is temporary
++ * released but the timer can't be set again in the meantime.
++ */
++static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
++ unsigned long *flags)
++{
++ struct kthread_delayed_work *dwork =
++ container_of(work, struct kthread_delayed_work, work);
++ struct kthread_worker *worker = work->worker;
++
++ /*
++ * del_timer_sync() must be called to make sure that the timer
++ * callback is not running. The lock must be temporary released
++ * to avoid a deadlock with the callback. In the meantime,
++ * any queuing is blocked by setting the canceling counter.
++ */
++ work->canceling++;
++ spin_unlock_irqrestore(&worker->lock, *flags);
++ del_timer_sync(&dwork->timer);
++ spin_lock_irqsave(&worker->lock, *flags);
++ work->canceling--;
++}
++
++/*
++ * This function removes the work from the worker queue.
++ *
++ * It is called under worker->lock. The caller must make sure that
++ * the timer used by delayed work is not running, e.g. by calling
++ * kthread_cancel_delayed_work_timer().
+ *
+ * The work might still be in use when this function finishes. See the
+ * current_work proceed by the worker.
+@@ -961,28 +991,8 @@ EXPORT_SYMBOL_GPL(kthread_flush_work);
+ * Return: %true if @work was pending and successfully canceled,
+ * %false if @work was not pending
+ */
+-static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
+- unsigned long *flags)
++static bool __kthread_cancel_work(struct kthread_work *work)
+ {
+- /* Try to cancel the timer if exists. */
+- if (is_dwork) {
+- struct kthread_delayed_work *dwork =
+- container_of(work, struct kthread_delayed_work, work);
+- struct kthread_worker *worker = work->worker;
+-
+- /*
+- * del_timer_sync() must be called to make sure that the timer
+- * callback is not running. The lock must be temporary released
+- * to avoid a deadlock with the callback. In the meantime,
+- * any queuing is blocked by setting the canceling counter.
+- */
+- work->canceling++;
+- spin_unlock_irqrestore(&worker->lock, *flags);
+- del_timer_sync(&dwork->timer);
+- spin_lock_irqsave(&worker->lock, *flags);
+- work->canceling--;
+- }
+-
+ /*
+ * Try to remove the work from a worker list. It might either
+ * be from worker->work_list or from worker->delayed_work_list.
+@@ -1035,11 +1045,23 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
+ /* Work must not be used with >1 worker, see kthread_queue_work() */
+ WARN_ON_ONCE(work->worker != worker);
+
+- /* Do not fight with another command that is canceling this work. */
++ /*
++ * Temporary cancel the work but do not fight with another command
++ * that is canceling the work as well.
++ *
++ * It is a bit tricky because of possible races with another
++ * mod_delayed_work() and cancel_delayed_work() callers.
++ *
++ * The timer must be canceled first because worker->lock is released
++ * when doing so. But the work can be removed from the queue (list)
++ * only when it can be queued again so that the return value can
++ * be used for reference counting.
++ */
++ kthread_cancel_delayed_work_timer(work, &flags);
+ if (work->canceling)
+ goto out;
++ ret = __kthread_cancel_work(work);
+
+- ret = __kthread_cancel_work(work, true, &flags);
+ fast_queue:
+ __kthread_queue_delayed_work(worker, dwork, delay);
+ out:
+@@ -1061,7 +1083,10 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
+ /* Work must not be used with >1 worker, see kthread_queue_work(). */
+ WARN_ON_ONCE(work->worker != worker);
+
+- ret = __kthread_cancel_work(work, is_dwork, &flags);
++ if (is_dwork)
++ kthread_cancel_delayed_work_timer(work, &flags);
++
++ ret = __kthread_cancel_work(work);
+
+ if (worker->current_work != work)
+ goto out_fast;
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 14cd0ef33b628..177ca028b9868 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1891,7 +1891,7 @@ static void unmap_page(struct page *page)
+ {
+ enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS |
+ TTU_RMAP_LOCKED;
+- int i, ret;
++ int i;
+
+ VM_BUG_ON_PAGE(!PageHead(page), page);
+
+@@ -1899,15 +1899,16 @@ static void unmap_page(struct page *page)
+ ttu_flags |= TTU_MIGRATION;
+
+ /* We only need TTU_SPLIT_HUGE_PMD once */
+- ret = try_to_unmap(page, ttu_flags | TTU_SPLIT_HUGE_PMD);
+- for (i = 1; !ret && i < HPAGE_PMD_NR; i++) {
++ try_to_unmap(page, ttu_flags | TTU_SPLIT_HUGE_PMD);
++ for (i = 1; i < HPAGE_PMD_NR; i++) {
+ /* Cut short if the page is unmapped */
+ if (page_count(page) == 1)
+ return;
+
+- ret = try_to_unmap(page + i, ttu_flags);
++ try_to_unmap(page + i, ttu_flags);
+ }
+- VM_BUG_ON_PAGE(ret, page + i - 1);
++
++ VM_WARN_ON_ONCE_PAGE(page_mapped(page), page);
+ }
+
+ static void remap_page(struct page *page)
+@@ -2137,7 +2138,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
+ struct pglist_data *pgdata = NODE_DATA(page_to_nid(head));
+ struct anon_vma *anon_vma = NULL;
+ struct address_space *mapping = NULL;
+- int count, mapcount, extra_pins, ret;
++ int extra_pins, ret;
+ bool mlocked;
+ unsigned long flags;
+ pgoff_t end;
+@@ -2200,7 +2201,6 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
+
+ mlocked = PageMlocked(page);
+ unmap_page(head);
+- VM_BUG_ON_PAGE(compound_mapcount(head), head);
+
+ /* Make sure the page is not on per-CPU pagevec as it takes pin */
+ if (mlocked)
+@@ -2226,9 +2226,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
+
+ /* Prevent deferred_split_scan() touching ->_refcount */
+ spin_lock(&pgdata->split_queue_lock);
+- count = page_count(head);
+- mapcount = total_mapcount(head);
+- if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) {
++ if (page_ref_freeze(head, 1 + extra_pins)) {
+ if (!list_empty(page_deferred_list(head))) {
+ pgdata->split_queue_len--;
+ list_del(page_deferred_list(head));
+@@ -2239,16 +2237,9 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
+ __split_huge_page(page, list, end, flags);
+ ret = 0;
+ } else {
+- if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
+- pr_alert("total_mapcount: %u, page_count(): %u\n",
+- mapcount, count);
+- if (PageTail(page))
+- dump_page(head, NULL);
+- dump_page(page, "total_mapcount(head) > 0");
+- BUG();
+- }
+ spin_unlock(&pgdata->split_queue_lock);
+-fail: if (mapping)
++fail:
++ if (mapping)
+ spin_unlock(&mapping->tree_lock);
+ spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
+ remap_page(head);
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index b7215b0807ca6..de89e9295f6c5 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1380,15 +1380,12 @@ int PageHeadHuge(struct page *page_head)
+ return get_compound_page_dtor(page_head) == free_huge_page;
+ }
+
+-pgoff_t __basepage_index(struct page *page)
++pgoff_t hugetlb_basepage_index(struct page *page)
+ {
+ struct page *page_head = compound_head(page);
+ pgoff_t index = page_index(page_head);
+ unsigned long compound_idx;
+
+- if (!PageHuge(page_head))
+- return page_index(page);
+-
+ if (compound_order(page_head) >= MAX_ORDER)
+ compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
+ else