commit: 9723a6ca8963f59a83672d3b433b01f598183bcc
Author: Alice Ferrazzi <alicef@gentoo.org>
AuthorDate: Sat Jan 30 13:26:22 2021 +0000
Commit: Alice Ferrazzi <alicef@gentoo.org>
CommitDate: Sat Jan 30 13:27:06 2021 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9723a6ca

Linux patch 5.10.12

Signed-off-by: Alice Ferrazzi <alicef@gentoo.org>

 0000_README              |    4 +
 1011_linux-5.10.12.patch | 1263 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1267 insertions(+)

diff --git a/0000_README b/0000_README
index fe8a778..8c99e2c 100644
--- a/0000_README
+++ b/0000_README
@@ -87,6 +87,10 @@ Patch: 1010_linux-5.10.11.patch
 From: http://www.kernel.org
 Desc: Linux 5.10.11
 
+Patch: 1011_linux-5.10.12.patch
+From: http://www.kernel.org
+Desc: Linux 5.10.12
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1011_linux-5.10.12.patch b/1011_linux-5.10.12.patch
new file mode 100644
index 0000000..40728cb
--- /dev/null
+++ b/1011_linux-5.10.12.patch
@@ -0,0 +1,1263 @@
+diff --git a/Makefile b/Makefile
+index 7a5d906f6ee36..a6b2e64bcf6c7 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 10
+-SUBLEVEL = 11
++SUBLEVEL = 12
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
+index 2f245594a90a6..ed7c5fc47f524 100644
+--- a/drivers/gpio/gpio-mvebu.c
++++ b/drivers/gpio/gpio-mvebu.c
+@@ -660,9 +660,8 @@ static void mvebu_pwm_get_state(struct pwm_chip *chip,
+
+ spin_lock_irqsave(&mvpwm->lock, flags);
+
+- val = (unsigned long long)
+- readl_relaxed(mvebu_pwmreg_blink_on_duration(mvpwm));
+- val *= NSEC_PER_SEC;
++ u = readl_relaxed(mvebu_pwmreg_blink_on_duration(mvpwm));
++ val = (unsigned long long) u * NSEC_PER_SEC;
+ do_div(val, mvpwm->clk_rate);
+ if (val > UINT_MAX)
+ state->duty_cycle = UINT_MAX;
+@@ -671,21 +670,17 @@ static void mvebu_pwm_get_state(struct pwm_chip *chip,
+ else
+ state->duty_cycle = 1;
+
+- val = (unsigned long long)
+- readl_relaxed(mvebu_pwmreg_blink_off_duration(mvpwm));
++ val = (unsigned long long) u; /* on duration */
++ /* period = on + off duration */
++ val += readl_relaxed(mvebu_pwmreg_blink_off_duration(mvpwm));
+ val *= NSEC_PER_SEC;
+ do_div(val, mvpwm->clk_rate);
+- if (val < state->duty_cycle) {
++ if (val > UINT_MAX)
++ state->period = UINT_MAX;
++ else if (val)
++ state->period = val;
++ else
+ state->period = 1;
+- } else {
+- val -= state->duty_cycle;
+- if (val > UINT_MAX)
+- state->period = UINT_MAX;
+- else if (val)
+- state->period = val;
+- else
+- state->period = 1;
+- }
+
+ regmap_read(mvchip->regs, GPIO_BLINK_EN_OFF + mvchip->offset, &u);
+ if (u)
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 0743ef51d3b24..8429ebe7097e4 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -758,7 +758,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ MT_STORE_FIELD(inrange_state);
+ return 1;
+ case HID_DG_CONFIDENCE:
+- if (cls->name == MT_CLS_WIN_8 &&
++ if ((cls->name == MT_CLS_WIN_8 ||
++ cls->name == MT_CLS_WIN_8_FORCE_MULTI_INPUT) &&
+ (field->application == HID_DG_TOUCHPAD ||
+ field->application == HID_DG_TOUCHSCREEN))
+ app->quirks |= MT_QUIRK_CONFIDENCE;
+diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
+index 9e852b4bbf92b..73dafa60080f1 100644
+--- a/drivers/hid/wacom_sys.c
++++ b/drivers/hid/wacom_sys.c
+@@ -147,9 +147,9 @@ static int wacom_wac_pen_serial_enforce(struct hid_device *hdev,
+ }
+
+ if (flush)
+- wacom_wac_queue_flush(hdev, &wacom_wac->pen_fifo);
++ wacom_wac_queue_flush(hdev, wacom_wac->pen_fifo);
+ else if (insert)
+- wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo,
++ wacom_wac_queue_insert(hdev, wacom_wac->pen_fifo,
+ raw_data, report_size);
+
+ return insert && !flush;
+@@ -1280,7 +1280,7 @@ static void wacom_devm_kfifo_release(struct device *dev, void *res)
+ static int wacom_devm_kfifo_alloc(struct wacom *wacom)
+ {
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+- struct kfifo_rec_ptr_2 *pen_fifo = &wacom_wac->pen_fifo;
++ struct kfifo_rec_ptr_2 *pen_fifo;
+ int error;
+
+ pen_fifo = devres_alloc(wacom_devm_kfifo_release,
+@@ -1297,6 +1297,7 @@ static int wacom_devm_kfifo_alloc(struct wacom *wacom)
+ }
+
+ devres_add(&wacom->hdev->dev, pen_fifo);
++ wacom_wac->pen_fifo = pen_fifo;
+
+ return 0;
+ }
+diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
+index da612b6e9c779..195910dd2154e 100644
+--- a/drivers/hid/wacom_wac.h
++++ b/drivers/hid/wacom_wac.h
+@@ -342,7 +342,7 @@ struct wacom_wac {
+ struct input_dev *pen_input;
+ struct input_dev *touch_input;
+ struct input_dev *pad_input;
+- struct kfifo_rec_ptr_2 pen_fifo;
++ struct kfifo_rec_ptr_2 *pen_fifo;
+ int pid;
+ int num_contacts_left;
+ u8 bt_features;
+diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+index c142f5e7f25f8..de57f2fed7437 100644
+--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
++++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+@@ -509,6 +509,20 @@ static inline int ib_send_flags_to_pvrdma(int flags)
+ return flags & PVRDMA_MASK(PVRDMA_SEND_FLAGS_MAX);
+ }
+
++static inline int pvrdma_network_type_to_ib(enum pvrdma_network_type type)
++{
++ switch (type) {
++ case PVRDMA_NETWORK_ROCE_V1:
++ return RDMA_NETWORK_ROCE_V1;
++ case PVRDMA_NETWORK_IPV4:
++ return RDMA_NETWORK_IPV4;
++ case PVRDMA_NETWORK_IPV6:
++ return RDMA_NETWORK_IPV6;
++ default:
++ return RDMA_NETWORK_IPV6;
++ }
++}
++
+ void pvrdma_qp_cap_to_ib(struct ib_qp_cap *dst,
+ const struct pvrdma_qp_cap *src);
+ void ib_qp_cap_to_pvrdma(struct pvrdma_qp_cap *dst,
+diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+index 319546a39a0d5..62164db593a4f 100644
+--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
++++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+@@ -364,7 +364,7 @@ retry:
+ wc->dlid_path_bits = cqe->dlid_path_bits;
+ wc->port_num = cqe->port_num;
+ wc->vendor_err = cqe->vendor_err;
+- wc->network_hdr_type = cqe->network_hdr_type;
++ wc->network_hdr_type = pvrdma_network_type_to_ib(cqe->network_hdr_type);
+
+ /* Update shared ring state */
+ pvrdma_idx_ring_inc(&cq->ring_state->rx.cons_head, cq->ibcq.cqe);
+diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
+index 96d3b2b2aa318..3f61f5863bf77 100644
+--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
++++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
+@@ -118,8 +118,7 @@ static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
+ return -EINVAL;
+ }
+ } else {
+- length = (b->memory == VB2_MEMORY_USERPTR ||
+- b->memory == VB2_MEMORY_DMABUF)
++ length = (b->memory == VB2_MEMORY_USERPTR)
+ ? b->length : vb->planes[0].length;
+
+ if (b->bytesused > length)
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+index 8fa1c22fd96db..fcad5cdcabfa4 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+@@ -237,13 +237,6 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
+ if (le32_to_cpu(tlv->length) < sizeof(*reg))
+ return -EINVAL;
+
+- /* For safe using a string from FW make sure we have a
+- * null terminator
+- */
+- reg->name[IWL_FW_INI_MAX_NAME - 1] = 0;
+-
+- IWL_DEBUG_FW(trans, "WRT: parsing region: %s\n", reg->name);
+-
+ if (id >= IWL_FW_INI_MAX_REGION_ID) {
+ IWL_ERR(trans, "WRT: Invalid region id %u\n", id);
+ return -EINVAL;
+diff --git a/fs/file.c b/fs/file.c
+index 4559b5fec3bd5..21c0893f2f1df 100644
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -21,7 +21,6 @@
+ #include <linux/rcupdate.h>
+ #include <linux/close_range.h>
+ #include <net/sock.h>
+-#include <linux/io_uring.h>
+
+ unsigned int sysctl_nr_open __read_mostly = 1024*1024;
+ unsigned int sysctl_nr_open_min = BITS_PER_LONG;
+@@ -453,7 +452,6 @@ void exit_files(struct task_struct *tsk)
+ struct files_struct * files = tsk->files;
+
+ if (files) {
+- io_uring_files_cancel(files);
+ task_lock(tsk);
+ tsk->files = NULL;
+ task_unlock(tsk);
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 8cb0db187d90f..fd12d9327ee5b 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -260,6 +260,7 @@ struct io_ring_ctx {
+ unsigned int drain_next: 1;
+ unsigned int eventfd_async: 1;
+ unsigned int restricted: 1;
++ unsigned int sqo_dead: 1;
+
+ /*
+ * Ring buffer of indices into array of io_uring_sqe, which is
+@@ -970,6 +971,7 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
+ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
+ const struct iovec *fast_iov,
+ struct iov_iter *iter, bool force);
++static void io_req_drop_files(struct io_kiocb *req);
+
+ static struct kmem_cache *req_cachep;
+
+@@ -990,8 +992,7 @@ EXPORT_SYMBOL(io_uring_get_socket);
+
+ static inline void io_clean_op(struct io_kiocb *req)
+ {
+- if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED |
+- REQ_F_INFLIGHT))
++ if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
+ __io_clean_op(req);
+ }
+
+@@ -1213,11 +1214,6 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx)
+
+ /* order cqe stores with ring update */
+ smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);
+-
+- if (wq_has_sleeper(&ctx->cq_wait)) {
+- wake_up_interruptible(&ctx->cq_wait);
+- kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
+- }
+ }
+
+ static void io_put_identity(struct io_uring_task *tctx, struct io_kiocb *req)
+@@ -1260,6 +1256,8 @@ static void io_req_clean_work(struct io_kiocb *req)
+ free_fs_struct(fs);
+ req->work.flags &= ~IO_WQ_WORK_FS;
+ }
++ if (req->flags & REQ_F_INFLIGHT)
++ io_req_drop_files(req);
+
+ io_put_identity(req->task->io_uring, req);
+ }
+@@ -1603,6 +1601,10 @@ static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
+
+ static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
+ {
++ if (wq_has_sleeper(&ctx->cq_wait)) {
++ wake_up_interruptible(&ctx->cq_wait);
++ kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
++ }
+ if (waitqueue_active(&ctx->wait))
+ wake_up(&ctx->wait);
+ if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
+@@ -2083,11 +2085,9 @@ static void io_req_task_cancel(struct callback_head *cb)
+ static void __io_req_task_submit(struct io_kiocb *req)
+ {
+ struct io_ring_ctx *ctx = req->ctx;
+- bool fail;
+
+- fail = __io_sq_thread_acquire_mm(ctx);
+ mutex_lock(&ctx->uring_lock);
+- if (!fail)
++ if (!ctx->sqo_dead && !__io_sq_thread_acquire_mm(ctx))
+ __io_queue_sqe(req, NULL);
+ else
+ __io_req_task_cancel(req, -EFAULT);
+@@ -5962,9 +5962,6 @@ static void __io_clean_op(struct io_kiocb *req)
+ }
+ req->flags &= ~REQ_F_NEED_CLEANUP;
+ }
+-
+- if (req->flags & REQ_F_INFLIGHT)
+- io_req_drop_files(req);
+ }
+
+ static int io_issue_sqe(struct io_kiocb *req, bool force_nonblock,
+@@ -6796,7 +6793,7 @@ again:
+ to_submit = 8;
+
+ mutex_lock(&ctx->uring_lock);
+- if (likely(!percpu_ref_is_dying(&ctx->refs)))
++ if (likely(!percpu_ref_is_dying(&ctx->refs) && !ctx->sqo_dead))
+ ret = io_submit_sqes(ctx, to_submit);
+ mutex_unlock(&ctx->uring_lock);
+
+@@ -8487,6 +8484,10 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
+ mutex_lock(&ctx->uring_lock);
+ percpu_ref_kill(&ctx->refs);
+ /* if force is set, the ring is going away. always drop after that */
++
++ if (WARN_ON_ONCE((ctx->flags & IORING_SETUP_SQPOLL) && !ctx->sqo_dead))
++ ctx->sqo_dead = 1;
++
+ ctx->cq_overflow_flushed = 1;
+ if (ctx->rings)
+ __io_cqring_overflow_flush(ctx, true, NULL, NULL);
+@@ -8698,6 +8699,8 @@ static bool io_uring_cancel_files(struct io_ring_ctx *ctx,
+ break;
+ /* cancel this request, or head link requests */
+ io_attempt_cancel(ctx, cancel_req);
++ io_cqring_overflow_flush(ctx, true, task, files);
++
+ io_put_req(cancel_req);
+ /* cancellations _may_ trigger task work */
+ io_run_task_work();
+@@ -8745,6 +8748,17 @@ static bool __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
+ return ret;
+ }
+
++static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
++{
++ mutex_lock(&ctx->uring_lock);
++ ctx->sqo_dead = 1;
++ mutex_unlock(&ctx->uring_lock);
++
++ /* make sure callers enter the ring to get error */
++ if (ctx->rings)
++ io_ring_set_wakeup_flag(ctx);
++}
++
+ /*
+ * We need to iteratively cancel requests, in case a request has dependent
+ * hard links. These persist even for failure of cancelations, hence keep
+@@ -8756,6 +8770,9 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
+ struct task_struct *task = current;
+
+ if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
++ /* for SQPOLL only sqo_task has task notes */
++ WARN_ON_ONCE(ctx->sqo_task != current);
++ io_disable_sqo_submit(ctx);
+ task = ctx->sq_data->thread;
+ atomic_inc(&task->io_uring->in_idle);
+ io_sq_thread_park(ctx->sq_data);
+@@ -8835,23 +8852,6 @@ static void io_uring_del_task_file(struct file *file)
+ fput(file);
+ }
+
+-/*
+- * Drop task note for this file if we're the only ones that hold it after
+- * pending fput()
+- */
+-static void io_uring_attempt_task_drop(struct file *file)
+-{
+- if (!current->io_uring)
+- return;
+- /*
+- * fput() is pending, will be 2 if the only other ref is our potential
+- * task file note. If the task is exiting, drop regardless of count.
+- */
+- if (fatal_signal_pending(current) || (current->flags & PF_EXITING) ||
+- atomic_long_read(&file->f_count) == 2)
+- io_uring_del_task_file(file);
+-}
+-
+ static void io_uring_remove_task_files(struct io_uring_task *tctx)
+ {
+ struct file *file;
+@@ -8917,6 +8917,10 @@ void __io_uring_task_cancel(void)
+ /* make sure overflow events are dropped */
+ atomic_inc(&tctx->in_idle);
+
++ /* trigger io_disable_sqo_submit() */
++ if (tctx->sqpoll)
++ __io_uring_files_cancel(NULL);
++
+ do {
+ /* read completions before cancelations */
+ inflight = tctx_inflight(tctx);
+@@ -8943,7 +8947,36 @@ void __io_uring_task_cancel(void)
+
+ static int io_uring_flush(struct file *file, void *data)
+ {
+- io_uring_attempt_task_drop(file);
++ struct io_uring_task *tctx = current->io_uring;
++ struct io_ring_ctx *ctx = file->private_data;
++
++ if (!tctx)
++ return 0;
++
++ /* we should have cancelled and erased it before PF_EXITING */
++ WARN_ON_ONCE((current->flags & PF_EXITING) &&
++ xa_load(&tctx->xa, (unsigned long)file));
++
++ /*
++ * fput() is pending, will be 2 if the only other ref is our potential
++ * task file note. If the task is exiting, drop regardless of count.
++ */
++ if (atomic_long_read(&file->f_count) != 2)
++ return 0;
++
++ if (ctx->flags & IORING_SETUP_SQPOLL) {
++ /* there is only one file note, which is owned by sqo_task */
++ WARN_ON_ONCE(ctx->sqo_task != current &&
++ xa_load(&tctx->xa, (unsigned long)file));
++ /* sqo_dead check is for when this happens after cancellation */
++ WARN_ON_ONCE(ctx->sqo_task == current && !ctx->sqo_dead &&
++ !xa_load(&tctx->xa, (unsigned long)file));
++
++ io_disable_sqo_submit(ctx);
++ }
++
++ if (!(ctx->flags & IORING_SETUP_SQPOLL) || ctx->sqo_task == current)
++ io_uring_del_task_file(file);
+ return 0;
+ }
+@@ -9017,8 +9050,9 @@ static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
+
+ #endif /* !CONFIG_MMU */
+
+-static void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
++static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
+ {
++ int ret = 0;
+ DEFINE_WAIT(wait);
+
+ do {
+@@ -9027,6 +9061,11 @@ static void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
+
+ prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
+
++ if (unlikely(ctx->sqo_dead)) {
++ ret = -EOWNERDEAD;
++ goto out;
++ }
++
+ if (!io_sqring_full(ctx))
+ break;
+
+@@ -9034,6 +9073,8 @@ static void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
+ } while (!signal_pending(current));
+
+ finish_wait(&ctx->sqo_sq_wait, &wait);
++out:
++ return ret;
+ }
+
+ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
+@@ -9077,10 +9118,16 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
+ if (ctx->flags & IORING_SETUP_SQPOLL) {
+ io_cqring_overflow_flush(ctx, false, NULL, NULL);
+
++ ret = -EOWNERDEAD;
++ if (unlikely(ctx->sqo_dead))
++ goto out;
+ if (flags & IORING_ENTER_SQ_WAKEUP)
+ wake_up(&ctx->sq_data->wait);
+- if (flags & IORING_ENTER_SQ_WAIT)
+- io_sqpoll_wait_sq(ctx);
++ if (flags & IORING_ENTER_SQ_WAIT) {
++ ret = io_sqpoll_wait_sq(ctx);
++ if (ret)
++ goto out;
++ }
+ submitted = to_submit;
+ } else if (to_submit) {
+ ret = io_uring_add_task_file(ctx, f.file);
+@@ -9491,6 +9538,7 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
+ */
+ ret = io_uring_install_fd(ctx, file);
+ if (ret < 0) {
++ io_disable_sqo_submit(ctx);
+ /* fput will clean it up */
+ fput(file);
+ return ret;
+@@ -9499,6 +9547,7 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
+ trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
+ return ret;
+ err:
++ io_disable_sqo_submit(ctx);
+ io_ring_ctx_wait_and_kill(ctx);
+ return ret;
+ }
+diff --git a/include/uapi/linux/v4l2-subdev.h b/include/uapi/linux/v4l2-subdev.h
+index 00850b98078a2..a38454d9e0f54 100644
+--- a/include/uapi/linux/v4l2-subdev.h
++++ b/include/uapi/linux/v4l2-subdev.h
+@@ -176,7 +176,7 @@ struct v4l2_subdev_capability {
+ };
+
+ /* The v4l2 sub-device video device node is registered in read-only mode. */
+-#define V4L2_SUBDEV_CAP_RO_SUBDEV BIT(0)
++#define V4L2_SUBDEV_CAP_RO_SUBDEV 0x00000001
+
+ /* Backwards compatibility define --- to be removed */
+ #define v4l2_subdev_edid v4l2_edid
+diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h
+index f8b638c73371d..901a4fd72c09f 100644
+--- a/include/uapi/rdma/vmw_pvrdma-abi.h
++++ b/include/uapi/rdma/vmw_pvrdma-abi.h
+@@ -133,6 +133,13 @@ enum pvrdma_wc_flags {
+ PVRDMA_WC_FLAGS_MAX = PVRDMA_WC_WITH_NETWORK_HDR_TYPE,
+ };
+
++enum pvrdma_network_type {
++ PVRDMA_NETWORK_IB,
++ PVRDMA_NETWORK_ROCE_V1 = PVRDMA_NETWORK_IB,
++ PVRDMA_NETWORK_IPV4,
++ PVRDMA_NETWORK_IPV6
++};
++
+ struct pvrdma_alloc_ucontext_resp {
+ __u32 qp_tab_size;
+ __u32 reserved;
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 1f236ed375f83..d13d67fc5f4e2 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -63,6 +63,7 @@
+ #include <linux/random.h>
+ #include <linux/rcuwait.h>
+ #include <linux/compat.h>
++#include <linux/io_uring.h>
+
+ #include <linux/uaccess.h>
+ #include <asm/unistd.h>
+@@ -762,6 +763,7 @@ void __noreturn do_exit(long code)
+ schedule();
+ }
+
++ io_uring_files_cancel(tsk->files);
+ exit_signals(tsk); /* sets PF_EXITING */
+
+ /* sync mm's RSS info before statistics gathering */
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 00259c7e288ee..0693b3ea0f9a4 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -765,6 +765,29 @@ static struct futex_pi_state *alloc_pi_state(void)
+ return pi_state;
+ }
+
++static void pi_state_update_owner(struct futex_pi_state *pi_state,
++ struct task_struct *new_owner)
++{
++ struct task_struct *old_owner = pi_state->owner;
++
++ lockdep_assert_held(&pi_state->pi_mutex.wait_lock);
++
++ if (old_owner) {
++ raw_spin_lock(&old_owner->pi_lock);
++ WARN_ON(list_empty(&pi_state->list));
++ list_del_init(&pi_state->list);
++ raw_spin_unlock(&old_owner->pi_lock);
++ }
++
++ if (new_owner) {
++ raw_spin_lock(&new_owner->pi_lock);
++ WARN_ON(!list_empty(&pi_state->list));
++ list_add(&pi_state->list, &new_owner->pi_state_list);
++ pi_state->owner = new_owner;
++ raw_spin_unlock(&new_owner->pi_lock);
++ }
++}
++
+ static void get_pi_state(struct futex_pi_state *pi_state)
+ {
+ WARN_ON_ONCE(!refcount_inc_not_zero(&pi_state->refcount));
+@@ -787,17 +810,11 @@ static void put_pi_state(struct futex_pi_state *pi_state)
+ * and has cleaned up the pi_state already
+ */
+ if (pi_state->owner) {
+- struct task_struct *owner;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&pi_state->pi_mutex.wait_lock, flags);
+- owner = pi_state->owner;
+- if (owner) {
+- raw_spin_lock(&owner->pi_lock);
+- list_del_init(&pi_state->list);
+- raw_spin_unlock(&owner->pi_lock);
+- }
+- rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner);
++ pi_state_update_owner(pi_state, NULL);
++ rt_mutex_proxy_unlock(&pi_state->pi_mutex);
+ raw_spin_unlock_irqrestore(&pi_state->pi_mutex.wait_lock, flags);
+ }
+
+@@ -943,7 +960,8 @@ static inline void exit_pi_state_list(struct task_struct *curr) { }
+ * FUTEX_OWNER_DIED bit. See [4]
+ *
+ * [10] There is no transient state which leaves owner and user space
+- * TID out of sync.
++ * TID out of sync. Except one error case where the kernel is denied
++ * write access to the user address, see fixup_pi_state_owner().
+ *
+ *
+ * Serialization and lifetime rules:
+@@ -1523,26 +1541,15 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
+ ret = -EINVAL;
+ }
+
+- if (ret)
+- goto out_unlock;
+-
+- /*
+- * This is a point of no return; once we modify the uval there is no
+- * going back and subsequent operations must not fail.
+- */
+-
+- raw_spin_lock(&pi_state->owner->pi_lock);
+- WARN_ON(list_empty(&pi_state->list));
+- list_del_init(&pi_state->list);
+- raw_spin_unlock(&pi_state->owner->pi_lock);
+-
+- raw_spin_lock(&new_owner->pi_lock);
+- WARN_ON(!list_empty(&pi_state->list));
+- list_add(&pi_state->list, &new_owner->pi_state_list);
+- pi_state->owner = new_owner;
+- raw_spin_unlock(&new_owner->pi_lock);
+-
+- postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
++ if (!ret) {
++ /*
++ * This is a point of no return; once we modified the uval
++ * there is no going back and subsequent operations must
++ * not fail.
++ */
++ pi_state_update_owner(pi_state, new_owner);
++ postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
++ }
+
+ out_unlock:
+ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+@@ -2325,18 +2332,13 @@ static void unqueue_me_pi(struct futex_q *q)
+ spin_unlock(q->lock_ptr);
+ }
+
+-static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+- struct task_struct *argowner)
++static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
++ struct task_struct *argowner)
+ {
+ struct futex_pi_state *pi_state = q->pi_state;
+- u32 uval, curval, newval;
+ struct task_struct *oldowner, *newowner;
+- u32 newtid;
+- int ret, err = 0;
+-
+- lockdep_assert_held(q->lock_ptr);
+-
+- raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
++ u32 uval, curval, newval, newtid;
++ int err = 0;
+
+ oldowner = pi_state->owner;
+
+@@ -2370,14 +2372,12 @@ retry:
+ * We raced against a concurrent self; things are
+ * already fixed up. Nothing to do.
+ */
+- ret = 0;
+- goto out_unlock;
++ return 0;
+ }
+
+ if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
+- /* We got the lock after all, nothing to fix. */
+- ret = 0;
+- goto out_unlock;
++ /* We got the lock. pi_state is correct. Tell caller. */
++ return 1;
+ }
+
+ /*
+@@ -2404,8 +2404,7 @@ retry:
+ * We raced against a concurrent self; things are
+ * already fixed up. Nothing to do.
+ */
+- ret = 0;
+- goto out_unlock;
++ return 1;
+ }
+ newowner = argowner;
+ }
+@@ -2435,22 +2434,9 @@ retry:
+ * We fixed up user space. Now we need to fix the pi_state
+ * itself.
+ */
+- if (pi_state->owner != NULL) {
+- raw_spin_lock(&pi_state->owner->pi_lock);
+- WARN_ON(list_empty(&pi_state->list));
+- list_del_init(&pi_state->list);
+- raw_spin_unlock(&pi_state->owner->pi_lock);
+- }
++ pi_state_update_owner(pi_state, newowner);
+
+- pi_state->owner = newowner;
+-
+- raw_spin_lock(&newowner->pi_lock);
+- WARN_ON(!list_empty(&pi_state->list));
+- list_add(&pi_state->list, &newowner->pi_state_list);
+- raw_spin_unlock(&newowner->pi_lock);
+- raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+-
+- return 0;
++ return argowner == current;
+
+ /*
+ * In order to reschedule or handle a page fault, we need to drop the
+@@ -2471,17 +2457,16 @@ handle_err:
+
+ switch (err) {
+ case -EFAULT:
+- ret = fault_in_user_writeable(uaddr);
++ err = fault_in_user_writeable(uaddr);
+ break;
+
+ case -EAGAIN:
+ cond_resched();
+- ret = 0;
++ err = 0;
+ break;
+
+ default:
+ WARN_ON_ONCE(1);
+- ret = err;
+ break;
+ }
+
+@@ -2491,17 +2476,44 @@ handle_err:
+ /*
+ * Check if someone else fixed it for us:
+ */
+- if (pi_state->owner != oldowner) {
+- ret = 0;
+- goto out_unlock;
+- }
++ if (pi_state->owner != oldowner)
++ return argowner == current;
+
+- if (ret)
+- goto out_unlock;
++ /* Retry if err was -EAGAIN or the fault in succeeded */
++ if (!err)
++ goto retry;
+
+- goto retry;
++ /*
++ * fault_in_user_writeable() failed so user state is immutable. At
++ * best we can make the kernel state consistent but user state will
++ * be most likely hosed and any subsequent unlock operation will be
++ * rejected due to PI futex rule [10].
++ *
++ * Ensure that the rtmutex owner is also the pi_state owner despite
++ * the user space value claiming something different. There is no
++ * point in unlocking the rtmutex if current is the owner as it
++ * would need to wait until the next waiter has taken the rtmutex
++ * to guarantee consistent state. Keep it simple. Userspace asked
++ * for this wreckaged state.
++ *
++ * The rtmutex has an owner - either current or some other
++ * task. See the EAGAIN loop above.
++ */
++ pi_state_update_owner(pi_state, rt_mutex_owner(&pi_state->pi_mutex));
+
+-out_unlock:
++ return err;
++}
++
++static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
++ struct task_struct *argowner)
++{
++ struct futex_pi_state *pi_state = q->pi_state;
++ int ret;
++
++ lockdep_assert_held(q->lock_ptr);
++
++ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
++ ret = __fixup_pi_state_owner(uaddr, q, argowner);
+ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+ return ret;
+ }
+@@ -2525,8 +2537,6 @@ static long futex_wait_restart(struct restart_block *restart);
+ */
+ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
+ {
+- int ret = 0;
+-
+ if (locked) {
+ /*
+ * Got the lock. We might not be the anticipated owner if we
+@@ -2537,8 +2547,8 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
+ * stable state, anything else needs more attention.
+ */
+ if (q->pi_state->owner != current)
+- ret = fixup_pi_state_owner(uaddr, q, current);
+- return ret ? ret : locked;
++ return fixup_pi_state_owner(uaddr, q, current);
++ return 1;
+ }
+
+ /*
+@@ -2549,23 +2559,17 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
+ * Another speculative read; pi_state->owner == current is unstable
+ * but needs our attention.
+ */
+- if (q->pi_state->owner == current) {
+- ret = fixup_pi_state_owner(uaddr, q, NULL);
+- return ret;
+- }
++ if (q->pi_state->owner == current)
++ return fixup_pi_state_owner(uaddr, q, NULL);
+
+ /*
+ * Paranoia check. If we did not take the lock, then we should not be
+- * the owner of the rt_mutex.
++ * the owner of the rt_mutex. Warn and establish consistent state.
+ */
+- if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) {
+- printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
+- "pi-state %p\n", ret,
+- q->pi_state->pi_mutex.owner,
+- q->pi_state->owner);
+- }
++ if (WARN_ON_ONCE(rt_mutex_owner(&q->pi_state->pi_mutex) == current))
++ return fixup_pi_state_owner(uaddr, q, current);
+
+- return ret;
++ return 0;
+ }
+
+ /**
+@@ -2773,7 +2777,6 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
+ ktime_t *time, int trylock)
+ {
+ struct hrtimer_sleeper timeout, *to;
+- struct futex_pi_state *pi_state = NULL;
+ struct task_struct *exiting = NULL;
+ struct rt_mutex_waiter rt_waiter;
+ struct futex_hash_bucket *hb;
+@@ -2909,23 +2912,8 @@ no_block:
+ if (res)
+ ret = (res < 0) ? res : 0;
+
+- /*
+- * If fixup_owner() faulted and was unable to handle the fault, unlock
+- * it and return the fault to userspace.
+- */
+- if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) {
+- pi_state = q.pi_state;
+- get_pi_state(pi_state);
+- }
+-
+ /* Unqueue and drop the lock */
+ unqueue_me_pi(&q);
+-
+- if (pi_state) {
+- rt_mutex_futex_unlock(&pi_state->pi_mutex);
+- put_pi_state(pi_state);
+- }
+-
+ goto out;
+
+ out_unlock_put_key:
+@@ -3185,7 +3173,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ u32 __user *uaddr2)
+ {
+ struct hrtimer_sleeper timeout, *to;
+- struct futex_pi_state *pi_state = NULL;
+ struct rt_mutex_waiter rt_waiter;
+ struct futex_hash_bucket *hb;
+ union futex_key key2 = FUTEX_KEY_INIT;
+@@ -3263,16 +3250,17 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ if (q.pi_state && (q.pi_state->owner != current)) {
+ spin_lock(q.lock_ptr);
+ ret = fixup_pi_state_owner(uaddr2, &q, current);
+- if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
+- pi_state = q.pi_state;
+- get_pi_state(pi_state);
+- }
+ /*
+ * Drop the reference to the pi state which
+ * the requeue_pi() code acquired for us.
+ */
+ put_pi_state(q.pi_state);
+ spin_unlock(q.lock_ptr);
++ /*
++ * Adjust the return value. It's either -EFAULT or
++ * success (1) but the caller expects 0 for success.
++ */
++ ret = ret < 0 ? ret : 0;
+ }
+ } else {
+ struct rt_mutex *pi_mutex;
+@@ -3303,25 +3291,10 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ if (res)
+ ret = (res < 0) ? res : 0;
+
+- /*
+- * If fixup_pi_state_owner() faulted and was unable to handle
+- * the fault, unlock the rt_mutex and return the fault to
+- * userspace.
+- */
+- if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
+- pi_state = q.pi_state;
+- get_pi_state(pi_state);
+- }
+-
+ /* Unqueue and drop the lock. */
+ unqueue_me_pi(&q);
+ }
+
+- if (pi_state) {
+- rt_mutex_futex_unlock(&pi_state->pi_mutex);
+- put_pi_state(pi_state);
+- }
+-
+ if (ret == -EINTR) {
+ /*
+ * We've already been requeued, but cannot restart by calling
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index cfdd5b93264d7..2f8cd616d3b29 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1716,8 +1716,7 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
+ * possible because it belongs to the pi_state which is about to be freed
+ * and it is not longer visible to other tasks.
+ */
+-void rt_mutex_proxy_unlock(struct rt_mutex *lock,
+- struct task_struct *proxy_owner)
++void rt_mutex_proxy_unlock(struct rt_mutex *lock)
+ {
+ debug_rt_mutex_proxy_unlock(lock);
+ rt_mutex_set_owner(lock, NULL);
+diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
+index d1d62f942be22..ca6fb489007b6 100644
+--- a/kernel/locking/rtmutex_common.h
++++ b/kernel/locking/rtmutex_common.h
+@@ -133,8 +133,7 @@ enum rtmutex_chainwalk {
+ extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
+ extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
+ struct task_struct *proxy_owner);
+-extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
+- struct task_struct *proxy_owner);
++extern void rt_mutex_proxy_unlock(struct rt_mutex *lock);
+ extern void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
+ extern int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
+ struct rt_mutex_waiter *waiter,
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 801f8bc52b34f..aafec8cb8637d 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1338,11 +1338,16 @@ static size_t info_print_prefix(const struct printk_info *info, bool syslog,
+ * done:
+ *
+ * - Add prefix for each line.
++ * - Drop truncated lines that no longer fit into the buffer.
+ * - Add the trailing newline that has been removed in vprintk_store().
+- * - Drop truncated lines that do not longer fit into the buffer.
++ * - Add a string terminator.
++ *
++ * Since the produced string is always terminated, the maximum possible
++ * return value is @r->text_buf_size - 1;
+ *
+ * Return: The length of the updated/prepared text, including the added
+- * prefixes and the newline. The dropped line(s) are not counted.
++ * prefixes and the newline. The terminator is not counted. The dropped
++ * line(s) are not counted.
+ */
+ static size_t record_print_text(struct printk_record *r, bool syslog,
+ bool time)
+@@ -1385,26 +1390,31 @@ static size_t record_print_text(struct printk_record *r, bool syslog,
+
+ /*
+ * Truncate the text if there is not enough space to add the
+- * prefix and a trailing newline.
++ * prefix and a trailing newline and a terminator.
+ */
+- if (len + prefix_len + text_len + 1 > buf_size) {
++ if (len + prefix_len + text_len + 1 + 1 > buf_size) {
+ /* Drop even the current line if no space. */
+- if (len + prefix_len + line_len + 1 > buf_size)
++ if (len + prefix_len + line_len + 1 + 1 > buf_size)
+ break;
+
+- text_len = buf_size - len - prefix_len - 1;
++ text_len = buf_size - len - prefix_len - 1 - 1;
+ truncated = true;
+ }
+
+ memmove(text + prefix_len, text, text_len);
+ memcpy(text, prefix, prefix_len);
+
++ /*
++ * Increment the prepared length to include the text and
++ * prefix that were just moved+copied. Also increment for the
++ * newline at the end of this line. If this is the last line,
++ * there is no newline, but it will be added immediately below.
++ */
+ len += prefix_len + line_len + 1;
+-
+ if (text_len == line_len) {
+ /*
+- * Add the trailing newline removed in
+- * vprintk_store().
++ * This is the last line. Add the trailing newline
++ * removed in vprintk_store().
+ */
+ text[prefix_len + line_len] = '\n';
+ break;
+@@ -1429,6 +1439,14 @@ static size_t record_print_text(struct printk_record *r, bool syslog,
+ text_len -= line_len + 1;
+ }
+
++ /*
++ * If a buffer was provided, it will be terminated. Space for the
++ * string terminator is guaranteed to be available. The terminator is
++ * not counted in the return value.
++ */
++ if (buf_size > 0)
++ r->text_buf[len] = 0;
++
+ return len;
+ }
+
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 14b9e83ff9da2..88639706ae177 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -2846,20 +2846,20 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
+ {
+ struct page *page;
+
+-#ifdef CONFIG_CMA
+- /*
+- * Balance movable allocations between regular and CMA areas by
+- * allocating from CMA when over half of the zone's free memory
+- * is in the CMA area.
+- */
+- if (alloc_flags & ALLOC_CMA &&
+- zone_page_state(zone, NR_FREE_CMA_PAGES) >
+- zone_page_state(zone, NR_FREE_PAGES) / 2) {
+- page = __rmqueue_cma_fallback(zone, order);
+- if (page)
+- return page;
++ if (IS_ENABLED(CONFIG_CMA)) {
++ /*
++ * Balance movable allocations between regular and CMA areas by
++ * allocating from CMA when over half of the zone's free memory
++ * is in the CMA area.
++ */
++ if (alloc_flags & ALLOC_CMA &&
++ zone_page_state(zone, NR_FREE_CMA_PAGES) >
++ zone_page_state(zone, NR_FREE_PAGES) / 2) {
++ page = __rmqueue_cma_fallback(zone, order);
++ if (page)
++ goto out;
++ }
+ }
+-#endif
+ retry:
+ page = __rmqueue_smallest(zone, order, migratetype);
+ if (unlikely(!page)) {
+@@ -2870,8 +2870,9 @@ retry:
+ alloc_flags))
+ goto retry;
+ }
+-
+- trace_mm_page_alloc_zone_locked(page, order, migratetype);
++out:
++ if (page)
++ trace_mm_page_alloc_zone_locked(page, order, migratetype);
+ return page;
+ }
+
+diff --git a/mm/slub.c b/mm/slub.c
+index 3f4303f4b657d..071e41067ea67 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -5620,10 +5620,8 @@ static int sysfs_slab_add(struct kmem_cache *s)
+
+ s->kobj.kset = kset;
+ err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
+- if (err) {
+- kobject_put(&s->kobj);
++ if (err)
+ goto out;
+- }
+
+ err = sysfs_create_group(&s->kobj, &slab_attr_group);
+ if (err)
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index d58361109066d..16db9d1ebcbf3 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -1045,16 +1045,18 @@ int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size)
+ /* Only single cluster request supported */
+ WARN_ON_ONCE(n_goal > 1 && size == SWAPFILE_CLUSTER);
+
++ spin_lock(&swap_avail_lock);
++
+ avail_pgs = atomic_long_read(&nr_swap_pages) / size;
+- if (avail_pgs <= 0)
++ if (avail_pgs <= 0) {
++ spin_unlock(&swap_avail_lock);
+ goto noswap;
++ }
+
+ n_goal = min3((long)n_goal, (long)SWAP_BATCH, avail_pgs);
+
+ atomic_long_sub(n_goal * size, &nr_swap_pages);
+
+- spin_lock(&swap_avail_lock);
+-
+ start_over:
+ node = numa_node_id();
+ plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) {
+@@ -1128,14 +1130,13 @@ swp_entry_t get_swap_page_of_type(int type)
+
+ spin_lock(&si->lock);
+ if (si->flags & SWP_WRITEOK) {
+- atomic_long_dec(&nr_swap_pages);
+ /* This is called for allocating swap entry, not cache */
+ offset = scan_swap_map(si, 1);
+ if (offset) {
++ atomic_long_dec(&nr_swap_pages);
+ spin_unlock(&si->lock);
+ return swp_entry(type, offset);
+ }
+- atomic_long_inc(&nr_swap_pages);
+ }
+ spin_unlock(&si->lock);
+ fail:
+diff --git a/tools/bpf/resolve_btfids/Makefile b/tools/bpf/resolve_btfids/Makefile
+index 66cb92136de4a..bf656432ad736 100644
+--- a/tools/bpf/resolve_btfids/Makefile
++++ b/tools/bpf/resolve_btfids/Makefile
+@@ -18,15 +18,6 @@ else
+ endif
+
+ # always use the host compiler
+-ifneq ($(LLVM),)
+-HOSTAR ?= llvm-ar
+-HOSTCC ?= clang
+-HOSTLD ?= ld.lld
+-else
+-HOSTAR ?= ar
+-HOSTCC ?= gcc
+-HOSTLD ?= ld
+-endif
+ AR = $(HOSTAR)
+ CC = $(HOSTCC)
+ LD = $(HOSTLD)
+diff --git a/tools/build/Makefile b/tools/build/Makefile
+index 722f1700d96a8..bae48e6fa9952 100644
+--- a/tools/build/Makefile
++++ b/tools/build/Makefile
+@@ -15,10 +15,6 @@ endef
+ $(call allow-override,CC,$(CROSS_COMPILE)gcc)
+ $(call allow-override,LD,$(CROSS_COMPILE)ld)
+
+-HOSTCC ?= gcc
+-HOSTLD ?= ld
+-HOSTAR ?= ar
+-
+ export HOSTCC HOSTLD HOSTAR
+
+ ifeq ($(V),1)
+diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
+index 4ea9a833dde7a..5cdb19036d7f7 100644
+--- a/tools/objtool/Makefile
++++ b/tools/objtool/Makefile
+@@ -3,15 +3,6 @@ include ../scripts/Makefile.include
+ include ../scripts/Makefile.arch
+
+ # always use the host compiler
+-ifneq ($(LLVM),)
+-HOSTAR ?= llvm-ar
+-HOSTCC ?= clang
+-HOSTLD ?= ld.lld
+-else
+-HOSTAR ?= ar
+-HOSTCC ?= gcc
+-HOSTLD ?= ld
+-endif
+ AR = $(HOSTAR)
+ CC = $(HOSTCC)
+ LD = $(HOSTLD)
+diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
+index 4e1d7460574b4..9452cfb01ef19 100644
+--- a/tools/objtool/elf.c
++++ b/tools/objtool/elf.c
+@@ -354,8 +354,11 @@ static int read_symbols(struct elf *elf)
+
+ symtab = find_section_by_name(elf, ".symtab");
+ if (!symtab) {
+- WARN("missing symbol table");
+- return -1;
++ /*
++ * A missing symbol table is actually possible if it's an empty
++ * .o file. This can happen for thunk_64.o.
++ */
++ return 0;
+ }
+
+ symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
+diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
+index 7ce3f2e8b9c74..62f3deb1d3a8b 100644
+--- a/tools/perf/Makefile.perf
++++ b/tools/perf/Makefile.perf
+@@ -175,10 +175,6 @@ endef
+
+ LD += $(EXTRA_LDFLAGS)
+
+-HOSTCC ?= gcc
+-HOSTLD ?= ld
+-HOSTAR ?= ar
+-
+ PKG_CONFIG = $(CROSS_COMPILE)pkg-config
+ LLVM_CONFIG ?= llvm-config
+
+diff --git a/tools/power/acpi/Makefile.config b/tools/power/acpi/Makefile.config
+index 54a2857c2510a..331f6d30f4726 100644
+--- a/tools/power/acpi/Makefile.config
++++ b/tools/power/acpi/Makefile.config
+@@ -54,7 +54,6 @@ INSTALL_SCRIPT = ${INSTALL_PROGRAM}
+ CROSS = #/usr/i386-linux-uclibc/usr/bin/i386-uclibc-
+ CROSS_COMPILE ?= $(CROSS)
+ LD = $(CC)
+-HOSTCC = gcc
+
+ # check if compiler option is supported
+ cc-supports = ${shell if $(CC) ${1} -S -o /dev/null -x c /dev/null > /dev/null 2>&1; then echo "$(1)"; fi;}
+diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
+index a7974638561ca..1358e89cdf7d6 100644
+--- a/tools/scripts/Makefile.include
++++ b/tools/scripts/Makefile.include
+@@ -59,6 +59,16 @@ $(call allow-override,LD,$(CROSS_COMPILE)ld)
+ $(call allow-override,CXX,$(CROSS_COMPILE)g++)
+ $(call allow-override,STRIP,$(CROSS_COMPILE)strip)
+
++ifneq ($(LLVM),)
++HOSTAR ?= llvm-ar
++HOSTCC ?= clang
++HOSTLD ?= ld.lld
++else
++HOSTAR ?= ar
++HOSTCC ?= gcc
++HOSTLD ?= ld
++endif
++
+ ifeq ($(CC_NO_CLANG), 1)
+ EXTRA_WARNINGS += -Wstrict-aliasing=3
+ endif