
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Sat, 29 Jan 2022 17:45:51
Message-Id: 1643478336.4f8bb1e6bb81698b38566270cd36beea00e7c0c7.mpagano@gentoo
commit: 4f8bb1e6bb81698b38566270cd36beea00e7c0c7
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Jan 29 17:45:36 2022 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Jan 29 17:45:36 2022 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4f8bb1e6

Linux patch 4.14.264

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1263_linux-4.14.264.patch | 352 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 356 insertions(+)

diff --git a/0000_README b/0000_README
index 7ba8ed5f..26b19eee 100644
--- a/0000_README
+++ b/0000_README
@@ -1099,6 +1099,10 @@ Patch: 1262_linux-4.14.263.patch
From: https://www.kernel.org
Desc: Linux 4.14.263

+Patch: 1263_linux-4.14.264.patch
+From: https://www.kernel.org
+Desc: Linux 4.14.264
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1263_linux-4.14.264.patch b/1263_linux-4.14.264.patch
new file mode 100644
index 00000000..027a1409
--- /dev/null
+++ b/1263_linux-4.14.264.patch
@@ -0,0 +1,352 @@
+diff --git a/Makefile b/Makefile
+index 0d754c4d8925f..c5508214fa1f8 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 263
++SUBLEVEL = 264
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 816781f209d66..f8d13ee2d5382 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -2166,6 +2166,8 @@ struct drm_i915_private {
+
+ struct intel_uncore uncore;
+
++ struct mutex tlb_invalidate_lock;
++
+ struct i915_virtual_gpu vgpu;
+
+ struct intel_gvt *gvt;
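
The header change is small but central: the new tlb_invalidate_lock mutex serializes the TLB-invalidation sequence added to i915_gem.c below, since all callers poke the same per-engine control registers. It is initialized in i915_gem_load_init() later in this patch.
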
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 9263b65720bc6..08d31744e2d92 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -2220,6 +2220,76 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
+ rcu_read_unlock();
+ }
+
++struct reg_and_bit {
++ i915_reg_t reg;
++ u32 bit;
++};
++
++static struct reg_and_bit
++get_reg_and_bit(const struct intel_engine_cs *engine,
++ const i915_reg_t *regs, const unsigned int num)
++{
++ const unsigned int class = engine->class;
++ struct reg_and_bit rb = { .bit = 1 };
++
++ if (WARN_ON_ONCE(class >= num || !regs[class].reg))
++ return rb;
++
++ rb.reg = regs[class];
++ if (class == VIDEO_DECODE_CLASS)
++ rb.reg.reg += 4 * engine->instance; /* GEN8_M2TCR */
++
++ return rb;
++}
++
++static void invalidate_tlbs(struct drm_i915_private *dev_priv)
++{
++ static const i915_reg_t gen8_regs[] = {
++ [RENDER_CLASS] = GEN8_RTCR,
++ [VIDEO_DECODE_CLASS] = GEN8_M1TCR, /* , GEN8_M2TCR */
++ [VIDEO_ENHANCEMENT_CLASS] = GEN8_VTCR,
++ [COPY_ENGINE_CLASS] = GEN8_BTCR,
++ };
++ const unsigned int num = ARRAY_SIZE(gen8_regs);
++ const i915_reg_t *regs = gen8_regs;
++ struct intel_engine_cs *engine;
++ enum intel_engine_id id;
++
++ if (INTEL_GEN(dev_priv) < 8)
++ return;
++
++ assert_rpm_wakelock_held(dev_priv);
++
++ mutex_lock(&dev_priv->tlb_invalidate_lock);
++ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
++
++ for_each_engine(engine, dev_priv, id) {
++ /*
++ * HW architecture suggest typical invalidation time at 40us,
++ * with pessimistic cases up to 100us and a recommendation to
++ * cap at 1ms. We go a bit higher just in case.
++ */
++ const unsigned int timeout_us = 100;
++ const unsigned int timeout_ms = 4;
++ struct reg_and_bit rb;
++
++ rb = get_reg_and_bit(engine, regs, num);
++ if (!i915_mmio_reg_offset(rb.reg))
++ continue;
++
++ I915_WRITE_FW(rb.reg, rb.bit);
++ if (__intel_wait_for_register_fw(dev_priv,
++ rb.reg, rb.bit, 0,
++ timeout_us, timeout_ms,
++ NULL))
++ DRM_ERROR_RATELIMITED("%s TLB invalidation did not complete in %ums!\n",
++ engine->name, timeout_ms);
++ }
++
++ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
++ mutex_unlock(&dev_priv->tlb_invalidate_lock);
++}
++
+ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+ enum i915_mm_subclass subclass)
+ {
+@@ -2257,8 +2327,18 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+
+ __i915_gem_object_reset_page_iter(obj);
+
+- if (!IS_ERR(pages))
++ if (!IS_ERR(pages)) {
++ if (test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags)) {
++ struct drm_i915_private *i915 = to_i915(obj->base.dev);
++
++ if (intel_runtime_pm_get_if_in_use(i915)) {
++ invalidate_tlbs(i915);
++ intel_runtime_pm_put(i915);
++ }
++ }
++
+ obj->ops->put_pages(obj, pages);
++ }
+
+ unlock:
+ mutex_unlock(&obj->mm.lock);
+@@ -4972,6 +5052,8 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
+
+ spin_lock_init(&dev_priv->fb_tracking.lock);
+
++ mutex_init(&dev_priv->tlb_invalidate_lock);
++
+ return 0;
+
+ err_priorities:
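
The core of the fix is invalidate_tlbs(): on Gen8+ hardware it writes bit 0 of each engine class's TLB control register and, holding forcewake and the new mutex, polls until the hardware clears the bit again. A minimal userspace model of that write-then-poll handshake is sketched below; mmio_write()/mmio_read() are hypothetical stand-ins for I915_WRITE_FW() and the polling done by __intel_wait_for_register_fw().

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical MMIO accessors: real hardware clears the trigger bit
     * itself once the TLB invalidation has completed. */
    static uint32_t tcr;
    static void     mmio_write(uint32_t v) { tcr = v; }
    static uint32_t mmio_read(void)        { return tcr &= ~1u; }

    /* Write the trigger bit, then poll until it reads back as zero or the
     * poll budget runs out (the patch polls in 100us slices up to 4ms). */
    static bool invalidate_one_engine(unsigned int budget)
    {
        mmio_write(1);
        while (budget--)
            if (!(mmio_read() & 1))
                return true;   /* invalidation acknowledged */
        return false;          /* the DRM_ERROR_RATELIMITED path */
    }

    int main(void) { return invalidate_one_engine(4) ? 0 : 1; }

In the real function the loop also skips engine classes whose register entry is zero, which is what the i915_mmio_reg_offset(rb.reg) check does.
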
+diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
+index 39cfe04dcdb8b..180a8bf247918 100644
+--- a/drivers/gpu/drm/i915/i915_gem_object.h
++++ b/drivers/gpu/drm/i915/i915_gem_object.h
+@@ -135,6 +135,7 @@ struct drm_i915_gem_object {
+ * activity?
+ */
+ #define I915_BO_ACTIVE_REF 0
++#define I915_BO_WAS_BOUND_BIT 1
+
+ /*
+ * Is the object to be mapped as read-only to the GPU
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 1db70350af0bf..333e943817896 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -2380,6 +2380,12 @@ enum i915_power_well_id {
+ #define GAMT_CHKN_BIT_REG _MMIO(0x4ab8)
+ #define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1<<28)
+
++#define GEN8_RTCR _MMIO(0x4260)
++#define GEN8_M1TCR _MMIO(0x4264)
++#define GEN8_M2TCR _MMIO(0x4268)
++#define GEN8_BTCR _MMIO(0x426c)
++#define GEN8_VTCR _MMIO(0x4270)
++
+ #if 0
+ #define PRB0_TAIL _MMIO(0x2030)
+ #define PRB0_HEAD _MMIO(0x2034)
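
The five new definitions are the Gen8 per-engine-class TLB control registers, sitting in one 0x4260-0x4270 block. get_reg_and_bit() above indexes them by engine class and, for the video-decode class only, steps 4 bytes per engine instance so the second VCS engine lands on GEN8_M2TCR (0x4268). A small sketch of that offset arithmetic, assuming the usual upstream class numbering (render = 0, video decode = 1, video enhancement = 2, copy = 3):

    #include <stdint.h>
    #include <stdio.h>

    /* Offsets copied from the hunk above; the class indices are an
     * assumption matching the upstream engine-class enum. */
    static uint32_t tcr_offset(unsigned int class, unsigned int instance)
    {
        static const uint32_t base[] = {
            [0] = 0x4260,  /* RENDER_CLASS            -> GEN8_RTCR  */
            [1] = 0x4264,  /* VIDEO_DECODE_CLASS      -> GEN8_M1TCR */
            [2] = 0x4270,  /* VIDEO_ENHANCEMENT_CLASS -> GEN8_VTCR  */
            [3] = 0x426c,  /* COPY_ENGINE_CLASS       -> GEN8_BTCR  */
        };
        uint32_t off = base[class];

        if (class == 1)   /* VCS0 -> GEN8_M1TCR, VCS1 -> GEN8_M2TCR */
            off += 4 * instance;
        return off;
    }

    int main(void)
    {
        printf("0x%x\n", tcr_offset(1, 1)); /* prints 0x4268, i.e. GEN8_M2TCR */
        return 0;
    }
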
+diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
+index 76eed1fdac096..5653e7bac914b 100644
+--- a/drivers/gpu/drm/i915/i915_vma.c
++++ b/drivers/gpu/drm/i915/i915_vma.c
+@@ -272,6 +272,10 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
+ return ret;
+
+ vma->flags |= bind_flags;
++
++ if (vma->obj)
++ set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);
++
+ return 0;
+ }
+
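This is the producer side of the flag protocol: every successful bind marks the object, and __i915_gem_object_put_pages() above consumes the mark with test_and_clear_bit(), so only objects that were actually mapped by the GPU pay for a TLB flush, and only once per bind/teardown cycle. A self-contained model of that handshake, using C11 atomics in place of the kernel's set_bit()/test_and_clear_bit():

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define WAS_BOUND (1ul << 1)   /* models I915_BO_WAS_BOUND_BIT = 1 */

    static _Atomic unsigned long flags;

    static void bind(void) { atomic_fetch_or(&flags, WAS_BOUND); }

    /* true only on the first call after bind(), like test_and_clear_bit() */
    static bool consume_bound(void)
    {
        return atomic_fetch_and(&flags, ~WAS_BOUND) & WAS_BOUND;
    }

    int main(void)
    {
        bind();
        printf("%d %d\n", consume_bound(), consume_bound()); /* prints: 1 0 */
        return 0;
    }
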
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+index 8c65cc3b0dda2..8f5321f098569 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+@@ -837,15 +837,14 @@ extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
+ struct vmw_private *dev_priv,
+ struct vmw_fence_obj **p_fence,
+ uint32_t *p_handle);
+-extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
++extern int vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
+ struct vmw_fpriv *vmw_fp,
+ int ret,
+ struct drm_vmw_fence_rep __user
+ *user_fence_rep,
+ struct vmw_fence_obj *fence,
+ uint32_t fence_handle,
+- int32_t out_fence_fd,
+- struct sync_file *sync_file);
++ int32_t out_fence_fd);
+ extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
+ struct ttm_buffer_object *bo,
+ bool interruptible,
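
The prototype change turns vmw_execbuf_copy_fence_user() from fire-and-forget into a function that reports whether the fence representation actually reached userspace, and drops the sync_file parameter so the fd/file cleanup can move back to the one caller that created the sync_file in the first place (see the vmwgfx_execbuf.c hunks below).
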
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+index dc677ba4dc380..996696ad6f988 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -3848,20 +3848,19 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
+ * object so we wait for it immediately, and then unreference the
+ * user-space reference.
+ */
+-void
++int
+ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
+ struct vmw_fpriv *vmw_fp,
+ int ret,
+ struct drm_vmw_fence_rep __user *user_fence_rep,
+ struct vmw_fence_obj *fence,
+ uint32_t fence_handle,
+- int32_t out_fence_fd,
+- struct sync_file *sync_file)
++ int32_t out_fence_fd)
+ {
+ struct drm_vmw_fence_rep fence_rep;
+
+ if (user_fence_rep == NULL)
+- return;
++ return 0;
+
+ memset(&fence_rep, 0, sizeof(fence_rep));
+
+@@ -3889,20 +3888,14 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
+ * and unreference the handle.
+ */
+ if (unlikely(ret != 0) && (fence_rep.error == 0)) {
+- if (sync_file)
+- fput(sync_file->file);
+-
+- if (fence_rep.fd != -1) {
+- put_unused_fd(fence_rep.fd);
+- fence_rep.fd = -1;
+- }
+-
+ ttm_ref_object_base_unref(vmw_fp->tfile,
+ fence_handle, TTM_REF_USAGE);
+ DRM_ERROR("Fence copy error. Syncing.\n");
+ (void) vmw_fence_obj_wait(fence, false, false,
+ VMW_FENCE_WAIT_TIMEOUT);
+ }
++
++ return ret ? -EFAULT : 0;
+ }
+
+ /**
+@@ -4262,16 +4255,23 @@ int vmw_execbuf_process(struct drm_file *file_priv,
+
+ (void) vmw_fence_obj_wait(fence, false, false,
+ VMW_FENCE_WAIT_TIMEOUT);
++ }
++ }
++
++ ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
++ user_fence_rep, fence, handle, out_fence_fd);
++
++ if (sync_file) {
++ if (ret) {
++ /* usercopy of fence failed, put the file object */
++ fput(sync_file->file);
++ put_unused_fd(out_fence_fd);
+ } else {
+ /* Link the fence with the FD created earlier */
+ fd_install(out_fence_fd, sync_file->file);
+ }
+ }
+
+- vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
+- user_fence_rep, fence, handle,
+- out_fence_fd, sync_file);
+-
+ /* Don't unreference when handing fence out */
+ if (unlikely(out_fence != NULL)) {
+ *out_fence = fence;
+@@ -4290,7 +4290,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
+ */
+ vmw_resource_list_unreference(sw_context, &resource_list);
+
+- return 0;
++ return ret;
+
+ out_unlock_binding:
+ mutex_unlock(&dev_priv->binding_mutex);
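
The reordering in vmw_execbuf_process() restores the standard "reserve the fd, publish it last" lifecycle: the fence_rep carrying the fd number is now copied to userspace before fd_install(), so a failed copy can still reclaim the unused slot with put_unused_fd() and drop the file reference with fput(); once fd_install() has run, the descriptor belongs to userspace and the kernel must not close it. A condensed sketch of that pattern follows; export_fence_fd() and its arguments are illustrative, not vmwgfx API, though the fd helpers themselves are the real kernel ones.

    #include <linux/fcntl.h>
    #include <linux/file.h>
    #include <linux/sync_file.h>
    #include <linux/uaccess.h>

    static int export_fence_fd(struct sync_file *sync_file, int __user *out)
    {
            int fd = get_unused_fd_flags(O_CLOEXEC);

            if (fd < 0)
                    return fd;

            if (copy_to_user(out, &fd, sizeof(fd))) {
                    put_unused_fd(fd);       /* slot reserved, never published */
                    fput(sync_file->file);   /* drop the sync_file's file ref  */
                    return -EFAULT;
            }

            fd_install(fd, sync_file->file); /* point of no return */
            return 0;
    }

That is exactly the split the hunks perform: the copy routine now returns -EFAULT on a failed usercopy, and vmw_execbuf_process() chooses between the fput()/put_unused_fd() error path and fd_install() based on that value, finally propagating it instead of returning 0 unconditionally.
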
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+index d6b1c509ae019..7d2482644ef70 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+@@ -1150,7 +1150,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
+ }
+
+ vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
+- handle, -1, NULL);
++ handle, -1);
+ vmw_fence_obj_unreference(&fence);
+ return 0;
+ out_no_create:
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+index 848c9d009be2a..fbba55edbfd02 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -2511,7 +2511,7 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
+ if (file_priv)
+ vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
+ ret, user_fence_rep, fence,
+- handle, -1, NULL);
++ handle, -1);
+ if (out_fence)
+ *out_fence = fence;
+ else
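
The two remaining callers, vmw_fence_event_ioctl() and vmw_kms_helper_buffer_finish(), never create a sync_file, so they pass out_fence_fd = -1 and simply lose the trailing NULL argument; neither checks the new return value, which keeps their old fire-and-forget behaviour.
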
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index 324c4cdc003eb..b3f3b02ffd42d 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -762,21 +762,21 @@ static struct bcm_op *bcm_find_op(struct list_head *ops,
+ static void bcm_remove_op(struct bcm_op *op)
+ {
+ if (op->tsklet.func) {
+- while (test_bit(TASKLET_STATE_SCHED, &op->tsklet.state) ||
+- test_bit(TASKLET_STATE_RUN, &op->tsklet.state) ||
+- hrtimer_active(&op->timer)) {
+- hrtimer_cancel(&op->timer);
++ do {
+ tasklet_kill(&op->tsklet);
+- }
++ hrtimer_cancel(&op->timer);
++ } while (test_bit(TASKLET_STATE_SCHED, &op->tsklet.state) ||
++ test_bit(TASKLET_STATE_RUN, &op->tsklet.state) ||
++ hrtimer_active(&op->timer));
+ }
+
+ if (op->thrtsklet.func) {
+- while (test_bit(TASKLET_STATE_SCHED, &op->thrtsklet.state) ||
+- test_bit(TASKLET_STATE_RUN, &op->thrtsklet.state) ||
+- hrtimer_active(&op->thrtimer)) {
+- hrtimer_cancel(&op->thrtimer);
++ do {
+ tasklet_kill(&op->thrtsklet);
+- }
++ hrtimer_cancel(&op->thrtimer);
++ } while (test_bit(TASKLET_STATE_SCHED, &op->thrtsklet.state) ||
++ test_bit(TASKLET_STATE_RUN, &op->thrtsklet.state) ||
++ hrtimer_active(&op->thrtimer));
+ }
+
+ if ((op->frames) && (op->frames != &op->sframe))
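
The bcm_remove_op() rework closes a teardown race: the old while loops tested the tasklet state bits and the hrtimer before doing any work, so if the timer callback was just about to schedule the tasklet (or the tasklet about to re-arm the timer) the loop could exit with work still in flight, letting the tasklet or timer fire after the op had been freed. The do/while form guarantees tasklet_kill() and hrtimer_cancel() each run at least once, and keeps cancelling until both the tasklet and the timer are observed quiescent.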