Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Sat, 29 Jan 2022 17:45:11
Message-Id: 1643478295.932ad64931d0eea03c4ec9117dc4e6cecf5aaaea.mpagano@gentoo
commit: 932ad64931d0eea03c4ec9117dc4e6cecf5aaaea
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Jan 29 17:44:55 2022 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Jan 29 17:44:55 2022 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=932ad649

Linux patch 4.19.227

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1226_linux-4.19.227.patch | 416 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 420 insertions(+)

diff --git a/0000_README b/0000_README
index 263aac7b..cc68b74d 100644
--- a/0000_README
+++ b/0000_README
@@ -943,6 +943,10 @@ Patch: 1225_linux-4.19.226.patch
From: https://www.kernel.org
Desc: Linux 4.19.226

+Patch: 1226_linux-4.19.227.patch
+From: https://www.kernel.org
+Desc: Linux 4.19.227
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1226_linux-4.19.227.patch b/1226_linux-4.19.227.patch
new file mode 100644
index 00000000..5775a2ef
--- /dev/null
+++ b/1226_linux-4.19.227.patch
@@ -0,0 +1,416 @@
+diff --git a/Makefile b/Makefile
+index 72399555ce886..1e9652cb9c1fc 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 226
++SUBLEVEL = 227
+ EXTRAVERSION =
+ NAME = "People's Front"
+
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 37c80cfecd097..c25ee6a02d65e 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -1595,6 +1595,8 @@ struct drm_i915_private {
+
+ struct intel_uncore uncore;
+
++ struct mutex tlb_invalidate_lock;
++
+ struct i915_virtual_gpu vgpu;
+
+ struct intel_gvt *gvt;
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index c7d05ac7af3cb..5b0d6d8b3ab8e 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -2446,6 +2446,78 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
+ rcu_read_unlock();
+ }
+
++struct reg_and_bit {
++ i915_reg_t reg;
++ u32 bit;
++};
++
++static struct reg_and_bit
++get_reg_and_bit(const struct intel_engine_cs *engine,
++ const i915_reg_t *regs, const unsigned int num)
++{
++ const unsigned int class = engine->class;
++ struct reg_and_bit rb = { .bit = 1 };
++
++ if (WARN_ON_ONCE(class >= num || !regs[class].reg))
++ return rb;
++
++ rb.reg = regs[class];
++ if (class == VIDEO_DECODE_CLASS)
++ rb.reg.reg += 4 * engine->instance; /* GEN8_M2TCR */
++
++ return rb;
++}
++
++static void invalidate_tlbs(struct drm_i915_private *dev_priv)
++{
++ static const i915_reg_t gen8_regs[] = {
++ [RENDER_CLASS] = GEN8_RTCR,
++ [VIDEO_DECODE_CLASS] = GEN8_M1TCR, /* , GEN8_M2TCR */
++ [VIDEO_ENHANCEMENT_CLASS] = GEN8_VTCR,
++ [COPY_ENGINE_CLASS] = GEN8_BTCR,
++ };
++ const unsigned int num = ARRAY_SIZE(gen8_regs);
++ const i915_reg_t *regs = gen8_regs;
++ struct intel_engine_cs *engine;
++ enum intel_engine_id id;
++
++ if (INTEL_GEN(dev_priv) < 8)
++ return;
++
++ GEM_TRACE("\n");
++
++ assert_rpm_wakelock_held(dev_priv);
++
++ mutex_lock(&dev_priv->tlb_invalidate_lock);
++ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
++
++ for_each_engine(engine, dev_priv, id) {
++ /*
++ * HW architecture suggest typical invalidation time at 40us,
++ * with pessimistic cases up to 100us and a recommendation to
++ * cap at 1ms. We go a bit higher just in case.
++ */
++ const unsigned int timeout_us = 100;
++ const unsigned int timeout_ms = 4;
++ struct reg_and_bit rb;
++
++ rb = get_reg_and_bit(engine, regs, num);
++ if (!i915_mmio_reg_offset(rb.reg))
++ continue;
++
++ I915_WRITE_FW(rb.reg, rb.bit);
++ if (__intel_wait_for_register_fw(dev_priv,
++ rb.reg, rb.bit, 0,
++ timeout_us, timeout_ms,
++ NULL))
++ DRM_ERROR_RATELIMITED("%s TLB invalidation did not complete in %ums!\n",
++ engine->name, timeout_ms);
++ }
++
++ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
++ mutex_unlock(&dev_priv->tlb_invalidate_lock);
++}
++
+ static struct sg_table *
+ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
+ {
+@@ -2475,6 +2547,15 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
+ __i915_gem_object_reset_page_iter(obj);
+ obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
+
++ if (test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags)) {
++ struct drm_i915_private *i915 = to_i915(obj->base.dev);
++
++ if (intel_runtime_pm_get_if_in_use(i915)) {
++ invalidate_tlbs(i915);
++ intel_runtime_pm_put(i915);
++ }
++ }
++
+ return pages;
+ }
+
+@@ -5792,6 +5873,8 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
+
+ spin_lock_init(&dev_priv->fb_tracking.lock);
+
++ mutex_init(&dev_priv->tlb_invalidate_lock);
++
+ err = i915_gemfs_init(dev_priv);
+ if (err)
+ DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err);
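
The i915 hunks above backport the TLB invalidation fix for CVE-2022-0330: when a GEM object's pages are released, invalidate_tlbs() writes a per-engine invalidation register and polls until the hardware clears the request bit, serialized by the new tlb_invalidate_lock. A minimal userspace sketch of that write-then-poll-with-deadline pattern follows; mmio_read() and mmio_write() are hypothetical stand-ins rather than the driver's accessors, and only the loop structure mirrors __intel_wait_for_register_fw().

#include <stdint.h>
#include <time.h>

extern uint32_t mmio_read(uint32_t reg);           /* hypothetical stand-in */
extern void mmio_write(uint32_t reg, uint32_t val); /* hypothetical stand-in */

static uint64_t now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

/* Returns 0 once (reg & bit) reads back as 0, or -1 on timeout. */
static int wait_for_bit_clear(uint32_t reg, uint32_t bit, unsigned int timeout_ms)
{
	uint64_t deadline = now_ns() + (uint64_t)timeout_ms * 1000000ull;

	mmio_write(reg, bit);                /* request the invalidation */
	while (mmio_read(reg) & bit) {       /* HW clears the bit when done */
		if (now_ns() > deadline)
			return -1;           /* mirrors the DRM_ERROR path */
	}
	return 0;
}
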
+diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
+index 83e5e01fa9eaa..2e3a713e9bcd8 100644
+--- a/drivers/gpu/drm/i915/i915_gem_object.h
++++ b/drivers/gpu/drm/i915/i915_gem_object.h
+@@ -136,6 +136,7 @@ struct drm_i915_gem_object {
+ * activity?
+ */
+ #define I915_BO_ACTIVE_REF 0
++#define I915_BO_WAS_BOUND_BIT 1
+
+ /*
+ * Is the object to be mapped as read-only to the GPU
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index a6f4f32dd71ce..830049985e56d 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -2431,6 +2431,12 @@ enum i915_power_well_id {
+ #define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1 << 28)
+ #define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1 << 24)
+
++#define GEN8_RTCR _MMIO(0x4260)
++#define GEN8_M1TCR _MMIO(0x4264)
++#define GEN8_M2TCR _MMIO(0x4268)
++#define GEN8_BTCR _MMIO(0x426c)
++#define GEN8_VTCR _MMIO(0x4270)
++
+ #if 0
+ #define PRB0_TAIL _MMIO(0x2030)
+ #define PRB0_HEAD _MMIO(0x2034)
+diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
+index 98358b4b36dea..9aceacc43f4b7 100644
+--- a/drivers/gpu/drm/i915/i915_vma.c
++++ b/drivers/gpu/drm/i915/i915_vma.c
+@@ -335,6 +335,10 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
+ return ret;
+
+ vma->flags |= bind_flags;
++
++ if (vma->obj)
++ set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);
++
+ return 0;
+ }
+
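
The new I915_BO_WAS_BOUND_BIT ties the two halves of the fix together: i915_vma_bind() sets the bit when an object gains a GPU mapping, and __i915_gem_object_unset_pages() consumes it with test_and_clear_bit(), so the costly flush runs at most once and only for objects that were actually bound. A runnable userspace sketch of this sticky-flag idiom, using C11 atomics and illustrative names that are not the kernel's:

#include <stdatomic.h>
#include <stdio.h>

#define WAS_BOUND (1u << 1)        /* analogous to I915_BO_WAS_BOUND_BIT */

static atomic_uint obj_flags;

static void on_bind(void)
{
	atomic_fetch_or(&obj_flags, WAS_BOUND);     /* like set_bit() */
}

static void on_unset_pages(void)
{
	/* like test_and_clear_bit(): only the first call after a bind sees it */
	if (atomic_fetch_and(&obj_flags, ~WAS_BOUND) & WAS_BOUND)
		puts("was bound: flush TLBs");
	else
		puts("never bound: skip the flush");
}

int main(void)
{
	on_unset_pages();   /* never bound: skip the flush */
	on_bind();
	on_unset_pages();   /* was bound: flush TLBs */
	on_unset_pages();   /* never bound: skip the flush */
	return 0;
}

The atomic test-and-clear is what makes the idiom safe: exactly one teardown path observes the bit set, so the flush cannot run twice for the same binding.
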
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+index 1abe21758b0d7..bca0b8980c0e7 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+@@ -855,15 +855,14 @@ extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
+ struct vmw_private *dev_priv,
+ struct vmw_fence_obj **p_fence,
+ uint32_t *p_handle);
+-extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
++extern int vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
+ struct vmw_fpriv *vmw_fp,
+ int ret,
+ struct drm_vmw_fence_rep __user
+ *user_fence_rep,
+ struct vmw_fence_obj *fence,
+ uint32_t fence_handle,
+- int32_t out_fence_fd,
+- struct sync_file *sync_file);
++ int32_t out_fence_fd);
+ extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
+ struct ttm_buffer_object *bo,
+ bool interruptible,
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+index 3834aa71c9c4c..e65554f5a89d5 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -3873,20 +3873,19 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
+ * object so we wait for it immediately, and then unreference the
+ * user-space reference.
+ */
+-void
++int
+ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
+ struct vmw_fpriv *vmw_fp,
+ int ret,
+ struct drm_vmw_fence_rep __user *user_fence_rep,
+ struct vmw_fence_obj *fence,
+ uint32_t fence_handle,
+- int32_t out_fence_fd,
+- struct sync_file *sync_file)
++ int32_t out_fence_fd)
+ {
+ struct drm_vmw_fence_rep fence_rep;
+
+ if (user_fence_rep == NULL)
+- return;
++ return 0;
+
+ memset(&fence_rep, 0, sizeof(fence_rep));
+
+@@ -3914,20 +3913,14 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
+ * and unreference the handle.
+ */
+ if (unlikely(ret != 0) && (fence_rep.error == 0)) {
+- if (sync_file)
+- fput(sync_file->file);
+-
+- if (fence_rep.fd != -1) {
+- put_unused_fd(fence_rep.fd);
+- fence_rep.fd = -1;
+- }
+-
+ ttm_ref_object_base_unref(vmw_fp->tfile,
+ fence_handle, TTM_REF_USAGE);
+ DRM_ERROR("Fence copy error. Syncing.\n");
+ (void) vmw_fence_obj_wait(fence, false, false,
+ VMW_FENCE_WAIT_TIMEOUT);
+ }
++
++ return ret ? -EFAULT : 0;
+ }
+
+ /**
+@@ -4287,16 +4280,23 @@ int vmw_execbuf_process(struct drm_file *file_priv,
+
+ (void) vmw_fence_obj_wait(fence, false, false,
+ VMW_FENCE_WAIT_TIMEOUT);
++ }
++ }
++
++ ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
++ user_fence_rep, fence, handle, out_fence_fd);
++
++ if (sync_file) {
++ if (ret) {
++ /* usercopy of fence failed, put the file object */
++ fput(sync_file->file);
++ put_unused_fd(out_fence_fd);
+ } else {
+ /* Link the fence with the FD created earlier */
+ fd_install(out_fence_fd, sync_file->file);
+ }
+ }
+
+- vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
+- user_fence_rep, fence, handle,
+- out_fence_fd, sync_file);
+-
+ /* Don't unreference when handing fence out */
+ if (unlikely(out_fence != NULL)) {
+ *out_fence = fence;
+@@ -4315,7 +4315,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
+ */
+ vmw_resource_list_unreference(sw_context, &resource_list);
+
+- return 0;
++ return ret;
+
+ out_unlock_binding:
+ mutex_unlock(&dev_priv->binding_mutex);
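
The vmwgfx hunks above backport the stale-fd fix for CVE-2022-22942: vmw_execbuf_copy_fence_user() now returns -EFAULT when the usercopy fails, and the caller alone decides whether to publish the sync file with fd_install() or discard the reserved descriptor with put_unused_fd(). A kernel-style sketch of the underlying reserve-then-publish-or-release pattern; publish_file() and copy_result_to_user() are hypothetical helpers, while get_unused_fd_flags(), fd_install(), put_unused_fd() and fput() are the real kernel APIs:

#include <linux/errno.h>  /* EFAULT */
#include <linux/fcntl.h>  /* O_CLOEXEC */
#include <linux/file.h>   /* get_unused_fd_flags, fd_install, put_unused_fd, fput */

extern int copy_result_to_user(void);  /* hypothetical fallible usercopy step */

static int publish_file(struct file *file)
{
	int fd = get_unused_fd_flags(O_CLOEXEC); /* reserve a number only */

	if (fd < 0) {
		fput(file);
		return fd;
	}

	if (copy_result_to_user()) {     /* anything that can still fail */
		put_unused_fd(fd);       /* nothing was published yet */
		fput(file);              /* drop our file reference */
		return -EFAULT;
	}

	fd_install(fd, file);            /* point of no return: userspace owns it */
	return fd;
}

The invariant is that fd_install() is the point of no return: once the fd is visible, userspace may already be using it, so every error path must run strictly before it. The pre-fix code violated this by letting the callee tear down an fd the caller might have published.
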
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+index 3d546d4093341..72a75316d472b 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+@@ -1169,7 +1169,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
+ }
+
+ vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
+- handle, -1, NULL);
++ handle, -1);
+ vmw_fence_obj_unreference(&fence);
+ return 0;
+ out_no_create:
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+index e486b6517ac55..d87bd2a8c75fb 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -2662,7 +2662,7 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
+ if (file_priv)
+ vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
+ ret, user_fence_rep, fence,
+- handle, -1, NULL);
++ handle, -1);
+ if (out_fence)
+ *out_fence = fence;
+ else
+diff --git a/fs/select.c b/fs/select.c
+index 11a7051075b4f..1c3985d0bcc3e 100644
+--- a/fs/select.c
++++ b/fs/select.c
+@@ -431,9 +431,11 @@ get_max:
+ return max;
+ }
+
+-#define POLLIN_SET (EPOLLRDNORM | EPOLLRDBAND | EPOLLIN | EPOLLHUP | EPOLLERR)
+-#define POLLOUT_SET (EPOLLWRBAND | EPOLLWRNORM | EPOLLOUT | EPOLLERR)
+-#define POLLEX_SET (EPOLLPRI)
++#define POLLIN_SET (EPOLLRDNORM | EPOLLRDBAND | EPOLLIN | EPOLLHUP | EPOLLERR |\
++ EPOLLNVAL)
++#define POLLOUT_SET (EPOLLWRBAND | EPOLLWRNORM | EPOLLOUT | EPOLLERR |\
++ EPOLLNVAL)
++#define POLLEX_SET (EPOLLPRI | EPOLLNVAL)
+
+ static inline void wait_key_set(poll_table *wait, unsigned long in,
+ unsigned long out, unsigned long bit,
+@@ -500,6 +502,7 @@ static int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
+ break;
+ if (!(bit & all_bits))
+ continue;
++ mask = EPOLLNVAL;
+ f = fdget(i);
+ if (f.file) {
+ wait_key_set(wait, in, out, bit,
+@@ -507,34 +510,34 @@ static int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
+ mask = vfs_poll(f.file, wait);
+
+ fdput(f);
+- if ((mask & POLLIN_SET) && (in & bit)) {
+- res_in |= bit;
+- retval++;
+- wait->_qproc = NULL;
+- }
+- if ((mask & POLLOUT_SET) && (out & bit)) {
+- res_out |= bit;
+- retval++;
+- wait->_qproc = NULL;
+- }
+- if ((mask & POLLEX_SET) && (ex & bit)) {
+- res_ex |= bit;
+- retval++;
+- wait->_qproc = NULL;
+- }
+- /* got something, stop busy polling */
+- if (retval) {
+- can_busy_loop = false;
+- busy_flag = 0;
+-
+- /*
+- * only remember a returned
+- * POLL_BUSY_LOOP if we asked for it
+- */
+- } else if (busy_flag & mask)
+- can_busy_loop = true;
+-
+ }
++ if ((mask & POLLIN_SET) && (in & bit)) {
++ res_in |= bit;
++ retval++;
++ wait->_qproc = NULL;
++ }
++ if ((mask & POLLOUT_SET) && (out & bit)) {
++ res_out |= bit;
++ retval++;
++ wait->_qproc = NULL;
++ }
++ if ((mask & POLLEX_SET) && (ex & bit)) {
++ res_ex |= bit;
++ retval++;
++ wait->_qproc = NULL;
++ }
++ /* got something, stop busy polling */
++ if (retval) {
++ can_busy_loop = false;
++ busy_flag = 0;
++
++ /*
++ * only remember a returned
++ * POLL_BUSY_LOOP if we asked for it
++ */
++ } else if (busy_flag & mask)
++ can_busy_loop = true;
++
+ }
+ if (res_in)
+ *rinp = res_in;
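
The fs/select.c change primes mask with EPOLLNVAL before each fdget() and folds EPOLLNVAL into the three readiness sets, so a descriptor closed by another thread mid-select() is reported as ready instead of registering no wakeup source and leaving the task asleep indefinitely. A simplified, self-contained sketch of that control flow; lookup() and poll_file() are hypothetical stand-ins for fdget() and vfs_poll(), and the readiness set is abridged:

#include <stdbool.h>

#define EPOLLIN   0x001u
#define EPOLLNVAL 0x020u
#define POLLIN_SET (EPOLLIN | EPOLLNVAL)       /* abridged readiness set */

struct file;                                    /* opaque */
extern struct file *lookup(int fd);             /* hypothetical fdget() stand-in */
extern unsigned int poll_file(struct file *f);  /* hypothetical vfs_poll() stand-in */

static bool fd_ready_for_read(int fd)
{
	unsigned int mask = EPOLLNVAL;  /* assume "gone" until proven open */
	struct file *f = lookup(fd);

	if (f)
		mask = poll_file(f);    /* real readiness from the driver */

	/* EPOLLNVAL is in POLLIN_SET, so a vanished fd counts as ready */
	return (mask & POLLIN_SET) != 0;
}

A userspace consequence worth noting: callers should treat "ready" followed by read() failing with EBADF as a normal outcome when descriptors can be closed concurrently.
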
+diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
+index a350c05b7ff5e..7c6b1024dd4b5 100644
+--- a/net/bridge/br_device.c
++++ b/net/bridge/br_device.c
+@@ -42,6 +42,8 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ struct ethhdr *eth;
+ u16 vid = 0;
+
++ memset(skb->cb, 0, sizeof(struct br_input_skb_cb));
++
+ rcu_read_lock();
+ nf_ops = rcu_dereference(nf_br_ops);
+ if (nf_ops && nf_ops->br_dev_xmit_hook(skb)) {
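
The bridge fix zeroes skb->cb at the start of br_dev_xmit(): the control block is a fixed scratch area that each network layer reuses in turn, so the bridge must not interpret bytes a previous owner left behind. A small illustrative sketch of that convention, with stand-in types (struct sk_buff_like and struct layer_cb are not the kernel's; the real bridge uses the BR_INPUT_SKB_CB() cast over struct sk_buff):

#include <string.h>

struct sk_buff_like {              /* stand-in for struct sk_buff */
	unsigned char cb[48];      /* per-layer scratch space */
};

struct layer_cb {                  /* stand-in for struct br_input_skb_cb */
	unsigned int frag_max_size;
	int flags;
};

static void layer_xmit(struct sk_buff_like *skb)
{
	/* stale bytes from the previous owner would be misread as ours */
	memset(skb->cb, 0, sizeof(struct layer_cb));

	struct layer_cb *cb = (struct layer_cb *)skb->cb;
	cb->flags = 1;             /* now safe to fill in our own state */
}
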