From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Sat, 29 Jan 2022 17:46:39
Message-Id: 1643478386.76a9bde411289f9d109fa8370cae7ed52a44fec6.mpagano@gentoo
commit:     76a9bde411289f9d109fa8370cae7ed52a44fec6
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Jan 29 17:46:26 2022 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Jan 29 17:46:26 2022 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=76a9bde4

Linux patch 4.9.299

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |   4 +
 1298_linux-4.9.299.patch | 807 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 811 insertions(+)

16 diff --git a/0000_README b/0000_README
17 index 52cd88fb..671be5e2 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -1235,6 +1235,10 @@ Patch: 1297_linux-4.9.298.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.9.298
23
24 +Patch: 1298_linux-4.9.299.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.9.299
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1298_linux-4.9.299.patch b/1298_linux-4.9.299.patch
33 new file mode 100644
34 index 00000000..95feaed3
35 --- /dev/null
36 +++ b/1298_linux-4.9.299.patch
37 @@ -0,0 +1,807 @@
38 +diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt
39 +index 481b6a9c25d5a..16ddfd6bd6a1a 100644
40 +--- a/Documentation/virtual/kvm/mmu.txt
41 ++++ b/Documentation/virtual/kvm/mmu.txt
42 +@@ -152,8 +152,8 @@ Shadow pages contain the following information:
43 + shadow pages) so role.quadrant takes values in the range 0..3. Each
44 + quadrant maps 1GB virtual address space.
45 + role.access:
46 +- Inherited guest access permissions in the form uwx. Note execute
47 +- permission is positive, not negative.
48 ++ Inherited guest access permissions from the parent ptes in the form uwx.
49 ++ Note execute permission is positive, not negative.
50 + role.invalid:
51 + The page is invalid and should not be used. It is a root page that is
52 + currently pinned (by a cpu hardware register pointing to it); once it is
53 +diff --git a/Makefile b/Makefile
54 +index b0f683f18df71..99d37c23495ef 100644
55 +--- a/Makefile
56 ++++ b/Makefile
57 +@@ -1,6 +1,6 @@
58 + VERSION = 4
59 + PATCHLEVEL = 9
60 +-SUBLEVEL = 298
61 ++SUBLEVEL = 299
62 + EXTRAVERSION =
63 + NAME = Roaring Lionus
64 +
65 +diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
66 +index bb8f39fe3a225..8df8cdd093e98 100644
67 +--- a/arch/arm/Kconfig.debug
68 ++++ b/arch/arm/Kconfig.debug
69 +@@ -15,30 +15,42 @@ config ARM_PTDUMP
70 + kernel.
71 + If in doubt, say "N"
72 +
73 +-# RMK wants arm kernels compiled with frame pointers or stack unwinding.
74 +-# If you know what you are doing and are willing to live without stack
75 +-# traces, you can get a slightly smaller kernel by setting this option to
76 +-# n, but then RMK will have to kill you ;).
77 +-config FRAME_POINTER
78 +- bool
79 +- depends on !THUMB2_KERNEL
80 +- default y if !ARM_UNWIND || FUNCTION_GRAPH_TRACER
81 ++choice
82 ++ prompt "Choose kernel unwinder"
83 ++ default UNWINDER_ARM if AEABI && !FUNCTION_GRAPH_TRACER
84 ++ default UNWINDER_FRAME_POINTER if !AEABI || FUNCTION_GRAPH_TRACER
85 ++ help
86 ++ This determines which method will be used for unwinding kernel stack
87 ++ traces for panics, oopses, bugs, warnings, perf, /proc/<pid>/stack,
88 ++ livepatch, lockdep, and more.
89 ++
90 ++config UNWINDER_FRAME_POINTER
91 ++ bool "Frame pointer unwinder"
92 ++ depends on !THUMB2_KERNEL && !CC_IS_CLANG
93 ++ select ARCH_WANT_FRAME_POINTERS
94 ++ select FRAME_POINTER
95 + help
96 +- If you say N here, the resulting kernel will be slightly smaller and
97 +- faster. However, if neither FRAME_POINTER nor ARM_UNWIND are enabled,
98 +- when a problem occurs with the kernel, the information that is
99 +- reported is severely limited.
100 ++ This option enables the frame pointer unwinder for unwinding
101 ++ kernel stack traces.
102 +
103 +-config ARM_UNWIND
104 +- bool "Enable stack unwinding support (EXPERIMENTAL)"
105 ++config UNWINDER_ARM
106 ++ bool "ARM EABI stack unwinder"
107 + depends on AEABI
108 +- default y
109 ++ select ARM_UNWIND
110 + help
111 + This option enables stack unwinding support in the kernel
112 + using the information automatically generated by the
113 + compiler. The resulting kernel image is slightly bigger but
114 + the performance is not affected. Currently, this feature
115 +- only works with EABI compilers. If unsure say Y.
116 ++ only works with EABI compilers.
117 ++
118 ++endchoice
119 ++
120 ++config ARM_UNWIND
121 ++ bool
122 ++
123 ++config FRAME_POINTER
124 ++ bool
125 +
126 + config OLD_MCOUNT
127 + bool
128 +diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
129 +index e03225e707b26..d92c7758efad1 100644
130 +--- a/arch/x86/kvm/paging_tmpl.h
131 ++++ b/arch/x86/kvm/paging_tmpl.h
132 +@@ -100,8 +100,8 @@ struct guest_walker {
133 + gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
134 + pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS];
135 + bool pte_writable[PT_MAX_FULL_LEVELS];
136 +- unsigned pt_access;
137 +- unsigned pte_access;
138 ++ unsigned int pt_access[PT_MAX_FULL_LEVELS];
139 ++ unsigned int pte_access;
140 + gfn_t gfn;
141 + struct x86_exception fault;
142 + };
143 +@@ -285,9 +285,11 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
144 + pt_element_t pte;
145 + pt_element_t __user *uninitialized_var(ptep_user);
146 + gfn_t table_gfn;
147 +- unsigned index, pt_access, pte_access, accessed_dirty, pte_pkey;
148 ++ u64 pt_access, pte_access;
149 ++ unsigned index, accessed_dirty, pte_pkey;
150 + gpa_t pte_gpa;
151 + int offset;
152 ++ u64 walk_nx_mask = 0;
153 + const int write_fault = access & PFERR_WRITE_MASK;
154 + const int user_fault = access & PFERR_USER_MASK;
155 + const int fetch_fault = access & PFERR_FETCH_MASK;
156 +@@ -301,6 +303,7 @@ retry_walk:
157 + pte = mmu->get_cr3(vcpu);
158 +
159 + #if PTTYPE == 64
160 ++ walk_nx_mask = 1ULL << PT64_NX_SHIFT;
161 + if (walker->level == PT32E_ROOT_LEVEL) {
162 + pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
163 + trace_kvm_mmu_paging_element(pte, walker->level);
164 +@@ -312,15 +315,14 @@ retry_walk:
165 + walker->max_level = walker->level;
166 + ASSERT(!(is_long_mode(vcpu) && !is_pae(vcpu)));
167 +
168 +- accessed_dirty = PT_GUEST_ACCESSED_MASK;
169 +- pt_access = pte_access = ACC_ALL;
170 ++ pte_access = ~0;
171 + ++walker->level;
172 +
173 + do {
174 + gfn_t real_gfn;
175 + unsigned long host_addr;
176 +
177 +- pt_access &= pte_access;
178 ++ pt_access = pte_access;
179 + --walker->level;
180 +
181 + index = PT_INDEX(addr, walker->level);
182 +@@ -363,6 +365,12 @@ retry_walk:
183 +
184 + trace_kvm_mmu_paging_element(pte, walker->level);
185 +
186 ++ /*
187 ++ * Inverting the NX it lets us AND it like other
188 ++ * permission bits.
189 ++ */
190 ++ pte_access = pt_access & (pte ^ walk_nx_mask);
191 ++
192 + if (unlikely(!FNAME(is_present_gpte)(pte)))
193 + goto error;
194 +
195 +@@ -371,14 +379,18 @@ retry_walk:
196 + goto error;
197 + }
198 +
199 +- accessed_dirty &= pte;
200 +- pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);
201 +-
202 + walker->ptes[walker->level - 1] = pte;
203 ++
204 ++ /* Convert to ACC_*_MASK flags for struct guest_walker. */
205 ++ walker->pt_access[walker->level - 1] = FNAME(gpte_access)(vcpu, pt_access ^ walk_nx_mask);
206 + } while (!is_last_gpte(mmu, walker->level, pte));
207 +
208 + pte_pkey = FNAME(gpte_pkeys)(vcpu, pte);
209 +- errcode = permission_fault(vcpu, mmu, pte_access, pte_pkey, access);
210 ++ accessed_dirty = pte_access & PT_GUEST_ACCESSED_MASK;
211 ++
212 ++ /* Convert to ACC_*_MASK flags for struct guest_walker. */
213 ++ walker->pte_access = FNAME(gpte_access)(vcpu, pte_access ^ walk_nx_mask);
214 ++ errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
215 + if (unlikely(errcode))
216 + goto error;
217 +
218 +@@ -395,7 +407,7 @@ retry_walk:
219 + walker->gfn = real_gpa >> PAGE_SHIFT;
220 +
221 + if (!write_fault)
222 +- FNAME(protect_clean_gpte)(&pte_access, pte);
223 ++ FNAME(protect_clean_gpte)(&walker->pte_access, pte);
224 + else
225 + /*
226 + * On a write fault, fold the dirty bit into accessed_dirty.
227 +@@ -413,10 +425,9 @@ retry_walk:
228 + goto retry_walk;
229 + }
230 +
231 +- walker->pt_access = pt_access;
232 +- walker->pte_access = pte_access;
233 + pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
234 +- __func__, (u64)pte, pte_access, pt_access);
235 ++ __func__, (u64)pte, walker->pte_access,
236 ++ walker->pt_access[walker->level - 1]);
237 + return 1;
238 +
239 + error:
240 +@@ -444,7 +455,7 @@ error:
241 + */
242 + if (!(errcode & PFERR_RSVD_MASK)) {
243 + vcpu->arch.exit_qualification &= 0x187;
244 +- vcpu->arch.exit_qualification |= ((pt_access & pte) & 0x7) << 3;
245 ++ vcpu->arch.exit_qualification |= (pte_access & 0x7) << 3;
246 + }
247 + #endif
248 + walker->fault.address = addr;
249 +@@ -578,7 +589,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
250 + {
251 + struct kvm_mmu_page *sp = NULL;
252 + struct kvm_shadow_walk_iterator it;
253 +- unsigned direct_access, access = gw->pt_access;
254 ++ unsigned int direct_access, access;
255 + int top_level, ret;
256 + gfn_t gfn, base_gfn;
257 +
258 +@@ -610,6 +621,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
259 + sp = NULL;
260 + if (!is_shadow_present_pte(*it.sptep)) {
261 + table_gfn = gw->table_gfn[it.level - 2];
262 ++ access = gw->pt_access[it.level - 2];
263 + sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
264 + false, access);
265 + }
266 +diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
267 +index c4f155663ca9a..14cd0a742e794 100644
268 +--- a/drivers/gpu/drm/i915/i915_drv.h
269 ++++ b/drivers/gpu/drm/i915/i915_drv.h
270 +@@ -1763,6 +1763,8 @@ struct drm_i915_private {
271 +
272 + struct intel_uncore uncore;
273 +
274 ++ struct mutex tlb_invalidate_lock;
275 ++
276 + struct i915_virtual_gpu vgpu;
277 +
278 + struct intel_gvt gvt;
279 +@@ -2211,7 +2213,8 @@ struct drm_i915_gem_object {
280 + * rendering and so a non-zero seqno), and is not set if it i s on
281 + * inactive (ready to be unbound) list.
282 + */
283 +-#define I915_BO_ACTIVE_SHIFT 0
284 ++#define I915_BO_WAS_BOUND_BIT 0
285 ++#define I915_BO_ACTIVE_SHIFT 1
286 + #define I915_BO_ACTIVE_MASK ((1 << I915_NUM_ENGINES) - 1)
287 + #define __I915_BO_ACTIVE(bo) \
288 + ((READ_ONCE((bo)->flags) >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK)
289 +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
290 +index 3fb4f9acacba0..9265ac5774c25 100644
291 +--- a/drivers/gpu/drm/i915/i915_gem.c
292 ++++ b/drivers/gpu/drm/i915/i915_gem.c
293 +@@ -2185,6 +2185,67 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
294 + kfree(obj->pages);
295 + }
296 +
297 ++static int
298 ++__intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
299 ++ i915_reg_t reg,
300 ++ const u32 mask,
301 ++ const u32 value,
302 ++ const unsigned int timeout_us,
303 ++ const unsigned int timeout_ms)
304 ++{
305 ++#define done ((I915_READ_FW(reg) & mask) == value)
306 ++ int ret = wait_for_us(done, timeout_us);
307 ++ if (ret)
308 ++ ret = wait_for(done, timeout_ms);
309 ++ return ret;
310 ++#undef done
311 ++}
312 ++
313 ++static void invalidate_tlbs(struct drm_i915_private *dev_priv)
314 ++{
315 ++ static const i915_reg_t gen8_regs[] = {
316 ++ [RCS] = GEN8_RTCR,
317 ++ [VCS] = GEN8_M1TCR,
318 ++ [VCS2] = GEN8_M2TCR,
319 ++ [VECS] = GEN8_VTCR,
320 ++ [BCS] = GEN8_BTCR,
321 ++ };
322 ++ struct intel_engine_cs *engine;
323 ++
324 ++ if (INTEL_GEN(dev_priv) < 8)
325 ++ return;
326 ++
327 ++ assert_rpm_wakelock_held(dev_priv);
328 ++
329 ++ mutex_lock(&dev_priv->tlb_invalidate_lock);
330 ++ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
331 ++
332 ++ for_each_engine(engine, dev_priv) {
333 ++ /*
334 ++ * HW architecture suggest typical invalidation time at 40us,
335 ++ * with pessimistic cases up to 100us and a recommendation to
336 ++ * cap at 1ms. We go a bit higher just in case.
337 ++ */
338 ++ const unsigned int timeout_us = 100;
339 ++ const unsigned int timeout_ms = 4;
340 ++ const enum intel_engine_id id = engine->id;
341 ++
342 ++ if (WARN_ON_ONCE(id >= ARRAY_SIZE(gen8_regs) ||
343 ++ !i915_mmio_reg_offset(gen8_regs[id])))
344 ++ continue;
345 ++
346 ++ I915_WRITE_FW(gen8_regs[id], 1);
347 ++ if (__intel_wait_for_register_fw(dev_priv,
348 ++ gen8_regs[id], 1, 0,
349 ++ timeout_us, timeout_ms))
350 ++ DRM_ERROR_RATELIMITED("%s TLB invalidation did not complete in %ums!\n",
351 ++ engine->name, timeout_ms);
352 ++ }
353 ++
354 ++ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
355 ++ mutex_unlock(&dev_priv->tlb_invalidate_lock);
356 ++}
357 ++
358 + int
359 + i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
360 + {
361 +@@ -2215,6 +2276,15 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
362 + obj->mapping = NULL;
363 + }
364 +
365 ++ if (test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags)) {
366 ++ struct drm_i915_private *i915 = to_i915(obj->base.dev);
367 ++
368 ++ if (intel_runtime_pm_get_if_in_use(i915)) {
369 ++ invalidate_tlbs(i915);
370 ++ intel_runtime_pm_put(i915);
371 ++ }
372 ++ }
373 ++
374 + ops->put_pages(obj);
375 + obj->pages = NULL;
376 +
377 +@@ -4627,6 +4697,8 @@ i915_gem_load_init(struct drm_device *dev)
378 +
379 + atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
380 +
381 ++ mutex_init(&dev_priv->tlb_invalidate_lock);
382 ++
383 + spin_lock_init(&dev_priv->fb_tracking.lock);
384 + }
385 +
386 +diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
387 +index 16f56f14f4d06..edaff73b7aa9d 100644
388 +--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
389 ++++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
390 +@@ -3685,6 +3685,10 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
391 + return ret;
392 +
393 + vma->flags |= bind_flags;
394 ++
395 ++ if (vma->obj)
396 ++ set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);
397 ++
398 + return 0;
399 + }
400 +
401 +diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
402 +index 5468e69bf520a..1ff1e33df2c71 100644
403 +--- a/drivers/gpu/drm/i915/i915_reg.h
404 ++++ b/drivers/gpu/drm/i915/i915_reg.h
405 +@@ -1698,6 +1698,12 @@ enum skl_disp_power_wells {
406 + #define GAMT_CHKN_BIT_REG _MMIO(0x4ab8)
407 + #define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1<<28)
408 +
409 ++#define GEN8_RTCR _MMIO(0x4260)
410 ++#define GEN8_M1TCR _MMIO(0x4264)
411 ++#define GEN8_M2TCR _MMIO(0x4268)
412 ++#define GEN8_BTCR _MMIO(0x426c)
413 ++#define GEN8_VTCR _MMIO(0x4270)
414 ++
415 + #if 0
416 + #define PRB0_TAIL _MMIO(0x2030)
417 + #define PRB0_HEAD _MMIO(0x2034)
418 +diff --git a/drivers/media/firewire/firedtv-avc.c b/drivers/media/firewire/firedtv-avc.c
419 +index 280b5ffea5922..3a373711f5ad9 100644
420 +--- a/drivers/media/firewire/firedtv-avc.c
421 ++++ b/drivers/media/firewire/firedtv-avc.c
422 +@@ -1169,7 +1169,11 @@ int avc_ca_pmt(struct firedtv *fdtv, char *msg, int length)
423 + read_pos += program_info_length;
424 + write_pos += program_info_length;
425 + }
426 +- while (read_pos < length) {
427 ++ while (read_pos + 4 < length) {
428 ++ if (write_pos + 4 >= sizeof(c->operand) - 4) {
429 ++ ret = -EINVAL;
430 ++ goto out;
431 ++ }
432 + c->operand[write_pos++] = msg[read_pos++];
433 + c->operand[write_pos++] = msg[read_pos++];
434 + c->operand[write_pos++] = msg[read_pos++];
435 +@@ -1181,13 +1185,17 @@ int avc_ca_pmt(struct firedtv *fdtv, char *msg, int length)
436 + c->operand[write_pos++] = es_info_length >> 8;
437 + c->operand[write_pos++] = es_info_length & 0xff;
438 + if (es_info_length > 0) {
439 ++ if (read_pos >= length) {
440 ++ ret = -EINVAL;
441 ++ goto out;
442 ++ }
443 + pmt_cmd_id = msg[read_pos++];
444 + if (pmt_cmd_id != 1 && pmt_cmd_id != 4)
445 + dev_err(fdtv->device, "invalid pmt_cmd_id %d "
446 + "at stream level\n", pmt_cmd_id);
447 +
448 +- if (es_info_length > sizeof(c->operand) - 4 -
449 +- write_pos) {
450 ++ if (es_info_length > sizeof(c->operand) - 4 - write_pos ||
451 ++ es_info_length > length - read_pos) {
452 + ret = -EINVAL;
453 + goto out;
454 + }
455 +diff --git a/drivers/media/firewire/firedtv-ci.c b/drivers/media/firewire/firedtv-ci.c
456 +index edbb30fdd9d95..93fb4b7312afc 100644
457 +--- a/drivers/media/firewire/firedtv-ci.c
458 ++++ b/drivers/media/firewire/firedtv-ci.c
459 +@@ -138,6 +138,8 @@ static int fdtv_ca_pmt(struct firedtv *fdtv, void *arg)
460 + } else {
461 + data_length = msg->msg[3];
462 + }
463 ++ if (data_length > sizeof(msg->msg) - data_pos)
464 ++ return -EINVAL;
465 +
466 + return avc_ca_pmt(fdtv, &msg->msg[data_pos], data_length);
467 + }
468 +diff --git a/drivers/staging/android/ion/ion-ioctl.c b/drivers/staging/android/ion/ion-ioctl.c
469 +index e3596855a7031..a27865b94416b 100644
470 +--- a/drivers/staging/android/ion/ion-ioctl.c
471 ++++ b/drivers/staging/android/ion/ion-ioctl.c
472 +@@ -30,6 +30,69 @@ union ion_ioctl_arg {
473 + struct ion_heap_query query;
474 + };
475 +
476 ++/* Must hold the client lock */
477 ++static void user_ion_handle_get(struct ion_handle *handle)
478 ++{
479 ++ if (handle->user_ref_count++ == 0)
480 ++ kref_get(&handle->ref);
481 ++}
482 ++
483 ++/* Must hold the client lock */
484 ++static struct ion_handle *user_ion_handle_get_check_overflow(
485 ++ struct ion_handle *handle)
486 ++{
487 ++ if (handle->user_ref_count + 1 == 0)
488 ++ return ERR_PTR(-EOVERFLOW);
489 ++ user_ion_handle_get(handle);
490 ++ return handle;
491 ++}
492 ++
493 ++/* passes a kref to the user ref count.
494 ++ * We know we're holding a kref to the object before and
495 ++ * after this call, so no need to reverify handle.
496 ++ */
497 ++static struct ion_handle *pass_to_user(struct ion_handle *handle)
498 ++{
499 ++ struct ion_client *client = handle->client;
500 ++ struct ion_handle *ret;
501 ++
502 ++ mutex_lock(&client->lock);
503 ++ ret = user_ion_handle_get_check_overflow(handle);
504 ++ ion_handle_put_nolock(handle);
505 ++ mutex_unlock(&client->lock);
506 ++ return ret;
507 ++}
508 ++
509 ++/* Must hold the client lock */
510 ++static int user_ion_handle_put_nolock(struct ion_handle *handle)
511 ++{
512 ++ int ret;
513 ++
514 ++ if (--handle->user_ref_count == 0)
515 ++ ret = ion_handle_put_nolock(handle);
516 ++
517 ++ return ret;
518 ++}
519 ++
520 ++static void user_ion_free_nolock(struct ion_client *client,
521 ++ struct ion_handle *handle)
522 ++{
523 ++ bool valid_handle;
524 ++
525 ++ WARN_ON(client != handle->client);
526 ++
527 ++ valid_handle = ion_handle_validate(client, handle);
528 ++ if (!valid_handle) {
529 ++ WARN(1, "%s: invalid handle passed to free.\n", __func__);
530 ++ return;
531 ++ }
532 ++ if (handle->user_ref_count == 0) {
533 ++ WARN(1, "%s: User does not have access!\n", __func__);
534 ++ return;
535 ++ }
536 ++ user_ion_handle_put_nolock(handle);
537 ++}
538 ++
539 + static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg)
540 + {
541 + int ret = 0;
542 +@@ -96,16 +159,15 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
543 + {
544 + struct ion_handle *handle;
545 +
546 +- handle = ion_alloc(client, data.allocation.len,
547 +- data.allocation.align,
548 +- data.allocation.heap_id_mask,
549 +- data.allocation.flags);
550 ++ handle = __ion_alloc(client, data.allocation.len,
551 ++ data.allocation.align,
552 ++ data.allocation.heap_id_mask,
553 ++ data.allocation.flags, true);
554 + if (IS_ERR(handle))
555 + return PTR_ERR(handle);
556 +-
557 + data.allocation.handle = handle->id;
558 +-
559 + cleanup_handle = handle;
560 ++ pass_to_user(handle);
561 + break;
562 + }
563 + case ION_IOC_FREE:
564 +@@ -118,7 +180,7 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
565 + mutex_unlock(&client->lock);
566 + return PTR_ERR(handle);
567 + }
568 +- ion_free_nolock(client, handle);
569 ++ user_ion_free_nolock(client, handle);
570 + ion_handle_put_nolock(handle);
571 + mutex_unlock(&client->lock);
572 + break;
573 +@@ -146,10 +208,16 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
574 + struct ion_handle *handle;
575 +
576 + handle = ion_import_dma_buf_fd(client, data.fd.fd);
577 +- if (IS_ERR(handle))
578 ++ if (IS_ERR(handle)) {
579 + ret = PTR_ERR(handle);
580 +- else
581 ++ } else {
582 + data.handle.handle = handle->id;
583 ++ handle = pass_to_user(handle);
584 ++ if (IS_ERR(handle)) {
585 ++ ret = PTR_ERR(handle);
586 ++ data.handle.handle = 0;
587 ++ }
588 ++ }
589 + break;
590 + }
591 + case ION_IOC_SYNC:
592 +@@ -174,10 +242,16 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
593 +
594 + if (dir & _IOC_READ) {
595 + if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
596 +- if (cleanup_handle)
597 +- ion_free(client, cleanup_handle);
598 ++ if (cleanup_handle) {
599 ++ mutex_lock(&client->lock);
600 ++ user_ion_free_nolock(client, cleanup_handle);
601 ++ ion_handle_put_nolock(cleanup_handle);
602 ++ mutex_unlock(&client->lock);
603 ++ }
604 + return -EFAULT;
605 + }
606 + }
607 ++ if (cleanup_handle)
608 ++ ion_handle_put(cleanup_handle);
609 + return ret;
610 + }
611 +diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
612 +index aac9b38b8c25c..b272f2ab87e8f 100644
613 +--- a/drivers/staging/android/ion/ion.c
614 ++++ b/drivers/staging/android/ion/ion.c
615 +@@ -363,8 +363,8 @@ struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
616 + return ERR_PTR(-EINVAL);
617 + }
618 +
619 +-static bool ion_handle_validate(struct ion_client *client,
620 +- struct ion_handle *handle)
621 ++bool ion_handle_validate(struct ion_client *client,
622 ++ struct ion_handle *handle)
623 + {
624 + WARN_ON(!mutex_is_locked(&client->lock));
625 + return idr_find(&client->idr, handle->id) == handle;
626 +@@ -401,9 +401,9 @@ static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
627 + return 0;
628 + }
629 +
630 +-struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
631 +- size_t align, unsigned int heap_id_mask,
632 +- unsigned int flags)
633 ++struct ion_handle *__ion_alloc(struct ion_client *client, size_t len,
634 ++ size_t align, unsigned int heap_id_mask,
635 ++ unsigned int flags, bool grab_handle)
636 + {
637 + struct ion_handle *handle;
638 + struct ion_device *dev = client->dev;
639 +@@ -453,6 +453,8 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
640 + return handle;
641 +
642 + mutex_lock(&client->lock);
643 ++ if (grab_handle)
644 ++ ion_handle_get(handle);
645 + ret = ion_handle_add(client, handle);
646 + mutex_unlock(&client->lock);
647 + if (ret) {
648 +@@ -462,6 +464,13 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
649 +
650 + return handle;
651 + }
652 ++
653 ++struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
654 ++ size_t align, unsigned int heap_id_mask,
655 ++ unsigned int flags)
656 ++{
657 ++ return __ion_alloc(client, len, align, heap_id_mask, flags, false);
658 ++}
659 + EXPORT_SYMBOL(ion_alloc);
660 +
661 + void ion_free_nolock(struct ion_client *client,
662 +diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
663 +index 93dafb4586e43..cfa50dfb46edc 100644
664 +--- a/drivers/staging/android/ion/ion.h
665 ++++ b/drivers/staging/android/ion/ion.h
666 +@@ -109,6 +109,10 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
667 + size_t align, unsigned int heap_id_mask,
668 + unsigned int flags);
669 +
670 ++struct ion_handle *__ion_alloc(struct ion_client *client, size_t len,
671 ++ size_t align, unsigned int heap_id_mask,
672 ++ unsigned int flags, bool grab_handle);
673 ++
674 + /**
675 + * ion_free - free a handle
676 + * @client: the client
677 +diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
678 +index 760e41885448a..e1dd25eab1dbd 100644
679 +--- a/drivers/staging/android/ion/ion_priv.h
680 ++++ b/drivers/staging/android/ion/ion_priv.h
681 +@@ -149,6 +149,7 @@ struct ion_client {
682 + */
683 + struct ion_handle {
684 + struct kref ref;
685 ++ unsigned int user_ref_count;
686 + struct ion_client *client;
687 + struct ion_buffer *buffer;
688 + struct rb_node node;
689 +@@ -459,6 +460,9 @@ int ion_sync_for_device(struct ion_client *client, int fd);
690 + struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
691 + int id);
692 +
693 ++bool ion_handle_validate(struct ion_client *client,
694 ++ struct ion_handle *handle);
695 ++
696 + void ion_free_nolock(struct ion_client *client, struct ion_handle *handle);
697 +
698 + int ion_handle_put_nolock(struct ion_handle *handle);
699 +diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
700 +index 3ee60c5332179..2fb4633897084 100644
701 +--- a/fs/nfs/nfs4client.c
702 ++++ b/fs/nfs/nfs4client.c
703 +@@ -177,8 +177,11 @@ void nfs40_shutdown_client(struct nfs_client *clp)
704 +
705 + struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
706 + {
707 +- int err;
708 ++ char buf[INET6_ADDRSTRLEN + 1];
709 ++ const char *ip_addr = cl_init->ip_addr;
710 + struct nfs_client *clp = nfs_alloc_client(cl_init);
711 ++ int err;
712 ++
713 + if (IS_ERR(clp))
714 + return clp;
715 +
716 +@@ -202,6 +205,44 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
717 + #if IS_ENABLED(CONFIG_NFS_V4_1)
718 + init_waitqueue_head(&clp->cl_lock_waitq);
719 + #endif
720 ++
721 ++ if (cl_init->minorversion != 0)
722 ++ __set_bit(NFS_CS_INFINITE_SLOTS, &clp->cl_flags);
723 ++ __set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
724 ++ __set_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags);
725 ++
726 ++ /*
727 ++ * Set up the connection to the server before we add add to the
728 ++ * global list.
729 ++ */
730 ++ err = nfs_create_rpc_client(clp, cl_init, RPC_AUTH_GSS_KRB5I);
731 ++ if (err == -EINVAL)
732 ++ err = nfs_create_rpc_client(clp, cl_init, RPC_AUTH_UNIX);
733 ++ if (err < 0)
734 ++ goto error;
735 ++
736 ++ /* If no clientaddr= option was specified, find a usable cb address */
737 ++ if (ip_addr == NULL) {
738 ++ struct sockaddr_storage cb_addr;
739 ++ struct sockaddr *sap = (struct sockaddr *)&cb_addr;
740 ++
741 ++ err = rpc_localaddr(clp->cl_rpcclient, sap, sizeof(cb_addr));
742 ++ if (err < 0)
743 ++ goto error;
744 ++ err = rpc_ntop(sap, buf, sizeof(buf));
745 ++ if (err < 0)
746 ++ goto error;
747 ++ ip_addr = (const char *)buf;
748 ++ }
749 ++ strlcpy(clp->cl_ipaddr, ip_addr, sizeof(clp->cl_ipaddr));
750 ++
751 ++ err = nfs_idmap_new(clp);
752 ++ if (err < 0) {
753 ++ dprintk("%s: failed to create idmapper. Error = %d\n",
754 ++ __func__, err);
755 ++ goto error;
756 ++ }
757 ++ __set_bit(NFS_CS_IDMAP, &clp->cl_res_state);
758 + return clp;
759 +
760 + error:
761 +@@ -354,8 +395,6 @@ static int nfs4_init_client_minor_version(struct nfs_client *clp)
762 + struct nfs_client *nfs4_init_client(struct nfs_client *clp,
763 + const struct nfs_client_initdata *cl_init)
764 + {
765 +- char buf[INET6_ADDRSTRLEN + 1];
766 +- const char *ip_addr = cl_init->ip_addr;
767 + struct nfs_client *old;
768 + int error;
769 +
770 +@@ -365,43 +404,6 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
771 + return clp;
772 + }
773 +
774 +- /* Check NFS protocol revision and initialize RPC op vector */
775 +- clp->rpc_ops = &nfs_v4_clientops;
776 +-
777 +- if (clp->cl_minorversion != 0)
778 +- __set_bit(NFS_CS_INFINITE_SLOTS, &clp->cl_flags);
779 +- __set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
780 +- __set_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags);
781 +-
782 +- error = nfs_create_rpc_client(clp, cl_init, RPC_AUTH_GSS_KRB5I);
783 +- if (error == -EINVAL)
784 +- error = nfs_create_rpc_client(clp, cl_init, RPC_AUTH_UNIX);
785 +- if (error < 0)
786 +- goto error;
787 +-
788 +- /* If no clientaddr= option was specified, find a usable cb address */
789 +- if (ip_addr == NULL) {
790 +- struct sockaddr_storage cb_addr;
791 +- struct sockaddr *sap = (struct sockaddr *)&cb_addr;
792 +-
793 +- error = rpc_localaddr(clp->cl_rpcclient, sap, sizeof(cb_addr));
794 +- if (error < 0)
795 +- goto error;
796 +- error = rpc_ntop(sap, buf, sizeof(buf));
797 +- if (error < 0)
798 +- goto error;
799 +- ip_addr = (const char *)buf;
800 +- }
801 +- strlcpy(clp->cl_ipaddr, ip_addr, sizeof(clp->cl_ipaddr));
802 +-
803 +- error = nfs_idmap_new(clp);
804 +- if (error < 0) {
805 +- dprintk("%s: failed to create idmapper. Error = %d\n",
806 +- __func__, error);
807 +- goto error;
808 +- }
809 +- __set_bit(NFS_CS_IDMAP, &clp->cl_res_state);
810 +-
811 + error = nfs4_init_client_minor_version(clp);
812 + if (error < 0)
813 + goto error;
814 +diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
815 +index bc5ff3a53d4a6..e7addfcd302f4 100644
816 +--- a/lib/Kconfig.debug
817 ++++ b/lib/Kconfig.debug
818 +@@ -1091,7 +1091,7 @@ config LOCKDEP
819 + bool
820 + depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
821 + select STACKTRACE
822 +- select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC && !SCORE
823 ++ select FRAME_POINTER if !MIPS && !PPC && !ARM && !S390 && !MICROBLAZE && !ARC && !SCORE
824 + select KALLSYMS
825 + select KALLSYMS_ALL
826 +
827 +@@ -1670,7 +1670,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
828 + depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
829 + depends on !X86_64
830 + select STACKTRACE
831 +- select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !SCORE
832 ++ select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !SCORE
833 + help
834 + Provide stacktrace filter for fault-injection capabilities
835 +
836 +@@ -1679,7 +1679,7 @@ config LATENCYTOP
837 + depends on DEBUG_KERNEL
838 + depends on STACKTRACE_SUPPORT
839 + depends on PROC_FS
840 +- select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
841 ++ select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC
842 + select KALLSYMS
843 + select KALLSYMS_ALL
844 + select STACKTRACE