From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Wed, 29 Jan 2020 16:18:49
Message-Id: 1580314630.bfe9312182d69deb9786d697a6fb0e57b6cd9ffa.mpagano@gentoo
1 commit: bfe9312182d69deb9786d697a6fb0e57b6cd9ffa
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Jan 29 16:17:10 2020 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Jan 29 16:17:10 2020 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=bfe93121
7
8 Linux patch 5.4.16
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1015_linux-5.4.16.patch | 5662 +++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 5666 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 34417c5..85cdd05 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -103,6 +103,10 @@ Patch: 1014_linux-5.4.15.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.4.15
23
24 +Patch: 1015_linux-5.4.16.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.4.16
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1015_linux-5.4.16.patch b/1015_linux-5.4.16.patch
33 new file mode 100644
34 index 0000000..2c9e8a8
35 --- /dev/null
36 +++ b/1015_linux-5.4.16.patch
37 @@ -0,0 +1,5662 @@
38 +diff --git a/Makefile b/Makefile
39 +index 30600e309c73..e16d2e58ed4b 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 5
45 + PATCHLEVEL = 4
46 +-SUBLEVEL = 15
47 ++SUBLEVEL = 16
48 + EXTRAVERSION =
49 + NAME = Kleptomaniac Octopus
50 +
51 +diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
52 +index 15b75005bc34..3fa1b962dc27 100644
53 +--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
54 ++++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
55 +@@ -600,8 +600,11 @@ extern void slb_set_size(u16 size);
56 + *
57 + */
58 + #define MAX_USER_CONTEXT ((ASM_CONST(1) << CONTEXT_BITS) - 2)
59 ++
60 ++// The + 2 accounts for INVALID_REGION and 1 more to avoid overlap with kernel
61 + #define MIN_USER_CONTEXT (MAX_KERNEL_CTX_CNT + MAX_VMALLOC_CTX_CNT + \
62 +- MAX_IO_CTX_CNT + MAX_VMEMMAP_CTX_CNT)
63 ++ MAX_IO_CTX_CNT + MAX_VMEMMAP_CTX_CNT + 2)
64 ++
65 + /*
66 + * For platforms that support on 65bit VA we limit the context bits
67 + */
68 +diff --git a/arch/powerpc/include/asm/xive-regs.h b/arch/powerpc/include/asm/xive-regs.h
69 +index f2dfcd50a2d3..33aee7490cbb 100644
70 +--- a/arch/powerpc/include/asm/xive-regs.h
71 ++++ b/arch/powerpc/include/asm/xive-regs.h
72 +@@ -39,6 +39,7 @@
73 +
74 + #define XIVE_ESB_VAL_P 0x2
75 + #define XIVE_ESB_VAL_Q 0x1
76 ++#define XIVE_ESB_INVALID 0xFF
77 +
78 + /*
79 + * Thread Management (aka "TM") registers
80 +diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
81 +index f5fadbd2533a..9651ca061828 100644
82 +--- a/arch/powerpc/sysdev/xive/common.c
83 ++++ b/arch/powerpc/sysdev/xive/common.c
84 +@@ -972,12 +972,21 @@ static int xive_get_irqchip_state(struct irq_data *data,
85 + enum irqchip_irq_state which, bool *state)
86 + {
87 + struct xive_irq_data *xd = irq_data_get_irq_handler_data(data);
88 ++ u8 pq;
89 +
90 + switch (which) {
91 + case IRQCHIP_STATE_ACTIVE:
92 +- *state = !xd->stale_p &&
93 +- (xd->saved_p ||
94 +- !!(xive_esb_read(xd, XIVE_ESB_GET) & XIVE_ESB_VAL_P));
95 ++ pq = xive_esb_read(xd, XIVE_ESB_GET);
96 ++
97 ++ /*
98 ++ * The esb value being all 1's means we couldn't get
99 ++ * the PQ state of the interrupt through mmio. It may
100 ++ * happen, for example when querying a PHB interrupt
101 ++ * while the PHB is in an error state. We consider the
102 ++ * interrupt to be inactive in that case.
103 ++ */
104 ++ *state = (pq != XIVE_ESB_INVALID) && !xd->stale_p &&
105 ++ (xd->saved_p || !!(pq & XIVE_ESB_VAL_P));
106 + return 0;
107 + default:
108 + return -EINVAL;
109 +diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
110 +index 2bbab0230aeb..d287837ed755 100644
111 +--- a/drivers/atm/firestream.c
112 ++++ b/drivers/atm/firestream.c
113 +@@ -912,6 +912,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
114 + }
115 + if (!to) {
116 + printk ("No more free channels for FS50..\n");
117 ++ kfree(vcc);
118 + return -EBUSY;
119 + }
120 + vcc->channo = dev->channo;
121 +@@ -922,6 +923,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
122 + if (((DO_DIRECTION(rxtp) && dev->atm_vccs[vcc->channo])) ||
123 + ( DO_DIRECTION(txtp) && test_bit (vcc->channo, dev->tx_inuse))) {
124 + printk ("Channel is in use for FS155.\n");
125 ++ kfree(vcc);
126 + return -EBUSY;
127 + }
128 + }
129 +@@ -935,6 +937,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
130 + tc, sizeof (struct fs_transmit_config));
131 + if (!tc) {
132 + fs_dprintk (FS_DEBUG_OPEN, "fs: can't alloc transmit_config.\n");
133 ++ kfree(vcc);
134 + return -ENOMEM;
135 + }
136 +
137 +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
138 +index 3d4f5775a4ba..25235ef630c1 100644
139 +--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
140 ++++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
141 +@@ -9,16 +9,16 @@
142 + #include "i915_gem_ioctls.h"
143 + #include "i915_gem_object.h"
144 +
145 +-static __always_inline u32 __busy_read_flag(u8 id)
146 ++static __always_inline u32 __busy_read_flag(u16 id)
147 + {
148 +- if (id == (u8)I915_ENGINE_CLASS_INVALID)
149 ++ if (id == (u16)I915_ENGINE_CLASS_INVALID)
150 + return 0xffff0000u;
151 +
152 + GEM_BUG_ON(id >= 16);
153 + return 0x10000u << id;
154 + }
155 +
156 +-static __always_inline u32 __busy_write_id(u8 id)
157 ++static __always_inline u32 __busy_write_id(u16 id)
158 + {
159 + /*
160 + * The uABI guarantees an active writer is also amongst the read
161 +@@ -29,14 +29,14 @@ static __always_inline u32 __busy_write_id(u8 id)
162 + * last_read - hence we always set both read and write busy for
163 + * last_write.
164 + */
165 +- if (id == (u8)I915_ENGINE_CLASS_INVALID)
166 ++ if (id == (u16)I915_ENGINE_CLASS_INVALID)
167 + return 0xffffffffu;
168 +
169 + return (id + 1) | __busy_read_flag(id);
170 + }
171 +
172 + static __always_inline unsigned int
173 +-__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id))
174 ++__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u16 id))
175 + {
176 + const struct i915_request *rq;
177 +
178 +@@ -57,7 +57,7 @@ __busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id))
179 + return 0;
180 +
181 + /* Beware type-expansion follies! */
182 +- BUILD_BUG_ON(!typecheck(u8, rq->engine->uabi_class));
183 ++ BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class));
184 + return flag(rq->engine->uabi_class);
185 + }
186 +
187 +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
188 +index abfbac49b8e8..968d9b2705d0 100644
189 +--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
190 ++++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
191 +@@ -427,7 +427,7 @@ struct get_pages_work {
192 +
193 + static struct sg_table *
194 + __i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
195 +- struct page **pvec, int num_pages)
196 ++ struct page **pvec, unsigned long num_pages)
197 + {
198 + unsigned int max_segment = i915_sg_segment_size();
199 + struct sg_table *st;
200 +@@ -473,9 +473,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
201 + {
202 + struct get_pages_work *work = container_of(_work, typeof(*work), work);
203 + struct drm_i915_gem_object *obj = work->obj;
204 +- const int npages = obj->base.size >> PAGE_SHIFT;
205 ++ const unsigned long npages = obj->base.size >> PAGE_SHIFT;
206 ++ unsigned long pinned;
207 + struct page **pvec;
208 +- int pinned, ret;
209 ++ int ret;
210 +
211 + ret = -ENOMEM;
212 + pinned = 0;
213 +@@ -578,7 +579,7 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
214 +
215 + static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
216 + {
217 +- const int num_pages = obj->base.size >> PAGE_SHIFT;
218 ++ const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
219 + struct mm_struct *mm = obj->userptr.mm->mm;
220 + struct page **pvec;
221 + struct sg_table *pages;
222 +diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
223 +index 9dd8c299cb2d..798e1b024406 100644
224 +--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
225 ++++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
226 +@@ -300,8 +300,8 @@ struct intel_engine_cs {
227 + u8 class;
228 + u8 instance;
229 +
230 +- u8 uabi_class;
231 +- u8 uabi_instance;
232 ++ u16 uabi_class;
233 ++ u16 uabi_instance;
234 +
235 + u32 context_size;
236 + u32 mmio_base;
237 +diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
238 +index b1a7a8b9b46a..f614646ed3f9 100644
239 +--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
240 ++++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
241 +@@ -1178,6 +1178,7 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
242 + pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
243 + vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
244 + do {
245 ++ GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE);
246 + vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
247 +
248 + iter->dma += I915_GTT_PAGE_SIZE;
249 +@@ -1657,6 +1658,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
250 +
251 + vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
252 + do {
253 ++ GEM_BUG_ON(iter.sg->length < I915_GTT_PAGE_SIZE);
254 + vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
255 +
256 + iter.dma += I915_GTT_PAGE_SIZE;
257 +diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
258 +index 1c67ac434e10..5906c80c4b2c 100644
259 +--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
260 ++++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
261 +@@ -78,8 +78,10 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct
262 + static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
263 + struct drm_file *file)
264 + {
265 ++ struct panfrost_file_priv *priv = file->driver_priv;
266 + struct panfrost_gem_object *bo;
267 + struct drm_panfrost_create_bo *args = data;
268 ++ struct panfrost_gem_mapping *mapping;
269 +
270 + if (!args->size || args->pad ||
271 + (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
272 +@@ -95,7 +97,14 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
273 + if (IS_ERR(bo))
274 + return PTR_ERR(bo);
275 +
276 +- args->offset = bo->node.start << PAGE_SHIFT;
277 ++ mapping = panfrost_gem_mapping_get(bo, priv);
278 ++ if (!mapping) {
279 ++ drm_gem_object_put_unlocked(&bo->base.base);
280 ++ return -EINVAL;
281 ++ }
282 ++
283 ++ args->offset = mapping->mmnode.start << PAGE_SHIFT;
284 ++ panfrost_gem_mapping_put(mapping);
285 +
286 + return 0;
287 + }
288 +@@ -119,6 +128,11 @@ panfrost_lookup_bos(struct drm_device *dev,
289 + struct drm_panfrost_submit *args,
290 + struct panfrost_job *job)
291 + {
292 ++ struct panfrost_file_priv *priv = file_priv->driver_priv;
293 ++ struct panfrost_gem_object *bo;
294 ++ unsigned int i;
295 ++ int ret;
296 ++
297 + job->bo_count = args->bo_handle_count;
298 +
299 + if (!job->bo_count)
300 +@@ -130,9 +144,32 @@ panfrost_lookup_bos(struct drm_device *dev,
301 + if (!job->implicit_fences)
302 + return -ENOMEM;
303 +
304 +- return drm_gem_objects_lookup(file_priv,
305 +- (void __user *)(uintptr_t)args->bo_handles,
306 +- job->bo_count, &job->bos);
307 ++ ret = drm_gem_objects_lookup(file_priv,
308 ++ (void __user *)(uintptr_t)args->bo_handles,
309 ++ job->bo_count, &job->bos);
310 ++ if (ret)
311 ++ return ret;
312 ++
313 ++ job->mappings = kvmalloc_array(job->bo_count,
314 ++ sizeof(struct panfrost_gem_mapping *),
315 ++ GFP_KERNEL | __GFP_ZERO);
316 ++ if (!job->mappings)
317 ++ return -ENOMEM;
318 ++
319 ++ for (i = 0; i < job->bo_count; i++) {
320 ++ struct panfrost_gem_mapping *mapping;
321 ++
322 ++ bo = to_panfrost_bo(job->bos[i]);
323 ++ mapping = panfrost_gem_mapping_get(bo, priv);
324 ++ if (!mapping) {
325 ++ ret = -EINVAL;
326 ++ break;
327 ++ }
328 ++
329 ++ job->mappings[i] = mapping;
330 ++ }
331 ++
332 ++ return ret;
333 + }
334 +
335 + /**
336 +@@ -320,7 +357,9 @@ out:
337 + static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
338 + struct drm_file *file_priv)
339 + {
340 ++ struct panfrost_file_priv *priv = file_priv->driver_priv;
341 + struct drm_panfrost_get_bo_offset *args = data;
342 ++ struct panfrost_gem_mapping *mapping;
343 + struct drm_gem_object *gem_obj;
344 + struct panfrost_gem_object *bo;
345 +
346 +@@ -331,18 +370,26 @@ static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
347 + }
348 + bo = to_panfrost_bo(gem_obj);
349 +
350 +- args->offset = bo->node.start << PAGE_SHIFT;
351 +-
352 ++ mapping = panfrost_gem_mapping_get(bo, priv);
353 + drm_gem_object_put_unlocked(gem_obj);
354 ++
355 ++ if (!mapping)
356 ++ return -EINVAL;
357 ++
358 ++ args->offset = mapping->mmnode.start << PAGE_SHIFT;
359 ++ panfrost_gem_mapping_put(mapping);
360 + return 0;
361 + }
362 +
363 + static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
364 + struct drm_file *file_priv)
365 + {
366 ++ struct panfrost_file_priv *priv = file_priv->driver_priv;
367 + struct drm_panfrost_madvise *args = data;
368 + struct panfrost_device *pfdev = dev->dev_private;
369 + struct drm_gem_object *gem_obj;
370 ++ struct panfrost_gem_object *bo;
371 ++ int ret = 0;
372 +
373 + gem_obj = drm_gem_object_lookup(file_priv, args->handle);
374 + if (!gem_obj) {
375 +@@ -350,22 +397,48 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
376 + return -ENOENT;
377 + }
378 +
379 ++ bo = to_panfrost_bo(gem_obj);
380 ++
381 + mutex_lock(&pfdev->shrinker_lock);
382 ++ mutex_lock(&bo->mappings.lock);
383 ++ if (args->madv == PANFROST_MADV_DONTNEED) {
384 ++ struct panfrost_gem_mapping *first;
385 ++
386 ++ first = list_first_entry(&bo->mappings.list,
387 ++ struct panfrost_gem_mapping,
388 ++ node);
389 ++
390 ++ /*
391 ++ * If we want to mark the BO purgeable, there must be only one
392 ++ * user: the caller FD.
393 ++ * We could do something smarter and mark the BO purgeable only
394 ++ * when all its users have marked it purgeable, but globally
395 ++ * visible/shared BOs are likely to never be marked purgeable
396 ++ * anyway, so let's not bother.
397 ++ */
398 ++ if (!list_is_singular(&bo->mappings.list) ||
399 ++ WARN_ON_ONCE(first->mmu != &priv->mmu)) {
400 ++ ret = -EINVAL;
401 ++ goto out_unlock_mappings;
402 ++ }
403 ++ }
404 ++
405 + args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);
406 +
407 + if (args->retained) {
408 +- struct panfrost_gem_object *bo = to_panfrost_bo(gem_obj);
409 +-
410 + if (args->madv == PANFROST_MADV_DONTNEED)
411 + list_add_tail(&bo->base.madv_list,
412 + &pfdev->shrinker_list);
413 + else if (args->madv == PANFROST_MADV_WILLNEED)
414 + list_del_init(&bo->base.madv_list);
415 + }
416 ++
417 ++out_unlock_mappings:
418 ++ mutex_unlock(&bo->mappings.lock);
419 + mutex_unlock(&pfdev->shrinker_lock);
420 +
421 + drm_gem_object_put_unlocked(gem_obj);
422 +- return 0;
423 ++ return ret;
424 + }
425 +
426 + int panfrost_unstable_ioctl_check(void)
427 +diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
428 +index 92a95210a899..77c3a3855c68 100644
429 +--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
430 ++++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
431 +@@ -29,6 +29,12 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
432 + list_del_init(&bo->base.madv_list);
433 + mutex_unlock(&pfdev->shrinker_lock);
434 +
435 ++ /*
436 ++ * If we still have mappings attached to the BO, there's a problem in
437 ++ * our refcounting.
438 ++ */
439 ++ WARN_ON_ONCE(!list_empty(&bo->mappings.list));
440 ++
441 + if (bo->sgts) {
442 + int i;
443 + int n_sgt = bo->base.base.size / SZ_2M;
444 +@@ -46,6 +52,69 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
445 + drm_gem_shmem_free_object(obj);
446 + }
447 +
448 ++struct panfrost_gem_mapping *
449 ++panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
450 ++ struct panfrost_file_priv *priv)
451 ++{
452 ++ struct panfrost_gem_mapping *iter, *mapping = NULL;
453 ++
454 ++ mutex_lock(&bo->mappings.lock);
455 ++ list_for_each_entry(iter, &bo->mappings.list, node) {
456 ++ if (iter->mmu == &priv->mmu) {
457 ++ kref_get(&iter->refcount);
458 ++ mapping = iter;
459 ++ break;
460 ++ }
461 ++ }
462 ++ mutex_unlock(&bo->mappings.lock);
463 ++
464 ++ return mapping;
465 ++}
466 ++
467 ++static void
468 ++panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
469 ++{
470 ++ struct panfrost_file_priv *priv;
471 ++
472 ++ if (mapping->active)
473 ++ panfrost_mmu_unmap(mapping);
474 ++
475 ++ priv = container_of(mapping->mmu, struct panfrost_file_priv, mmu);
476 ++ spin_lock(&priv->mm_lock);
477 ++ if (drm_mm_node_allocated(&mapping->mmnode))
478 ++ drm_mm_remove_node(&mapping->mmnode);
479 ++ spin_unlock(&priv->mm_lock);
480 ++}
481 ++
482 ++static void panfrost_gem_mapping_release(struct kref *kref)
483 ++{
484 ++ struct panfrost_gem_mapping *mapping;
485 ++
486 ++ mapping = container_of(kref, struct panfrost_gem_mapping, refcount);
487 ++
488 ++ panfrost_gem_teardown_mapping(mapping);
489 ++ drm_gem_object_put_unlocked(&mapping->obj->base.base);
490 ++ kfree(mapping);
491 ++}
492 ++
493 ++void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
494 ++{
495 ++ if (!mapping)
496 ++ return;
497 ++
498 ++ kref_put(&mapping->refcount, panfrost_gem_mapping_release);
499 ++}
500 ++
501 ++void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo)
502 ++{
503 ++ struct panfrost_gem_mapping *mapping;
504 ++
505 ++ mutex_lock(&bo->mappings.lock);
506 ++ list_for_each_entry(mapping, &bo->mappings.list, node)
507 ++ panfrost_gem_teardown_mapping(mapping);
508 ++ mutex_unlock(&bo->mappings.lock);
509 ++}
510 ++
511 + int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
512 + {
513 + int ret;
514 +@@ -54,6 +123,16 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
515 + struct panfrost_gem_object *bo = to_panfrost_bo(obj);
516 + unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
517 + struct panfrost_file_priv *priv = file_priv->driver_priv;
518 ++ struct panfrost_gem_mapping *mapping;
519 ++
520 ++ mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
521 ++ if (!mapping)
522 ++ return -ENOMEM;
523 ++
524 ++ INIT_LIST_HEAD(&mapping->node);
525 ++ kref_init(&mapping->refcount);
526 ++ drm_gem_object_get(obj);
527 ++ mapping->obj = bo;
528 +
529 + /*
530 + * Executable buffers cannot cross a 16MB boundary as the program
531 +@@ -66,37 +145,48 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
532 + else
533 + align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;
534 +
535 +- bo->mmu = &priv->mmu;
536 ++ mapping->mmu = &priv->mmu;
537 + spin_lock(&priv->mm_lock);
538 +- ret = drm_mm_insert_node_generic(&priv->mm, &bo->node,
539 ++ ret = drm_mm_insert_node_generic(&priv->mm, &mapping->mmnode,
540 + size >> PAGE_SHIFT, align, color, 0);
541 + spin_unlock(&priv->mm_lock);
542 + if (ret)
543 +- return ret;
544 ++ goto err;
545 +
546 + if (!bo->is_heap) {
547 +- ret = panfrost_mmu_map(bo);
548 +- if (ret) {
549 +- spin_lock(&priv->mm_lock);
550 +- drm_mm_remove_node(&bo->node);
551 +- spin_unlock(&priv->mm_lock);
552 +- }
553 ++ ret = panfrost_mmu_map(mapping);
554 ++ if (ret)
555 ++ goto err;
556 + }
557 ++
558 ++ mutex_lock(&bo->mappings.lock);
559 ++ WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED);
560 ++ list_add_tail(&mapping->node, &bo->mappings.list);
561 ++ mutex_unlock(&bo->mappings.lock);
562 ++
563 ++err:
564 ++ if (ret)
565 ++ panfrost_gem_mapping_put(mapping);
566 + return ret;
567 + }
568 +
569 + void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
570 + {
571 +- struct panfrost_gem_object *bo = to_panfrost_bo(obj);
572 + struct panfrost_file_priv *priv = file_priv->driver_priv;
573 ++ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
574 ++ struct panfrost_gem_mapping *mapping = NULL, *iter;
575 +
576 +- if (bo->is_mapped)
577 +- panfrost_mmu_unmap(bo);
578 ++ mutex_lock(&bo->mappings.lock);
579 ++ list_for_each_entry(iter, &bo->mappings.list, node) {
580 ++ if (iter->mmu == &priv->mmu) {
581 ++ mapping = iter;
582 ++ list_del(&iter->node);
583 ++ break;
584 ++ }
585 ++ }
586 ++ mutex_unlock(&bo->mappings.lock);
587 +
588 +- spin_lock(&priv->mm_lock);
589 +- if (drm_mm_node_allocated(&bo->node))
590 +- drm_mm_remove_node(&bo->node);
591 +- spin_unlock(&priv->mm_lock);
592 ++ panfrost_gem_mapping_put(mapping);
593 + }
594 +
595 + static int panfrost_gem_pin(struct drm_gem_object *obj)
596 +@@ -136,6 +226,8 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
597 + if (!obj)
598 + return NULL;
599 +
600 ++ INIT_LIST_HEAD(&obj->mappings.list);
601 ++ mutex_init(&obj->mappings.lock);
602 + obj->base.base.funcs = &panfrost_gem_funcs;
603 +
604 + return &obj->base.base;
605 +diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
606 +index 4b17e7308764..ca1bc9019600 100644
607 +--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
608 ++++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
609 +@@ -13,23 +13,46 @@ struct panfrost_gem_object {
610 + struct drm_gem_shmem_object base;
611 + struct sg_table *sgts;
612 +
613 +- struct panfrost_mmu *mmu;
614 +- struct drm_mm_node node;
615 +- bool is_mapped :1;
616 ++ /*
617 ++ * Use a list for now. If searching a mapping ever becomes the
618 ++ * bottleneck, we should consider using an RB-tree, or even better,
619 ++ * let the core store drm_gem_object_mapping entries (where we
620 ++ * could place driver specific data) instead of drm_gem_object ones
621 ++ * in its drm_file->object_idr table.
622 ++ *
623 ++ * struct drm_gem_object_mapping {
624 ++ * struct drm_gem_object *obj;
625 ++ * void *driver_priv;
626 ++ * };
627 ++ */
628 ++ struct {
629 ++ struct list_head list;
630 ++ struct mutex lock;
631 ++ } mappings;
632 ++
633 + bool noexec :1;
634 + bool is_heap :1;
635 + };
636 +
637 ++struct panfrost_gem_mapping {
638 ++ struct list_head node;
639 ++ struct kref refcount;
640 ++ struct panfrost_gem_object *obj;
641 ++ struct drm_mm_node mmnode;
642 ++ struct panfrost_mmu *mmu;
643 ++ bool active :1;
644 ++};
645 ++
646 + static inline
647 + struct panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj)
648 + {
649 + return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base);
650 + }
651 +
652 +-static inline
653 +-struct panfrost_gem_object *drm_mm_node_to_panfrost_bo(struct drm_mm_node *node)
654 ++static inline struct panfrost_gem_mapping *
655 ++drm_mm_node_to_panfrost_mapping(struct drm_mm_node *node)
656 + {
657 +- return container_of(node, struct panfrost_gem_object, node);
658 ++ return container_of(node, struct panfrost_gem_mapping, mmnode);
659 + }
660 +
661 + struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size);
662 +@@ -49,6 +72,12 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv);
663 + void panfrost_gem_close(struct drm_gem_object *obj,
664 + struct drm_file *file_priv);
665 +
666 ++struct panfrost_gem_mapping *
667 ++panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
668 ++ struct panfrost_file_priv *priv);
669 ++void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping);
670 ++void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo);
671 ++
672 + void panfrost_gem_shrinker_init(struct drm_device *dev);
673 + void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
674 +
675 +diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
676 +index 458f0fa68111..f5dd7b29bc95 100644
677 +--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
678 ++++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
679 +@@ -39,11 +39,12 @@ panfrost_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc
680 + static bool panfrost_gem_purge(struct drm_gem_object *obj)
681 + {
682 + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
683 ++ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
684 +
685 + if (!mutex_trylock(&shmem->pages_lock))
686 + return false;
687 +
688 +- panfrost_mmu_unmap(to_panfrost_bo(obj));
689 ++ panfrost_gem_teardown_mappings(bo);
690 + drm_gem_shmem_purge_locked(obj);
691 +
692 + mutex_unlock(&shmem->pages_lock);
693 +diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
694 +index 21f34d44aac2..bbb0c5e3ca6f 100644
695 +--- a/drivers/gpu/drm/panfrost/panfrost_job.c
696 ++++ b/drivers/gpu/drm/panfrost/panfrost_job.c
697 +@@ -269,9 +269,20 @@ static void panfrost_job_cleanup(struct kref *ref)
698 + dma_fence_put(job->done_fence);
699 + dma_fence_put(job->render_done_fence);
700 +
701 +- if (job->bos) {
702 ++ if (job->mappings) {
703 + for (i = 0; i < job->bo_count; i++)
704 ++ panfrost_gem_mapping_put(job->mappings[i]);
705 ++ kvfree(job->mappings);
706 ++ }
707 ++
708 ++ if (job->bos) {
709 ++ struct panfrost_gem_object *bo;
710 ++
711 ++ for (i = 0; i < job->bo_count; i++) {
712 ++ bo = to_panfrost_bo(job->bos[i]);
713 + drm_gem_object_put_unlocked(job->bos[i]);
714 ++ }
715 ++
716 + kvfree(job->bos);
717 + }
718 +
719 +diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h
720 +index 62454128a792..bbd3ba97ff67 100644
721 +--- a/drivers/gpu/drm/panfrost/panfrost_job.h
722 ++++ b/drivers/gpu/drm/panfrost/panfrost_job.h
723 +@@ -32,6 +32,7 @@ struct panfrost_job {
724 +
725 + /* Exclusive fences we have taken from the BOs to wait for */
726 + struct dma_fence **implicit_fences;
727 ++ struct panfrost_gem_mapping **mappings;
728 + struct drm_gem_object **bos;
729 + u32 bo_count;
730 +
731 +diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
732 +index a3ed64a1f15e..763cfca886a7 100644
733 +--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
734 ++++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
735 +@@ -269,14 +269,15 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
736 + return 0;
737 + }
738 +
739 +-int panfrost_mmu_map(struct panfrost_gem_object *bo)
740 ++int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
741 + {
742 ++ struct panfrost_gem_object *bo = mapping->obj;
743 + struct drm_gem_object *obj = &bo->base.base;
744 + struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
745 + struct sg_table *sgt;
746 + int prot = IOMMU_READ | IOMMU_WRITE;
747 +
748 +- if (WARN_ON(bo->is_mapped))
749 ++ if (WARN_ON(mapping->active))
750 + return 0;
751 +
752 + if (bo->noexec)
753 +@@ -286,25 +287,28 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo)
754 + if (WARN_ON(IS_ERR(sgt)))
755 + return PTR_ERR(sgt);
756 +
757 +- mmu_map_sg(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, prot, sgt);
758 +- bo->is_mapped = true;
759 ++ mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
760 ++ prot, sgt);
761 ++ mapping->active = true;
762 +
763 + return 0;
764 + }
765 +
766 +-void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
767 ++void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
768 + {
769 ++ struct panfrost_gem_object *bo = mapping->obj;
770 + struct drm_gem_object *obj = &bo->base.base;
771 + struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
772 +- struct io_pgtable_ops *ops = bo->mmu->pgtbl_ops;
773 +- u64 iova = bo->node.start << PAGE_SHIFT;
774 +- size_t len = bo->node.size << PAGE_SHIFT;
775 ++ struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops;
776 ++ u64 iova = mapping->mmnode.start << PAGE_SHIFT;
777 ++ size_t len = mapping->mmnode.size << PAGE_SHIFT;
778 + size_t unmapped_len = 0;
779 +
780 +- if (WARN_ON(!bo->is_mapped))
781 ++ if (WARN_ON(!mapping->active))
782 + return;
783 +
784 +- dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", bo->mmu->as, iova, len);
785 ++ dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx",
786 ++ mapping->mmu->as, iova, len);
787 +
788 + while (unmapped_len < len) {
789 + size_t unmapped_page;
790 +@@ -318,8 +322,9 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
791 + unmapped_len += pgsize;
792 + }
793 +
794 +- panfrost_mmu_flush_range(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, len);
795 +- bo->is_mapped = false;
796 ++ panfrost_mmu_flush_range(pfdev, mapping->mmu,
797 ++ mapping->mmnode.start << PAGE_SHIFT, len);
798 ++ mapping->active = false;
799 + }
800 +
801 + static void mmu_tlb_inv_context_s1(void *cookie)
802 +@@ -394,10 +399,10 @@ void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv)
803 + free_io_pgtable_ops(mmu->pgtbl_ops);
804 + }
805 +
806 +-static struct panfrost_gem_object *
807 +-addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
808 ++static struct panfrost_gem_mapping *
809 ++addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
810 + {
811 +- struct panfrost_gem_object *bo = NULL;
812 ++ struct panfrost_gem_mapping *mapping = NULL;
813 + struct panfrost_file_priv *priv;
814 + struct drm_mm_node *node;
815 + u64 offset = addr >> PAGE_SHIFT;
816 +@@ -418,8 +423,9 @@ found_mmu:
817 + drm_mm_for_each_node(node, &priv->mm) {
818 + if (offset >= node->start &&
819 + offset < (node->start + node->size)) {
820 +- bo = drm_mm_node_to_panfrost_bo(node);
821 +- drm_gem_object_get(&bo->base.base);
822 ++ mapping = drm_mm_node_to_panfrost_mapping(node);
823 ++
824 ++ kref_get(&mapping->refcount);
825 + break;
826 + }
827 + }
828 +@@ -427,7 +433,7 @@ found_mmu:
829 + spin_unlock(&priv->mm_lock);
830 + out:
831 + spin_unlock(&pfdev->as_lock);
832 +- return bo;
833 ++ return mapping;
834 + }
835 +
836 + #define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
837 +@@ -436,28 +442,30 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
838 + u64 addr)
839 + {
840 + int ret, i;
841 ++ struct panfrost_gem_mapping *bomapping;
842 + struct panfrost_gem_object *bo;
843 + struct address_space *mapping;
844 + pgoff_t page_offset;
845 + struct sg_table *sgt;
846 + struct page **pages;
847 +
848 +- bo = addr_to_drm_mm_node(pfdev, as, addr);
849 +- if (!bo)
850 ++ bomapping = addr_to_mapping(pfdev, as, addr);
851 ++ if (!bomapping)
852 + return -ENOENT;
853 +
854 ++ bo = bomapping->obj;
855 + if (!bo->is_heap) {
856 + dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
857 +- bo->node.start << PAGE_SHIFT);
858 ++ bomapping->mmnode.start << PAGE_SHIFT);
859 + ret = -EINVAL;
860 + goto err_bo;
861 + }
862 +- WARN_ON(bo->mmu->as != as);
863 ++ WARN_ON(bomapping->mmu->as != as);
864 +
865 + /* Assume 2MB alignment and size multiple */
866 + addr &= ~((u64)SZ_2M - 1);
867 + page_offset = addr >> PAGE_SHIFT;
868 +- page_offset -= bo->node.start;
869 ++ page_offset -= bomapping->mmnode.start;
870 +
871 + mutex_lock(&bo->base.pages_lock);
872 +
873 +@@ -509,13 +517,14 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
874 + goto err_map;
875 + }
876 +
877 +- mmu_map_sg(pfdev, bo->mmu, addr, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
878 ++ mmu_map_sg(pfdev, bomapping->mmu, addr,
879 ++ IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
880 +
881 +- bo->is_mapped = true;
882 ++ bomapping->active = true;
883 +
884 + dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
885 +
886 +- drm_gem_object_put_unlocked(&bo->base.base);
887 ++ panfrost_gem_mapping_put(bomapping);
888 +
889 + return 0;
890 +
891 +diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.h b/drivers/gpu/drm/panfrost/panfrost_mmu.h
892 +index 7c5b6775ae23..44fc2edf63ce 100644
893 +--- a/drivers/gpu/drm/panfrost/panfrost_mmu.h
894 ++++ b/drivers/gpu/drm/panfrost/panfrost_mmu.h
895 +@@ -4,12 +4,12 @@
896 + #ifndef __PANFROST_MMU_H__
897 + #define __PANFROST_MMU_H__
898 +
899 +-struct panfrost_gem_object;
900 ++struct panfrost_gem_mapping;
901 + struct panfrost_file_priv;
902 + struct panfrost_mmu;
903 +
904 +-int panfrost_mmu_map(struct panfrost_gem_object *bo);
905 +-void panfrost_mmu_unmap(struct panfrost_gem_object *bo);
906 ++int panfrost_mmu_map(struct panfrost_gem_mapping *mapping);
907 ++void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping);
908 +
909 + int panfrost_mmu_init(struct panfrost_device *pfdev);
910 + void panfrost_mmu_fini(struct panfrost_device *pfdev);
911 +diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
912 +index 2c04e858c50a..684820448be3 100644
913 +--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
914 ++++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
915 +@@ -25,7 +25,7 @@
916 + #define V4_SHADERS_PER_COREGROUP 4
917 +
918 + struct panfrost_perfcnt {
919 +- struct panfrost_gem_object *bo;
920 ++ struct panfrost_gem_mapping *mapping;
921 + size_t bosize;
922 + void *buf;
923 + struct panfrost_file_priv *user;
924 +@@ -49,7 +49,7 @@ static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev)
925 + int ret;
926 +
927 + reinit_completion(&pfdev->perfcnt->dump_comp);
928 +- gpuva = pfdev->perfcnt->bo->node.start << PAGE_SHIFT;
929 ++ gpuva = pfdev->perfcnt->mapping->mmnode.start << PAGE_SHIFT;
930 + gpu_write(pfdev, GPU_PERFCNT_BASE_LO, gpuva);
931 + gpu_write(pfdev, GPU_PERFCNT_BASE_HI, gpuva >> 32);
932 + gpu_write(pfdev, GPU_INT_CLEAR,
933 +@@ -89,17 +89,22 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
934 + if (IS_ERR(bo))
935 + return PTR_ERR(bo);
936 +
937 +- perfcnt->bo = to_panfrost_bo(&bo->base);
938 +-
939 + /* Map the perfcnt buf in the address space attached to file_priv. */
940 +- ret = panfrost_gem_open(&perfcnt->bo->base.base, file_priv);
941 ++ ret = panfrost_gem_open(&bo->base, file_priv);
942 + if (ret)
943 + goto err_put_bo;
944 +
945 ++ perfcnt->mapping = panfrost_gem_mapping_get(to_panfrost_bo(&bo->base),
946 ++ user);
947 ++ if (!perfcnt->mapping) {
948 ++ ret = -EINVAL;
949 ++ goto err_close_bo;
950 ++ }
951 ++
952 + perfcnt->buf = drm_gem_shmem_vmap(&bo->base);
953 + if (IS_ERR(perfcnt->buf)) {
954 + ret = PTR_ERR(perfcnt->buf);
955 +- goto err_close_bo;
956 ++ goto err_put_mapping;
957 + }
958 +
959 + /*
960 +@@ -154,12 +159,17 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
961 + if (panfrost_has_hw_issue(pfdev, HW_ISSUE_8186))
962 + gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0xffffffff);
963 +
964 ++ /* The BO ref is retained by the mapping. */
965 ++ drm_gem_object_put_unlocked(&bo->base);
966 ++
967 + return 0;
968 +
969 + err_vunmap:
970 +- drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
971 ++ drm_gem_shmem_vunmap(&bo->base, perfcnt->buf);
972 ++err_put_mapping:
973 ++ panfrost_gem_mapping_put(perfcnt->mapping);
974 + err_close_bo:
975 +- panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
976 ++ panfrost_gem_close(&bo->base, file_priv);
977 + err_put_bo:
978 + drm_gem_object_put_unlocked(&bo->base);
979 + return ret;
980 +@@ -182,11 +192,11 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
981 + GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));
982 +
983 + perfcnt->user = NULL;
984 +- drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
985 ++ drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, perfcnt->buf);
986 + perfcnt->buf = NULL;
987 +- panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
988 +- drm_gem_object_put_unlocked(&perfcnt->bo->base.base);
989 +- perfcnt->bo = NULL;
990 ++ panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
991 ++ panfrost_gem_mapping_put(perfcnt->mapping);
992 ++ perfcnt->mapping = NULL;
993 + pm_runtime_mark_last_busy(pfdev->dev);
994 + pm_runtime_put_autosuspend(pfdev->dev);
995 +
996 +diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
997 +index 6c64d50c9aae..01c2eeb02aa9 100644
998 +--- a/drivers/hwmon/adt7475.c
999 ++++ b/drivers/hwmon/adt7475.c
1000 +@@ -294,9 +294,10 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn)
1001 + long reg;
1002 +
1003 + if (bypass_attn & (1 << channel))
1004 +- reg = (volt * 1024) / 2250;
1005 ++ reg = DIV_ROUND_CLOSEST(volt * 1024, 2250);
1006 + else
1007 +- reg = (volt * r[1] * 1024) / ((r[0] + r[1]) * 2250);
1008 ++ reg = DIV_ROUND_CLOSEST(volt * r[1] * 1024,
1009 ++ (r[0] + r[1]) * 2250);
1010 + return clamp_val(reg, 0, 1023) & (0xff << 2);
1011 + }
1012 +
1013 +diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
1014 +index 1f3b30b085b9..d018b20089ec 100644
1015 +--- a/drivers/hwmon/hwmon.c
1016 ++++ b/drivers/hwmon/hwmon.c
1017 +@@ -51,6 +51,7 @@ struct hwmon_device_attribute {
1018 +
1019 + #define to_hwmon_attr(d) \
1020 + container_of(d, struct hwmon_device_attribute, dev_attr)
1021 ++#define to_dev_attr(a) container_of(a, struct device_attribute, attr)
1022 +
1023 + /*
1024 + * Thermal zone information
1025 +@@ -58,7 +59,7 @@ struct hwmon_device_attribute {
1026 + * also provides the sensor index.
1027 + */
1028 + struct hwmon_thermal_data {
1029 +- struct hwmon_device *hwdev; /* Reference to hwmon device */
1030 ++ struct device *dev; /* Reference to hwmon device */
1031 + int index; /* sensor index */
1032 + };
1033 +
1034 +@@ -95,9 +96,27 @@ static const struct attribute_group *hwmon_dev_attr_groups[] = {
1035 + NULL
1036 + };
1037 +
1038 ++static void hwmon_free_attrs(struct attribute **attrs)
1039 ++{
1040 ++ int i;
1041 ++
1042 ++ for (i = 0; attrs[i]; i++) {
1043 ++ struct device_attribute *dattr = to_dev_attr(attrs[i]);
1044 ++ struct hwmon_device_attribute *hattr = to_hwmon_attr(dattr);
1045 ++
1046 ++ kfree(hattr);
1047 ++ }
1048 ++ kfree(attrs);
1049 ++}
1050 ++
1051 + static void hwmon_dev_release(struct device *dev)
1052 + {
1053 +- kfree(to_hwmon_device(dev));
1054 ++ struct hwmon_device *hwdev = to_hwmon_device(dev);
1055 ++
1056 ++ if (hwdev->group.attrs)
1057 ++ hwmon_free_attrs(hwdev->group.attrs);
1058 ++ kfree(hwdev->groups);
1059 ++ kfree(hwdev);
1060 + }
1061 +
1062 + static struct class hwmon_class = {
1063 +@@ -119,11 +138,11 @@ static DEFINE_IDA(hwmon_ida);
1064 + static int hwmon_thermal_get_temp(void *data, int *temp)
1065 + {
1066 + struct hwmon_thermal_data *tdata = data;
1067 +- struct hwmon_device *hwdev = tdata->hwdev;
1068 ++ struct hwmon_device *hwdev = to_hwmon_device(tdata->dev);
1069 + int ret;
1070 + long t;
1071 +
1072 +- ret = hwdev->chip->ops->read(&hwdev->dev, hwmon_temp, hwmon_temp_input,
1073 ++ ret = hwdev->chip->ops->read(tdata->dev, hwmon_temp, hwmon_temp_input,
1074 + tdata->index, &t);
1075 + if (ret < 0)
1076 + return ret;
1077 +@@ -137,8 +156,7 @@ static const struct thermal_zone_of_device_ops hwmon_thermal_ops = {
1078 + .get_temp = hwmon_thermal_get_temp,
1079 + };
1080 +
1081 +-static int hwmon_thermal_add_sensor(struct device *dev,
1082 +- struct hwmon_device *hwdev, int index)
1083 ++static int hwmon_thermal_add_sensor(struct device *dev, int index)
1084 + {
1085 + struct hwmon_thermal_data *tdata;
1086 + struct thermal_zone_device *tzd;
1087 +@@ -147,10 +165,10 @@ static int hwmon_thermal_add_sensor(struct device *dev,
1088 + if (!tdata)
1089 + return -ENOMEM;
1090 +
1091 +- tdata->hwdev = hwdev;
1092 ++ tdata->dev = dev;
1093 + tdata->index = index;
1094 +
1095 +- tzd = devm_thermal_zone_of_sensor_register(&hwdev->dev, index, tdata,
1096 ++ tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata,
1097 + &hwmon_thermal_ops);
1098 + /*
1099 + * If CONFIG_THERMAL_OF is disabled, this returns -ENODEV,
1100 +@@ -162,8 +180,7 @@ static int hwmon_thermal_add_sensor(struct device *dev,
1101 + return 0;
1102 + }
1103 + #else
1104 +-static int hwmon_thermal_add_sensor(struct device *dev,
1105 +- struct hwmon_device *hwdev, int index)
1106 ++static int hwmon_thermal_add_sensor(struct device *dev, int index)
1107 + {
1108 + return 0;
1109 + }
1110 +@@ -250,8 +267,7 @@ static bool is_string_attr(enum hwmon_sensor_types type, u32 attr)
1111 + (type == hwmon_fan && attr == hwmon_fan_label);
1112 + }
1113 +
1114 +-static struct attribute *hwmon_genattr(struct device *dev,
1115 +- const void *drvdata,
1116 ++static struct attribute *hwmon_genattr(const void *drvdata,
1117 + enum hwmon_sensor_types type,
1118 + u32 attr,
1119 + int index,
1120 +@@ -279,7 +295,7 @@ static struct attribute *hwmon_genattr(struct device *dev,
1121 + if ((mode & 0222) && !ops->write)
1122 + return ERR_PTR(-EINVAL);
1123 +
1124 +- hattr = devm_kzalloc(dev, sizeof(*hattr), GFP_KERNEL);
1125 ++ hattr = kzalloc(sizeof(*hattr), GFP_KERNEL);
1126 + if (!hattr)
1127 + return ERR_PTR(-ENOMEM);
1128 +
1129 +@@ -492,8 +508,7 @@ static int hwmon_num_channel_attrs(const struct hwmon_channel_info *info)
1130 + return n;
1131 + }
1132 +
1133 +-static int hwmon_genattrs(struct device *dev,
1134 +- const void *drvdata,
1135 ++static int hwmon_genattrs(const void *drvdata,
1136 + struct attribute **attrs,
1137 + const struct hwmon_ops *ops,
1138 + const struct hwmon_channel_info *info)
1139 +@@ -519,7 +534,7 @@ static int hwmon_genattrs(struct device *dev,
1140 + attr_mask &= ~BIT(attr);
1141 + if (attr >= template_size)
1142 + return -EINVAL;
1143 +- a = hwmon_genattr(dev, drvdata, info->type, attr, i,
1144 ++ a = hwmon_genattr(drvdata, info->type, attr, i,
1145 + templates[attr], ops);
1146 + if (IS_ERR(a)) {
1147 + if (PTR_ERR(a) != -ENOENT)
1148 +@@ -533,8 +548,7 @@ static int hwmon_genattrs(struct device *dev,
1149 + }
1150 +
1151 + static struct attribute **
1152 +-__hwmon_create_attrs(struct device *dev, const void *drvdata,
1153 +- const struct hwmon_chip_info *chip)
1154 ++__hwmon_create_attrs(const void *drvdata, const struct hwmon_chip_info *chip)
1155 + {
1156 + int ret, i, aindex = 0, nattrs = 0;
1157 + struct attribute **attrs;
1158 +@@ -545,15 +559,17 @@ __hwmon_create_attrs(struct device *dev, const void *drvdata,
1159 + if (nattrs == 0)
1160 + return ERR_PTR(-EINVAL);
1161 +
1162 +- attrs = devm_kcalloc(dev, nattrs + 1, sizeof(*attrs), GFP_KERNEL);
1163 ++ attrs = kcalloc(nattrs + 1, sizeof(*attrs), GFP_KERNEL);
1164 + if (!attrs)
1165 + return ERR_PTR(-ENOMEM);
1166 +
1167 + for (i = 0; chip->info[i]; i++) {
1168 +- ret = hwmon_genattrs(dev, drvdata, &attrs[aindex], chip->ops,
1169 ++ ret = hwmon_genattrs(drvdata, &attrs[aindex], chip->ops,
1170 + chip->info[i]);
1171 +- if (ret < 0)
1172 ++ if (ret < 0) {
1173 ++ hwmon_free_attrs(attrs);
1174 + return ERR_PTR(ret);
1175 ++ }
1176 + aindex += ret;
1177 + }
1178 +
1179 +@@ -595,14 +611,13 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
1180 + for (i = 0; groups[i]; i++)
1181 + ngroups++;
1182 +
1183 +- hwdev->groups = devm_kcalloc(dev, ngroups, sizeof(*groups),
1184 +- GFP_KERNEL);
1185 ++ hwdev->groups = kcalloc(ngroups, sizeof(*groups), GFP_KERNEL);
1186 + if (!hwdev->groups) {
1187 + err = -ENOMEM;
1188 + goto free_hwmon;
1189 + }
1190 +
1191 +- attrs = __hwmon_create_attrs(dev, drvdata, chip);
1192 ++ attrs = __hwmon_create_attrs(drvdata, chip);
1193 + if (IS_ERR(attrs)) {
1194 + err = PTR_ERR(attrs);
1195 + goto free_hwmon;
1196 +@@ -647,8 +662,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
1197 + hwmon_temp_input, j))
1198 + continue;
1199 + if (info[i]->config[j] & HWMON_T_INPUT) {
1200 +- err = hwmon_thermal_add_sensor(dev,
1201 +- hwdev, j);
1202 ++ err = hwmon_thermal_add_sensor(hdev, j);
1203 + if (err) {
1204 + device_unregister(hdev);
1205 + /*
1206 +@@ -667,7 +681,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
1207 + return hdev;
1208 +
1209 + free_hwmon:
1210 +- kfree(hwdev);
1211 ++ hwmon_dev_release(hdev);
1212 + ida_remove:
1213 + ida_simple_remove(&hwmon_ida, id);
1214 + return ERR_PTR(err);
1215 +diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
1216 +index f3dd2a17bd42..2e97e56c72c7 100644
1217 +--- a/drivers/hwmon/nct7802.c
1218 ++++ b/drivers/hwmon/nct7802.c
1219 +@@ -23,8 +23,8 @@
1220 + static const u8 REG_VOLTAGE[5] = { 0x09, 0x0a, 0x0c, 0x0d, 0x0e };
1221 +
1222 + static const u8 REG_VOLTAGE_LIMIT_LSB[2][5] = {
1223 +- { 0x40, 0x00, 0x42, 0x44, 0x46 },
1224 +- { 0x3f, 0x00, 0x41, 0x43, 0x45 },
1225 ++ { 0x46, 0x00, 0x40, 0x42, 0x44 },
1226 ++ { 0x45, 0x00, 0x3f, 0x41, 0x43 },
1227 + };
1228 +
1229 + static const u8 REG_VOLTAGE_LIMIT_MSB[5] = { 0x48, 0x00, 0x47, 0x47, 0x48 };
1230 +@@ -58,6 +58,8 @@ static const u8 REG_VOLTAGE_LIMIT_MSB_SHIFT[2][5] = {
1231 + struct nct7802_data {
1232 + struct regmap *regmap;
1233 + struct mutex access_lock; /* for multi-byte read and write operations */
1234 ++ u8 in_status;
1235 ++ struct mutex in_alarm_lock;
1236 + };
1237 +
1238 + static ssize_t temp_type_show(struct device *dev,
1239 +@@ -368,6 +370,66 @@ static ssize_t in_store(struct device *dev, struct device_attribute *attr,
1240 + return err ? : count;
1241 + }
1242 +
1243 ++static ssize_t in_alarm_show(struct device *dev, struct device_attribute *attr,
1244 ++ char *buf)
1245 ++{
1246 ++ struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
1247 ++ struct nct7802_data *data = dev_get_drvdata(dev);
1248 ++ int volt, min, max, ret;
1249 ++ unsigned int val;
1250 ++
1251 ++ mutex_lock(&data->in_alarm_lock);
1252 ++
1253 ++ /*
1254 ++ * The SMI Voltage status register is the only register giving a status
1255 ++ * for voltages. A bit is set for each input crossing a threshold, in
1256 ++ * both direction, but the "inside" or "outside" limits info is not
1257 ++ * available. Also this register is cleared on read.
1258 ++ * Note: this is not explicitly spelled out in the datasheet, but
1259 ++ * from experiment.
1260 ++ * To deal with this we use a status cache with one validity bit and
1261 ++ * one status bit for each input. Validity is cleared at startup and
1262 ++ * each time the register reports a change, and the status is processed
1263 ++ * by software based on current input value and limits.
1264 ++ */
1265 ++ ret = regmap_read(data->regmap, 0x1e, &val); /* SMI Voltage status */
1266 ++ if (ret < 0)
1267 ++ goto abort;
1268 ++
1269 ++ /* invalidate cached status for all inputs crossing a threshold */
1270 ++ data->in_status &= ~((val & 0x0f) << 4);
1271 ++
1272 ++ /* if cached status for requested input is invalid, update it */
1273 ++ if (!(data->in_status & (0x10 << sattr->index))) {
1274 ++ ret = nct7802_read_voltage(data, sattr->nr, 0);
1275 ++ if (ret < 0)
1276 ++ goto abort;
1277 ++ volt = ret;
1278 ++
1279 ++ ret = nct7802_read_voltage(data, sattr->nr, 1);
1280 ++ if (ret < 0)
1281 ++ goto abort;
1282 ++ min = ret;
1283 ++
1284 ++ ret = nct7802_read_voltage(data, sattr->nr, 2);
1285 ++ if (ret < 0)
1286 ++ goto abort;
1287 ++ max = ret;
1288 ++
1289 ++ if (volt < min || volt > max)
1290 ++ data->in_status |= (1 << sattr->index);
1291 ++ else
1292 ++ data->in_status &= ~(1 << sattr->index);
1293 ++
1294 ++ data->in_status |= 0x10 << sattr->index;
1295 ++ }
1296 ++
1297 ++ ret = sprintf(buf, "%u\n", !!(data->in_status & (1 << sattr->index)));
1298 ++abort:
1299 ++ mutex_unlock(&data->in_alarm_lock);
1300 ++ return ret;
1301 ++}
1302 ++
1303 + static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
1304 + char *buf)
1305 + {
1306 +@@ -660,7 +722,7 @@ static const struct attribute_group nct7802_temp_group = {
1307 + static SENSOR_DEVICE_ATTR_2_RO(in0_input, in, 0, 0);
1308 + static SENSOR_DEVICE_ATTR_2_RW(in0_min, in, 0, 1);
1309 + static SENSOR_DEVICE_ATTR_2_RW(in0_max, in, 0, 2);
1310 +-static SENSOR_DEVICE_ATTR_2_RO(in0_alarm, alarm, 0x1e, 3);
1311 ++static SENSOR_DEVICE_ATTR_2_RO(in0_alarm, in_alarm, 0, 3);
1312 + static SENSOR_DEVICE_ATTR_2_RW(in0_beep, beep, 0x5a, 3);
1313 +
1314 + static SENSOR_DEVICE_ATTR_2_RO(in1_input, in, 1, 0);
1315 +@@ -668,19 +730,19 @@ static SENSOR_DEVICE_ATTR_2_RO(in1_input, in, 1, 0);
1316 + static SENSOR_DEVICE_ATTR_2_RO(in2_input, in, 2, 0);
1317 + static SENSOR_DEVICE_ATTR_2_RW(in2_min, in, 2, 1);
1318 + static SENSOR_DEVICE_ATTR_2_RW(in2_max, in, 2, 2);
1319 +-static SENSOR_DEVICE_ATTR_2_RO(in2_alarm, alarm, 0x1e, 0);
1320 ++static SENSOR_DEVICE_ATTR_2_RO(in2_alarm, in_alarm, 2, 0);
1321 + static SENSOR_DEVICE_ATTR_2_RW(in2_beep, beep, 0x5a, 0);
1322 +
1323 + static SENSOR_DEVICE_ATTR_2_RO(in3_input, in, 3, 0);
1324 + static SENSOR_DEVICE_ATTR_2_RW(in3_min, in, 3, 1);
1325 + static SENSOR_DEVICE_ATTR_2_RW(in3_max, in, 3, 2);
1326 +-static SENSOR_DEVICE_ATTR_2_RO(in3_alarm, alarm, 0x1e, 1);
1327 ++static SENSOR_DEVICE_ATTR_2_RO(in3_alarm, in_alarm, 3, 1);
1328 + static SENSOR_DEVICE_ATTR_2_RW(in3_beep, beep, 0x5a, 1);
1329 +
1330 + static SENSOR_DEVICE_ATTR_2_RO(in4_input, in, 4, 0);
1331 + static SENSOR_DEVICE_ATTR_2_RW(in4_min, in, 4, 1);
1332 + static SENSOR_DEVICE_ATTR_2_RW(in4_max, in, 4, 2);
1333 +-static SENSOR_DEVICE_ATTR_2_RO(in4_alarm, alarm, 0x1e, 2);
1334 ++static SENSOR_DEVICE_ATTR_2_RO(in4_alarm, in_alarm, 4, 2);
1335 + static SENSOR_DEVICE_ATTR_2_RW(in4_beep, beep, 0x5a, 2);
1336 +
1337 + static struct attribute *nct7802_in_attrs[] = {
1338 +@@ -1011,6 +1073,7 @@ static int nct7802_probe(struct i2c_client *client,
1339 + return PTR_ERR(data->regmap);
1340 +
1341 + mutex_init(&data->access_lock);
1342 ++ mutex_init(&data->in_alarm_lock);
1343 +
1344 + ret = nct7802_init_chip(data);
1345 + if (ret < 0)
1346 +diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
1347 +index a1a035270cab..b273e421e910 100644
1348 +--- a/drivers/infiniband/ulp/isert/ib_isert.c
1349 ++++ b/drivers/infiniband/ulp/isert/ib_isert.c
1350 +@@ -2575,17 +2575,6 @@ isert_wait4logout(struct isert_conn *isert_conn)
1351 + }
1352 + }
1353 +
1354 +-static void
1355 +-isert_wait4cmds(struct iscsi_conn *conn)
1356 +-{
1357 +- isert_info("iscsi_conn %p\n", conn);
1358 +-
1359 +- if (conn->sess) {
1360 +- target_sess_cmd_list_set_waiting(conn->sess->se_sess);
1361 +- target_wait_for_sess_cmds(conn->sess->se_sess);
1362 +- }
1363 +-}
1364 +-
1365 + /**
1366 + * isert_put_unsol_pending_cmds() - Drop commands waiting for
1367 + * unsolicitate dataout
1368 +@@ -2633,7 +2622,6 @@ static void isert_wait_conn(struct iscsi_conn *conn)
1369 +
1370 + ib_drain_qp(isert_conn->qp);
1371 + isert_put_unsol_pending_cmds(conn);
1372 +- isert_wait4cmds(conn);
1373 + isert_wait4logout(isert_conn);
1374 +
1375 + queue_work(isert_release_wq, &isert_conn->release_work);
1376 +diff --git a/drivers/input/misc/keyspan_remote.c b/drivers/input/misc/keyspan_remote.c
1377 +index 83368f1e7c4e..4650f4a94989 100644
1378 +--- a/drivers/input/misc/keyspan_remote.c
1379 ++++ b/drivers/input/misc/keyspan_remote.c
1380 +@@ -336,7 +336,8 @@ static int keyspan_setup(struct usb_device* dev)
1381 + int retval = 0;
1382 +
1383 + retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
1384 +- 0x11, 0x40, 0x5601, 0x0, NULL, 0, 0);
1385 ++ 0x11, 0x40, 0x5601, 0x0, NULL, 0,
1386 ++ USB_CTRL_SET_TIMEOUT);
1387 + if (retval) {
1388 + dev_dbg(&dev->dev, "%s - failed to set bit rate due to error: %d\n",
1389 + __func__, retval);
1390 +@@ -344,7 +345,8 @@ static int keyspan_setup(struct usb_device* dev)
1391 + }
1392 +
1393 + retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
1394 +- 0x44, 0x40, 0x0, 0x0, NULL, 0, 0);
1395 ++ 0x44, 0x40, 0x0, 0x0, NULL, 0,
1396 ++ USB_CTRL_SET_TIMEOUT);
1397 + if (retval) {
1398 + dev_dbg(&dev->dev, "%s - failed to set resume sensitivity due to error: %d\n",
1399 + __func__, retval);
1400 +@@ -352,7 +354,8 @@ static int keyspan_setup(struct usb_device* dev)
1401 + }
1402 +
1403 + retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
1404 +- 0x22, 0x40, 0x0, 0x0, NULL, 0, 0);
1405 ++ 0x22, 0x40, 0x0, 0x0, NULL, 0,
1406 ++ USB_CTRL_SET_TIMEOUT);
1407 + if (retval) {
1408 + dev_dbg(&dev->dev, "%s - failed to turn receive on due to error: %d\n",
1409 + __func__, retval);
1410 +diff --git a/drivers/input/misc/pm8xxx-vibrator.c b/drivers/input/misc/pm8xxx-vibrator.c
1411 +index ecd762f93732..53ad25eaf1a2 100644
1412 +--- a/drivers/input/misc/pm8xxx-vibrator.c
1413 ++++ b/drivers/input/misc/pm8xxx-vibrator.c
1414 +@@ -90,7 +90,7 @@ static int pm8xxx_vib_set(struct pm8xxx_vib *vib, bool on)
1415 +
1416 + if (regs->enable_mask)
1417 + rc = regmap_update_bits(vib->regmap, regs->enable_addr,
1418 +- on ? regs->enable_mask : 0, val);
1419 ++ regs->enable_mask, on ? ~0 : 0);
1420 +
1421 + return rc;
1422 + }
1423 +diff --git a/drivers/input/rmi4/rmi_smbus.c b/drivers/input/rmi4/rmi_smbus.c
1424 +index b313c579914f..2407ea43de59 100644
1425 +--- a/drivers/input/rmi4/rmi_smbus.c
1426 ++++ b/drivers/input/rmi4/rmi_smbus.c
1427 +@@ -163,6 +163,7 @@ static int rmi_smb_write_block(struct rmi_transport_dev *xport, u16 rmiaddr,
1428 + /* prepare to write next block of bytes */
1429 + cur_len -= SMB_MAX_COUNT;
1430 + databuff += SMB_MAX_COUNT;
1431 ++ rmiaddr += SMB_MAX_COUNT;
1432 + }
1433 + exit:
1434 + mutex_unlock(&rmi_smb->page_mutex);
1435 +@@ -214,6 +215,7 @@ static int rmi_smb_read_block(struct rmi_transport_dev *xport, u16 rmiaddr,
1436 + /* prepare to read next block of bytes */
1437 + cur_len -= SMB_MAX_COUNT;
1438 + databuff += SMB_MAX_COUNT;
1439 ++ rmiaddr += SMB_MAX_COUNT;
1440 + }
1441 +
1442 + retval = 0;
1443 +diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
1444 +index 2ca586fb914f..06d0ffef4a17 100644
1445 +--- a/drivers/input/tablet/aiptek.c
1446 ++++ b/drivers/input/tablet/aiptek.c
1447 +@@ -1802,14 +1802,14 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
1448 + input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0);
1449 +
1450 + /* Verify that a device really has an endpoint */
1451 +- if (intf->altsetting[0].desc.bNumEndpoints < 1) {
1452 ++ if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
1453 + dev_err(&intf->dev,
1454 + "interface has %d endpoints, but must have minimum 1\n",
1455 +- intf->altsetting[0].desc.bNumEndpoints);
1456 ++ intf->cur_altsetting->desc.bNumEndpoints);
1457 + err = -EINVAL;
1458 + goto fail3;
1459 + }
1460 +- endpoint = &intf->altsetting[0].endpoint[0].desc;
1461 ++ endpoint = &intf->cur_altsetting->endpoint[0].desc;
1462 +
1463 + /* Go set up our URB, which is called when the tablet receives
1464 + * input.
1465 +diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
1466 +index 35031228a6d0..799c94dda651 100644
1467 +--- a/drivers/input/tablet/gtco.c
1468 ++++ b/drivers/input/tablet/gtco.c
1469 +@@ -875,18 +875,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
1470 + }
1471 +
1472 + /* Sanity check that a device has an endpoint */
1473 +- if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
1474 ++ if (usbinterface->cur_altsetting->desc.bNumEndpoints < 1) {
1475 + dev_err(&usbinterface->dev,
1476 + "Invalid number of endpoints\n");
1477 + error = -EINVAL;
1478 + goto err_free_urb;
1479 + }
1480 +
1481 +- /*
1482 +- * The endpoint is always altsetting 0, we know this since we know
1483 +- * this device only has one interrupt endpoint
1484 +- */
1485 +- endpoint = &usbinterface->altsetting[0].endpoint[0].desc;
1486 ++ endpoint = &usbinterface->cur_altsetting->endpoint[0].desc;
1487 +
1488 + /* Some debug */
1489 + dev_dbg(&usbinterface->dev, "gtco # interfaces: %d\n", usbinterface->num_altsetting);
1490 +@@ -973,7 +969,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
1491 + input_dev->dev.parent = &usbinterface->dev;
1492 +
1493 + /* Setup the URB, it will be posted later on open of input device */
1494 +- endpoint = &usbinterface->altsetting[0].endpoint[0].desc;
1495 ++ endpoint = &usbinterface->cur_altsetting->endpoint[0].desc;
1496 +
1497 + usb_fill_int_urb(gtco->urbinfo,
1498 + udev,
1499 +diff --git a/drivers/input/tablet/pegasus_notetaker.c b/drivers/input/tablet/pegasus_notetaker.c
1500 +index a1f3a0cb197e..38f087404f7a 100644
1501 +--- a/drivers/input/tablet/pegasus_notetaker.c
1502 ++++ b/drivers/input/tablet/pegasus_notetaker.c
1503 +@@ -275,7 +275,7 @@ static int pegasus_probe(struct usb_interface *intf,
1504 + return -ENODEV;
1505 +
1506 + /* Sanity check that the device has an endpoint */
1507 +- if (intf->altsetting[0].desc.bNumEndpoints < 1) {
1508 ++ if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
1509 + dev_err(&intf->dev, "Invalid number of endpoints\n");
1510 + return -EINVAL;
1511 + }
1512 +diff --git a/drivers/input/touchscreen/sun4i-ts.c b/drivers/input/touchscreen/sun4i-ts.c
1513 +index 0af0fe8c40d7..742a7e96c1b5 100644
1514 +--- a/drivers/input/touchscreen/sun4i-ts.c
1515 ++++ b/drivers/input/touchscreen/sun4i-ts.c
1516 +@@ -237,6 +237,7 @@ static int sun4i_ts_probe(struct platform_device *pdev)
1517 + struct device *dev = &pdev->dev;
1518 + struct device_node *np = dev->of_node;
1519 + struct device *hwmon;
1520 ++ struct thermal_zone_device *thermal;
1521 + int error;
1522 + u32 reg;
1523 + bool ts_attached;
1524 +@@ -355,7 +356,10 @@ static int sun4i_ts_probe(struct platform_device *pdev)
1525 + if (IS_ERR(hwmon))
1526 + return PTR_ERR(hwmon);
1527 +
1528 +- devm_thermal_zone_of_sensor_register(ts->dev, 0, ts, &sun4i_ts_tz_ops);
1529 ++ thermal = devm_thermal_zone_of_sensor_register(ts->dev, 0, ts,
1530 ++ &sun4i_ts_tz_ops);
1531 ++ if (IS_ERR(thermal))
1532 ++ return PTR_ERR(thermal);
1533 +
1534 + writel(TEMP_IRQ_EN(1), ts->base + TP_INT_FIFOC);
1535 +
1536 +diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
1537 +index 3fd3e862269b..2e2ea5719c90 100644
1538 +--- a/drivers/input/touchscreen/sur40.c
1539 ++++ b/drivers/input/touchscreen/sur40.c
1540 +@@ -653,7 +653,7 @@ static int sur40_probe(struct usb_interface *interface,
1541 + int error;
1542 +
1543 + /* Check if we really have the right interface. */
1544 +- iface_desc = &interface->altsetting[0];
1545 ++ iface_desc = interface->cur_altsetting;
1546 + if (iface_desc->desc.bInterfaceClass != 0xFF)
1547 + return -ENODEV;
1548 +
1549 +diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
1550 +index 568c52317757..483f7bc379fa 100644
1551 +--- a/drivers/iommu/amd_iommu_init.c
1552 ++++ b/drivers/iommu/amd_iommu_init.c
1553 +@@ -1655,27 +1655,39 @@ static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
1554 + static void init_iommu_perf_ctr(struct amd_iommu *iommu)
1555 + {
1556 + struct pci_dev *pdev = iommu->dev;
1557 +- u64 val = 0xabcd, val2 = 0;
1558 ++ u64 val = 0xabcd, val2 = 0, save_reg = 0;
1559 +
1560 + if (!iommu_feature(iommu, FEATURE_PC))
1561 + return;
1562 +
1563 + amd_iommu_pc_present = true;
1564 +
1565 ++ /* save the value to restore, if writable */
1566 ++ if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false))
1567 ++ goto pc_false;
1568 ++
1569 + /* Check if the performance counters can be written to */
1570 + if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
1571 + (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
1572 +- (val != val2)) {
1573 +- pci_err(pdev, "Unable to write to IOMMU perf counter.\n");
1574 +- amd_iommu_pc_present = false;
1575 +- return;
1576 +- }
1577 ++ (val != val2))
1578 ++ goto pc_false;
1579 ++
1580 ++ /* restore */
1581 ++ if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true))
1582 ++ goto pc_false;
1583 +
1584 + pci_info(pdev, "IOMMU performance counters supported\n");
1585 +
1586 + val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
1587 + iommu->max_banks = (u8) ((val >> 12) & 0x3f);
1588 + iommu->max_counters = (u8) ((val >> 7) & 0xf);
1589 ++
1590 ++ return;
1591 ++
1592 ++pc_false:
1593 ++ pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
1594 ++ amd_iommu_pc_present = false;
1595 ++ return;
1596 + }
1597 +
1598 + static ssize_t amd_iommu_show_cap(struct device *dev,
1599 +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
1600 +index e84c5dfe146f..dd5db856dcaf 100644
1601 +--- a/drivers/iommu/intel-iommu.c
1602 ++++ b/drivers/iommu/intel-iommu.c
1603 +@@ -5132,7 +5132,8 @@ static void dmar_remove_one_dev_info(struct device *dev)
1604 +
1605 + spin_lock_irqsave(&device_domain_lock, flags);
1606 + info = dev->archdata.iommu;
1607 +- if (info)
1608 ++ if (info && info != DEFER_DEVICE_DOMAIN_INFO
1609 ++ && info != DUMMY_DEVICE_DOMAIN_INFO)
1610 + __dmar_remove_one_dev_info(info);
1611 + spin_unlock_irqrestore(&device_domain_lock, flags);
1612 + }
1613 +diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
1614 +index a5c73f3d5f79..2bf74595610f 100644
1615 +--- a/drivers/leds/leds-gpio.c
1616 ++++ b/drivers/leds/leds-gpio.c
1617 +@@ -151,9 +151,14 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
1618 + struct gpio_led led = {};
1619 + const char *state = NULL;
1620 +
1621 ++ /*
1622 ++ * Acquire gpiod from DT with uninitialized label, which
1623 ++ * will be updated after LED class device is registered,
1624 ++ * Only then the final LED name is known.
1625 ++ */
1626 + led.gpiod = devm_fwnode_get_gpiod_from_child(dev, NULL, child,
1627 + GPIOD_ASIS,
1628 +- led.name);
1629 ++ NULL);
1630 + if (IS_ERR(led.gpiod)) {
1631 + fwnode_handle_put(child);
1632 + return ERR_CAST(led.gpiod);
1633 +@@ -186,6 +191,9 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
1634 + fwnode_handle_put(child);
1635 + return ERR_PTR(ret);
1636 + }
1637 ++ /* Set gpiod label to match the corresponding LED name. */
1638 ++ gpiod_set_consumer_name(led_dat->gpiod,
1639 ++ led_dat->cdev.dev->kobj.name);
1640 + priv->num_leds++;
1641 + }
1642 +
1643 +diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
1644 +index 21bb96ce4cd6..58868d7129eb 100644
1645 +--- a/drivers/media/v4l2-core/v4l2-ioctl.c
1646 ++++ b/drivers/media/v4l2-core/v4l2-ioctl.c
1647 +@@ -1605,12 +1605,12 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
1648 + case V4L2_BUF_TYPE_VBI_CAPTURE:
1649 + if (unlikely(!ops->vidioc_s_fmt_vbi_cap))
1650 + break;
1651 +- CLEAR_AFTER_FIELD(p, fmt.vbi);
1652 ++ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
1653 + return ops->vidioc_s_fmt_vbi_cap(file, fh, arg);
1654 + case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
1655 + if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_cap))
1656 + break;
1657 +- CLEAR_AFTER_FIELD(p, fmt.sliced);
1658 ++ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
1659 + return ops->vidioc_s_fmt_sliced_vbi_cap(file, fh, arg);
1660 + case V4L2_BUF_TYPE_VIDEO_OUTPUT:
1661 + if (unlikely(!ops->vidioc_s_fmt_vid_out))
1662 +@@ -1636,22 +1636,22 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
1663 + case V4L2_BUF_TYPE_VBI_OUTPUT:
1664 + if (unlikely(!ops->vidioc_s_fmt_vbi_out))
1665 + break;
1666 +- CLEAR_AFTER_FIELD(p, fmt.vbi);
1667 ++ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
1668 + return ops->vidioc_s_fmt_vbi_out(file, fh, arg);
1669 + case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
1670 + if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_out))
1671 + break;
1672 +- CLEAR_AFTER_FIELD(p, fmt.sliced);
1673 ++ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
1674 + return ops->vidioc_s_fmt_sliced_vbi_out(file, fh, arg);
1675 + case V4L2_BUF_TYPE_SDR_CAPTURE:
1676 + if (unlikely(!ops->vidioc_s_fmt_sdr_cap))
1677 + break;
1678 +- CLEAR_AFTER_FIELD(p, fmt.sdr);
1679 ++ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
1680 + return ops->vidioc_s_fmt_sdr_cap(file, fh, arg);
1681 + case V4L2_BUF_TYPE_SDR_OUTPUT:
1682 + if (unlikely(!ops->vidioc_s_fmt_sdr_out))
1683 + break;
1684 +- CLEAR_AFTER_FIELD(p, fmt.sdr);
1685 ++ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
1686 + return ops->vidioc_s_fmt_sdr_out(file, fh, arg);
1687 + case V4L2_BUF_TYPE_META_CAPTURE:
1688 + if (unlikely(!ops->vidioc_s_fmt_meta_cap))
1689 +@@ -1707,12 +1707,12 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
1690 + case V4L2_BUF_TYPE_VBI_CAPTURE:
1691 + if (unlikely(!ops->vidioc_try_fmt_vbi_cap))
1692 + break;
1693 +- CLEAR_AFTER_FIELD(p, fmt.vbi);
1694 ++ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
1695 + return ops->vidioc_try_fmt_vbi_cap(file, fh, arg);
1696 + case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
1697 + if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_cap))
1698 + break;
1699 +- CLEAR_AFTER_FIELD(p, fmt.sliced);
1700 ++ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
1701 + return ops->vidioc_try_fmt_sliced_vbi_cap(file, fh, arg);
1702 + case V4L2_BUF_TYPE_VIDEO_OUTPUT:
1703 + if (unlikely(!ops->vidioc_try_fmt_vid_out))
1704 +@@ -1738,22 +1738,22 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
1705 + case V4L2_BUF_TYPE_VBI_OUTPUT:
1706 + if (unlikely(!ops->vidioc_try_fmt_vbi_out))
1707 + break;
1708 +- CLEAR_AFTER_FIELD(p, fmt.vbi);
1709 ++ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
1710 + return ops->vidioc_try_fmt_vbi_out(file, fh, arg);
1711 + case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
1712 + if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_out))
1713 + break;
1714 +- CLEAR_AFTER_FIELD(p, fmt.sliced);
1715 ++ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
1716 + return ops->vidioc_try_fmt_sliced_vbi_out(file, fh, arg);
1717 + case V4L2_BUF_TYPE_SDR_CAPTURE:
1718 + if (unlikely(!ops->vidioc_try_fmt_sdr_cap))
1719 + break;
1720 +- CLEAR_AFTER_FIELD(p, fmt.sdr);
1721 ++ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
1722 + return ops->vidioc_try_fmt_sdr_cap(file, fh, arg);
1723 + case V4L2_BUF_TYPE_SDR_OUTPUT:
1724 + if (unlikely(!ops->vidioc_try_fmt_sdr_out))
1725 + break;
1726 +- CLEAR_AFTER_FIELD(p, fmt.sdr);
1727 ++ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
1728 + return ops->vidioc_try_fmt_sdr_out(file, fh, arg);
1729 + case V4L2_BUF_TYPE_META_CAPTURE:
1730 + if (unlikely(!ops->vidioc_try_fmt_meta_cap))
1731 +diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
1732 +index 7bc950520fd9..403ac44a7378 100644
1733 +--- a/drivers/mmc/host/sdhci-tegra.c
1734 ++++ b/drivers/mmc/host/sdhci-tegra.c
1735 +@@ -386,7 +386,7 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
1736 + misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
1737 + if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
1738 + misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
1739 +- if (soc_data->nvquirks & SDHCI_MISC_CTRL_ENABLE_SDR50)
1740 ++ if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
1741 + clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
1742 + }
1743 +
1744 +diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
1745 +index 5f9df2dbde06..4478b94d4791 100644
1746 +--- a/drivers/mmc/host/sdhci.c
1747 ++++ b/drivers/mmc/host/sdhci.c
1748 +@@ -3902,11 +3902,13 @@ int sdhci_setup_host(struct sdhci_host *host)
1749 + if (host->ops->get_min_clock)
1750 + mmc->f_min = host->ops->get_min_clock(host);
1751 + else if (host->version >= SDHCI_SPEC_300) {
1752 +- if (host->clk_mul) {
1753 +- mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
1754 ++ if (host->clk_mul)
1755 + max_clk = host->max_clk * host->clk_mul;
1756 +- } else
1757 +- mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
1758 ++ /*
1759 ++ * Divided Clock Mode minimum clock rate is always less than
1760 ++ * Programmable Clock Mode minimum clock rate.
1761 ++ */
1762 ++ mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
1763 + } else
1764 + mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
1765 +
1766 +diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
1767 +index bb90757ecace..4cbb764c9822 100644
1768 +--- a/drivers/mmc/host/sdhci_am654.c
1769 ++++ b/drivers/mmc/host/sdhci_am654.c
1770 +@@ -236,6 +236,22 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg)
1771 + writeb(val, host->ioaddr + reg);
1772 + }
1773 +
1774 ++static int sdhci_am654_execute_tuning(struct mmc_host *mmc, u32 opcode)
1775 ++{
1776 ++ struct sdhci_host *host = mmc_priv(mmc);
1777 ++ int err = sdhci_execute_tuning(mmc, opcode);
1778 ++
1779 ++ if (err)
1780 ++ return err;
1781 ++ /*
1782 ++ * Tuning data remains in the buffer after tuning.
1783 ++ * Do a command and data reset to get rid of it
1784 ++ */
1785 ++ sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1786 ++
1787 ++ return 0;
1788 ++}
1789 ++
1790 + static struct sdhci_ops sdhci_am654_ops = {
1791 + .get_max_clock = sdhci_pltfm_clk_get_max_clock,
1792 + .get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
1793 +@@ -249,8 +265,7 @@ static struct sdhci_ops sdhci_am654_ops = {
1794 +
1795 + static const struct sdhci_pltfm_data sdhci_am654_pdata = {
1796 + .ops = &sdhci_am654_ops,
1797 +- .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
1798 +- SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
1799 ++ .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
1800 + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1801 + };
1802 +
1803 +@@ -272,8 +287,7 @@ static struct sdhci_ops sdhci_j721e_8bit_ops = {
1804 +
1805 + static const struct sdhci_pltfm_data sdhci_j721e_8bit_pdata = {
1806 + .ops = &sdhci_j721e_8bit_ops,
1807 +- .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
1808 +- SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
1809 ++ .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
1810 + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1811 + };
1812 +
1813 +@@ -295,8 +309,7 @@ static struct sdhci_ops sdhci_j721e_4bit_ops = {
1814 +
1815 + static const struct sdhci_pltfm_data sdhci_j721e_4bit_pdata = {
1816 + .ops = &sdhci_j721e_4bit_ops,
1817 +- .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
1818 +- SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
1819 ++ .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
1820 + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1821 + };
1822 +
1823 +@@ -480,6 +493,8 @@ static int sdhci_am654_probe(struct platform_device *pdev)
1824 + goto pm_runtime_put;
1825 + }
1826 +
1827 ++ host->mmc_host_ops.execute_tuning = sdhci_am654_execute_tuning;
1828 ++
1829 + ret = sdhci_am654_init(host);
1830 + if (ret)
1831 + goto pm_runtime_put;
1832 +diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
1833 +index 2e57122f02fb..2f5c287eac95 100644
1834 +--- a/drivers/net/can/slcan.c
1835 ++++ b/drivers/net/can/slcan.c
1836 +@@ -344,9 +344,16 @@ static void slcan_transmit(struct work_struct *work)
1837 + */
1838 + static void slcan_write_wakeup(struct tty_struct *tty)
1839 + {
1840 +- struct slcan *sl = tty->disc_data;
1841 ++ struct slcan *sl;
1842 ++
1843 ++ rcu_read_lock();
1844 ++ sl = rcu_dereference(tty->disc_data);
1845 ++ if (!sl)
1846 ++ goto out;
1847 +
1848 + schedule_work(&sl->tx_work);
1849 ++out:
1850 ++ rcu_read_unlock();
1851 + }
1852 +
1853 + /* Send a can_frame to a TTY queue. */
1854 +@@ -644,10 +651,11 @@ static void slcan_close(struct tty_struct *tty)
1855 + return;
1856 +
1857 + spin_lock_bh(&sl->lock);
1858 +- tty->disc_data = NULL;
1859 ++ rcu_assign_pointer(tty->disc_data, NULL);
1860 + sl->tty = NULL;
1861 + spin_unlock_bh(&sl->lock);
1862 +
1863 ++ synchronize_rcu();
1864 + flush_work(&sl->tx_work);
1865 +
1866 + /* Flush network side */
1867 +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
1868 +index 1de51811fcb4..8f909d57501f 100644
1869 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
1870 ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
1871 +@@ -2164,8 +2164,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
1872 + DMA_END_ADDR);
1873 +
1874 + /* Initialize Tx NAPI */
1875 +- netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
1876 +- NAPI_POLL_WEIGHT);
1877 ++ netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
1878 ++ NAPI_POLL_WEIGHT);
1879 + }
1880 +
1881 + /* Initialize a RDMA ring */
1882 +diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1883 +index 58f89f6a040f..97ff8608f0ab 100644
1884 +--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1885 ++++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1886 +@@ -2448,6 +2448,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1887 +
1888 + if (!is_offload(adapter))
1889 + return -EOPNOTSUPP;
1890 ++ if (!capable(CAP_NET_ADMIN))
1891 ++ return -EPERM;
1892 + if (!(adapter->flags & FULL_INIT_DONE))
1893 + return -EIO; /* need the memory controllers */
1894 + if (copy_from_user(&t, useraddr, sizeof(t)))
1895 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
1896 +index 778dab1af8fc..f260dd96873b 100644
1897 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
1898 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
1899 +@@ -180,7 +180,7 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
1900 +
1901 + struct tx_sync_info {
1902 + u64 rcd_sn;
1903 +- s32 sync_len;
1904 ++ u32 sync_len;
1905 + int nr_frags;
1906 + skb_frag_t frags[MAX_SKB_FRAGS];
1907 + };
1908 +@@ -193,13 +193,14 @@ enum mlx5e_ktls_sync_retval {
1909 +
1910 + static enum mlx5e_ktls_sync_retval
1911 + tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
1912 +- u32 tcp_seq, struct tx_sync_info *info)
1913 ++ u32 tcp_seq, int datalen, struct tx_sync_info *info)
1914 + {
1915 + struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
1916 + enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
1917 + struct tls_record_info *record;
1918 + int remaining, i = 0;
1919 + unsigned long flags;
1920 ++ bool ends_before;
1921 +
1922 + spin_lock_irqsave(&tx_ctx->lock, flags);
1923 + record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);
1924 +@@ -209,9 +210,21 @@ tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
1925 + goto out;
1926 + }
1927 +
1928 +- if (unlikely(tcp_seq < tls_record_start_seq(record))) {
1929 +- ret = tls_record_is_start_marker(record) ?
1930 +- MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
1931 ++ /* There are the following cases:
1932 ++ * 1. packet ends before start marker: bypass offload.
1933 ++ * 2. packet starts before start marker and ends after it: drop,
1934 ++ * not supported, breaks contract with kernel.
1935 ++ * 3. packet ends before tls record info starts: drop,
1936 ++ * this packet was already acknowledged and its record info
1937 ++ * was released.
1938 ++ */
1939 ++ ends_before = before(tcp_seq + datalen, tls_record_start_seq(record));
1940 ++
1941 ++ if (unlikely(tls_record_is_start_marker(record))) {
1942 ++ ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
1943 ++ goto out;
1944 ++ } else if (ends_before) {
1945 ++ ret = MLX5E_KTLS_SYNC_FAIL;
1946 + goto out;
1947 + }
1948 +
1949 +@@ -337,7 +350,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
1950 + u8 num_wqebbs;
1951 + int i = 0;
1952 +
1953 +- ret = tx_sync_info_get(priv_tx, seq, &info);
1954 ++ ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
1955 + if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
1956 + if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
1957 + stats->tls_skip_no_sync_data++;
1958 +@@ -351,14 +364,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
1959 + goto err_out;
1960 + }
1961 +
1962 +- if (unlikely(info.sync_len < 0)) {
1963 +- if (likely(datalen <= -info.sync_len))
1964 +- return MLX5E_KTLS_SYNC_DONE;
1965 +-
1966 +- stats->tls_drop_bypass_req++;
1967 +- goto err_out;
1968 +- }
1969 +-
1970 + stats->tls_ooo++;
1971 +
1972 + tx_post_resync_params(sq, priv_tx, info.rcd_sn);
1973 +@@ -378,8 +383,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
1974 + if (unlikely(contig_wqebbs_room < num_wqebbs))
1975 + mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
1976 +
1977 +- tx_post_resync_params(sq, priv_tx, info.rcd_sn);
1978 +-
1979 + for (; i < info.nr_frags; i++) {
1980 + unsigned int orig_fsz, frag_offset = 0, n = 0;
1981 + skb_frag_t *f = &info.frags[i];
1982 +@@ -455,12 +458,18 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
1983 + enum mlx5e_ktls_sync_retval ret =
1984 + mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);
1985 +
1986 +- if (likely(ret == MLX5E_KTLS_SYNC_DONE))
1987 ++ switch (ret) {
1988 ++ case MLX5E_KTLS_SYNC_DONE:
1989 + *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
1990 +- else if (ret == MLX5E_KTLS_SYNC_FAIL)
1991 ++ break;
1992 ++ case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
1993 ++ if (likely(!skb->decrypted))
1994 ++ goto out;
1995 ++ WARN_ON_ONCE(1);
1996 ++ /* fall-through */
1997 ++ default: /* MLX5E_KTLS_SYNC_FAIL */
1998 + goto err_out;
1999 +- else /* ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA */
2000 +- goto out;
2001 ++ }
2002 + }
2003 +
2004 + priv_tx->expected_seq = seq + datalen;
2005 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
2006 +index 96711e34d248..1f9107d83848 100644
2007 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
2008 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
2009 +@@ -3951,6 +3951,13 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
2010 + u32 rate_mbps;
2011 + int err;
2012 +
2013 ++ vport_num = rpriv->rep->vport;
2014 ++ if (vport_num >= MLX5_VPORT_ECPF) {
2015 ++ NL_SET_ERR_MSG_MOD(extack,
2016 ++ "Ingress rate limit is supported only for Eswitch ports connected to VFs");
2017 ++ return -EOPNOTSUPP;
2018 ++ }
2019 ++
2020 + esw = priv->mdev->priv.eswitch;
2021 + /* rate is given in bytes/sec.
2022 + * First convert to bits/sec and then round to the nearest mbit/secs.
2023 +@@ -3959,8 +3966,6 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
2024 + * 1 mbit/sec.
2025 + */
2026 + rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
2027 +- vport_num = rpriv->rep->vport;
2028 +-
2029 + err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
2030 + if (err)
2031 + NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
2032 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
2033 +index 9004a07e457a..5acfdea3a75a 100644
2034 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
2035 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
2036 +@@ -858,7 +858,7 @@ out:
2037 + */
2038 + #define ESW_SIZE (16 * 1024 * 1024)
2039 + const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
2040 +- 64 * 1024, 4 * 1024 };
2041 ++ 64 * 1024, 128 };
2042 +
2043 + static int
2044 + get_sz_from_pool(struct mlx5_eswitch *esw)
2045 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
2046 +index 051ab845b501..c96a0e501007 100644
2047 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
2048 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
2049 +@@ -1569,6 +1569,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
2050 + { PCI_VDEVICE(MELLANOX, 0x101d) }, /* ConnectX-6 Dx */
2051 + { PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */
2052 + { PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */
2053 ++ { PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */
2054 + { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
2055 + { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
2056 + { PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
2057 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
2058 +index 51803eef13dd..c7f10d4f8f8d 100644
2059 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
2060 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
2061 +@@ -1,6 +1,7 @@
2062 + // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2063 + /* Copyright (c) 2019 Mellanox Technologies. */
2064 +
2065 ++#include <linux/smp.h>
2066 + #include "dr_types.h"
2067 +
2068 + #define QUEUE_SIZE 128
2069 +@@ -729,7 +730,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
2070 + if (!in)
2071 + goto err_cqwq;
2072 +
2073 +- vector = smp_processor_id() % mlx5_comp_vectors_count(mdev);
2074 ++ vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev);
2075 + err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn);
2076 + if (err) {
2077 + kvfree(in);
2078 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
2079 +index 3d587d0bdbbe..1e32e2443f73 100644
2080 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
2081 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
2082 +@@ -352,26 +352,16 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
2083 + if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
2084 + list_for_each_entry(dst, &fte->node.children, node.list) {
2085 + enum mlx5_flow_destination_type type = dst->dest_attr.type;
2086 +- u32 id;
2087 +
2088 + if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
2089 + err = -ENOSPC;
2090 + goto free_actions;
2091 + }
2092 +
2093 +- switch (type) {
2094 +- case MLX5_FLOW_DESTINATION_TYPE_COUNTER:
2095 +- id = dst->dest_attr.counter_id;
2096 ++ if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
2097 ++ continue;
2098 +
2099 +- tmp_action =
2100 +- mlx5dr_action_create_flow_counter(id);
2101 +- if (!tmp_action) {
2102 +- err = -ENOMEM;
2103 +- goto free_actions;
2104 +- }
2105 +- fs_dr_actions[fs_dr_num_actions++] = tmp_action;
2106 +- actions[num_actions++] = tmp_action;
2107 +- break;
2108 ++ switch (type) {
2109 + case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
2110 + tmp_action = create_ft_action(dev, dst);
2111 + if (!tmp_action) {
2112 +@@ -397,6 +387,32 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
2113 + }
2114 + }
2115 +
2116 ++ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
2117 ++ list_for_each_entry(dst, &fte->node.children, node.list) {
2118 ++ u32 id;
2119 ++
2120 ++ if (dst->dest_attr.type !=
2121 ++ MLX5_FLOW_DESTINATION_TYPE_COUNTER)
2122 ++ continue;
2123 ++
2124 ++ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
2125 ++ err = -ENOSPC;
2126 ++ goto free_actions;
2127 ++ }
2128 ++
2129 ++ id = dst->dest_attr.counter_id;
2130 ++ tmp_action =
2131 ++ mlx5dr_action_create_flow_counter(id);
2132 ++ if (!tmp_action) {
2133 ++ err = -ENOMEM;
2134 ++ goto free_actions;
2135 ++ }
2136 ++
2137 ++ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
2138 ++ actions[num_actions++] = tmp_action;
2139 ++ }
2140 ++ }
2141 ++
2142 + params.match_sz = match_sz;
2143 + params.match_buf = (u64 *)fte->val;
2144 +
2145 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
2146 +index 150b3a144b83..3d3cca596116 100644
2147 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
2148 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
2149 +@@ -8,6 +8,7 @@
2150 + #include <linux/string.h>
2151 + #include <linux/rhashtable.h>
2152 + #include <linux/netdevice.h>
2153 ++#include <linux/mutex.h>
2154 + #include <net/net_namespace.h>
2155 + #include <net/tc_act/tc_vlan.h>
2156 +
2157 +@@ -25,6 +26,7 @@ struct mlxsw_sp_acl {
2158 + struct mlxsw_sp_fid *dummy_fid;
2159 + struct rhashtable ruleset_ht;
2160 + struct list_head rules;
2161 ++ struct mutex rules_lock; /* Protects rules list */
2162 + struct {
2163 + struct delayed_work dw;
2164 + unsigned long interval; /* ms */
2165 +@@ -701,7 +703,9 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
2166 + goto err_ruleset_block_bind;
2167 + }
2168 +
2169 ++ mutex_lock(&mlxsw_sp->acl->rules_lock);
2170 + list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
2171 ++ mutex_unlock(&mlxsw_sp->acl->rules_lock);
2172 + block->rule_count++;
2173 + block->egress_blocker_rule_count += rule->rulei->egress_bind_blocker;
2174 + return 0;
2175 +@@ -723,7 +727,9 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
2176 +
2177 + block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker;
2178 + ruleset->ht_key.block->rule_count--;
2179 ++ mutex_lock(&mlxsw_sp->acl->rules_lock);
2180 + list_del(&rule->list);
2181 ++ mutex_unlock(&mlxsw_sp->acl->rules_lock);
2182 + if (!ruleset->ht_key.chain_index &&
2183 + mlxsw_sp_acl_ruleset_is_singular(ruleset))
2184 + mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset,
2185 +@@ -783,19 +789,18 @@ static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
2186 + struct mlxsw_sp_acl_rule *rule;
2187 + int err;
2188 +
2189 +- /* Protect internal structures from changes */
2190 +- rtnl_lock();
2191 ++ mutex_lock(&acl->rules_lock);
2192 + list_for_each_entry(rule, &acl->rules, list) {
2193 + err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
2194 + rule);
2195 + if (err)
2196 + goto err_rule_update;
2197 + }
2198 +- rtnl_unlock();
2199 ++ mutex_unlock(&acl->rules_lock);
2200 + return 0;
2201 +
2202 + err_rule_update:
2203 +- rtnl_unlock();
2204 ++ mutex_unlock(&acl->rules_lock);
2205 + return err;
2206 + }
2207 +
2208 +@@ -880,6 +885,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
2209 + acl->dummy_fid = fid;
2210 +
2211 + INIT_LIST_HEAD(&acl->rules);
2212 ++ mutex_init(&acl->rules_lock);
2213 + err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam);
2214 + if (err)
2215 + goto err_acl_ops_init;
2216 +@@ -892,6 +898,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
2217 + return 0;
2218 +
2219 + err_acl_ops_init:
2220 ++ mutex_destroy(&acl->rules_lock);
2221 + mlxsw_sp_fid_put(fid);
2222 + err_fid_get:
2223 + rhashtable_destroy(&acl->ruleset_ht);
2224 +@@ -908,6 +915,7 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
2225 +
2226 + cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
2227 + mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam);
2228 ++ mutex_destroy(&acl->rules_lock);
2229 + WARN_ON(!list_empty(&acl->rules));
2230 + mlxsw_sp_fid_put(acl->dummy_fid);
2231 + rhashtable_destroy(&acl->ruleset_ht);
2232 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
2233 +index 1c14c051ee52..63e7a058b7c6 100644
2234 +--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
2235 ++++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
2236 +@@ -299,22 +299,17 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
2237 + u64 len;
2238 + int err;
2239 +
2240 ++ if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
2241 ++ this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
2242 ++ dev_kfree_skb_any(skb);
2243 ++ return NETDEV_TX_OK;
2244 ++ }
2245 ++
2246 + memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
2247 +
2248 + if (mlxsw_core_skb_transmit_busy(mlxsw_sx->core, &tx_info))
2249 + return NETDEV_TX_BUSY;
2250 +
2251 +- if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
2252 +- struct sk_buff *skb_orig = skb;
2253 +-
2254 +- skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
2255 +- if (!skb) {
2256 +- this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
2257 +- dev_kfree_skb_any(skb_orig);
2258 +- return NETDEV_TX_OK;
2259 +- }
2260 +- dev_consume_skb_any(skb_orig);
2261 +- }
2262 + mlxsw_sx_txhdr_construct(skb, &tx_info);
2263 + /* TX header is consumed by HW on the way so we shouldn't count its
2264 + * bytes as being sent.
2265 +diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
2266 +index b339125b2f09..05e760444a92 100644
2267 +--- a/drivers/net/ethernet/natsemi/sonic.c
2268 ++++ b/drivers/net/ethernet/natsemi/sonic.c
2269 +@@ -64,6 +64,8 @@ static int sonic_open(struct net_device *dev)
2270 +
2271 + netif_dbg(lp, ifup, dev, "%s: initializing sonic driver\n", __func__);
2272 +
2273 ++ spin_lock_init(&lp->lock);
2274 ++
2275 + for (i = 0; i < SONIC_NUM_RRS; i++) {
2276 + struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
2277 + if (skb == NULL) {
2278 +@@ -114,6 +116,24 @@ static int sonic_open(struct net_device *dev)
2279 + return 0;
2280 + }
2281 +
2282 ++/* Wait for the SONIC to become idle. */
2283 ++static void sonic_quiesce(struct net_device *dev, u16 mask)
2284 ++{
2285 ++ struct sonic_local * __maybe_unused lp = netdev_priv(dev);
2286 ++ int i;
2287 ++ u16 bits;
2288 ++
2289 ++ for (i = 0; i < 1000; ++i) {
2290 ++ bits = SONIC_READ(SONIC_CMD) & mask;
2291 ++ if (!bits)
2292 ++ return;
2293 ++ if (irqs_disabled() || in_interrupt())
2294 ++ udelay(20);
2295 ++ else
2296 ++ usleep_range(100, 200);
2297 ++ }
2298 ++ WARN_ONCE(1, "command deadline expired! 0x%04x\n", bits);
2299 ++}
2300 +
2301 + /*
2302 + * Close the SONIC device
2303 +@@ -130,6 +150,9 @@ static int sonic_close(struct net_device *dev)
2304 + /*
2305 + * stop the SONIC, disable interrupts
2306 + */
2307 ++ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
2308 ++ sonic_quiesce(dev, SONIC_CR_ALL);
2309 ++
2310 + SONIC_WRITE(SONIC_IMR, 0);
2311 + SONIC_WRITE(SONIC_ISR, 0x7fff);
2312 + SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
2313 +@@ -169,6 +192,9 @@ static void sonic_tx_timeout(struct net_device *dev)
2314 + * put the Sonic into software-reset mode and
2315 + * disable all interrupts before releasing DMA buffers
2316 + */
2317 ++ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
2318 ++ sonic_quiesce(dev, SONIC_CR_ALL);
2319 ++
2320 + SONIC_WRITE(SONIC_IMR, 0);
2321 + SONIC_WRITE(SONIC_ISR, 0x7fff);
2322 + SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
2323 +@@ -206,8 +232,6 @@ static void sonic_tx_timeout(struct net_device *dev)
2324 + * wake the tx queue
2325 + * Concurrently with all of this, the SONIC is potentially writing to
2326 + * the status flags of the TDs.
2327 +- * Until some mutual exclusion is added, this code will not work with SMP. However,
2328 +- * MIPS Jazz machines and m68k Macs were all uni-processor machines.
2329 + */
2330 +
2331 + static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
2332 +@@ -215,7 +239,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
2333 + struct sonic_local *lp = netdev_priv(dev);
2334 + dma_addr_t laddr;
2335 + int length;
2336 +- int entry = lp->next_tx;
2337 ++ int entry;
2338 ++ unsigned long flags;
2339 +
2340 + netif_dbg(lp, tx_queued, dev, "%s: skb=%p\n", __func__, skb);
2341 +
2342 +@@ -237,6 +262,10 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
2343 + return NETDEV_TX_OK;
2344 + }
2345 +
2346 ++ spin_lock_irqsave(&lp->lock, flags);
2347 ++
2348 ++ entry = lp->next_tx;
2349 ++
2350 + sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */
2351 + sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1); /* single fragment */
2352 + sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */
2353 +@@ -246,10 +275,6 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
2354 + sonic_tda_put(dev, entry, SONIC_TD_LINK,
2355 + sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);
2356 +
2357 +- /*
2358 +- * Must set tx_skb[entry] only after clearing status, and
2359 +- * before clearing EOL and before stopping queue
2360 +- */
2361 + wmb();
2362 + lp->tx_len[entry] = length;
2363 + lp->tx_laddr[entry] = laddr;
2364 +@@ -272,6 +297,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
2365 +
2366 + SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
2367 +
2368 ++ spin_unlock_irqrestore(&lp->lock, flags);
2369 ++
2370 + return NETDEV_TX_OK;
2371 + }
2372 +
2373 +@@ -284,15 +311,28 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
2374 + struct net_device *dev = dev_id;
2375 + struct sonic_local *lp = netdev_priv(dev);
2376 + int status;
2377 ++ unsigned long flags;
2378 ++
2379 ++ /* The lock has two purposes. Firstly, it synchronizes sonic_interrupt()
2380 ++ * with sonic_send_packet() so that the two functions can share state.
2381 ++ * Secondly, it makes sonic_interrupt() re-entrant, as that is required
2382 ++ * by macsonic which must use two IRQs with different priority levels.
2383 ++ */
2384 ++ spin_lock_irqsave(&lp->lock, flags);
2385 ++
2386 ++ status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
2387 ++ if (!status) {
2388 ++ spin_unlock_irqrestore(&lp->lock, flags);
2389 +
2390 +- if (!(status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT))
2391 + return IRQ_NONE;
2392 ++ }
2393 +
2394 + do {
2395 ++ SONIC_WRITE(SONIC_ISR, status); /* clear the interrupt(s) */
2396 ++
2397 + if (status & SONIC_INT_PKTRX) {
2398 + netif_dbg(lp, intr, dev, "%s: packet rx\n", __func__);
2399 + sonic_rx(dev); /* got packet(s) */
2400 +- SONIC_WRITE(SONIC_ISR, SONIC_INT_PKTRX); /* clear the interrupt */
2401 + }
2402 +
2403 + if (status & SONIC_INT_TXDN) {
2404 +@@ -300,11 +340,12 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
2405 + int td_status;
2406 + int freed_some = 0;
2407 +
2408 +- /* At this point, cur_tx is the index of a TD that is one of:
2409 +- * unallocated/freed (status set & tx_skb[entry] clear)
2410 +- * allocated and sent (status set & tx_skb[entry] set )
2411 +- * allocated and not yet sent (status clear & tx_skb[entry] set )
2412 +- * still being allocated by sonic_send_packet (status clear & tx_skb[entry] clear)
2413 ++ /* The state of a Transmit Descriptor may be inferred
2414 ++ * from { tx_skb[entry], td_status } as follows.
2415 ++ * { clear, clear } => the TD has never been used
2416 ++ * { set, clear } => the TD was handed to SONIC
2417 ++ * { set, set } => the TD was handed back
2418 ++ * { clear, set } => the TD is available for re-use
2419 + */
2420 +
2421 + netif_dbg(lp, intr, dev, "%s: tx done\n", __func__);
2422 +@@ -313,18 +354,19 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
2423 + if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0)
2424 + break;
2425 +
2426 +- if (td_status & 0x0001) {
2427 ++ if (td_status & SONIC_TCR_PTX) {
2428 + lp->stats.tx_packets++;
2429 + lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE);
2430 + } else {
2431 +- lp->stats.tx_errors++;
2432 +- if (td_status & 0x0642)
2433 ++ if (td_status & (SONIC_TCR_EXD |
2434 ++ SONIC_TCR_EXC | SONIC_TCR_BCM))
2435 + lp->stats.tx_aborted_errors++;
2436 +- if (td_status & 0x0180)
2437 ++ if (td_status &
2438 ++ (SONIC_TCR_NCRS | SONIC_TCR_CRLS))
2439 + lp->stats.tx_carrier_errors++;
2440 +- if (td_status & 0x0020)
2441 ++ if (td_status & SONIC_TCR_OWC)
2442 + lp->stats.tx_window_errors++;
2443 +- if (td_status & 0x0004)
2444 ++ if (td_status & SONIC_TCR_FU)
2445 + lp->stats.tx_fifo_errors++;
2446 + }
2447 +
2448 +@@ -346,7 +388,6 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
2449 + if (freed_some || lp->tx_skb[entry] == NULL)
2450 + netif_wake_queue(dev); /* The ring is no longer full */
2451 + lp->cur_tx = entry;
2452 +- SONIC_WRITE(SONIC_ISR, SONIC_INT_TXDN); /* clear the interrupt */
2453 + }
2454 +
2455 + /*
2456 +@@ -355,42 +396,37 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
2457 + if (status & SONIC_INT_RFO) {
2458 + netif_dbg(lp, rx_err, dev, "%s: rx fifo overrun\n",
2459 + __func__);
2460 +- lp->stats.rx_fifo_errors++;
2461 +- SONIC_WRITE(SONIC_ISR, SONIC_INT_RFO); /* clear the interrupt */
2462 + }
2463 + if (status & SONIC_INT_RDE) {
2464 + netif_dbg(lp, rx_err, dev, "%s: rx descriptors exhausted\n",
2465 + __func__);
2466 +- lp->stats.rx_dropped++;
2467 +- SONIC_WRITE(SONIC_ISR, SONIC_INT_RDE); /* clear the interrupt */
2468 + }
2469 + if (status & SONIC_INT_RBAE) {
2470 + netif_dbg(lp, rx_err, dev, "%s: rx buffer area exceeded\n",
2471 + __func__);
2472 +- lp->stats.rx_dropped++;
2473 +- SONIC_WRITE(SONIC_ISR, SONIC_INT_RBAE); /* clear the interrupt */
2474 + }
2475 +
2476 + /* counter overruns; all counters are 16bit wide */
2477 +- if (status & SONIC_INT_FAE) {
2478 ++ if (status & SONIC_INT_FAE)
2479 + lp->stats.rx_frame_errors += 65536;
2480 +- SONIC_WRITE(SONIC_ISR, SONIC_INT_FAE); /* clear the interrupt */
2481 +- }
2482 +- if (status & SONIC_INT_CRC) {
2483 ++ if (status & SONIC_INT_CRC)
2484 + lp->stats.rx_crc_errors += 65536;
2485 +- SONIC_WRITE(SONIC_ISR, SONIC_INT_CRC); /* clear the interrupt */
2486 +- }
2487 +- if (status & SONIC_INT_MP) {
2488 ++ if (status & SONIC_INT_MP)
2489 + lp->stats.rx_missed_errors += 65536;
2490 +- SONIC_WRITE(SONIC_ISR, SONIC_INT_MP); /* clear the interrupt */
2491 +- }
2492 +
2493 + /* transmit error */
2494 + if (status & SONIC_INT_TXER) {
2495 +- if (SONIC_READ(SONIC_TCR) & SONIC_TCR_FU)
2496 +- netif_dbg(lp, tx_err, dev, "%s: tx fifo underrun\n",
2497 +- __func__);
2498 +- SONIC_WRITE(SONIC_ISR, SONIC_INT_TXER); /* clear the interrupt */
2499 ++ u16 tcr = SONIC_READ(SONIC_TCR);
2500 ++
2501 ++ netif_dbg(lp, tx_err, dev, "%s: TXER intr, TCR %04x\n",
2502 ++ __func__, tcr);
2503 ++
2504 ++ if (tcr & (SONIC_TCR_EXD | SONIC_TCR_EXC |
2505 ++ SONIC_TCR_FU | SONIC_TCR_BCM)) {
2506 ++ /* Aborted transmission. Try again. */
2507 ++ netif_stop_queue(dev);
2508 ++ SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
2509 ++ }
2510 + }
2511 +
2512 + /* bus retry */
2513 +@@ -400,107 +436,164 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
2514 + /* ... to help debug DMA problems causing endless interrupts. */
2515 + /* Bounce the eth interface to turn on the interrupt again. */
2516 + SONIC_WRITE(SONIC_IMR, 0);
2517 +- SONIC_WRITE(SONIC_ISR, SONIC_INT_BR); /* clear the interrupt */
2518 + }
2519 +
2520 +- /* load CAM done */
2521 +- if (status & SONIC_INT_LCD)
2522 +- SONIC_WRITE(SONIC_ISR, SONIC_INT_LCD); /* clear the interrupt */
2523 +- } while((status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT));
2524 ++ status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
2525 ++ } while (status);
2526 ++
2527 ++ spin_unlock_irqrestore(&lp->lock, flags);
2528 ++
2529 + return IRQ_HANDLED;
2530 + }
2531 +
2532 ++/* Return the array index corresponding to a given Receive Buffer pointer. */
2533 ++static int index_from_addr(struct sonic_local *lp, dma_addr_t addr,
2534 ++ unsigned int last)
2535 ++{
2536 ++ unsigned int i = last;
2537 ++
2538 ++ do {
2539 ++ i = (i + 1) & SONIC_RRS_MASK;
2540 ++ if (addr == lp->rx_laddr[i])
2541 ++ return i;
2542 ++ } while (i != last);
2543 ++
2544 ++ return -ENOENT;
2545 ++}
2546 ++
2547 ++/* Allocate and map a new skb to be used as a receive buffer. */
2548 ++static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp,
2549