Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Wed, 29 Jan 2020 16:18:49
Message-Id: 1580314630.bfe9312182d69deb9786d697a6fb0e57b6cd9ffa.mpagano@gentoo
commit:     bfe9312182d69deb9786d697a6fb0e57b6cd9ffa
Author:     Mike Pagano <mpagano@gentoo.org>
AuthorDate: Wed Jan 29 16:17:10 2020 +0000
Commit:     Mike Pagano <mpagano@gentoo.org>
CommitDate: Wed Jan 29 16:17:10 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=bfe93121

Linux patch 5.4.16

Signed-off-by: Mike Pagano <mpagano@gentoo.org>

 0000_README             |    4 +
 1015_linux-5.4.16.patch | 5662 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5666 insertions(+)

diff --git a/0000_README b/0000_README
index 34417c5..85cdd05 100644
--- a/0000_README
+++ b/0000_README
@@ -103,6 +103,10 @@ Patch: 1014_linux-5.4.15.patch
 From: http://www.kernel.org
 Desc: Linux 5.4.15
 
+Patch: 1015_linux-5.4.16.patch
+From: http://www.kernel.org
+Desc: Linux 5.4.16
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.
 
diff --git a/1015_linux-5.4.16.patch b/1015_linux-5.4.16.patch
new file mode 100644
index 0000000..2c9e8a8
--- /dev/null
+++ b/1015_linux-5.4.16.patch
@@ -0,0 +1,5662 @@
38 +diff --git a/Makefile b/Makefile
39 +index 30600e309c73..e16d2e58ed4b 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 5
45 + PATCHLEVEL = 4
46 +-SUBLEVEL = 15
47 ++SUBLEVEL = 16
48 + EXTRAVERSION =
49 + NAME = Kleptomaniac Octopus
50 +
51 +diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
52 +index 15b75005bc34..3fa1b962dc27 100644
53 +--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
54 ++++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
55 +@@ -600,8 +600,11 @@ extern void slb_set_size(u16 size);
56 + *
57 + */
58 + #define MAX_USER_CONTEXT ((ASM_CONST(1) << CONTEXT_BITS) - 2)
59 ++
60 ++// The + 2 accounts for INVALID_REGION and 1 more to avoid overlap with kernel
61 + #define MIN_USER_CONTEXT (MAX_KERNEL_CTX_CNT + MAX_VMALLOC_CTX_CNT + \
62 +- MAX_IO_CTX_CNT + MAX_VMEMMAP_CTX_CNT)
63 ++ MAX_IO_CTX_CNT + MAX_VMEMMAP_CTX_CNT + 2)
64 ++
65 + /*
66 + * For platforms that support on 65bit VA we limit the context bits
67 + */
68 +diff --git a/arch/powerpc/include/asm/xive-regs.h b/arch/powerpc/include/asm/xive-regs.h
69 +index f2dfcd50a2d3..33aee7490cbb 100644
70 +--- a/arch/powerpc/include/asm/xive-regs.h
71 ++++ b/arch/powerpc/include/asm/xive-regs.h
72 +@@ -39,6 +39,7 @@
73 +
74 + #define XIVE_ESB_VAL_P 0x2
75 + #define XIVE_ESB_VAL_Q 0x1
76 ++#define XIVE_ESB_INVALID 0xFF
77 +
78 + /*
79 + * Thread Management (aka "TM") registers
80 +diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
81 +index f5fadbd2533a..9651ca061828 100644
82 +--- a/arch/powerpc/sysdev/xive/common.c
83 ++++ b/arch/powerpc/sysdev/xive/common.c
84 +@@ -972,12 +972,21 @@ static int xive_get_irqchip_state(struct irq_data *data,
85 + enum irqchip_irq_state which, bool *state)
86 + {
87 + struct xive_irq_data *xd = irq_data_get_irq_handler_data(data);
88 ++ u8 pq;
89 +
90 + switch (which) {
91 + case IRQCHIP_STATE_ACTIVE:
92 +- *state = !xd->stale_p &&
93 +- (xd->saved_p ||
94 +- !!(xive_esb_read(xd, XIVE_ESB_GET) & XIVE_ESB_VAL_P));
95 ++ pq = xive_esb_read(xd, XIVE_ESB_GET);
96 ++
97 ++ /*
98 ++ * The esb value being all 1's means we couldn't get
99 ++ * the PQ state of the interrupt through mmio. It may
100 ++ * happen, for example when querying a PHB interrupt
101 ++ * while the PHB is in an error state. We consider the
102 ++ * interrupt to be inactive in that case.
103 ++ */
104 ++ *state = (pq != XIVE_ESB_INVALID) && !xd->stale_p &&
105 ++ (xd->saved_p || !!(pq & XIVE_ESB_VAL_P));
106 + return 0;
107 + default:
108 + return -EINVAL;
109 +diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
110 +index 2bbab0230aeb..d287837ed755 100644
111 +--- a/drivers/atm/firestream.c
112 ++++ b/drivers/atm/firestream.c
113 +@@ -912,6 +912,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
114 + }
115 + if (!to) {
116 + printk ("No more free channels for FS50..\n");
117 ++ kfree(vcc);
118 + return -EBUSY;
119 + }
120 + vcc->channo = dev->channo;
121 +@@ -922,6 +923,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
122 + if (((DO_DIRECTION(rxtp) && dev->atm_vccs[vcc->channo])) ||
123 + ( DO_DIRECTION(txtp) && test_bit (vcc->channo, dev->tx_inuse))) {
124 + printk ("Channel is in use for FS155.\n");
125 ++ kfree(vcc);
126 + return -EBUSY;
127 + }
128 + }
129 +@@ -935,6 +937,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
130 + tc, sizeof (struct fs_transmit_config));
131 + if (!tc) {
132 + fs_dprintk (FS_DEBUG_OPEN, "fs: can't alloc transmit_config.\n");
133 ++ kfree(vcc);
134 + return -ENOMEM;
135 + }
136 +
137 +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
138 +index 3d4f5775a4ba..25235ef630c1 100644
139 +--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
140 ++++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
141 +@@ -9,16 +9,16 @@
142 + #include "i915_gem_ioctls.h"
143 + #include "i915_gem_object.h"
144 +
145 +-static __always_inline u32 __busy_read_flag(u8 id)
146 ++static __always_inline u32 __busy_read_flag(u16 id)
147 + {
148 +- if (id == (u8)I915_ENGINE_CLASS_INVALID)
149 ++ if (id == (u16)I915_ENGINE_CLASS_INVALID)
150 + return 0xffff0000u;
151 +
152 + GEM_BUG_ON(id >= 16);
153 + return 0x10000u << id;
154 + }
155 +
156 +-static __always_inline u32 __busy_write_id(u8 id)
157 ++static __always_inline u32 __busy_write_id(u16 id)
158 + {
159 + /*
160 + * The uABI guarantees an active writer is also amongst the read
161 +@@ -29,14 +29,14 @@ static __always_inline u32 __busy_write_id(u8 id)
162 + * last_read - hence we always set both read and write busy for
163 + * last_write.
164 + */
165 +- if (id == (u8)I915_ENGINE_CLASS_INVALID)
166 ++ if (id == (u16)I915_ENGINE_CLASS_INVALID)
167 + return 0xffffffffu;
168 +
169 + return (id + 1) | __busy_read_flag(id);
170 + }
171 +
172 + static __always_inline unsigned int
173 +-__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id))
174 ++__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u16 id))
175 + {
176 + const struct i915_request *rq;
177 +
178 +@@ -57,7 +57,7 @@ __busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id))
179 + return 0;
180 +
181 + /* Beware type-expansion follies! */
182 +- BUILD_BUG_ON(!typecheck(u8, rq->engine->uabi_class));
183 ++ BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class));
184 + return flag(rq->engine->uabi_class);
185 + }
186 +
187 +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
188 +index abfbac49b8e8..968d9b2705d0 100644
189 +--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
190 ++++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
191 +@@ -427,7 +427,7 @@ struct get_pages_work {
192 +
193 + static struct sg_table *
194 + __i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
195 +- struct page **pvec, int num_pages)
196 ++ struct page **pvec, unsigned long num_pages)
197 + {
198 + unsigned int max_segment = i915_sg_segment_size();
199 + struct sg_table *st;
200 +@@ -473,9 +473,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
201 + {
202 + struct get_pages_work *work = container_of(_work, typeof(*work), work);
203 + struct drm_i915_gem_object *obj = work->obj;
204 +- const int npages = obj->base.size >> PAGE_SHIFT;
205 ++ const unsigned long npages = obj->base.size >> PAGE_SHIFT;
206 ++ unsigned long pinned;
207 + struct page **pvec;
208 +- int pinned, ret;
209 ++ int ret;
210 +
211 + ret = -ENOMEM;
212 + pinned = 0;
213 +@@ -578,7 +579,7 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
214 +
215 + static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
216 + {
217 +- const int num_pages = obj->base.size >> PAGE_SHIFT;
218 ++ const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
219 + struct mm_struct *mm = obj->userptr.mm->mm;
220 + struct page **pvec;
221 + struct sg_table *pages;
222 +diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
223 +index 9dd8c299cb2d..798e1b024406 100644
224 +--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
225 ++++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
226 +@@ -300,8 +300,8 @@ struct intel_engine_cs {
227 + u8 class;
228 + u8 instance;
229 +
230 +- u8 uabi_class;
231 +- u8 uabi_instance;
232 ++ u16 uabi_class;
233 ++ u16 uabi_instance;
234 +
235 + u32 context_size;
236 + u32 mmio_base;
237 +diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
238 +index b1a7a8b9b46a..f614646ed3f9 100644
239 +--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
240 ++++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
241 +@@ -1178,6 +1178,7 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
242 + pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
243 + vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
244 + do {
245 ++ GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE);
246 + vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
247 +
248 + iter->dma += I915_GTT_PAGE_SIZE;
249 +@@ -1657,6 +1658,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
250 +
251 + vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
252 + do {
253 ++ GEM_BUG_ON(iter.sg->length < I915_GTT_PAGE_SIZE);
254 + vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
255 +
256 + iter.dma += I915_GTT_PAGE_SIZE;
257 +diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
258 +index 1c67ac434e10..5906c80c4b2c 100644
259 +--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
260 ++++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
261 +@@ -78,8 +78,10 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct
262 + static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
263 + struct drm_file *file)
264 + {
265 ++ struct panfrost_file_priv *priv = file->driver_priv;
266 + struct panfrost_gem_object *bo;
267 + struct drm_panfrost_create_bo *args = data;
268 ++ struct panfrost_gem_mapping *mapping;
269 +
270 + if (!args->size || args->pad ||
271 + (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
272 +@@ -95,7 +97,14 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
273 + if (IS_ERR(bo))
274 + return PTR_ERR(bo);
275 +
276 +- args->offset = bo->node.start << PAGE_SHIFT;
277 ++ mapping = panfrost_gem_mapping_get(bo, priv);
278 ++ if (!mapping) {
279 ++ drm_gem_object_put_unlocked(&bo->base.base);
280 ++ return -EINVAL;
281 ++ }
282 ++
283 ++ args->offset = mapping->mmnode.start << PAGE_SHIFT;
284 ++ panfrost_gem_mapping_put(mapping);
285 +
286 + return 0;
287 + }
288 +@@ -119,6 +128,11 @@ panfrost_lookup_bos(struct drm_device *dev,
289 + struct drm_panfrost_submit *args,
290 + struct panfrost_job *job)
291 + {
292 ++ struct panfrost_file_priv *priv = file_priv->driver_priv;
293 ++ struct panfrost_gem_object *bo;
294 ++ unsigned int i;
295 ++ int ret;
296 ++
297 + job->bo_count = args->bo_handle_count;
298 +
299 + if (!job->bo_count)
300 +@@ -130,9 +144,32 @@ panfrost_lookup_bos(struct drm_device *dev,
301 + if (!job->implicit_fences)
302 + return -ENOMEM;
303 +
304 +- return drm_gem_objects_lookup(file_priv,
305 +- (void __user *)(uintptr_t)args->bo_handles,
306 +- job->bo_count, &job->bos);
307 ++ ret = drm_gem_objects_lookup(file_priv,
308 ++ (void __user *)(uintptr_t)args->bo_handles,
309 ++ job->bo_count, &job->bos);
310 ++ if (ret)
311 ++ return ret;
312 ++
313 ++ job->mappings = kvmalloc_array(job->bo_count,
314 ++ sizeof(struct panfrost_gem_mapping *),
315 ++ GFP_KERNEL | __GFP_ZERO);
316 ++ if (!job->mappings)
317 ++ return -ENOMEM;
318 ++
319 ++ for (i = 0; i < job->bo_count; i++) {
320 ++ struct panfrost_gem_mapping *mapping;
321 ++
322 ++ bo = to_panfrost_bo(job->bos[i]);
323 ++ mapping = panfrost_gem_mapping_get(bo, priv);
324 ++ if (!mapping) {
325 ++ ret = -EINVAL;
326 ++ break;
327 ++ }
328 ++
329 ++ job->mappings[i] = mapping;
330 ++ }
331 ++
332 ++ return ret;
333 + }
334 +
335 + /**
336 +@@ -320,7 +357,9 @@ out:
337 + static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
338 + struct drm_file *file_priv)
339 + {
340 ++ struct panfrost_file_priv *priv = file_priv->driver_priv;
341 + struct drm_panfrost_get_bo_offset *args = data;
342 ++ struct panfrost_gem_mapping *mapping;
343 + struct drm_gem_object *gem_obj;
344 + struct panfrost_gem_object *bo;
345 +
346 +@@ -331,18 +370,26 @@ static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
347 + }
348 + bo = to_panfrost_bo(gem_obj);
349 +
350 +- args->offset = bo->node.start << PAGE_SHIFT;
351 +-
352 ++ mapping = panfrost_gem_mapping_get(bo, priv);
353 + drm_gem_object_put_unlocked(gem_obj);
354 ++
355 ++ if (!mapping)
356 ++ return -EINVAL;
357 ++
358 ++ args->offset = mapping->mmnode.start << PAGE_SHIFT;
359 ++ panfrost_gem_mapping_put(mapping);
360 + return 0;
361 + }
362 +
363 + static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
364 + struct drm_file *file_priv)
365 + {
366 ++ struct panfrost_file_priv *priv = file_priv->driver_priv;
367 + struct drm_panfrost_madvise *args = data;
368 + struct panfrost_device *pfdev = dev->dev_private;
369 + struct drm_gem_object *gem_obj;
370 ++ struct panfrost_gem_object *bo;
371 ++ int ret = 0;
372 +
373 + gem_obj = drm_gem_object_lookup(file_priv, args->handle);
374 + if (!gem_obj) {
375 +@@ -350,22 +397,48 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
376 + return -ENOENT;
377 + }
378 +
379 ++ bo = to_panfrost_bo(gem_obj);
380 ++
381 + mutex_lock(&pfdev->shrinker_lock);
382 ++ mutex_lock(&bo->mappings.lock);
383 ++ if (args->madv == PANFROST_MADV_DONTNEED) {
384 ++ struct panfrost_gem_mapping *first;
385 ++
386 ++ first = list_first_entry(&bo->mappings.list,
387 ++ struct panfrost_gem_mapping,
388 ++ node);
389 ++
390 ++ /*
391 ++ * If we want to mark the BO purgeable, there must be only one
392 ++ * user: the caller FD.
393 ++ * We could do something smarter and mark the BO purgeable only
394 ++ * when all its users have marked it purgeable, but globally
395 ++ * visible/shared BOs are likely to never be marked purgeable
396 ++ * anyway, so let's not bother.
397 ++ */
398 ++ if (!list_is_singular(&bo->mappings.list) ||
399 ++ WARN_ON_ONCE(first->mmu != &priv->mmu)) {
400 ++ ret = -EINVAL;
401 ++ goto out_unlock_mappings;
402 ++ }
403 ++ }
404 ++
405 + args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);
406 +
407 + if (args->retained) {
408 +- struct panfrost_gem_object *bo = to_panfrost_bo(gem_obj);
409 +-
410 + if (args->madv == PANFROST_MADV_DONTNEED)
411 + list_add_tail(&bo->base.madv_list,
412 + &pfdev->shrinker_list);
413 + else if (args->madv == PANFROST_MADV_WILLNEED)
414 + list_del_init(&bo->base.madv_list);
415 + }
416 ++
417 ++out_unlock_mappings:
418 ++ mutex_unlock(&bo->mappings.lock);
419 + mutex_unlock(&pfdev->shrinker_lock);
420 +
421 + drm_gem_object_put_unlocked(gem_obj);
422 +- return 0;
423 ++ return ret;
424 + }
425 +
426 + int panfrost_unstable_ioctl_check(void)
427 +diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
428 +index 92a95210a899..77c3a3855c68 100644
429 +--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
430 ++++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
431 +@@ -29,6 +29,12 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
432 + list_del_init(&bo->base.madv_list);
433 + mutex_unlock(&pfdev->shrinker_lock);
434 +
435 ++ /*
436 ++ * If we still have mappings attached to the BO, there's a problem in
437 ++ * our refcounting.
438 ++ */
439 ++ WARN_ON_ONCE(!list_empty(&bo->mappings.list));
440 ++
441 + if (bo->sgts) {
442 + int i;
443 + int n_sgt = bo->base.base.size / SZ_2M;
444 +@@ -46,6 +52,69 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
445 + drm_gem_shmem_free_object(obj);
446 + }
447 +
448 ++struct panfrost_gem_mapping *
449 ++panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
450 ++ struct panfrost_file_priv *priv)
451 ++{
452 ++ struct panfrost_gem_mapping *iter, *mapping = NULL;
453 ++
454 ++ mutex_lock(&bo->mappings.lock);
455 ++ list_for_each_entry(iter, &bo->mappings.list, node) {
456 ++ if (iter->mmu == &priv->mmu) {
457 ++ kref_get(&iter->refcount);
458 ++ mapping = iter;
459 ++ break;
460 ++ }
461 ++ }
462 ++ mutex_unlock(&bo->mappings.lock);
463 ++
464 ++ return mapping;
465 ++}
466 ++
467 ++static void
468 ++panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
469 ++{
470 ++ struct panfrost_file_priv *priv;
471 ++
472 ++ if (mapping->active)
473 ++ panfrost_mmu_unmap(mapping);
474 ++
475 ++ priv = container_of(mapping->mmu, struct panfrost_file_priv, mmu);
476 ++ spin_lock(&priv->mm_lock);
477 ++ if (drm_mm_node_allocated(&mapping->mmnode))
478 ++ drm_mm_remove_node(&mapping->mmnode);
479 ++ spin_unlock(&priv->mm_lock);
480 ++}
481 ++
482 ++static void panfrost_gem_mapping_release(struct kref *kref)
483 ++{
484 ++ struct panfrost_gem_mapping *mapping;
485 ++
486 ++ mapping = container_of(kref, struct panfrost_gem_mapping, refcount);
487 ++
488 ++ panfrost_gem_teardown_mapping(mapping);
489 ++ drm_gem_object_put_unlocked(&mapping->obj->base.base);
490 ++ kfree(mapping);
491 ++}
492 ++
493 ++void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
494 ++{
495 ++ if (!mapping)
496 ++ return;
497 ++
498 ++ kref_put(&mapping->refcount, panfrost_gem_mapping_release);
499 ++}
500 ++
501 ++void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo)
502 ++{
503 ++ struct panfrost_gem_mapping *mapping;
504 ++
505 ++ mutex_lock(&bo->mappings.lock);
506 ++ list_for_each_entry(mapping, &bo->mappings.list, node)
507 ++ panfrost_gem_teardown_mapping(mapping);
508 ++ mutex_unlock(&bo->mappings.lock);
509 ++}
510 ++
511 + int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
512 + {
513 + int ret;
514 +@@ -54,6 +123,16 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
515 + struct panfrost_gem_object *bo = to_panfrost_bo(obj);
516 + unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
517 + struct panfrost_file_priv *priv = file_priv->driver_priv;
518 ++ struct panfrost_gem_mapping *mapping;
519 ++
520 ++ mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
521 ++ if (!mapping)
522 ++ return -ENOMEM;
523 ++
524 ++ INIT_LIST_HEAD(&mapping->node);
525 ++ kref_init(&mapping->refcount);
526 ++ drm_gem_object_get(obj);
527 ++ mapping->obj = bo;
528 +
529 + /*
530 + * Executable buffers cannot cross a 16MB boundary as the program
531 +@@ -66,37 +145,48 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
532 + else
533 + align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;
534 +
535 +- bo->mmu = &priv->mmu;
536 ++ mapping->mmu = &priv->mmu;
537 + spin_lock(&priv->mm_lock);
538 +- ret = drm_mm_insert_node_generic(&priv->mm, &bo->node,
539 ++ ret = drm_mm_insert_node_generic(&priv->mm, &mapping->mmnode,
540 + size >> PAGE_SHIFT, align, color, 0);
541 + spin_unlock(&priv->mm_lock);
542 + if (ret)
543 +- return ret;
544 ++ goto err;
545 +
546 + if (!bo->is_heap) {
547 +- ret = panfrost_mmu_map(bo);
548 +- if (ret) {
549 +- spin_lock(&priv->mm_lock);
550 +- drm_mm_remove_node(&bo->node);
551 +- spin_unlock(&priv->mm_lock);
552 +- }
553 ++ ret = panfrost_mmu_map(mapping);
554 ++ if (ret)
555 ++ goto err;
556 + }
557 ++
558 ++ mutex_lock(&bo->mappings.lock);
559 ++ WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED);
560 ++ list_add_tail(&mapping->node, &bo->mappings.list);
561 ++ mutex_unlock(&bo->mappings.lock);
562 ++
563 ++err:
564 ++ if (ret)
565 ++ panfrost_gem_mapping_put(mapping);
566 + return ret;
567 + }
568 +
569 + void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
570 + {
571 +- struct panfrost_gem_object *bo = to_panfrost_bo(obj);
572 + struct panfrost_file_priv *priv = file_priv->driver_priv;
573 ++ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
574 ++ struct panfrost_gem_mapping *mapping = NULL, *iter;
575 +
576 +- if (bo->is_mapped)
577 +- panfrost_mmu_unmap(bo);
578 ++ mutex_lock(&bo->mappings.lock);
579 ++ list_for_each_entry(iter, &bo->mappings.list, node) {
580 ++ if (iter->mmu == &priv->mmu) {
581 ++ mapping = iter;
582 ++ list_del(&iter->node);
583 ++ break;
584 ++ }
585 ++ }
586 ++ mutex_unlock(&bo->mappings.lock);
587 +
588 +- spin_lock(&priv->mm_lock);
589 +- if (drm_mm_node_allocated(&bo->node))
590 +- drm_mm_remove_node(&bo->node);
591 +- spin_unlock(&priv->mm_lock);
592 ++ panfrost_gem_mapping_put(mapping);
593 + }
594 +
595 + static int panfrost_gem_pin(struct drm_gem_object *obj)
596 +@@ -136,6 +226,8 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
597 + if (!obj)
598 + return NULL;
599 +
600 ++ INIT_LIST_HEAD(&obj->mappings.list);
601 ++ mutex_init(&obj->mappings.lock);
602 + obj->base.base.funcs = &panfrost_gem_funcs;
603 +
604 + return &obj->base.base;
605 +diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
606 +index 4b17e7308764..ca1bc9019600 100644
607 +--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
608 ++++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
609 +@@ -13,23 +13,46 @@ struct panfrost_gem_object {
610 + struct drm_gem_shmem_object base;
611 + struct sg_table *sgts;
612 +
613 +- struct panfrost_mmu *mmu;
614 +- struct drm_mm_node node;
615 +- bool is_mapped :1;
616 ++ /*
617 ++ * Use a list for now. If searching a mapping ever becomes the
618 ++ * bottleneck, we should consider using an RB-tree, or even better,
619 ++ * let the core store drm_gem_object_mapping entries (where we
620 ++ * could place driver specific data) instead of drm_gem_object ones
621 ++ * in its drm_file->object_idr table.
622 ++ *
623 ++ * struct drm_gem_object_mapping {
624 ++ * struct drm_gem_object *obj;
625 ++ * void *driver_priv;
626 ++ * };
627 ++ */
628 ++ struct {
629 ++ struct list_head list;
630 ++ struct mutex lock;
631 ++ } mappings;
632 ++
633 + bool noexec :1;
634 + bool is_heap :1;
635 + };
636 +
637 ++struct panfrost_gem_mapping {
638 ++ struct list_head node;
639 ++ struct kref refcount;
640 ++ struct panfrost_gem_object *obj;
641 ++ struct drm_mm_node mmnode;
642 ++ struct panfrost_mmu *mmu;
643 ++ bool active :1;
644 ++};
645 ++
646 + static inline
647 + struct panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj)
648 + {
649 + return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base);
650 + }
651 +
652 +-static inline
653 +-struct panfrost_gem_object *drm_mm_node_to_panfrost_bo(struct drm_mm_node *node)
654 ++static inline struct panfrost_gem_mapping *
655 ++drm_mm_node_to_panfrost_mapping(struct drm_mm_node *node)
656 + {
657 +- return container_of(node, struct panfrost_gem_object, node);
658 ++ return container_of(node, struct panfrost_gem_mapping, mmnode);
659 + }
660 +
661 + struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size);
662 +@@ -49,6 +72,12 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv);
663 + void panfrost_gem_close(struct drm_gem_object *obj,
664 + struct drm_file *file_priv);
665 +
666 ++struct panfrost_gem_mapping *
667 ++panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
668 ++ struct panfrost_file_priv *priv);
669 ++void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping);
670 ++void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo);
671 ++
672 + void panfrost_gem_shrinker_init(struct drm_device *dev);
673 + void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
674 +
675 +diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
676 +index 458f0fa68111..f5dd7b29bc95 100644
677 +--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
678 ++++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
679 +@@ -39,11 +39,12 @@ panfrost_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc
680 + static bool panfrost_gem_purge(struct drm_gem_object *obj)
681 + {
682 + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
683 ++ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
684 +
685 + if (!mutex_trylock(&shmem->pages_lock))
686 + return false;
687 +
688 +- panfrost_mmu_unmap(to_panfrost_bo(obj));
689 ++ panfrost_gem_teardown_mappings(bo);
690 + drm_gem_shmem_purge_locked(obj);
691 +
692 + mutex_unlock(&shmem->pages_lock);
693 +diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
694 +index 21f34d44aac2..bbb0c5e3ca6f 100644
695 +--- a/drivers/gpu/drm/panfrost/panfrost_job.c
696 ++++ b/drivers/gpu/drm/panfrost/panfrost_job.c
697 +@@ -269,9 +269,20 @@ static void panfrost_job_cleanup(struct kref *ref)
698 + dma_fence_put(job->done_fence);
699 + dma_fence_put(job->render_done_fence);
700 +
701 +- if (job->bos) {
702 ++ if (job->mappings) {
703 + for (i = 0; i < job->bo_count; i++)
704 ++ panfrost_gem_mapping_put(job->mappings[i]);
705 ++ kvfree(job->mappings);
706 ++ }
707 ++
708 ++ if (job->bos) {
709 ++ struct panfrost_gem_object *bo;
710 ++
711 ++ for (i = 0; i < job->bo_count; i++) {
712 ++ bo = to_panfrost_bo(job->bos[i]);
713 + drm_gem_object_put_unlocked(job->bos[i]);
714 ++ }
715 ++
716 + kvfree(job->bos);
717 + }
718 +
719 +diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h
720 +index 62454128a792..bbd3ba97ff67 100644
721 +--- a/drivers/gpu/drm/panfrost/panfrost_job.h
722 ++++ b/drivers/gpu/drm/panfrost/panfrost_job.h
723 +@@ -32,6 +32,7 @@ struct panfrost_job {
724 +
725 + /* Exclusive fences we have taken from the BOs to wait for */
726 + struct dma_fence **implicit_fences;
727 ++ struct panfrost_gem_mapping **mappings;
728 + struct drm_gem_object **bos;
729 + u32 bo_count;
730 +
731 +diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
732 +index a3ed64a1f15e..763cfca886a7 100644
733 +--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
734 ++++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
735 +@@ -269,14 +269,15 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
736 + return 0;
737 + }
738 +
739 +-int panfrost_mmu_map(struct panfrost_gem_object *bo)
740 ++int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
741 + {
742 ++ struct panfrost_gem_object *bo = mapping->obj;
743 + struct drm_gem_object *obj = &bo->base.base;
744 + struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
745 + struct sg_table *sgt;
746 + int prot = IOMMU_READ | IOMMU_WRITE;
747 +
748 +- if (WARN_ON(bo->is_mapped))
749 ++ if (WARN_ON(mapping->active))
750 + return 0;
751 +
752 + if (bo->noexec)
753 +@@ -286,25 +287,28 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo)
754 + if (WARN_ON(IS_ERR(sgt)))
755 + return PTR_ERR(sgt);
756 +
757 +- mmu_map_sg(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, prot, sgt);
758 +- bo->is_mapped = true;
759 ++ mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
760 ++ prot, sgt);
761 ++ mapping->active = true;
762 +
763 + return 0;
764 + }
765 +
766 +-void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
767 ++void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
768 + {
769 ++ struct panfrost_gem_object *bo = mapping->obj;
770 + struct drm_gem_object *obj = &bo->base.base;
771 + struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
772 +- struct io_pgtable_ops *ops = bo->mmu->pgtbl_ops;
773 +- u64 iova = bo->node.start << PAGE_SHIFT;
774 +- size_t len = bo->node.size << PAGE_SHIFT;
775 ++ struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops;
776 ++ u64 iova = mapping->mmnode.start << PAGE_SHIFT;
777 ++ size_t len = mapping->mmnode.size << PAGE_SHIFT;
778 + size_t unmapped_len = 0;
779 +
780 +- if (WARN_ON(!bo->is_mapped))
781 ++ if (WARN_ON(!mapping->active))
782 + return;
783 +
784 +- dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", bo->mmu->as, iova, len);
785 ++ dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx",
786 ++ mapping->mmu->as, iova, len);
787 +
788 + while (unmapped_len < len) {
789 + size_t unmapped_page;
790 +@@ -318,8 +322,9 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
791 + unmapped_len += pgsize;
792 + }
793 +
794 +- panfrost_mmu_flush_range(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, len);
795 +- bo->is_mapped = false;
796 ++ panfrost_mmu_flush_range(pfdev, mapping->mmu,
797 ++ mapping->mmnode.start << PAGE_SHIFT, len);
798 ++ mapping->active = false;
799 + }
800 +
801 + static void mmu_tlb_inv_context_s1(void *cookie)
802 +@@ -394,10 +399,10 @@ void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv)
803 + free_io_pgtable_ops(mmu->pgtbl_ops);
804 + }
805 +
806 +-static struct panfrost_gem_object *
807 +-addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
808 ++static struct panfrost_gem_mapping *
809 ++addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
810 + {
811 +- struct panfrost_gem_object *bo = NULL;
812 ++ struct panfrost_gem_mapping *mapping = NULL;
813 + struct panfrost_file_priv *priv;
814 + struct drm_mm_node *node;
815 + u64 offset = addr >> PAGE_SHIFT;
816 +@@ -418,8 +423,9 @@ found_mmu:
817 + drm_mm_for_each_node(node, &priv->mm) {
818 + if (offset >= node->start &&
819 + offset < (node->start + node->size)) {
820 +- bo = drm_mm_node_to_panfrost_bo(node);
821 +- drm_gem_object_get(&bo->base.base);
822 ++ mapping = drm_mm_node_to_panfrost_mapping(node);
823 ++
824 ++ kref_get(&mapping->refcount);
825 + break;
826 + }
827 + }
828 +@@ -427,7 +433,7 @@ found_mmu:
829 + spin_unlock(&priv->mm_lock);
830 + out:
831 + spin_unlock(&pfdev->as_lock);
832 +- return bo;
833 ++ return mapping;
834 + }
835 +
836 + #define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
837 +@@ -436,28 +442,30 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
838 + u64 addr)
839 + {
840 + int ret, i;
841 ++ struct panfrost_gem_mapping *bomapping;
842 + struct panfrost_gem_object *bo;
843 + struct address_space *mapping;
844 + pgoff_t page_offset;
845 + struct sg_table *sgt;
846 + struct page **pages;
847 +
848 +- bo = addr_to_drm_mm_node(pfdev, as, addr);
849 +- if (!bo)
850 ++ bomapping = addr_to_mapping(pfdev, as, addr);
851 ++ if (!bomapping)
852 + return -ENOENT;
853 +
854 ++ bo = bomapping->obj;
855 + if (!bo->is_heap) {
856 + dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
857 +- bo->node.start << PAGE_SHIFT);
858 ++ bomapping->mmnode.start << PAGE_SHIFT);
859 + ret = -EINVAL;
860 + goto err_bo;
861 + }
862 +- WARN_ON(bo->mmu->as != as);
863 ++ WARN_ON(bomapping->mmu->as != as);
864 +
865 + /* Assume 2MB alignment and size multiple */
866 + addr &= ~((u64)SZ_2M - 1);
867 + page_offset = addr >> PAGE_SHIFT;
868 +- page_offset -= bo->node.start;
869 ++ page_offset -= bomapping->mmnode.start;
870 +
871 + mutex_lock(&bo->base.pages_lock);
872 +
873 +@@ -509,13 +517,14 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
874 + goto err_map;
875 + }
876 +
877 +- mmu_map_sg(pfdev, bo->mmu, addr, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
878 ++ mmu_map_sg(pfdev, bomapping->mmu, addr,
879 ++ IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
880 +
881 +- bo->is_mapped = true;
882 ++ bomapping->active = true;
883 +
884 + dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
885 +
886 +- drm_gem_object_put_unlocked(&bo->base.base);
887 ++ panfrost_gem_mapping_put(bomapping);
888 +
889 + return 0;
890 +
891 +diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.h b/drivers/gpu/drm/panfrost/panfrost_mmu.h
892 +index 7c5b6775ae23..44fc2edf63ce 100644
893 +--- a/drivers/gpu/drm/panfrost/panfrost_mmu.h
894 ++++ b/drivers/gpu/drm/panfrost/panfrost_mmu.h
895 +@@ -4,12 +4,12 @@
896 + #ifndef __PANFROST_MMU_H__
897 + #define __PANFROST_MMU_H__
898 +
899 +-struct panfrost_gem_object;
900 ++struct panfrost_gem_mapping;
901 + struct panfrost_file_priv;
902 + struct panfrost_mmu;
903 +
904 +-int panfrost_mmu_map(struct panfrost_gem_object *bo);
905 +-void panfrost_mmu_unmap(struct panfrost_gem_object *bo);
906 ++int panfrost_mmu_map(struct panfrost_gem_mapping *mapping);
907 ++void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping);
908 +
909 + int panfrost_mmu_init(struct panfrost_device *pfdev);
910 + void panfrost_mmu_fini(struct panfrost_device *pfdev);
911 +diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
912 +index 2c04e858c50a..684820448be3 100644
913 +--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
914 ++++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
915 +@@ -25,7 +25,7 @@
916 + #define V4_SHADERS_PER_COREGROUP 4
917 +
918 + struct panfrost_perfcnt {
919 +- struct panfrost_gem_object *bo;
920 ++ struct panfrost_gem_mapping *mapping;
921 + size_t bosize;
922 + void *buf;
923 + struct panfrost_file_priv *user;
924 +@@ -49,7 +49,7 @@ static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev)
925 + int ret;
926 +
927 + reinit_completion(&pfdev->perfcnt->dump_comp);
928 +- gpuva = pfdev->perfcnt->bo->node.start << PAGE_SHIFT;
929 ++ gpuva = pfdev->perfcnt->mapping->mmnode.start << PAGE_SHIFT;
930 + gpu_write(pfdev, GPU_PERFCNT_BASE_LO, gpuva);
931 + gpu_write(pfdev, GPU_PERFCNT_BASE_HI, gpuva >> 32);
932 + gpu_write(pfdev, GPU_INT_CLEAR,
933 +@@ -89,17 +89,22 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
934 + if (IS_ERR(bo))
935 + return PTR_ERR(bo);
936 +
937 +- perfcnt->bo = to_panfrost_bo(&bo->base);
938 +-
939 + /* Map the perfcnt buf in the address space attached to file_priv. */
940 +- ret = panfrost_gem_open(&perfcnt->bo->base.base, file_priv);
941 ++ ret = panfrost_gem_open(&bo->base, file_priv);
942 + if (ret)
943 + goto err_put_bo;
944 +
945 ++ perfcnt->mapping = panfrost_gem_mapping_get(to_panfrost_bo(&bo->base),
946 ++ user);
947 ++ if (!perfcnt->mapping) {
948 ++ ret = -EINVAL;
949 ++ goto err_close_bo;
950 ++ }
951 ++
952 + perfcnt->buf = drm_gem_shmem_vmap(&bo->base);
953 + if (IS_ERR(perfcnt->buf)) {
954 + ret = PTR_ERR(perfcnt->buf);
955 +- goto err_close_bo;
956 ++ goto err_put_mapping;
957 + }
958 +
959 + /*
960 +@@ -154,12 +159,17 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
961 + if (panfrost_has_hw_issue(pfdev, HW_ISSUE_8186))
962 + gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0xffffffff);
963 +
964 ++ /* The BO ref is retained by the mapping. */
965 ++ drm_gem_object_put_unlocked(&bo->base);
966 ++
967 + return 0;
968 +
969 + err_vunmap:
970 +- drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
971 ++ drm_gem_shmem_vunmap(&bo->base, perfcnt->buf);
972 ++err_put_mapping:
973 ++ panfrost_gem_mapping_put(perfcnt->mapping);
974 + err_close_bo:
975 +- panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
976 ++ panfrost_gem_close(&bo->base, file_priv);
977 + err_put_bo:
978 + drm_gem_object_put_unlocked(&bo->base);
979 + return ret;
980 +@@ -182,11 +192,11 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
981 + GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));
982 +
983 + perfcnt->user = NULL;
984 +- drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
985 ++ drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, perfcnt->buf);
986 + perfcnt->buf = NULL;
987 +- panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
988 +- drm_gem_object_put_unlocked(&perfcnt->bo->base.base);
989 +- perfcnt->bo = NULL;
990 ++ panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
991 ++ panfrost_gem_mapping_put(perfcnt->mapping);
992 ++ perfcnt->mapping = NULL;
993 + pm_runtime_mark_last_busy(pfdev->dev);
994 + pm_runtime_put_autosuspend(pfdev->dev);
995 +
996 +diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
997 +index 6c64d50c9aae..01c2eeb02aa9 100644
998 +--- a/drivers/hwmon/adt7475.c
999 ++++ b/drivers/hwmon/adt7475.c
1000 +@@ -294,9 +294,10 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn)
1001 + long reg;
1002 +
1003 + if (bypass_attn & (1 << channel))
1004 +- reg = (volt * 1024) / 2250;
1005 ++ reg = DIV_ROUND_CLOSEST(volt * 1024, 2250);
1006 + else
1007 +- reg = (volt * r[1] * 1024) / ((r[0] + r[1]) * 2250);
1008 ++ reg = DIV_ROUND_CLOSEST(volt * r[1] * 1024,
1009 ++ (r[0] + r[1]) * 2250);
1010 + return clamp_val(reg, 0, 1023) & (0xff << 2);
1011 + }
1012 +
1013 +diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
1014 +index 1f3b30b085b9..d018b20089ec 100644
1015 +--- a/drivers/hwmon/hwmon.c
1016 ++++ b/drivers/hwmon/hwmon.c
1017 +@@ -51,6 +51,7 @@ struct hwmon_device_attribute {
1018 +
1019 + #define to_hwmon_attr(d) \
1020 + container_of(d, struct hwmon_device_attribute, dev_attr)
1021 ++#define to_dev_attr(a) container_of(a, struct device_attribute, attr)
1022 +
1023 + /*
1024 + * Thermal zone information
1025 +@@ -58,7 +59,7 @@ struct hwmon_device_attribute {
1026 + * also provides the sensor index.
1027 + */
1028 + struct hwmon_thermal_data {
1029 +- struct hwmon_device *hwdev; /* Reference to hwmon device */
1030 ++ struct device *dev; /* Reference to hwmon device */
1031 + int index; /* sensor index */
1032 + };
1033 +
1034 +@@ -95,9 +96,27 @@ static const struct attribute_group *hwmon_dev_attr_groups[] = {
1035 + NULL
1036 + };
1037 +
1038 ++static void hwmon_free_attrs(struct attribute **attrs)
1039 ++{
1040 ++ int i;
1041 ++
1042 ++ for (i = 0; attrs[i]; i++) {
1043 ++ struct device_attribute *dattr = to_dev_attr(attrs[i]);
1044 ++ struct hwmon_device_attribute *hattr = to_hwmon_attr(dattr);
1045 ++
1046 ++ kfree(hattr);
1047 ++ }
1048 ++ kfree(attrs);
1049 ++}
1050 ++
1051 + static void hwmon_dev_release(struct device *dev)
1052 + {
1053 +- kfree(to_hwmon_device(dev));
1054 ++ struct hwmon_device *hwdev = to_hwmon_device(dev);
1055 ++
1056 ++ if (hwdev->group.attrs)
1057 ++ hwmon_free_attrs(hwdev->group.attrs);
1058 ++ kfree(hwdev->groups);
1059 ++ kfree(hwdev);
1060 + }
1061 +
1062 + static struct class hwmon_class = {
1063 +@@ -119,11 +138,11 @@ static DEFINE_IDA(hwmon_ida);
1064 + static int hwmon_thermal_get_temp(void *data, int *temp)
1065 + {
1066 + struct hwmon_thermal_data *tdata = data;
1067 +- struct hwmon_device *hwdev = tdata->hwdev;
1068 ++ struct hwmon_device *hwdev = to_hwmon_device(tdata->dev);
1069 + int ret;
1070 + long t;
1071 +
1072 +- ret = hwdev->chip->ops->read(&hwdev->dev, hwmon_temp, hwmon_temp_input,
1073 ++ ret = hwdev->chip->ops->read(tdata->dev, hwmon_temp, hwmon_temp_input,
1074 + tdata->index, &t);
1075 + if (ret < 0)
1076 + return ret;
1077 +@@ -137,8 +156,7 @@ static const struct thermal_zone_of_device_ops hwmon_thermal_ops = {
1078 + .get_temp = hwmon_thermal_get_temp,
1079 + };
1080 +
1081 +-static int hwmon_thermal_add_sensor(struct device *dev,
1082 +- struct hwmon_device *hwdev, int index)
1083 ++static int hwmon_thermal_add_sensor(struct device *dev, int index)
1084 + {
1085 + struct hwmon_thermal_data *tdata;
1086 + struct thermal_zone_device *tzd;
1087 +@@ -147,10 +165,10 @@ static int hwmon_thermal_add_sensor(struct device *dev,
1088 + if (!tdata)
1089 + return -ENOMEM;
1090 +
1091 +- tdata->hwdev = hwdev;
1092 ++ tdata->dev = dev;
1093 + tdata->index = index;
1094 +
1095 +- tzd = devm_thermal_zone_of_sensor_register(&hwdev->dev, index, tdata,
1096 ++ tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata,
1097 + &hwmon_thermal_ops);
1098 + /*
1099 + * If CONFIG_THERMAL_OF is disabled, this returns -ENODEV,
1100 +@@ -162,8 +180,7 @@ static int hwmon_thermal_add_sensor(struct device *dev,
1101 + return 0;
1102 + }
1103 + #else
1104 +-static int hwmon_thermal_add_sensor(struct device *dev,
1105 +- struct hwmon_device *hwdev, int index)
1106 ++static int hwmon_thermal_add_sensor(struct device *dev, int index)
1107 + {
1108 + return 0;
1109 + }
1110 +@@ -250,8 +267,7 @@ static bool is_string_attr(enum hwmon_sensor_types type, u32 attr)
1111 + (type == hwmon_fan && attr == hwmon_fan_label);
1112 + }
1113 +
1114 +-static struct attribute *hwmon_genattr(struct device *dev,
1115 +- const void *drvdata,
1116 ++static struct attribute *hwmon_genattr(const void *drvdata,
1117 + enum hwmon_sensor_types type,
1118 + u32 attr,
1119 + int index,
1120 +@@ -279,7 +295,7 @@ static struct attribute *hwmon_genattr(struct device *dev,
1121 + if ((mode & 0222) && !ops->write)
1122 + return ERR_PTR(-EINVAL);
1123 +
1124 +- hattr = devm_kzalloc(dev, sizeof(*hattr), GFP_KERNEL);
1125 ++ hattr = kzalloc(sizeof(*hattr), GFP_KERNEL);
1126 + if (!hattr)
1127 + return ERR_PTR(-ENOMEM);
1128 +
1129 +@@ -492,8 +508,7 @@ static int hwmon_num_channel_attrs(const struct hwmon_channel_info *info)
1130 + return n;
1131 + }
1132 +
1133 +-static int hwmon_genattrs(struct device *dev,
1134 +- const void *drvdata,
1135 ++static int hwmon_genattrs(const void *drvdata,
1136 + struct attribute **attrs,
1137 + const struct hwmon_ops *ops,
1138 + const struct hwmon_channel_info *info)
1139 +@@ -519,7 +534,7 @@ static int hwmon_genattrs(struct device *dev,
1140 + attr_mask &= ~BIT(attr);
1141 + if (attr >= template_size)
1142 + return -EINVAL;
1143 +- a = hwmon_genattr(dev, drvdata, info->type, attr, i,
1144 ++ a = hwmon_genattr(drvdata, info->type, attr, i,
1145 + templates[attr], ops);
1146 + if (IS_ERR(a)) {
1147 + if (PTR_ERR(a) != -ENOENT)
1148 +@@ -533,8 +548,7 @@ static int hwmon_genattrs(struct device *dev,
1149 + }
1150 +
1151 + static struct attribute **
1152 +-__hwmon_create_attrs(struct device *dev, const void *drvdata,
1153 +- const struct hwmon_chip_info *chip)
1154 ++__hwmon_create_attrs(const void *drvdata, const struct hwmon_chip_info *chip)
1155 + {
1156 + int ret, i, aindex = 0, nattrs = 0;
1157 + struct attribute **attrs;
1158 +@@ -545,15 +559,17 @@ __hwmon_create_attrs(struct device *dev, const void *drvdata,
1159 + if (nattrs == 0)
1160 + return ERR_PTR(-EINVAL);
1161 +
1162 +- attrs = devm_kcalloc(dev, nattrs + 1, sizeof(*attrs), GFP_KERNEL);
1163 ++ attrs = kcalloc(nattrs + 1, sizeof(*attrs), GFP_KERNEL);
1164 + if (!attrs)
1165 + return ERR_PTR(-ENOMEM);
1166 +
1167 + for (i = 0; chip->info[i]; i++) {
1168 +- ret = hwmon_genattrs(dev, drvdata, &attrs[aindex], chip->ops,
1169 ++ ret = hwmon_genattrs(drvdata, &attrs[aindex], chip->ops,
1170 + chip->info[i]);
1171 +- if (ret < 0)
1172 ++ if (ret < 0) {
1173 ++ hwmon_free_attrs(attrs);
1174 + return ERR_PTR(ret);
1175 ++ }
1176 + aindex += ret;
1177 + }
1178 +
1179 +@@ -595,14 +611,13 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
1180 + for (i = 0; groups[i]; i++)
1181 + ngroups++;
1182 +
1183 +- hwdev->groups = devm_kcalloc(dev, ngroups, sizeof(*groups),
1184 +- GFP_KERNEL);
1185 ++ hwdev->groups = kcalloc(ngroups, sizeof(*groups), GFP_KERNEL);
1186 + if (!hwdev->groups) {
1187 + err = -ENOMEM;
1188 + goto free_hwmon;
1189 + }
1190 +
1191 +- attrs = __hwmon_create_attrs(dev, drvdata, chip);
1192 ++ attrs = __hwmon_create_attrs(drvdata, chip);
1193 + if (IS_ERR(attrs)) {
1194 + err = PTR_ERR(attrs);
1195 + goto free_hwmon;
1196 +@@ -647,8 +662,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
1197 + hwmon_temp_input, j))
1198 + continue;
1199 + if (info[i]->config[j] & HWMON_T_INPUT) {
1200 +- err = hwmon_thermal_add_sensor(dev,
1201 +- hwdev, j);
1202 ++ err = hwmon_thermal_add_sensor(hdev, j);
1203 + if (err) {
1204 + device_unregister(hdev);
1205 + /*
1206 +@@ -667,7 +681,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
1207 + return hdev;
1208 +
1209 + free_hwmon:
1210 +- kfree(hwdev);
1211 ++ hwmon_dev_release(hdev);
1212 + ida_remove:
1213 + ida_simple_remove(&hwmon_ida, id);
1214 + return ERR_PTR(err);
1215 +diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
1216 +index f3dd2a17bd42..2e97e56c72c7 100644
1217 +--- a/drivers/hwmon/nct7802.c
1218 ++++ b/drivers/hwmon/nct7802.c
1219 +@@ -23,8 +23,8 @@
1220 + static const u8 REG_VOLTAGE[5] = { 0x09, 0x0a, 0x0c, 0x0d, 0x0e };
1221 +
1222 + static const u8 REG_VOLTAGE_LIMIT_LSB[2][5] = {
1223 +- { 0x40, 0x00, 0x42, 0x44, 0x46 },
1224 +- { 0x3f, 0x00, 0x41, 0x43, 0x45 },
1225 ++ { 0x46, 0x00, 0x40, 0x42, 0x44 },
1226 ++ { 0x45, 0x00, 0x3f, 0x41, 0x43 },
1227 + };
1228 +
1229 + static const u8 REG_VOLTAGE_LIMIT_MSB[5] = { 0x48, 0x00, 0x47, 0x47, 0x48 };
1230 +@@ -58,6 +58,8 @@ static const u8 REG_VOLTAGE_LIMIT_MSB_SHIFT[2][5] = {
1231 + struct nct7802_data {
1232 + struct regmap *regmap;
1233 + struct mutex access_lock; /* for multi-byte read and write operations */
1234 ++ u8 in_status;
1235 ++ struct mutex in_alarm_lock;
1236 + };
1237 +
1238 + static ssize_t temp_type_show(struct device *dev,
1239 +@@ -368,6 +370,66 @@ static ssize_t in_store(struct device *dev, struct device_attribute *attr,
1240 + return err ? : count;
1241 + }
1242 +
1243 ++static ssize_t in_alarm_show(struct device *dev, struct device_attribute *attr,
1244 ++ char *buf)
1245 ++{
1246 ++ struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
1247 ++ struct nct7802_data *data = dev_get_drvdata(dev);
1248 ++ int volt, min, max, ret;
1249 ++ unsigned int val;
1250 ++
1251 ++ mutex_lock(&data->in_alarm_lock);
1252 ++
1253 ++ /*
1254 ++ * The SMI Voltage status register is the only register giving a status
1255 ++ * for voltages. A bit is set for each input crossing a threshold, in
1256 ++ * both direction, but the "inside" or "outside" limits info is not
1257 ++ * available. Also this register is cleared on read.
1258 ++ * Note: this is not explicitly spelled out in the datasheet, but
1259 ++ * from experiment.
1260 ++ * To deal with this we use a status cache with one validity bit and
1261 ++ * one status bit for each input. Validity is cleared at startup and
1262 ++ * each time the register reports a change, and the status is processed
1263 ++ * by software based on current input value and limits.
1264 ++ */
1265 ++ ret = regmap_read(data->regmap, 0x1e, &val); /* SMI Voltage status */
1266 ++ if (ret < 0)
1267 ++ goto abort;
1268 ++
1269 ++ /* invalidate cached status for all inputs crossing a threshold */
1270 ++ data->in_status &= ~((val & 0x0f) << 4);
1271 ++
1272 ++ /* if cached status for requested input is invalid, update it */
1273 ++ if (!(data->in_status & (0x10 << sattr->index))) {
1274 ++ ret = nct7802_read_voltage(data, sattr->nr, 0);
1275 ++ if (ret < 0)
1276 ++ goto abort;
1277 ++ volt = ret;
1278 ++
1279 ++ ret = nct7802_read_voltage(data, sattr->nr, 1);
1280 ++ if (ret < 0)
1281 ++ goto abort;
1282 ++ min = ret;
1283 ++
1284 ++ ret = nct7802_read_voltage(data, sattr->nr, 2);
1285 ++ if (ret < 0)
1286 ++ goto abort;
1287 ++ max = ret;
1288 ++
1289 ++ if (volt < min || volt > max)
1290 ++ data->in_status |= (1 << sattr->index);
1291 ++ else
1292 ++ data->in_status &= ~(1 << sattr->index);
1293 ++
1294 ++ data->in_status |= 0x10 << sattr->index;
1295 ++ }
1296 ++
1297 ++ ret = sprintf(buf, "%u\n", !!(data->in_status & (1 << sattr->index)));
1298 ++abort:
1299 ++ mutex_unlock(&data->in_alarm_lock);
1300 ++ return ret;
1301 ++}
1302 ++
1303 + static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
1304 + char *buf)
1305 + {
1306 +@@ -660,7 +722,7 @@ static const struct attribute_group nct7802_temp_group = {
1307 + static SENSOR_DEVICE_ATTR_2_RO(in0_input, in, 0, 0);
1308 + static SENSOR_DEVICE_ATTR_2_RW(in0_min, in, 0, 1);
1309 + static SENSOR_DEVICE_ATTR_2_RW(in0_max, in, 0, 2);
1310 +-static SENSOR_DEVICE_ATTR_2_RO(in0_alarm, alarm, 0x1e, 3);
1311 ++static SENSOR_DEVICE_ATTR_2_RO(in0_alarm, in_alarm, 0, 3);
1312 + static SENSOR_DEVICE_ATTR_2_RW(in0_beep, beep, 0x5a, 3);
1313 +
1314 + static SENSOR_DEVICE_ATTR_2_RO(in1_input, in, 1, 0);
1315 +@@ -668,19 +730,19 @@ static SENSOR_DEVICE_ATTR_2_RO(in1_input, in, 1, 0);
1316 + static SENSOR_DEVICE_ATTR_2_RO(in2_input, in, 2, 0);
1317 + static SENSOR_DEVICE_ATTR_2_RW(in2_min, in, 2, 1);
1318 + static SENSOR_DEVICE_ATTR_2_RW(in2_max, in, 2, 2);
1319 +-static SENSOR_DEVICE_ATTR_2_RO(in2_alarm, alarm, 0x1e, 0);
1320 ++static SENSOR_DEVICE_ATTR_2_RO(in2_alarm, in_alarm, 2, 0);
1321 + static SENSOR_DEVICE_ATTR_2_RW(in2_beep, beep, 0x5a, 0);
1322 +
1323 + static SENSOR_DEVICE_ATTR_2_RO(in3_input, in, 3, 0);
1324 + static SENSOR_DEVICE_ATTR_2_RW(in3_min, in, 3, 1);
1325 + static SENSOR_DEVICE_ATTR_2_RW(in3_max, in, 3, 2);
1326 +-static SENSOR_DEVICE_ATTR_2_RO(in3_alarm, alarm, 0x1e, 1);
1327 ++static SENSOR_DEVICE_ATTR_2_RO(in3_alarm, in_alarm, 3, 1);
1328 + static SENSOR_DEVICE_ATTR_2_RW(in3_beep, beep, 0x5a, 1);
1329 +
1330 + static SENSOR_DEVICE_ATTR_2_RO(in4_input, in, 4, 0);
1331 + static SENSOR_DEVICE_ATTR_2_RW(in4_min, in, 4, 1);
1332 + static SENSOR_DEVICE_ATTR_2_RW(in4_max, in, 4, 2);
1333 +-static SENSOR_DEVICE_ATTR_2_RO(in4_alarm, alarm, 0x1e, 2);
1334 ++static SENSOR_DEVICE_ATTR_2_RO(in4_alarm, in_alarm, 4, 2);
1335 + static SENSOR_DEVICE_ATTR_2_RW(in4_beep, beep, 0x5a, 2);
1336 +
1337 + static struct attribute *nct7802_in_attrs[] = {
1338 +@@ -1011,6 +1073,7 @@ static int nct7802_probe(struct i2c_client *client,
1339 + return PTR_ERR(data->regmap);
1340 +
1341 + mutex_init(&data->access_lock);
1342 ++ mutex_init(&data->in_alarm_lock);
1343 +
1344 + ret = nct7802_init_chip(data);
1345 + if (ret < 0)
1346 +diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
1347 +index a1a035270cab..b273e421e910 100644
1348 +--- a/drivers/infiniband/ulp/isert/ib_isert.c
1349 ++++ b/drivers/infiniband/ulp/isert/ib_isert.c
1350 +@@ -2575,17 +2575,6 @@ isert_wait4logout(struct isert_conn *isert_conn)
1351 + }
1352 + }
1353 +
1354 +-static void
1355 +-isert_wait4cmds(struct iscsi_conn *conn)
1356 +-{
1357 +- isert_info("iscsi_conn %p\n", conn);
1358 +-
1359 +- if (conn->sess) {
1360 +- target_sess_cmd_list_set_waiting(conn->sess->se_sess);
1361 +- target_wait_for_sess_cmds(conn->sess->se_sess);
1362 +- }
1363 +-}
1364 +-
1365 + /**
1366 + * isert_put_unsol_pending_cmds() - Drop commands waiting for
1367 + * unsolicitate dataout
1368 +@@ -2633,7 +2622,6 @@ static void isert_wait_conn(struct iscsi_conn *conn)
1369 +
1370 + ib_drain_qp(isert_conn->qp);
1371 + isert_put_unsol_pending_cmds(conn);
1372 +- isert_wait4cmds(conn);
1373 + isert_wait4logout(isert_conn);
1374 +
1375 + queue_work(isert_release_wq, &isert_conn->release_work);
1376 +diff --git a/drivers/input/misc/keyspan_remote.c b/drivers/input/misc/keyspan_remote.c
1377 +index 83368f1e7c4e..4650f4a94989 100644
1378 +--- a/drivers/input/misc/keyspan_remote.c
1379 ++++ b/drivers/input/misc/keyspan_remote.c
1380 +@@ -336,7 +336,8 @@ static int keyspan_setup(struct usb_device* dev)
1381 + int retval = 0;
1382 +
1383 + retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
1384 +- 0x11, 0x40, 0x5601, 0x0, NULL, 0, 0);
1385 ++ 0x11, 0x40, 0x5601, 0x0, NULL, 0,
1386 ++ USB_CTRL_SET_TIMEOUT);
1387 + if (retval) {
1388 + dev_dbg(&dev->dev, "%s - failed to set bit rate due to error: %d\n",
1389 + __func__, retval);
1390 +@@ -344,7 +345,8 @@ static int keyspan_setup(struct usb_device* dev)
1391 + }
1392 +
1393 + retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
1394 +- 0x44, 0x40, 0x0, 0x0, NULL, 0, 0);
1395 ++ 0x44, 0x40, 0x0, 0x0, NULL, 0,
1396 ++ USB_CTRL_SET_TIMEOUT);
1397 + if (retval) {
1398 + dev_dbg(&dev->dev, "%s - failed to set resume sensitivity due to error: %d\n",
1399 + __func__, retval);
1400 +@@ -352,7 +354,8 @@ static int keyspan_setup(struct usb_device* dev)
1401 + }
1402 +
1403 + retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
1404 +- 0x22, 0x40, 0x0, 0x0, NULL, 0, 0);
1405 ++ 0x22, 0x40, 0x0, 0x0, NULL, 0,
1406 ++ USB_CTRL_SET_TIMEOUT);
1407 + if (retval) {
1408 + dev_dbg(&dev->dev, "%s - failed to turn receive on due to error: %d\n",
1409 + __func__, retval);
1410 +diff --git a/drivers/input/misc/pm8xxx-vibrator.c b/drivers/input/misc/pm8xxx-vibrator.c
1411 +index ecd762f93732..53ad25eaf1a2 100644
1412 +--- a/drivers/input/misc/pm8xxx-vibrator.c
1413 ++++ b/drivers/input/misc/pm8xxx-vibrator.c
1414 +@@ -90,7 +90,7 @@ static int pm8xxx_vib_set(struct pm8xxx_vib *vib, bool on)
1415 +
1416 + if (regs->enable_mask)
1417 + rc = regmap_update_bits(vib->regmap, regs->enable_addr,
1418 +- on ? regs->enable_mask : 0, val);
1419 ++ regs->enable_mask, on ? ~0 : 0);
1420 +
1421 + return rc;
1422 + }
1423 +diff --git a/drivers/input/rmi4/rmi_smbus.c b/drivers/input/rmi4/rmi_smbus.c
1424 +index b313c579914f..2407ea43de59 100644
1425 +--- a/drivers/input/rmi4/rmi_smbus.c
1426 ++++ b/drivers/input/rmi4/rmi_smbus.c
1427 +@@ -163,6 +163,7 @@ static int rmi_smb_write_block(struct rmi_transport_dev *xport, u16 rmiaddr,
1428 + /* prepare to write next block of bytes */
1429 + cur_len -= SMB_MAX_COUNT;
1430 + databuff += SMB_MAX_COUNT;
1431 ++ rmiaddr += SMB_MAX_COUNT;
1432 + }
1433 + exit:
1434 + mutex_unlock(&rmi_smb->page_mutex);
1435 +@@ -214,6 +215,7 @@ static int rmi_smb_read_block(struct rmi_transport_dev *xport, u16 rmiaddr,
1436 + /* prepare to read next block of bytes */
1437 + cur_len -= SMB_MAX_COUNT;
1438 + databuff += SMB_MAX_COUNT;
1439 ++ rmiaddr += SMB_MAX_COUNT;
1440 + }
1441 +
1442 + retval = 0;
1443 +diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
1444 +index 2ca586fb914f..06d0ffef4a17 100644
1445 +--- a/drivers/input/tablet/aiptek.c
1446 ++++ b/drivers/input/tablet/aiptek.c
1447 +@@ -1802,14 +1802,14 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
1448 + input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0);
1449 +
1450 + /* Verify that a device really has an endpoint */
1451 +- if (intf->altsetting[0].desc.bNumEndpoints < 1) {
1452 ++ if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
1453 + dev_err(&intf->dev,
1454 + "interface has %d endpoints, but must have minimum 1\n",
1455 +- intf->altsetting[0].desc.bNumEndpoints);
1456 ++ intf->cur_altsetting->desc.bNumEndpoints);
1457 + err = -EINVAL;
1458 + goto fail3;
1459 + }
1460 +- endpoint = &intf->altsetting[0].endpoint[0].desc;
1461 ++ endpoint = &intf->cur_altsetting->endpoint[0].desc;
1462 +
1463 + /* Go set up our URB, which is called when the tablet receives
1464 + * input.
1465 +diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
1466 +index 35031228a6d0..799c94dda651 100644
1467 +--- a/drivers/input/tablet/gtco.c
1468 ++++ b/drivers/input/tablet/gtco.c
1469 +@@ -875,18 +875,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
1470 + }
1471 +
1472 + /* Sanity check that a device has an endpoint */
1473 +- if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
1474 ++ if (usbinterface->cur_altsetting->desc.bNumEndpoints < 1) {
1475 + dev_err(&usbinterface->dev,
1476 + "Invalid number of endpoints\n");
1477 + error = -EINVAL;
1478 + goto err_free_urb;
1479 + }
1480 +
1481 +- /*
1482 +- * The endpoint is always altsetting 0, we know this since we know
1483 +- * this device only has one interrupt endpoint
1484 +- */
1485 +- endpoint = &usbinterface->altsetting[0].endpoint[0].desc;
1486 ++ endpoint = &usbinterface->cur_altsetting->endpoint[0].desc;
1487 +
1488 + /* Some debug */
1489 + dev_dbg(&usbinterface->dev, "gtco # interfaces: %d\n", usbinterface->num_altsetting);
1490 +@@ -973,7 +969,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
1491 + input_dev->dev.parent = &usbinterface->dev;
1492 +
1493 + /* Setup the URB, it will be posted later on open of input device */
1494 +- endpoint = &usbinterface->altsetting[0].endpoint[0].desc;
1495 ++ endpoint = &usbinterface->cur_altsetting->endpoint[0].desc;
1496 +
1497 + usb_fill_int_urb(gtco->urbinfo,
1498 + udev,
1499 +diff --git a/drivers/input/tablet/pegasus_notetaker.c b/drivers/input/tablet/pegasus_notetaker.c
1500 +index a1f3a0cb197e..38f087404f7a 100644
1501 +--- a/drivers/input/tablet/pegasus_notetaker.c
1502 ++++ b/drivers/input/tablet/pegasus_notetaker.c
1503 +@@ -275,7 +275,7 @@ static int pegasus_probe(struct usb_interface *intf,
1504 + return -ENODEV;
1505 +
1506 + /* Sanity check that the device has an endpoint */
1507 +- if (intf->altsetting[0].desc.bNumEndpoints < 1) {
1508 ++ if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
1509 + dev_err(&intf->dev, "Invalid number of endpoints\n");
1510 + return -EINVAL;
1511 + }
1512 +diff --git a/drivers/input/touchscreen/sun4i-ts.c b/drivers/input/touchscreen/sun4i-ts.c
1513 +index 0af0fe8c40d7..742a7e96c1b5 100644
1514 +--- a/drivers/input/touchscreen/sun4i-ts.c
1515 ++++ b/drivers/input/touchscreen/sun4i-ts.c
1516 +@@ -237,6 +237,7 @@ static int sun4i_ts_probe(struct platform_device *pdev)
1517 + struct device *dev = &pdev->dev;
1518 + struct device_node *np = dev->of_node;
1519 + struct device *hwmon;
1520 ++ struct thermal_zone_device *thermal;
1521 + int error;
1522 + u32 reg;
1523 + bool ts_attached;
1524 +@@ -355,7 +356,10 @@ static int sun4i_ts_probe(struct platform_device *pdev)
1525 + if (IS_ERR(hwmon))
1526 + return PTR_ERR(hwmon);
1527 +
1528 +- devm_thermal_zone_of_sensor_register(ts->dev, 0, ts, &sun4i_ts_tz_ops);
1529 ++ thermal = devm_thermal_zone_of_sensor_register(ts->dev, 0, ts,
1530 ++ &sun4i_ts_tz_ops);
1531 ++ if (IS_ERR(thermal))
1532 ++ return PTR_ERR(thermal);
1533 +
1534 + writel(TEMP_IRQ_EN(1), ts->base + TP_INT_FIFOC);
1535 +
1536 +diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
1537 +index 3fd3e862269b..2e2ea5719c90 100644
1538 +--- a/drivers/input/touchscreen/sur40.c
1539 ++++ b/drivers/input/touchscreen/sur40.c
1540 +@@ -653,7 +653,7 @@ static int sur40_probe(struct usb_interface *interface,
1541 + int error;
1542 +
1543 + /* Check if we really have the right interface. */
1544 +- iface_desc = &interface->altsetting[0];
1545 ++ iface_desc = interface->cur_altsetting;
1546 + if (iface_desc->desc.bInterfaceClass != 0xFF)
1547 + return -ENODEV;
1548 +
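The aiptek, gtco, pegasus_notetaker and sur40 hunks above all make the same change: probe() must validate and read endpoint descriptors from intf->cur_altsetting, the altsetting actually in use, instead of assuming altsetting[0] is the active one. A minimal sketch of that probe-time pattern follows; the driver and function names are hypothetical, only the cur_altsetting usage mirrors the patch.

#include <linux/usb.h>
#include <linux/errno.h>

static int example_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct usb_host_interface *alt = intf->cur_altsetting;
	struct usb_endpoint_descriptor *endpoint;

	/*
	 * Check the *current* altsetting. altsetting[0] is merely the first
	 * descriptor in the table; a crafted device can make it differ from
	 * the active one and trigger out-of-bounds endpoint accesses.
	 */
	if (alt->desc.bNumEndpoints < 1)
		return -EINVAL;

	endpoint = &alt->endpoint[0].desc;
	if (!usb_endpoint_is_int_in(endpoint))
		return -ENODEV;

	/* ... set up URBs against 'endpoint' as the real drivers do ... */
	return 0;
}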
1549 +diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
1550 +index 568c52317757..483f7bc379fa 100644
1551 +--- a/drivers/iommu/amd_iommu_init.c
1552 ++++ b/drivers/iommu/amd_iommu_init.c
1553 +@@ -1655,27 +1655,39 @@ static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
1554 + static void init_iommu_perf_ctr(struct amd_iommu *iommu)
1555 + {
1556 + struct pci_dev *pdev = iommu->dev;
1557 +- u64 val = 0xabcd, val2 = 0;
1558 ++ u64 val = 0xabcd, val2 = 0, save_reg = 0;
1559 +
1560 + if (!iommu_feature(iommu, FEATURE_PC))
1561 + return;
1562 +
1563 + amd_iommu_pc_present = true;
1564 +
1565 ++ /* save the value to restore, if writable */
1566 ++ if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false))
1567 ++ goto pc_false;
1568 ++
1569 + /* Check if the performance counters can be written to */
1570 + if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
1571 + (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
1572 +- (val != val2)) {
1573 +- pci_err(pdev, "Unable to write to IOMMU perf counter.\n");
1574 +- amd_iommu_pc_present = false;
1575 +- return;
1576 +- }
1577 ++ (val != val2))
1578 ++ goto pc_false;
1579 ++
1580 ++ /* restore */
1581 ++ if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true))
1582 ++ goto pc_false;
1583 +
1584 + pci_info(pdev, "IOMMU performance counters supported\n");
1585 +
1586 + val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
1587 + iommu->max_banks = (u8) ((val >> 12) & 0x3f);
1588 + iommu->max_counters = (u8) ((val >> 7) & 0xf);
1589 ++
1590 ++ return;
1591 ++
1592 ++pc_false:
1593 ++ pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
1594 ++ amd_iommu_pc_present = false;
1595 ++ return;
1596 + }
1597 +
1598 + static ssize_t amd_iommu_show_cap(struct device *dev,
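The init_iommu_perf_ctr() change above turns the destructive write test into save / test / restore, with every failure funnelled through a single pc_false label. Stripped of the driver specifics, the probing idiom reduces to the sketch below; the register accessor is a stand-in for iommu_pc_get_set_reg(), not its real signature.

#include <stdbool.h>
#include <stdint.h>

typedef int (*reg_rw_t)(void *ctx, uint64_t *val, bool is_write);

/* Probe whether a register is writable without clobbering its contents. */
static bool probe_writable_reg(void *ctx, reg_rw_t rw)
{
	uint64_t val = 0xabcd, readback = 0, saved = 0;

	if (rw(ctx, &saved, false))		/* save current value */
		return false;
	if (rw(ctx, &val, true) ||		/* write a test pattern */
	    rw(ctx, &readback, false) ||	/* read it back */
	    val != readback)
		return false;
	return rw(ctx, &saved, true) == 0;	/* restore the saved value */
}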
1599 +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
1600 +index e84c5dfe146f..dd5db856dcaf 100644
1601 +--- a/drivers/iommu/intel-iommu.c
1602 ++++ b/drivers/iommu/intel-iommu.c
1603 +@@ -5132,7 +5132,8 @@ static void dmar_remove_one_dev_info(struct device *dev)
1604 +
1605 + spin_lock_irqsave(&device_domain_lock, flags);
1606 + info = dev->archdata.iommu;
1607 +- if (info)
1608 ++ if (info && info != DEFER_DEVICE_DOMAIN_INFO
1609 ++ && info != DUMMY_DEVICE_DOMAIN_INFO)
1610 + __dmar_remove_one_dev_info(info);
1611 + spin_unlock_irqrestore(&device_domain_lock, flags);
1612 + }
1613 +diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
1614 +index a5c73f3d5f79..2bf74595610f 100644
1615 +--- a/drivers/leds/leds-gpio.c
1616 ++++ b/drivers/leds/leds-gpio.c
1617 +@@ -151,9 +151,14 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
1618 + struct gpio_led led = {};
1619 + const char *state = NULL;
1620 +
1621 ++ /*
1622 ++ * Acquire gpiod from DT with uninitialized label, which
1623 ++ * will be updated after the LED class device is registered.
1624 ++ * Only then is the final LED name known.
1625 ++ */
1626 + led.gpiod = devm_fwnode_get_gpiod_from_child(dev, NULL, child,
1627 + GPIOD_ASIS,
1628 +- led.name);
1629 ++ NULL);
1630 + if (IS_ERR(led.gpiod)) {
1631 + fwnode_handle_put(child);
1632 + return ERR_CAST(led.gpiod);
1633 +@@ -186,6 +191,9 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
1634 + fwnode_handle_put(child);
1635 + return ERR_PTR(ret);
1636 + }
1637 ++ /* Set gpiod label to match the corresponding LED name. */
1638 ++ gpiod_set_consumer_name(led_dat->gpiod,
1639 ++ led_dat->cdev.dev->kobj.name);
1640 + priv->num_leds++;
1641 + }
1642 +
1643 +diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
1644 +index 21bb96ce4cd6..58868d7129eb 100644
1645 +--- a/drivers/media/v4l2-core/v4l2-ioctl.c
1646 ++++ b/drivers/media/v4l2-core/v4l2-ioctl.c
1647 +@@ -1605,12 +1605,12 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
1648 + case V4L2_BUF_TYPE_VBI_CAPTURE:
1649 + if (unlikely(!ops->vidioc_s_fmt_vbi_cap))
1650 + break;
1651 +- CLEAR_AFTER_FIELD(p, fmt.vbi);
1652 ++ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
1653 + return ops->vidioc_s_fmt_vbi_cap(file, fh, arg);
1654 + case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
1655 + if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_cap))
1656 + break;
1657 +- CLEAR_AFTER_FIELD(p, fmt.sliced);
1658 ++ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
1659 + return ops->vidioc_s_fmt_sliced_vbi_cap(file, fh, arg);
1660 + case V4L2_BUF_TYPE_VIDEO_OUTPUT:
1661 + if (unlikely(!ops->vidioc_s_fmt_vid_out))
1662 +@@ -1636,22 +1636,22 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
1663 + case V4L2_BUF_TYPE_VBI_OUTPUT:
1664 + if (unlikely(!ops->vidioc_s_fmt_vbi_out))
1665 + break;
1666 +- CLEAR_AFTER_FIELD(p, fmt.vbi);
1667 ++ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
1668 + return ops->vidioc_s_fmt_vbi_out(file, fh, arg);
1669 + case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
1670 + if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_out))
1671 + break;
1672 +- CLEAR_AFTER_FIELD(p, fmt.sliced);
1673 ++ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
1674 + return ops->vidioc_s_fmt_sliced_vbi_out(file, fh, arg);
1675 + case V4L2_BUF_TYPE_SDR_CAPTURE:
1676 + if (unlikely(!ops->vidioc_s_fmt_sdr_cap))
1677 + break;
1678 +- CLEAR_AFTER_FIELD(p, fmt.sdr);
1679 ++ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
1680 + return ops->vidioc_s_fmt_sdr_cap(file, fh, arg);
1681 + case V4L2_BUF_TYPE_SDR_OUTPUT:
1682 + if (unlikely(!ops->vidioc_s_fmt_sdr_out))
1683 + break;
1684 +- CLEAR_AFTER_FIELD(p, fmt.sdr);
1685 ++ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
1686 + return ops->vidioc_s_fmt_sdr_out(file, fh, arg);
1687 + case V4L2_BUF_TYPE_META_CAPTURE:
1688 + if (unlikely(!ops->vidioc_s_fmt_meta_cap))
1689 +@@ -1707,12 +1707,12 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
1690 + case V4L2_BUF_TYPE_VBI_CAPTURE:
1691 + if (unlikely(!ops->vidioc_try_fmt_vbi_cap))
1692 + break;
1693 +- CLEAR_AFTER_FIELD(p, fmt.vbi);
1694 ++ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
1695 + return ops->vidioc_try_fmt_vbi_cap(file, fh, arg);
1696 + case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
1697 + if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_cap))
1698 + break;
1699 +- CLEAR_AFTER_FIELD(p, fmt.sliced);
1700 ++ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
1701 + return ops->vidioc_try_fmt_sliced_vbi_cap(file, fh, arg);
1702 + case V4L2_BUF_TYPE_VIDEO_OUTPUT:
1703 + if (unlikely(!ops->vidioc_try_fmt_vid_out))
1704 +@@ -1738,22 +1738,22 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
1705 + case V4L2_BUF_TYPE_VBI_OUTPUT:
1706 + if (unlikely(!ops->vidioc_try_fmt_vbi_out))
1707 + break;
1708 +- CLEAR_AFTER_FIELD(p, fmt.vbi);
1709 ++ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
1710 + return ops->vidioc_try_fmt_vbi_out(file, fh, arg);
1711 + case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
1712 + if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_out))
1713 + break;
1714 +- CLEAR_AFTER_FIELD(p, fmt.sliced);
1715 ++ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
1716 + return ops->vidioc_try_fmt_sliced_vbi_out(file, fh, arg);
1717 + case V4L2_BUF_TYPE_SDR_CAPTURE:
1718 + if (unlikely(!ops->vidioc_try_fmt_sdr_cap))
1719 + break;
1720 +- CLEAR_AFTER_FIELD(p, fmt.sdr);
1721 ++ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
1722 + return ops->vidioc_try_fmt_sdr_cap(file, fh, arg);
1723 + case V4L2_BUF_TYPE_SDR_OUTPUT:
1724 + if (unlikely(!ops->vidioc_try_fmt_sdr_out))
1725 + break;
1726 +- CLEAR_AFTER_FIELD(p, fmt.sdr);
1727 ++ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
1728 + return ops->vidioc_try_fmt_sdr_out(file, fh, arg);
1729 + case V4L2_BUF_TYPE_META_CAPTURE:
1730 + if (unlikely(!ops->vidioc_try_fmt_meta_cap))
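The v4l2-ioctl.c hunks above all correct the argument to CLEAR_AFTER_FIELD(): the macro zeroes everything in the ioctl payload after the named member, so naming the union member itself (fmt.vbi, fmt.sliced, fmt.sdr) wiped the very format data the driver was about to use, while naming the last meaningful field (flags, io_size, buffersize) preserves it. A reduced, self-contained model of the macro's behaviour, with a made-up struct, is:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct fmt { int width, height, flags, reserved[4]; };

#define CLEAR_AFTER_FIELD(p, field)					\
	memset((char *)(p) + offsetof(typeof(*(p)), field) +		\
	       sizeof((p)->field), 0,					\
	       sizeof(*(p)) - offsetof(typeof(*(p)), field) -		\
	       sizeof((p)->field))

int main(void)
{
	struct fmt f = { 640, 480, 3, { 1, 2, 3, 4 } };

	/* Keep everything up to and including .flags, zero the rest. */
	CLEAR_AFTER_FIELD(&f, flags);
	printf("%d %d %d %d\n", f.width, f.height, f.flags, f.reserved[0]);
	return 0;				/* prints: 640 480 3 0 */
}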
1731 +diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
1732 +index 7bc950520fd9..403ac44a7378 100644
1733 +--- a/drivers/mmc/host/sdhci-tegra.c
1734 ++++ b/drivers/mmc/host/sdhci-tegra.c
1735 +@@ -386,7 +386,7 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
1736 + misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
1737 + if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
1738 + misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
1739 +- if (soc_data->nvquirks & SDHCI_MISC_CTRL_ENABLE_SDR50)
1740 ++ if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
1741 + clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
1742 + }
1743 +
1744 +diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
1745 +index 5f9df2dbde06..4478b94d4791 100644
1746 +--- a/drivers/mmc/host/sdhci.c
1747 ++++ b/drivers/mmc/host/sdhci.c
1748 +@@ -3902,11 +3902,13 @@ int sdhci_setup_host(struct sdhci_host *host)
1749 + if (host->ops->get_min_clock)
1750 + mmc->f_min = host->ops->get_min_clock(host);
1751 + else if (host->version >= SDHCI_SPEC_300) {
1752 +- if (host->clk_mul) {
1753 +- mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
1754 ++ if (host->clk_mul)
1755 + max_clk = host->max_clk * host->clk_mul;
1756 +- } else
1757 +- mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
1758 ++ /*
1759 ++ * Divided Clock Mode minimum clock rate is always less than
1760 ++ * Programmable Clock Mode minimum clock rate.
1761 ++ */
1762 ++ mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
1763 + } else
1764 + mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
1765 +
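The sdhci.c hunk above drops the clk_mul special case when computing mmc->f_min: as the new comment says, the Divided Clock Mode floor (max_clk divided by the SDHCI 3.0 maximum divisor) is always below the Programmable Clock Mode floor (max_clk * clk_mul / 1024 with clk_mul >= 1), so it is the correct minimum either way. A throwaway check of that arithmetic, with an assumed 200 MHz base clock:

#include <stdio.h>

#define SDHCI_MAX_DIV_SPEC_300	2046

int main(void)
{
	unsigned int max_clk = 200000000;	/* assumed base clock */
	unsigned int clk_mul = 8;		/* any supported value >= 1 */

	unsigned int divided_min = max_clk / SDHCI_MAX_DIV_SPEC_300;
	unsigned int programmable_min = max_clk / 1024 * clk_mul;

	/* Divided mode reaches the lower rate, so f_min never needs clk_mul. */
	printf("divided=%u Hz, programmable=%u Hz\n",
	       divided_min, programmable_min);
	return 0;
}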
1766 +diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
1767 +index bb90757ecace..4cbb764c9822 100644
1768 +--- a/drivers/mmc/host/sdhci_am654.c
1769 ++++ b/drivers/mmc/host/sdhci_am654.c
1770 +@@ -236,6 +236,22 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg)
1771 + writeb(val, host->ioaddr + reg);
1772 + }
1773 +
1774 ++static int sdhci_am654_execute_tuning(struct mmc_host *mmc, u32 opcode)
1775 ++{
1776 ++ struct sdhci_host *host = mmc_priv(mmc);
1777 ++ int err = sdhci_execute_tuning(mmc, opcode);
1778 ++
1779 ++ if (err)
1780 ++ return err;
1781 ++ /*
1782 ++ * Tuning data remains in the buffer after tuning.
1783 ++ * Do a command and data reset to get rid of it.
1784 ++ */
1785 ++ sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1786 ++
1787 ++ return 0;
1788 ++}
1789 ++
1790 + static struct sdhci_ops sdhci_am654_ops = {
1791 + .get_max_clock = sdhci_pltfm_clk_get_max_clock,
1792 + .get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
1793 +@@ -249,8 +265,7 @@ static struct sdhci_ops sdhci_am654_ops = {
1794 +
1795 + static const struct sdhci_pltfm_data sdhci_am654_pdata = {
1796 + .ops = &sdhci_am654_ops,
1797 +- .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
1798 +- SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
1799 ++ .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
1800 + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1801 + };
1802 +
1803 +@@ -272,8 +287,7 @@ static struct sdhci_ops sdhci_j721e_8bit_ops = {
1804 +
1805 + static const struct sdhci_pltfm_data sdhci_j721e_8bit_pdata = {
1806 + .ops = &sdhci_j721e_8bit_ops,
1807 +- .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
1808 +- SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
1809 ++ .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
1810 + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1811 + };
1812 +
1813 +@@ -295,8 +309,7 @@ static struct sdhci_ops sdhci_j721e_4bit_ops = {
1814 +
1815 + static const struct sdhci_pltfm_data sdhci_j721e_4bit_pdata = {
1816 + .ops = &sdhci_j721e_4bit_ops,
1817 +- .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
1818 +- SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
1819 ++ .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
1820 + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1821 + };
1822 +
1823 +@@ -480,6 +493,8 @@ static int sdhci_am654_probe(struct platform_device *pdev)
1824 + goto pm_runtime_put;
1825 + }
1826 +
1827 ++ host->mmc_host_ops.execute_tuning = sdhci_am654_execute_tuning;
1828 ++
1829 + ret = sdhci_am654_init(host);
1830 + if (ret)
1831 + goto pm_runtime_put;
1832 +diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
1833 +index 2e57122f02fb..2f5c287eac95 100644
1834 +--- a/drivers/net/can/slcan.c
1835 ++++ b/drivers/net/can/slcan.c
1836 +@@ -344,9 +344,16 @@ static void slcan_transmit(struct work_struct *work)
1837 + */
1838 + static void slcan_write_wakeup(struct tty_struct *tty)
1839 + {
1840 +- struct slcan *sl = tty->disc_data;
1841 ++ struct slcan *sl;
1842 ++
1843 ++ rcu_read_lock();
1844 ++ sl = rcu_dereference(tty->disc_data);
1845 ++ if (!sl)
1846 ++ goto out;
1847 +
1848 + schedule_work(&sl->tx_work);
1849 ++out:
1850 ++ rcu_read_unlock();
1851 + }
1852 +
1853 + /* Send a can_frame to a TTY queue. */
1854 +@@ -644,10 +651,11 @@ static void slcan_close(struct tty_struct *tty)
1855 + return;
1856 +
1857 + spin_lock_bh(&sl->lock);
1858 +- tty->disc_data = NULL;
1859 ++ rcu_assign_pointer(tty->disc_data, NULL);
1860 + sl->tty = NULL;
1861 + spin_unlock_bh(&sl->lock);
1862 +
1863 ++ synchronize_rcu();
1864 + flush_work(&sl->tx_work);
1865 +
1866 + /* Flush network side */
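The slcan change above (and the identical slip.c change further down) closes a use-after-free: the tty write-wakeup callback could still be running while the close path tears the channel down. The fix publishes NULL with rcu_assign_pointer(), makes the wakeup path re-check the pointer under rcu_read_lock(), and waits with synchronize_rcu() before flushing the work. The reader/writer pairing, condensed here around a stand-in line-discipline structure, is roughly:

#include <linux/rcupdate.h>
#include <linux/tty.h>
#include <linux/workqueue.h>

struct my_ldisc {
	struct work_struct tx_work;
};

static void my_ldisc_write_wakeup(struct tty_struct *tty)
{
	struct my_ldisc *ld;

	rcu_read_lock();
	ld = rcu_dereference(tty->disc_data);	/* may be NULL after close */
	if (ld)
		schedule_work(&ld->tx_work);
	rcu_read_unlock();
}

static void my_ldisc_close(struct tty_struct *tty)
{
	struct my_ldisc *ld = tty->disc_data;

	rcu_assign_pointer(tty->disc_data, NULL);	/* unpublish */
	synchronize_rcu();	/* wait out any wakeup already in flight */
	flush_work(&ld->tx_work);	/* no new work can be queued now */
}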
1867 +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
1868 +index 1de51811fcb4..8f909d57501f 100644
1869 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
1870 ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
1871 +@@ -2164,8 +2164,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
1872 + DMA_END_ADDR);
1873 +
1874 + /* Initialize Tx NAPI */
1875 +- netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
1876 +- NAPI_POLL_WEIGHT);
1877 ++ netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
1878 ++ NAPI_POLL_WEIGHT);
1879 + }
1880 +
1881 + /* Initialize a RDMA ring */
1882 +diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1883 +index 58f89f6a040f..97ff8608f0ab 100644
1884 +--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1885 ++++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1886 +@@ -2448,6 +2448,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1887 +
1888 + if (!is_offload(adapter))
1889 + return -EOPNOTSUPP;
1890 ++ if (!capable(CAP_NET_ADMIN))
1891 ++ return -EPERM;
1892 + if (!(adapter->flags & FULL_INIT_DONE))
1893 + return -EIO; /* need the memory controllers */
1894 + if (copy_from_user(&t, useraddr, sizeof(t)))
1895 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
1896 +index 778dab1af8fc..f260dd96873b 100644
1897 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
1898 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
1899 +@@ -180,7 +180,7 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
1900 +
1901 + struct tx_sync_info {
1902 + u64 rcd_sn;
1903 +- s32 sync_len;
1904 ++ u32 sync_len;
1905 + int nr_frags;
1906 + skb_frag_t frags[MAX_SKB_FRAGS];
1907 + };
1908 +@@ -193,13 +193,14 @@ enum mlx5e_ktls_sync_retval {
1909 +
1910 + static enum mlx5e_ktls_sync_retval
1911 + tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
1912 +- u32 tcp_seq, struct tx_sync_info *info)
1913 ++ u32 tcp_seq, int datalen, struct tx_sync_info *info)
1914 + {
1915 + struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
1916 + enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
1917 + struct tls_record_info *record;
1918 + int remaining, i = 0;
1919 + unsigned long flags;
1920 ++ bool ends_before;
1921 +
1922 + spin_lock_irqsave(&tx_ctx->lock, flags);
1923 + record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);
1924 +@@ -209,9 +210,21 @@ tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
1925 + goto out;
1926 + }
1927 +
1928 +- if (unlikely(tcp_seq < tls_record_start_seq(record))) {
1929 +- ret = tls_record_is_start_marker(record) ?
1930 +- MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
1931 ++ /* There are the following cases:
1932 ++ * 1. packet ends before start marker: bypass offload.
1933 ++ * 2. packet starts before start marker and ends after it: drop,
1934 ++ * not supported, breaks contract with kernel.
1935 ++ * 3. packet ends before tls record info starts: drop,
1936 ++ * this packet was already acknowledged and its record info
1937 ++ * was released.
1938 ++ */
1939 ++ ends_before = before(tcp_seq + datalen, tls_record_start_seq(record));
1940 ++
1941 ++ if (unlikely(tls_record_is_start_marker(record))) {
1942 ++ ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
1943 ++ goto out;
1944 ++ } else if (ends_before) {
1945 ++ ret = MLX5E_KTLS_SYNC_FAIL;
1946 + goto out;
1947 + }
1948 +
1949 +@@ -337,7 +350,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
1950 + u8 num_wqebbs;
1951 + int i = 0;
1952 +
1953 +- ret = tx_sync_info_get(priv_tx, seq, &info);
1954 ++ ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
1955 + if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
1956 + if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
1957 + stats->tls_skip_no_sync_data++;
1958 +@@ -351,14 +364,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
1959 + goto err_out;
1960 + }
1961 +
1962 +- if (unlikely(info.sync_len < 0)) {
1963 +- if (likely(datalen <= -info.sync_len))
1964 +- return MLX5E_KTLS_SYNC_DONE;
1965 +-
1966 +- stats->tls_drop_bypass_req++;
1967 +- goto err_out;
1968 +- }
1969 +-
1970 + stats->tls_ooo++;
1971 +
1972 + tx_post_resync_params(sq, priv_tx, info.rcd_sn);
1973 +@@ -378,8 +383,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
1974 + if (unlikely(contig_wqebbs_room < num_wqebbs))
1975 + mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
1976 +
1977 +- tx_post_resync_params(sq, priv_tx, info.rcd_sn);
1978 +-
1979 + for (; i < info.nr_frags; i++) {
1980 + unsigned int orig_fsz, frag_offset = 0, n = 0;
1981 + skb_frag_t *f = &info.frags[i];
1982 +@@ -455,12 +458,18 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
1983 + enum mlx5e_ktls_sync_retval ret =
1984 + mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);
1985 +
1986 +- if (likely(ret == MLX5E_KTLS_SYNC_DONE))
1987 ++ switch (ret) {
1988 ++ case MLX5E_KTLS_SYNC_DONE:
1989 + *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
1990 +- else if (ret == MLX5E_KTLS_SYNC_FAIL)
1991 ++ break;
1992 ++ case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
1993 ++ if (likely(!skb->decrypted))
1994 ++ goto out;
1995 ++ WARN_ON_ONCE(1);
1996 ++ /* fall-through */
1997 ++ default: /* MLX5E_KTLS_SYNC_FAIL */
1998 + goto err_out;
1999 +- else /* ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA */
2000 +- goto out;
2001 ++ }
2002 + }
2003 +
2004 + priv_tx->expected_seq = seq + datalen;
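The tx_sync_info_get() rework above decides what to do with an out-of-order TLS segment by asking whether it ends before the current record begins (bypass offload when that record is the start marker) or overlaps state the driver can no longer resynchronize against (drop), per the three cases enumerated in the new comment. The comparison uses the kernel's wrap-safe before() helper on 32-bit TCP sequence numbers; a tiny standalone model of the two outcomes, with before() re-implemented only for the demonstration:

#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "seq1 comes before seq2", as in include/net/tcp.h. */
static int before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

int main(void)
{
	uint32_t record_start = 1000;	/* tls_record_start_seq(record) */
	uint32_t tcp_seq = 900;		/* first byte of the segment */

	/* ends_before = before(tcp_seq + datalen, record_start) */
	printf("50-byte segment: ends_before=%d\n",
	       before(tcp_seq + 50, record_start));	/* 1: bypass offload */
	printf("200-byte segment: ends_before=%d\n",
	       before(tcp_seq + 200, record_start));	/* 0: must drop */
	return 0;
}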
2005 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
2006 +index 96711e34d248..1f9107d83848 100644
2007 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
2008 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
2009 +@@ -3951,6 +3951,13 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
2010 + u32 rate_mbps;
2011 + int err;
2012 +
2013 ++ vport_num = rpriv->rep->vport;
2014 ++ if (vport_num >= MLX5_VPORT_ECPF) {
2015 ++ NL_SET_ERR_MSG_MOD(extack,
2016 ++ "Ingress rate limit is supported only for Eswitch ports connected to VFs");
2017 ++ return -EOPNOTSUPP;
2018 ++ }
2019 ++
2020 + esw = priv->mdev->priv.eswitch;
2021 + /* rate is given in bytes/sec.
2022 + * First convert to bits/sec and then round to the nearest mbit/secs.
2023 +@@ -3959,8 +3966,6 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
2024 + * 1 mbit/sec.
2025 + */
2026 + rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
2027 +- vport_num = rpriv->rep->vport;
2028 +-
2029 + err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
2030 + if (err)
2031 + NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
2032 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
2033 +index 9004a07e457a..5acfdea3a75a 100644
2034 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
2035 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
2036 +@@ -858,7 +858,7 @@ out:
2037 + */
2038 + #define ESW_SIZE (16 * 1024 * 1024)
2039 + const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
2040 +- 64 * 1024, 4 * 1024 };
2041 ++ 64 * 1024, 128 };
2042 +
2043 + static int
2044 + get_sz_from_pool(struct mlx5_eswitch *esw)
2045 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
2046 +index 051ab845b501..c96a0e501007 100644
2047 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
2048 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
2049 +@@ -1569,6 +1569,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
2050 + { PCI_VDEVICE(MELLANOX, 0x101d) }, /* ConnectX-6 Dx */
2051 + { PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */
2052 + { PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */
2053 ++ { PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */
2054 + { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
2055 + { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
2056 + { PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
2057 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
2058 +index 51803eef13dd..c7f10d4f8f8d 100644
2059 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
2060 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
2061 +@@ -1,6 +1,7 @@
2062 + // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2063 + /* Copyright (c) 2019 Mellanox Technologies. */
2064 +
2065 ++#include <linux/smp.h>
2066 + #include "dr_types.h"
2067 +
2068 + #define QUEUE_SIZE 128
2069 +@@ -729,7 +730,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
2070 + if (!in)
2071 + goto err_cqwq;
2072 +
2073 +- vector = smp_processor_id() % mlx5_comp_vectors_count(mdev);
2074 ++ vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev);
2075 + err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn);
2076 + if (err) {
2077 + kvfree(in);
2078 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
2079 +index 3d587d0bdbbe..1e32e2443f73 100644
2080 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
2081 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
2082 +@@ -352,26 +352,16 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
2083 + if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
2084 + list_for_each_entry(dst, &fte->node.children, node.list) {
2085 + enum mlx5_flow_destination_type type = dst->dest_attr.type;
2086 +- u32 id;
2087 +
2088 + if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
2089 + err = -ENOSPC;
2090 + goto free_actions;
2091 + }
2092 +
2093 +- switch (type) {
2094 +- case MLX5_FLOW_DESTINATION_TYPE_COUNTER:
2095 +- id = dst->dest_attr.counter_id;
2096 ++ if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
2097 ++ continue;
2098 +
2099 +- tmp_action =
2100 +- mlx5dr_action_create_flow_counter(id);
2101 +- if (!tmp_action) {
2102 +- err = -ENOMEM;
2103 +- goto free_actions;
2104 +- }
2105 +- fs_dr_actions[fs_dr_num_actions++] = tmp_action;
2106 +- actions[num_actions++] = tmp_action;
2107 +- break;
2108 ++ switch (type) {
2109 + case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
2110 + tmp_action = create_ft_action(dev, dst);
2111 + if (!tmp_action) {
2112 +@@ -397,6 +387,32 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
2113 + }
2114 + }
2115 +
2116 ++ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
2117 ++ list_for_each_entry(dst, &fte->node.children, node.list) {
2118 ++ u32 id;
2119 ++
2120 ++ if (dst->dest_attr.type !=
2121 ++ MLX5_FLOW_DESTINATION_TYPE_COUNTER)
2122 ++ continue;
2123 ++
2124 ++ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
2125 ++ err = -ENOSPC;
2126 ++ goto free_actions;
2127 ++ }
2128 ++
2129 ++ id = dst->dest_attr.counter_id;
2130 ++ tmp_action =
2131 ++ mlx5dr_action_create_flow_counter(id);
2132 ++ if (!tmp_action) {
2133 ++ err = -ENOMEM;
2134 ++ goto free_actions;
2135 ++ }
2136 ++
2137 ++ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
2138 ++ actions[num_actions++] = tmp_action;
2139 ++ }
2140 ++ }
2141 ++
2142 + params.match_sz = match_sz;
2143 + params.match_buf = (u64 *)fte->val;
2144 +
2145 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
2146 +index 150b3a144b83..3d3cca596116 100644
2147 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
2148 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
2149 +@@ -8,6 +8,7 @@
2150 + #include <linux/string.h>
2151 + #include <linux/rhashtable.h>
2152 + #include <linux/netdevice.h>
2153 ++#include <linux/mutex.h>
2154 + #include <net/net_namespace.h>
2155 + #include <net/tc_act/tc_vlan.h>
2156 +
2157 +@@ -25,6 +26,7 @@ struct mlxsw_sp_acl {
2158 + struct mlxsw_sp_fid *dummy_fid;
2159 + struct rhashtable ruleset_ht;
2160 + struct list_head rules;
2161 ++ struct mutex rules_lock; /* Protects rules list */
2162 + struct {
2163 + struct delayed_work dw;
2164 + unsigned long interval; /* ms */
2165 +@@ -701,7 +703,9 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
2166 + goto err_ruleset_block_bind;
2167 + }
2168 +
2169 ++ mutex_lock(&mlxsw_sp->acl->rules_lock);
2170 + list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
2171 ++ mutex_unlock(&mlxsw_sp->acl->rules_lock);
2172 + block->rule_count++;
2173 + block->egress_blocker_rule_count += rule->rulei->egress_bind_blocker;
2174 + return 0;
2175 +@@ -723,7 +727,9 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
2176 +
2177 + block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker;
2178 + ruleset->ht_key.block->rule_count--;
2179 ++ mutex_lock(&mlxsw_sp->acl->rules_lock);
2180 + list_del(&rule->list);
2181 ++ mutex_unlock(&mlxsw_sp->acl->rules_lock);
2182 + if (!ruleset->ht_key.chain_index &&
2183 + mlxsw_sp_acl_ruleset_is_singular(ruleset))
2184 + mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset,
2185 +@@ -783,19 +789,18 @@ static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
2186 + struct mlxsw_sp_acl_rule *rule;
2187 + int err;
2188 +
2189 +- /* Protect internal structures from changes */
2190 +- rtnl_lock();
2191 ++ mutex_lock(&acl->rules_lock);
2192 + list_for_each_entry(rule, &acl->rules, list) {
2193 + err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
2194 + rule);
2195 + if (err)
2196 + goto err_rule_update;
2197 + }
2198 +- rtnl_unlock();
2199 ++ mutex_unlock(&acl->rules_lock);
2200 + return 0;
2201 +
2202 + err_rule_update:
2203 +- rtnl_unlock();
2204 ++ mutex_unlock(&acl->rules_lock);
2205 + return err;
2206 + }
2207 +
2208 +@@ -880,6 +885,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
2209 + acl->dummy_fid = fid;
2210 +
2211 + INIT_LIST_HEAD(&acl->rules);
2212 ++ mutex_init(&acl->rules_lock);
2213 + err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam);
2214 + if (err)
2215 + goto err_acl_ops_init;
2216 +@@ -892,6 +898,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
2217 + return 0;
2218 +
2219 + err_acl_ops_init:
2220 ++ mutex_destroy(&acl->rules_lock);
2221 + mlxsw_sp_fid_put(fid);
2222 + err_fid_get:
2223 + rhashtable_destroy(&acl->ruleset_ht);
2224 +@@ -908,6 +915,7 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
2225 +
2226 + cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
2227 + mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam);
2228 ++ mutex_destroy(&acl->rules_lock);
2229 + WARN_ON(!list_empty(&acl->rules));
2230 + mlxsw_sp_fid_put(acl->dummy_fid);
2231 + rhashtable_destroy(&acl->ruleset_ht);
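In spectrum_acl.c above, the periodic rule-activity work no longer piggybacks on the RTNL lock; a driver-private mutex is introduced and taken around list add/del as well as around the list walk in the worker. Reduced to its shape (everything except the mutex and list handling is a stand-in):

#include <linux/list.h>
#include <linux/mutex.h>

struct my_rule {
	struct list_head list;
};

struct my_acl {
	struct list_head rules;
	struct mutex rules_lock;	/* protects 'rules' */
};

static int my_rule_activity_update(struct my_rule *rule)
{
	return 0;	/* stand-in for the real hardware query */
}

static int my_acl_rules_activity_update(struct my_acl *acl)
{
	struct my_rule *rule;
	int err = 0;

	mutex_lock(&acl->rules_lock);
	list_for_each_entry(rule, &acl->rules, list) {
		err = my_rule_activity_update(rule);
		if (err)
			break;
	}
	mutex_unlock(&acl->rules_lock);
	return err;
}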
2232 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
2233 +index 1c14c051ee52..63e7a058b7c6 100644
2234 +--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
2235 ++++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
2236 +@@ -299,22 +299,17 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
2237 + u64 len;
2238 + int err;
2239 +
2240 ++ if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
2241 ++ this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
2242 ++ dev_kfree_skb_any(skb);
2243 ++ return NETDEV_TX_OK;
2244 ++ }
2245 ++
2246 + memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
2247 +
2248 + if (mlxsw_core_skb_transmit_busy(mlxsw_sx->core, &tx_info))
2249 + return NETDEV_TX_BUSY;
2250 +
2251 +- if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
2252 +- struct sk_buff *skb_orig = skb;
2253 +-
2254 +- skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
2255 +- if (!skb) {
2256 +- this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
2257 +- dev_kfree_skb_any(skb_orig);
2258 +- return NETDEV_TX_OK;
2259 +- }
2260 +- dev_consume_skb_any(skb_orig);
2261 +- }
2262 + mlxsw_sx_txhdr_construct(skb, &tx_info);
2263 + /* TX header is consumed by HW on the way so we shouldn't count its
2264 + * bytes as being sent.
2265 +diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
2266 +index b339125b2f09..05e760444a92 100644
2267 +--- a/drivers/net/ethernet/natsemi/sonic.c
2268 ++++ b/drivers/net/ethernet/natsemi/sonic.c
2269 +@@ -64,6 +64,8 @@ static int sonic_open(struct net_device *dev)
2270 +
2271 + netif_dbg(lp, ifup, dev, "%s: initializing sonic driver\n", __func__);
2272 +
2273 ++ spin_lock_init(&lp->lock);
2274 ++
2275 + for (i = 0; i < SONIC_NUM_RRS; i++) {
2276 + struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
2277 + if (skb == NULL) {
2278 +@@ -114,6 +116,24 @@ static int sonic_open(struct net_device *dev)
2279 + return 0;
2280 + }
2281 +
2282 ++/* Wait for the SONIC to become idle. */
2283 ++static void sonic_quiesce(struct net_device *dev, u16 mask)
2284 ++{
2285 ++ struct sonic_local * __maybe_unused lp = netdev_priv(dev);
2286 ++ int i;
2287 ++ u16 bits;
2288 ++
2289 ++ for (i = 0; i < 1000; ++i) {
2290 ++ bits = SONIC_READ(SONIC_CMD) & mask;
2291 ++ if (!bits)
2292 ++ return;
2293 ++ if (irqs_disabled() || in_interrupt())
2294 ++ udelay(20);
2295 ++ else
2296 ++ usleep_range(100, 200);
2297 ++ }
2298 ++ WARN_ONCE(1, "command deadline expired! 0x%04x\n", bits);
2299 ++}
2300 +
2301 + /*
2302 + * Close the SONIC device
2303 +@@ -130,6 +150,9 @@ static int sonic_close(struct net_device *dev)
2304 + /*
2305 + * stop the SONIC, disable interrupts
2306 + */
2307 ++ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
2308 ++ sonic_quiesce(dev, SONIC_CR_ALL);
2309 ++
2310 + SONIC_WRITE(SONIC_IMR, 0);
2311 + SONIC_WRITE(SONIC_ISR, 0x7fff);
2312 + SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
2313 +@@ -169,6 +192,9 @@ static void sonic_tx_timeout(struct net_device *dev)
2314 + * put the Sonic into software-reset mode and
2315 + * disable all interrupts before releasing DMA buffers
2316 + */
2317 ++ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
2318 ++ sonic_quiesce(dev, SONIC_CR_ALL);
2319 ++
2320 + SONIC_WRITE(SONIC_IMR, 0);
2321 + SONIC_WRITE(SONIC_ISR, 0x7fff);
2322 + SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
2323 +@@ -206,8 +232,6 @@ static void sonic_tx_timeout(struct net_device *dev)
2324 + * wake the tx queue
2325 + * Concurrently with all of this, the SONIC is potentially writing to
2326 + * the status flags of the TDs.
2327 +- * Until some mutual exclusion is added, this code will not work with SMP. However,
2328 +- * MIPS Jazz machines and m68k Macs were all uni-processor machines.
2329 + */
2330 +
2331 + static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
2332 +@@ -215,7 +239,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
2333 + struct sonic_local *lp = netdev_priv(dev);
2334 + dma_addr_t laddr;
2335 + int length;
2336 +- int entry = lp->next_tx;
2337 ++ int entry;
2338 ++ unsigned long flags;
2339 +
2340 + netif_dbg(lp, tx_queued, dev, "%s: skb=%p\n", __func__, skb);
2341 +
2342 +@@ -237,6 +262,10 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
2343 + return NETDEV_TX_OK;
2344 + }
2345 +
2346 ++ spin_lock_irqsave(&lp->lock, flags);
2347 ++
2348 ++ entry = lp->next_tx;
2349 ++
2350 + sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */
2351 + sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1); /* single fragment */
2352 + sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */
2353 +@@ -246,10 +275,6 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
2354 + sonic_tda_put(dev, entry, SONIC_TD_LINK,
2355 + sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);
2356 +
2357 +- /*
2358 +- * Must set tx_skb[entry] only after clearing status, and
2359 +- * before clearing EOL and before stopping queue
2360 +- */
2361 + wmb();
2362 + lp->tx_len[entry] = length;
2363 + lp->tx_laddr[entry] = laddr;
2364 +@@ -272,6 +297,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
2365 +
2366 + SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
2367 +
2368 ++ spin_unlock_irqrestore(&lp->lock, flags);
2369 ++
2370 + return NETDEV_TX_OK;
2371 + }
2372 +
2373 +@@ -284,15 +311,28 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
2374 + struct net_device *dev = dev_id;
2375 + struct sonic_local *lp = netdev_priv(dev);
2376 + int status;
2377 ++ unsigned long flags;
2378 ++
2379 ++ /* The lock has two purposes. Firstly, it synchronizes sonic_interrupt()
2380 ++ * with sonic_send_packet() so that the two functions can share state.
2381 ++ * Secondly, it makes sonic_interrupt() re-entrant, as that is required
2382 ++ * by macsonic which must use two IRQs with different priority levels.
2383 ++ */
2384 ++ spin_lock_irqsave(&lp->lock, flags);
2385 ++
2386 ++ status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
2387 ++ if (!status) {
2388 ++ spin_unlock_irqrestore(&lp->lock, flags);
2389 +
2390 +- if (!(status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT))
2391 + return IRQ_NONE;
2392 ++ }
2393 +
2394 + do {
2395 ++ SONIC_WRITE(SONIC_ISR, status); /* clear the interrupt(s) */
2396 ++
2397 + if (status & SONIC_INT_PKTRX) {
2398 + netif_dbg(lp, intr, dev, "%s: packet rx\n", __func__);
2399 + sonic_rx(dev); /* got packet(s) */
2400 +- SONIC_WRITE(SONIC_ISR, SONIC_INT_PKTRX); /* clear the interrupt */
2401 + }
2402 +
2403 + if (status & SONIC_INT_TXDN) {
2404 +@@ -300,11 +340,12 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
2405 + int td_status;
2406 + int freed_some = 0;
2407 +
2408 +- /* At this point, cur_tx is the index of a TD that is one of:
2409 +- * unallocated/freed (status set & tx_skb[entry] clear)
2410 +- * allocated and sent (status set & tx_skb[entry] set )
2411 +- * allocated and not yet sent (status clear & tx_skb[entry] set )
2412 +- * still being allocated by sonic_send_packet (status clear & tx_skb[entry] clear)
2413 ++ /* The state of a Transmit Descriptor may be inferred
2414 ++ * from { tx_skb[entry], td_status } as follows.
2415 ++ * { clear, clear } => the TD has never been used
2416 ++ * { set, clear } => the TD was handed to SONIC
2417 ++ * { set, set } => the TD was handed back
2418 ++ * { clear, set } => the TD is available for re-use
2419 + */
2420 +
2421 + netif_dbg(lp, intr, dev, "%s: tx done\n", __func__);
2422 +@@ -313,18 +354,19 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
2423 + if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0)
2424 + break;
2425 +
2426 +- if (td_status & 0x0001) {
2427 ++ if (td_status & SONIC_TCR_PTX) {
2428 + lp->stats.tx_packets++;
2429 + lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE);
2430 + } else {
2431 +- lp->stats.tx_errors++;
2432 +- if (td_status & 0x0642)
2433 ++ if (td_status & (SONIC_TCR_EXD |
2434 ++ SONIC_TCR_EXC | SONIC_TCR_BCM))
2435 + lp->stats.tx_aborted_errors++;
2436 +- if (td_status & 0x0180)
2437 ++ if (td_status &
2438 ++ (SONIC_TCR_NCRS | SONIC_TCR_CRLS))
2439 + lp->stats.tx_carrier_errors++;
2440 +- if (td_status & 0x0020)
2441 ++ if (td_status & SONIC_TCR_OWC)
2442 + lp->stats.tx_window_errors++;
2443 +- if (td_status & 0x0004)
2444 ++ if (td_status & SONIC_TCR_FU)
2445 + lp->stats.tx_fifo_errors++;
2446 + }
2447 +
2448 +@@ -346,7 +388,6 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
2449 + if (freed_some || lp->tx_skb[entry] == NULL)
2450 + netif_wake_queue(dev); /* The ring is no longer full */
2451 + lp->cur_tx = entry;
2452 +- SONIC_WRITE(SONIC_ISR, SONIC_INT_TXDN); /* clear the interrupt */
2453 + }
2454 +
2455 + /*
2456 +@@ -355,42 +396,37 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
2457 + if (status & SONIC_INT_RFO) {
2458 + netif_dbg(lp, rx_err, dev, "%s: rx fifo overrun\n",
2459 + __func__);
2460 +- lp->stats.rx_fifo_errors++;
2461 +- SONIC_WRITE(SONIC_ISR, SONIC_INT_RFO); /* clear the interrupt */
2462 + }
2463 + if (status & SONIC_INT_RDE) {
2464 + netif_dbg(lp, rx_err, dev, "%s: rx descriptors exhausted\n",
2465 + __func__);
2466 +- lp->stats.rx_dropped++;
2467 +- SONIC_WRITE(SONIC_ISR, SONIC_INT_RDE); /* clear the interrupt */
2468 + }
2469 + if (status & SONIC_INT_RBAE) {
2470 + netif_dbg(lp, rx_err, dev, "%s: rx buffer area exceeded\n",
2471 + __func__);
2472 +- lp->stats.rx_dropped++;
2473 +- SONIC_WRITE(SONIC_ISR, SONIC_INT_RBAE); /* clear the interrupt */
2474 + }
2475 +
2476 + /* counter overruns; all counters are 16bit wide */
2477 +- if (status & SONIC_INT_FAE) {
2478 ++ if (status & SONIC_INT_FAE)
2479 + lp->stats.rx_frame_errors += 65536;
2480 +- SONIC_WRITE(SONIC_ISR, SONIC_INT_FAE); /* clear the interrupt */
2481 +- }
2482 +- if (status & SONIC_INT_CRC) {
2483 ++ if (status & SONIC_INT_CRC)
2484 + lp->stats.rx_crc_errors += 65536;
2485 +- SONIC_WRITE(SONIC_ISR, SONIC_INT_CRC); /* clear the interrupt */
2486 +- }
2487 +- if (status & SONIC_INT_MP) {
2488 ++ if (status & SONIC_INT_MP)
2489 + lp->stats.rx_missed_errors += 65536;
2490 +- SONIC_WRITE(SONIC_ISR, SONIC_INT_MP); /* clear the interrupt */
2491 +- }
2492 +
2493 + /* transmit error */
2494 + if (status & SONIC_INT_TXER) {
2495 +- if (SONIC_READ(SONIC_TCR) & SONIC_TCR_FU)
2496 +- netif_dbg(lp, tx_err, dev, "%s: tx fifo underrun\n",
2497 +- __func__);
2498 +- SONIC_WRITE(SONIC_ISR, SONIC_INT_TXER); /* clear the interrupt */
2499 ++ u16 tcr = SONIC_READ(SONIC_TCR);
2500 ++
2501 ++ netif_dbg(lp, tx_err, dev, "%s: TXER intr, TCR %04x\n",
2502 ++ __func__, tcr);
2503 ++
2504 ++ if (tcr & (SONIC_TCR_EXD | SONIC_TCR_EXC |
2505 ++ SONIC_TCR_FU | SONIC_TCR_BCM)) {
2506 ++ /* Aborted transmission. Try again. */
2507 ++ netif_stop_queue(dev);
2508 ++ SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
2509 ++ }
2510 + }
2511 +
2512 + /* bus retry */
2513 +@@ -400,107 +436,164 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
2514 + /* ... to help debug DMA problems causing endless interrupts. */
2515 + /* Bounce the eth interface to turn on the interrupt again. */
2516 + SONIC_WRITE(SONIC_IMR, 0);
2517 +- SONIC_WRITE(SONIC_ISR, SONIC_INT_BR); /* clear the interrupt */
2518 + }
2519 +
2520 +- /* load CAM done */
2521 +- if (status & SONIC_INT_LCD)
2522 +- SONIC_WRITE(SONIC_ISR, SONIC_INT_LCD); /* clear the interrupt */
2523 +- } while((status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT));
2524 ++ status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
2525 ++ } while (status);
2526 ++
2527 ++ spin_unlock_irqrestore(&lp->lock, flags);
2528 ++
2529 + return IRQ_HANDLED;
2530 + }
2531 +
2532 ++/* Return the array index corresponding to a given Receive Buffer pointer. */
2533 ++static int index_from_addr(struct sonic_local *lp, dma_addr_t addr,
2534 ++ unsigned int last)
2535 ++{
2536 ++ unsigned int i = last;
2537 ++
2538 ++ do {
2539 ++ i = (i + 1) & SONIC_RRS_MASK;
2540 ++ if (addr == lp->rx_laddr[i])
2541 ++ return i;
2542 ++ } while (i != last);
2543 ++
2544 ++ return -ENOENT;
2545 ++}
2546 ++
2547 ++/* Allocate and map a new skb to be used as a receive buffer. */
2548 ++static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp,
2549 ++ struct sk_buff **new_skb, dma_addr_t *new_addr)
2550 ++{
2551 ++ *new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
2552 ++ if (!*new_skb)
2553 ++ return false;
2554 ++
2555 ++ if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
2556 ++ skb_reserve(*new_skb, 2);
2557 ++
2558 ++ *new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE),
2559 ++ SONIC_RBSIZE, DMA_FROM_DEVICE);
2560 ++ if (!*new_addr) {
2561 ++ dev_kfree_skb(*new_skb);
2562 ++ *new_skb = NULL;
2563 ++ return false;
2564 ++ }
2565 ++
2566 ++ return true;
2567 ++}
2568 ++
2569 ++/* Place a new receive resource in the Receive Resource Area and update RWP. */
2570 ++static void sonic_update_rra(struct net_device *dev, struct sonic_local *lp,
2571 ++ dma_addr_t old_addr, dma_addr_t new_addr)
2572 ++{
2573 ++ unsigned int entry = sonic_rr_entry(dev, SONIC_READ(SONIC_RWP));
2574 ++ unsigned int end = sonic_rr_entry(dev, SONIC_READ(SONIC_RRP));
2575 ++ u32 buf;
2576 ++
2577 ++ /* The resources in the range [RRP, RWP) belong to the SONIC. This loop
2578 ++ * scans the other resources in the RRA, those in the range [RWP, RRP).
2579 ++ */
2580 ++ do {
2581 ++ buf = (sonic_rra_get(dev, entry, SONIC_RR_BUFADR_H) << 16) |
2582 ++ sonic_rra_get(dev, entry, SONIC_RR_BUFADR_L);
2583 ++
2584 ++ if (buf == old_addr)
2585 ++ break;
2586 ++
2587 ++ entry = (entry + 1) & SONIC_RRS_MASK;
2588 ++ } while (entry != end);
2589 ++
2590 ++ WARN_ONCE(buf != old_addr, "failed to find resource!\n");
2591 ++
2592 ++ sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, new_addr >> 16);
2593 ++ sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, new_addr & 0xffff);
2594 ++
2595 ++ entry = (entry + 1) & SONIC_RRS_MASK;
2596 ++
2597 ++ SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, entry));
2598 ++}
2599 ++
2600 + /*
2601 + * We have a good packet(s), pass it/them up the network stack.
2602 + */
2603 + static void sonic_rx(struct net_device *dev)
2604 + {
2605 + struct sonic_local *lp = netdev_priv(dev);
2606 +- int status;
2607 + int entry = lp->cur_rx;
2608 ++ int prev_entry = lp->eol_rx;
2609 ++ bool rbe = false;
2610 +
2611 + while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) {
2612 +- struct sk_buff *used_skb;
2613 +- struct sk_buff *new_skb;
2614 +- dma_addr_t new_laddr;
2615 +- u16 bufadr_l;
2616 +- u16 bufadr_h;
2617 +- int pkt_len;
2618 +-
2619 +- status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
2620 +- if (status & SONIC_RCR_PRX) {
2621 +- /* Malloc up new buffer. */
2622 +- new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
2623 +- if (new_skb == NULL) {
2624 +- lp->stats.rx_dropped++;
2625 ++ u16 status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
2626 ++
2627 ++ /* If the RD has LPKT set, the chip has finished with the RB */
2628 ++ if ((status & SONIC_RCR_PRX) && (status & SONIC_RCR_LPKT)) {
2629 ++ struct sk_buff *new_skb;
2630 ++ dma_addr_t new_laddr;
2631 ++ u32 addr = (sonic_rda_get(dev, entry,
2632 ++ SONIC_RD_PKTPTR_H) << 16) |
2633 ++ sonic_rda_get(dev, entry, SONIC_RD_PKTPTR_L);
2634 ++ int i = index_from_addr(lp, addr, entry);
2635 ++
2636 ++ if (i < 0) {
2637 ++ WARN_ONCE(1, "failed to find buffer!\n");
2638 + break;
2639 + }
2640 +- /* provide 16 byte IP header alignment unless DMA requires otherwise */
2641 +- if(SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
2642 +- skb_reserve(new_skb, 2);
2643 +-
2644 +- new_laddr = dma_map_single(lp->device, skb_put(new_skb, SONIC_RBSIZE),
2645 +- SONIC_RBSIZE, DMA_FROM_DEVICE);
2646 +- if (!new_laddr) {
2647 +- dev_kfree_skb(new_skb);
2648 +- printk(KERN_ERR "%s: Failed to map rx buffer, dropping packet.\n", dev->name);
2649 ++
2650 ++ if (sonic_alloc_rb(dev, lp, &new_skb, &new_laddr)) {
2651 ++ struct sk_buff *used_skb = lp->rx_skb[i];
2652 ++ int pkt_len;
2653 ++
2654 ++ /* Pass the used buffer up the stack */
2655 ++ dma_unmap_single(lp->device, addr, SONIC_RBSIZE,
2656 ++ DMA_FROM_DEVICE);
2657 ++
2658 ++ pkt_len = sonic_rda_get(dev, entry,
2659 ++ SONIC_RD_PKTLEN);
2660 ++ skb_trim(used_skb, pkt_len);
2661 ++ used_skb->protocol = eth_type_trans(used_skb,
2662 ++ dev);
2663 ++ netif_rx(used_skb);
2664 ++ lp->stats.rx_packets++;
2665 ++ lp->stats.rx_bytes += pkt_len;
2666 ++
2667 ++ lp->rx_skb[i] = new_skb;
2668 ++ lp->rx_laddr[i] = new_laddr;
2669 ++ } else {
2670 ++ /* Failed to obtain a new buffer so re-use it */
2671 ++ new_laddr = addr;
2672 + lp->stats.rx_dropped++;
2673 +- break;
2674 + }
2675 +-
2676 +- /* now we have a new skb to replace it, pass the used one up the stack */
2677 +- dma_unmap_single(lp->device, lp->rx_laddr[entry], SONIC_RBSIZE, DMA_FROM_DEVICE);
2678 +- used_skb = lp->rx_skb[entry];
2679 +- pkt_len = sonic_rda_get(dev, entry, SONIC_RD_PKTLEN);
2680 +- skb_trim(used_skb, pkt_len);
2681 +- used_skb->protocol = eth_type_trans(used_skb, dev);
2682 +- netif_rx(used_skb);
2683 +- lp->stats.rx_packets++;
2684 +- lp->stats.rx_bytes += pkt_len;
2685 +-
2686 +- /* and insert the new skb */
2687 +- lp->rx_laddr[entry] = new_laddr;
2688 +- lp->rx_skb[entry] = new_skb;
2689 +-
2690 +- bufadr_l = (unsigned long)new_laddr & 0xffff;
2691 +- bufadr_h = (unsigned long)new_laddr >> 16;
2692 +- sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, bufadr_l);
2693 +- sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, bufadr_h);
2694 +- } else {
2695 +- /* This should only happen, if we enable accepting broken packets. */
2696 +- lp->stats.rx_errors++;
2697 +- if (status & SONIC_RCR_FAER)
2698 +- lp->stats.rx_frame_errors++;
2699 +- if (status & SONIC_RCR_CRCR)
2700 +- lp->stats.rx_crc_errors++;
2701 +- }
2702 +- if (status & SONIC_RCR_LPKT) {
2703 +- /*
2704 +- * this was the last packet out of the current receive buffer
2705 +- * give the buffer back to the SONIC
2706 ++ /* If RBE is already asserted when RWP advances then
2707 ++ * it's safe to clear RBE after processing this packet.
2708 + */
2709 +- lp->cur_rwp += SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode);
2710 +- if (lp->cur_rwp >= lp->rra_end) lp->cur_rwp = lp->rra_laddr & 0xffff;
2711 +- SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
2712 +- if (SONIC_READ(SONIC_ISR) & SONIC_INT_RBE) {
2713 +- netif_dbg(lp, rx_err, dev, "%s: rx buffer exhausted\n",
2714 +- __func__);
2715 +- SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); /* clear the flag */
2716 +- }
2717 +- } else
2718 +- printk(KERN_ERR "%s: rx desc without RCR_LPKT. Shouldn't happen !?\n",
2719 +- dev->name);
2720 ++ rbe = rbe || SONIC_READ(SONIC_ISR) & SONIC_INT_RBE;
2721 ++ sonic_update_rra(dev, lp, addr, new_laddr);
2722 ++ }
2723 + /*
2724 + * give back the descriptor
2725 + */
2726 +- sonic_rda_put(dev, entry, SONIC_RD_LINK,
2727 +- sonic_rda_get(dev, entry, SONIC_RD_LINK) | SONIC_EOL);
2728 ++ sonic_rda_put(dev, entry, SONIC_RD_STATUS, 0);
2729 + sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1);
2730 +- sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK,
2731 +- sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK) & ~SONIC_EOL);
2732 +- lp->eol_rx = entry;
2733 +- lp->cur_rx = entry = (entry + 1) & SONIC_RDS_MASK;
2734 ++
2735 ++ prev_entry = entry;
2736 ++ entry = (entry + 1) & SONIC_RDS_MASK;
2737 ++ }
2738 ++
2739 ++ lp->cur_rx = entry;
2740 ++
2741 ++ if (prev_entry != lp->eol_rx) {
2742 ++ /* Advance the EOL flag to put descriptors back into service */
2743 ++ sonic_rda_put(dev, prev_entry, SONIC_RD_LINK, SONIC_EOL |
2744 ++ sonic_rda_get(dev, prev_entry, SONIC_RD_LINK));
2745 ++ sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, ~SONIC_EOL &
2746 ++ sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK));
2747 ++ lp->eol_rx = prev_entry;
2748 + }
2749 ++
2750 ++ if (rbe)
2751 ++ SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE);
2752 + /*
2753 + * If any worth-while packets have been received, netif_rx()
2754 + * has done a mark_bh(NET_BH) for us and will work on them
2755 +@@ -550,6 +643,8 @@ static void sonic_multicast_list(struct net_device *dev)
2756 + (netdev_mc_count(dev) > 15)) {
2757 + rcr |= SONIC_RCR_AMC;
2758 + } else {
2759 ++ unsigned long flags;
2760 ++
2761 + netif_dbg(lp, ifup, dev, "%s: mc_count %d\n", __func__,
2762 + netdev_mc_count(dev));
2763 + sonic_set_cam_enable(dev, 1); /* always enable our own address */
2764 +@@ -563,9 +658,14 @@ static void sonic_multicast_list(struct net_device *dev)
2765 + i++;
2766 + }
2767 + SONIC_WRITE(SONIC_CDC, 16);
2768 +- /* issue Load CAM command */
2769 + SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
2770 ++
2771 ++ /* LCAM and TXP commands can't be used simultaneously */
2772 ++ spin_lock_irqsave(&lp->lock, flags);
2773 ++ sonic_quiesce(dev, SONIC_CR_TXP);
2774 + SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
2775 ++ sonic_quiesce(dev, SONIC_CR_LCAM);
2776 ++ spin_unlock_irqrestore(&lp->lock, flags);
2777 + }
2778 + }
2779 +
2780 +@@ -580,7 +680,6 @@ static void sonic_multicast_list(struct net_device *dev)
2781 + */
2782 + static int sonic_init(struct net_device *dev)
2783 + {
2784 +- unsigned int cmd;
2785 + struct sonic_local *lp = netdev_priv(dev);
2786 + int i;
2787 +
2788 +@@ -592,12 +691,16 @@ static int sonic_init(struct net_device *dev)
2789 + SONIC_WRITE(SONIC_ISR, 0x7fff);
2790 + SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
2791 +
2792 ++ /* While in reset mode, clear CAM Enable register */
2793 ++ SONIC_WRITE(SONIC_CE, 0);
2794 ++
2795 + /*
2796 + * clear software reset flag, disable receiver, clear and
2797 + * enable interrupts, then completely initialize the SONIC
2798 + */
2799 + SONIC_WRITE(SONIC_CMD, 0);
2800 +- SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
2801 ++ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS | SONIC_CR_STP);
2802 ++ sonic_quiesce(dev, SONIC_CR_ALL);
2803 +
2804 + /*
2805 + * initialize the receive resource area
2806 +@@ -615,15 +718,10 @@ static int sonic_init(struct net_device *dev)
2807 + }
2808 +
2809 + /* initialize all RRA registers */
2810 +- lp->rra_end = (lp->rra_laddr + SONIC_NUM_RRS * SIZEOF_SONIC_RR *
2811 +- SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
2812 +- lp->cur_rwp = (lp->rra_laddr + (SONIC_NUM_RRS - 1) * SIZEOF_SONIC_RR *
2813 +- SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
2814 +-
2815 +- SONIC_WRITE(SONIC_RSA, lp->rra_laddr & 0xffff);
2816 +- SONIC_WRITE(SONIC_REA, lp->rra_end);
2817 +- SONIC_WRITE(SONIC_RRP, lp->rra_laddr & 0xffff);
2818 +- SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
2819 ++ SONIC_WRITE(SONIC_RSA, sonic_rr_addr(dev, 0));
2820 ++ SONIC_WRITE(SONIC_REA, sonic_rr_addr(dev, SONIC_NUM_RRS));
2821 ++ SONIC_WRITE(SONIC_RRP, sonic_rr_addr(dev, 0));
2822 ++ SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, SONIC_NUM_RRS - 1));
2823 + SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16);
2824 + SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1));
2825 +
2826 +@@ -631,14 +729,7 @@ static int sonic_init(struct net_device *dev)
2827 + netif_dbg(lp, ifup, dev, "%s: issuing RRRA command\n", __func__);
2828 +
2829 + SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA);
2830 +- i = 0;
2831 +- while (i++ < 100) {
2832 +- if (SONIC_READ(SONIC_CMD) & SONIC_CR_RRRA)
2833 +- break;
2834 +- }
2835 +-
2836 +- netif_dbg(lp, ifup, dev, "%s: status=%x, i=%d\n", __func__,
2837 +- SONIC_READ(SONIC_CMD), i);
2838 ++ sonic_quiesce(dev, SONIC_CR_RRRA);
2839 +
2840 + /*
2841 + * Initialize the receive descriptors so that they
2842 +@@ -713,28 +804,17 @@ static int sonic_init(struct net_device *dev)
2843 + * load the CAM
2844 + */
2845 + SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
2846 +-
2847 +- i = 0;
2848 +- while (i++ < 100) {
2849 +- if (SONIC_READ(SONIC_ISR) & SONIC_INT_LCD)
2850 +- break;
2851 +- }
2852 +- netif_dbg(lp, ifup, dev, "%s: CMD=%x, ISR=%x, i=%d\n", __func__,
2853 +- SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR), i);
2854 ++ sonic_quiesce(dev, SONIC_CR_LCAM);
2855 +
2856 + /*
2857 + * enable receiver, disable loopback
2858 + * and enable all interrupts
2859 + */
2860 +- SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN | SONIC_CR_STP);
2861 + SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT);
2862 + SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT);
2863 + SONIC_WRITE(SONIC_ISR, 0x7fff);
2864 + SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT);
2865 +-
2866 +- cmd = SONIC_READ(SONIC_CMD);
2867 +- if ((cmd & SONIC_CR_RXEN) == 0 || (cmd & SONIC_CR_STP) == 0)
2868 +- printk(KERN_ERR "sonic_init: failed, status=%x\n", cmd);
2869 ++ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN);
2870 +
2871 + netif_dbg(lp, ifup, dev, "%s: new status=%x\n", __func__,
2872 + SONIC_READ(SONIC_CMD));
2873 +diff --git a/drivers/net/ethernet/natsemi/sonic.h b/drivers/net/ethernet/natsemi/sonic.h
2874 +index 2b27f7049acb..1df6d2f06cc4 100644
2875 +--- a/drivers/net/ethernet/natsemi/sonic.h
2876 ++++ b/drivers/net/ethernet/natsemi/sonic.h
2877 +@@ -110,6 +110,9 @@
2878 + #define SONIC_CR_TXP 0x0002
2879 + #define SONIC_CR_HTX 0x0001
2880 +
2881 ++#define SONIC_CR_ALL (SONIC_CR_LCAM | SONIC_CR_RRRA | \
2882 ++ SONIC_CR_RXEN | SONIC_CR_TXP)
2883 ++
2884 + /*
2885 + * SONIC data configuration bits
2886 + */
2887 +@@ -175,6 +178,7 @@
2888 + #define SONIC_TCR_NCRS 0x0100
2889 + #define SONIC_TCR_CRLS 0x0080
2890 + #define SONIC_TCR_EXC 0x0040
2891 ++#define SONIC_TCR_OWC 0x0020
2892 + #define SONIC_TCR_PMB 0x0008
2893 + #define SONIC_TCR_FU 0x0004
2894 + #define SONIC_TCR_BCM 0x0002
2895 +@@ -274,8 +278,9 @@
2896 + #define SONIC_NUM_RDS SONIC_NUM_RRS /* number of receive descriptors */
2897 + #define SONIC_NUM_TDS 16 /* number of transmit descriptors */
2898 +
2899 +-#define SONIC_RDS_MASK (SONIC_NUM_RDS-1)
2900 +-#define SONIC_TDS_MASK (SONIC_NUM_TDS-1)
2901 ++#define SONIC_RRS_MASK (SONIC_NUM_RRS - 1)
2902 ++#define SONIC_RDS_MASK (SONIC_NUM_RDS - 1)
2903 ++#define SONIC_TDS_MASK (SONIC_NUM_TDS - 1)
2904 +
2905 + #define SONIC_RBSIZE 1520 /* size of one resource buffer */
2906 +
2907 +@@ -312,8 +317,6 @@ struct sonic_local {
2908 + u32 rda_laddr; /* logical DMA address of RDA */
2909 + dma_addr_t rx_laddr[SONIC_NUM_RRS]; /* logical DMA addresses of rx skbuffs */
2910 + dma_addr_t tx_laddr[SONIC_NUM_TDS]; /* logical DMA addresses of tx skbuffs */
2911 +- unsigned int rra_end;
2912 +- unsigned int cur_rwp;
2913 + unsigned int cur_rx;
2914 + unsigned int cur_tx; /* first unacked transmit packet */
2915 + unsigned int eol_rx;
2916 +@@ -322,6 +325,7 @@ struct sonic_local {
2917 + int msg_enable;
2918 + struct device *device; /* generic device */
2919 + struct net_device_stats stats;
2920 ++ spinlock_t lock;
2921 + };
2922 +
2923 + #define TX_TIMEOUT (3 * HZ)
2924 +@@ -344,30 +348,30 @@ static void sonic_msg_init(struct net_device *dev);
2925 + as far as we can tell. */
2926 + /* OpenBSD calls this "SWO". I'd like to think that sonic_buf_put()
2927 + is a much better name. */
2928 +-static inline void sonic_buf_put(void* base, int bitmode,
2929 ++static inline void sonic_buf_put(u16 *base, int bitmode,
2930 + int offset, __u16 val)
2931 + {
2932 + if (bitmode)
2933 + #ifdef __BIG_ENDIAN
2934 +- ((__u16 *) base + (offset*2))[1] = val;
2935 ++ __raw_writew(val, base + (offset * 2) + 1);
2936 + #else
2937 +- ((__u16 *) base + (offset*2))[0] = val;
2938 ++ __raw_writew(val, base + (offset * 2) + 0);
2939 + #endif
2940 + else
2941 +- ((__u16 *) base)[offset] = val;
2942 ++ __raw_writew(val, base + (offset * 1) + 0);
2943 + }
2944 +
2945 +-static inline __u16 sonic_buf_get(void* base, int bitmode,
2946 ++static inline __u16 sonic_buf_get(u16 *base, int bitmode,
2947 + int offset)
2948 + {
2949 + if (bitmode)
2950 + #ifdef __BIG_ENDIAN
2951 +- return ((volatile __u16 *) base + (offset*2))[1];
2952 ++ return __raw_readw(base + (offset * 2) + 1);
2953 + #else
2954 +- return ((volatile __u16 *) base + (offset*2))[0];
2955 ++ return __raw_readw(base + (offset * 2) + 0);
2956 + #endif
2957 + else
2958 +- return ((volatile __u16 *) base)[offset];
2959 ++ return __raw_readw(base + (offset * 1) + 0);
2960 + }
2961 +
2962 + /* Inlines that you should actually use for reading/writing DMA buffers */
2963 +@@ -447,6 +451,22 @@ static inline __u16 sonic_rra_get(struct net_device* dev, int entry,
2964 + (entry * SIZEOF_SONIC_RR) + offset);
2965 + }
2966 +
2967 ++static inline u16 sonic_rr_addr(struct net_device *dev, int entry)
2968 ++{
2969 ++ struct sonic_local *lp = netdev_priv(dev);
2970 ++
2971 ++ return lp->rra_laddr +
2972 ++ entry * SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode);
2973 ++}
2974 ++
2975 ++static inline u16 sonic_rr_entry(struct net_device *dev, u16 addr)
2976 ++{
2977 ++ struct sonic_local *lp = netdev_priv(dev);
2978 ++
2979 ++ return (addr - (u16)lp->rra_laddr) / (SIZEOF_SONIC_RR *
2980 ++ SONIC_BUS_SCALE(lp->dma_bitmode));
2981 ++}
2982 ++
2983 + static const char version[] =
2984 + "sonic.c:v0.92 20.9.98 tsbogend@×××××××××××××.de\n";
2985 +
2986 +diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
2987 +index f6222ada6818..9b3ba98726d7 100644
2988 +--- a/drivers/net/gtp.c
2989 ++++ b/drivers/net/gtp.c
2990 +@@ -804,19 +804,21 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
2991 + return NULL;
2992 + }
2993 +
2994 +- if (sock->sk->sk_protocol != IPPROTO_UDP) {
2995 ++ sk = sock->sk;
2996 ++ if (sk->sk_protocol != IPPROTO_UDP ||
2997 ++ sk->sk_type != SOCK_DGRAM ||
2998 ++ (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) {
2999 + pr_debug("socket fd=%d not UDP\n", fd);
3000 + sk = ERR_PTR(-EINVAL);
3001 + goto out_sock;
3002 + }
3003 +
3004 +- lock_sock(sock->sk);
3005 +- if (sock->sk->sk_user_data) {
3006 ++ lock_sock(sk);
3007 ++ if (sk->sk_user_data) {
3008 + sk = ERR_PTR(-EBUSY);
3009 + goto out_rel_sock;
3010 + }
3011 +
3012 +- sk = sock->sk;
3013 + sock_hold(sk);
3014 +
3015 + tuncfg.sk_user_data = gtp;
3016 +diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
3017 +index 2a91c192659f..61d7e0d1d77d 100644
3018 +--- a/drivers/net/slip/slip.c
3019 ++++ b/drivers/net/slip/slip.c
3020 +@@ -452,9 +452,16 @@ static void slip_transmit(struct work_struct *work)
3021 + */
3022 + static void slip_write_wakeup(struct tty_struct *tty)
3023 + {
3024 +- struct slip *sl = tty->disc_data;
3025 ++ struct slip *sl;
3026 ++
3027 ++ rcu_read_lock();
3028 ++ sl = rcu_dereference(tty->disc_data);
3029 ++ if (!sl)
3030 ++ goto out;
3031 +
3032 + schedule_work(&sl->tx_work);
3033 ++out:
3034 ++ rcu_read_unlock();
3035 + }
3036 +
3037 + static void sl_tx_timeout(struct net_device *dev)
3038 +@@ -882,10 +889,11 @@ static void slip_close(struct tty_struct *tty)
3039 + return;
3040 +
3041 + spin_lock_bh(&sl->lock);
3042 +- tty->disc_data = NULL;
3043 ++ rcu_assign_pointer(tty->disc_data, NULL);
3044 + sl->tty = NULL;
3045 + spin_unlock_bh(&sl->lock);
3046 +
3047 ++ synchronize_rcu();
3048 + flush_work(&sl->tx_work);
3049 +
3050 + /* VSV = very important to remove timers */
3051 +diff --git a/drivers/net/tun.c b/drivers/net/tun.c
3052 +index 16564ebcde50..69f553a028ee 100644
3053 +--- a/drivers/net/tun.c
3054 ++++ b/drivers/net/tun.c
3055 +@@ -1936,6 +1936,10 @@ drop:
3056 + if (ret != XDP_PASS) {
3057 + rcu_read_unlock();
3058 + local_bh_enable();
3059 ++ if (frags) {
3060 ++ tfile->napi.skb = NULL;
3061 ++ mutex_unlock(&tfile->napi_mutex);
3062 ++ }
3063 + return total_len;
3064 + }
3065 + }
3066 +diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
3067 +index c232f1612083..0170a441208a 100644
3068 +--- a/drivers/net/usb/lan78xx.c
3069 ++++ b/drivers/net/usb/lan78xx.c
3070 +@@ -20,6 +20,7 @@
3071 + #include <linux/mdio.h>
3072 + #include <linux/phy.h>
3073 + #include <net/ip6_checksum.h>
3074 ++#include <net/vxlan.h>
3075 + #include <linux/interrupt.h>
3076 + #include <linux/irqdomain.h>
3077 + #include <linux/irq.h>
3078 +@@ -3668,6 +3669,19 @@ static void lan78xx_tx_timeout(struct net_device *net)
3079 + tasklet_schedule(&dev->bh);
3080 + }
3081 +
3082 ++static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
3083 ++ struct net_device *netdev,
3084 ++ netdev_features_t features)
3085 ++{
3086 ++ if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
3087 ++ features &= ~NETIF_F_GSO_MASK;
3088 ++
3089 ++ features = vlan_features_check(skb, features);
3090 ++ features = vxlan_features_check(skb, features);
3091 ++
3092 ++ return features;
3093 ++}
3094 ++
3095 + static const struct net_device_ops lan78xx_netdev_ops = {
3096 + .ndo_open = lan78xx_open,
3097 + .ndo_stop = lan78xx_stop,
3098 +@@ -3681,6 +3695,7 @@ static const struct net_device_ops lan78xx_netdev_ops = {
3099 + .ndo_set_features = lan78xx_set_features,
3100 + .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
3101 + .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
3102 ++ .ndo_features_check = lan78xx_features_check,
3103 + };
3104 +
3105 + static void lan78xx_stat_monitor(struct timer_list *t)
3106 +diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
3107 +index f43c06569ea1..c4c8f1b62e1e 100644
3108 +--- a/drivers/net/wireless/cisco/airo.c
3109 ++++ b/drivers/net/wireless/cisco/airo.c
3110 +@@ -7790,16 +7790,8 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) {
3111 + case AIROGVLIST: ridcode = RID_APLIST; break;
3112 + case AIROGDRVNAM: ridcode = RID_DRVNAME; break;
3113 + case AIROGEHTENC: ridcode = RID_ETHERENCAP; break;
3114 +- case AIROGWEPKTMP: ridcode = RID_WEP_TEMP;
3115 +- /* Only super-user can read WEP keys */
3116 +- if (!capable(CAP_NET_ADMIN))
3117 +- return -EPERM;
3118 +- break;
3119 +- case AIROGWEPKNV: ridcode = RID_WEP_PERM;
3120 +- /* Only super-user can read WEP keys */
3121 +- if (!capable(CAP_NET_ADMIN))
3122 +- return -EPERM;
3123 +- break;
3124 ++ case AIROGWEPKTMP: ridcode = RID_WEP_TEMP; break;
3125 ++ case AIROGWEPKNV: ridcode = RID_WEP_PERM; break;
3126 + case AIROGSTAT: ridcode = RID_STATUS; break;
3127 + case AIROGSTATSD32: ridcode = RID_STATSDELTA; break;
3128 + case AIROGSTATSC32: ridcode = RID_STATS; break;
3129 +@@ -7813,7 +7805,13 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) {
3130 + return -EINVAL;
3131 + }
3132 +
3133 +- if ((iobuf = kmalloc(RIDSIZE, GFP_KERNEL)) == NULL)
3134 ++ if (ridcode == RID_WEP_TEMP || ridcode == RID_WEP_PERM) {
3135 ++ /* Only super-user can read WEP keys */
3136 ++ if (!capable(CAP_NET_ADMIN))
3137 ++ return -EPERM;
3138 ++ }
3139 ++
3140 ++ if ((iobuf = kzalloc(RIDSIZE, GFP_KERNEL)) == NULL)
3141 + return -ENOMEM;
3142 +
3143 + PC4500_readrid(ai,ridcode,iobuf,RIDSIZE, 1);
3144 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
3145 +index 60aff2ecec12..58df25e2fb32 100644
3146 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
3147 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
3148 +@@ -154,5 +154,6 @@
3149 + #define IWL_MVM_D3_DEBUG false
3150 + #define IWL_MVM_USE_TWT false
3151 + #define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 10
3152 ++#define IWL_MVM_USE_NSSN_SYNC 0
3153 +
3154 + #endif /* __MVM_CONSTANTS_H */
3155 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
3156 +index d31f96c3f925..49aeab7c27a2 100644
3157 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
3158 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
3159 +@@ -742,6 +742,20 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
3160 + return ret;
3161 + }
3162 +
3163 ++static void iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
3164 ++ struct ieee80211_sta *sta)
3165 ++{
3166 ++ if (likely(sta)) {
3167 ++ if (likely(iwl_mvm_tx_skb_sta(mvm, skb, sta) == 0))
3168 ++ return;
3169 ++ } else {
3170 ++ if (likely(iwl_mvm_tx_skb_non_sta(mvm, skb) == 0))
3171 ++ return;
3172 ++ }
3173 ++
3174 ++ ieee80211_free_txskb(mvm->hw, skb);
3175 ++}
3176 ++
3177 + static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
3178 + struct ieee80211_tx_control *control,
3179 + struct sk_buff *skb)
3180 +@@ -785,14 +799,7 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
3181 + }
3182 + }
3183 +
3184 +- if (sta) {
3185 +- if (iwl_mvm_tx_skb(mvm, skb, sta))
3186 +- goto drop;
3187 +- return;
3188 +- }
3189 +-
3190 +- if (iwl_mvm_tx_skb_non_sta(mvm, skb))
3191 +- goto drop;
3192 ++ iwl_mvm_tx_skb(mvm, skb, sta);
3193 + return;
3194 + drop:
3195 + ieee80211_free_txskb(hw, skb);
3196 +@@ -842,10 +849,7 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
3197 + break;
3198 + }
3199 +
3200 +- if (!txq->sta)
3201 +- iwl_mvm_tx_skb_non_sta(mvm, skb);
3202 +- else
3203 +- iwl_mvm_tx_skb(mvm, skb, txq->sta);
3204 ++ iwl_mvm_tx_skb(mvm, skb, txq->sta);
3205 + }
3206 + } while (atomic_dec_return(&mvmtxq->tx_request));
3207 + rcu_read_unlock();
3208 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
3209 +index 5ca50f39a023..5f1ecbb6fb71 100644
3210 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
3211 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
3212 +@@ -1508,8 +1508,8 @@ int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm,
3213 + int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id,
3214 + u16 len, const void *data,
3215 + u32 *status);
3216 +-int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
3217 +- struct ieee80211_sta *sta);
3218 ++int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
3219 ++ struct ieee80211_sta *sta);
3220 + int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb);
3221 + void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
3222 + struct iwl_tx_cmd *tx_cmd,
3223 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
3224 +index 77b03b757193..a6e2a30eb310 100644
3225 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
3226 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
3227 +@@ -514,14 +514,17 @@ static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size)
3228 +
3229 + static void iwl_mvm_sync_nssn(struct iwl_mvm *mvm, u8 baid, u16 nssn)
3230 + {
3231 +- struct iwl_mvm_rss_sync_notif notif = {
3232 +- .metadata.type = IWL_MVM_RXQ_NSSN_SYNC,
3233 +- .metadata.sync = 0,
3234 +- .nssn_sync.baid = baid,
3235 +- .nssn_sync.nssn = nssn,
3236 +- };
3237 +-
3238 +- iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
3239 ++ if (IWL_MVM_USE_NSSN_SYNC) {
3240 ++ struct iwl_mvm_rss_sync_notif notif = {
3241 ++ .metadata.type = IWL_MVM_RXQ_NSSN_SYNC,
3242 ++ .metadata.sync = 0,
3243 ++ .nssn_sync.baid = baid,
3244 ++ .nssn_sync.nssn = nssn,
3245 ++ };
3246 ++
3247 ++ iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif,
3248 ++ sizeof(notif));
3249 ++ }
3250 + }
3251 +
3252 + #define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10)
3253 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
3254 +index fcafa22ec6ce..8aa567d7912c 100644
3255 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
3256 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
3257 +@@ -1220,7 +1220,7 @@ static int iwl_mvm_legacy_config_scan(struct iwl_mvm *mvm)
3258 + cmd_size = sizeof(struct iwl_scan_config_v2);
3259 + else
3260 + cmd_size = sizeof(struct iwl_scan_config_v1);
3261 +- cmd_size += num_channels;
3262 ++ cmd_size += mvm->fw->ucode_capa.n_scan_channels;
3263 +
3264 + cfg = kzalloc(cmd_size, GFP_KERNEL);
3265 + if (!cfg)
3266 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
3267 +index e3b2a2bf3863..d9d82f6b5e87 100644
3268 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
3269 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
3270 +@@ -1151,7 +1151,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
3271 + if (WARN_ONCE(txq_id == IWL_MVM_INVALID_QUEUE, "Invalid TXQ id")) {
3272 + iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
3273 + spin_unlock(&mvmsta->lock);
3274 +- return 0;
3275 ++ return -1;
3276 + }
3277 +
3278 + if (!iwl_mvm_has_new_tx_api(mvm)) {
3279 +@@ -1203,8 +1203,8 @@ drop:
3280 + return -1;
3281 + }
3282 +
3283 +-int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
3284 +- struct ieee80211_sta *sta)
3285 ++int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
3286 ++ struct ieee80211_sta *sta)
3287 + {
3288 + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3289 + struct ieee80211_tx_info info;
3290 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
3291 +index 041dd75ac72b..64c74acadb99 100644
3292 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
3293 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
3294 +@@ -1537,13 +1537,13 @@ out:
3295 +
3296 + napi = &rxq->napi;
3297 + if (napi->poll) {
3298 ++ napi_gro_flush(napi, false);
3299 ++
3300 + if (napi->rx_count) {
3301 + netif_receive_skb_list(&napi->rx_list);
3302 + INIT_LIST_HEAD(&napi->rx_list);
3303 + napi->rx_count = 0;
3304 + }
3305 +-
3306 +- napi_gro_flush(napi, false);
3307 + }
3308 +
3309 + iwl_pcie_rxq_restock(trans, rxq);
3310 +diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
3311 +index 57edfada0665..c9401c121a14 100644
3312 +--- a/drivers/net/wireless/marvell/libertas/cfg.c
3313 ++++ b/drivers/net/wireless/marvell/libertas/cfg.c
3314 +@@ -273,6 +273,10 @@ add_ie_rates(u8 *tlv, const u8 *ie, int *nrates)
3315 + int hw, ap, ap_max = ie[1];
3316 + u8 hw_rate;
3317 +
3318 ++ if (ap_max > MAX_RATES) {
3319 ++ lbs_deb_assoc("invalid rates\n");
3320 ++ return tlv;
3321 ++ }
3322 + /* Advance past IE header */
3323 + ie += 2;
3324 +
3325 +@@ -1717,6 +1721,9 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
3326 + struct cmd_ds_802_11_ad_hoc_join cmd;
3327 + u8 preamble = RADIO_PREAMBLE_SHORT;
3328 + int ret = 0;
3329 ++ int hw, i;
3330 ++ u8 rates_max;
3331 ++ u8 *rates;
3332 +
3333 + /* TODO: set preamble based on scan result */
3334 + ret = lbs_set_radio(priv, preamble, 1);
3335 +@@ -1775,9 +1782,12 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
3336 + if (!rates_eid) {
3337 + lbs_add_rates(cmd.bss.rates);
3338 + } else {
3339 +- int hw, i;
3340 +- u8 rates_max = rates_eid[1];
3341 +- u8 *rates = cmd.bss.rates;
3342 ++ rates_max = rates_eid[1];
3343 ++ if (rates_max > MAX_RATES) {
3344 ++ lbs_deb_join("invalid rates");
3345 ++ goto out;
3346 ++ }
3347 ++ rates = cmd.bss.rates;
3348 + for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) {
3349 + u8 hw_rate = lbs_rates[hw].bitrate / 5;
3350 + for (i = 0; i < rates_max; i++) {
3351 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
3352 +index 308f744393eb..1593b8494ebb 100644
3353 +--- a/drivers/pci/quirks.c
3354 ++++ b/drivers/pci/quirks.c
3355 +@@ -5021,18 +5021,25 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
3356 +
3357 + #ifdef CONFIG_PCI_ATS
3358 + /*
3359 +- * Some devices have a broken ATS implementation causing IOMMU stalls.
3360 +- * Don't use ATS for those devices.
3361 ++ * Some devices require additional driver setup to enable ATS. Don't use
3362 ++ * ATS for those devices as ATS will be enabled before the driver has had a
3363 ++ * chance to load and configure the device.
3364 + */
3365 +-static void quirk_no_ats(struct pci_dev *pdev)
3366 ++static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
3367 + {
3368 +- pci_info(pdev, "disabling ATS (broken on this device)\n");
3369 ++ if (pdev->device == 0x7340 && pdev->revision != 0xc5)
3370 ++ return;
3371 ++
3372 ++ pci_info(pdev, "disabling ATS\n");
3373 + pdev->ats_cap = 0;
3374 + }
3375 +
3376 + /* AMD Stoney platform GPU */
3377 +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats);
3378 +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_no_ats);
3379 ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats);
3380 ++/* AMD Iceland dGPU */
3381 ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
3382 ++/* AMD Navi14 dGPU */
3383 ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
3384 + #endif /* CONFIG_PCI_ATS */
3385 +
3386 + /* Freescale PCIe doesn't support MSI in RC mode */
3387 +diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
3388 +index 44d7f50bbc82..d936e7aa74c4 100644
3389 +--- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
3390 ++++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
3391 +@@ -49,6 +49,7 @@
3392 + .padown_offset = SPT_PAD_OWN, \
3393 + .padcfglock_offset = SPT_PADCFGLOCK, \
3394 + .hostown_offset = SPT_HOSTSW_OWN, \
3395 ++ .is_offset = SPT_GPI_IS, \
3396 + .ie_offset = SPT_GPI_IE, \
3397 + .pin_base = (s), \
3398 + .npins = ((e) - (s) + 1), \
3399 +diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
3400 +index f194ffc4699e..c070cb2a6a5b 100644
3401 +--- a/drivers/target/iscsi/iscsi_target.c
3402 ++++ b/drivers/target/iscsi/iscsi_target.c
3403 +@@ -4151,9 +4151,6 @@ int iscsit_close_connection(
3404 + iscsit_stop_nopin_response_timer(conn);
3405 + iscsit_stop_nopin_timer(conn);
3406 +
3407 +- if (conn->conn_transport->iscsit_wait_conn)
3408 +- conn->conn_transport->iscsit_wait_conn(conn);
3409 +-
3410 + /*
3411 + * During Connection recovery drop unacknowledged out of order
3412 + * commands for this connection, and prepare the other commands
3413 +@@ -4239,6 +4236,9 @@ int iscsit_close_connection(
3414 + target_sess_cmd_list_set_waiting(sess->se_sess);
3415 + target_wait_for_sess_cmds(sess->se_sess);
3416 +
3417 ++ if (conn->conn_transport->iscsit_wait_conn)
3418 ++ conn->conn_transport->iscsit_wait_conn(conn);
3419 ++
3420 + ahash_request_free(conn->conn_tx_hash);
3421 + if (conn->conn_rx_hash) {
3422 + struct crypto_ahash *tfm;
3423 +diff --git a/fs/afs/cell.c b/fs/afs/cell.c
3424 +index fd5133e26a38..78ba5f932287 100644
3425 +--- a/fs/afs/cell.c
3426 ++++ b/fs/afs/cell.c
3427 +@@ -134,8 +134,17 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
3428 + _leave(" = -ENAMETOOLONG");
3429 + return ERR_PTR(-ENAMETOOLONG);
3430 + }
3431 +- if (namelen == 5 && memcmp(name, "@cell", 5) == 0)
3432 ++
3433 ++ /* Prohibit cell names that contain unprintable chars, '/' and '@' or
3434 ++ * that begin with a dot. This also precludes "@cell".
3435 ++ */
3436 ++ if (name[0] == '.')
3437 + return ERR_PTR(-EINVAL);
3438 ++ for (i = 0; i < namelen; i++) {
3439 ++ char ch = name[i];
3440 ++ if (!isprint(ch) || ch == '/' || ch == '@')
3441 ++ return ERR_PTR(-EINVAL);
3442 ++ }
3443 +
3444 + _enter("%*.*s,%s", namelen, namelen, name, addresses);
3445 +
3446 +diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
3447 +index a5163296d9d9..ee02a742fff5 100644
3448 +--- a/fs/ceph/mds_client.c
3449 ++++ b/fs/ceph/mds_client.c
3450 +@@ -708,8 +708,10 @@ void ceph_mdsc_release_request(struct kref *kref)
3451 + /* avoid calling iput_final() in mds dispatch threads */
3452 + ceph_async_iput(req->r_inode);
3453 + }
3454 +- if (req->r_parent)
3455 ++ if (req->r_parent) {
3456 + ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
3457 ++ ceph_async_iput(req->r_parent);
3458 ++ }
3459 + ceph_async_iput(req->r_target_inode);
3460 + if (req->r_dentry)
3461 + dput(req->r_dentry);
3462 +@@ -2670,8 +2672,10 @@ int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir,
3463 + /* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */
3464 + if (req->r_inode)
3465 + ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
3466 +- if (req->r_parent)
3467 ++ if (req->r_parent) {
3468 + ceph_get_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
3469 ++ ihold(req->r_parent);
3470 ++ }
3471 + if (req->r_old_dentry_dir)
3472 + ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
3473 + CEPH_CAP_PIN);
3474 +diff --git a/fs/io_uring.c b/fs/io_uring.c
3475 +index b1c9ad1fb9e1..709671faaed6 100644
3476 +--- a/fs/io_uring.c
3477 ++++ b/fs/io_uring.c
3478 +@@ -3716,12 +3716,6 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
3479 + wake_up(&ctx->sqo_wait);
3480 + submitted = to_submit;
3481 + } else if (to_submit) {
3482 +- if (current->mm != ctx->sqo_mm ||
3483 +- current_cred() != ctx->creds) {
3484 +- ret = -EPERM;
3485 +- goto out;
3486 +- }
3487 +-
3488 + to_submit = min(to_submit, ctx->sq_entries);
3489 +
3490 + mutex_lock(&ctx->uring_lock);
3491 +diff --git a/fs/namei.c b/fs/namei.c
3492 +index 671c3c1a3425..e81521c87f98 100644
3493 +--- a/fs/namei.c
3494 ++++ b/fs/namei.c
3495 +@@ -1001,7 +1001,8 @@ static int may_linkat(struct path *link)
3496 + * may_create_in_sticky - Check whether an O_CREAT open in a sticky directory
3497 + * should be allowed, or not, on files that already
3498 + * exist.
3499 +- * @dir: the sticky parent directory
3500 ++ * @dir_mode: mode bits of directory
3501 ++ * @dir_uid: owner of directory
3502 + * @inode: the inode of the file to open
3503 + *
3504 + * Block an O_CREAT open of a FIFO (or a regular file) when:
3505 +@@ -1017,18 +1018,18 @@ static int may_linkat(struct path *link)
3506 + *
3507 + * Returns 0 if the open is allowed, -ve on error.
3508 + */
3509 +-static int may_create_in_sticky(struct dentry * const dir,
3510 ++static int may_create_in_sticky(umode_t dir_mode, kuid_t dir_uid,
3511 + struct inode * const inode)
3512 + {
3513 + if ((!sysctl_protected_fifos && S_ISFIFO(inode->i_mode)) ||
3514 + (!sysctl_protected_regular && S_ISREG(inode->i_mode)) ||
3515 +- likely(!(dir->d_inode->i_mode & S_ISVTX)) ||
3516 +- uid_eq(inode->i_uid, dir->d_inode->i_uid) ||
3517 ++ likely(!(dir_mode & S_ISVTX)) ||
3518 ++ uid_eq(inode->i_uid, dir_uid) ||
3519 + uid_eq(current_fsuid(), inode->i_uid))
3520 + return 0;
3521 +
3522 +- if (likely(dir->d_inode->i_mode & 0002) ||
3523 +- (dir->d_inode->i_mode & 0020 &&
3524 ++ if (likely(dir_mode & 0002) ||
3525 ++ (dir_mode & 0020 &&
3526 + ((sysctl_protected_fifos >= 2 && S_ISFIFO(inode->i_mode)) ||
3527 + (sysctl_protected_regular >= 2 && S_ISREG(inode->i_mode))))) {
3528 + return -EACCES;
3529 +@@ -3248,6 +3249,8 @@ static int do_last(struct nameidata *nd,
3530 + struct file *file, const struct open_flags *op)
3531 + {
3532 + struct dentry *dir = nd->path.dentry;
3533 ++ kuid_t dir_uid = dir->d_inode->i_uid;
3534 ++ umode_t dir_mode = dir->d_inode->i_mode;
3535 + int open_flag = op->open_flag;
3536 + bool will_truncate = (open_flag & O_TRUNC) != 0;
3537 + bool got_write = false;
3538 +@@ -3383,7 +3386,7 @@ finish_open:
3539 + error = -EISDIR;
3540 + if (d_is_dir(nd->path.dentry))
3541 + goto out;
3542 +- error = may_create_in_sticky(dir,
3543 ++ error = may_create_in_sticky(dir_mode, dir_uid,
3544 + d_backing_inode(nd->path.dentry));
3545 + if (unlikely(error))
3546 + goto out;
3547 +diff --git a/fs/readdir.c b/fs/readdir.c
3548 +index d26d5ea4de7b..de2eceffdee8 100644
3549 +--- a/fs/readdir.c
3550 ++++ b/fs/readdir.c
3551 +@@ -102,10 +102,14 @@ EXPORT_SYMBOL(iterate_dir);
3552 + * filename length, and the above "soft error" worry means
3553 + * that it's probably better left alone until we have that
3554 + * issue clarified.
3555 ++ *
3556 ++ * Note the PATH_MAX check - it's arbitrary but the real
3557 ++ * kernel limit on a possible path component, not NAME_MAX,
3558 ++ * which is the technical standard limit.
3559 + */
3560 + static int verify_dirent_name(const char *name, int len)
3561 + {
3562 +- if (!len)
3563 ++ if (len <= 0 || len >= PATH_MAX)
3564 + return -EIO;
3565 + if (memchr(name, '/', len))
3566 + return -EIO;
3567 +@@ -206,7 +210,7 @@ struct linux_dirent {
3568 + struct getdents_callback {
3569 + struct dir_context ctx;
3570 + struct linux_dirent __user * current_dir;
3571 +- struct linux_dirent __user * previous;
3572 ++ int prev_reclen;
3573 + int count;
3574 + int error;
3575 + };
3576 +@@ -214,12 +218,13 @@ struct getdents_callback {
3577 + static int filldir(struct dir_context *ctx, const char *name, int namlen,
3578 + loff_t offset, u64 ino, unsigned int d_type)
3579 + {
3580 +- struct linux_dirent __user * dirent;
3581 ++ struct linux_dirent __user *dirent, *prev;
3582 + struct getdents_callback *buf =
3583 + container_of(ctx, struct getdents_callback, ctx);
3584 + unsigned long d_ino;
3585 + int reclen = ALIGN(offsetof(struct linux_dirent, d_name) + namlen + 2,
3586 + sizeof(long));
3587 ++ int prev_reclen;
3588 +
3589 + buf->error = verify_dirent_name(name, namlen);
3590 + if (unlikely(buf->error))
3591 +@@ -232,28 +237,24 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
3592 + buf->error = -EOVERFLOW;
3593 + return -EOVERFLOW;
3594 + }
3595 +- dirent = buf->previous;
3596 +- if (dirent && signal_pending(current))
3597 ++ prev_reclen = buf->prev_reclen;
3598 ++ if (prev_reclen && signal_pending(current))
3599 + return -EINTR;
3600 +-
3601 +- /*
3602 +- * Note! This range-checks 'previous' (which may be NULL).
3603 +- * The real range was checked in getdents
3604 +- */
3605 +- if (!user_access_begin(dirent, sizeof(*dirent)))
3606 +- goto efault;
3607 +- if (dirent)
3608 +- unsafe_put_user(offset, &dirent->d_off, efault_end);
3609 + dirent = buf->current_dir;
3610 ++ prev = (void __user *) dirent - prev_reclen;
3611 ++ if (!user_access_begin(prev, reclen + prev_reclen))
3612 ++ goto efault;
3613 ++
3614 ++ /* This might be 'dirent->d_off', but if so it will get overwritten */
3615 ++ unsafe_put_user(offset, &prev->d_off, efault_end);
3616 + unsafe_put_user(d_ino, &dirent->d_ino, efault_end);
3617 + unsafe_put_user(reclen, &dirent->d_reclen, efault_end);
3618 + unsafe_put_user(d_type, (char __user *) dirent + reclen - 1, efault_end);
3619 + unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end);
3620 + user_access_end();
3621 +
3622 +- buf->previous = dirent;
3623 +- dirent = (void __user *)dirent + reclen;
3624 +- buf->current_dir = dirent;
3625 ++ buf->current_dir = (void __user *)dirent + reclen;
3626 ++ buf->prev_reclen = reclen;
3627 + buf->count -= reclen;
3628 + return 0;
3629 + efault_end:
3630 +@@ -267,7 +268,6 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
3631 + struct linux_dirent __user *, dirent, unsigned int, count)
3632 + {
3633 + struct fd f;
3634 +- struct linux_dirent __user * lastdirent;
3635 + struct getdents_callback buf = {
3636 + .ctx.actor = filldir,
3637 + .count = count,
3638 +@@ -285,8 +285,10 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
3639 + error = iterate_dir(f.file, &buf.ctx);
3640 + if (error >= 0)
3641 + error = buf.error;
3642 +- lastdirent = buf.previous;
3643 +- if (lastdirent) {
3644 ++ if (buf.prev_reclen) {
3645 ++ struct linux_dirent __user * lastdirent;
3646 ++ lastdirent = (void __user *)buf.current_dir - buf.prev_reclen;
3647 ++
3648 + if (put_user(buf.ctx.pos, &lastdirent->d_off))
3649 + error = -EFAULT;
3650 + else
3651 +@@ -299,7 +301,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
3652 + struct getdents_callback64 {
3653 + struct dir_context ctx;
3654 + struct linux_dirent64 __user * current_dir;
3655 +- struct linux_dirent64 __user * previous;
3656 ++ int prev_reclen;
3657 + int count;
3658 + int error;
3659 + };
3660 +@@ -307,11 +309,12 @@ struct getdents_callback64 {
3661 + static int filldir64(struct dir_context *ctx, const char *name, int namlen,
3662 + loff_t offset, u64 ino, unsigned int d_type)
3663 + {
3664 +- struct linux_dirent64 __user *dirent;
3665 ++ struct linux_dirent64 __user *dirent, *prev;
3666 + struct getdents_callback64 *buf =
3667 + container_of(ctx, struct getdents_callback64, ctx);
3668 + int reclen = ALIGN(offsetof(struct linux_dirent64, d_name) + namlen + 1,
3669 + sizeof(u64));
3670 ++ int prev_reclen;
3671 +
3672 + buf->error = verify_dirent_name(name, namlen);
3673 + if (unlikely(buf->error))
3674 +@@ -319,30 +322,27 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
3675 + buf->error = -EINVAL; /* only used if we fail.. */
3676 + if (reclen > buf->count)
3677 + return -EINVAL;
3678 +- dirent = buf->previous;
3679 +- if (dirent && signal_pending(current))
3680 ++ prev_reclen = buf->prev_reclen;
3681 ++ if (prev_reclen && signal_pending(current))
3682 + return -EINTR;
3683 +-
3684 +- /*
3685 +- * Note! This range-checks 'previous' (which may be NULL).
3686 +- * The real range was checked in getdents
3687 +- */
3688 +- if (!user_access_begin(dirent, sizeof(*dirent)))
3689 +- goto efault;
3690 +- if (dirent)
3691 +- unsafe_put_user(offset, &dirent->d_off, efault_end);
3692 + dirent = buf->current_dir;
3693 ++ prev = (void __user *)dirent - prev_reclen;
3694 ++ if (!user_access_begin(prev, reclen + prev_reclen))
3695 ++ goto efault;
3696 ++
3697 ++ /* This might be 'dirent->d_off', but if so it will get overwritten */
3698 ++ unsafe_put_user(offset, &prev->d_off, efault_end);
3699 + unsafe_put_user(ino, &dirent->d_ino, efault_end);
3700 + unsafe_put_user(reclen, &dirent->d_reclen, efault_end);
3701 + unsafe_put_user(d_type, &dirent->d_type, efault_end);
3702 + unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end);
3703 + user_access_end();
3704 +
3705 +- buf->previous = dirent;
3706 +- dirent = (void __user *)dirent + reclen;
3707 +- buf->current_dir = dirent;
3708 ++ buf->prev_reclen = reclen;
3709 ++ buf->current_dir = (void __user *)dirent + reclen;
3710 + buf->count -= reclen;
3711 + return 0;
3712 ++
3713 + efault_end:
3714 + user_access_end();
3715 + efault:
3716 +@@ -354,7 +354,6 @@ int ksys_getdents64(unsigned int fd, struct linux_dirent64 __user *dirent,
3717 + unsigned int count)
3718 + {
3719 + struct fd f;
3720 +- struct linux_dirent64 __user * lastdirent;
3721 + struct getdents_callback64 buf = {
3722 + .ctx.actor = filldir64,
3723 + .count = count,
3724 +@@ -372,9 +371,11 @@ int ksys_getdents64(unsigned int fd, struct linux_dirent64 __user *dirent,
3725 + error = iterate_dir(f.file, &buf.ctx);
3726 + if (error >= 0)
3727 + error = buf.error;
3728 +- lastdirent = buf.previous;
3729 +- if (lastdirent) {
3730 ++ if (buf.prev_reclen) {
3731 ++ struct linux_dirent64 __user * lastdirent;
3732 + typeof(lastdirent->d_off) d_off = buf.ctx.pos;
3733 ++
3734 ++ lastdirent = (void __user *) buf.current_dir - buf.prev_reclen;
3735 + if (__put_user(d_off, &lastdirent->d_off))
3736 + error = -EFAULT;
3737 + else
3738 +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
3739 +index 13f09706033a..f8fde9fa479c 100644
3740 +--- a/include/linux/netdevice.h
3741 ++++ b/include/linux/netdevice.h
3742 +@@ -3666,6 +3666,8 @@ int dev_set_alias(struct net_device *, const char *, size_t);
3743 + int dev_get_alias(const struct net_device *, char *, size_t);
3744 + int dev_change_net_namespace(struct net_device *, struct net *, const char *);
3745 + int __dev_set_mtu(struct net_device *, int);
3746 ++int dev_validate_mtu(struct net_device *dev, int mtu,
3747 ++ struct netlink_ext_ack *extack);
3748 + int dev_set_mtu_ext(struct net_device *dev, int mtu,
3749 + struct netlink_ext_ack *extack);
3750 + int dev_set_mtu(struct net_device *, int);
3751 +diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
3752 +index 9bc255a8461b..77336f4c4b1c 100644
3753 +--- a/include/linux/netfilter/ipset/ip_set.h
3754 ++++ b/include/linux/netfilter/ipset/ip_set.h
3755 +@@ -445,13 +445,6 @@ ip6addrptr(const struct sk_buff *skb, bool src, struct in6_addr *addr)
3756 + sizeof(*addr));
3757 + }
3758 +
3759 +-/* Calculate the bytes required to store the inclusive range of a-b */
3760 +-static inline int
3761 +-bitmap_bytes(u32 a, u32 b)
3762 +-{
3763 +- return 4 * ((((b - a + 8) / 8) + 3) / 4);
3764 +-}
3765 +-
3766 + /* How often should the gc be run by default */
3767 + #define IPSET_GC_TIME (3 * 60)
3768 +
3769 +diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
3770 +index cf09ab37b45b..851425c3178f 100644
3771 +--- a/include/linux/netfilter/nfnetlink.h
3772 ++++ b/include/linux/netfilter/nfnetlink.h
3773 +@@ -31,7 +31,7 @@ struct nfnetlink_subsystem {
3774 + const struct nfnl_callback *cb; /* callback for individual types */
3775 + struct module *owner;
3776 + int (*commit)(struct net *net, struct sk_buff *skb);
3777 +- int (*abort)(struct net *net, struct sk_buff *skb);
3778 ++ int (*abort)(struct net *net, struct sk_buff *skb, bool autoload);
3779 + void (*cleanup)(struct net *net);
3780 + bool (*valid_genid)(struct net *net, u32 genid);
3781 + };
3782 +diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
3783 +index 286fd960896f..a1a8d45adb42 100644
3784 +--- a/include/net/netns/nftables.h
3785 ++++ b/include/net/netns/nftables.h
3786 +@@ -7,6 +7,7 @@
3787 + struct netns_nftables {
3788 + struct list_head tables;
3789 + struct list_head commit_list;
3790 ++ struct list_head module_list;
3791 + struct mutex commit_mutex;
3792 + unsigned int base_seq;
3793 + u8 gencursor;
3794 +diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
3795 +index 9a0e8af21310..a5ccfa67bc5c 100644
3796 +--- a/include/trace/events/xen.h
3797 ++++ b/include/trace/events/xen.h
3798 +@@ -66,7 +66,11 @@ TRACE_EVENT(xen_mc_callback,
3799 + TP_PROTO(xen_mc_callback_fn_t fn, void *data),
3800 + TP_ARGS(fn, data),
3801 + TP_STRUCT__entry(
3802 +- __field(xen_mc_callback_fn_t, fn)
3803 ++ /*
3804 ++ * Use field_struct to avoid is_signed_type()
3805 ++ * comparison of a function pointer.
3806 ++ */
3807 ++ __field_struct(xen_mc_callback_fn_t, fn)
3808 + __field(void *, data)
3809 + ),
3810 + TP_fast_assign(
3811 +diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
3812 +index 26b9168321e7..d65f2d5ab694 100644
3813 +--- a/kernel/power/snapshot.c
3814 ++++ b/kernel/power/snapshot.c
3815 +@@ -1147,24 +1147,24 @@ void free_basic_memory_bitmaps(void)
3816 +
3817 + void clear_free_pages(void)
3818 + {
3819 +-#ifdef CONFIG_PAGE_POISONING_ZERO
3820 + struct memory_bitmap *bm = free_pages_map;
3821 + unsigned long pfn;
3822 +
3823 + if (WARN_ON(!(free_pages_map)))
3824 + return;
3825 +
3826 +- memory_bm_position_reset(bm);
3827 +- pfn = memory_bm_next_pfn(bm);
3828 +- while (pfn != BM_END_OF_MAP) {
3829 +- if (pfn_valid(pfn))
3830 +- clear_highpage(pfn_to_page(pfn));
3831 +-
3832 ++ if (IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) || want_init_on_free()) {
3833 ++ memory_bm_position_reset(bm);
3834 + pfn = memory_bm_next_pfn(bm);
3835 ++ while (pfn != BM_END_OF_MAP) {
3836 ++ if (pfn_valid(pfn))
3837 ++ clear_highpage(pfn_to_page(pfn));
3838 ++
3839 ++ pfn = memory_bm_next_pfn(bm);
3840 ++ }
3841 ++ memory_bm_position_reset(bm);
3842 ++ pr_info("free pages cleared after restore\n");
3843 + }
3844 +- memory_bm_position_reset(bm);
3845 +- pr_info("free pages cleared after restore\n");
3846 +-#endif /* PAGE_POISONING_ZERO */
3847 + }
3848 +
3849 + /**
3850 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
3851 +index bcb72f102613..341aab32c946 100644
3852 +--- a/kernel/trace/trace.c
3853 ++++ b/kernel/trace/trace.c
3854 +@@ -9270,6 +9270,11 @@ __init static int tracing_set_default_clock(void)
3855 + {
3856 + /* sched_clock_stable() is determined in late_initcall */
3857 + if (!trace_boot_clock && !sched_clock_stable()) {
3858 ++ if (security_locked_down(LOCKDOWN_TRACEFS)) {
3859 ++ pr_warn("Can not set tracing clock due to lockdown\n");
3860 ++ return -EPERM;
3861 ++ }
3862 ++
3863 + printk(KERN_WARNING
3864 + "Unstable clock detected, switching default tracing clock to \"global\"\n"
3865 + "If you want to keep using the local clock, then add:\n"
3866 +diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
3867 +index c2783915600c..205692181e7b 100644
3868 +--- a/kernel/trace/trace_events_hist.c
3869 ++++ b/kernel/trace/trace_events_hist.c
3870 +@@ -116,6 +116,7 @@ struct hist_field {
3871 + struct ftrace_event_field *field;
3872 + unsigned long flags;
3873 + hist_field_fn_t fn;
3874 ++ unsigned int ref;
3875 + unsigned int size;
3876 + unsigned int offset;
3877 + unsigned int is_signed;
3878 +@@ -1766,11 +1767,13 @@ static struct hist_field *find_var(struct hist_trigger_data *hist_data,
3879 + struct event_trigger_data *test;
3880 + struct hist_field *hist_field;
3881 +
3882 ++ lockdep_assert_held(&event_mutex);
3883 ++
3884 + hist_field = find_var_field(hist_data, var_name);
3885 + if (hist_field)
3886 + return hist_field;
3887 +
3888 +- list_for_each_entry_rcu(test, &file->triggers, list) {
3889 ++ list_for_each_entry(test, &file->triggers, list) {
3890 + if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
3891 + test_data = test->private_data;
3892 + hist_field = find_var_field(test_data, var_name);
3893 +@@ -1820,7 +1823,9 @@ static struct hist_field *find_file_var(struct trace_event_file *file,
3894 + struct event_trigger_data *test;
3895 + struct hist_field *hist_field;
3896 +
3897 +- list_for_each_entry_rcu(test, &file->triggers, list) {
3898 ++ lockdep_assert_held(&event_mutex);
3899 ++
3900 ++ list_for_each_entry(test, &file->triggers, list) {
3901 + if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
3902 + test_data = test->private_data;
3903 + hist_field = find_var_field(test_data, var_name);
3904 +@@ -2423,8 +2428,16 @@ static int contains_operator(char *str)
3905 + return field_op;
3906 + }
3907 +
3908 ++static void get_hist_field(struct hist_field *hist_field)
3909 ++{
3910 ++ hist_field->ref++;
3911 ++}
3912 ++
3913 + static void __destroy_hist_field(struct hist_field *hist_field)
3914 + {
3915 ++ if (--hist_field->ref > 1)
3916 ++ return;
3917 ++
3918 + kfree(hist_field->var.name);
3919 + kfree(hist_field->name);
3920 + kfree(hist_field->type);
3921 +@@ -2466,6 +2479,8 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
3922 + if (!hist_field)
3923 + return NULL;
3924 +
3925 ++ hist_field->ref = 1;
3926 ++
3927 + hist_field->hist_data = hist_data;
3928 +
3929 + if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS)
3930 +@@ -2661,6 +2676,17 @@ static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data,
3931 + {
3932 + unsigned long flags = HIST_FIELD_FL_VAR_REF;
3933 + struct hist_field *ref_field;
3934 ++ int i;
3935 ++
3936 ++ /* Check if the variable already exists */
3937 ++ for (i = 0; i < hist_data->n_var_refs; i++) {
3938 ++ ref_field = hist_data->var_refs[i];
3939 ++ if (ref_field->var.idx == var_field->var.idx &&
3940 ++ ref_field->var.hist_data == var_field->hist_data) {
3941 ++ get_hist_field(ref_field);
3942 ++ return ref_field;
3943 ++ }
3944 ++ }
3945 +
3946 + ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL);
3947 + if (ref_field) {
3948 +@@ -3115,7 +3141,9 @@ static char *find_trigger_filter(struct hist_trigger_data *hist_data,
3949 + {
3950 + struct event_trigger_data *test;
3951 +
3952 +- list_for_each_entry_rcu(test, &file->triggers, list) {
3953 ++ lockdep_assert_held(&event_mutex);
3954 ++
3955 ++ list_for_each_entry(test, &file->triggers, list) {
3956 + if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
3957 + if (test->private_data == hist_data)
3958 + return test->filter_str;
3959 +@@ -3166,9 +3194,11 @@ find_compatible_hist(struct hist_trigger_data *target_hist_data,
3960 + struct event_trigger_data *test;
3961 + unsigned int n_keys;
3962 +
3963 ++ lockdep_assert_held(&event_mutex);
3964 ++
3965 + n_keys = target_hist_data->n_fields - target_hist_data->n_vals;
3966 +
3967 +- list_for_each_entry_rcu(test, &file->triggers, list) {
3968 ++ list_for_each_entry(test, &file->triggers, list) {
3969 + if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
3970 + hist_data = test->private_data;
3971 +
3972 +@@ -5528,7 +5558,7 @@ static int hist_show(struct seq_file *m, void *v)
3973 + goto out_unlock;
3974 + }
3975 +
3976 +- list_for_each_entry_rcu(data, &event_file->triggers, list) {
3977 ++ list_for_each_entry(data, &event_file->triggers, list) {
3978 + if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
3979 + hist_trigger_show(m, data, n++);
3980 + }
3981 +@@ -5921,7 +5951,9 @@ static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
3982 + if (hist_data->attrs->name && !named_data)
3983 + goto new;
3984 +
3985 +- list_for_each_entry_rcu(test, &file->triggers, list) {
3986 ++ lockdep_assert_held(&event_mutex);
3987 ++
3988 ++ list_for_each_entry(test, &file->triggers, list) {
3989 + if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
3990 + if (!hist_trigger_match(data, test, named_data, false))
3991 + continue;
3992 +@@ -6005,10 +6037,12 @@ static bool have_hist_trigger_match(struct event_trigger_data *data,
3993 + struct event_trigger_data *test, *named_data = NULL;
3994 + bool match = false;
3995 +
3996 ++ lockdep_assert_held(&event_mutex);
3997 ++
3998 + if (hist_data->attrs->name)
3999 + named_data = find_named_trigger(hist_data->attrs->name);
4000 +
4001 +- list_for_each_entry_rcu(test, &file->triggers, list) {
4002 ++ list_for_each_entry(test, &file->triggers, list) {
4003 + if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
4004 + if (hist_trigger_match(data, test, named_data, false)) {
4005 + match = true;
4006 +@@ -6026,10 +6060,12 @@ static bool hist_trigger_check_refs(struct event_trigger_data *data,
4007 + struct hist_trigger_data *hist_data = data->private_data;
4008 + struct event_trigger_data *test, *named_data = NULL;
4009 +
4010 ++ lockdep_assert_held(&event_mutex);
4011 ++
4012 + if (hist_data->attrs->name)
4013 + named_data = find_named_trigger(hist_data->attrs->name);
4014 +
4015 +- list_for_each_entry_rcu(test, &file->triggers, list) {
4016 ++ list_for_each_entry(test, &file->triggers, list) {
4017 + if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
4018 + if (!hist_trigger_match(data, test, named_data, false))
4019 + continue;
4020 +@@ -6051,10 +6087,12 @@ static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
4021 + struct event_trigger_data *test, *named_data = NULL;
4022 + bool unregistered = false;
4023 +
4024 ++ lockdep_assert_held(&event_mutex);
4025 ++
4026 + if (hist_data->attrs->name)
4027 + named_data = find_named_trigger(hist_data->attrs->name);
4028 +
4029 +- list_for_each_entry_rcu(test, &file->triggers, list) {
4030 ++ list_for_each_entry(test, &file->triggers, list) {
4031 + if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
4032 + if (!hist_trigger_match(data, test, named_data, false))
4033 + continue;
4034 +@@ -6080,7 +6118,9 @@ static bool hist_file_check_refs(struct trace_event_file *file)
4035 + struct hist_trigger_data *hist_data;
4036 + struct event_trigger_data *test;
4037 +
4038 +- list_for_each_entry_rcu(test, &file->triggers, list) {
4039 ++ lockdep_assert_held(&event_mutex);
4040 ++
4041 ++ list_for_each_entry(test, &file->triggers, list) {
4042 + if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
4043 + hist_data = test->private_data;
4044 + if (check_var_refs(hist_data))
4045 +@@ -6323,7 +6363,8 @@ hist_enable_trigger(struct event_trigger_data *data, void *rec,
4046 + struct enable_trigger_data *enable_data = data->private_data;
4047 + struct event_trigger_data *test;
4048 +
4049 +- list_for_each_entry_rcu(test, &enable_data->file->triggers, list) {
4050 ++ list_for_each_entry_rcu(test, &enable_data->file->triggers, list,
4051 ++ lockdep_is_held(&event_mutex)) {
4052 + if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
4053 + if (enable_data->enable)
4054 + test->paused = false;
4055 +diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
4056 +index 2cd53ca21b51..40106fff06a4 100644
4057 +--- a/kernel/trace/trace_events_trigger.c
4058 ++++ b/kernel/trace/trace_events_trigger.c
4059 +@@ -501,7 +501,9 @@ void update_cond_flag(struct trace_event_file *file)
4060 + struct event_trigger_data *data;
4061 + bool set_cond = false;
4062 +
4063 +- list_for_each_entry_rcu(data, &file->triggers, list) {
4064 ++ lockdep_assert_held(&event_mutex);
4065 ++
4066 ++ list_for_each_entry(data, &file->triggers, list) {
4067 + if (data->filter || event_command_post_trigger(data->cmd_ops) ||
4068 + event_command_needs_rec(data->cmd_ops)) {
4069 + set_cond = true;
4070 +@@ -536,7 +538,9 @@ static int register_trigger(char *glob, struct event_trigger_ops *ops,
4071 + struct event_trigger_data *test;
4072 + int ret = 0;
4073 +
4074 +- list_for_each_entry_rcu(test, &file->triggers, list) {
4075 ++ lockdep_assert_held(&event_mutex);
4076 ++
4077 ++ list_for_each_entry(test, &file->triggers, list) {
4078 + if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
4079 + ret = -EEXIST;
4080 + goto out;
4081 +@@ -581,7 +585,9 @@ static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
4082 + struct event_trigger_data *data;
4083 + bool unregistered = false;
4084 +
4085 +- list_for_each_entry_rcu(data, &file->triggers, list) {
4086 ++ lockdep_assert_held(&event_mutex);
4087 ++
4088 ++ list_for_each_entry(data, &file->triggers, list) {
4089 + if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
4090 + unregistered = true;
4091 + list_del_rcu(&data->list);
4092 +@@ -1497,7 +1503,9 @@ int event_enable_register_trigger(char *glob,
4093 + struct event_trigger_data *test;
4094 + int ret = 0;
4095 +
4096 +- list_for_each_entry_rcu(test, &file->triggers, list) {
4097 ++ lockdep_assert_held(&event_mutex);
4098 ++
4099 ++ list_for_each_entry(test, &file->triggers, list) {
4100 + test_enable_data = test->private_data;
4101 + if (test_enable_data &&
4102 + (test->cmd_ops->trigger_type ==
4103 +@@ -1537,7 +1545,9 @@ void event_enable_unregister_trigger(char *glob,
4104 + struct event_trigger_data *data;
4105 + bool unregistered = false;
4106 +
4107 +- list_for_each_entry_rcu(data, &file->triggers, list) {
4108 ++ lockdep_assert_held(&event_mutex);
4109 ++
4110 ++ list_for_each_entry(data, &file->triggers, list) {
4111 + enable_data = data->private_data;
4112 + if (enable_data &&
4113 + (data->cmd_ops->trigger_type ==
4114 +diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
4115 +index 7f890262c8a3..3e5f9c7d939c 100644
4116 +--- a/kernel/trace/trace_kprobe.c
4117 ++++ b/kernel/trace/trace_kprobe.c
4118 +@@ -290,7 +290,7 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group,
4119 + INIT_HLIST_NODE(&tk->rp.kp.hlist);
4120 + INIT_LIST_HEAD(&tk->rp.kp.list);
4121 +
4122 +- ret = trace_probe_init(&tk->tp, event, group);
4123 ++ ret = trace_probe_init(&tk->tp, event, group, 0);
4124 + if (ret < 0)
4125 + goto error;
4126 +
4127 +diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
4128 +index 905b10af5d5c..bba18cf44a30 100644
4129 +--- a/kernel/trace/trace_probe.c
4130 ++++ b/kernel/trace/trace_probe.c
4131 +@@ -984,7 +984,7 @@ void trace_probe_cleanup(struct trace_probe *tp)
4132 + }
4133 +
4134 + int trace_probe_init(struct trace_probe *tp, const char *event,
4135 +- const char *group)
4136 ++ const char *group, size_t event_data_size)
4137 + {
4138 + struct trace_event_call *call;
4139 + int ret = 0;
4140 +@@ -992,7 +992,8 @@ int trace_probe_init(struct trace_probe *tp, const char *event,
4141 + if (!event || !group)
4142 + return -EINVAL;
4143 +
4144 +- tp->event = kzalloc(sizeof(struct trace_probe_event), GFP_KERNEL);
4145 ++ tp->event = kzalloc(sizeof(struct trace_probe_event) + event_data_size,
4146 ++ GFP_KERNEL);
4147 + if (!tp->event)
4148 + return -ENOMEM;
4149 +
4150 +diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
4151 +index 4ee703728aec..03e4e180058d 100644
4152 +--- a/kernel/trace/trace_probe.h
4153 ++++ b/kernel/trace/trace_probe.h
4154 +@@ -230,6 +230,7 @@ struct trace_probe_event {
4155 + struct trace_event_call call;
4156 + struct list_head files;
4157 + struct list_head probes;
4158 ++ char data[0];
4159 + };
4160 +
4161 + struct trace_probe {
4162 +@@ -322,7 +323,7 @@ static inline bool trace_probe_has_single_file(struct trace_probe *tp)
4163 + }
4164 +
4165 + int trace_probe_init(struct trace_probe *tp, const char *event,
4166 +- const char *group);
4167 ++ const char *group, size_t event_data_size);
4168 + void trace_probe_cleanup(struct trace_probe *tp);
4169 + int trace_probe_append(struct trace_probe *tp, struct trace_probe *to);
4170 + void trace_probe_unlink(struct trace_probe *tp);
4171 +diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
4172 +index 352073d36585..f66e202fec13 100644
4173 +--- a/kernel/trace/trace_uprobe.c
4174 ++++ b/kernel/trace/trace_uprobe.c
4175 +@@ -60,7 +60,6 @@ static struct dyn_event_operations trace_uprobe_ops = {
4176 + */
4177 + struct trace_uprobe {
4178 + struct dyn_event devent;
4179 +- struct trace_uprobe_filter filter;
4180 + struct uprobe_consumer consumer;
4181 + struct path path;
4182 + struct inode *inode;
4183 +@@ -264,6 +263,14 @@ process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
4184 + }
4185 + NOKPROBE_SYMBOL(process_fetch_insn)
4186 +
4187 ++static struct trace_uprobe_filter *
4188 ++trace_uprobe_get_filter(struct trace_uprobe *tu)
4189 ++{
4190 ++ struct trace_probe_event *event = tu->tp.event;
4191 ++
4192 ++ return (struct trace_uprobe_filter *)&event->data[0];
4193 ++}
4194 ++
4195 + static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
4196 + {
4197 + rwlock_init(&filter->rwlock);
4198 +@@ -351,7 +358,8 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
4199 + if (!tu)
4200 + return ERR_PTR(-ENOMEM);
4201 +
4202 +- ret = trace_probe_init(&tu->tp, event, group);
4203 ++ ret = trace_probe_init(&tu->tp, event, group,
4204 ++ sizeof(struct trace_uprobe_filter));
4205 + if (ret < 0)
4206 + goto error;
4207 +
4208 +@@ -359,7 +367,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
4209 + tu->consumer.handler = uprobe_dispatcher;
4210 + if (is_ret)
4211 + tu->consumer.ret_handler = uretprobe_dispatcher;
4212 +- init_trace_uprobe_filter(&tu->filter);
4213 ++ init_trace_uprobe_filter(trace_uprobe_get_filter(tu));
4214 + return tu;
4215 +
4216 + error:
4217 +@@ -1067,13 +1075,14 @@ static void __probe_event_disable(struct trace_probe *tp)
4218 + struct trace_probe *pos;
4219 + struct trace_uprobe *tu;
4220 +
4221 ++ tu = container_of(tp, struct trace_uprobe, tp);
4222 ++ WARN_ON(!uprobe_filter_is_empty(trace_uprobe_get_filter(tu)));
4223 ++
4224 + list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
4225 + tu = container_of(pos, struct trace_uprobe, tp);
4226 + if (!tu->inode)
4227 + continue;
4228 +
4229 +- WARN_ON(!uprobe_filter_is_empty(&tu->filter));
4230 +-
4231 + uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
4232 + tu->inode = NULL;
4233 + }
4234 +@@ -1108,7 +1117,7 @@ static int probe_event_enable(struct trace_event_call *call,
4235 + }
4236 +
4237 + tu = container_of(tp, struct trace_uprobe, tp);
4238 +- WARN_ON(!uprobe_filter_is_empty(&tu->filter));
4239 ++ WARN_ON(!uprobe_filter_is_empty(trace_uprobe_get_filter(tu)));
4240 +
4241 + if (enabled)
4242 + return 0;
4243 +@@ -1205,39 +1214,39 @@ __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
4244 + }
4245 +
4246 + static inline bool
4247 +-uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
4248 ++trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
4249 ++ struct perf_event *event)
4250 + {
4251 +- return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
4252 ++ return __uprobe_perf_filter(filter, event->hw.target->mm);
4253 + }
4254 +
4255 +-static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
4256 ++static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
4257 ++ struct perf_event *event)
4258 + {
4259 + bool done;
4260 +
4261 +- write_lock(&tu->filter.rwlock);
4262 ++ write_lock(&filter->rwlock);
4263 + if (event->hw.target) {
4264 + list_del(&event->hw.tp_list);
4265 +- done = tu->filter.nr_systemwide ||
4266 ++ done = filter->nr_systemwide ||
4267 + (event->hw.target->flags & PF_EXITING) ||
4268 +- uprobe_filter_event(tu, event);
4269 ++ trace_uprobe_filter_event(filter, event);
4270 + } else {
4271 +- tu->filter.nr_systemwide--;
4272 +- done = tu->filter.nr_systemwide;
4273 ++ filter->nr_systemwide--;
4274 ++ done = filter->nr_systemwide;
4275 + }
4276 +- write_unlock(&tu->filter.rwlock);
4277 +-
4278 +- if (!done)
4279 +- return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
4280 ++ write_unlock(&filter->rwlock);
4281 +
4282 +- return 0;
4283 ++ return done;
4284 + }
4285 +
4286 +-static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
4287 ++/* This returns true if the filter always covers target mm */
4288 ++static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
4289 ++ struct perf_event *event)
4290 + {
4291 + bool done;
4292 +- int err;
4293 +
4294 +- write_lock(&tu->filter.rwlock);
4295 ++ write_lock(&filter->rwlock);
4296 + if (event->hw.target) {
4297 + /*
4298 + * event->parent != NULL means copy_process(), we can avoid
4299 +@@ -1247,28 +1256,21 @@ static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
4300 + * attr.enable_on_exec means that exec/mmap will install the
4301 + * breakpoints we need.
4302 + */
4303 +- done = tu->filter.nr_systemwide ||
4304 ++ done = filter->nr_systemwide ||
4305 + event->parent || event->attr.enable_on_exec ||
4306 +- uprobe_filter_event(tu, event);
4307 +- list_add(&event->hw.tp_list, &tu->filter.perf_events);
4308 ++ trace_uprobe_filter_event(filter, event);
4309 ++ list_add(&event->hw.tp_list, &filter->perf_events);
4310 + } else {
4311 +- done = tu->filter.nr_systemwide;
4312 +- tu->filter.nr_systemwide++;
4313 ++ done = filter->nr_systemwide;
4314 ++ filter->nr_systemwide++;
4315 + }
4316 +- write_unlock(&tu->filter.rwlock);
4317 ++ write_unlock(&filter->rwlock);
4318 +
4319 +- err = 0;
4320 +- if (!done) {
4321 +- err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
4322 +- if (err)
4323 +- uprobe_perf_close(tu, event);
4324 +- }
4325 +- return err;
4326 ++ return done;
4327 + }
4328 +
4329 +-static int uprobe_perf_multi_call(struct trace_event_call *call,
4330 +- struct perf_event *event,
4331 +- int (*op)(struct trace_uprobe *tu, struct perf_event *event))
4332 ++static int uprobe_perf_close(struct trace_event_call *call,
4333 ++ struct perf_event *event)
4334 + {
4335 + struct trace_probe *pos, *tp;
4336 + struct trace_uprobe *tu;
4337 +@@ -1278,25 +1280,59 @@ static int uprobe_perf_multi_call(struct trace_event_call *call,
4338 + if (WARN_ON_ONCE(!tp))
4339 + return -ENODEV;
4340 +
4341 ++ tu = container_of(tp, struct trace_uprobe, tp);
4342 ++ if (trace_uprobe_filter_remove(trace_uprobe_get_filter(tu), event))
4343 ++ return 0;
4344 ++
4345 + list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
4346 + tu = container_of(pos, struct trace_uprobe, tp);
4347 +- ret = op(tu, event);
4348 ++ ret = uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
4349 + if (ret)
4350 + break;
4351 + }
4352 +
4353 + return ret;
4354 + }
4355 ++
4356 ++static int uprobe_perf_open(struct trace_event_call *call,
4357 ++ struct perf_event *event)
4358 ++{
4359 ++ struct trace_probe *pos, *tp;
4360 ++ struct trace_uprobe *tu;
4361 ++ int err = 0;
4362 ++
4363 ++ tp = trace_probe_primary_from_call(call);
4364 ++ if (WARN_ON_ONCE(!tp))
4365 ++ return -ENODEV;
4366 ++
4367 ++ tu = container_of(tp, struct trace_uprobe, tp);
4368 ++ if (trace_uprobe_filter_add(trace_uprobe_get_filter(tu), event))
4369 ++ return 0;
4370 ++
4371 ++ list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
4372 ++ err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
4373 ++ if (err) {
4374 ++ uprobe_perf_close(call, event);
4375 ++ break;
4376 ++ }
4377 ++ }
4378 ++
4379 ++ return err;
4380 ++}
4381 ++
4382 + static bool uprobe_perf_filter(struct uprobe_consumer *uc,
4383 + enum uprobe_filter_ctx ctx, struct mm_struct *mm)
4384 + {
4385 ++ struct trace_uprobe_filter *filter;
4386 + struct trace_uprobe *tu;
4387 + int ret;
4388 +
4389 + tu = container_of(uc, struct trace_uprobe, consumer);
4390 +- read_lock(&tu->filter.rwlock);
4391 +- ret = __uprobe_perf_filter(&tu->filter, mm);
4392 +- read_unlock(&tu->filter.rwlock);
4393 ++ filter = trace_uprobe_get_filter(tu);
4394 ++
4395 ++ read_lock(&filter->rwlock);
4396 ++ ret = __uprobe_perf_filter(filter, mm);
4397 ++ read_unlock(&filter->rwlock);
4398 +
4399 + return ret;
4400 + }
4401 +@@ -1419,10 +1455,10 @@ trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
4402 + return 0;
4403 +
4404 + case TRACE_REG_PERF_OPEN:
4405 +- return uprobe_perf_multi_call(event, data, uprobe_perf_open);
4406 ++ return uprobe_perf_open(event, data);
4407 +
4408 + case TRACE_REG_PERF_CLOSE:
4409 +- return uprobe_perf_multi_call(event, data, uprobe_perf_close);
4410 ++ return uprobe_perf_close(event, data);
4411 +
4412 + #endif
4413 + default:
4414 +diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
4415 +index dccb95af6003..706020b06617 100644
4416 +--- a/lib/strncpy_from_user.c
4417 ++++ b/lib/strncpy_from_user.c
4418 +@@ -30,13 +30,6 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src,
4419 + const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
4420 + unsigned long res = 0;
4421 +
4422 +- /*
4423 +- * Truncate 'max' to the user-specified limit, so that
4424 +- * we only have one limit we need to check in the loop
4425 +- */
4426 +- if (max > count)
4427 +- max = count;
4428 +-
4429 + if (IS_UNALIGNED(src, dst))
4430 + goto byte_at_a_time;
4431 +
4432 +@@ -114,6 +107,13 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
4433 + unsigned long max = max_addr - src_addr;
4434 + long retval;
4435 +
4436 ++ /*
4437 ++ * Truncate 'max' to the user-specified limit, so that
4438 ++ * we only have one limit we need to check in the loop
4439 ++ */
4440 ++ if (max > count)
4441 ++ max = count;
4442 ++
4443 + kasan_check_write(dst, count);
4444 + check_object_size(dst, count, false);
4445 + if (user_access_begin(src, max)) {
4446 +diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
4447 +index 6c0005d5dd5c..41670d4a5816 100644
4448 +--- a/lib/strnlen_user.c
4449 ++++ b/lib/strnlen_user.c
4450 +@@ -26,13 +26,6 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
4451 + unsigned long align, res = 0;
4452 + unsigned long c;
4453 +
4454 +- /*
4455 +- * Truncate 'max' to the user-specified limit, so that
4456 +- * we only have one limit we need to check in the loop
4457 +- */
4458 +- if (max > count)
4459 +- max = count;
4460 +-
4461 + /*
4462 + * Do everything aligned. But that means that we
4463 + * need to also expand the maximum..
4464 +@@ -109,6 +102,13 @@ long strnlen_user(const char __user *str, long count)
4465 + unsigned long max = max_addr - src_addr;
4466 + long retval;
4467 +
4468 ++ /*
4469 ++ * Truncate 'max' to the user-specified limit, so that
4470 ++ * we only have one limit we need to check in the loop
4471 ++ */
4472 ++ if (max > count)
4473 ++ max = count;
4474 ++
4475 + if (user_access_begin(str, max)) {
4476 + retval = do_strnlen_user(str, count, max);
4477 + user_access_end();
4478 +diff --git a/lib/test_xarray.c b/lib/test_xarray.c
4479 +index 7df4f7f395bf..03c3f42966ce 100644
4480 +--- a/lib/test_xarray.c
4481 ++++ b/lib/test_xarray.c
4482 +@@ -2,6 +2,7 @@
4483 + /*
4484 + * test_xarray.c: Test the XArray API
4485 + * Copyright (c) 2017-2018 Microsoft Corporation
4486 ++ * Copyright (c) 2019-2020 Oracle
4487 + * Author: Matthew Wilcox <willy@×××××××××.org>
4488 + */
4489 +
4490 +@@ -902,28 +903,34 @@ static noinline void check_store_iter(struct xarray *xa)
4491 + XA_BUG_ON(xa, !xa_empty(xa));
4492 + }
4493 +
4494 +-static noinline void check_multi_find(struct xarray *xa)
4495 ++static noinline void check_multi_find_1(struct xarray *xa, unsigned order)
4496 + {
4497 + #ifdef CONFIG_XARRAY_MULTI
4498 ++ unsigned long multi = 3 << order;
4499 ++ unsigned long next = 4 << order;
4500 + unsigned long index;
4501 +
4502 +- xa_store_order(xa, 12, 2, xa_mk_value(12), GFP_KERNEL);
4503 +- XA_BUG_ON(xa, xa_store_index(xa, 16, GFP_KERNEL) != NULL);
4504 ++ xa_store_order(xa, multi, order, xa_mk_value(multi), GFP_KERNEL);
4505 ++ XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL) != NULL);
4506 ++ XA_BUG_ON(xa, xa_store_index(xa, next + 1, GFP_KERNEL) != NULL);
4507 +
4508 + index = 0;
4509 + XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
4510 +- xa_mk_value(12));
4511 +- XA_BUG_ON(xa, index != 12);
4512 +- index = 13;
4513 ++ xa_mk_value(multi));
4514 ++ XA_BUG_ON(xa, index != multi);
4515 ++ index = multi + 1;
4516 + XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
4517 +- xa_mk_value(12));
4518 +- XA_BUG_ON(xa, (index < 12) || (index >= 16));
4519 ++ xa_mk_value(multi));
4520 ++ XA_BUG_ON(xa, (index < multi) || (index >= next));
4521 + XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
4522 +- xa_mk_value(16));
4523 +- XA_BUG_ON(xa, index != 16);
4524 +-
4525 +- xa_erase_index(xa, 12);
4526 +- xa_erase_index(xa, 16);
4527 ++ xa_mk_value(next));
4528 ++ XA_BUG_ON(xa, index != next);
4529 ++ XA_BUG_ON(xa, xa_find_after(xa, &index, next, XA_PRESENT) != NULL);
4530 ++ XA_BUG_ON(xa, index != next);
4531 ++
4532 ++ xa_erase_index(xa, multi);
4533 ++ xa_erase_index(xa, next);
4534 ++ xa_erase_index(xa, next + 1);
4535 + XA_BUG_ON(xa, !xa_empty(xa));
4536 + #endif
4537 + }
4538 +@@ -1046,12 +1053,33 @@ static noinline void check_find_3(struct xarray *xa)
4539 + xa_destroy(xa);
4540 + }
4541 +
4542 ++static noinline void check_find_4(struct xarray *xa)
4543 ++{
4544 ++ unsigned long index = 0;
4545 ++ void *entry;
4546 ++
4547 ++ xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
4548 ++
4549 ++ entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
4550 ++ XA_BUG_ON(xa, entry != xa_mk_index(ULONG_MAX));
4551 ++
4552 ++ entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
4553 ++ XA_BUG_ON(xa, entry);
4554 ++
4555 ++ xa_erase_index(xa, ULONG_MAX);
4556 ++}
4557 ++
4558 + static noinline void check_find(struct xarray *xa)
4559 + {
4560 ++ unsigned i;
4561 ++
4562 + check_find_1(xa);
4563 + check_find_2(xa);
4564 + check_find_3(xa);
4565 +- check_multi_find(xa);
4566 ++ check_find_4(xa);
4567 ++
4568 ++ for (i = 2; i < 10; i++)
4569 ++ check_multi_find_1(xa, i);
4570 + check_multi_find_2(xa);
4571 + }
4572 +
4573 +diff --git a/lib/xarray.c b/lib/xarray.c
4574 +index 1237c213f52b..47e17d46e5f8 100644
4575 +--- a/lib/xarray.c
4576 ++++ b/lib/xarray.c
4577 +@@ -1,7 +1,8 @@
4578 + // SPDX-License-Identifier: GPL-2.0+
4579 + /*
4580 + * XArray implementation
4581 +- * Copyright (c) 2017 Microsoft Corporation
4582 ++ * Copyright (c) 2017-2018 Microsoft Corporation
4583 ++ * Copyright (c) 2018-2020 Oracle
4584 + * Author: Matthew Wilcox <willy@×××××××××.org>
4585 + */
4586 +
4587 +@@ -1081,6 +1082,8 @@ void *xas_find(struct xa_state *xas, unsigned long max)
4588 +
4589 + if (xas_error(xas))
4590 + return NULL;
4591 ++ if (xas->xa_index > max)
4592 ++ return set_bounds(xas);
4593 +
4594 + if (!xas->xa_node) {
4595 + xas->xa_index = 1;
4596 +@@ -1150,6 +1153,8 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
4597 +
4598 + if (xas_error(xas))
4599 + return NULL;
4600 ++ if (xas->xa_index > max)
4601 ++ goto max;
4602 +
4603 + if (!xas->xa_node) {
4604 + xas->xa_index = 1;
4605 +@@ -1824,6 +1829,17 @@ void *xa_find(struct xarray *xa, unsigned long *indexp,
4606 + }
4607 + EXPORT_SYMBOL(xa_find);
4608 +
4609 ++static bool xas_sibling(struct xa_state *xas)
4610 ++{
4611 ++ struct xa_node *node = xas->xa_node;
4612 ++ unsigned long mask;
4613 ++
4614 ++ if (!node)
4615 ++ return false;
4616 ++ mask = (XA_CHUNK_SIZE << node->shift) - 1;
4617 ++ return (xas->xa_index & mask) > (xas->xa_offset << node->shift);
4618 ++}
4619 ++
4620 + /**
4621 + * xa_find_after() - Search the XArray for a present entry.
4622 + * @xa: XArray.
4623 +@@ -1847,21 +1863,20 @@ void *xa_find_after(struct xarray *xa, unsigned long *indexp,
4624 + XA_STATE(xas, xa, *indexp + 1);
4625 + void *entry;
4626 +
4627 ++ if (xas.xa_index == 0)
4628 ++ return NULL;
4629 ++
4630 + rcu_read_lock();
4631 + for (;;) {
4632 + if ((__force unsigned int)filter < XA_MAX_MARKS)
4633 + entry = xas_find_marked(&xas, max, filter);
4634 + else
4635 + entry = xas_find(&xas, max);
4636 +- if (xas.xa_node == XAS_BOUNDS)
4637 ++
4638 ++ if (xas_invalid(&xas))
4639 + break;
4640 +- if (xas.xa_shift) {
4641 +- if (xas.xa_index & ((1UL << xas.xa_shift) - 1))
4642 +- continue;
4643 +- } else {
4644 +- if (xas.xa_offset < (xas.xa_index & XA_CHUNK_MASK))
4645 +- continue;
4646 +- }
4647 ++ if (xas_sibling(&xas))
4648 ++ continue;
4649 + if (!xas_retry(&xas, entry))
4650 + break;
4651 + }
4652 +diff --git a/net/core/dev.c b/net/core/dev.c
4653 +index 3098c90d60e2..82325d3d1371 100644
4654 +--- a/net/core/dev.c
4655 ++++ b/net/core/dev.c
4656 +@@ -5270,9 +5270,29 @@ static void flush_all_backlogs(void)
4657 + put_online_cpus();
4658 + }
4659 +
4660 ++/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
4661 ++static void gro_normal_list(struct napi_struct *napi)
4662 ++{
4663 ++ if (!napi->rx_count)
4664 ++ return;
4665 ++ netif_receive_skb_list_internal(&napi->rx_list);
4666 ++ INIT_LIST_HEAD(&napi->rx_list);
4667 ++ napi->rx_count = 0;
4668 ++}
4669 ++
4670 ++/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
4671 ++ * pass the whole batch up to the stack.
4672 ++ */
4673 ++static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
4674 ++{
4675 ++ list_add_tail(&skb->list, &napi->rx_list);
4676 ++ if (++napi->rx_count >= gro_normal_batch)
4677 ++ gro_normal_list(napi);
4678 ++}
4679 ++
4680 + INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));
4681 + INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int));
4682 +-static int napi_gro_complete(struct sk_buff *skb)
4683 ++static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
4684 + {
4685 + struct packet_offload *ptype;
4686 + __be16 type = skb->protocol;
4687 +@@ -5305,7 +5325,8 @@ static int napi_gro_complete(struct sk_buff *skb)
4688 + }
4689 +
4690 + out:
4691 +- return netif_receive_skb_internal(skb);
4692 ++ gro_normal_one(napi, skb);
4693 ++ return NET_RX_SUCCESS;
4694 + }
4695 +
4696 + static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
4697 +@@ -5318,7 +5339,7 @@ static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
4698 + if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
4699 + return;
4700 + skb_list_del_init(skb);
4701 +- napi_gro_complete(skb);
4702 ++ napi_gro_complete(napi, skb);
4703 + napi->gro_hash[index].count--;
4704 + }
4705 +
4706 +@@ -5421,7 +5442,7 @@ static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
4707 + }
4708 + }
4709 +
4710 +-static void gro_flush_oldest(struct list_head *head)
4711 ++static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
4712 + {
4713 + struct sk_buff *oldest;
4714 +
4715 +@@ -5437,7 +5458,7 @@ static void gro_flush_oldest(struct list_head *head)
4716 + * SKB to the chain.
4717 + */
4718 + skb_list_del_init(oldest);
4719 +- napi_gro_complete(oldest);
4720 ++ napi_gro_complete(napi, oldest);
4721 + }
4722 +
4723 + INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *,
4724 +@@ -5513,7 +5534,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
4725 +
4726 + if (pp) {
4727 + skb_list_del_init(pp);
4728 +- napi_gro_complete(pp);
4729 ++ napi_gro_complete(napi, pp);
4730 + napi->gro_hash[hash].count--;
4731 + }
4732 +
4733 +@@ -5524,7 +5545,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
4734 + goto normal;
4735 +
4736 + if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) {
4737 +- gro_flush_oldest(gro_head);
4738 ++ gro_flush_oldest(napi, gro_head);
4739 + } else {
4740 + napi->gro_hash[hash].count++;
4741 + }
4742 +@@ -5672,26 +5693,6 @@ struct sk_buff *napi_get_frags(struct napi_struct *napi)
4743 + }
4744 + EXPORT_SYMBOL(napi_get_frags);
4745 +
4746 +-/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
4747 +-static void gro_normal_list(struct napi_struct *napi)
4748 +-{
4749 +- if (!napi->rx_count)
4750 +- return;
4751 +- netif_receive_skb_list_internal(&napi->rx_list);
4752 +- INIT_LIST_HEAD(&napi->rx_list);
4753 +- napi->rx_count = 0;
4754 +-}
4755 +-
4756 +-/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
4757 +- * pass the whole batch up to the stack.
4758 +- */
4759 +-static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
4760 +-{
4761 +- list_add_tail(&skb->list, &napi->rx_list);
4762 +- if (++napi->rx_count >= gro_normal_batch)
4763 +- gro_normal_list(napi);
4764 +-}
4765 +-
4766 + static gro_result_t napi_frags_finish(struct napi_struct *napi,
4767 + struct sk_buff *skb,
4768 + gro_result_t ret)
4769 +@@ -5979,8 +5980,6 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
4770 + NAPIF_STATE_IN_BUSY_POLL)))
4771 + return false;
4772 +
4773 +- gro_normal_list(n);
4774 +-
4775 + if (n->gro_bitmask) {
4776 + unsigned long timeout = 0;
4777 +
4778 +@@ -5996,6 +5995,9 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
4779 + hrtimer_start(&n->timer, ns_to_ktime(timeout),
4780 + HRTIMER_MODE_REL_PINNED);
4781 + }
4782 ++
4783 ++ gro_normal_list(n);
4784 ++
4785 + if (unlikely(!list_empty(&n->poll_list))) {
4786 + /* If n->poll_list is not empty, we need to mask irqs */
4787 + local_irq_save(flags);
4788 +@@ -6327,8 +6329,6 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
4789 + goto out_unlock;
4790 + }
4791 +
4792 +- gro_normal_list(n);
4793 +-
4794 + if (n->gro_bitmask) {
4795 + /* flush too old packets
4796 + * If HZ < 1000, flush all packets.
4797 +@@ -6336,6 +6336,8 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
4798 + napi_gro_flush(n, HZ >= 1000);
4799 + }
4800 +
4801 ++ gro_normal_list(n);
4802 ++
4803 + /* Some drivers may have called napi_schedule
4804 + * prior to exhausting their budget.
4805 + */
4806 +@@ -7973,6 +7975,22 @@ int __dev_set_mtu(struct net_device *dev, int new_mtu)
4807 + }
4808 + EXPORT_SYMBOL(__dev_set_mtu);
4809 +
4810 ++int dev_validate_mtu(struct net_device *dev, int new_mtu,
4811 ++ struct netlink_ext_ack *extack)
4812 ++{
4813 ++ /* MTU must be positive, and in range */
4814 ++ if (new_mtu < 0 || new_mtu < dev->min_mtu) {
4815 ++ NL_SET_ERR_MSG(extack, "mtu less than device minimum");
4816 ++ return -EINVAL;
4817 ++ }
4818 ++
4819 ++ if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
4820 ++ NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
4821 ++ return -EINVAL;
4822 ++ }
4823 ++ return 0;
4824 ++}
4825 ++
4826 + /**
4827 + * dev_set_mtu_ext - Change maximum transfer unit
4828 + * @dev: device
4829 +@@ -7989,16 +8007,9 @@ int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
4830 + if (new_mtu == dev->mtu)
4831 + return 0;
4832 +
4833 +- /* MTU must be positive, and in range */
4834 +- if (new_mtu < 0 || new_mtu < dev->min_mtu) {
4835 +- NL_SET_ERR_MSG(extack, "mtu less than device minimum");
4836 +- return -EINVAL;
4837 +- }
4838 +-
4839 +- if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
4840 +- NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
4841 +- return -EINVAL;
4842 +- }
4843 ++ err = dev_validate_mtu(dev, new_mtu, extack);
4844 ++ if (err)
4845 ++ return err;
4846 +
4847 + if (!netif_device_present(dev))
4848 + return -ENODEV;
4849 +@@ -9073,8 +9084,10 @@ int register_netdevice(struct net_device *dev)
4850 + goto err_uninit;
4851 +
4852 + ret = netdev_register_kobject(dev);
4853 +- if (ret)
4854 ++ if (ret) {
4855 ++ dev->reg_state = NETREG_UNREGISTERED;
4856 + goto err_uninit;
4857 ++ }
4858 + dev->reg_state = NETREG_REGISTERED;
4859 +
4860 + __netdev_update_features(dev);
4861 +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
4862 +index e4ec575c1fba..944acb1a9f29 100644
4863 +--- a/net/core/rtnetlink.c
4864 ++++ b/net/core/rtnetlink.c
4865 +@@ -2959,8 +2959,17 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
4866 + dev->rtnl_link_ops = ops;
4867 + dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
4868 +
4869 +- if (tb[IFLA_MTU])
4870 +- dev->mtu = nla_get_u32(tb[IFLA_MTU]);
4871 ++ if (tb[IFLA_MTU]) {
4872 ++ u32 mtu = nla_get_u32(tb[IFLA_MTU]);
4873 ++ int err;
4874 ++
4875 ++ err = dev_validate_mtu(dev, mtu, extack);
4876 ++ if (err) {
4877 ++ free_netdev(dev);
4878 ++ return ERR_PTR(err);
4879 ++ }
4880 ++ dev->mtu = mtu;
4881 ++ }
4882 + if (tb[IFLA_ADDRESS]) {
4883 + memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
4884 + nla_len(tb[IFLA_ADDRESS]));
4885 +diff --git a/net/core/skmsg.c b/net/core/skmsg.c
4886 +index 3866d7e20c07..ded2d5227678 100644
4887 +--- a/net/core/skmsg.c
4888 ++++ b/net/core/skmsg.c
4889 +@@ -594,8 +594,6 @@ EXPORT_SYMBOL_GPL(sk_psock_destroy);
4890 +
4891 + void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
4892 + {
4893 +- sock_owned_by_me(sk);
4894 +-
4895 + sk_psock_cork_free(psock);
4896 + sk_psock_zap_ingress(psock);
4897 +
4898 +diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
4899 +index d40de84a637f..754d84b217f0 100644
4900 +--- a/net/hsr/hsr_main.h
4901 ++++ b/net/hsr/hsr_main.h
4902 +@@ -191,7 +191,7 @@ void hsr_debugfs_term(struct hsr_priv *priv);
4903 + void hsr_debugfs_create_root(void);
4904 + void hsr_debugfs_remove_root(void);
4905 + #else
4906 +-static inline void void hsr_debugfs_rename(struct net_device *dev)
4907 ++static inline void hsr_debugfs_rename(struct net_device *dev)
4908 + {
4909 + }
4910 + static inline void hsr_debugfs_init(struct hsr_priv *priv,
4911 +diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
4912 +index 0e4a7cf6bc87..e2e219c7854a 100644
4913 +--- a/net/ipv4/esp4_offload.c
4914 ++++ b/net/ipv4/esp4_offload.c
4915 +@@ -57,6 +57,8 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
4916 + if (!x)
4917 + goto out_reset;
4918 +
4919 ++ skb->mark = xfrm_smark_get(skb->mark, x);
4920 ++
4921 + sp->xvec[sp->len++] = x;
4922 + sp->olen++;
4923 +
4924 +diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
4925 +index 1ab2fb6bb37d..f12fa8da6127 100644
4926 +--- a/net/ipv4/fib_trie.c
4927 ++++ b/net/ipv4/fib_trie.c
4928 +@@ -2175,6 +2175,12 @@ int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
4929 + int count = cb->args[2];
4930 + t_key key = cb->args[3];
4931 +
4932 ++ /* First time here, count and key are both always 0. Count > 0
4933 ++ * and key == 0 means the dump has wrapped around and we are done.
4934 ++ */
4935 ++ if (count && !key)
4936 ++ return skb->len;
4937 ++
4938 + while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
4939 + int err;
4940 +
4941 +diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
4942 +index 30fa771d382a..dcc79ff54b41 100644
4943 +--- a/net/ipv4/fou.c
4944 ++++ b/net/ipv4/fou.c
4945 +@@ -662,8 +662,8 @@ static const struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = {
4946 + [FOU_ATTR_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG, },
4947 + [FOU_ATTR_LOCAL_V4] = { .type = NLA_U32, },
4948 + [FOU_ATTR_PEER_V4] = { .type = NLA_U32, },
4949 +- [FOU_ATTR_LOCAL_V6] = { .type = sizeof(struct in6_addr), },
4950 +- [FOU_ATTR_PEER_V6] = { .type = sizeof(struct in6_addr), },
4951 ++ [FOU_ATTR_LOCAL_V6] = { .len = sizeof(struct in6_addr), },
4952 ++ [FOU_ATTR_PEER_V6] = { .len = sizeof(struct in6_addr), },
4953 + [FOU_ATTR_PEER_PORT] = { .type = NLA_U16, },
4954 + [FOU_ATTR_IFINDEX] = { .type = NLA_S32, },
4955 + };
4956 +diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
4957 +index 0fe2a5d3e258..74e1d964a615 100644
4958 +--- a/net/ipv4/ip_tunnel.c
4959 ++++ b/net/ipv4/ip_tunnel.c
4960 +@@ -1236,10 +1236,8 @@ int ip_tunnel_init(struct net_device *dev)
4961 + iph->version = 4;
4962 + iph->ihl = 5;
4963 +
4964 +- if (tunnel->collect_md) {
4965 +- dev->features |= NETIF_F_NETNS_LOCAL;
4966 ++ if (tunnel->collect_md)
4967 + netif_keep_dst(dev);
4968 +- }
4969 + return 0;
4970 + }
4971 + EXPORT_SYMBOL_GPL(ip_tunnel_init);
4972 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
4973 +index 84115577d3dc..3640e8563a10 100644
4974 +--- a/net/ipv4/tcp.c
4975 ++++ b/net/ipv4/tcp.c
4976 +@@ -2520,6 +2520,7 @@ static void tcp_rtx_queue_purge(struct sock *sk)
4977 + {
4978 + struct rb_node *p = rb_first(&sk->tcp_rtx_queue);
4979 +
4980 ++ tcp_sk(sk)->highest_sack = NULL;
4981 + while (p) {
4982 + struct sk_buff *skb = rb_to_skb(p);
4983 +
4984 +diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
4985 +index a6545ef0d27b..6c4d79baff26 100644
4986 +--- a/net/ipv4/tcp_bbr.c
4987 ++++ b/net/ipv4/tcp_bbr.c
4988 +@@ -779,8 +779,7 @@ static void bbr_update_bw(struct sock *sk, const struct rate_sample *rs)
4989 + * bandwidth sample. Delivered is in packets and interval_us in uS and
4990 + * ratio will be <<1 for most connections. So delivered is first scaled.
4991 + */
4992 +- bw = (u64)rs->delivered * BW_UNIT;
4993 +- do_div(bw, rs->interval_us);
4994 ++ bw = div64_long((u64)rs->delivered * BW_UNIT, rs->interval_us);
4995 +
4996 + /* If this sample is application-limited, it is likely to have a very
4997 + * low delivered count that represents application behavior rather than
4998 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
4999 +index 59b78ce2ce2e..6f7155d91313 100644
5000 +--- a/net/ipv4/tcp_input.c
5001 ++++ b/net/ipv4/tcp_input.c
5002 +@@ -3164,6 +3164,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
5003 + tp->retransmit_skb_hint = NULL;
5004 + if (unlikely(skb == tp->lost_skb_hint))
5005 + tp->lost_skb_hint = NULL;
5006 ++ tcp_highest_sack_replace(sk, skb, next);
5007 + tcp_rtx_queue_unlink_and_free(skb, sk);
5008 + }
5009 +
5010 +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
5011 +index e4ba915c4bb5..660b24fe041e 100644
5012 +--- a/net/ipv4/tcp_output.c
5013 ++++ b/net/ipv4/tcp_output.c
5014 +@@ -3231,6 +3231,7 @@ int tcp_send_synack(struct sock *sk)
5015 + if (!nskb)
5016 + return -ENOMEM;
5017 + INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor);
5018 ++ tcp_highest_sack_replace(sk, skb, nskb);
5019 + tcp_rtx_queue_unlink_and_free(skb, sk);
5020 + __skb_header_release(nskb);
5021 + tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb);
5022 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
5023 +index 7aa4e77161f6..7ae7065758bd 100644
5024 +--- a/net/ipv4/udp.c
5025 ++++ b/net/ipv4/udp.c
5026 +@@ -1368,7 +1368,8 @@ static void udp_rmem_release(struct sock *sk, int size, int partial,
5027 + if (likely(partial)) {
5028 + up->forward_deficit += size;
5029 + size = up->forward_deficit;
5030 +- if (size < (sk->sk_rcvbuf >> 2))
5031 ++ if (size < (sk->sk_rcvbuf >> 2) &&
5032 ++ !skb_queue_empty(&up->reader_queue))
5033 + return;
5034 + } else {
5035 + size += up->forward_deficit;
5036 +diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
5037 +index e31626ffccd1..fd535053245b 100644
5038 +--- a/net/ipv6/esp6_offload.c
5039 ++++ b/net/ipv6/esp6_offload.c
5040 +@@ -79,6 +79,8 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head,
5041 + if (!x)
5042 + goto out_reset;
5043 +
5044 ++ skb->mark = xfrm_smark_get(skb->mark, x);
5045 ++
5046 + sp->xvec[sp->len++] = x;
5047 + sp->olen++;
5048 +
5049 +diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
5050 +index 189de56f5e36..9ec05a1df5e1 100644
5051 +--- a/net/ipv6/ip6_gre.c
5052 ++++ b/net/ipv6/ip6_gre.c
5053 +@@ -1466,7 +1466,6 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
5054 + dev->mtu -= 8;
5055 +
5056 + if (tunnel->parms.collect_md) {
5057 +- dev->features |= NETIF_F_NETNS_LOCAL;
5058 + netif_keep_dst(dev);
5059 + }
5060 + ip6gre_tnl_init_features(dev);
5061 +@@ -1894,7 +1893,6 @@ static void ip6gre_tap_setup(struct net_device *dev)
5062 + dev->needs_free_netdev = true;
5063 + dev->priv_destructor = ip6gre_dev_free;
5064 +
5065 +- dev->features |= NETIF_F_NETNS_LOCAL;
5066 + dev->priv_flags &= ~IFF_TX_SKB_SHARING;
5067 + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
5068 + netif_keep_dst(dev);
5069 +@@ -2197,7 +2195,6 @@ static void ip6erspan_tap_setup(struct net_device *dev)
5070 + dev->needs_free_netdev = true;
5071 + dev->priv_destructor = ip6gre_dev_free;
5072 +
5073 +- dev->features |= NETIF_F_NETNS_LOCAL;
5074 + dev->priv_flags &= ~IFF_TX_SKB_SHARING;
5075 + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
5076 + netif_keep_dst(dev);
5077 +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
5078 +index 2f376dbc37d5..b5dd20c4599b 100644
5079 +--- a/net/ipv6/ip6_tunnel.c
5080 ++++ b/net/ipv6/ip6_tunnel.c
5081 +@@ -1877,10 +1877,8 @@ static int ip6_tnl_dev_init(struct net_device *dev)
5082 + if (err)
5083 + return err;
5084 + ip6_tnl_link_config(t);
5085 +- if (t->parms.collect_md) {
5086 +- dev->features |= NETIF_F_NETNS_LOCAL;
5087 ++ if (t->parms.collect_md)
5088 + netif_keep_dst(dev);
5089 +- }
5090 + return 0;
5091 + }
5092 +
5093 +diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c
5094 +index e70567446f28..802eebf8ac4b 100644
5095 +--- a/net/ipv6/seg6_local.c
5096 ++++ b/net/ipv6/seg6_local.c
5097 +@@ -23,6 +23,7 @@
5098 + #include <net/addrconf.h>
5099 + #include <net/ip6_route.h>
5100 + #include <net/dst_cache.h>
5101 ++#include <net/ip_tunnels.h>
5102 + #ifdef CONFIG_IPV6_SEG6_HMAC
5103 + #include <net/seg6_hmac.h>
5104 + #endif
5105 +@@ -135,7 +136,8 @@ static bool decap_and_validate(struct sk_buff *skb, int proto)
5106 +
5107 + skb_reset_network_header(skb);
5108 + skb_reset_transport_header(skb);
5109 +- skb->encapsulation = 0;
5110 ++ if (iptunnel_pull_offloads(skb))
5111 ++ return false;
5112 +
5113 + return true;
5114 + }
5115 +diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h
5116 +index e1f271a1b2c1..bfd4b42ba305 100644
5117 +--- a/net/netfilter/ipset/ip_set_bitmap_gen.h
5118 ++++ b/net/netfilter/ipset/ip_set_bitmap_gen.h
5119 +@@ -75,7 +75,7 @@ mtype_flush(struct ip_set *set)
5120 +
5121 + if (set->extensions & IPSET_EXT_DESTROY)
5122 + mtype_ext_cleanup(set);
5123 +- memset(map->members, 0, map->memsize);
5124 ++ bitmap_zero(map->members, map->elements);
5125 + set->elements = 0;
5126 + set->ext_size = 0;
5127 + }
5128 +diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
5129 +index 11ff9d4a7006..d934384f31ad 100644
5130 +--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
5131 ++++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
5132 +@@ -37,7 +37,7 @@ MODULE_ALIAS("ip_set_bitmap:ip");
5133 +
5134 + /* Type structure */
5135 + struct bitmap_ip {
5136 +- void *members; /* the set members */
5137 ++ unsigned long *members; /* the set members */
5138 + u32 first_ip; /* host byte order, included in range */
5139 + u32 last_ip; /* host byte order, included in range */
5140 + u32 elements; /* number of max elements in the set */
5141 +@@ -220,7 +220,7 @@ init_map_ip(struct ip_set *set, struct bitmap_ip *map,
5142 + u32 first_ip, u32 last_ip,
5143 + u32 elements, u32 hosts, u8 netmask)
5144 + {
5145 +- map->members = ip_set_alloc(map->memsize);
5146 ++ map->members = bitmap_zalloc(elements, GFP_KERNEL | __GFP_NOWARN);
5147 + if (!map->members)
5148 + return false;
5149 + map->first_ip = first_ip;
5150 +@@ -310,7 +310,7 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
5151 + if (!map)
5152 + return -ENOMEM;
5153 +
5154 +- map->memsize = bitmap_bytes(0, elements - 1);
5155 ++ map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
5156 + set->variant = &bitmap_ip;
5157 + if (!init_map_ip(set, map, first_ip, last_ip,
5158 + elements, hosts, netmask)) {
5159 +diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
5160 +index 1d4e63326e68..e8532783b43a 100644
5161 +--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
5162 ++++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
5163 +@@ -42,7 +42,7 @@ enum {
5164 +
5165 + /* Type structure */
5166 + struct bitmap_ipmac {
5167 +- void *members; /* the set members */
5168 ++ unsigned long *members; /* the set members */
5169 + u32 first_ip; /* host byte order, included in range */
5170 + u32 last_ip; /* host byte order, included in range */
5171 + u32 elements; /* number of max elements in the set */
5172 +@@ -299,7 +299,7 @@ static bool
5173 + init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
5174 + u32 first_ip, u32 last_ip, u32 elements)
5175 + {
5176 +- map->members = ip_set_alloc(map->memsize);
5177 ++ map->members = bitmap_zalloc(elements, GFP_KERNEL | __GFP_NOWARN);
5178 + if (!map->members)
5179 + return false;
5180 + map->first_ip = first_ip;
5181 +@@ -360,7 +360,7 @@ bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
5182 + if (!map)
5183 + return -ENOMEM;
5184 +
5185 +- map->memsize = bitmap_bytes(0, elements - 1);
5186 ++ map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
5187 + set->variant = &bitmap_ipmac;
5188 + if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
5189 + kfree(map);
5190 +diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
5191 +index 704a0dda1609..e3ac914fff1a 100644
5192 +--- a/net/netfilter/ipset/ip_set_bitmap_port.c
5193 ++++ b/net/netfilter/ipset/ip_set_bitmap_port.c
5194 +@@ -30,7 +30,7 @@ MODULE_ALIAS("ip_set_bitmap:port");
5195 +
5196 + /* Type structure */
5197 + struct bitmap_port {
5198 +- void *members; /* the set members */
5199 ++ unsigned long *members; /* the set members */
5200 + u16 first_port; /* host byte order, included in range */
5201 + u16 last_port; /* host byte order, included in range */
5202 + u32 elements; /* number of max elements in the set */
5203 +@@ -204,7 +204,7 @@ static bool
5204 + init_map_port(struct ip_set *set, struct bitmap_port *map,
5205 + u16 first_port, u16 last_port)
5206 + {
5207 +- map->members = ip_set_alloc(map->memsize);
5208 ++ map->members = bitmap_zalloc(map->elements, GFP_KERNEL | __GFP_NOWARN);
5209 + if (!map->members)
5210 + return false;
5211 + map->first_port = first_port;
5212 +@@ -244,7 +244,7 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
5213 + return -ENOMEM;
5214 +
5215 + map->elements = elements;
5216 +- map->memsize = bitmap_bytes(0, map->elements);
5217 ++ map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
5218 + set->variant = &bitmap_port;
5219 + if (!init_map_port(set, map, first_port, last_port)) {
5220 + kfree(map);
5221 +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
5222 +index 9fefd0150091..23544842b692 100644
5223 +--- a/net/netfilter/nf_tables_api.c
5224 ++++ b/net/netfilter/nf_tables_api.c
5225 +@@ -488,48 +488,71 @@ static inline u64 nf_tables_alloc_handle(struct nft_table *table)
5226 +
5227 + static const struct nft_chain_type *chain_type[NFPROTO_NUMPROTO][NFT_CHAIN_T_MAX];
5228 +
5229 ++static const struct nft_chain_type *
5230 ++__nft_chain_type_get(u8 family, enum nft_chain_types type)
5231 ++{
5232 ++ if (family >= NFPROTO_NUMPROTO ||
5233 ++ type >= NFT_CHAIN_T_MAX)
5234 ++ return NULL;
5235 ++
5236 ++ return chain_type[family][type];
5237 ++}
5238 ++
5239 + static const struct nft_chain_type *
5240 + __nf_tables_chain_type_lookup(const struct nlattr *nla, u8 family)
5241 + {
5242 ++ const struct nft_chain_type *type;
5243 + int i;
5244 +
5245 + for (i = 0; i < NFT_CHAIN_T_MAX; i++) {
5246 +- if (chain_type[family][i] != NULL &&
5247 +- !nla_strcmp(nla, chain_type[family][i]->name))
5248 +- return chain_type[family][i];
5249 ++ type = __nft_chain_type_get(family, i);
5250 ++ if (!type)
5251 ++ continue;
5252 ++ if (!nla_strcmp(nla, type->name))
5253 ++ return type;
5254 + }
5255 + return NULL;
5256 + }
5257 +
5258 +-/*
5259 +- * Loading a module requires dropping mutex that guards the transaction.
5260 +- * A different client might race to start a new transaction meanwhile. Zap the
5261 +- * list of pending transaction and then restore it once the mutex is grabbed
5262 +- * again. Users of this function return EAGAIN which implicitly triggers the
5263 +- * transaction abort path to clean up the list of pending transactions.
5264 +- */
5265 ++struct nft_module_request {
5266 ++ struct list_head list;
5267 ++ char module[MODULE_NAME_LEN];
5268 ++ bool done;
5269 ++};
5270 ++
5271 + #ifdef CONFIG_MODULES
5272 +-static void nft_request_module(struct net *net, const char *fmt, ...)
5273 ++static int nft_request_module(struct net *net, const char *fmt, ...)
5274 + {
5275 + char module_name[MODULE_NAME_LEN];
5276 +- LIST_HEAD(commit_list);
5277 ++ struct nft_module_request *req;
5278 + va_list args;
5279 + int ret;
5280 +
5281 +- list_splice_init(&net->nft.commit_list, &commit_list);
5282 +-
5283 + va_start(args, fmt);
5284 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
5285 + va_end(args);
5286 + if (ret >= MODULE_NAME_LEN)
5287 +- return;
5288 ++ return 0;
5289 +
5290 +- mutex_unlock(&net->nft.commit_mutex);
5291 +- request_module("%s", module_name);
5292 +- mutex_lock(&net->nft.commit_mutex);
5293 ++ list_for_each_entry(req, &net->nft.module_list, list) {
5294 ++ if (!strcmp(req->module, module_name)) {
5295 ++ if (req->done)
5296 ++ return 0;
5297 +
5298 +- WARN_ON_ONCE(!list_empty(&net->nft.commit_list));
5299 +- list_splice(&commit_list, &net->nft.commit_list);
5300 ++ /* A request to load this module already exists. */
5301 ++ return -EAGAIN;
5302 ++ }
5303 ++ }
5304 ++
5305 ++ req = kmalloc(sizeof(*req), GFP_KERNEL);
5306 ++ if (!req)
5307 ++ return -ENOMEM;
5308 ++
5309 ++ req->done = false;
5310 ++ strlcpy(req->module, module_name, MODULE_NAME_LEN);
5311 ++ list_add_tail(&req->list, &net->nft.module_list);
5312 ++
5313 ++ return -EAGAIN;
5314 + }
5315 + #endif
5316 +
5317 +@@ -553,10 +576,9 @@ nf_tables_chain_type_lookup(struct net *net, const struct nlattr *nla,
5318 + lockdep_nfnl_nft_mutex_not_held();
5319 + #ifdef CONFIG_MODULES
5320 + if (autoload) {
5321 +- nft_request_module(net, "nft-chain-%u-%.*s", family,
5322 +- nla_len(nla), (const char *)nla_data(nla));
5323 +- type = __nf_tables_chain_type_lookup(nla, family);
5324 +- if (type != NULL)
5325 ++ if (nft_request_module(net, "nft-chain-%u-%.*s", family,
5326 ++ nla_len(nla),
5327 ++ (const char *)nla_data(nla)) == -EAGAIN)
5328 + return ERR_PTR(-EAGAIN);
5329 + }
5330 + #endif
5331 +@@ -1095,11 +1117,8 @@ static void nf_tables_table_destroy(struct nft_ctx *ctx)
5332 +
5333 + void nft_register_chain_type(const struct nft_chain_type *ctype)
5334 + {
5335 +- if (WARN_ON(ctype->family >= NFPROTO_NUMPROTO))
5336 +- return;
5337 +-
5338 + nfnl_lock(NFNL_SUBSYS_NFTABLES);
5339 +- if (WARN_ON(chain_type[ctype->family][ctype->type] != NULL)) {
5340 ++ if (WARN_ON(__nft_chain_type_get(ctype->family, ctype->type))) {
5341 + nfnl_unlock(NFNL_SUBSYS_NFTABLES);
5342 + return;
5343 + }
5344 +@@ -1551,7 +1570,10 @@ static int nft_chain_parse_hook(struct net *net,
5345 + hook->num = ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM]));
5346 + hook->priority = ntohl(nla_get_be32(ha[NFTA_HOOK_PRIORITY]));
5347 +
5348 +- type = chain_type[family][NFT_CHAIN_T_DEFAULT];
5349 ++ type = __nft_chain_type_get(family, NFT_CHAIN_T_DEFAULT);
5350 ++ if (!type)
5351 ++ return -EOPNOTSUPP;
5352 ++
5353 + if (nla[NFTA_CHAIN_TYPE]) {
5354 + type = nf_tables_chain_type_lookup(net, nla[NFTA_CHAIN_TYPE],
5355 + family, autoload);
5356 +@@ -2060,9 +2082,8 @@ static const struct nft_expr_type *__nft_expr_type_get(u8 family,
5357 + static int nft_expr_type_request_module(struct net *net, u8 family,
5358 + struct nlattr *nla)
5359 + {
5360 +- nft_request_module(net, "nft-expr-%u-%.*s", family,
5361 +- nla_len(nla), (char *)nla_data(nla));
5362 +- if (__nft_expr_type_get(family, nla))
5363 ++ if (nft_request_module(net, "nft-expr-%u-%.*s", family,
5364 ++ nla_len(nla), (char *)nla_data(nla)) == -EAGAIN)
5365 + return -EAGAIN;
5366 +
5367 + return 0;
5368 +@@ -2088,9 +2109,9 @@ static const struct nft_expr_type *nft_expr_type_get(struct net *net,
5369 + if (nft_expr_type_request_module(net, family, nla) == -EAGAIN)
5370 + return ERR_PTR(-EAGAIN);
5371 +
5372 +- nft_request_module(net, "nft-expr-%.*s",
5373 +- nla_len(nla), (char *)nla_data(nla));
5374 +- if (__nft_expr_type_get(family, nla))
5375 ++ if (nft_request_module(net, "nft-expr-%.*s",
5376 ++ nla_len(nla),
5377 ++ (char *)nla_data(nla)) == -EAGAIN)
5378 + return ERR_PTR(-EAGAIN);
5379 + }
5380 + #endif
5381 +@@ -2181,9 +2202,10 @@ static int nf_tables_expr_parse(const struct nft_ctx *ctx,
5382 + err = PTR_ERR(ops);
5383 + #ifdef CONFIG_MODULES
5384 + if (err == -EAGAIN)
5385 +- nft_expr_type_request_module(ctx->net,
5386 +- ctx->family,
5387 +- tb[NFTA_EXPR_NAME]);
5388 ++ if (nft_expr_type_request_module(ctx->net,
5389 ++ ctx->family,
5390 ++ tb[NFTA_EXPR_NAME]) != -EAGAIN)
5391 ++ err = -ENOENT;
5392 + #endif
5393 + goto err1;
5394 + }
5395 +@@ -3020,8 +3042,7 @@ nft_select_set_ops(const struct nft_ctx *ctx,
5396 + lockdep_nfnl_nft_mutex_not_held();
5397 + #ifdef CONFIG_MODULES
5398 + if (list_empty(&nf_tables_set_types)) {
5399 +- nft_request_module(ctx->net, "nft-set");
5400 +- if (!list_empty(&nf_tables_set_types))
5401 ++ if (nft_request_module(ctx->net, "nft-set") == -EAGAIN)
5402 + return ERR_PTR(-EAGAIN);
5403 + }
5404 + #endif
5405 +@@ -5147,8 +5168,7 @@ nft_obj_type_get(struct net *net, u32 objtype)
5406 + lockdep_nfnl_nft_mutex_not_held();
5407 + #ifdef CONFIG_MODULES
5408 + if (type == NULL) {
5409 +- nft_request_module(net, "nft-obj-%u", objtype);
5410 +- if (__nft_obj_type_get(objtype))
5411 ++ if (nft_request_module(net, "nft-obj-%u", objtype) == -EAGAIN)
5412 + return ERR_PTR(-EAGAIN);
5413 + }
5414 + #endif
5415 +@@ -5764,8 +5784,7 @@ nft_flowtable_type_get(struct net *net, u8 family)
5416 + lockdep_nfnl_nft_mutex_not_held();
5417 + #ifdef CONFIG_MODULES
5418 + if (type == NULL) {
5419 +- nft_request_module(net, "nf-flowtable-%u", family);
5420 +- if (__nft_flowtable_type_get(family))
5421 ++ if (nft_request_module(net, "nf-flowtable-%u", family) == -EAGAIN)
5422 + return ERR_PTR(-EAGAIN);
5423 + }
5424 + #endif
5425 +@@ -6712,6 +6731,18 @@ static void nft_chain_del(struct nft_chain *chain)
5426 + list_del_rcu(&chain->list);
5427 + }
5428 +
5429 ++static void nf_tables_module_autoload_cleanup(struct net *net)
5430 ++{
5431 ++ struct nft_module_request *req, *next;
5432 ++
5433 ++ WARN_ON_ONCE(!list_empty(&net->nft.commit_list));
5434 ++ list_for_each_entry_safe(req, next, &net->nft.module_list, list) {
5435 ++ WARN_ON_ONCE(!req->done);
5436 ++ list_del(&req->list);
5437 ++ kfree(req);
5438 ++ }
5439 ++}
5440 ++
5441 + static void nf_tables_commit_release(struct net *net)
5442 + {
5443 + struct nft_trans *trans;
5444 +@@ -6724,6 +6755,7 @@ static void nf_tables_commit_release(struct net *net)
5445 + * to prevent expensive synchronize_rcu() in commit phase.
5446 + */
5447 + if (list_empty(&net->nft.commit_list)) {
5448 ++ nf_tables_module_autoload_cleanup(net);
5449 + mutex_unlock(&net->nft.commit_mutex);
5450 + return;
5451 + }
5452 +@@ -6738,6 +6770,7 @@ static void nf_tables_commit_release(struct net *net)
5453 + list_splice_tail_init(&net->nft.commit_list, &nf_tables_destroy_list);
5454 + spin_unlock(&nf_tables_destroy_list_lock);
5455 +
5456 ++ nf_tables_module_autoload_cleanup(net);
5457 + mutex_unlock(&net->nft.commit_mutex);
5458 +
5459 + schedule_work(&trans_destroy_work);
5460 +@@ -6929,6 +6962,26 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
5461 + return 0;
5462 + }
5463 +
5464 ++static void nf_tables_module_autoload(struct net *net)
5465 ++{
5466 ++ struct nft_module_request *req, *next;
5467 ++ LIST_HEAD(module_list);
5468 ++
5469 ++ list_splice_init(&net->nft.module_list, &module_list);
5470 ++ mutex_unlock(&net->nft.commit_mutex);
5471 ++ list_for_each_entry_safe(req, next, &module_list, list) {
5472 ++ if (req->done) {
5473 ++ list_del(&req->list);
5474 ++ kfree(req);
5475 ++ } else {
5476 ++ request_module("%s", req->module);
5477 ++ req->done = true;
5478 ++ }
5479 ++ }
5480 ++ mutex_lock(&net->nft.commit_mutex);
5481 ++ list_splice(&module_list, &net->nft.module_list);
5482 ++}
5483 ++
5484 + static void nf_tables_abort_release(struct nft_trans *trans)
5485 + {
5486 + switch (trans->msg_type) {
5487 +@@ -6958,7 +7011,7 @@ static void nf_tables_abort_release(struct nft_trans *trans)
5488 + kfree(trans);
5489 + }
5490 +
5491 +-static int __nf_tables_abort(struct net *net)
5492 ++static int __nf_tables_abort(struct net *net, bool autoload)
5493 + {
5494 + struct nft_trans *trans, *next;
5495 + struct nft_trans_elem *te;
5496 +@@ -7080,6 +7133,11 @@ static int __nf_tables_abort(struct net *net)
5497 + nf_tables_abort_release(trans);
5498 + }
5499 +
5500 ++ if (autoload)
5501 ++ nf_tables_module_autoload(net);
5502 ++ else
5503 ++ nf_tables_module_autoload_cleanup(net);
5504 ++
5505 + return 0;
5506 + }
5507 +
5508 +@@ -7088,9 +7146,9 @@ static void nf_tables_cleanup(struct net *net)
5509 + nft_validate_state_update(net, NFT_VALIDATE_SKIP);
5510 + }
5511 +
5512 +-static int nf_tables_abort(struct net *net, struct sk_buff *skb)
5513 ++static int nf_tables_abort(struct net *net, struct sk_buff *skb, bool autoload)
5514 + {
5515 +- int ret = __nf_tables_abort(net);
5516 ++ int ret = __nf_tables_abort(net, autoload);
5517 +
5518 + mutex_unlock(&net->nft.commit_mutex);
5519 +
5520 +@@ -7685,6 +7743,7 @@ static int __net_init nf_tables_init_net(struct net *net)
5521 + {
5522 + INIT_LIST_HEAD(&net->nft.tables);
5523 + INIT_LIST_HEAD(&net->nft.commit_list);
5524 ++ INIT_LIST_HEAD(&net->nft.module_list);
5525 + mutex_init(&net->nft.commit_mutex);
5526 + net->nft.base_seq = 1;
5527 + net->nft.validate_state = NFT_VALIDATE_SKIP;
5528 +@@ -7696,7 +7755,7 @@ static void __net_exit nf_tables_exit_net(struct net *net)
5529 + {
5530 + mutex_lock(&net->nft.commit_mutex);
5531 + if (!list_empty(&net->nft.commit_list))
5532 +- __nf_tables_abort(net);
5533 ++ __nf_tables_abort(net, false);
5534 + __nft_release_tables(net);
5535 + mutex_unlock(&net->nft.commit_mutex);
5536 + WARN_ON_ONCE(!list_empty(&net->nft.tables));
5537 +diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
5538 +index 4abbb452cf6c..99127e2d95a8 100644
5539 +--- a/net/netfilter/nfnetlink.c
5540 ++++ b/net/netfilter/nfnetlink.c
5541 +@@ -476,7 +476,7 @@ ack:
5542 + }
5543 + done:
5544 + if (status & NFNL_BATCH_REPLAY) {
5545 +- ss->abort(net, oskb);
5546 ++ ss->abort(net, oskb, true);
5547 + nfnl_err_reset(&err_list);
5548 + kfree_skb(skb);
5549 + module_put(ss->owner);
5550 +@@ -487,11 +487,11 @@ done:
5551 + status |= NFNL_BATCH_REPLAY;
5552 + goto done;
5553 + } else if (err) {
5554 +- ss->abort(net, oskb);
5555 ++ ss->abort(net, oskb, false);
5556 + netlink_ack(oskb, nlmsg_hdr(oskb), err, NULL);
5557 + }
5558 + } else {
5559 +- ss->abort(net, oskb);
5560 ++ ss->abort(net, oskb, false);
5561 + }
5562 + if (ss->cleanup)
5563 + ss->cleanup(net);
5564 +diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c
5565 +index f54d6ae15bb1..b42247aa48a9 100644
5566 +--- a/net/netfilter/nft_osf.c
5567 ++++ b/net/netfilter/nft_osf.c
5568 +@@ -61,6 +61,9 @@ static int nft_osf_init(const struct nft_ctx *ctx,
5569 + int err;
5570 + u8 ttl;
5571 +
5572 ++ if (!tb[NFTA_OSF_DREG])
5573 ++ return -EINVAL;
5574 ++
5575 + if (tb[NFTA_OSF_TTL]) {
5576 + ttl = nla_get_u8(tb[NFTA_OSF_TTL]);
5577 + if (ttl > 2)
5578 +diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
5579 +index 76e0d122616a..c2cdd0fc2e70 100644
5580 +--- a/net/sched/cls_api.c
5581 ++++ b/net/sched/cls_api.c
5582 +@@ -2055,9 +2055,8 @@ replay:
5583 + &chain_info));
5584 +
5585 + mutex_unlock(&chain->filter_chain_lock);
5586 +- tp_new = tcf_proto_create(nla_data(tca[TCA_KIND]),
5587 +- protocol, prio, chain, rtnl_held,
5588 +- extack);
5589 ++ tp_new = tcf_proto_create(name, protocol, prio, chain,
5590 ++ rtnl_held, extack);
5591 + if (IS_ERR(tp_new)) {
5592 + err = PTR_ERR(tp_new);
5593 + goto errout_tp;
5594 +diff --git a/net/sched/ematch.c b/net/sched/ematch.c
5595 +index 8f2ad706784d..d0140a92694a 100644
5596 +--- a/net/sched/ematch.c
5597 ++++ b/net/sched/ematch.c
5598 +@@ -263,12 +263,12 @@ static int tcf_em_validate(struct tcf_proto *tp,
5599 + }
5600 + em->data = (unsigned long) v;
5601 + }
5602 ++ em->datalen = data_len;
5603 + }
5604 + }
5605 +
5606 + em->matchid = em_hdr->matchid;
5607 + em->flags = em_hdr->flags;
5608 +- em->datalen = data_len;
5609 + em->net = net;
5610 +
5611 + err = 0;
5612 +diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
5613 +index a80920f261ca..41e9c2932b34 100644
5614 +--- a/net/tls/tls_sw.c
5615 ++++ b/net/tls/tls_sw.c
5616 +@@ -793,7 +793,7 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
5617 + psock = sk_psock_get(sk);
5618 + if (!psock || !policy) {
5619 + err = tls_push_record(sk, flags, record_type);
5620 +- if (err) {
5621 ++ if (err && err != -EINPROGRESS) {
5622 + *copied -= sk_msg_free(sk, msg);
5623 + tls_free_open_rec(sk);
5624 + }
5625 +@@ -819,7 +819,7 @@ more_data:
5626 + switch (psock->eval) {
5627 + case __SK_PASS:
5628 + err = tls_push_record(sk, flags, record_type);
5629 +- if (err < 0) {
5630 ++ if (err && err != -EINPROGRESS) {
5631 + *copied -= sk_msg_free(sk, msg);
5632 + tls_free_open_rec(sk);
5633 + goto out_err;
5634 +diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
5635 +index 6aee9f5e8e71..256f3e97d1f3 100644
5636 +--- a/net/x25/af_x25.c
5637 ++++ b/net/x25/af_x25.c
5638 +@@ -760,6 +760,10 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
5639 + if (sk->sk_state == TCP_ESTABLISHED)
5640 + goto out;
5641 +
5642 ++ rc = -EALREADY; /* Do nothing if call is already in progress */
5643 ++ if (sk->sk_state == TCP_SYN_SENT)
5644 ++ goto out;
5645 ++
5646 + sk->sk_state = TCP_CLOSE;
5647 + sock->state = SS_UNCONNECTED;
5648 +
5649 +@@ -806,7 +810,7 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
5650 + /* Now the loop */
5651 + rc = -EINPROGRESS;
5652 + if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
5653 +- goto out_put_neigh;
5654 ++ goto out;
5655 +
5656 + rc = x25_wait_for_connection_establishment(sk);
5657 + if (rc)
5658 +diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
5659 +index 612268eabef4..7225107a9aaf 100644
5660 +--- a/scripts/recordmcount.c
5661 ++++ b/scripts/recordmcount.c
5662 +@@ -38,6 +38,10 @@
5663 + #define R_AARCH64_ABS64 257
5664 + #endif
5665 +
5666 ++#define R_ARM_PC24 1
5667 ++#define R_ARM_THM_CALL 10
5668 ++#define R_ARM_CALL 28
5669 ++
5670 + static int fd_map; /* File descriptor for file being modified. */
5671 + static int mmap_failed; /* Boolean flag. */
5672 + static char gpfx; /* prefix for global symbol name (sometimes '_') */
5673 +@@ -418,6 +422,18 @@ static char const *already_has_rel_mcount = "success"; /* our work here is done!
5674 + #define RECORD_MCOUNT_64
5675 + #include "recordmcount.h"
5676 +
5677 ++static int arm_is_fake_mcount(Elf32_Rel const *rp)
5678 ++{
5679 ++ switch (ELF32_R_TYPE(w(rp->r_info))) {
5680 ++ case R_ARM_THM_CALL:
5681 ++ case R_ARM_CALL:
5682 ++ case R_ARM_PC24:
5683 ++ return 0;
5684 ++ }
5685 ++
5686 ++ return 1;
5687 ++}
5688 ++
5689 + /* 64-bit EM_MIPS has weird ELF64_Rela.r_info.
5690 + * http://techpubs.sgi.com/library/manuals/4000/007-4658-001/pdf/007-4658-001.pdf
5691 + * We interpret Table 29 Relocation Operation (Elf64_Rel, Elf64_Rela) [p.40]
5692 +@@ -523,6 +539,7 @@ static int do_file(char const *const fname)
5693 + altmcount = "__gnu_mcount_nc";
5694 + make_nop = make_nop_arm;
5695 + rel_type_nop = R_ARM_NONE;
5696 ++ is_fake_mcount32 = arm_is_fake_mcount;
5697 + gpfx = 0;
5698 + break;
5699 + case EM_AARCH64: