From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.14 commit in: /
Date: Wed, 27 Oct 2021 11:56:42
Message-Id: 1635335786.2661807959a80b1a8fe0501567b46f369fc44c98.mpagano@gentoo
commit:     2661807959a80b1a8fe0501567b46f369fc44c98
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Oct 27 11:56:26 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Oct 27 11:56:26 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=26618079

Linux patch 5.14.15

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1014_linux-5.14.15.patch | 6672 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6676 insertions(+)

diff --git a/0000_README b/0000_README
index 5ebc554..ea788cb 100644
--- a/0000_README
+++ b/0000_README
@@ -103,6 +103,10 @@ Patch: 1013_linux-5.14.14.patch
 From: http://www.kernel.org
 Desc: Linux 5.14.14
 
+Patch: 1014_linux-5.14.15.patch
+From: http://www.kernel.org
+Desc: Linux 5.14.15
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.
 
diff --git a/1014_linux-5.14.15.patch b/1014_linux-5.14.15.patch
new file mode 100644
index 0000000..5d7b8f7
--- /dev/null
+++ b/1014_linux-5.14.15.patch
@@ -0,0 +1,6672 @@
+diff --git a/Documentation/networking/devlink/ice.rst b/Documentation/networking/devlink/ice.rst
+index a432dc419fa40..5d97cee9457be 100644
+--- a/Documentation/networking/devlink/ice.rst
++++ b/Documentation/networking/devlink/ice.rst
+@@ -30,10 +30,11 @@ The ``ice`` driver reports the following versions
+ PHY, link, etc.
+ * - ``fw.mgmt.api``
+ - running
+- - 1.5
+- - 2-digit version number of the API exported over the AdminQ by the
+- management firmware. Used by the driver to identify what commands
+- are supported.
++ - 1.5.1
++ - 3-digit version number (major.minor.patch) of the API exported over
++ the AdminQ by the management firmware. Used by the driver to
++ identify what commands are supported. Historical versions of the
++ kernel only displayed a 2-digit version number (major.minor).
+ * - ``fw.mgmt.build``
+ - running
+ - 0x305d955f
+diff --git a/Makefile b/Makefile
+index f05668e1ffaba..e66341fba8a4e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 14
+-SUBLEVEL = 14
++SUBLEVEL = 15
+ EXTRAVERSION =
+ NAME = Opossums on Parade
+
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 2fb7012c32463..110b305af27f1 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -93,6 +93,7 @@ config ARM
+ select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
+ select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL && !CC_IS_CLANG
+ select HAVE_FUNCTION_TRACER if !XIP_KERNEL
++ select HAVE_FUTEX_CMPXCHG if FUTEX
+ select HAVE_GCC_PLUGINS
+ select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)
+ select HAVE_IRQ_TIME_ACCOUNTING
+diff --git a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
+index 8034e5dacc808..949df688c5f18 100644
+--- a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
++++ b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
+@@ -71,7 +71,6 @@
+ isc: isc@f0008000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_isc_base &pinctrl_isc_data_8bit &pinctrl_isc_data_9_10 &pinctrl_isc_data_11_12>;
+- status = "okay";
+ };
+
+ qspi1: spi@f0024000 {
+diff --git a/arch/arm/boot/dts/spear3xx.dtsi b/arch/arm/boot/dts/spear3xx.dtsi
+index f266b7b034823..cc88ebe7a60ce 100644
+--- a/arch/arm/boot/dts/spear3xx.dtsi
++++ b/arch/arm/boot/dts/spear3xx.dtsi
+@@ -47,7 +47,7 @@
+ };
+
+ gmac: eth@e0800000 {
+- compatible = "st,spear600-gmac";
++ compatible = "snps,dwmac-3.40a";
+ reg = <0xe0800000 0x8000>;
+ interrupts = <23 22>;
+ interrupt-names = "macirq", "eth_wake_irq";
+diff --git a/arch/arm/boot/dts/vexpress-v2m.dtsi b/arch/arm/boot/dts/vexpress-v2m.dtsi
+index ec13ceb9ed362..79ba83d1f620c 100644
+--- a/arch/arm/boot/dts/vexpress-v2m.dtsi
++++ b/arch/arm/boot/dts/vexpress-v2m.dtsi
+@@ -19,7 +19,7 @@
+ */
+
+ / {
+- bus@4000000 {
++ bus@40000000 {
+ motherboard {
+ model = "V2M-P1";
+ arm,hbi = <0x190>;
+diff --git a/arch/arm/boot/dts/vexpress-v2p-ca9.dts b/arch/arm/boot/dts/vexpress-v2p-ca9.dts
+index 4c58479558562..1317f0f58d53d 100644
+--- a/arch/arm/boot/dts/vexpress-v2p-ca9.dts
++++ b/arch/arm/boot/dts/vexpress-v2p-ca9.dts
+@@ -295,7 +295,7 @@
+ };
+ };
+
+- smb: bus@4000000 {
++ smb: bus@40000000 {
+ compatible = "simple-bus";
+
+ #address-cells = <2>;
+diff --git a/arch/arm64/kvm/hyp/include/nvhe/gfp.h b/arch/arm64/kvm/hyp/include/nvhe/gfp.h
+index fb0f523d14921..0a048dc06a7d7 100644
+--- a/arch/arm64/kvm/hyp/include/nvhe/gfp.h
++++ b/arch/arm64/kvm/hyp/include/nvhe/gfp.h
+@@ -24,6 +24,7 @@ struct hyp_pool {
+
+ /* Allocation */
+ void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order);
++void hyp_split_page(struct hyp_page *page);
+ void hyp_get_page(struct hyp_pool *pool, void *addr);
+ void hyp_put_page(struct hyp_pool *pool, void *addr);
+
+diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+index a6ce991b14679..b79ce0059e7b7 100644
+--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
++++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+@@ -35,7 +35,18 @@ static const u8 pkvm_hyp_id = 1;
+
+ static void *host_s2_zalloc_pages_exact(size_t size)
+ {
+- return hyp_alloc_pages(&host_s2_pool, get_order(size));
++ void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size));
++
++ hyp_split_page(hyp_virt_to_page(addr));
++
++ /*
++ * The size of concatenated PGDs is always a power of two of PAGE_SIZE,
++ * so there should be no need to free any of the tail pages to make the
++ * allocation exact.
++ */
++ WARN_ON(size != (PAGE_SIZE << get_order(size)));
++
++ return addr;
+ }
+
+ static void *host_s2_zalloc_page(void *pool)
+diff --git a/arch/arm64/kvm/hyp/nvhe/page_alloc.c b/arch/arm64/kvm/hyp/nvhe/page_alloc.c
+index 41fc25bdfb346..a6e874e61a40e 100644
+--- a/arch/arm64/kvm/hyp/nvhe/page_alloc.c
++++ b/arch/arm64/kvm/hyp/nvhe/page_alloc.c
+@@ -193,6 +193,20 @@ void hyp_get_page(struct hyp_pool *pool, void *addr)
+ hyp_spin_unlock(&pool->lock);
+ }
+
++void hyp_split_page(struct hyp_page *p)
++{
++ unsigned short order = p->order;
++ unsigned int i;
++
++ p->order = 0;
++ for (i = 1; i < (1 << order); i++) {
++ struct hyp_page *tail = p + i;
++
++ tail->order = 0;
++ hyp_set_page_refcounted(tail);
++ }
++}
++
+ void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order)
+ {
+ unsigned short i = order;
+diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
+index 0625bf2353c22..3fcdacfee5799 100644
+--- a/arch/arm64/kvm/mmu.c
++++ b/arch/arm64/kvm/mmu.c
+@@ -1477,8 +1477,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ * when updating the PG_mte_tagged page flag, see
+ * sanitise_mte_tags for more details.
+ */
+- if (kvm_has_mte(kvm) && vma->vm_flags & VM_SHARED)
+- return -EINVAL;
++ if (kvm_has_mte(kvm) && vma->vm_flags & VM_SHARED) {
++ ret = -EINVAL;
++ break;
++ }
+
+ if (vma->vm_flags & VM_PFNMAP) {
+ /* IO region dirty page logging not allowed */
+diff --git a/arch/nios2/include/asm/irqflags.h b/arch/nios2/include/asm/irqflags.h
+index b3ec3e510706d..25acf27862f91 100644
+--- a/arch/nios2/include/asm/irqflags.h
++++ b/arch/nios2/include/asm/irqflags.h
+@@ -9,7 +9,7 @@
+
+ static inline unsigned long arch_local_save_flags(void)
+ {
+- return RDCTL(CTL_STATUS);
++ return RDCTL(CTL_FSTATUS);
+ }
+
+ /*
+@@ -18,7 +18,7 @@ static inline unsigned long arch_local_save_flags(void)
+ */
+ static inline void arch_local_irq_restore(unsigned long flags)
+ {
+- WRCTL(CTL_STATUS, flags);
++ WRCTL(CTL_FSTATUS, flags);
+ }
+
+ static inline void arch_local_irq_disable(void)
+diff --git a/arch/nios2/include/asm/registers.h b/arch/nios2/include/asm/registers.h
+index 183c720e454d9..95b67dd16f818 100644
+--- a/arch/nios2/include/asm/registers.h
++++ b/arch/nios2/include/asm/registers.h
+@@ -11,7 +11,7 @@
+ #endif
+
+ /* control register numbers */
+-#define CTL_STATUS 0
++#define CTL_FSTATUS 0
+ #define CTL_ESTATUS 1
+ #define CTL_BSTATUS 2
+ #define CTL_IENABLE 3
+diff --git a/arch/parisc/math-emu/fpudispatch.c b/arch/parisc/math-emu/fpudispatch.c
+index 7c46969ead9b1..01ed133227c25 100644
+--- a/arch/parisc/math-emu/fpudispatch.c
++++ b/arch/parisc/math-emu/fpudispatch.c
+@@ -310,12 +310,15 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
+ r1 &= ~3;
+ fpregs[t+3] = fpregs[r1+3];
+ fpregs[t+2] = fpregs[r1+2];
++ fallthrough;
+ case 1: /* double */
+ fpregs[t+1] = fpregs[r1+1];
++ fallthrough;
+ case 0: /* single */
+ fpregs[t] = fpregs[r1];
+ return(NOEXCEPTION);
+ }
++ BUG();
+ case 3: /* FABS */
+ switch (fmt) {
+ case 2: /* illegal */
+@@ -325,13 +328,16 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
+ r1 &= ~3;
+ fpregs[t+3] = fpregs[r1+3];
+ fpregs[t+2] = fpregs[r1+2];
++ fallthrough;
+ case 1: /* double */
+ fpregs[t+1] = fpregs[r1+1];
++ fallthrough;
+ case 0: /* single */
+ /* copy and clear sign bit */
+ fpregs[t] = fpregs[r1] & 0x7fffffff;
+ return(NOEXCEPTION);
+ }
++ BUG();
+ case 6: /* FNEG */
+ switch (fmt) {
+ case 2: /* illegal */
+@@ -341,13 +347,16 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
+ r1 &= ~3;
+ fpregs[t+3] = fpregs[r1+3];
+ fpregs[t+2] = fpregs[r1+2];
++ fallthrough;
+ case 1: /* double */
+ fpregs[t+1] = fpregs[r1+1];
++ fallthrough;
+ case 0: /* single */
+ /* copy and invert sign bit */
+ fpregs[t] = fpregs[r1] ^ 0x80000000;
+ return(NOEXCEPTION);
+ }
++ BUG();
+ case 7: /* FNEGABS */
+ switch (fmt) {
+ case 2: /* illegal */
+@@ -357,13 +366,16 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
+ r1 &= ~3;
+ fpregs[t+3] = fpregs[r1+3];
+ fpregs[t+2] = fpregs[r1+2];
++ fallthrough;
+ case 1: /* double */
+ fpregs[t+1] = fpregs[r1+1];
++ fallthrough;
+ case 0: /* single */
+ /* copy and set sign bit */
+ fpregs[t] = fpregs[r1] | 0x80000000;
+ return(NOEXCEPTION);
+ }
++ BUG();
+ case 4: /* FSQRT */
+ switch (fmt) {
+ case 0:
+@@ -376,6 +388,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
+ case 3: /* quad not implemented */
+ return(MAJOR_0C_EXCP);
+ }
++ BUG();
+ case 5: /* FRND */
+ switch (fmt) {
+ case 0:
+@@ -389,7 +402,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
+ return(MAJOR_0C_EXCP);
+ }
+ } /* end of switch (subop) */
+-
++ BUG();
+ case 1: /* class 1 */
+ df = extru(ir,fpdfpos,2); /* get dest format */
+ if ((df & 2) || (fmt & 2)) {
+@@ -419,6 +432,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
+ case 3: /* dbl/dbl */
+ return(MAJOR_0C_EXCP);
+ }
++ BUG();
+ case 1: /* FCNVXF */
+ switch(fmt) {
+ case 0: /* sgl/sgl */
+@@ -434,6 +448,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
+ return(dbl_to_dbl_fcnvxf(&fpregs[r1],0,
+ &fpregs[t],status));
+ }
++ BUG();
+ case 2: /* FCNVFX */
+ switch(fmt) {
+ case 0: /* sgl/sgl */
+@@ -449,6 +464,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
+ return(dbl_to_dbl_fcnvfx(&fpregs[r1],0,
+ &fpregs[t],status));
+ }
++ BUG();
+ case 3: /* FCNVFXT */
+ switch(fmt) {
+ case 0: /* sgl/sgl */
+@@ -464,6 +480,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
+ return(dbl_to_dbl_fcnvfxt(&fpregs[r1],0,
+ &fpregs[t],status));
+ }
++ BUG();
+ case 5: /* FCNVUF (PA2.0 only) */
+ switch(fmt) {
+ case 0: /* sgl/sgl */
+@@ -479,6 +496,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
+ return(dbl_to_dbl_fcnvuf(&fpregs[r1],0,
+ &fpregs[t],status));
+ }
++ BUG();
+ case 6: /* FCNVFU (PA2.0 only) */
+ switch(fmt) {
+ case 0: /* sgl/sgl */
+@@ -494,6 +512,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
+ return(dbl_to_dbl_fcnvfu(&fpregs[r1],0,
+ &fpregs[t],status));
+ }
++ BUG();
+ case 7: /* FCNVFUT (PA2.0 only) */
+ switch(fmt) {
+ case 0: /* sgl/sgl */
+@@ -509,10 +528,11 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
+ return(dbl_to_dbl_fcnvfut(&fpregs[r1],0,
+ &fpregs[t],status));
+ }
++ BUG();
+ case 4: /* undefined */
+ return(MAJOR_0C_EXCP);
+ } /* end of switch subop */
+-
++ BUG();
+ case 2: /* class 2 */
+ fpu_type_flags=fpregs[FPU_TYPE_FLAG_POS];
+ r2 = extru(ir, fpr2pos, 5) * sizeof(double)/sizeof(u_int);
+@@ -590,6 +610,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
+ case 3: /* quad not implemented */
+ return(MAJOR_0C_EXCP);
+ }
++ BUG();
+ case 1: /* FTEST */
+ switch (fmt) {
+ case 0:
+@@ -609,8 +630,10 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
+ case 3:
+ return(MAJOR_0C_EXCP);
+ }
++ BUG();
+ } /* end of switch subop */
+ } /* end of else for PA1.0 & PA1.1 */
++ BUG();
+ case 3: /* class 3 */
+ r2 = extru(ir,fpr2pos,5) * sizeof(double)/sizeof(u_int);
+ if (r2 == 0)
+@@ -633,6 +656,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
+ case 3: /* quad not implemented */
+ return(MAJOR_0C_EXCP);
+ }
++ BUG();
+ case 1: /* FSUB */
+ switch (fmt) {
+ case 0:
+@@ -645,6 +669,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
+ case 3: /* quad not implemented */
+ return(MAJOR_0C_EXCP);
+ }
++ BUG();
+ case 2: /* FMPY */
+ switch (fmt) {
+ case 0:
+@@ -657,6 +682,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
+ case 3: /* quad not implemented */
+ return(MAJOR_0C_EXCP);
+ }
++ BUG();
+ case 3: /* FDIV */
+ switch (fmt) {
+ case 0:
+@@ -669,6 +695,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
+ case 3: /* quad not implemented */
+ return(MAJOR_0C_EXCP);
+ }
++ BUG();
+ case 4: /* FREM */
+ switch (fmt) {
+ case 0:
+@@ -681,6 +708,7 @@ decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
+ case 3: /* quad not implemented */
+ return(MAJOR_0C_EXCP);
+ }
++ BUG();
+ } /* end of class 3 switch */
+ } /* end of switch(class) */
+
+@@ -736,10 +764,12 @@ u_int fpregs[];
+ return(MAJOR_0E_EXCP);
+ case 1: /* double */
+ fpregs[t+1] = fpregs[r1+1];
++ fallthrough;
+ case 0: /* single */
+ fpregs[t] = fpregs[r1];
+ return(NOEXCEPTION);
+ }
++ BUG();
+ case 3: /* FABS */
+ switch (fmt) {
+ case 2:
+@@ -747,10 +777,12 @@ u_int fpregs[];
+ return(MAJOR_0E_EXCP);
+ case 1: /* double */
+ fpregs[t+1] = fpregs[r1+1];
++ fallthrough;
+ case 0: /* single */
+ fpregs[t] = fpregs[r1] & 0x7fffffff;
+ return(NOEXCEPTION);
+ }
++ BUG();
+ case 6: /* FNEG */
+ switch (fmt) {
+ case 2:
+@@ -758,10 +790,12 @@ u_int fpregs[];
+ return(MAJOR_0E_EXCP);
+ case 1: /* double */
+ fpregs[t+1] = fpregs[r1+1];
++ fallthrough;
+ case 0: /* single */
+ fpregs[t] = fpregs[r1] ^ 0x80000000;
+ return(NOEXCEPTION);
+ }
++ BUG();
+ case 7: /* FNEGABS */
+ switch (fmt) {
+ case 2:
+@@ -769,10 +803,12 @@ u_int fpregs[];
+ return(MAJOR_0E_EXCP);
+ case 1: /* double */
+ fpregs[t+1] = fpregs[r1+1];
++ fallthrough;
+ case 0: /* single */
+ fpregs[t] = fpregs[r1] | 0x80000000;
+ return(NOEXCEPTION);
+ }
++ BUG();
+ case 4: /* FSQRT */
+ switch (fmt) {
+ case 0:
+@@ -785,6 +821,7 @@ u_int fpregs[];
+ case 3:
+ return(MAJOR_0E_EXCP);
+ }
++ BUG();
+ case 5: /* FRMD */
+ switch (fmt) {
+ case 0:
+@@ -798,7 +835,7 @@ u_int fpregs[];
+ return(MAJOR_0E_EXCP);
+ }
+ } /* end of switch (subop */
+-
++ BUG();
+ case 1: /* class 1 */
+ df = extru(ir,fpdfpos,2); /* get dest format */
+ /*
+@@ -826,6 +863,7 @@ u_int fpregs[];
+ case 3: /* dbl/dbl */
+ return(MAJOR_0E_EXCP);
+ }
++ BUG();
+ case 1: /* FCNVXF */
+ switch(fmt) {
+ case 0: /* sgl/sgl */
+@@ -841,6 +879,7 @@ u_int fpregs[];
+ return(dbl_to_dbl_fcnvxf(&fpregs[r1],0,
+ &fpregs[t],status));
+ }
++ BUG();
+ case 2: /* FCNVFX */
+ switch(fmt) {
+ case 0: /* sgl/sgl */
+@@ -856,6 +895,7 @@ u_int fpregs[];
+ return(dbl_to_dbl_fcnvfx(&fpregs[r1],0,
+ &fpregs[t],status));
+ }
++ BUG();
+ case 3: /* FCNVFXT */
+ switch(fmt) {
+ case 0: /* sgl/sgl */
+@@ -871,6 +911,7 @@ u_int fpregs[];
+ return(dbl_to_dbl_fcnvfxt(&fpregs[r1],0,
+ &fpregs[t],status));
+ }
++ BUG();
+ case 5: /* FCNVUF (PA2.0 only) */
+ switch(fmt) {
+ case 0: /* sgl/sgl */
+@@ -886,6 +927,7 @@ u_int fpregs[];
+ return(dbl_to_dbl_fcnvuf(&fpregs[r1],0,
+ &fpregs[t],status));
+ }
++ BUG();
+ case 6: /* FCNVFU (PA2.0 only) */
+ switch(fmt) {
+ case 0: /* sgl/sgl */
+@@ -901,6 +943,7 @@ u_int fpregs[];
+ return(dbl_to_dbl_fcnvfu(&fpregs[r1],0,
+ &fpregs[t],status));
+ }
++ BUG();
+ case 7: /* FCNVFUT (PA2.0 only) */
+ switch(fmt) {
+ case 0: /* sgl/sgl */
+@@ -916,9 +959,11 @@ u_int fpregs[];
+ return(dbl_to_dbl_fcnvfut(&fpregs[r1],0,
+ &fpregs[t],status));
+ }
++ BUG();
+ case 4: /* undefined */
+ return(MAJOR_0C_EXCP);
+ } /* end of switch subop */
++ BUG();
+ case 2: /* class 2 */
+ /*
+ * Be careful out there.
+@@ -994,6 +1039,7 @@ u_int fpregs[];
+ }
+ } /* end of switch subop */
+ } /* end of else for PA1.0 & PA1.1 */
++ BUG();
+ case 3: /* class 3 */
+ /*
+ * Be careful out there.
+@@ -1026,6 +1072,7 @@ u_int fpregs[];
+ return(dbl_fadd(&fpregs[r1],&fpregs[r2],
+ &fpregs[t],status));
+ }
++ BUG();
+ case 1: /* FSUB */
+ switch (fmt) {
+ case 0:
+@@ -1035,6 +1082,7 @@ u_int fpregs[];
+ return(dbl_fsub(&fpregs[r1],&fpregs[r2],
+ &fpregs[t],status));
+ }
++ BUG();
+ case 2: /* FMPY or XMPYU */
+ /*
+ * check for integer multiply (x bit set)
+@@ -1071,6 +1119,7 @@ u_int fpregs[];
+ &fpregs[r2],&fpregs[t],status));
+ }
+ }
++ BUG();
+ case 3: /* FDIV */
+ switch (fmt) {
+ case 0:
+@@ -1080,6 +1129,7 @@ u_int fpregs[];
+ return(dbl_fdiv(&fpregs[r1],&fpregs[r2],
+ &fpregs[t],status));
+ }
++ BUG();
+ case 4: /* FREM */
+ switch (fmt) {
+ case 0:
+diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
+index a95f63788c6b1..4ba834599c4d4 100644
+--- a/arch/powerpc/include/asm/code-patching.h
++++ b/arch/powerpc/include/asm/code-patching.h
+@@ -23,6 +23,7 @@
+ #define BRANCH_ABSOLUTE 0x2
+
+ bool is_offset_in_branch_range(long offset);
++bool is_offset_in_cond_branch_range(long offset);
+ int create_branch(struct ppc_inst *instr, const u32 *addr,
+ unsigned long target, int flags);
+ int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
+diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
+index 792eefaf230b8..27574f218b371 100644
+--- a/arch/powerpc/include/asm/security_features.h
++++ b/arch/powerpc/include/asm/security_features.h
+@@ -39,6 +39,11 @@ static inline bool security_ftr_enabled(u64 feature)
+ return !!(powerpc_security_features & feature);
+ }
+
++#ifdef CONFIG_PPC_BOOK3S_64
++enum stf_barrier_type stf_barrier_type_get(void);
++#else
++static inline enum stf_barrier_type stf_barrier_type_get(void) { return STF_BARRIER_NONE; }
++#endif
+
+ // Features indicating support for Spectre/Meltdown mitigations
+
+diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
+index abb719b21cae7..3d97fb833834d 100644
+--- a/arch/powerpc/kernel/idle_book3s.S
++++ b/arch/powerpc/kernel/idle_book3s.S
+@@ -126,14 +126,16 @@ _GLOBAL(idle_return_gpr_loss)
+ /*
+ * This is the sequence required to execute idle instructions, as
+ * specified in ISA v2.07 (and earlier). MSR[IR] and MSR[DR] must be 0.
+- *
+- * The 0(r1) slot is used to save r2 in isa206, so use that here.
++ * We have to store a GPR somewhere, ptesync, then reload it, and create
++ * a false dependency on the result of the load. It doesn't matter which
++ * GPR we store, or where we store it. We have already stored r2 to the
++ * stack at -8(r1) in isa206_idle_insn_mayloss, so use that.
+ */
+ #define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST) \
+ /* Magic NAP/SLEEP/WINKLE mode enter sequence */ \
+- std r2,0(r1); \
+ ptesync; \
+- ld r2,0(r1); \
++ std r2,-8(r1); \
++ ld r2,-8(r1); \
+236: cmpd cr0,r2,r2; \
+ bne 236b; \
+ IDLE_INST; \
+
+diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
+index cc51fa52e7831..e723ff77cc9be 100644
+--- a/arch/powerpc/kernel/security.c
++++ b/arch/powerpc/kernel/security.c
+@@ -263,6 +263,11 @@ static int __init handle_no_stf_barrier(char *p)
+
+ early_param("no_stf_barrier", handle_no_stf_barrier);
+
++enum stf_barrier_type stf_barrier_type_get(void)
++{
++ return stf_enabled_flush_types;
++}
++
+ /* This is the generic flag used by other architectures */
+ static int __init handle_ssbd(char *p)
+ {
+diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
+index 12c75b95646a5..3c5eb9dc101b2 100644
+--- a/arch/powerpc/kernel/smp.c
++++ b/arch/powerpc/kernel/smp.c
+@@ -1703,8 +1703,6 @@ void __cpu_die(unsigned int cpu)
+
+ void arch_cpu_idle_dead(void)
+ {
+- sched_preempt_enable_no_resched();
+-
+ /*
+ * Disable on the down path. This will be re-enabled by
+ * start_secondary() via start_secondary_resume() below
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index dd18e1c447512..bbcc82b828da8 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -255,13 +255,16 @@ kvm_novcpu_exit:
+ * r3 contains the SRR1 wakeup value, SRR1 is trashed.
+ */
+ _GLOBAL(idle_kvm_start_guest)
+- ld r4,PACAEMERGSP(r13)
+ mfcr r5
+ mflr r0
+- std r1,0(r4)
+- std r5,8(r4)
+- std r0,16(r4)
+- subi r1,r4,STACK_FRAME_OVERHEAD
++ std r5, 8(r1) // Save CR in caller's frame
++ std r0, 16(r1) // Save LR in caller's frame
++ // Create frame on emergency stack
++ ld r4, PACAEMERGSP(r13)
++ stdu r1, -SWITCH_FRAME_SIZE(r4)
++ // Switch to new frame on emergency stack
++ mr r1, r4
++ std r3, 32(r1) // Save SRR1 wakeup value
+ SAVE_NVGPRS(r1)
+
+ /*
+@@ -313,6 +316,10 @@ kvm_unsplit_wakeup:
+
+ kvm_secondary_got_guest:
+
++ // About to go to guest, clear saved SRR1
++ li r0, 0
++ std r0, 32(r1)
++
+ /* Set HSTATE_DSCR(r13) to something sensible */
+ ld r6, PACA_DSCR_DEFAULT(r13)
+ std r6, HSTATE_DSCR(r13)
+@@ -392,13 +399,12 @@ kvm_no_guest:
+ mfspr r4, SPRN_LPCR
+ rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
+ mtspr SPRN_LPCR, r4
+- /* set up r3 for return */
+- mfspr r3,SPRN_SRR1
++ // Return SRR1 wakeup value, or 0 if we went into the guest
++ ld r3, 32(r1)
+ REST_NVGPRS(r1)
+- addi r1, r1, STACK_FRAME_OVERHEAD
+- ld r0, 16(r1)
+- ld r5, 8(r1)
+- ld r1, 0(r1)
++ ld r1, 0(r1) // Switch back to caller stack
++ ld r0, 16(r1) // Reload LR
++ ld r5, 8(r1) // Reload CR
+ mtlr r0
+ mtcr r5
+ blr
+diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
+index f9a3019e37b43..c5ed988238352 100644
+--- a/arch/powerpc/lib/code-patching.c
++++ b/arch/powerpc/lib/code-patching.c
+@@ -228,6 +228,11 @@ bool is_offset_in_branch_range(long offset)
+ return (offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3));
+ }
+
++bool is_offset_in_cond_branch_range(long offset)
++{
++ return offset >= -0x8000 && offset <= 0x7fff && !(offset & 0x3);
++}
++
+ /*
+ * Helper to check if a given instruction is a conditional branch
+ * Derived from the conditional checks in analyse_instr()
+@@ -280,7 +285,7 @@ int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
+ offset = offset - (unsigned long)addr;
+
+ /* Check we can represent the target in the instruction format */
+- if (offset < -0x8000 || offset > 0x7FFF || offset & 0x3)
++ if (!is_offset_in_cond_branch_range(offset))
+ return 1;
+
+ /* Mask out the flags and target, so they don't step on each other. */
+diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
+index 99fad093f43ec..7e9b978b768ed 100644
+--- a/arch/powerpc/net/bpf_jit.h
++++ b/arch/powerpc/net/bpf_jit.h
+@@ -24,16 +24,30 @@
+ #define EMIT(instr) PLANT_INSTR(image, ctx->idx, instr)
+
+ /* Long jump; (unconditional 'branch') */
+-#define PPC_JMP(dest) EMIT(PPC_INST_BRANCH | \
+- (((dest) - (ctx->idx * 4)) & 0x03fffffc))
++#define PPC_JMP(dest) \
++ do { \
++ long offset = (long)(dest) - (ctx->idx * 4); \
++ if (!is_offset_in_branch_range(offset)) { \
++ pr_err_ratelimited("Branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx); \
++ return -ERANGE; \
++ } \
++ EMIT(PPC_INST_BRANCH | (offset & 0x03fffffc)); \
++ } while (0)
++
+ /* blr; (unconditional 'branch' with link) to absolute address */
+ #define PPC_BL_ABS(dest) EMIT(PPC_INST_BL | \
+ (((dest) - (unsigned long)(image + ctx->idx)) & 0x03fffffc))
+ /* "cond" here covers BO:BI fields. */
+-#define PPC_BCC_SHORT(cond, dest) EMIT(PPC_INST_BRANCH_COND | \
+- (((cond) & 0x3ff) << 16) | \
+- (((dest) - (ctx->idx * 4)) & \
+- 0xfffc))
++#define PPC_BCC_SHORT(cond, dest) \
++ do { \
++ long offset = (long)(dest) - (ctx->idx * 4); \
++ if (!is_offset_in_cond_branch_range(offset)) { \
++ pr_err_ratelimited("Conditional branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx); \
++ return -ERANGE; \
++ } \
++ EMIT(PPC_INST_BRANCH_COND | (((cond) & 0x3ff) << 16) | (offset & 0xfffc)); \
++ } while (0)
++
+ /* Sign-extended 32-bit immediate load */
+ #define PPC_LI32(d, i) do { \
+ if ((int)(uintptr_t)(i) >= -32768 && \
+@@ -78,11 +92,6 @@
+ #define PPC_FUNC_ADDR(d,i) do { PPC_LI32(d, i); } while(0)
+ #endif
+
+-static inline bool is_nearbranch(int offset)
+-{
+- return (offset < 32768) && (offset >= -32768);
+-}
+-
+ /*
+ * The fly in the ointment of code size changing from pass to pass is
+ * avoided by padding the short branch case with a NOP. If code size differs
+@@ -91,7 +100,7 @@ static inline bool is_nearbranch(int offset)
+ * state.
+ */
+ #define PPC_BCC(cond, dest) do { \
+- if (is_nearbranch((dest) - (ctx->idx * 4))) { \
++ if (is_offset_in_cond_branch_range((long)(dest) - (ctx->idx * 4))) { \
+ PPC_BCC_SHORT(cond, dest); \
+ EMIT(PPC_RAW_NOP()); \
+ } else { \
+diff --git a/arch/powerpc/net/bpf_jit64.h b/arch/powerpc/net/bpf_jit64.h
+index 7b713edfa7e26..b63b35e45e558 100644
+--- a/arch/powerpc/net/bpf_jit64.h
++++ b/arch/powerpc/net/bpf_jit64.h
+@@ -16,18 +16,18 @@
+ * with our redzone usage.
+ *
+ * [ prev sp ] <-------------
+- * [ nv gpr save area ] 6*8 |
++ * [ nv gpr save area ] 5*8 |
+ * [ tail_call_cnt ] 8 |
+- * [ local_tmp_var ] 8 |
++ * [ local_tmp_var ] 16 |
+ * fp (r31) --> [ ebpf stack space ] upto 512 |
+ * [ frame header ] 32/112 |
+ * sp (r1) ---> [ stack pointer ] --------------
+ */
+
+ /* for gpr non volatile registers BPG_REG_6 to 10 */
+-#define BPF_PPC_STACK_SAVE (6*8)
++#define BPF_PPC_STACK_SAVE (5*8)
+ /* for bpf JIT code internal usage */
+-#define BPF_PPC_STACK_LOCALS 16
++#define BPF_PPC_STACK_LOCALS 24
+ /* stack frame excluding BPF stack, ensure this is quadword aligned */
+ #define BPF_PPC_STACKFRAME (STACK_FRAME_MIN_SIZE + \
+ BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
+diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
+index 53aefee3fe70b..fcbf7a917c566 100644
+--- a/arch/powerpc/net/bpf_jit_comp.c
++++ b/arch/powerpc/net/bpf_jit_comp.c
+@@ -210,7 +210,11 @@ skip_init_ctx:
+ /* Now build the prologue, body code & epilogue for real. */
+ cgctx.idx = 0;
+ bpf_jit_build_prologue(code_base, &cgctx);
+- bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass);
++ if (bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass)) {
++ bpf_jit_binary_free(bpf_hdr);
++ fp = org_fp;
++ goto out_addrs;
++ }
+ bpf_jit_build_epilogue(code_base, &cgctx);
+
+ if (bpf_jit_enable > 1)
+diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
+index a7759aa8043d2..0da31d41d4131 100644
+--- a/arch/powerpc/net/bpf_jit_comp32.c
++++ b/arch/powerpc/net/bpf_jit_comp32.c
+@@ -200,7 +200,7 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun
+ }
+ }
+
+-static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
++static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
+ {
+ /*
+ * By now, the eBPF program has already setup parameters in r3-r6
+@@ -261,7 +261,9 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
+ bpf_jit_emit_common_epilogue(image, ctx);
+
+ EMIT(PPC_RAW_BCTR());
++
+ /* out: */
++ return 0;
+ }
+
+ /* Assemble the body code between the prologue & epilogue */
+@@ -1090,7 +1092,9 @@ cond_branch:
+ */
+ case BPF_JMP | BPF_TAIL_CALL:
+ ctx->seen |= SEEN_TAILCALL;
+- bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
++ ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
++ if (ret < 0)
++ return ret;
+ break;
+
+ default:
+diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
+index dff4a2930970b..8b5157ccfebae 100644
+--- a/arch/powerpc/net/bpf_jit_comp64.c
++++ b/arch/powerpc/net/bpf_jit_comp64.c
+@@ -15,6 +15,7 @@
+ #include <linux/if_vlan.h>
+ #include <asm/kprobes.h>
+ #include <linux/bpf.h>
++#include <asm/security_features.h>
+
+ #include "bpf_jit64.h"
+
+@@ -35,9 +36,9 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
+ * [ prev sp ] <-------------
+ * [ ... ] |
+ * sp (r1) ---> [ stack pointer ] --------------
+- * [ nv gpr save area ] 6*8
++ * [ nv gpr save area ] 5*8
+ * [ tail_call_cnt ] 8
+- * [ local_tmp_var ] 8
++ * [ local_tmp_var ] 16
+ * [ unused red zone ] 208 bytes protected
+ */
+ static int bpf_jit_stack_local(struct codegen_context *ctx)
+@@ -45,12 +46,12 @@ static int bpf_jit_stack_local(struct codegen_context *ctx)
+ if (bpf_has_stack_frame(ctx))
+ return STACK_FRAME_MIN_SIZE + ctx->stack_size;
+ else
+- return -(BPF_PPC_STACK_SAVE + 16);
++ return -(BPF_PPC_STACK_SAVE + 24);
+ }
+
+ static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
+ {
+- return bpf_jit_stack_local(ctx) + 8;
++ return bpf_jit_stack_local(ctx) + 16;
+ }
+
+ static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
+@@ -206,7 +207,7 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun
+ EMIT(PPC_RAW_BCTRL());
+ }
+
+-static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
++static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
+ {
+ /*
+ * By now, the eBPF program has already setup parameters in r3, r4 and r5
+@@ -267,13 +268,38 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
+ bpf_jit_emit_common_epilogue(image, ctx);
+
+ EMIT(PPC_RAW_BCTR());
++
+ /* out: */
++ return 0;
+ }
+
++/*
++ * We spill into the redzone always, even if the bpf program has its own stackframe.
++ * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
++ */
++void bpf_stf_barrier(void);
++
++asm (
++" .global bpf_stf_barrier ;"
++" bpf_stf_barrier: ;"
++" std 21,-64(1) ;"
++" std 22,-56(1) ;"
++" sync ;"
++" ld 21,-64(1) ;"
++" ld 22,-56(1) ;"
++" ori 31,31,0 ;"
++" .rept 14 ;"
++" b 1f ;"
++" 1: ;"
++" .endr ;"
++" blr ;"
++);
++
+ /* Assemble the body code between the prologue & epilogue */
+ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
+ u32 *addrs, bool extra_pass)
+ {
++ enum stf_barrier_type stf_barrier = stf_barrier_type_get();
+ const struct bpf_insn *insn = fp->insnsi;
+ int flen = fp->len;
+ int i, ret;
+@@ -644,6 +670,29 @@ emit_clear:
+ * BPF_ST NOSPEC (speculation barrier)
+ */
+ case BPF_ST | BPF_NOSPEC:
++ if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
++ !security_ftr_enabled(SEC_FTR_STF_BARRIER))
++ break;
++
++ switch (stf_barrier) {
++ case STF_BARRIER_EIEIO:
++ EMIT(PPC_RAW_EIEIO() | 0x02000000);
++ break;
++ case STF_BARRIER_SYNC_ORI:
++ EMIT(PPC_RAW_SYNC());
++ EMIT(PPC_RAW_LD(b2p[TMP_REG_1], _R13, 0));
++ EMIT(PPC_RAW_ORI(_R31, _R31, 0));
++ break;
++ case STF_BARRIER_FALLBACK:
++ EMIT(PPC_RAW_MFLR(b2p[TMP_REG_1]));
++ PPC_LI64(12, dereference_kernel_function_descriptor(bpf_stf_barrier));
++ EMIT(PPC_RAW_MTCTR(12));
++ EMIT(PPC_RAW_BCTRL());
++ EMIT(PPC_RAW_MTLR(b2p[TMP_REG_1]));
++ break;
++ case STF_BARRIER_NONE:
++ break;
++ }
+ break;
+
+ /*
+@@ -1006,7 +1055,9 @@ cond_branch:
+ */
+ case BPF_JMP | BPF_TAIL_CALL:
+ ctx->seen |= SEEN_TAILCALL;
+- bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
++ ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
++ if (ret < 0)
++ return ret;
+ break;
+
+ default:
+diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
+index 5509b224c2eca..abe4d45c2f471 100644
+--- a/arch/s390/include/asm/pci.h
++++ b/arch/s390/include/asm/pci.h
+@@ -207,6 +207,8 @@ int zpci_enable_device(struct zpci_dev *);
+ int zpci_disable_device(struct zpci_dev *);
+ int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh);
+ int zpci_deconfigure_device(struct zpci_dev *zdev);
++void zpci_device_reserved(struct zpci_dev *zdev);
++bool zpci_is_device_configured(struct zpci_dev *zdev);
+
+ int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64);
+ int zpci_unregister_ioat(struct zpci_dev *, u8);
+diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
+index 34839bad33e4d..d7ef98218c80c 100644
+--- a/arch/s390/pci/pci.c
++++ b/arch/s390/pci/pci.c
+@@ -92,7 +92,7 @@ void zpci_remove_reserved_devices(void)
+ spin_unlock(&zpci_list_lock);
+
+ list_for_each_entry_safe(zdev, tmp, &remove, entry)
+- zpci_zdev_put(zdev);
++ zpci_device_reserved(zdev);
+ }
+
+ int pci_domain_nr(struct pci_bus *bus)
+@@ -744,6 +744,14 @@ error:
+ return ERR_PTR(rc);
+ }
+
++bool zpci_is_device_configured(struct zpci_dev *zdev)
++{
++ enum zpci_state state = zdev->state;
++
++ return state != ZPCI_FN_STATE_RESERVED &&
++ state != ZPCI_FN_STATE_STANDBY;
++}
++
+ /**
+ * zpci_scan_configured_device() - Scan a freshly configured zpci_dev
+ * @zdev: The zpci_dev to be configured
+@@ -810,6 +818,31 @@ int zpci_deconfigure_device(struct zpci_dev *zdev)
+ return 0;
+ }
+
++/**
++ * zpci_device_reserved() - Mark device as reserved
++ * @zdev: the zpci_dev that was reserved
++ *
++ * Handle the case that a given zPCI function was reserved by another system.
++ * After a call to this function the zpci_dev can not be found via
++ * get_zdev_by_fid() anymore but may still be accessible via existing
++ * references though it will not be functional anymore.
++ */
++void zpci_device_reserved(struct zpci_dev *zdev)
++{
++ if (zdev->has_hp_slot)
++ zpci_exit_slot(zdev);
++ /*
++ * Remove device from zpci_list as it is going away. This also
++ * makes sure we ignore subsequent zPCI events for this device.
++ */
++ spin_lock(&zpci_list_lock);
++ list_del(&zdev->entry);
++ spin_unlock(&zpci_list_lock);
++ zdev->state = ZPCI_FN_STATE_RESERVED;
++ zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
++ zpci_zdev_put(zdev);
++}
++
+ void zpci_release_device(struct kref *kref)
+ {
+ struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
+@@ -829,17 +862,20 @@ void zpci_release_device(struct kref *kref)
+ case ZPCI_FN_STATE_STANDBY:
+ if (zdev->has_hp_slot)
+ zpci_exit_slot(zdev);
+- zpci_cleanup_bus_resources(zdev);
++ spin_lock(&zpci_list_lock);
++ list_del(&zdev->entry);
++ spin_unlock(&zpci_list_lock);
++ zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
++ fallthrough;
++ case ZPCI_FN_STATE_RESERVED:
++ if (zdev->has_resources)
++ zpci_cleanup_bus_resources(zdev);
+ zpci_bus_device_unregister(zdev);
+ zpci_destroy_iommu(zdev);
+ fallthrough;
+ default:
+ break;
+ }
+-
+- spin_lock(&zpci_list_lock);
+- list_del(&zdev->entry);
+- spin_unlock(&zpci_list_lock);
+ zpci_dbg(3, "rem fid:%x\n", zdev->fid);
+ kfree(zdev);
+ }
+diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
+index cd447b96b4b1b..9b26617ca1c59 100644
+--- a/arch/s390/pci/pci_event.c
++++ b/arch/s390/pci/pci_event.c
+@@ -137,7 +137,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
+ /* The 0x0304 event may immediately reserve the device */
+ if (!clp_get_state(zdev->fid, &state) &&
+ state == ZPCI_FN_STATE_RESERVED) {
+- zpci_zdev_put(zdev);
++ zpci_device_reserved(zdev);
+ }
+ }
+ break;
+@@ -148,7 +148,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
+ case 0x0308: /* Standby -> Reserved */
+ if (!zdev)
+ break;
+- zpci_zdev_put(zdev);
++ zpci_device_reserved(zdev);
+ break;
+ default:
+ break;
+diff --git a/arch/sh/include/asm/pgtable-3level.h b/arch/sh/include/asm/pgtable-3level.h
+index 56bf35c2f29c2..cdced80a7ffa3 100644
+--- a/arch/sh/include/asm/pgtable-3level.h
++++ b/arch/sh/include/asm/pgtable-3level.h
+@@ -34,7 +34,7 @@ typedef struct { unsigned long long pmd; } pmd_t;
+
+ static inline pmd_t *pud_pgtable(pud_t pud)
+ {
+- return (pmd_t *)pud_val(pud);
++ return (pmd_t *)(unsigned long)pud_val(pud);
+ }
+
+ /* only used by the stubbed out hugetlb gup code, should never be called */
+diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
+index c853b28efa334..96c775abe31ff 100644
+--- a/arch/x86/events/msr.c
++++ b/arch/x86/events/msr.c
+@@ -68,6 +68,7 @@ static bool test_intel(int idx, void *data)
+ case INTEL_FAM6_BROADWELL_D:
+ case INTEL_FAM6_BROADWELL_G:
+ case INTEL_FAM6_BROADWELL_X:
++ case INTEL_FAM6_SAPPHIRERAPIDS_X:
+
+ case INTEL_FAM6_ATOM_SILVERMONT:
+ case INTEL_FAM6_ATOM_SILVERMONT_D:
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index af6ce8d4c86a8..471b35d0b121e 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -695,7 +695,8 @@ struct kvm_vcpu_arch {
+
+ struct kvm_pio_request pio;
+ void *pio_data;
+- void *guest_ins_data;
++ void *sev_pio_data;
++ unsigned sev_pio_count;
+
+ u8 event_exit_inst_len;
+
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index c268fb59f7794..6719a8041f594 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -4465,10 +4465,10 @@ static void update_pkru_bitmask(struct kvm_mmu *mmu)
+ unsigned bit;
+ bool wp;
+
+- if (!is_cr4_pke(mmu)) {
+- mmu->pkru_mask = 0;
++ mmu->pkru_mask = 0;
++
++ if (!is_cr4_pke(mmu))
+ return;
+- }
+
+ wp = is_cr0_wp(mmu);
+
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index cb166bde449bd..9959888cb10c8 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -619,7 +619,12 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
+ vmsa.handle = to_kvm_svm(kvm)->sev_info.handle;
+ vmsa.address = __sme_pa(svm->vmsa);
+ vmsa.len = PAGE_SIZE;
+- return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
++ ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
++ if (ret)
++ return ret;
++
++ vcpu->arch.guest_state_protected = true;
++ return 0;
+ }
+
+ static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
+@@ -1480,6 +1485,13 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ goto e_free_trans;
+ }
+
++ /*
++ * Flush (on non-coherent CPUs) before RECEIVE_UPDATE_DATA, the PSP
++ * encrypts the written data with the guest's key, and the cache may
++ * contain dirty, unencrypted data.
++ */
++ sev_clflush_pages(guest_page, n);
++
+ /* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */
+ data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
+ data.guest_address |= sev_me_mask;
+@@ -2584,7 +2596,7 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
+ return -EINVAL;
+
+ return kvm_sev_es_string_io(&svm->vcpu, size, port,
+- svm->ghcb_sa, svm->ghcb_sa_len, in);
++ svm->ghcb_sa, svm->ghcb_sa_len / size, in);
+ }
+
+ void sev_es_init_vmcb(struct vcpu_svm *svm)
+diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
+index bd0fe94c29207..820f807f8a4fb 100644
+--- a/arch/x86/kvm/svm/svm.h
++++ b/arch/x86/kvm/svm/svm.h
+@@ -191,7 +191,7 @@ struct vcpu_svm {
+
+ /* SEV-ES scratch area support */
+ void *ghcb_sa;
+- u64 ghcb_sa_len;
++ u32 ghcb_sa_len;
+ bool ghcb_sa_sync;
+ bool ghcb_sa_free;
+
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 55de1eb135f92..3cb2f4739e324 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -6288,18 +6288,13 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
+
+ /*
+ * If we are running L2 and L1 has a new pending interrupt
+- * which can be injected, we should re-evaluate
+- * what should be done with this new L1 interrupt.
+- * If L1 intercepts external-interrupts, we should
+- * exit from L2 to L1. Otherwise, interrupt should be
+- * delivered directly to L2.
++ * which can be injected, this may cause a vmexit or it may
++ * be injected into L2. Either way, this interrupt will be
++ * processed via KVM_REQ_EVENT, not RVI, because we do not use
++ * virtual interrupt delivery to inject L1 interrupts into L2.
+ */
+- if (is_guest_mode(vcpu) && max_irr_updated) {
+- if (nested_exit_on_intr(vcpu))
+- kvm_vcpu_exiting_guest_mode(vcpu);
+- else
+- kvm_make_request(KVM_REQ_EVENT, vcpu);
+- }
++ if (is_guest_mode(vcpu) && max_irr_updated)
++ kvm_make_request(KVM_REQ_EVENT, vcpu);
+ } else {
+ max_irr = kvm_lapic_find_highest_irr(vcpu);
+ }
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 4b0e866e9f086..8e9df0e00f3dd 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -6907,7 +6907,7 @@ static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
+ }
+
+ static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
+- unsigned short port, void *val,
++ unsigned short port,
+ unsigned int count, bool in)
+ {
+ vcpu->arch.pio.port = port;
+@@ -6915,10 +6915,8 @@ static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
+ vcpu->arch.pio.count = count;
+ vcpu->arch.pio.size = size;
+
+- if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
+- vcpu->arch.pio.count = 0;
++ if (!kernel_pio(vcpu, vcpu->arch.pio_data))
+ return 1;
+- }
+
+ vcpu->run->exit_reason = KVM_EXIT_IO;
+ vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
+@@ -6930,26 +6928,39 @@ static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
+ return 0;
+ }
+
+-static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
+- unsigned short port, void *val, unsigned int count)
++static int __emulator_pio_in(struct kvm_vcpu *vcpu, int size,
++ unsigned short port, unsigned int count)
+ {
+- int ret;
++ WARN_ON(vcpu->arch.pio.count);
++ memset(vcpu->arch.pio_data, 0, size * count);
++ return emulator_pio_in_out(vcpu, size, port, count, true);
++}
+
+- if (vcpu->arch.pio.count)
+- goto data_avail;
++static void complete_emulator_pio_in(struct kvm_vcpu *vcpu, void *val)
++{
++ int size = vcpu->arch.pio.size;
++ unsigned count = vcpu->arch.pio.count;
++ memcpy(val, vcpu->arch.pio_data, size * count);
++ trace_kvm_pio(KVM_PIO_IN, vcpu->arch.pio.port, size, count, vcpu->arch.pio_data);
++ vcpu->arch.pio.count = 0;
++}
+
+- memset(vcpu->arch.pio_data, 0, size * count);
++static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
++ unsigned short port, void *val, unsigned int count)
++{
++ if (vcpu->arch.pio.count) {
++ /* Complete previous iteration. */
++ } else {
++ int r = __emulator_pio_in(vcpu, size, port, count);
++ if (!r)
++ return r;
+
+- ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
+- if (ret) {
+-data_avail:
+- memcpy(val, vcpu->arch.pio_data, size * count);
+- trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data);
+- vcpu->arch.pio.count = 0;
+- return 1;
++ /* Results already available, fall through. */
+ }
+
+- return 0;
++ WARN_ON(count != vcpu->arch.pio.count);
++ complete_emulator_pio_in(vcpu, val);
++ return 1;
+ }
+
+ static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
+@@ -6964,9 +6975,15 @@ static int emulator_pio_out(struct kvm_vcpu *vcpu, int size,
+ unsigned short port, const void *val,
+ unsigned int count)
+ {
++ int ret;
++
+ memcpy(vcpu->arch.pio_data, val, size * count);
+ trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data);
+- return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
++ ret = emulator_pio_in_out(vcpu, size, port, count, false);
++ if (ret)
++ vcpu->arch.pio.count = 0;
++
++ return ret;
+ }
+
+ static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
+@@ -9637,14 +9654,14 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
+ break;
+
+- if (unlikely(kvm_vcpu_exit_request(vcpu))) {
++ if (vcpu->arch.apicv_active)
++ static_call(kvm_x86_sync_pir_to_irr)(vcpu);
++
++ if (unlikely(kvm_vcpu_exit_request(vcpu))) {
+ exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
+ break;
+ }
+-
+- if (vcpu->arch.apicv_active)
+- static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+- }
++ }
+
+ /*
+ * Do this here before restoring debug registers on the host. And
+@@ -12320,44 +12337,81 @@ int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
+ }
+ EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_read);
+
+-static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
++static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
++ unsigned int port);
++
++static int complete_sev_es_emulated_outs(struct kvm_vcpu *vcpu)
+ {
+- memcpy(vcpu->arch.guest_ins_data, vcpu->arch.pio_data,
+- vcpu->arch.pio.count * vcpu->arch.pio.size);
+- vcpu->arch.pio.count = 0;
++ int size = vcpu->arch.pio.size;
++ int port = vcpu->arch.pio.port;
+
++ vcpu->arch.pio.count = 0;
++ if (vcpu->arch.sev_pio_count)
++ return kvm_sev_es_outs(vcpu, size, port);
+ return 1;
+ }
+
+ static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
+- unsigned int port, void *data, unsigned int count)
++ unsigned int port)
+ {
+- int ret;
+-
+- ret = emulator_pio_out_emulated(vcpu->arch.emulate_ctxt, size, port,
+- data, count);
+- if (ret)
+- return ret;
++ for (;;) {
++ unsigned int count =
++ min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
++ int ret = emulator_pio_out(vcpu, size, port, vcpu->arch.sev_pio_data, count);
++
++ /* memcpy done already by emulator_pio_out. */
++ vcpu->arch.sev_pio_count -= count;
++ vcpu->arch.sev_pio_data += count * vcpu->arch.pio.size;
++ if (!ret)
++ break;
+
+- vcpu->arch.pio.count = 0;
++ /* Emulation done by the kernel. */
++ if (!vcpu->arch.sev_pio_count)
++ return 1;
++ }
+
++ vcpu->arch.complete_userspace_io = complete_sev_es_emulated_outs;
+ return 0;
+ }
+
+ static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
+- unsigned int port, void *data, unsigned int count)
++ unsigned int port);
++
++static void advance_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
+ {
+- int ret;
++ unsigned count = vcpu->arch.pio.count;
++ complete_emulator_pio_in(vcpu, vcpu->arch.sev_pio_data);
++ vcpu->arch.sev_pio_count -= count;
++ vcpu->arch.sev_pio_data += count * vcpu->arch.pio.size;
++}
+
+- ret = emulator_pio_in_emulated(vcpu->arch.emulate_ctxt, size, port,
+- data, count);
+- if (ret) {
+- vcpu->arch.pio.count = 0;
+- } else {
+- vcpu->arch.guest_ins_data = data;
+- vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins;
++static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
++{
++ int size = vcpu->arch.pio.size;
++ int port = vcpu->arch.pio.port;
++
++ advance_sev_es_emulated_ins(vcpu);
++ if (vcpu->arch.sev_pio_count)
++ return kvm_sev_es_ins(vcpu, size, port);
++ return 1;
++}
++
++static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
++ unsigned int port)
++{
++ for (;;) {
++ unsigned int count =
++ min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
++ if (!__emulator_pio_in(vcpu, size, port, count))
++ break;
++
++ /* Emulation done by the kernel. */
++ advance_sev_es_emulated_ins(vcpu);
++ if (!vcpu->arch.sev_pio_count)
++ return 1;
+ }
+
++ vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins;
+ return 0;
+ }
+
+@@ -12365,8 +12419,10 @@ int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
+ unsigned int port, void *data, unsigned int count,
+ int in)
+ {
+- return in ? kvm_sev_es_ins(vcpu, size, port, data, count)
+- : kvm_sev_es_outs(vcpu, size, port, data, count);
++ vcpu->arch.sev_pio_data = data;
++ vcpu->arch.sev_pio_count = count;
++ return in ? kvm_sev_es_ins(vcpu, size, port)
++ : kvm_sev_es_outs(vcpu, size, port);
+ }
+ EXPORT_SYMBOL_GPL(kvm_sev_es_string_io);
+
1548 +diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
1549 +index c79bd0af2e8c2..f252faf5028f2 100644
1550 +--- a/arch/x86/xen/enlighten.c
1551 ++++ b/arch/x86/xen/enlighten.c
1552 +@@ -52,9 +52,6 @@ DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
1553 + DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
1554 + EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);
1555 +
1556 +-enum xen_domain_type xen_domain_type = XEN_NATIVE;
1557 +-EXPORT_SYMBOL_GPL(xen_domain_type);
1558 +-
1559 + unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START;
1560 + EXPORT_SYMBOL(machine_to_phys_mapping);
1561 + unsigned long machine_to_phys_nr;
1562 +@@ -69,9 +66,11 @@ __read_mostly int xen_have_vector_callback;
1563 + EXPORT_SYMBOL_GPL(xen_have_vector_callback);
1564 +
1565 + /*
1566 +- * NB: needs to live in .data because it's used by xen_prepare_pvh which runs
1567 +- * before clearing the bss.
1568 ++ * NB: These need to live in .data or alike because they're used by
1569 ++ * xen_prepare_pvh() which runs before clearing the bss.
1570 + */
1571 ++enum xen_domain_type __ro_after_init xen_domain_type = XEN_NATIVE;
1572 ++EXPORT_SYMBOL_GPL(xen_domain_type);
1573 + uint32_t xen_start_flags __section(".data") = 0;
1574 + EXPORT_SYMBOL(xen_start_flags);
1575 +
1576 +diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
1577 +index 4f7d6142d41fa..538e6748e85a7 100644
1578 +--- a/arch/xtensa/platforms/xtfpga/setup.c
1579 ++++ b/arch/xtensa/platforms/xtfpga/setup.c
1580 +@@ -51,8 +51,12 @@ void platform_power_off(void)
1581 +
1582 + void platform_restart(void)
1583 + {
1584 +- /* Flush and reset the mmu, simulate a processor reset, and
1585 +- * jump to the reset vector. */
1586 ++ /* Try software reset first. */
1587 ++ WRITE_ONCE(*(u32 *)XTFPGA_SWRST_VADDR, 0xdead);
1588 ++
1589 ++ /* If software reset did not work, flush and reset the mmu,
1590 ++ * simulate a processor reset, and jump to the reset vector.
1591 ++ */
1592 + cpu_reset();
1593 + /* control never gets here */
1594 + }
1595 +@@ -66,7 +70,7 @@ void __init platform_calibrate_ccount(void)
1596 +
1597 + #endif
1598 +
1599 +-#ifdef CONFIG_OF
1600 ++#ifdef CONFIG_USE_OF
1601 +
1602 + static void __init xtfpga_clk_setup(struct device_node *np)
1603 + {
1604 +@@ -284,4 +288,4 @@ static int __init xtavnet_init(void)
1605 + */
1606 + arch_initcall(xtavnet_init);
1607 +
1608 +-#endif /* CONFIG_OF */
1609 ++#endif /* CONFIG_USE_OF */
1610 +diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
1611 +index 28e11decbac58..8e4dcf6036f60 100644
1612 +--- a/block/blk-cgroup.c
1613 ++++ b/block/blk-cgroup.c
1614 +@@ -1916,10 +1916,11 @@ void blk_cgroup_bio_start(struct bio *bio)
1615 + {
1616 + int rwd = blk_cgroup_io_type(bio), cpu;
1617 + struct blkg_iostat_set *bis;
1618 ++ unsigned long flags;
1619 +
1620 + cpu = get_cpu();
1621 + bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu);
1622 +- u64_stats_update_begin(&bis->sync);
1623 ++ flags = u64_stats_update_begin_irqsave(&bis->sync);
1624 +
1625 + /*
1626 + * If the bio is flagged with BIO_CGROUP_ACCT it means this is a split
1627 +@@ -1931,7 +1932,7 @@ void blk_cgroup_bio_start(struct bio *bio)
1628 + }
1629 + bis->cur.ios[rwd]++;
1630 +
1631 +- u64_stats_update_end(&bis->sync);
1632 ++ u64_stats_update_end_irqrestore(&bis->sync, flags);
1633 + if (cgroup_subsys_on_dfl(io_cgrp_subsys))
1634 + cgroup_rstat_updated(bio->bi_blkg->blkcg->css.cgroup, cpu);
1635 + put_cpu();
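
The blk-cgroup hunk switches the per-CPU stats writer to the irqsave variants because the update can now race with a writer running from interrupt context on the same CPU. A rough userspace analogue of the begin/end-with-interrupts-blocked pattern, using sigprocmask() in place of local_irq_save() and a plain counter in place of the kernel's u64_stats seqcount:

    #include <signal.h>
    #include <stdio.h>

    static unsigned int seq;            /* even = stable, odd = write in progress */
    static unsigned long long bytes;

    static void stats_update(unsigned long long delta)
    {
        sigset_t all, old;

        sigfillset(&all);
        sigprocmask(SIG_BLOCK, &all, &old);   /* ~ u64_stats_update_begin_irqsave() */
        seq++;                                /* mark write in progress */
        bytes += delta;
        seq++;                                /* mark write complete */
        sigprocmask(SIG_SETMASK, &old, NULL); /* ~ u64_stats_update_end_irqrestore() */
    }

    int main(void)
    {
        stats_update(512);
        printf("seq=%u bytes=%llu\n", seq, bytes);
        return 0;
    }
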
1636 +diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
1637 +index 4b66d2776edac..3b38d15723de1 100644
1638 +--- a/block/blk-mq-debugfs.c
1639 ++++ b/block/blk-mq-debugfs.c
1640 +@@ -129,6 +129,7 @@ static const char *const blk_queue_flag_name[] = {
1641 + QUEUE_FLAG_NAME(PCI_P2PDMA),
1642 + QUEUE_FLAG_NAME(ZONE_RESETALL),
1643 + QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
1644 ++ QUEUE_FLAG_NAME(HCTX_ACTIVE),
1645 + QUEUE_FLAG_NAME(NOWAIT),
1646 + };
1647 + #undef QUEUE_FLAG_NAME
1648 +diff --git a/block/mq-deadline.c b/block/mq-deadline.c
1649 +index 3c3693c34f061..7f3c3932b723e 100644
1650 +--- a/block/mq-deadline.c
1651 ++++ b/block/mq-deadline.c
1652 +@@ -270,12 +270,6 @@ deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
1653 + deadline_remove_request(rq->q, per_prio, rq);
1654 + }
1655 +
1656 +-/* Number of requests queued for a given priority level. */
1657 +-static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
1658 +-{
1659 +- return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio);
1660 +-}
1661 +-
1662 + /*
1663 + * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
1664 + * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
1665 +@@ -953,6 +947,12 @@ static int dd_async_depth_show(void *data, struct seq_file *m)
1666 + return 0;
1667 + }
1668 +
1669 ++/* Number of requests queued for a given priority level. */
1670 ++static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
1671 ++{
1672 ++ return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio);
1673 ++}
1674 ++
1675 + static int dd_queued_show(void *data, struct seq_file *m)
1676 + {
1677 + struct request_queue *q = data;
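
dd_queued() is only relocated next to its debugfs consumer here, but the helper itself shows a useful pattern: the in-flight count per priority level is derived from two monotonically increasing counters instead of being tracked with a decrement path. A self-contained sketch:

    #include <stdio.h>

    struct prio_stats {
        unsigned long inserted;
        unsigned long completed;
    };

    /* In-flight count as the difference of two monotonic counters, so
     * no decrement path (and no underflow) is ever needed. */
    static unsigned long queued(const struct prio_stats *s)
    {
        return s->inserted - s->completed;
    }

    int main(void)
    {
        struct prio_stats s = { .inserted = 10, .completed = 7 };

        printf("queued=%lu\n", queued(&s));
        return 0;
    }
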
1678 +diff --git a/drivers/base/test/Makefile b/drivers/base/test/Makefile
1679 +index 64b2f3d744d51..7f76fee6f989d 100644
1680 +--- a/drivers/base/test/Makefile
1681 ++++ b/drivers/base/test/Makefile
1682 +@@ -2,4 +2,4 @@
1683 + obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE) += test_async_driver_probe.o
1684 +
1685 + obj-$(CONFIG_DRIVER_PE_KUNIT_TEST) += property-entry-test.o
1686 +-CFLAGS_REMOVE_property-entry-test.o += -fplugin-arg-structleak_plugin-byref -fplugin-arg-structleak_plugin-byref-all
1687 ++CFLAGS_property-entry-test.o += $(DISABLE_STRUCTLEAK_PLUGIN)
1688 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1689 +index d60096b3b2c2a..cd8cc7d31b49c 100644
1690 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1691 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1692 +@@ -2342,10 +2342,6 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
1693 + if (r)
1694 + goto init_failed;
1695 +
1696 +- r = amdgpu_amdkfd_resume_iommu(adev);
1697 +- if (r)
1698 +- goto init_failed;
1699 +-
1700 + r = amdgpu_device_ip_hw_init_phase1(adev);
1701 + if (r)
1702 + goto init_failed;
1703 +@@ -2384,6 +2380,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
1704 + if (!adev->gmc.xgmi.pending_reset)
1705 + amdgpu_amdkfd_device_init(adev);
1706 +
1707 ++ r = amdgpu_amdkfd_resume_iommu(adev);
1708 ++ if (r)
1709 ++ goto init_failed;
1710 ++
1711 + amdgpu_fru_get_product_info(adev);
1712 +
1713 + init_failed:
1714 +diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
1715 +index 7dffc04a557ea..127667e549c19 100644
1716 +--- a/drivers/gpu/drm/amd/display/Kconfig
1717 ++++ b/drivers/gpu/drm/amd/display/Kconfig
1718 +@@ -25,6 +25,8 @@ config DRM_AMD_DC_HDCP
1719 +
1720 + config DRM_AMD_DC_SI
1721 + bool "AMD DC support for Southern Islands ASICs"
1722 ++ depends on DRM_AMDGPU_SI
1723 ++ depends on DRM_AMD_DC
1724 + default n
1725 + help
1726 + Choose this option to enable new AMD DC support for SI asics
1727 +diff --git a/drivers/gpu/drm/kmb/kmb_crtc.c b/drivers/gpu/drm/kmb/kmb_crtc.c
1728 +index 44327bc629ca0..06613ffeaaf85 100644
1729 +--- a/drivers/gpu/drm/kmb/kmb_crtc.c
1730 ++++ b/drivers/gpu/drm/kmb/kmb_crtc.c
1731 +@@ -66,7 +66,8 @@ static const struct drm_crtc_funcs kmb_crtc_funcs = {
1732 + .disable_vblank = kmb_crtc_disable_vblank,
1733 + };
1734 +
1735 +-static void kmb_crtc_set_mode(struct drm_crtc *crtc)
1736 ++static void kmb_crtc_set_mode(struct drm_crtc *crtc,
1737 ++ struct drm_atomic_state *old_state)
1738 + {
1739 + struct drm_device *dev = crtc->dev;
1740 + struct drm_display_mode *m = &crtc->state->adjusted_mode;
1741 +@@ -75,7 +76,7 @@ static void kmb_crtc_set_mode(struct drm_crtc *crtc)
1742 + unsigned int val = 0;
1743 +
1744 + /* Initialize mipi */
1745 +- kmb_dsi_mode_set(kmb->kmb_dsi, m, kmb->sys_clk_mhz);
1746 ++ kmb_dsi_mode_set(kmb->kmb_dsi, m, kmb->sys_clk_mhz, old_state);
1747 + drm_info(dev,
1748 + "vfp= %d vbp= %d vsync_len=%d hfp=%d hbp=%d hsync_len=%d\n",
1749 + m->crtc_vsync_start - m->crtc_vdisplay,
1750 +@@ -138,7 +139,7 @@ static void kmb_crtc_atomic_enable(struct drm_crtc *crtc,
1751 + struct kmb_drm_private *kmb = crtc_to_kmb_priv(crtc);
1752 +
1753 + clk_prepare_enable(kmb->kmb_clk.clk_lcd);
1754 +- kmb_crtc_set_mode(crtc);
1755 ++ kmb_crtc_set_mode(crtc, state);
1756 + drm_crtc_vblank_on(crtc);
1757 + }
1758 +
1759 +@@ -185,11 +186,45 @@ static void kmb_crtc_atomic_flush(struct drm_crtc *crtc,
1760 + spin_unlock_irq(&crtc->dev->event_lock);
1761 + }
1762 +
1763 ++static enum drm_mode_status
1764 ++ kmb_crtc_mode_valid(struct drm_crtc *crtc,
1765 ++ const struct drm_display_mode *mode)
1766 ++{
1767 ++ int refresh;
1768 ++ struct drm_device *dev = crtc->dev;
1769 ++ int vfp = mode->vsync_start - mode->vdisplay;
1770 ++
1771 ++ if (mode->vdisplay < KMB_CRTC_MAX_HEIGHT) {
1772 ++ drm_dbg(dev, "height = %d less than %d",
1773 ++ mode->vdisplay, KMB_CRTC_MAX_HEIGHT);
1774 ++ return MODE_BAD_VVALUE;
1775 ++ }
1776 ++ if (mode->hdisplay < KMB_CRTC_MAX_WIDTH) {
1777 ++ drm_dbg(dev, "width = %d less than %d",
1778 ++ mode->hdisplay, KMB_CRTC_MAX_WIDTH);
1779 ++ return MODE_BAD_HVALUE;
1780 ++ }
1781 ++ refresh = drm_mode_vrefresh(mode);
1782 ++ if (refresh < KMB_MIN_VREFRESH || refresh > KMB_MAX_VREFRESH) {
1783 ++ drm_dbg(dev, "refresh = %d less than %d or greater than %d",
1784 ++ refresh, KMB_MIN_VREFRESH, KMB_MAX_VREFRESH);
1785 ++ return MODE_BAD;
1786 ++ }
1787 ++
1788 ++ if (vfp < KMB_CRTC_MIN_VFP) {
1789 ++ drm_dbg(dev, "vfp = %d less than %d", vfp, KMB_CRTC_MIN_VFP);
1790 ++ return MODE_BAD;
1791 ++ }
1792 ++
1793 ++ return MODE_OK;
1794 ++}
1795 ++
1796 + static const struct drm_crtc_helper_funcs kmb_crtc_helper_funcs = {
1797 + .atomic_begin = kmb_crtc_atomic_begin,
1798 + .atomic_enable = kmb_crtc_atomic_enable,
1799 + .atomic_disable = kmb_crtc_atomic_disable,
1800 + .atomic_flush = kmb_crtc_atomic_flush,
1801 ++ .mode_valid = kmb_crtc_mode_valid,
1802 + };
1803 +
1804 + int kmb_setup_crtc(struct drm_device *drm)
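
The new kmb_crtc_mode_valid() callback rejects modes outside the panel's fixed 1920x1080, 59-60 Hz window before they ever reach the atomic commit path. A standalone sketch of the same validate-early shape, with the limits and status codes simplified to the effective behaviour (the hardware is pinned to a single resolution):

    #include <stdio.h>

    enum mode_status { MODE_OK, MODE_BAD_HVALUE, MODE_BAD_VVALUE, MODE_BAD };

    struct display_mode { int hdisplay, vdisplay, vrefresh; };

    #define MAX_WIDTH    1920
    #define MAX_HEIGHT   1080
    #define MIN_VREFRESH 59
    #define MAX_VREFRESH 60

    /* Validate a mode against hard platform limits so invalid requests
     * fail fast, before any hardware state is touched. */
    static enum mode_status mode_valid(const struct display_mode *m)
    {
        if (m->vdisplay != MAX_HEIGHT)
            return MODE_BAD_VVALUE;
        if (m->hdisplay != MAX_WIDTH)
            return MODE_BAD_HVALUE;
        if (m->vrefresh < MIN_VREFRESH || m->vrefresh > MAX_VREFRESH)
            return MODE_BAD;
        return MODE_OK;
    }

    int main(void)
    {
        struct display_mode ok = { 1920, 1080, 60 };
        struct display_mode bad = { 1280, 720, 60 };

        printf("ok=%d bad=%d\n", mode_valid(&ok), mode_valid(&bad));
        return 0;
    }
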
1805 +diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c
1806 +index f54392ec4faba..d3091bf38cc02 100644
1807 +--- a/drivers/gpu/drm/kmb/kmb_drv.c
1808 ++++ b/drivers/gpu/drm/kmb/kmb_drv.c
1809 +@@ -173,10 +173,10 @@ static int kmb_setup_mode_config(struct drm_device *drm)
1810 + ret = drmm_mode_config_init(drm);
1811 + if (ret)
1812 + return ret;
1813 +- drm->mode_config.min_width = KMB_MIN_WIDTH;
1814 +- drm->mode_config.min_height = KMB_MIN_HEIGHT;
1815 +- drm->mode_config.max_width = KMB_MAX_WIDTH;
1816 +- drm->mode_config.max_height = KMB_MAX_HEIGHT;
1817 ++ drm->mode_config.min_width = KMB_FB_MIN_WIDTH;
1818 ++ drm->mode_config.min_height = KMB_FB_MIN_HEIGHT;
1819 ++ drm->mode_config.max_width = KMB_FB_MAX_WIDTH;
1820 ++ drm->mode_config.max_height = KMB_FB_MAX_HEIGHT;
1821 + drm->mode_config.funcs = &kmb_mode_config_funcs;
1822 +
1823 + ret = kmb_setup_crtc(drm);
1824 +@@ -381,7 +381,7 @@ static irqreturn_t handle_lcd_irq(struct drm_device *dev)
1825 + if (val & LAYER3_DMA_FIFO_UNDERFLOW)
1826 + drm_dbg(&kmb->drm,
1827 + "LAYER3:GL1 DMA UNDERFLOW val = 0x%lx", val);
1828 +- if (val & LAYER3_DMA_FIFO_UNDERFLOW)
1829 ++ if (val & LAYER3_DMA_FIFO_OVERFLOW)
1830 + drm_dbg(&kmb->drm,
1831 + "LAYER3:GL1 DMA OVERFLOW val = 0x%lx", val);
1832 + }
1833 +diff --git a/drivers/gpu/drm/kmb/kmb_drv.h b/drivers/gpu/drm/kmb/kmb_drv.h
1834 +index ebbaa5f422d59..bf085e95b28f4 100644
1835 +--- a/drivers/gpu/drm/kmb/kmb_drv.h
1836 ++++ b/drivers/gpu/drm/kmb/kmb_drv.h
1837 +@@ -20,6 +20,18 @@
1838 + #define DRIVER_MAJOR 1
1839 + #define DRIVER_MINOR 1
1840 +
1841 ++/* Platform definitions */
1842 ++#define KMB_CRTC_MIN_VFP 4
1843 ++#define KMB_CRTC_MAX_WIDTH 1920 /* max width in pixels */
1844 ++#define KMB_CRTC_MAX_HEIGHT 1080 /* max height in pixels */
1845 ++#define KMB_CRTC_MIN_WIDTH 1920
1846 ++#define KMB_CRTC_MIN_HEIGHT 1080
1847 ++#define KMB_FB_MAX_WIDTH 1920
1848 ++#define KMB_FB_MAX_HEIGHT 1080
1849 ++#define KMB_FB_MIN_WIDTH 1
1850 ++#define KMB_FB_MIN_HEIGHT 1
1851 ++#define KMB_MIN_VREFRESH 59 /*vertical refresh in Hz */
1852 ++#define KMB_MAX_VREFRESH 60 /*vertical refresh in Hz */
1853 + #define KMB_LCD_DEFAULT_CLK 200000000
1854 + #define KMB_SYS_CLK_MHZ 500
1855 +
1856 +@@ -45,6 +57,7 @@ struct kmb_drm_private {
1857 + spinlock_t irq_lock;
1858 + int irq_lcd;
1859 + int sys_clk_mhz;
1860 ++ struct disp_cfg init_disp_cfg[KMB_MAX_PLANES];
1861 + struct layer_status plane_status[KMB_MAX_PLANES];
1862 + int kmb_under_flow;
1863 + int kmb_flush_done;
1864 +diff --git a/drivers/gpu/drm/kmb/kmb_dsi.c b/drivers/gpu/drm/kmb/kmb_dsi.c
1865 +index 231041b269f53..756490589e0ad 100644
1866 +--- a/drivers/gpu/drm/kmb/kmb_dsi.c
1867 ++++ b/drivers/gpu/drm/kmb/kmb_dsi.c
1868 +@@ -482,6 +482,10 @@ static u32 mipi_tx_fg_section_cfg(struct kmb_dsi *kmb_dsi,
1869 + return 0;
1870 + }
1871 +
1872 ++#define CLK_DIFF_LOW 50
1873 ++#define CLK_DIFF_HI 60
1874 ++#define SYSCLK_500 500
1875 ++
1876 + static void mipi_tx_fg_cfg_regs(struct kmb_dsi *kmb_dsi, u8 frame_gen,
1877 + struct mipi_tx_frame_timing_cfg *fg_cfg)
1878 + {
1879 +@@ -492,7 +496,12 @@ static void mipi_tx_fg_cfg_regs(struct kmb_dsi *kmb_dsi, u8 frame_gen,
1880 + /* 500 Mhz system clock minus 50 to account for the difference in
1881 + * MIPI clock speed in RTL tests
1882 + */
1883 +- sysclk = kmb_dsi->sys_clk_mhz - 50;
1884 ++ if (kmb_dsi->sys_clk_mhz == SYSCLK_500) {
1885 ++ sysclk = kmb_dsi->sys_clk_mhz - CLK_DIFF_LOW;
1886 ++ } else {
1887 ++ /* 700 Mhz clk*/
1888 ++ sysclk = kmb_dsi->sys_clk_mhz - CLK_DIFF_HI;
1889 ++ }
1890 +
1891 + /* PPL-Pixel Packing Layer, LLP-Low Level Protocol
1892 + * Frame genartor timing parameters are clocked on the system clock,
1893 +@@ -1322,7 +1331,8 @@ static u32 mipi_tx_init_dphy(struct kmb_dsi *kmb_dsi,
1894 + return 0;
1895 + }
1896 +
1897 +-static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi)
1898 ++static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi,
1899 ++ struct drm_atomic_state *old_state)
1900 + {
1901 + struct regmap *msscam;
1902 +
1903 +@@ -1331,7 +1341,7 @@ static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi)
1904 + dev_dbg(kmb_dsi->dev, "failed to get msscam syscon");
1905 + return;
1906 + }
1907 +-
1908 ++ drm_atomic_bridge_chain_enable(adv_bridge, old_state);
1909 + /* DISABLE MIPI->CIF CONNECTION */
1910 + regmap_write(msscam, MSS_MIPI_CIF_CFG, 0);
1911 +
1912 +@@ -1342,7 +1352,7 @@ static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi)
1913 + }
1914 +
1915 + int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode,
1916 +- int sys_clk_mhz)
1917 ++ int sys_clk_mhz, struct drm_atomic_state *old_state)
1918 + {
1919 + u64 data_rate;
1920 +
1921 +@@ -1384,18 +1394,13 @@ int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode,
1922 + mipi_tx_init_cfg.lane_rate_mbps = data_rate;
1923 + }
1924 +
1925 +- kmb_write_mipi(kmb_dsi, DPHY_ENABLE, 0);
1926 +- kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL0, 0);
1927 +- kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL1, 0);
1928 +- kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL2, 0);
1929 +-
1930 + /* Initialize mipi controller */
1931 + mipi_tx_init_cntrl(kmb_dsi, &mipi_tx_init_cfg);
1932 +
1933 + /* Dphy initialization */
1934 + mipi_tx_init_dphy(kmb_dsi, &mipi_tx_init_cfg);
1935 +
1936 +- connect_lcd_to_mipi(kmb_dsi);
1937 ++ connect_lcd_to_mipi(kmb_dsi, old_state);
1938 + dev_info(kmb_dsi->dev, "mipi hw initialized");
1939 +
1940 + return 0;
1941 +diff --git a/drivers/gpu/drm/kmb/kmb_dsi.h b/drivers/gpu/drm/kmb/kmb_dsi.h
1942 +index 66b7c500d9bcf..09dc88743d779 100644
1943 +--- a/drivers/gpu/drm/kmb/kmb_dsi.h
1944 ++++ b/drivers/gpu/drm/kmb/kmb_dsi.h
1945 +@@ -380,7 +380,7 @@ int kmb_dsi_host_bridge_init(struct device *dev);
1946 + struct kmb_dsi *kmb_dsi_init(struct platform_device *pdev);
1947 + void kmb_dsi_host_unregister(struct kmb_dsi *kmb_dsi);
1948 + int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode,
1949 +- int sys_clk_mhz);
1950 ++ int sys_clk_mhz, struct drm_atomic_state *old_state);
1951 + int kmb_dsi_map_mmio(struct kmb_dsi *kmb_dsi);
1952 + int kmb_dsi_clk_init(struct kmb_dsi *kmb_dsi);
1953 + int kmb_dsi_encoder_init(struct drm_device *dev, struct kmb_dsi *kmb_dsi);
1954 +diff --git a/drivers/gpu/drm/kmb/kmb_plane.c b/drivers/gpu/drm/kmb/kmb_plane.c
1955 +index ecee6782612d8..00404ba4126dd 100644
1956 +--- a/drivers/gpu/drm/kmb/kmb_plane.c
1957 ++++ b/drivers/gpu/drm/kmb/kmb_plane.c
1958 +@@ -67,8 +67,21 @@ static const u32 kmb_formats_v[] = {
1959 +
1960 + static unsigned int check_pixel_format(struct drm_plane *plane, u32 format)
1961 + {
1962 ++ struct kmb_drm_private *kmb;
1963 ++ struct kmb_plane *kmb_plane = to_kmb_plane(plane);
1964 + int i;
1965 ++ int plane_id = kmb_plane->id;
1966 ++ struct disp_cfg init_disp_cfg;
1967 +
1968 ++ kmb = to_kmb(plane->dev);
1969 ++ init_disp_cfg = kmb->init_disp_cfg[plane_id];
1970 ++ /* Due to HW limitations, changing pixel format after initial
1971 ++ * plane configuration is not supported.
1972 ++ */
1973 ++ if (init_disp_cfg.format && init_disp_cfg.format != format) {
1974 ++ drm_dbg(&kmb->drm, "Cannot change format after initial plane configuration");
1975 ++ return -EINVAL;
1976 ++ }
1977 + for (i = 0; i < plane->format_count; i++) {
1978 + if (plane->format_types[i] == format)
1979 + return 0;
1980 +@@ -81,11 +94,17 @@ static int kmb_plane_atomic_check(struct drm_plane *plane,
1981 + {
1982 + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
1983 + plane);
1984 ++ struct kmb_drm_private *kmb;
1985 ++ struct kmb_plane *kmb_plane = to_kmb_plane(plane);
1986 ++ int plane_id = kmb_plane->id;
1987 ++ struct disp_cfg init_disp_cfg;
1988 + struct drm_framebuffer *fb;
1989 + int ret;
1990 + struct drm_crtc_state *crtc_state;
1991 + bool can_position;
1992 +
1993 ++ kmb = to_kmb(plane->dev);
1994 ++ init_disp_cfg = kmb->init_disp_cfg[plane_id];
1995 + fb = new_plane_state->fb;
1996 + if (!fb || !new_plane_state->crtc)
1997 + return 0;
1998 +@@ -94,10 +113,21 @@ static int kmb_plane_atomic_check(struct drm_plane *plane,
1999 + if (ret)
2000 + return ret;
2001 +
2002 +- if (new_plane_state->crtc_w > KMB_MAX_WIDTH || new_plane_state->crtc_h > KMB_MAX_HEIGHT)
2003 ++ if (new_plane_state->crtc_w > KMB_FB_MAX_WIDTH ||
2004 ++ new_plane_state->crtc_h > KMB_FB_MAX_HEIGHT ||
2005 ++ new_plane_state->crtc_w < KMB_FB_MIN_WIDTH ||
2006 ++ new_plane_state->crtc_h < KMB_FB_MIN_HEIGHT)
2007 + return -EINVAL;
2008 +- if (new_plane_state->crtc_w < KMB_MIN_WIDTH || new_plane_state->crtc_h < KMB_MIN_HEIGHT)
2009 ++
2010 ++ /* Due to HW limitations, changing plane height or width after
2011 ++ * initial plane configuration is not supported.
2012 ++ */
2013 ++ if ((init_disp_cfg.width && init_disp_cfg.height) &&
2014 ++ (init_disp_cfg.width != fb->width ||
2015 ++ init_disp_cfg.height != fb->height)) {
2016 ++ drm_dbg(&kmb->drm, "Cannot change plane height or width after initial configuration");
2017 + return -EINVAL;
2018 ++ }
2019 + can_position = (plane->type == DRM_PLANE_TYPE_OVERLAY);
2020 + crtc_state =
2021 + drm_atomic_get_existing_crtc_state(state,
2022 +@@ -277,6 +307,44 @@ static void config_csc(struct kmb_drm_private *kmb, int plane_id)
2023 + kmb_write_lcd(kmb, LCD_LAYERn_CSC_OFF3(plane_id), csc_coef_lcd[11]);
2024 + }
2025 +
2026 ++static void kmb_plane_set_alpha(struct kmb_drm_private *kmb,
2027 ++ const struct drm_plane_state *state,
2028 ++ unsigned char plane_id,
2029 ++ unsigned int *val)
2030 ++{
2031 ++ u16 plane_alpha = state->alpha;
2032 ++ u16 pixel_blend_mode = state->pixel_blend_mode;
2033 ++ int has_alpha = state->fb->format->has_alpha;
2034 ++
2035 ++ if (plane_alpha != DRM_BLEND_ALPHA_OPAQUE)
2036 ++ *val |= LCD_LAYER_ALPHA_STATIC;
2037 ++
2038 ++ if (has_alpha) {
2039 ++ switch (pixel_blend_mode) {
2040 ++ case DRM_MODE_BLEND_PIXEL_NONE:
2041 ++ break;
2042 ++ case DRM_MODE_BLEND_PREMULTI:
2043 ++ *val |= LCD_LAYER_ALPHA_EMBED | LCD_LAYER_ALPHA_PREMULT;
2044 ++ break;
2045 ++ case DRM_MODE_BLEND_COVERAGE:
2046 ++ *val |= LCD_LAYER_ALPHA_EMBED;
2047 ++ break;
2048 ++ default:
2049 ++ DRM_DEBUG("Missing pixel blend mode case (%s == %ld)\n",
2050 ++ __stringify(pixel_blend_mode),
2051 ++ (long)pixel_blend_mode);
2052 ++ break;
2053 ++ }
2054 ++ }
2055 ++
2056 ++ if (plane_alpha == DRM_BLEND_ALPHA_OPAQUE && !has_alpha) {
2057 ++ *val &= LCD_LAYER_ALPHA_DISABLED;
2058 ++ return;
2059 ++ }
2060 ++
2061 ++ kmb_write_lcd(kmb, LCD_LAYERn_ALPHA(plane_id), plane_alpha);
2062 ++}
2063 ++
2064 + static void kmb_plane_atomic_update(struct drm_plane *plane,
2065 + struct drm_atomic_state *state)
2066 + {
2067 +@@ -296,6 +364,7 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
2068 + unsigned char plane_id;
2069 + int num_planes;
2070 + static dma_addr_t addr[MAX_SUB_PLANES];
2071 ++ struct disp_cfg *init_disp_cfg;
2072 +
2073 + if (!plane || !new_plane_state || !old_plane_state)
2074 + return;
2075 +@@ -303,11 +372,12 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
2076 + fb = new_plane_state->fb;
2077 + if (!fb)
2078 + return;
2079 ++
2080 + num_planes = fb->format->num_planes;
2081 + kmb_plane = to_kmb_plane(plane);
2082 +- plane_id = kmb_plane->id;
2083 +
2084 + kmb = to_kmb(plane->dev);
2085 ++ plane_id = kmb_plane->id;
2086 +
2087 + spin_lock_irq(&kmb->irq_lock);
2088 + if (kmb->kmb_under_flow || kmb->kmb_flush_done) {
2089 +@@ -317,7 +387,8 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
2090 + }
2091 + spin_unlock_irq(&kmb->irq_lock);
2092 +
2093 +- src_w = (new_plane_state->src_w >> 16);
2094 ++ init_disp_cfg = &kmb->init_disp_cfg[plane_id];
2095 ++ src_w = new_plane_state->src_w >> 16;
2096 + src_h = new_plane_state->src_h >> 16;
2097 + crtc_x = new_plane_state->crtc_x;
2098 + crtc_y = new_plane_state->crtc_y;
2099 +@@ -400,20 +471,32 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
2100 + config_csc(kmb, plane_id);
2101 + }
2102 +
2103 ++ kmb_plane_set_alpha(kmb, plane->state, plane_id, &val);
2104 ++
2105 + kmb_write_lcd(kmb, LCD_LAYERn_CFG(plane_id), val);
2106 +
2107 ++ /* Configure LCD_CONTROL */
2108 ++ ctrl = kmb_read_lcd(kmb, LCD_CONTROL);
2109 ++
2110 ++ /* Set layer blending config */
2111 ++ ctrl &= ~LCD_CTRL_ALPHA_ALL;
2112 ++ ctrl |= LCD_CTRL_ALPHA_BOTTOM_VL1 |
2113 ++ LCD_CTRL_ALPHA_BLEND_VL2;
2114 ++
2115 ++ ctrl &= ~LCD_CTRL_ALPHA_BLEND_BKGND_DISABLE;
2116 ++
2117 + switch (plane_id) {
2118 + case LAYER_0:
2119 +- ctrl = LCD_CTRL_VL1_ENABLE;
2120 ++ ctrl |= LCD_CTRL_VL1_ENABLE;
2121 + break;
2122 + case LAYER_1:
2123 +- ctrl = LCD_CTRL_VL2_ENABLE;
2124 ++ ctrl |= LCD_CTRL_VL2_ENABLE;
2125 + break;
2126 + case LAYER_2:
2127 +- ctrl = LCD_CTRL_GL1_ENABLE;
2128 ++ ctrl |= LCD_CTRL_GL1_ENABLE;
2129 + break;
2130 + case LAYER_3:
2131 +- ctrl = LCD_CTRL_GL2_ENABLE;
2132 ++ ctrl |= LCD_CTRL_GL2_ENABLE;
2133 + break;
2134 + }
2135 +
2136 +@@ -425,7 +508,7 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
2137 + */
2138 + ctrl |= LCD_CTRL_VHSYNC_IDLE_LVL;
2139 +
2140 +- kmb_set_bitmask_lcd(kmb, LCD_CONTROL, ctrl);
2141 ++ kmb_write_lcd(kmb, LCD_CONTROL, ctrl);
2142 +
2143 + /* Enable pipeline AXI read transactions for the DMA
2144 + * after setting graphics layers. This must be done
2145 +@@ -448,6 +531,16 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
2146 +
2147 + /* Enable DMA */
2148 + kmb_write_lcd(kmb, LCD_LAYERn_DMA_CFG(plane_id), dma_cfg);
2149 ++
2150 ++ /* Save initial display config */
2151 ++ if (!init_disp_cfg->width ||
2152 ++ !init_disp_cfg->height ||
2153 ++ !init_disp_cfg->format) {
2154 ++ init_disp_cfg->width = width;
2155 ++ init_disp_cfg->height = height;
2156 ++ init_disp_cfg->format = fb->format->format;
2157 ++ }
2158 ++
2159 + drm_dbg(&kmb->drm, "dma_cfg=0x%x LCD_DMA_CFG=0x%x\n", dma_cfg,
2160 + kmb_read_lcd(kmb, LCD_LAYERn_DMA_CFG(plane_id)));
2161 +
2162 +@@ -490,6 +583,9 @@ struct kmb_plane *kmb_plane_init(struct drm_device *drm)
2163 + enum drm_plane_type plane_type;
2164 + const u32 *plane_formats;
2165 + int num_plane_formats;
2166 ++ unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
2167 ++ BIT(DRM_MODE_BLEND_PREMULTI) |
2168 ++ BIT(DRM_MODE_BLEND_COVERAGE);
2169 +
2170 + for (i = 0; i < KMB_MAX_PLANES; i++) {
2171 + plane = drmm_kzalloc(drm, sizeof(*plane), GFP_KERNEL);
2172 +@@ -521,8 +617,16 @@ struct kmb_plane *kmb_plane_init(struct drm_device *drm)
2173 + drm_dbg(drm, "%s : %d i=%d type=%d",
2174 + __func__, __LINE__,
2175 + i, plane_type);
2176 ++ drm_plane_create_alpha_property(&plane->base_plane);
2177 ++
2178 ++ drm_plane_create_blend_mode_property(&plane->base_plane,
2179 ++ blend_caps);
2180 ++
2181 ++ drm_plane_create_zpos_immutable_property(&plane->base_plane, i);
2182 ++
2183 + drm_plane_helper_add(&plane->base_plane,
2184 + &kmb_plane_helper_funcs);
2185 ++
2186 + if (plane_type == DRM_PLANE_TYPE_PRIMARY) {
2187 + primary = plane;
2188 + kmb->plane = plane;
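
kmb_plane_set_alpha() above translates the generic DRM alpha and pixel-blend-mode properties into layer-configuration register bits. The translation, reduced to a sketch with an invented bit layout:

    #include <stdio.h>

    #define ALPHA_STATIC  (1u << 0)     /* illustrative bit layout */
    #define ALPHA_EMBED   (1u << 1)
    #define ALPHA_PREMULT (1u << 2)

    enum blend_mode { BLEND_PIXEL_NONE, BLEND_PREMULTI, BLEND_COVERAGE };

    static unsigned int blend_bits(enum blend_mode mode, int has_alpha,
                                   unsigned int plane_alpha)
    {
        unsigned int val = 0;

        if (plane_alpha != 0xffff)      /* not fully opaque */
            val |= ALPHA_STATIC;

        if (has_alpha) {
            switch (mode) {
            case BLEND_PREMULTI:
                val |= ALPHA_EMBED | ALPHA_PREMULT;
                break;
            case BLEND_COVERAGE:
                val |= ALPHA_EMBED;
                break;
            case BLEND_PIXEL_NONE:
                break;
            }
        }
        return val;
    }

    int main(void)
    {
        printf("premulti: 0x%x\n", blend_bits(BLEND_PREMULTI, 1, 0xffff));
        printf("coverage, 50%%: 0x%x\n", blend_bits(BLEND_COVERAGE, 1, 0x7fff));
        return 0;
    }
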
2189 +diff --git a/drivers/gpu/drm/kmb/kmb_plane.h b/drivers/gpu/drm/kmb/kmb_plane.h
2190 +index 486490f7a3ec5..b51144044fe8e 100644
2191 +--- a/drivers/gpu/drm/kmb/kmb_plane.h
2192 ++++ b/drivers/gpu/drm/kmb/kmb_plane.h
2193 +@@ -35,6 +35,9 @@
2194 + #define POSSIBLE_CRTCS 1
2195 + #define to_kmb_plane(x) container_of(x, struct kmb_plane, base_plane)
2196 +
2197 ++#define POSSIBLE_CRTCS 1
2198 ++#define KMB_MAX_PLANES 2
2199 ++
2200 + enum layer_id {
2201 + LAYER_0,
2202 + LAYER_1,
2203 +@@ -43,8 +46,6 @@ enum layer_id {
2204 + /* KMB_MAX_PLANES */
2205 + };
2206 +
2207 +-#define KMB_MAX_PLANES 1
2208 +-
2209 + enum sub_plane_id {
2210 + Y_PLANE,
2211 + U_PLANE,
2212 +@@ -62,6 +63,12 @@ struct layer_status {
2213 + u32 ctrl;
2214 + };
2215 +
2216 ++struct disp_cfg {
2217 ++ unsigned int width;
2218 ++ unsigned int height;
2219 ++ unsigned int format;
2220 ++};
2221 ++
2222 + struct kmb_plane *kmb_plane_init(struct drm_device *drm);
2223 + void kmb_plane_destroy(struct drm_plane *plane);
2224 + #endif /* __KMB_PLANE_H__ */
2225 +diff --git a/drivers/gpu/drm/kmb/kmb_regs.h b/drivers/gpu/drm/kmb/kmb_regs.h
2226 +index 48150569f7025..9756101b0d32f 100644
2227 +--- a/drivers/gpu/drm/kmb/kmb_regs.h
2228 ++++ b/drivers/gpu/drm/kmb/kmb_regs.h
2229 +@@ -43,8 +43,10 @@
2230 + #define LCD_CTRL_OUTPUT_ENABLED BIT(19)
2231 + #define LCD_CTRL_BPORCH_ENABLE BIT(21)
2232 + #define LCD_CTRL_FPORCH_ENABLE BIT(22)
2233 ++#define LCD_CTRL_ALPHA_BLEND_BKGND_DISABLE BIT(23)
2234 + #define LCD_CTRL_PIPELINE_DMA BIT(28)
2235 + #define LCD_CTRL_VHSYNC_IDLE_LVL BIT(31)
2236 ++#define LCD_CTRL_ALPHA_ALL (0xff << 6)
2237 +
2238 + /* interrupts */
2239 + #define LCD_INT_STATUS (0x4 * 0x001)
2240 +@@ -115,6 +117,7 @@
2241 + #define LCD_LAYER_ALPHA_EMBED BIT(5)
2242 + #define LCD_LAYER_ALPHA_COMBI (LCD_LAYER_ALPHA_STATIC | \
2243 + LCD_LAYER_ALPHA_EMBED)
2244 ++#define LCD_LAYER_ALPHA_DISABLED ~(LCD_LAYER_ALPHA_COMBI)
2245 + /* RGB multiplied with alpha */
2246 + #define LCD_LAYER_ALPHA_PREMULT BIT(6)
2247 + #define LCD_LAYER_INVERT_COL BIT(7)
2248 +diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
2249 +index b349692219b77..c95985792076f 100644
2250 +--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
2251 ++++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
2252 +@@ -296,6 +296,8 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
2253 + u32 val;
2254 + int request, ack;
2255 +
2256 ++ WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
2257 ++
2258 + if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
2259 + return -EINVAL;
2260 +
2261 +@@ -337,6 +339,8 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
2262 + {
2263 + int bit;
2264 +
2265 ++ WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
2266 ++
2267 + if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
2268 + return;
2269 +
2270 +@@ -1478,6 +1482,8 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
2271 + if (!pdev)
2272 + return -ENODEV;
2273 +
2274 ++ mutex_init(&gmu->lock);
2275 ++
2276 + gmu->dev = &pdev->dev;
2277 +
2278 + of_dma_configure(gmu->dev, node, true);
2279 +diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
2280 +index 71dfa60070cc0..19c1a0ddee7a0 100644
2281 +--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
2282 ++++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
2283 +@@ -44,6 +44,9 @@ struct a6xx_gmu_bo {
2284 + struct a6xx_gmu {
2285 + struct device *dev;
2286 +
2287 ++ /* For serializing communication with the GMU: */
2288 ++ struct mutex lock;
2289 ++
2290 + struct msm_gem_address_space *aspace;
2291 +
2292 + void * __iomem mmio;
2293 +diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
2294 +index 1b3519b821a3f..91c5c6709b6cd 100644
2295 +--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
2296 ++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
2297 +@@ -859,7 +859,7 @@ static int a6xx_zap_shader_init(struct msm_gpu *gpu)
2298 + A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
2299 + A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)
2300 +
2301 +-static int a6xx_hw_init(struct msm_gpu *gpu)
2302 ++static int hw_init(struct msm_gpu *gpu)
2303 + {
2304 + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
2305 + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
2306 +@@ -1107,6 +1107,19 @@ out:
2307 + return ret;
2308 + }
2309 +
2310 ++static int a6xx_hw_init(struct msm_gpu *gpu)
2311 ++{
2312 ++ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
2313 ++ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
2314 ++ int ret;
2315 ++
2316 ++ mutex_lock(&a6xx_gpu->gmu.lock);
2317 ++ ret = hw_init(gpu);
2318 ++ mutex_unlock(&a6xx_gpu->gmu.lock);
2319 ++
2320 ++ return ret;
2321 ++}
2322 ++
2323 + static void a6xx_dump(struct msm_gpu *gpu)
2324 + {
2325 + DRM_DEV_INFO(&gpu->pdev->dev, "status: %08x\n",
2326 +@@ -1481,7 +1494,9 @@ static int a6xx_pm_resume(struct msm_gpu *gpu)
2327 +
2328 + trace_msm_gpu_resume(0);
2329 +
2330 ++ mutex_lock(&a6xx_gpu->gmu.lock);
2331 + ret = a6xx_gmu_resume(a6xx_gpu);
2332 ++ mutex_unlock(&a6xx_gpu->gmu.lock);
2333 + if (ret)
2334 + return ret;
2335 +
2336 +@@ -1504,7 +1519,9 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
2337 +
2338 + devfreq_suspend_device(gpu->devfreq.devfreq);
2339 +
2340 ++ mutex_lock(&a6xx_gpu->gmu.lock);
2341 + ret = a6xx_gmu_stop(a6xx_gpu);
2342 ++ mutex_unlock(&a6xx_gpu->gmu.lock);
2343 + if (ret)
2344 + return ret;
2345 +
2346 +@@ -1519,18 +1536,19 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
2347 + {
2348 + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
2349 + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
2350 +- static DEFINE_MUTEX(perfcounter_oob);
2351 +
2352 +- mutex_lock(&perfcounter_oob);
2353 ++ mutex_lock(&a6xx_gpu->gmu.lock);
2354 +
2355 + /* Force the GPU power on so we can read this register */
2356 + a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
2357 +
2358 + *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
2359 +- REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);
2360 ++ REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);
2361 +
2362 + a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
2363 +- mutex_unlock(&perfcounter_oob);
2364 ++
2365 ++ mutex_unlock(&a6xx_gpu->gmu.lock);
2366 ++
2367 + return 0;
2368 + }
2369 +
2370 +@@ -1594,6 +1612,16 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
2371 + return (unsigned long)busy_time;
2372 + }
2373 +
2374 ++void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
2375 ++{
2376 ++ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
2377 ++ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
2378 ++
2379 ++ mutex_lock(&a6xx_gpu->gmu.lock);
2380 ++ a6xx_gmu_set_freq(gpu, opp);
2381 ++ mutex_unlock(&a6xx_gpu->gmu.lock);
2382 ++}
2383 ++
2384 + static struct msm_gem_address_space *
2385 + a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
2386 + {
2387 +@@ -1740,7 +1768,7 @@ static const struct adreno_gpu_funcs funcs = {
2388 + #endif
2389 + .gpu_busy = a6xx_gpu_busy,
2390 + .gpu_get_freq = a6xx_gmu_get_freq,
2391 +- .gpu_set_freq = a6xx_gmu_set_freq,
2392 ++ .gpu_set_freq = a6xx_gpu_set_freq,
2393 + #if defined(CONFIG_DRM_MSM_GPU_STATE)
2394 + .gpu_state_get = a6xx_gpu_state_get,
2395 + .gpu_state_put = a6xx_gpu_state_put,
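
The a6xx changes funnel every GMU interaction (hw_init, suspend/resume, OOB votes, frequency changes) through the single new gmu->lock instead of a local perfcounter mutex. The same serialization pattern in a userspace sketch with pthreads:

    #include <pthread.h>
    #include <stdio.h>

    /* One lock owns all communication with the shared unit; every entry
     * point takes it, so callers can never interleave command sequences. */
    struct gmu {
        pthread_mutex_t lock;
        int freq;
    };

    static void gmu_set_freq_locked(struct gmu *g, int freq)
    {
        g->freq = freq;                 /* multi-step HW sequence in real life */
    }

    static void gmu_set_freq(struct gmu *g, int freq)
    {
        pthread_mutex_lock(&g->lock);
        gmu_set_freq_locked(g, freq);
        pthread_mutex_unlock(&g->lock);
    }

    int main(void)
    {
        struct gmu g = { PTHREAD_MUTEX_INITIALIZER, 0 };

        gmu_set_freq(&g, 500);
        printf("freq=%d\n", g.freq);
        return 0;
    }
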
2396 +diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
2397 +index c277d3f61a5ef..1c19ca5398e98 100644
2398 +--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
2399 ++++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
2400 +@@ -268,7 +268,11 @@ static void mxsfb_irq_disable(struct drm_device *drm)
2401 + struct mxsfb_drm_private *mxsfb = drm->dev_private;
2402 +
2403 + mxsfb_enable_axi_clk(mxsfb);
2404 +- mxsfb->crtc.funcs->disable_vblank(&mxsfb->crtc);
2405 ++
2406 ++ /* Disable and clear VBLANK IRQ */
2407 ++ writel(CTRL1_CUR_FRAME_DONE_IRQ_EN, mxsfb->base + LCDC_CTRL1 + REG_CLR);
2408 ++ writel(CTRL1_CUR_FRAME_DONE_IRQ, mxsfb->base + LCDC_CTRL1 + REG_CLR);
2409 ++
2410 + mxsfb_disable_axi_clk(mxsfb);
2411 + }
2412 +
2413 +diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
2414 +index 0145129d7c661..534dd7414d428 100644
2415 +--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
2416 ++++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
2417 +@@ -590,14 +590,14 @@ static const struct drm_display_mode k101_im2byl02_default_mode = {
2418 + .clock = 69700,
2419 +
2420 + .hdisplay = 800,
2421 +- .hsync_start = 800 + 6,
2422 +- .hsync_end = 800 + 6 + 15,
2423 +- .htotal = 800 + 6 + 15 + 16,
2424 ++ .hsync_start = 800 + 52,
2425 ++ .hsync_end = 800 + 52 + 8,
2426 ++ .htotal = 800 + 52 + 8 + 48,
2427 +
2428 + .vdisplay = 1280,
2429 +- .vsync_start = 1280 + 8,
2430 +- .vsync_end = 1280 + 8 + 48,
2431 +- .vtotal = 1280 + 8 + 48 + 52,
2432 ++ .vsync_start = 1280 + 16,
2433 ++ .vsync_end = 1280 + 16 + 6,
2434 ++ .vtotal = 1280 + 16 + 6 + 15,
2435 +
2436 + .width_mm = 135,
2437 + .height_mm = 217,
2438 +diff --git a/drivers/iio/test/Makefile b/drivers/iio/test/Makefile
2439 +index f1099b4953014..467519a2027e5 100644
2440 +--- a/drivers/iio/test/Makefile
2441 ++++ b/drivers/iio/test/Makefile
2442 +@@ -5,3 +5,4 @@
2443 +
2444 + # Keep in alphabetical order
2445 + obj-$(CONFIG_IIO_TEST_FORMAT) += iio-test-format.o
2446 ++CFLAGS_iio-test-format.o += $(DISABLE_STRUCTLEAK_PLUGIN)
2447 +diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c
2448 +index 2f5e3ab5ed638..65286762b02ab 100644
2449 +--- a/drivers/input/keyboard/snvs_pwrkey.c
2450 ++++ b/drivers/input/keyboard/snvs_pwrkey.c
2451 +@@ -3,6 +3,7 @@
2452 + // Driver for the IMX SNVS ON/OFF Power Key
2453 + // Copyright (C) 2015 Freescale Semiconductor, Inc. All Rights Reserved.
2454 +
2455 ++#include <linux/clk.h>
2456 + #include <linux/device.h>
2457 + #include <linux/err.h>
2458 + #include <linux/init.h>
2459 +@@ -99,6 +100,11 @@ static irqreturn_t imx_snvs_pwrkey_interrupt(int irq, void *dev_id)
2460 + return IRQ_HANDLED;
2461 + }
2462 +
2463 ++static void imx_snvs_pwrkey_disable_clk(void *data)
2464 ++{
2465 ++ clk_disable_unprepare(data);
2466 ++}
2467 ++
2468 + static void imx_snvs_pwrkey_act(void *pdata)
2469 + {
2470 + struct pwrkey_drv_data *pd = pdata;
2471 +@@ -111,6 +117,7 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
2472 + struct pwrkey_drv_data *pdata;
2473 + struct input_dev *input;
2474 + struct device_node *np;
2475 ++ struct clk *clk;
2476 + int error;
2477 + u32 vid;
2478 +
2479 +@@ -134,6 +141,28 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
2480 + dev_warn(&pdev->dev, "KEY_POWER without setting in dts\n");
2481 + }
2482 +
2483 ++ clk = devm_clk_get_optional(&pdev->dev, NULL);
2484 ++ if (IS_ERR(clk)) {
2485 ++ dev_err(&pdev->dev, "Failed to get snvs clock (%pe)\n", clk);
2486 ++ return PTR_ERR(clk);
2487 ++ }
2488 ++
2489 ++ error = clk_prepare_enable(clk);
2490 ++ if (error) {
2491 ++ dev_err(&pdev->dev, "Failed to enable snvs clock (%pe)\n",
2492 ++ ERR_PTR(error));
2493 ++ return error;
2494 ++ }
2495 ++
2496 ++ error = devm_add_action_or_reset(&pdev->dev,
2497 ++ imx_snvs_pwrkey_disable_clk, clk);
2498 ++ if (error) {
2499 ++ dev_err(&pdev->dev,
2500 ++ "Failed to register clock cleanup handler (%pe)\n",
2501 ++ ERR_PTR(error));
2502 ++ return error;
2503 ++ }
2504 ++
2505 + pdata->wakeup = of_property_read_bool(np, "wakeup-source");
2506 +
2507 + pdata->irq = platform_get_irq(pdev, 0);
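
The snvs_pwrkey fix enables the optional clock and immediately registers imx_snvs_pwrkey_disable_clk() as a managed cleanup action, so every subsequent error path and driver removal releases it automatically. A userspace sketch of that acquire-and-arm-cleanup pattern, with atexit() standing in for devm_add_action_or_reset():

    #include <stdio.h>
    #include <stdlib.h>

    static int clk_enabled;

    static void disable_clk(void)
    {
        if (clk_enabled) {
            clk_enabled = 0;
            puts("clock disabled");
        }
    }

    /* Acquire the resource and register its release in the same step, so
     * no later error path can forget the cleanup. */
    static int enable_clk(void)
    {
        clk_enabled = 1;
        if (atexit(disable_clk) != 0) {
            disable_clk();              /* the "_or_reset" half of the helper */
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        if (enable_clk())
            return 1;
        puts("probe continues; cleanup is already armed");
        return 0;
    }
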
2508 +diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
2509 +index cb0afe8971623..7313454e403a6 100644
2510 +--- a/drivers/isdn/capi/kcapi.c
2511 ++++ b/drivers/isdn/capi/kcapi.c
2512 +@@ -480,6 +480,11 @@ int detach_capi_ctr(struct capi_ctr *ctr)
2513 +
2514 + ctr_down(ctr, CAPI_CTR_DETACHED);
2515 +
2516 ++ if (ctr->cnr < 1 || ctr->cnr - 1 >= CAPI_MAXCONTR) {
2517 ++ err = -EINVAL;
2518 ++ goto unlock_out;
2519 ++ }
2520 ++
2521 + if (capi_controller[ctr->cnr - 1] != ctr) {
2522 + err = -EINVAL;
2523 + goto unlock_out;
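
detach_capi_ctr() now range-checks the 1-based controller number before it is used as an array index. The defensive shape, isolated (constants are illustrative):

    #include <stdio.h>
    #include <errno.h>

    #define MAXCONTR 32

    static void *controllers[MAXCONTR];

    /* Reject out-of-range 1-based controller numbers before the array
     * access, instead of trusting the caller. */
    static int detach(int cnr, void *ctr)
    {
        if (cnr < 1 || cnr - 1 >= MAXCONTR)
            return -EINVAL;
        if (controllers[cnr - 1] != ctr)
            return -EINVAL;
        controllers[cnr - 1] = NULL;
        return 0;
    }

    int main(void)
    {
        int dummy;

        controllers[0] = &dummy;
        printf("valid: %d\n", detach(1, &dummy));
        printf("bogus: %d\n", detach(99, &dummy));
        return 0;
    }
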
2524 +diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
2525 +index 2a1ddd47a0968..a52f275f82634 100644
2526 +--- a/drivers/isdn/hardware/mISDN/netjet.c
2527 ++++ b/drivers/isdn/hardware/mISDN/netjet.c
2528 +@@ -949,8 +949,8 @@ nj_release(struct tiger_hw *card)
2529 + nj_disable_hwirq(card);
2530 + mode_tiger(&card->bc[0], ISDN_P_NONE);
2531 + mode_tiger(&card->bc[1], ISDN_P_NONE);
2532 +- card->isac.release(&card->isac);
2533 + spin_unlock_irqrestore(&card->lock, flags);
2534 ++ card->isac.release(&card->isac);
2535 + release_region(card->base, card->base_s);
2536 + card->base_s = 0;
2537 + }
2538 +diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
2539 +index 00e4533c8bddc..8999ec9455ec2 100644
2540 +--- a/drivers/net/can/rcar/rcar_can.c
2541 ++++ b/drivers/net/can/rcar/rcar_can.c
2542 +@@ -846,10 +846,12 @@ static int __maybe_unused rcar_can_suspend(struct device *dev)
2543 + struct rcar_can_priv *priv = netdev_priv(ndev);
2544 + u16 ctlr;
2545 +
2546 +- if (netif_running(ndev)) {
2547 +- netif_stop_queue(ndev);
2548 +- netif_device_detach(ndev);
2549 +- }
2550 ++ if (!netif_running(ndev))
2551 ++ return 0;
2552 ++
2553 ++ netif_stop_queue(ndev);
2554 ++ netif_device_detach(ndev);
2555 ++
2556 + ctlr = readw(&priv->regs->ctlr);
2557 + ctlr |= RCAR_CAN_CTLR_CANM_HALT;
2558 + writew(ctlr, &priv->regs->ctlr);
2559 +@@ -868,6 +870,9 @@ static int __maybe_unused rcar_can_resume(struct device *dev)
2560 + u16 ctlr;
2561 + int err;
2562 +
2563 ++ if (!netif_running(ndev))
2564 ++ return 0;
2565 ++
2566 + err = clk_enable(priv->clk);
2567 + if (err) {
2568 + netdev_err(ndev, "clk_enable() failed, error %d\n", err);
2569 +@@ -881,10 +886,9 @@ static int __maybe_unused rcar_can_resume(struct device *dev)
2570 + writew(ctlr, &priv->regs->ctlr);
2571 + priv->can.state = CAN_STATE_ERROR_ACTIVE;
2572 +
2573 +- if (netif_running(ndev)) {
2574 +- netif_device_attach(ndev);
2575 +- netif_start_queue(ndev);
2576 +- }
2577 ++ netif_device_attach(ndev);
2578 ++ netif_start_queue(ndev);
2579 ++
2580 + return 0;
2581 + }
2582 +
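
Both rcar_can PM callbacks now bail out immediately when the interface is down, keeping suspend and resume symmetric: either both touch the hardware or neither does. The guard-clause shape in miniature:

    #include <stdbool.h>
    #include <stdio.h>

    static bool running;
    static int hw_ops;

    static int can_suspend(void)
    {
        if (!running)
            return 0;           /* nothing was started, so stop nothing */
        hw_ops++;               /* halt controller, detach queue, ... */
        return 0;
    }

    static int can_resume(void)
    {
        if (!running)
            return 0;           /* mirror of the suspend guard */
        hw_ops++;               /* re-enable clock, attach queue, ... */
        return 0;
    }

    int main(void)
    {
        can_suspend();
        can_resume();
        printf("hw_ops while down: %d\n", hw_ops);  /* 0 */
        running = true;
        can_suspend();
        can_resume();
        printf("hw_ops while up: %d\n", hw_ops);    /* 2 */
        return 0;
    }
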
2583 +diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
2584 +index 84eac8cb86869..15bca07dc7066 100644
2585 +--- a/drivers/net/can/sja1000/peak_pci.c
2586 ++++ b/drivers/net/can/sja1000/peak_pci.c
2587 +@@ -729,16 +729,15 @@ static void peak_pci_remove(struct pci_dev *pdev)
2588 + struct net_device *prev_dev = chan->prev_dev;
2589 +
2590 + dev_info(&pdev->dev, "removing device %s\n", dev->name);
2591 ++ /* do that only for first channel */
2592 ++ if (!prev_dev && chan->pciec_card)
2593 ++ peak_pciec_remove(chan->pciec_card);
2594 + unregister_sja1000dev(dev);
2595 + free_sja1000dev(dev);
2596 + dev = prev_dev;
2597 +
2598 +- if (!dev) {
2599 +- /* do that only for first channel */
2600 +- if (chan->pciec_card)
2601 +- peak_pciec_remove(chan->pciec_card);
2602 ++ if (!dev)
2603 + break;
2604 +- }
2605 + priv = netdev_priv(dev);
2606 + chan = priv->priv;
2607 + }
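
The peak_pci teardown now removes the shared PCIe-card state while the first channel is still alive, rather than after its net_device has been freed. The underlying rule is to read everything you need out of a node before freeing it; a minimal list-teardown sketch:

    #include <stdio.h>
    #include <stdlib.h>

    struct chan {
        struct chan *prev;
        void *shared;           /* owned by the first channel only */
    };

    static void teardown(struct chan *c)
    {
        while (c) {
            struct chan *prev = c->prev;    /* read fields before free() */

            if (!prev && c->shared)
                free(c->shared);            /* first channel: drop shared state */
            free(c);
            c = prev;
        }
    }

    int main(void)
    {
        struct chan *first = calloc(1, sizeof(*first));
        struct chan *second = calloc(1, sizeof(*second));

        first->shared = malloc(16);
        second->prev = first;
        teardown(second);
        puts("teardown done, no use-after-free");
        return 0;
    }
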
2608 +diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
2609 +index b11eabad575bb..e206959b3d06c 100644
2610 +--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
2611 ++++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
2612 +@@ -551,11 +551,10 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
2613 + } else if (sm->channel_p_w_b & PUCAN_BUS_WARNING) {
2614 + new_state = CAN_STATE_ERROR_WARNING;
2615 + } else {
2616 +- /* no error bit (so, no error skb, back to active state) */
2617 +- dev->can.state = CAN_STATE_ERROR_ACTIVE;
2618 ++ /* back to (or still in) ERROR_ACTIVE state */
2619 ++ new_state = CAN_STATE_ERROR_ACTIVE;
2620 + pdev->bec.txerr = 0;
2621 + pdev->bec.rxerr = 0;
2622 +- return 0;
2623 + }
2624 +
2625 + /* state hasn't changed */
2626 +diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
2627 +index 267324889dd64..1b9b7569c371b 100644
2628 +--- a/drivers/net/dsa/lantiq_gswip.c
2629 ++++ b/drivers/net/dsa/lantiq_gswip.c
2630 +@@ -230,7 +230,7 @@
2631 + #define GSWIP_SDMA_PCTRLp(p) (0xBC0 + ((p) * 0x6))
2632 + #define GSWIP_SDMA_PCTRL_EN BIT(0) /* SDMA Port Enable */
2633 + #define GSWIP_SDMA_PCTRL_FCEN BIT(1) /* Flow Control Enable */
2634 +-#define GSWIP_SDMA_PCTRL_PAUFWD BIT(1) /* Pause Frame Forwarding */
2635 ++#define GSWIP_SDMA_PCTRL_PAUFWD BIT(3) /* Pause Frame Forwarding */
2636 +
2637 + #define GSWIP_TABLE_ACTIVE_VLAN 0x01
2638 + #define GSWIP_TABLE_VLAN_MAPPING 0x02
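
The lantiq_gswip change is a one-bit typo fix: PAUFWD shared BIT(1) with FCEN, so enabling pause-frame forwarding actually toggled flow control. A compile-time duplicate-bit check is a cheap guard against this class of bug (register names here are illustrative):

    #include <stdio.h>

    #define BIT(n) (1u << (n))

    #define PCTRL_EN     BIT(0)
    #define PCTRL_FCEN   BIT(1)
    #define PCTRL_PAUFWD BIT(3)     /* was BIT(1): aliased FCEN */

    /* Fails the build if any two flags overlap. */
    _Static_assert((PCTRL_EN & PCTRL_FCEN) == 0, "EN/FCEN overlap");
    _Static_assert((PCTRL_FCEN & PCTRL_PAUFWD) == 0, "FCEN/PAUFWD overlap");

    int main(void)
    {
        printf("EN=0x%x FCEN=0x%x PAUFWD=0x%x\n",
               PCTRL_EN, PCTRL_FCEN, PCTRL_PAUFWD);
        return 0;
    }
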
2639 +diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
2640 +index 0cea1572f8260..5f60db08bf80b 100644
2641 +--- a/drivers/net/dsa/mt7530.c
2642 ++++ b/drivers/net/dsa/mt7530.c
2643 +@@ -1031,9 +1031,6 @@ mt7530_port_enable(struct dsa_switch *ds, int port,
2644 + {
2645 + struct mt7530_priv *priv = ds->priv;
2646 +
2647 +- if (!dsa_is_user_port(ds, port))
2648 +- return 0;
2649 +-
2650 + mutex_lock(&priv->reg_mutex);
2651 +
2652 + /* Allow the user port gets connected to the cpu port and also
2653 +@@ -1056,9 +1053,6 @@ mt7530_port_disable(struct dsa_switch *ds, int port)
2654 + {
2655 + struct mt7530_priv *priv = ds->priv;
2656 +
2657 +- if (!dsa_is_user_port(ds, port))
2658 +- return;
2659 +-
2660 + mutex_lock(&priv->reg_mutex);
2661 +
2662 + /* Clear up all port matrix which could be restored in the next
2663 +@@ -3132,7 +3126,7 @@ mt7530_probe(struct mdio_device *mdiodev)
2664 + return -ENOMEM;
2665 +
2666 + priv->ds->dev = &mdiodev->dev;
2667 +- priv->ds->num_ports = DSA_MAX_PORTS;
2668 ++ priv->ds->num_ports = MT7530_NUM_PORTS;
2669 +
2670 + /* Use medatek,mcm property to distinguish hardware type that would
2671 + * casues a little bit differences on power-on sequence.
2672 +diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
2673 +index ebccaf02411c9..8b618c15984db 100644
2674 +--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
2675 ++++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
2676 +@@ -157,7 +157,7 @@ static const struct {
2677 + { ENETC_PM0_TFRM, "MAC tx frames" },
2678 + { ENETC_PM0_TFCS, "MAC tx fcs errors" },
2679 + { ENETC_PM0_TVLAN, "MAC tx VLAN frames" },
2680 +- { ENETC_PM0_TERR, "MAC tx frames" },
2681 ++ { ENETC_PM0_TERR, "MAC tx frame errors" },
2682 + { ENETC_PM0_TUCA, "MAC tx unicast frames" },
2683 + { ENETC_PM0_TMCA, "MAC tx multicast frames" },
2684 + { ENETC_PM0_TBCA, "MAC tx broadcast frames" },
2685 +diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
2686 +index cf00709caea4b..3ac324509f43e 100644
2687 +--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
2688 ++++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
2689 +@@ -517,10 +517,13 @@ static void enetc_port_si_configure(struct enetc_si *si)
2690 +
2691 + static void enetc_configure_port_mac(struct enetc_hw *hw)
2692 + {
2693 ++ int tc;
2694 ++
2695 + enetc_port_wr(hw, ENETC_PM0_MAXFRM,
2696 + ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));
2697 +
2698 +- enetc_port_wr(hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
2699 ++ for (tc = 0; tc < 8; tc++)
2700 ++ enetc_port_wr(hw, ENETC_PTCMSDUR(tc), ENETC_MAC_MAXFRM_SIZE);
2701 +
2702 + enetc_port_wr(hw, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
2703 + ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);
2704 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
2705 +index eef1b2764d34a..67b0bf310daaa 100644
2706 +--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
2707 ++++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
2708 +@@ -10,6 +10,27 @@ static LIST_HEAD(hnae3_ae_algo_list);
2709 + static LIST_HEAD(hnae3_client_list);
2710 + static LIST_HEAD(hnae3_ae_dev_list);
2711 +
2712 ++void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo)
2713 ++{
2714 ++ const struct pci_device_id *pci_id;
2715 ++ struct hnae3_ae_dev *ae_dev;
2716 ++
2717 ++ if (!ae_algo)
2718 ++ return;
2719 ++
2720 ++ list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
2721 ++ if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
2722 ++ continue;
2723 ++
2724 ++ pci_id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
2725 ++ if (!pci_id)
2726 ++ continue;
2727 ++ if (IS_ENABLED(CONFIG_PCI_IOV))
2728 ++ pci_disable_sriov(ae_dev->pdev);
2729 ++ }
2730 ++}
2731 ++EXPORT_SYMBOL(hnae3_unregister_ae_algo_prepare);
2732 ++
2733 + /* we are keeping things simple and using single lock for all the
2734 + * list. This is a non-critical code so other updations, if happen
2735 + * in parallel, can wait.
2736 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
2737 +index 32987bd134a1d..dc5cce127d8ea 100644
2738 +--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
2739 ++++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
2740 +@@ -850,6 +850,7 @@ struct hnae3_handle {
2741 + int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
2742 + void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
2743 +
2744 ++void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo);
2745 + void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo);
2746 + void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo);
2747 +
2748 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
2749 +index 114692c4f7978..5aad7951308d9 100644
2750 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
2751 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
2752 +@@ -1845,7 +1845,6 @@ void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
2753 +
2754 + static int hns3_skb_linearize(struct hns3_enet_ring *ring,
2755 + struct sk_buff *skb,
2756 +- u8 max_non_tso_bd_num,
2757 + unsigned int bd_num)
2758 + {
2759 + /* 'bd_num == UINT_MAX' means the skb' fraglist has a
2760 +@@ -1862,8 +1861,7 @@ static int hns3_skb_linearize(struct hns3_enet_ring *ring,
2761 + * will not help.
2762 + */
2763 + if (skb->len > HNS3_MAX_TSO_SIZE ||
2764 +- (!skb_is_gso(skb) && skb->len >
2765 +- HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))) {
2766 ++ (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) {
2767 + u64_stats_update_begin(&ring->syncp);
2768 + ring->stats.hw_limitation++;
2769 + u64_stats_update_end(&ring->syncp);
2770 +@@ -1898,8 +1896,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
2771 + goto out;
2772 + }
2773 +
2774 +- if (hns3_skb_linearize(ring, skb, max_non_tso_bd_num,
2775 +- bd_num))
2776 ++ if (hns3_skb_linearize(ring, skb, bd_num))
2777 + return -ENOMEM;
2778 +
2779 + bd_num = hns3_tx_bd_count(skb->len);
2780 +@@ -3266,6 +3263,7 @@ static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
2781 + {
2782 + hns3_unmap_buffer(ring, &ring->desc_cb[i]);
2783 + ring->desc[i].addr = 0;
2784 ++ ring->desc_cb[i].refill = 0;
2785 + }
2786 +
2787 + static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i,
2788 +@@ -3343,6 +3341,7 @@ static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i)
2789 + return ret;
2790 +
2791 + ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
2792 ++ ring->desc_cb[i].refill = 1;
2793 +
2794 + return 0;
2795 + }
2796 +@@ -3373,12 +3372,14 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
2797 + hns3_unmap_buffer(ring, &ring->desc_cb[i]);
2798 + ring->desc_cb[i] = *res_cb;
2799 + ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
2800 ++ ring->desc_cb[i].refill = 1;
2801 + ring->desc[i].rx.bd_base_info = 0;
2802 + }
2803 +
2804 + static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
2805 + {
2806 + ring->desc_cb[i].reuse_flag = 0;
2807 ++ ring->desc_cb[i].refill = 1;
2808 + ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
2809 + ring->desc_cb[i].page_offset);
2810 + ring->desc[i].rx.bd_base_info = 0;
2811 +@@ -3485,10 +3486,14 @@ static int hns3_desc_unused(struct hns3_enet_ring *ring)
2812 + int ntc = ring->next_to_clean;
2813 + int ntu = ring->next_to_use;
2814 +
2815 ++ if (unlikely(ntc == ntu && !ring->desc_cb[ntc].refill))
2816 ++ return ring->desc_num;
2817 ++
2818 + return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
2819 + }
2820 +
2821 +-static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
2822 ++/* Return true if there is any allocation failure */
2823 ++static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
2824 + int cleand_count)
2825 + {
2826 + struct hns3_desc_cb *desc_cb;
2827 +@@ -3513,7 +3518,10 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
2828 + hns3_rl_err(ring_to_netdev(ring),
2829 + "alloc rx buffer failed: %d\n",
2830 + ret);
2831 +- break;
2832 ++
2833 ++ writel(i, ring->tqp->io_base +
2834 ++ HNS3_RING_RX_RING_HEAD_REG);
2835 ++ return true;
2836 + }
2837 + hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
2838 +
2839 +@@ -3526,6 +3534,7 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
2840 + }
2841 +
2842 + writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
2843 ++ return false;
2844 + }
2845 +
2846 + static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
2847 +@@ -3824,6 +3833,7 @@ static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring)
2848 + {
2849 + ring->desc[ring->next_to_clean].rx.bd_base_info &=
2850 + cpu_to_le32(~BIT(HNS3_RXD_VLD_B));
2851 ++ ring->desc_cb[ring->next_to_clean].refill = 0;
2852 + ring->next_to_clean += 1;
2853 +
2854 + if (unlikely(ring->next_to_clean == ring->desc_num))
2855 +@@ -4159,6 +4169,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
2856 + {
2857 + #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
2858 + int unused_count = hns3_desc_unused(ring);
2859 ++ bool failure = false;
2860 + int recv_pkts = 0;
2861 + int err;
2862 +
2863 +@@ -4167,9 +4178,9 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
2864 + while (recv_pkts < budget) {
2865 + /* Reuse or realloc buffers */
2866 + if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
2867 +- hns3_nic_alloc_rx_buffers(ring, unused_count);
2868 +- unused_count = hns3_desc_unused(ring) -
2869 +- ring->pending_buf;
2870 ++ failure = failure ||
2871 ++ hns3_nic_alloc_rx_buffers(ring, unused_count);
2872 ++ unused_count = 0;
2873 + }
2874 +
2875 + /* Poll one pkt */
2876 +@@ -4188,11 +4199,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
2877 + }
2878 +
2879 + out:
2880 +- /* Make all data has been write before submit */
2881 +- if (unused_count > 0)
2882 +- hns3_nic_alloc_rx_buffers(ring, unused_count);
2883 +-
2884 +- return recv_pkts;
2885 ++ return failure ? budget : recv_pkts;
2886 + }
2887 +
2888 + static void hns3_update_rx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
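
The hns3 RX rework adds a per-descriptor refill flag because next_to_clean == next_to_use is ambiguous on its own: the ring may be full of fresh buffers or completely depleted. The disambiguation in a small sketch:

    #include <stdio.h>

    #define RING_SIZE 8

    struct ring {
        int refill[RING_SIZE];  /* 1 = descriptor holds a usable buffer */
        int ntc, ntu;           /* next_to_clean / next_to_use */
    };

    /* Descriptors needing a fresh buffer; the refill flag resolves the
     * classic "pointers equal: full or empty?" ambiguity. */
    static int ring_unused(const struct ring *r)
    {
        if (r->ntc == r->ntu && !r->refill[r->ntc])
            return RING_SIZE;   /* fully depleted */
        return ((r->ntc >= r->ntu) ? 0 : RING_SIZE) + r->ntc - r->ntu;
    }

    int main(void)
    {
        struct ring full = { .ntc = 0, .ntu = 0 };
        struct ring empty = { .ntc = 0, .ntu = 0 };

        for (int i = 0; i < RING_SIZE; i++)
            full.refill[i] = 1;

        printf("full: unused=%d\n", ring_unused(&full));    /* 0 */
        printf("empty: unused=%d\n", ring_unused(&empty));  /* 8 */
        return 0;
    }
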
2889 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
2890 +index 15af3d93857b6..d146f44bfacab 100644
2891 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
2892 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
2893 +@@ -185,11 +185,9 @@ enum hns3_nic_state {
2894 +
2895 + #define HNS3_MAX_BD_SIZE 65535
2896 + #define HNS3_MAX_TSO_BD_NUM 63U
2897 +-#define HNS3_MAX_TSO_SIZE \
2898 +- (HNS3_MAX_BD_SIZE * HNS3_MAX_TSO_BD_NUM)
2899 ++#define HNS3_MAX_TSO_SIZE 1048576U
2900 ++#define HNS3_MAX_NON_TSO_SIZE 9728U
2901 +
2902 +-#define HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num) \
2903 +- (HNS3_MAX_BD_SIZE * (max_non_tso_bd_num))
2904 +
2905 + #define HNS3_VECTOR_GL0_OFFSET 0x100
2906 + #define HNS3_VECTOR_GL1_OFFSET 0x200
2907 +@@ -324,6 +322,7 @@ struct hns3_desc_cb {
2908 + u32 length; /* length of the buffer */
2909 +
2910 + u16 reuse_flag;
2911 ++ u16 refill;
2912 +
2913 + /* desc type, used by the ring user to mark the type of the priv data */
2914 + u16 type;
2915 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
2916 +index c90bfde2aecff..c60d0626062cf 100644
2917 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
2918 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
2919 +@@ -133,6 +133,15 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
2920 + *changed = true;
2921 + break;
2922 + case IEEE_8021QAZ_TSA_ETS:
2923 ++ /* The hardware will switch to sp mode if bandwidth is
2924 ++ * 0, so limit ets bandwidth must be greater than 0.
2925 ++ */
2926 ++ if (!ets->tc_tx_bw[i]) {
2927 ++ dev_err(&hdev->pdev->dev,
2928 ++ "tc%u ets bw cannot be 0\n", i);
2929 ++ return -EINVAL;
2930 ++ }
2931 ++
2932 + if (hdev->tm_info.tc_info[i].tc_sch_mode !=
2933 + HCLGE_SCH_MODE_DWRR)
2934 + *changed = true;
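
The DCB hunk rejects ETS configurations that assign a traffic class 0% bandwidth, since the hardware silently reinterprets 0 as strict-priority scheduling. The invariant as a standalone check:

    #include <stdio.h>
    #include <errno.h>

    #define MAX_TC 8

    enum tsa { TSA_STRICT, TSA_ETS };

    /* Every ETS class must get a non-zero share; 0 would flip the HW
     * into strict-priority mode behind the user's back. */
    static int validate_ets(const enum tsa tsa[MAX_TC],
                            const unsigned int bw[MAX_TC])
    {
        for (int i = 0; i < MAX_TC; i++)
            if (tsa[i] == TSA_ETS && bw[i] == 0)
                return -EINVAL;
        return 0;
    }

    int main(void)
    {
        enum tsa tsa[MAX_TC] = { TSA_ETS, TSA_ETS };    /* rest strict */
        unsigned int ok[MAX_TC] = { 50, 50 };
        unsigned int bad[MAX_TC] = { 100, 0 };

        printf("ok=%d bad=%d\n", validate_ets(tsa, ok), validate_ets(tsa, bad));
        return 0;
    }
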
2935 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
2936 +index 2eeafd61a07ee..c63b440fd6549 100644
2937 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
2938 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
2939 +@@ -995,8 +995,11 @@ static int hclge_config_tm_hw_err_int(struct hclge_dev *hdev, bool en)
2940 +
2941 + /* configure TM QCN hw errors */
2942 + hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_QCN_MEM_INT_CFG, false);
2943 +- if (en)
2944 ++ desc.data[0] = cpu_to_le32(HCLGE_TM_QCN_ERR_INT_TYPE);
2945 ++ if (en) {
2946 ++ desc.data[0] |= cpu_to_le32(HCLGE_TM_QCN_FIFO_INT_EN);
2947 + desc.data[1] = cpu_to_le32(HCLGE_TM_QCN_MEM_ERR_INT_EN);
2948 ++ }
2949 +
2950 + ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2951 + if (ret)
2952 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
2953 +index 07987fb8332ef..d811eeefe2c05 100644
2954 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
2955 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
2956 +@@ -50,6 +50,8 @@
2957 + #define HCLGE_PPP_MPF_ECC_ERR_INT3_EN 0x003F
2958 + #define HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK 0x003F
2959 + #define HCLGE_TM_SCH_ECC_ERR_INT_EN 0x3
2960 ++#define HCLGE_TM_QCN_ERR_INT_TYPE 0x29
2961 ++#define HCLGE_TM_QCN_FIFO_INT_EN 0xFFFF00
2962 + #define HCLGE_TM_QCN_MEM_ERR_INT_EN 0xFFFFFF
2963 + #define HCLGE_NCSI_ERR_INT_EN 0x3
2964 + #define HCLGE_NCSI_ERR_INT_TYPE 0x9
2965 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
2966 +index 9920e76b4f41c..be46b164b0e2c 100644
2967 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
2968 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
2969 +@@ -13023,6 +13023,7 @@ static int hclge_init(void)
2970 +
2971 + static void hclge_exit(void)
2972 + {
2973 ++ hnae3_unregister_ae_algo_prepare(&ae_algo);
2974 + hnae3_unregister_ae_algo(&ae_algo);
2975 + destroy_workqueue(hclge_wq);
2976 + }
2977 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
2978 +index f314dbd3ce11f..95074e91a8466 100644
2979 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
2980 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
2981 +@@ -752,6 +752,8 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
2982 + hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
2983 + for (k = 0; k < hdev->tm_info.num_tc; k++)
2984 + hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
2985 ++ for (; k < HNAE3_MAX_TC; k++)
2986 ++ hdev->tm_info.pg_info[i].tc_dwrr[k] = 0;
2987 + }
2988 + }
2989 +
2990 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
2991 +index 22cf66004dfa2..b8414f684e89d 100644
2992 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
2993 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
2994 +@@ -2271,9 +2271,9 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
2995 + hdev->reset_attempts = 0;
2996 +
2997 + hdev->last_reset_time = jiffies;
2998 +- while ((hdev->reset_type =
2999 +- hclgevf_get_reset_level(hdev, &hdev->reset_pending))
3000 +- != HNAE3_NONE_RESET)
3001 ++ hdev->reset_type =
3002 ++ hclgevf_get_reset_level(hdev, &hdev->reset_pending);
3003 ++ if (hdev->reset_type != HNAE3_NONE_RESET)
3004 + hclgevf_reset(hdev);
3005 + } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
3006 + &hdev->reset_state)) {
3007 +diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
3008 +index 5b2143f4b1f85..3178efd980066 100644
3009 +--- a/drivers/net/ethernet/intel/e1000e/e1000.h
3010 ++++ b/drivers/net/ethernet/intel/e1000e/e1000.h
3011 +@@ -113,7 +113,8 @@ enum e1000_boards {
3012 + board_pch2lan,
3013 + board_pch_lpt,
3014 + board_pch_spt,
3015 +- board_pch_cnp
3016 ++ board_pch_cnp,
3017 ++ board_pch_tgp
3018 + };
3019 +
3020 + struct e1000_ps_page {
3021 +@@ -499,6 +500,7 @@ extern const struct e1000_info e1000_pch2_info;
3022 + extern const struct e1000_info e1000_pch_lpt_info;
3023 + extern const struct e1000_info e1000_pch_spt_info;
3024 + extern const struct e1000_info e1000_pch_cnp_info;
3025 ++extern const struct e1000_info e1000_pch_tgp_info;
3026 + extern const struct e1000_info e1000_es2_info;
3027 +
3028 + void e1000e_ptp_init(struct e1000_adapter *adapter);
3029 +diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
3030 +index a80336c4319bb..f8b3e758a8d2e 100644
3031 +--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
3032 ++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
3033 +@@ -4804,7 +4804,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3034 + static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
3035 + {
3036 + struct e1000_mac_info *mac = &hw->mac;
3037 +- u32 ctrl_ext, txdctl, snoop;
3038 ++ u32 ctrl_ext, txdctl, snoop, fflt_dbg;
3039 + s32 ret_val;
3040 + u16 i;
3041 +
3042 +@@ -4863,6 +4863,15 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
3043 + snoop = (u32)~(PCIE_NO_SNOOP_ALL);
3044 + e1000e_set_pcie_no_snoop(hw, snoop);
3045 +
3046 ++ /* Enable workaround for packet loss issue on TGP PCH
3047 ++ * Do not gate DMA clock from the modPHY block
3048 ++ */
3049 ++ if (mac->type >= e1000_pch_tgp) {
3050 ++ fflt_dbg = er32(FFLT_DBG);
3051 ++ fflt_dbg |= E1000_FFLT_DBG_DONT_GATE_WAKE_DMA_CLK;
3052 ++ ew32(FFLT_DBG, fflt_dbg);
3053 ++ }
3054 ++
3055 + ctrl_ext = er32(CTRL_EXT);
3056 + ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
3057 + ew32(CTRL_EXT, ctrl_ext);
3058 +@@ -5983,3 +5992,23 @@ const struct e1000_info e1000_pch_cnp_info = {
3059 + .phy_ops = &ich8_phy_ops,
3060 + .nvm_ops = &spt_nvm_ops,
3061 + };
3062 ++
3063 ++const struct e1000_info e1000_pch_tgp_info = {
3064 ++ .mac = e1000_pch_tgp,
3065 ++ .flags = FLAG_IS_ICH
3066 ++ | FLAG_HAS_WOL
3067 ++ | FLAG_HAS_HW_TIMESTAMP
3068 ++ | FLAG_HAS_CTRLEXT_ON_LOAD
3069 ++ | FLAG_HAS_AMT
3070 ++ | FLAG_HAS_FLASH
3071 ++ | FLAG_HAS_JUMBO_FRAMES
3072 ++ | FLAG_APME_IN_WUC,
3073 ++ .flags2 = FLAG2_HAS_PHY_STATS
3074 ++ | FLAG2_HAS_EEE,
3075 ++ .pba = 26,
3076 ++ .max_hw_frame_size = 9022,
3077 ++ .get_variants = e1000_get_variants_ich8lan,
3078 ++ .mac_ops = &ich8_mac_ops,
3079 ++ .phy_ops = &ich8_phy_ops,
3080 ++ .nvm_ops = &spt_nvm_ops,
3081 ++};
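
The ich8lan change is a classic gated read-modify-write workaround: on TGP and later PCHs, read FFLT_DBG, OR in the don't-gate bit defined in ich8lan.h below, and write the value back. A standalone sketch of that shape, with the MMIO access simulated by a plain variable (er32/ew32 are only mimicked here; the bit value is the one from the patch):

    #include <stdio.h>
    #include <stdint.h>

    #define DONT_GATE_WAKE_DMA_CLK  0x1000  /* bit value from ich8lan.h */

    enum mac_type { MAC_CNP = 1, MAC_TGP = 2 };

    static uint32_t fflt_dbg_reg;           /* stands in for the register */

    static void init_hw(enum mac_type type)
    {
            if (type >= MAC_TGP) {          /* gate on hardware generation */
                    uint32_t v = fflt_dbg_reg;      /* er32(FFLT_DBG) */

                    v |= DONT_GATE_WAKE_DMA_CLK;
                    fflt_dbg_reg = v;               /* ew32(FFLT_DBG, v) */
            }
    }

    int main(void)
    {
            init_hw(MAC_CNP);
            printf("CNP: FFLT_DBG = 0x%x\n", fflt_dbg_reg);  /* 0x0 */
            init_hw(MAC_TGP);
            printf("TGP: FFLT_DBG = 0x%x\n", fflt_dbg_reg);  /* 0x1000 */
            return 0;
    }
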
3082 +diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
3083 +index e757896287eba..8f2a8f4ce0ee4 100644
3084 +--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
3085 ++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
3086 +@@ -286,6 +286,9 @@
3087 + /* Proprietary Latency Tolerance Reporting PCI Capability */
3088 + #define E1000_PCI_LTR_CAP_LPT 0xA8
3089 +
3090 ++/* Don't gate wake DMA clock */
3091 ++#define E1000_FFLT_DBG_DONT_GATE_WAKE_DMA_CLK 0x1000
3092 ++
3093 + void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
3094 + void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
3095 + bool state);
3096 +diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
3097 +index 757a54c39eefd..774f849027f09 100644
3098 +--- a/drivers/net/ethernet/intel/e1000e/netdev.c
3099 ++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
3100 +@@ -51,6 +51,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
3101 + [board_pch_lpt] = &e1000_pch_lpt_info,
3102 + [board_pch_spt] = &e1000_pch_spt_info,
3103 + [board_pch_cnp] = &e1000_pch_cnp_info,
3104 ++ [board_pch_tgp] = &e1000_pch_tgp_info,
3105 + };
3106 +
3107 + struct e1000_reg_info {
3108 +@@ -7844,20 +7845,20 @@ static const struct pci_device_id e1000_pci_tbl[] = {
3109 + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V11), board_pch_cnp },
3110 + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM12), board_pch_spt },
3111 + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V12), board_pch_spt },
3112 +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM13), board_pch_cnp },
3113 +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V13), board_pch_cnp },
3114 +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_cnp },
3115 +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_cnp },
3116 +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_cnp },
3117 +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_cnp },
3118 +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_cnp },
3119 +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_cnp },
3120 +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_cnp },
3121 +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_cnp },
3122 +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_cnp },
3123 +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_cnp },
3124 +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_cnp },
3125 +- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_cnp },
3126 ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM13), board_pch_tgp },
3127 ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V13), board_pch_tgp },
3128 ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_tgp },
3129 ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_tgp },
3130 ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_tgp },
3131 ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_tgp },
3132 ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_tgp },
3133 ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_tgp },
3134 ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_tgp },
3135 ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_tgp },
3136 ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_tgp },
3137 ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_tgp },
3138 ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_tgp },
3139 ++ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_tgp },
3140 +
3141 + { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
3142 + };
3143 +diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
3144 +index 2fb81e359cdfd..df5ad4de1f00e 100644
3145 +--- a/drivers/net/ethernet/intel/ice/ice_common.c
3146 ++++ b/drivers/net/ethernet/intel/ice/ice_common.c
3147 +@@ -25,6 +25,8 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
3148 + case ICE_DEV_ID_E810C_BACKPLANE:
3149 + case ICE_DEV_ID_E810C_QSFP:
3150 + case ICE_DEV_ID_E810C_SFP:
3151 ++ case ICE_DEV_ID_E810_XXV_BACKPLANE:
3152 ++ case ICE_DEV_ID_E810_XXV_QSFP:
3153 + case ICE_DEV_ID_E810_XXV_SFP:
3154 + hw->mac_type = ICE_MAC_E810;
3155 + break;
3156 +diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
3157 +index 9d8194671f6a6..ef4392e6e2444 100644
3158 +--- a/drivers/net/ethernet/intel/ice/ice_devids.h
3159 ++++ b/drivers/net/ethernet/intel/ice/ice_devids.h
3160 +@@ -21,6 +21,10 @@
3161 + #define ICE_DEV_ID_E810C_QSFP 0x1592
3162 + /* Intel(R) Ethernet Controller E810-C for SFP */
3163 + #define ICE_DEV_ID_E810C_SFP 0x1593
3164 ++/* Intel(R) Ethernet Controller E810-XXV for backplane */
3165 ++#define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599
3166 ++/* Intel(R) Ethernet Controller E810-XXV for QSFP */
3167 ++#define ICE_DEV_ID_E810_XXV_QSFP 0x159A
3168 + /* Intel(R) Ethernet Controller E810-XXV for SFP */
3169 + #define ICE_DEV_ID_E810_XXV_SFP 0x159B
3170 + /* Intel(R) Ethernet Connection E823-C for backplane */
3171 +diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
3172 +index 7fe6e8ea39f0d..64bea7659cf7e 100644
3173 +--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
3174 ++++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
3175 +@@ -63,7 +63,8 @@ static int ice_info_fw_api(struct ice_pf *pf, struct ice_info_ctx *ctx)
3176 + {
3177 + struct ice_hw *hw = &pf->hw;
3178 +
3179 +- snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u", hw->api_maj_ver, hw->api_min_ver);
3180 ++ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", hw->api_maj_ver,
3181 ++ hw->api_min_ver, hw->api_patch);
3182 +
3183 + return 0;
3184 + }
3185 +diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
3186 +index 06ac9badee774..1ac96dc66d0db 100644
3187 +--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
3188 ++++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
3189 +@@ -1668,7 +1668,7 @@ static u16 ice_tunnel_idx_to_entry(struct ice_hw *hw, enum ice_tunnel_type type,
3190 + for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
3191 + if (hw->tnl.tbl[i].valid &&
3192 + hw->tnl.tbl[i].type == type &&
3193 +- idx--)
3194 ++ idx-- == 0)
3195 + return i;
3196 +
3197 + WARN_ON_ONCE(1);
3198 +@@ -1828,7 +1828,7 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
3199 + u16 index;
3200 +
3201 + tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
3202 +- index = ice_tunnel_idx_to_entry(&pf->hw, idx, tnl_type);
3203 ++ index = ice_tunnel_idx_to_entry(&pf->hw, tnl_type, idx);
3204 +
3205 + status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port));
3206 + if (status) {
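
The two ice_flex_pipe.c hunks fix independent bugs: "idx--" used as a truth value is false exactly when idx is 0, i.e. for the entry being asked for, so lookups were off by one; and the caller passed idx and tnl_type in swapped order. The off-by-one is easy to reproduce in isolation; the table and names below are invented, not the driver's real data structures.

    #include <stdio.h>

    struct entry { int valid; int type; };

    static struct entry tbl[] = { { 1, 7 }, { 1, 7 }, { 1, 7 } };

    /* Fixed: return the slot of the idx-th valid entry of 'type'. */
    static int idx_to_entry_fixed(int type, unsigned int idx)
    {
            unsigned int i;

            for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
                    if (tbl[i].valid && tbl[i].type == type && idx-- == 0)
                            return i;
            return -1;
    }

    /* Buggy: "idx--" as a truth value is false exactly when idx == 0,
     * so the match is skipped and the *next* entry is returned. */
    static int idx_to_entry_buggy(int type, unsigned int idx)
    {
            unsigned int i;

            for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
                    if (tbl[i].valid && tbl[i].type == type && idx--)
                            return i;
            return -1;
    }

    int main(void)
    {
            printf("fixed: idx 0 -> slot %d\n", idx_to_entry_fixed(7, 0)); /* 0 */
            printf("buggy: idx 0 -> slot %d\n", idx_to_entry_buggy(7, 0)); /* 1 */
            return 0;
    }
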
3207 +diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
3208 +index dde9802c6c729..b718e196af2a4 100644
3209 +--- a/drivers/net/ethernet/intel/ice/ice_lib.c
3210 ++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
3211 +@@ -2841,6 +2841,7 @@ void ice_napi_del(struct ice_vsi *vsi)
3212 + */
3213 + int ice_vsi_release(struct ice_vsi *vsi)
3214 + {
3215 ++ enum ice_status err;
3216 + struct ice_pf *pf;
3217 +
3218 + if (!vsi->back)
3219 +@@ -2912,6 +2913,10 @@ int ice_vsi_release(struct ice_vsi *vsi)
3220 +
3221 + ice_fltr_remove_all(vsi);
3222 + ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
3223 ++ err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
3224 ++ if (err)
3225 ++ dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
3226 ++ vsi->vsi_num, err);
3227 + ice_vsi_delete(vsi);
3228 + ice_vsi_free_q_vectors(vsi);
3229 +
3230 +@@ -3092,6 +3097,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
3231 + prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
3232 +
3233 + ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
3234 ++ ret = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
3235 ++ if (ret)
3236 ++ dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
3237 ++ vsi->vsi_num, ret);
3238 + ice_vsi_free_q_vectors(vsi);
3239 +
3240 + /* SR-IOV determines needed MSIX resources all at once instead of per
3241 +diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
3242 +index a8bd512d5b450..ed087fde20668 100644
3243 +--- a/drivers/net/ethernet/intel/ice/ice_main.c
3244 ++++ b/drivers/net/ethernet/intel/ice/ice_main.c
3245 +@@ -4224,6 +4224,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
3246 + if (!pf)
3247 + return -ENOMEM;
3248 +
3249 ++ /* initialize Auxiliary index to invalid value */
3250 ++ pf->aux_idx = -1;
3251 ++
3252 + /* set up for high or low DMA */
3253 + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
3254 + if (err)
3255 +@@ -4615,7 +4618,8 @@ static void ice_remove(struct pci_dev *pdev)
3256 +
3257 + ice_aq_cancel_waiting_tasks(pf);
3258 + ice_unplug_aux_dev(pf);
3259 +- ida_free(&ice_aux_ida, pf->aux_idx);
3260 ++ if (pf->aux_idx >= 0)
3261 ++ ida_free(&ice_aux_ida, pf->aux_idx);
3262 + set_bit(ICE_DOWN, pf->state);
3263 +
3264 + mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
3265 +@@ -5016,6 +5020,8 @@ static const struct pci_device_id ice_pci_tbl[] = {
3266 + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
3267 + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
3268 + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
3269 ++ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
3270 ++ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },
3271 + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
3272 + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
3273 + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
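
The ice_main.c change initializes pf->aux_idx to -1 before any failure path can run, and frees the IDA slot only when one was actually allocated; without the sentinel, an early probe failure would let ice_remove() call ida_free() on an uninitialized index. A toy sketch of the sentinel pattern (fake_ida_alloc/fake_ida_free and drv_probe/drv_remove are invented stand-ins for the IDA API and the driver entry points):

    #include <stdio.h>

    struct dev_priv {
            int aux_idx;            /* -1 means "never allocated" */
    };

    static int fake_ida_alloc(void)   { return 3; }
    static void fake_ida_free(int id) { printf("freed id %d\n", id); }

    static int drv_probe(struct dev_priv *p, int fail_early)
    {
            p->aux_idx = -1;        /* set the sentinel first ... */

            if (fail_early)
                    return -1;      /* ... so every early exit is safe */

            p->aux_idx = fake_ida_alloc();
            return 0;
    }

    static void drv_remove(struct dev_priv *p)
    {
            if (p->aux_idx >= 0)    /* free only what was allocated */
                    fake_ida_free(p->aux_idx);
    }

    int main(void)
    {
            struct dev_priv p;

            drv_probe(&p, 1);       /* early failure: nothing to free */
            drv_remove(&p);
            drv_probe(&p, 0);
            drv_remove(&p);         /* prints "freed id 3" */
            return 0;
    }
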
3274 +diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
3275 +index 9f07b66417059..2d9b10277186b 100644
3276 +--- a/drivers/net/ethernet/intel/ice/ice_sched.c
3277 ++++ b/drivers/net/ethernet/intel/ice/ice_sched.c
3278 +@@ -2070,6 +2070,19 @@ enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
3279 + return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
3280 + }
3281 +
3282 ++/**
3283 ++ * ice_rm_vsi_rdma_cfg - remove VSI and its RDMA children nodes
3284 ++ * @pi: port information structure
3285 ++ * @vsi_handle: software VSI handle
3286 ++ *
3287 ++ * This function clears the VSI and its RDMA child nodes from the scheduler tree
3288 ++ * for all TCs.
3289 ++ */
3290 ++enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle)
3291 ++{
3292 ++ return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_RDMA);
3293 ++}
3294 ++
3295 + /**
3296 + * ice_get_agg_info - get the aggregator ID
3297 + * @hw: pointer to the hardware structure
3298 +diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
3299 +index 9beef8f0ec760..fdf7a5882f076 100644
3300 +--- a/drivers/net/ethernet/intel/ice/ice_sched.h
3301 ++++ b/drivers/net/ethernet/intel/ice/ice_sched.h
3302 +@@ -89,6 +89,7 @@ enum ice_status
3303 + ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
3304 + u8 owner, bool enable);
3305 + enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
3306 ++enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle);
3307 +
3308 + /* Tx scheduler rate limiter functions */
3309 + enum ice_status
3310 +diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
3311 +index 4461f8b9a864b..4e0203336c6bf 100644
3312 +--- a/drivers/net/ethernet/intel/igc/igc_hw.h
3313 ++++ b/drivers/net/ethernet/intel/igc/igc_hw.h
3314 +@@ -22,8 +22,8 @@
3315 + #define IGC_DEV_ID_I220_V 0x15F7
3316 + #define IGC_DEV_ID_I225_K 0x3100
3317 + #define IGC_DEV_ID_I225_K2 0x3101
3318 ++#define IGC_DEV_ID_I226_K 0x3102
3319 + #define IGC_DEV_ID_I225_LMVP 0x5502
3320 +-#define IGC_DEV_ID_I226_K 0x5504
3321 + #define IGC_DEV_ID_I225_IT 0x0D9F
3322 + #define IGC_DEV_ID_I226_LM 0x125B
3323 + #define IGC_DEV_ID_I226_V 0x125C
3324 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
3325 +index 1e2d117082d47..603d9884b6bd1 100644
3326 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
3327 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
3328 +@@ -10,6 +10,8 @@
3329 + #include "en_tc.h"
3330 + #include "rep/tc.h"
3331 + #include "rep/neigh.h"
3332 ++#include "lag.h"
3333 ++#include "lag_mp.h"
3334 +
3335 + struct mlx5e_tc_tun_route_attr {
3336 + struct net_device *out_dev;
3337 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
3338 +index 33de8f0092a66..fb5397324aa4f 100644
3339 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
3340 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
3341 +@@ -141,8 +141,7 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
3342 + * Pkt: MAC IP ESP IP L4
3343 + *
3344 + * Transport Mode:
3345 +- * SWP: OutL3 InL4
3346 +- * InL3
3347 ++ * SWP: OutL3 OutL4
3348 + * Pkt: MAC IP ESP L4
3349 + *
3350 + * Tunnel(VXLAN TCP/UDP) over Transport Mode
3351 +@@ -171,31 +170,35 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
3352 + return;
3353 +
3354 + if (!xo->inner_ipproto) {
3355 +- eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
3356 +- eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
3357 +- if (skb->protocol == htons(ETH_P_IPV6))
3358 +- eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
3359 +- if (xo->proto == IPPROTO_UDP)
3360 ++ switch (xo->proto) {
3361 ++ case IPPROTO_UDP:
3362 ++ eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
3363 ++ fallthrough;
3364 ++ case IPPROTO_TCP:
3365 ++ /* IP | ESP | TCP */
3366 ++ eseg->swp_outer_l4_offset = skb_inner_transport_offset(skb) / 2;
3367 ++ break;
3368 ++ default:
3369 ++ break;
3370 ++ }
3371 ++ } else {
3372 ++ /* Tunnel(VXLAN TCP/UDP) over Transport Mode */
3373 ++ switch (xo->inner_ipproto) {
3374 ++ case IPPROTO_UDP:
3375 + eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
3376 +- return;
3377 +- }
3378 +-
3379 +- /* Tunnel(VXLAN TCP/UDP) over Transport Mode */
3380 +- switch (xo->inner_ipproto) {
3381 +- case IPPROTO_UDP:
3382 +- eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
3383 +- fallthrough;
3384 +- case IPPROTO_TCP:
3385 +- eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
3386 +- eseg->swp_inner_l4_offset = (skb->csum_start + skb->head - skb->data) / 2;
3387 +- if (skb->protocol == htons(ETH_P_IPV6))
3388 +- eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
3389 +- break;
3390 +- default:
3391 +- break;
3392 ++ fallthrough;
3393 ++ case IPPROTO_TCP:
3394 ++ eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
3395 ++ eseg->swp_inner_l4_offset =
3396 ++ (skb->csum_start + skb->head - skb->data) / 2;
3397 ++ if (skb->protocol == htons(ETH_P_IPV6))
3398 ++ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
3399 ++ break;
3400 ++ default:
3401 ++ break;
3402 ++ }
3403 + }
3404 +
3405 +- return;
3406 + }
3407 +
3408 + void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
3409 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
3410 +index 6eba574c5a364..c757209b47ee0 100644
3411 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
3412 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
3413 +@@ -72,6 +72,8 @@
3414 + #include "lib/fs_chains.h"
3415 + #include "diag/en_tc_tracepoint.h"
3416 + #include <asm/div64.h>
3417 ++#include "lag.h"
3418 ++#include "lag_mp.h"
3419 +
3420 + #define nic_chains(priv) ((priv)->fs.tc.chains)
3421 + #define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
3422 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
3423 +index c63d78eda6060..188994d091c54 100644
3424 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
3425 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
3426 +@@ -213,19 +213,18 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
3427 + memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
3428 + }
3429 +
3430 +-/* If packet is not IP's CHECKSUM_PARTIAL (e.g. icmd packet),
3431 +- * need to set L3 checksum flag for IPsec
3432 +- */
3433 + static void
3434 + ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
3435 + struct mlx5_wqe_eth_seg *eseg)
3436 + {
3437 ++ struct xfrm_offload *xo = xfrm_offload(skb);
3438 ++
3439 + eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
3440 +- if (skb->encapsulation) {
3441 +- eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
3442 ++ if (xo->inner_ipproto) {
3443 ++ eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM | MLX5_ETH_WQE_L3_INNER_CSUM;
3444 ++ } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
3445 ++ eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
3446 + sq->stats->csum_partial_inner++;
3447 +- } else {
3448 +- sq->stats->csum_partial++;
3449 + }
3450 + }
3451 +
3452 +@@ -234,6 +233,11 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
3453 + struct mlx5e_accel_tx_state *accel,
3454 + struct mlx5_wqe_eth_seg *eseg)
3455 + {
3456 ++ if (unlikely(mlx5e_ipsec_eseg_meta(eseg))) {
3457 ++ ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
3458 ++ return;
3459 ++ }
3460 ++
3461 + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
3462 + eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
3463 + if (skb->encapsulation) {
3464 +@@ -249,8 +253,6 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
3465 + eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
3466 + sq->stats->csum_partial++;
3467 + #endif
3468 +- } else if (unlikely(mlx5e_ipsec_eseg_meta(eseg))) {
3469 +- ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
3470 + } else
3471 + sq->stats->csum_none++;
3472 + }
3473 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
3474 +index 40ef60f562b42..be6e7e10b2529 100644
3475 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
3476 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
3477 +@@ -372,12 +372,17 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
3478 + bool do_bond, roce_lag;
3479 + int err;
3480 +
3481 +- if (!mlx5_lag_is_ready(ldev))
3482 +- return;
3483 ++ if (!mlx5_lag_is_ready(ldev)) {
3484 ++ do_bond = false;
3485 ++ } else {
3486 ++ /* VF LAG is in multipath mode, ignore bond change requests */
3487 ++ if (mlx5_lag_is_multipath(dev0))
3488 ++ return;
3489 +
3490 +- tracker = ldev->tracker;
3491 ++ tracker = ldev->tracker;
3492 +
3493 +- do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
3494 ++ do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
3495 ++ }
3496 +
3497 + if (do_bond && !__mlx5_lag_is_active(ldev)) {
3498 + roce_lag = !mlx5_sriov_is_enabled(dev0) &&
3499 +@@ -691,11 +696,11 @@ void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev,
3500 + if (!ldev)
3501 + return;
3502 +
3503 +- if (__mlx5_lag_is_active(ldev))
3504 +- mlx5_disable_lag(ldev);
3505 +-
3506 + mlx5_ldev_remove_netdev(ldev, netdev);
3507 + ldev->flags &= ~MLX5_LAG_FLAG_READY;
3508 ++
3509 ++ if (__mlx5_lag_is_active(ldev))
3510 ++ mlx5_queue_bond_work(ldev, 0);
3511 + }
3512 +
3513 + /* Must be called with intf_mutex held */
3514 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
3515 +index 516bfc2bd797b..577e5d02bfdd4 100644
3516 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
3517 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
3518 +@@ -9,20 +9,23 @@
3519 + #include "eswitch.h"
3520 + #include "lib/mlx5.h"
3521 +
3522 ++static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
3523 ++{
3524 ++ return !!(ldev->flags & MLX5_LAG_FLAG_MULTIPATH);
3525 ++}
3526 ++
3527 + static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
3528 + {
3529 + if (!mlx5_lag_is_ready(ldev))
3530 + return false;
3531 +
3532 ++ if (__mlx5_lag_is_active(ldev) && !__mlx5_lag_is_multipath(ldev))
3533 ++ return false;
3534 ++
3535 + return mlx5_esw_multipath_prereq(ldev->pf[MLX5_LAG_P1].dev,
3536 + ldev->pf[MLX5_LAG_P2].dev);
3537 + }
3538 +
3539 +-static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
3540 +-{
3541 +- return !!(ldev->flags & MLX5_LAG_FLAG_MULTIPATH);
3542 +-}
3543 +-
3544 + bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
3545 + {
3546 + struct mlx5_lag *ldev;
3547 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h
3548 +index 729c839397a89..dea199e79beda 100644
3549 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h
3550 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h
3551 +@@ -24,12 +24,14 @@ struct lag_mp {
3552 + void mlx5_lag_mp_reset(struct mlx5_lag *ldev);
3553 + int mlx5_lag_mp_init(struct mlx5_lag *ldev);
3554 + void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev);
3555 ++bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev);
3556 +
3557 + #else /* CONFIG_MLX5_ESWITCH */
3558 +
3559 + static inline void mlx5_lag_mp_reset(struct mlx5_lag *ldev) {};
3560 + static inline int mlx5_lag_mp_init(struct mlx5_lag *ldev) { return 0; }
3561 + static inline void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev) {}
3562 ++static inline bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev) { return false; }
3563 +
3564 + #endif /* CONFIG_MLX5_ESWITCH */
3565 + #endif /* __MLX5_LAG_MP_H__ */
3566 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
3567 +index fbfda55b4c526..5e731a72cce81 100644
3568 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
3569 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
3570 +@@ -71,6 +71,7 @@ err_remove_config_dt:
3571 +
3572 + static const struct of_device_id dwmac_generic_match[] = {
3573 + { .compatible = "st,spear600-gmac"},
3574 ++ { .compatible = "snps,dwmac-3.40a"},
3575 + { .compatible = "snps,dwmac-3.50a"},
3576 + { .compatible = "snps,dwmac-3.610"},
3577 + { .compatible = "snps,dwmac-3.70a"},
3578 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
3579 +index 6b2a5e5769e89..14e577787415e 100644
3580 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
3581 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
3582 +@@ -736,7 +736,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
3583 + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
3584 + ptp_v2 = PTP_TCR_TSVER2ENA;
3585 + snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
3586 +- if (priv->synopsys_id != DWMAC_CORE_5_10)
3587 ++ if (priv->synopsys_id < DWMAC_CORE_4_10)
3588 + ts_event_en = PTP_TCR_TSEVNTENA;
3589 + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
3590 + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
3591 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
3592 +index 62cec9bfcd337..232ac98943cd0 100644
3593 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
3594 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
3595 +@@ -508,6 +508,14 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
3596 + plat->pmt = 1;
3597 + }
3598 +
3599 ++ if (of_device_is_compatible(np, "snps,dwmac-3.40a")) {
3600 ++ plat->has_gmac = 1;
3601 ++ plat->enh_desc = 1;
3602 ++ plat->tx_coe = 1;
3603 ++ plat->bugged_jumbo = 1;
3604 ++ plat->pmt = 1;
3605 ++ }
3606 ++
3607 + if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
3608 + of_device_is_compatible(np, "snps,dwmac-4.10a") ||
3609 + of_device_is_compatible(np, "snps,dwmac-4.20a") ||
3610 +diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
3611 +index 4435a1195194d..8ea8d50f81c4b 100644
3612 +--- a/drivers/net/hamradio/baycom_epp.c
3613 ++++ b/drivers/net/hamradio/baycom_epp.c
3614 +@@ -623,16 +623,16 @@ static int receive(struct net_device *dev, int cnt)
3615 +
3616 + /* --------------------------------------------------------------------- */
3617 +
3618 +-#ifdef __i386__
3619 ++#if defined(__i386__) && !defined(CONFIG_UML)
3620 + #include <asm/msr.h>
3621 + #define GETTICK(x) \
3622 + ({ \
3623 + if (boot_cpu_has(X86_FEATURE_TSC)) \
3624 + x = (unsigned int)rdtsc(); \
3625 + })
3626 +-#else /* __i386__ */
3627 ++#else /* __i386__ && !CONFIG_UML */
3628 + #define GETTICK(x)
3629 +-#endif /* __i386__ */
3630 ++#endif /* __i386__ && !CONFIG_UML */
3631 +
3632 + static void epp_bh(struct work_struct *work)
3633 + {
3634 +diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
3635 +index 6865d9319197f..3fa9c15ec81e2 100644
3636 +--- a/drivers/net/phy/mdio_bus.c
3637 ++++ b/drivers/net/phy/mdio_bus.c
3638 +@@ -548,6 +548,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
3639 + err = device_register(&bus->dev);
3640 + if (err) {
3641 + pr_err("mii_bus %s failed to register\n", bus->id);
3642 ++ put_device(&bus->dev);
3643 + return -EINVAL;
3644 + }
3645 +
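
The mdio_bus fix follows the driver-core rule that a failed device_register() still leaves a reference to drop: the caller must put_device() (which runs the release callback) rather than simply return or kfree() the object. A toy refcount model of that rule, with invented names:

    #include <stdio.h>
    #include <stdlib.h>

    struct toy_device {
            int refcount;
    };

    static void toy_get(struct toy_device *d) { d->refcount++; }

    static void toy_put(struct toy_device *d)
    {
            if (--d->refcount == 0) {
                    printf("release callback frees the device\n");
                    free(d);
            }
    }

    /* Mirrors device_register(): a reference is held even on failure. */
    static int toy_register(struct toy_device *d)
    {
            toy_get(d);
            return -22;             /* simulate a registration failure */
    }

    int main(void)
    {
            struct toy_device *d = calloc(1, sizeof(*d));

            if (!d)
                    return 1;
            if (toy_register(d) < 0)
                    toy_put(d);     /* without this the device leaks */
            return 0;
    }
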
3646 +diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
3647 +index f87f175033731..b554054a7560a 100644
3648 +--- a/drivers/net/usb/Kconfig
3649 ++++ b/drivers/net/usb/Kconfig
3650 +@@ -117,6 +117,7 @@ config USB_LAN78XX
3651 + select PHYLIB
3652 + select MICROCHIP_PHY
3653 + select FIXED_PHY
3654 ++ select CRC32
3655 + help
3656 + This option adds support for Microchip LAN78XX based USB 2
3657 + & USB 3 10/100/1000 Ethernet adapters.
3658 +diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
3659 +index 79832374f78db..92fca5e9ed030 100644
3660 +--- a/drivers/net/usb/r8152.c
3661 ++++ b/drivers/net/usb/r8152.c
3662 +@@ -767,6 +767,7 @@ enum rtl8152_flags {
3663 + PHY_RESET,
3664 + SCHEDULE_TASKLET,
3665 + GREEN_ETHERNET,
3666 ++ RX_EPROTO,
3667 + };
3668 +
3669 + #define DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2 0x3082
3670 +@@ -1770,6 +1771,14 @@ static void read_bulk_callback(struct urb *urb)
3671 + rtl_set_unplug(tp);
3672 + netif_device_detach(tp->netdev);
3673 + return;
3674 ++ case -EPROTO:
3675 ++ urb->actual_length = 0;
3676 ++ spin_lock_irqsave(&tp->rx_lock, flags);
3677 ++ list_add_tail(&agg->list, &tp->rx_done);
3678 ++ spin_unlock_irqrestore(&tp->rx_lock, flags);
3679 ++ set_bit(RX_EPROTO, &tp->flags);
3680 ++ schedule_delayed_work(&tp->schedule, 1);
3681 ++ return;
3682 + case -ENOENT:
3683 + return; /* the urb is in unlink state */
3684 + case -ETIME:
3685 +@@ -2425,6 +2434,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
3686 + if (list_empty(&tp->rx_done))
3687 + goto out1;
3688 +
3689 ++ clear_bit(RX_EPROTO, &tp->flags);
3690 + INIT_LIST_HEAD(&rx_queue);
3691 + spin_lock_irqsave(&tp->rx_lock, flags);
3692 + list_splice_init(&tp->rx_done, &rx_queue);
3693 +@@ -2441,7 +2451,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
3694 +
3695 + agg = list_entry(cursor, struct rx_agg, list);
3696 + urb = agg->urb;
3697 +- if (urb->actual_length < ETH_ZLEN)
3698 ++ if (urb->status != 0 || urb->actual_length < ETH_ZLEN)
3699 + goto submit;
3700 +
3701 + agg_free = rtl_get_free_rx(tp, GFP_ATOMIC);
3702 +@@ -6643,6 +6653,10 @@ static void rtl_work_func_t(struct work_struct *work)
3703 + netif_carrier_ok(tp->netdev))
3704 + tasklet_schedule(&tp->tx_tl);
3705 +
3706 ++ if (test_and_clear_bit(RX_EPROTO, &tp->flags) &&
3707 ++ !list_empty(&tp->rx_done))
3708 ++ napi_schedule(&tp->napi);
3709 ++
3710 + mutex_unlock(&tp->control);
3711 +
3712 + out1:
3713 +diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
3714 +index 014868752cd4d..dcefdb42ac463 100644
3715 +--- a/drivers/pci/hotplug/s390_pci_hpc.c
3716 ++++ b/drivers/pci/hotplug/s390_pci_hpc.c
3717 +@@ -62,14 +62,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
3718 + struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev,
3719 + hotplug_slot);
3720 +
3721 +- switch (zdev->state) {
3722 +- case ZPCI_FN_STATE_STANDBY:
3723 +- *value = 0;
3724 +- break;
3725 +- default:
3726 +- *value = 1;
3727 +- break;
3728 +- }
3729 ++ *value = zpci_is_device_configured(zdev) ? 1 : 0;
3730 + return 0;
3731 + }
3732 +
3733 +diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
3734 +index 68b3886f9f0f3..dfd8888a222a4 100644
3735 +--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
3736 ++++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
3737 +@@ -1644,8 +1644,8 @@ int __maybe_unused stm32_pinctrl_resume(struct device *dev)
3738 + struct stm32_pinctrl_group *g = pctl->groups;
3739 + int i;
3740 +
3741 +- for (i = g->pin; i < g->pin + pctl->ngroups; i++)
3742 +- stm32_pinctrl_restore_gpio_regs(pctl, i);
3743 ++ for (i = 0; i < pctl->ngroups; i++, g++)
3744 ++ stm32_pinctrl_restore_gpio_regs(pctl, g->pin);
3745 +
3746 + return 0;
3747 + }
3748 +diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
3749 +index 25b98b12439f1..121037d0a9330 100644
3750 +--- a/drivers/platform/x86/intel_scu_ipc.c
3751 ++++ b/drivers/platform/x86/intel_scu_ipc.c
3752 +@@ -75,7 +75,7 @@ struct intel_scu_ipc_dev {
3753 + #define IPC_READ_BUFFER 0x90
3754 +
3755 + /* Timeout in jiffies */
3756 +-#define IPC_TIMEOUT (5 * HZ)
3757 ++#define IPC_TIMEOUT (10 * HZ)
3758 +
3759 + static struct intel_scu_ipc_dev *ipcdev; /* Only one for now */
3760 + static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */
3761 +@@ -247,7 +247,7 @@ static inline int busy_loop(struct intel_scu_ipc_dev *scu)
3762 + return -ETIMEDOUT;
3763 + }
3764 +
3765 +-/* Wait till ipc ioc interrupt is received or timeout in 3 HZ */
3766 ++/* Wait till ipc ioc interrupt is received or timeout in 10 HZ */
3767 + static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
3768 + {
3769 + int status;
3770 +diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
3771 +index 4dfc52e067041..7fd02aabd79a4 100644
3772 +--- a/drivers/ptp/ptp_clock.c
3773 ++++ b/drivers/ptp/ptp_clock.c
3774 +@@ -283,15 +283,22 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
3775 + /* Create a posix clock and link it to the device. */
3776 + err = posix_clock_register(&ptp->clock, &ptp->dev);
3777 + if (err) {
3778 ++ if (ptp->pps_source)
3779 ++ pps_unregister_source(ptp->pps_source);
3780 ++
3781 ++ kfree(ptp->vclock_index);
3782 ++
3783 ++ if (ptp->kworker)
3784 ++ kthread_destroy_worker(ptp->kworker);
3785 ++
3786 ++ put_device(&ptp->dev);
3787 ++
3788 + pr_err("failed to create posix clock\n");
3789 +- goto no_clock;
3790 ++ return ERR_PTR(err);
3791 + }
3792 +
3793 + return ptp;
3794 +
3795 +-no_clock:
3796 +- if (ptp->pps_source)
3797 +- pps_unregister_source(ptp->pps_source);
3798 + no_pps:
3799 + ptp_cleanup_pin_groups(ptp);
3800 + no_pin_groups:
3801 +diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
3802 +index 3f6f14f0cafb3..24b72ee4246fb 100644
3803 +--- a/drivers/scsi/hosts.c
3804 ++++ b/drivers/scsi/hosts.c
3805 +@@ -220,7 +220,8 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
3806 + goto fail;
3807 + }
3808 +
3809 +- shost->cmd_per_lun = min_t(short, shost->cmd_per_lun,
3810 ++ /* Use min_t(int, ...) in case shost->can_queue exceeds SHRT_MAX */
3811 ++ shost->cmd_per_lun = min_t(int, shost->cmd_per_lun,
3812 + shost->can_queue);
3813 +
3814 + error = scsi_init_sense_cache(shost);
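
The hosts.c hunk fixes a truncation: min_t() casts both operands to the named type before comparing, so min_t(short, ...) turns a can_queue above SHRT_MAX into a wrapped, possibly zero or negative value. The demo below re-creates the macro's shape to show the effect:

    #include <stdio.h>
    #include <limits.h>

    /* Same shape as the kernel macro: both operands are cast first. */
    #define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

    int main(void)
    {
            int cmd_per_lun = 3;
            int can_queue = SHRT_MAX + 1;   /* 32768: wraps as a short */

            /* Old code: (short)32768 is -32768, so the "minimum" is bogus. */
            printf("min_t(short, ...) = %d\n",
                   (int)min_t(short, cmd_per_lun, can_queue));

            /* Fixed code: compare as int, then assign. */
            printf("min_t(int,   ...) = %d\n",
                   min_t(int, cmd_per_lun, can_queue));
            return 0;
    }
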
3815 +diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
3816 +index 24ac7ddec7494..206c2598ade30 100644
3817 +--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
3818 ++++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
3819 +@@ -3755,7 +3755,7 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3820 + shost->max_lun = -1;
3821 + shost->unique_id = mrioc->id;
3822 +
3823 +- shost->max_channel = 1;
3824 ++ shost->max_channel = 0;
3825 + shost->max_id = 0xFFFFFFFF;
3826 +
3827 + if (prot_mask >= 0)
3828 +diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
3829 +index d42b2ad840498..2304f54fdc935 100644
3830 +--- a/drivers/scsi/qla2xxx/qla_bsg.c
3831 ++++ b/drivers/scsi/qla2xxx/qla_bsg.c
3832 +@@ -415,7 +415,7 @@ done_unmap_sg:
3833 + goto done_free_fcport;
3834 +
3835 + done_free_fcport:
3836 +- if (bsg_request->msgcode == FC_BSG_RPT_ELS)
3837 ++ if (bsg_request->msgcode != FC_BSG_RPT_ELS)
3838 + qla2x00_free_fcport(fcport);
3839 + done:
3840 + return rval;
3841 +diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
3842 +index 922e4c7bd88e4..78343d3f93857 100644
3843 +--- a/drivers/scsi/scsi_transport_iscsi.c
3844 ++++ b/drivers/scsi/scsi_transport_iscsi.c
3845 +@@ -2930,8 +2930,6 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
3846 + session->recovery_tmo = value;
3847 + break;
3848 + default:
3849 +- err = transport->set_param(conn, ev->u.set_param.param,
3850 +- data, ev->u.set_param.len);
3851 + if ((conn->state == ISCSI_CONN_BOUND) ||
3852 + (conn->state == ISCSI_CONN_UP)) {
3853 + err = transport->set_param(conn, ev->u.set_param.param,
3854 +diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
3855 +index 37506b3fe5a92..5fa1120a87f7e 100644
3856 +--- a/drivers/scsi/storvsc_drv.c
3857 ++++ b/drivers/scsi/storvsc_drv.c
3858 +@@ -1285,11 +1285,15 @@ static void storvsc_on_channel_callback(void *context)
3859 + foreach_vmbus_pkt(desc, channel) {
3860 + struct vstor_packet *packet = hv_pkt_data(desc);
3861 + struct storvsc_cmd_request *request = NULL;
3862 ++ u32 pktlen = hv_pkt_datalen(desc);
3863 + u64 rqst_id = desc->trans_id;
3864 ++ u32 minlen = rqst_id ? sizeof(struct vstor_packet) -
3865 ++ stor_device->vmscsi_size_delta : sizeof(enum vstor_packet_operation);
3866 +
3867 +- if (hv_pkt_datalen(desc) < sizeof(struct vstor_packet) -
3868 +- stor_device->vmscsi_size_delta) {
3869 +- dev_err(&device->device, "Invalid packet len\n");
3870 ++ if (pktlen < minlen) {
3871 ++ dev_err(&device->device,
3872 ++ "Invalid pkt: id=%llu, len=%u, minlen=%u\n",
3873 ++ rqst_id, pktlen, minlen);
3874 + continue;
3875 + }
3876 +
3877 +@@ -1302,13 +1306,23 @@ static void storvsc_on_channel_callback(void *context)
3878 + if (rqst_id == 0) {
3879 + /*
3880 + * storvsc_on_receive() looks at the vstor_packet in the message
3881 +- * from the ring buffer. If the operation in the vstor_packet is
3882 +- * COMPLETE_IO, then we call storvsc_on_io_completion(), and
3883 +- * dereference the guest memory address. Make sure we don't call
3884 +- * storvsc_on_io_completion() with a guest memory address that is
3885 +- * zero if Hyper-V were to construct and send such a bogus packet.
3886 ++ * from the ring buffer.
3887 ++ *
3888 ++ * - If the operation in the vstor_packet is COMPLETE_IO, then
3889 ++ * we call storvsc_on_io_completion(), and dereference the
3890 ++ * guest memory address. Make sure we don't call
3891 ++ * storvsc_on_io_completion() with a guest memory address
3892 ++ * that is zero if Hyper-V were to construct and send such
3893 ++ * a bogus packet.
3894 ++ *
3895 ++ * - If the operation in the vstor_packet is FCHBA_DATA, then
3896 ++ * we call cache_wwn(), and access the data payload area of
3897 ++ * the packet (wwn_packet); however, there is no guarantee
3898 ++ * that the packet is big enough to contain such area.
3899 ++ * Future-proof the code by rejecting such a bogus packet.
3900 + */
3901 +- if (packet->operation == VSTOR_OPERATION_COMPLETE_IO) {
3902 ++ if (packet->operation == VSTOR_OPERATION_COMPLETE_IO ||
3903 ++ packet->operation == VSTOR_OPERATION_FCHBA_DATA) {
3904 + dev_err(&device->device, "Invalid packet with ID of 0\n");
3905 + continue;
3906 + }
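
The storvsc change computes the minimum packet length from the request ID before reading any field: completions (rqst_id != 0) must carry a whole vstor_packet, while unsolicited messages only need the operation code. A standalone sketch of that validate-before-parse order, with invented sizes and types:

    #include <stdio.h>
    #include <stdint.h>

    /* Invented stand-in for the on-the-wire structure. */
    struct full_pkt { uint32_t op; uint32_t status; uint8_t payload[48]; };

    static int handle_pkt(const void *buf, size_t len, uint64_t rqst_id)
    {
            /* Unsolicited messages (rqst_id == 0) only need the op field;
             * completions must carry the whole structure. */
            size_t minlen = rqst_id ? sizeof(struct full_pkt)
                                    : sizeof(uint32_t);

            (void)buf;              /* a real handler would parse it now */
            if (len < minlen) {
                    fprintf(stderr, "invalid pkt: id=%llu len=%zu min=%zu\n",
                            (unsigned long long)rqst_id, len, minlen);
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            uint8_t small[4] = { 0 };

            handle_pkt(small, sizeof(small), 7);    /* too short: rejected */
            handle_pkt(small, sizeof(small), 0);    /* op-only: accepted */
            return 0;
    }
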
3907 +diff --git a/drivers/spi/spi-mux.c b/drivers/spi/spi-mux.c
3908 +index 9708b7827ff70..f5d32ec4634e3 100644
3909 +--- a/drivers/spi/spi-mux.c
3910 ++++ b/drivers/spi/spi-mux.c
3911 +@@ -137,6 +137,13 @@ static int spi_mux_probe(struct spi_device *spi)
3912 + priv = spi_controller_get_devdata(ctlr);
3913 + priv->spi = spi;
3914 +
3915 ++ /*
3916 ++ * Increase the lockdep class, as these locks are taken while the parent
3917 ++ * bus already holds its own instance of them.
3918 ++ */
3919 ++ lockdep_set_subclass(&ctlr->io_mutex, 1);
3920 ++ lockdep_set_subclass(&ctlr->add_lock, 1);
3921 ++
3922 + priv->mux = devm_mux_control_get(&spi->dev, NULL);
3923 + if (IS_ERR(priv->mux)) {
3924 + ret = dev_err_probe(&spi->dev, PTR_ERR(priv->mux),
3925 +diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
3926 +index f95f7666cb5b7..3093e0041158c 100644
3927 +--- a/drivers/spi/spi.c
3928 ++++ b/drivers/spi/spi.c
3929 +@@ -480,12 +480,6 @@ static LIST_HEAD(spi_controller_list);
3930 + */
3931 + static DEFINE_MUTEX(board_lock);
3932 +
3933 +-/*
3934 +- * Prevents addition of devices with same chip select and
3935 +- * addition of devices below an unregistering controller.
3936 +- */
3937 +-static DEFINE_MUTEX(spi_add_lock);
3938 +-
3939 + /**
3940 + * spi_alloc_device - Allocate a new SPI device
3941 + * @ctlr: Controller to which device is connected
3942 +@@ -638,9 +632,9 @@ int spi_add_device(struct spi_device *spi)
3943 + * chipselect **BEFORE** we call setup(), else we'll trash
3944 + * its configuration. Lock against concurrent add() calls.
3945 + */
3946 +- mutex_lock(&spi_add_lock);
3947 ++ mutex_lock(&ctlr->add_lock);
3948 + status = __spi_add_device(spi);
3949 +- mutex_unlock(&spi_add_lock);
3950 ++ mutex_unlock(&ctlr->add_lock);
3951 + return status;
3952 + }
3953 + EXPORT_SYMBOL_GPL(spi_add_device);
3954 +@@ -660,7 +654,7 @@ static int spi_add_device_locked(struct spi_device *spi)
3955 + /* Set the bus ID string */
3956 + spi_dev_set_name(spi);
3957 +
3958 +- WARN_ON(!mutex_is_locked(&spi_add_lock));
3959 ++ WARN_ON(!mutex_is_locked(&ctlr->add_lock));
3960 + return __spi_add_device(spi);
3961 + }
3962 +
3963 +@@ -2555,6 +2549,12 @@ struct spi_controller *__spi_alloc_controller(struct device *dev,
3964 + return NULL;
3965 +
3966 + device_initialize(&ctlr->dev);
3967 ++ INIT_LIST_HEAD(&ctlr->queue);
3968 ++ spin_lock_init(&ctlr->queue_lock);
3969 ++ spin_lock_init(&ctlr->bus_lock_spinlock);
3970 ++ mutex_init(&ctlr->bus_lock_mutex);
3971 ++ mutex_init(&ctlr->io_mutex);
3972 ++ mutex_init(&ctlr->add_lock);
3973 + ctlr->bus_num = -1;
3974 + ctlr->num_chipselect = 1;
3975 + ctlr->slave = slave;
3976 +@@ -2827,11 +2827,6 @@ int spi_register_controller(struct spi_controller *ctlr)
3977 + return id;
3978 + ctlr->bus_num = id;
3979 + }
3980 +- INIT_LIST_HEAD(&ctlr->queue);
3981 +- spin_lock_init(&ctlr->queue_lock);
3982 +- spin_lock_init(&ctlr->bus_lock_spinlock);
3983 +- mutex_init(&ctlr->bus_lock_mutex);
3984 +- mutex_init(&ctlr->io_mutex);
3985 + ctlr->bus_lock_flag = 0;
3986 + init_completion(&ctlr->xfer_completion);
3987 + if (!ctlr->max_dma_len)
3988 +@@ -2968,7 +2963,7 @@ void spi_unregister_controller(struct spi_controller *ctlr)
3989 +
3990 + /* Prevent addition of new devices, unregister existing ones */
3991 + if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3992 +- mutex_lock(&spi_add_lock);
3993 ++ mutex_lock(&ctlr->add_lock);
3994 +
3995 + device_for_each_child(&ctlr->dev, NULL, __unregister);
3996 +
3997 +@@ -2999,7 +2994,7 @@ void spi_unregister_controller(struct spi_controller *ctlr)
3998 + mutex_unlock(&board_lock);
3999 +
4000 + if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
4001 +- mutex_unlock(&spi_add_lock);
4002 ++ mutex_unlock(&ctlr->add_lock);
4003 + }
4004 + EXPORT_SYMBOL_GPL(spi_unregister_controller);
4005 +
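
The SPI changes move add_lock (and the other controller locks) into __spi_alloc_controller() so each controller owns its locks before any device can be added, and spi-mux bumps the lockdep subclass because the child controller's add_lock is taken while the parent bus still holds its own. A toy pthread model of that nesting; it is safe because the two mutexes are distinct objects (compile with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    struct controller {
            pthread_mutex_t add_lock;
            struct controller *parent;
    };

    static void add_device(struct controller *c)
    {
            if (c->parent)
                    pthread_mutex_lock(&c->parent->add_lock);
            pthread_mutex_lock(&c->add_lock);   /* nested, not recursive */

            printf("device added under %p\n", (void *)c);

            pthread_mutex_unlock(&c->add_lock);
            if (c->parent)
                    pthread_mutex_unlock(&c->parent->add_lock);
    }

    int main(void)
    {
            struct controller parent = { PTHREAD_MUTEX_INITIALIZER, NULL };
            struct controller child  = { PTHREAD_MUTEX_INITIALIZER, &parent };

            add_device(&child);
            return 0;
    }

Lockdep, unlike pthreads, classes locks by initialization site, so without the subclass bump it would see the same class taken twice and report a false deadlock.
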
4006 +diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
4007 +index da19d7987d005..78fd365893c13 100644
4008 +--- a/drivers/thunderbolt/Makefile
4009 ++++ b/drivers/thunderbolt/Makefile
4010 +@@ -7,6 +7,7 @@ thunderbolt-objs += usb4_port.o nvm.o retimer.o quirks.o
4011 + thunderbolt-${CONFIG_ACPI} += acpi.o
4012 + thunderbolt-$(CONFIG_DEBUG_FS) += debugfs.o
4013 + thunderbolt-${CONFIG_USB4_KUNIT_TEST} += test.o
4014 ++CFLAGS_test.o += $(DISABLE_STRUCTLEAK_PLUGIN)
4015 +
4016 + thunderbolt_dma_test-${CONFIG_USB4_DMA_TEST} += dma_test.o
4017 + obj-$(CONFIG_USB4_DMA_TEST) += thunderbolt_dma_test.o
4018 +diff --git a/fs/autofs/waitq.c b/fs/autofs/waitq.c
4019 +index 16b5fca0626e6..54c1f8b8b0757 100644
4020 +--- a/fs/autofs/waitq.c
4021 ++++ b/fs/autofs/waitq.c
4022 +@@ -358,7 +358,7 @@ int autofs_wait(struct autofs_sb_info *sbi,
4023 + qstr.len = strlen(p);
4024 + offset = p - name;
4025 + }
4026 +- qstr.hash = full_name_hash(dentry, name, qstr.len);
4027 ++ qstr.hash = full_name_hash(dentry, qstr.name, qstr.len);
4028 +
4029 + if (mutex_lock_interruptible(&sbi->wq_mutex)) {
4030 + kfree(name);
4031 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
4032 +index 17f0de5bb8733..539c5db2b22b8 100644
4033 +--- a/fs/btrfs/tree-log.c
4034 ++++ b/fs/btrfs/tree-log.c
4035 +@@ -939,9 +939,11 @@ out:
4036 + }
4037 +
4038 + /*
4039 +- * helper function to see if a given name and sequence number found
4040 +- * in an inode back reference are already in a directory and correctly
4041 +- * point to this inode
4042 ++ * See if a given name and sequence number found in an inode back reference are
4043 ++ * already in a directory and correctly point to this inode.
4044 ++ *
4045 ++ * Returns: < 0 on error, 0 if the directory entry does not exist and 1 if it
4046 ++ * exists.
4047 + */
4048 + static noinline int inode_in_dir(struct btrfs_root *root,
4049 + struct btrfs_path *path,
4050 +@@ -950,29 +952,35 @@ static noinline int inode_in_dir(struct btrfs_root *root,
4051 + {
4052 + struct btrfs_dir_item *di;
4053 + struct btrfs_key location;
4054 +- int match = 0;
4055 ++ int ret = 0;
4056 +
4057 + di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
4058 + index, name, name_len, 0);
4059 +- if (di && !IS_ERR(di)) {
4060 ++ if (IS_ERR(di)) {
4061 ++ if (PTR_ERR(di) != -ENOENT)
4062 ++ ret = PTR_ERR(di);
4063 ++ goto out;
4064 ++ } else if (di) {
4065 + btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
4066 + if (location.objectid != objectid)
4067 + goto out;
4068 +- } else
4069 ++ } else {
4070 + goto out;
4071 +- btrfs_release_path(path);
4072 ++ }
4073 +
4074 ++ btrfs_release_path(path);
4075 + di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
4076 +- if (di && !IS_ERR(di)) {
4077 +- btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
4078 +- if (location.objectid != objectid)
4079 +- goto out;
4080 +- } else
4081 ++ if (IS_ERR(di)) {
4082 ++ ret = PTR_ERR(di);
4083 + goto out;
4084 +- match = 1;
4085 ++ } else if (di) {
4086 ++ btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
4087 ++ if (location.objectid == objectid)
4088 ++ ret = 1;
4089 ++ }
4090 + out:
4091 + btrfs_release_path(path);
4092 +- return match;
4093 ++ return ret;
4094 + }
4095 +
4096 + /*
4097 +@@ -1522,10 +1530,12 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
4098 + if (ret)
4099 + goto out;
4100 +
4101 +- /* if we already have a perfect match, we're done */
4102 +- if (!inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
4103 +- btrfs_ino(BTRFS_I(inode)), ref_index,
4104 +- name, namelen)) {
4105 ++ ret = inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
4106 ++ btrfs_ino(BTRFS_I(inode)), ref_index,
4107 ++ name, namelen);
4108 ++ if (ret < 0) {
4109 ++ goto out;
4110 ++ } else if (ret == 0) {
4111 + /*
4112 + * look for a conflicting back reference in the
4113 + * metadata. if we find one we have to unlink that name
4114 +@@ -1585,6 +1595,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
4115 + if (ret)
4116 + goto out;
4117 + }
4118 ++ /* Else, ret == 1, we already have a perfect match, we're done. */
4119 +
4120 + ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
4121 + kfree(name);
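
The tree-log.c rework turns inode_in_dir() from a boolean into the tri-state convention spelled out in its new comment (< 0 error, 0 absent, 1 present), so a transient lookup error is no longer folded into "name not in directory". A minimal illustration of the convention and how a caller must branch on it (lookup() is an invented stand-in):

    #include <stdio.h>

    /* Invented stand-in: < 0 error, 0 absent, 1 present. */
    static int lookup(int key)
    {
            if (key < 0)
                    return -5;      /* simulate -EIO */
            return key == 42;
    }

    static int caller(int key)
    {
            int ret = lookup(key);

            if (ret < 0)
                    return ret;     /* propagate; never treat as "absent" */
            if (ret == 0)
                    printf("key %d absent: take the slow path\n", key);
            else
                    printf("key %d present: nothing to do\n", key);
            return 0;
    }

    int main(void)
    {
            caller(42);
            caller(7);
            printf("caller(-1) = %d\n", caller(-1));
            return 0;
    }
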
4122 +diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
4123 +index 3296a93be907c..9290f97446c24 100644
4124 +--- a/fs/ceph/caps.c
4125 ++++ b/fs/ceph/caps.c
4126 +@@ -2264,7 +2264,6 @@ static int unsafe_request_wait(struct inode *inode)
4127 +
4128 + int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
4129 + {
4130 +- struct ceph_file_info *fi = file->private_data;
4131 + struct inode *inode = file->f_mapping->host;
4132 + struct ceph_inode_info *ci = ceph_inode(inode);
4133 + u64 flush_tid;
4134 +@@ -2299,14 +2298,9 @@ int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
4135 + if (err < 0)
4136 + ret = err;
4137 +
4138 +- if (errseq_check(&ci->i_meta_err, READ_ONCE(fi->meta_err))) {
4139 +- spin_lock(&file->f_lock);
4140 +- err = errseq_check_and_advance(&ci->i_meta_err,
4141 +- &fi->meta_err);
4142 +- spin_unlock(&file->f_lock);
4143 +- if (err < 0)
4144 +- ret = err;
4145 +- }
4146 ++ err = file_check_and_advance_wb_err(file);
4147 ++ if (err < 0)
4148 ++ ret = err;
4149 + out:
4150 + dout("fsync %p%s result=%d\n", inode, datasync ? " datasync" : "", ret);
4151 + return ret;
4152 +diff --git a/fs/ceph/file.c b/fs/ceph/file.c
4153 +index 3daebfaec8c6d..8a50a74c80177 100644
4154 +--- a/fs/ceph/file.c
4155 ++++ b/fs/ceph/file.c
4156 +@@ -233,7 +233,6 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
4157 +
4158 + spin_lock_init(&fi->rw_contexts_lock);
4159 + INIT_LIST_HEAD(&fi->rw_contexts);
4160 +- fi->meta_err = errseq_sample(&ci->i_meta_err);
4161 + fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);
4162 +
4163 + return 0;
4164 +diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
4165 +index 1bd2cc015913f..4648a4b5d9c51 100644
4166 +--- a/fs/ceph/inode.c
4167 ++++ b/fs/ceph/inode.c
4168 +@@ -541,8 +541,6 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
4169 +
4170 + ceph_fscache_inode_init(ci);
4171 +
4172 +- ci->i_meta_err = 0;
4173 +-
4174 + return &ci->vfs_inode;
4175 + }
4176 +
4177 +diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
4178 +index 52b3ddc5f1991..363f4f66b048f 100644
4179 +--- a/fs/ceph/mds_client.c
4180 ++++ b/fs/ceph/mds_client.c
4181 +@@ -1479,7 +1479,6 @@ static void cleanup_session_requests(struct ceph_mds_client *mdsc,
4182 + {
4183 + struct ceph_mds_request *req;
4184 + struct rb_node *p;
4185 +- struct ceph_inode_info *ci;
4186 +
4187 + dout("cleanup_session_requests mds%d\n", session->s_mds);
4188 + mutex_lock(&mdsc->mutex);
4189 +@@ -1488,16 +1487,10 @@ static void cleanup_session_requests(struct ceph_mds_client *mdsc,
4190 + struct ceph_mds_request, r_unsafe_item);
4191 + pr_warn_ratelimited(" dropping unsafe request %llu\n",
4192 + req->r_tid);
4193 +- if (req->r_target_inode) {
4194 +- /* dropping unsafe change of inode's attributes */
4195 +- ci = ceph_inode(req->r_target_inode);
4196 +- errseq_set(&ci->i_meta_err, -EIO);
4197 +- }
4198 +- if (req->r_unsafe_dir) {
4199 +- /* dropping unsafe directory operation */
4200 +- ci = ceph_inode(req->r_unsafe_dir);
4201 +- errseq_set(&ci->i_meta_err, -EIO);
4202 +- }
4203 ++ if (req->r_target_inode)
4204 ++ mapping_set_error(req->r_target_inode->i_mapping, -EIO);
4205 ++ if (req->r_unsafe_dir)
4206 ++ mapping_set_error(req->r_unsafe_dir->i_mapping, -EIO);
4207 + __unregister_request(mdsc, req);
4208 + }
4209 + /* zero r_attempts, so kick_requests() will re-send requests */
4210 +@@ -1664,7 +1657,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
4211 + spin_unlock(&mdsc->cap_dirty_lock);
4212 +
4213 + if (dirty_dropped) {
4214 +- errseq_set(&ci->i_meta_err, -EIO);
4215 ++ mapping_set_error(inode->i_mapping, -EIO);
4216 +
4217 + if (ci->i_wrbuffer_ref_head == 0 &&
4218 + ci->i_wr_ref == 0 &&
4219 +diff --git a/fs/ceph/super.c b/fs/ceph/super.c
4220 +index 9b1b7f4cfdd4b..fd8742bae8471 100644
4221 +--- a/fs/ceph/super.c
4222 ++++ b/fs/ceph/super.c
4223 +@@ -1002,16 +1002,16 @@ static int ceph_compare_super(struct super_block *sb, struct fs_context *fc)
4224 + struct ceph_fs_client *new = fc->s_fs_info;
4225 + struct ceph_mount_options *fsopt = new->mount_options;
4226 + struct ceph_options *opt = new->client->options;
4227 +- struct ceph_fs_client *other = ceph_sb_to_client(sb);
4228 ++ struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
4229 +
4230 + dout("ceph_compare_super %p\n", sb);
4231 +
4232 +- if (compare_mount_options(fsopt, opt, other)) {
4233 ++ if (compare_mount_options(fsopt, opt, fsc)) {
4234 + dout("monitor(s)/mount options don't match\n");
4235 + return 0;
4236 + }
4237 + if ((opt->flags & CEPH_OPT_FSID) &&
4238 +- ceph_fsid_compare(&opt->fsid, &other->client->fsid)) {
4239 ++ ceph_fsid_compare(&opt->fsid, &fsc->client->fsid)) {
4240 + dout("fsid doesn't match\n");
4241 + return 0;
4242 + }
4243 +@@ -1019,6 +1019,17 @@ static int ceph_compare_super(struct super_block *sb, struct fs_context *fc)
4244 + dout("flags differ\n");
4245 + return 0;
4246 + }
4247 ++
4248 ++ if (fsc->blocklisted && !ceph_test_mount_opt(fsc, CLEANRECOVER)) {
4249 ++ dout("client is blocklisted (and CLEANRECOVER is not set)\n");
4250 ++ return 0;
4251 ++ }
4252 ++
4253 ++ if (fsc->mount_state == CEPH_MOUNT_SHUTDOWN) {
4254 ++ dout("client has been forcibly unmounted\n");
4255 ++ return 0;
4256 ++ }
4257 ++
4258 + return 1;
4259 + }
4260 +
4261 +diff --git a/fs/ceph/super.h b/fs/ceph/super.h
4262 +index 2200ed76b1230..3f96d2ff91ef2 100644
4263 +--- a/fs/ceph/super.h
4264 ++++ b/fs/ceph/super.h
4265 +@@ -430,8 +430,6 @@ struct ceph_inode_info {
4266 + #ifdef CONFIG_CEPH_FSCACHE
4267 + struct fscache_cookie *fscache;
4268 + #endif
4269 +- errseq_t i_meta_err;
4270 +-
4271 + struct inode vfs_inode; /* at end */
4272 + };
4273 +
4274 +@@ -775,7 +773,6 @@ struct ceph_file_info {
4275 + spinlock_t rw_contexts_lock;
4276 + struct list_head rw_contexts;
4277 +
4278 +- errseq_t meta_err;
4279 + u32 filp_gen;
4280 + atomic_t num_locks;
4281 + };
4282 +diff --git a/fs/kernel_read_file.c b/fs/kernel_read_file.c
4283 +index 87aac4c72c37d..1b07550485b96 100644
4284 +--- a/fs/kernel_read_file.c
4285 ++++ b/fs/kernel_read_file.c
4286 +@@ -178,7 +178,7 @@ int kernel_read_file_from_fd(int fd, loff_t offset, void **buf,
4287 + struct fd f = fdget(fd);
4288 + int ret = -EBADF;
4289 +
4290 +- if (!f.file)
4291 ++ if (!f.file || !(f.file->f_mode & FMODE_READ))
4292 + goto out;
4293 +
4294 + ret = kernel_read_file(f.file, offset, buf, buf_size, file_size, id);
4295 +diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
4296 +index 09ae1a0873d05..070e5dd03e26f 100644
4297 +--- a/fs/nfsd/nfsctl.c
4298 ++++ b/fs/nfsd/nfsctl.c
4299 +@@ -793,7 +793,10 @@ out_close:
4300 + svc_xprt_put(xprt);
4301 + }
4302 + out_err:
4303 +- nfsd_destroy(net);
4304 ++ if (!list_empty(&nn->nfsd_serv->sv_permsocks))
4305 ++ nn->nfsd_serv->sv_nrthreads--;
4306 ++ else
4307 ++ nfsd_destroy(net);
4308 + return err;
4309 + }
4310 +
4311 +diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
4312 +index f1cc8258d34a4..5d9ae17bd443f 100644
4313 +--- a/fs/ocfs2/alloc.c
4314 ++++ b/fs/ocfs2/alloc.c
4315 +@@ -7045,7 +7045,7 @@ void ocfs2_set_inode_data_inline(struct inode *inode, struct ocfs2_dinode *di)
4316 + int ocfs2_convert_inline_data_to_extents(struct inode *inode,
4317 + struct buffer_head *di_bh)
4318 + {
4319 +- int ret, i, has_data, num_pages = 0;
4320 ++ int ret, has_data, num_pages = 0;
4321 + int need_free = 0;
4322 + u32 bit_off, num;
4323 + handle_t *handle;
4324 +@@ -7054,26 +7054,17 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
4325 + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
4326 + struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
4327 + struct ocfs2_alloc_context *data_ac = NULL;
4328 +- struct page **pages = NULL;
4329 +- loff_t end = osb->s_clustersize;
4330 ++ struct page *page = NULL;
4331 + struct ocfs2_extent_tree et;
4332 + int did_quota = 0;
4333 +
4334 + has_data = i_size_read(inode) ? 1 : 0;
4335 +
4336 + if (has_data) {
4337 +- pages = kcalloc(ocfs2_pages_per_cluster(osb->sb),
4338 +- sizeof(struct page *), GFP_NOFS);
4339 +- if (pages == NULL) {
4340 +- ret = -ENOMEM;
4341 +- mlog_errno(ret);
4342 +- return ret;
4343 +- }
4344 +-
4345 + ret = ocfs2_reserve_clusters(osb, 1, &data_ac);
4346 + if (ret) {
4347 + mlog_errno(ret);
4348 +- goto free_pages;
4349 ++ goto out;
4350 + }
4351 + }
4352 +
4353 +@@ -7093,7 +7084,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
4354 + }
4355 +
4356 + if (has_data) {
4357 +- unsigned int page_end;
4358 ++ unsigned int page_end = min_t(unsigned, PAGE_SIZE,
4359 ++ osb->s_clustersize);
4360 + u64 phys;
4361 +
4362 + ret = dquot_alloc_space_nodirty(inode,
4363 +@@ -7117,15 +7109,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
4364 + */
4365 + block = phys = ocfs2_clusters_to_blocks(inode->i_sb, bit_off);
4366 +
4367 +- /*
4368 +- * Non sparse file systems zero on extend, so no need
4369 +- * to do that now.
4370 +- */
4371 +- if (!ocfs2_sparse_alloc(osb) &&
4372 +- PAGE_SIZE < osb->s_clustersize)
4373 +- end = PAGE_SIZE;
4374 +-
4375 +- ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages);
4376 ++ ret = ocfs2_grab_eof_pages(inode, 0, page_end, &page,
4377 ++ &num_pages);
4378 + if (ret) {
4379 + mlog_errno(ret);
4380 + need_free = 1;
4381 +@@ -7136,20 +7121,15 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
4382 + * This should populate the 1st page for us and mark
4383 + * it up to date.
4384 + */
4385 +- ret = ocfs2_read_inline_data(inode, pages[0], di_bh);
4386 ++ ret = ocfs2_read_inline_data(inode, page, di_bh);
4387 + if (ret) {
4388 + mlog_errno(ret);
4389 + need_free = 1;
4390 + goto out_unlock;
4391 + }
4392 +
4393 +- page_end = PAGE_SIZE;
4394 +- if (PAGE_SIZE > osb->s_clustersize)
4395 +- page_end = osb->s_clustersize;
4396 +-
4397 +- for (i = 0; i < num_pages; i++)
4398 +- ocfs2_map_and_dirty_page(inode, handle, 0, page_end,
4399 +- pages[i], i > 0, &phys);
4400 ++ ocfs2_map_and_dirty_page(inode, handle, 0, page_end, page, 0,
4401 ++ &phys);
4402 + }
4403 +
4404 + spin_lock(&oi->ip_lock);
4405 +@@ -7180,8 +7160,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
4406 + }
4407 +
4408 + out_unlock:
4409 +- if (pages)
4410 +- ocfs2_unlock_and_free_pages(pages, num_pages);
4411 ++ if (page)
4412 ++ ocfs2_unlock_and_free_pages(&page, num_pages);
4413 +
4414 + out_commit:
4415 + if (ret < 0 && did_quota)
4416 +@@ -7205,8 +7185,6 @@ out_commit:
4417 + out:
4418 + if (data_ac)
4419 + ocfs2_free_alloc_context(data_ac);
4420 +-free_pages:
4421 +- kfree(pages);
4422 + return ret;
4423 + }
4424 +
4425 +diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
4426 +index c86bd4e60e207..5c914ce9b3ac9 100644
4427 +--- a/fs/ocfs2/super.c
4428 ++++ b/fs/ocfs2/super.c
4429 +@@ -2167,11 +2167,17 @@ static int ocfs2_initialize_super(struct super_block *sb,
4430 + }
4431 +
4432 + if (ocfs2_clusterinfo_valid(osb)) {
4433 ++ /*
4434 ++ * ci_stack and ci_cluster in ocfs2_cluster_info may not be null
4435 ++ * terminated, so make sure no overflow happens here by using
4436 ++ * memcpy. Destination strings will always be null terminated
4437 ++ * because osb is allocated using kzalloc.
4438 ++ */
4439 + osb->osb_stackflags =
4440 + OCFS2_RAW_SB(di)->s_cluster_info.ci_stackflags;
4441 +- strlcpy(osb->osb_cluster_stack,
4442 ++ memcpy(osb->osb_cluster_stack,
4443 + OCFS2_RAW_SB(di)->s_cluster_info.ci_stack,
4444 +- OCFS2_STACK_LABEL_LEN + 1);
4445 ++ OCFS2_STACK_LABEL_LEN);
4446 + if (strlen(osb->osb_cluster_stack) != OCFS2_STACK_LABEL_LEN) {
4447 + mlog(ML_ERROR,
4448 + "couldn't mount because of an invalid "
4449 +@@ -2180,9 +2186,9 @@ static int ocfs2_initialize_super(struct super_block *sb,
4450 + status = -EINVAL;
4451 + goto bail;
4452 + }
4453 +- strlcpy(osb->osb_cluster_name,
4454 ++ memcpy(osb->osb_cluster_name,
4455 + OCFS2_RAW_SB(di)->s_cluster_info.ci_cluster,
4456 +- OCFS2_CLUSTER_NAME_LEN + 1);
4457 ++ OCFS2_CLUSTER_NAME_LEN);
4458 + } else {
4459 + /* The empty string is identical with classic tools that
4460 + * don't know about s_cluster_info. */
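The strlcpy()-to-memcpy() replacements above exist because ci_stack and ci_cluster are fixed-width on-disk fields that are not guaranteed to be NUL-terminated, and strlcpy() runs strlen() on its source, so it can read past such a field. Copying exactly the label length with memcpy() into a zeroed (kzalloc'd) destination that is one byte larger yields a terminated string with no over-read. A hedged userspace sketch of the same pattern, with an invented field size:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define STACK_LABEL_LEN 4           /* stand-in for OCFS2_STACK_LABEL_LEN */

    struct disk_info {
        char ci_stack[STACK_LABEL_LEN]; /* may NOT be NUL-terminated */
    };

    int main(void)
    {
        struct disk_info di = { { 'o', '2', 'c', 'b' } }; /* no terminator */

        /* destination is one byte larger and zero-filled, as with kzalloc */
        char *stack = calloc(1, STACK_LABEL_LEN + 1);
        if (!stack)
            return 1;

        memcpy(stack, di.ci_stack, STACK_LABEL_LEN); /* bounded copy */
        printf("stack = \"%s\", len %zu\n", stack, strlen(stack));
        free(stack);
        return 0;
    }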
4461 +diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
4462 +index c830cc4ea60fb..a87af3333739d 100644
4463 +--- a/fs/userfaultfd.c
4464 ++++ b/fs/userfaultfd.c
4465 +@@ -1826,9 +1826,15 @@ static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx,
4466 + if (mode_wp && mode_dontwake)
4467 + return -EINVAL;
4468 +
4469 +- ret = mwriteprotect_range(ctx->mm, uffdio_wp.range.start,
4470 +- uffdio_wp.range.len, mode_wp,
4471 +- &ctx->mmap_changing);
4472 ++ if (mmget_not_zero(ctx->mm)) {
4473 ++ ret = mwriteprotect_range(ctx->mm, uffdio_wp.range.start,
4474 ++ uffdio_wp.range.len, mode_wp,
4475 ++ &ctx->mmap_changing);
4476 ++ mmput(ctx->mm);
4477 ++ } else {
4478 ++ return -ESRCH;
4479 ++ }
4480 ++
4481 + if (ret)
4482 + return ret;
4483 +
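The userfaultfd fix brackets mwriteprotect_range() with mmget_not_zero()/mmput() so the ioctl returns -ESRCH instead of touching an mm whose user count has already dropped to zero. The "take a reference only while the count is still non-zero" idiom looks roughly like this in portable C11 atomics (types and names are illustrative, not the kernel's):

    #include <stdatomic.h>
    #include <stdio.h>

    struct mm_like { atomic_int users; };

    /* succeed only while at least one reference is still live */
    static int get_not_zero(struct mm_like *mm)
    {
        int old = atomic_load(&mm->users);
        while (old != 0) {
            if (atomic_compare_exchange_weak(&mm->users, &old, old + 1))
                return 1;   /* reference taken */
        }
        return 0;           /* already gone; caller bails out */
    }

    static void put_ref(struct mm_like *mm)
    {
        atomic_fetch_sub(&mm->users, 1);
    }

    int main(void)
    {
        struct mm_like mm = { 1 };
        if (get_not_zero(&mm)) {
            /* ... safe to operate on mm here ... */
            put_ref(&mm);
        }
        atomic_store(&mm.users, 0);
        printf("%s\n", get_not_zero(&mm) ? "got ref" : "-ESRCH path");
        return 0;
    }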
4484 +diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h
4485 +index 2aaa15779d50b..957ebec35aad0 100644
4486 +--- a/include/linux/elfcore.h
4487 ++++ b/include/linux/elfcore.h
4488 +@@ -109,7 +109,7 @@ static inline int elf_core_copy_task_fpregs(struct task_struct *t, struct pt_reg
4489 + #endif
4490 + }
4491 +
4492 +-#if defined(CONFIG_UM) || defined(CONFIG_IA64)
4493 ++#if (defined(CONFIG_UML) && defined(CONFIG_X86_32)) || defined(CONFIG_IA64)
4494 + /*
4495 + * These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out
4496 + * extra segments containing the gate DSO contents. Dumping its
4497 +diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
4498 +index 25a8be58d2895..9b8add8eac0cb 100644
4499 +--- a/include/linux/mlx5/driver.h
4500 ++++ b/include/linux/mlx5/driver.h
4501 +@@ -1135,7 +1135,6 @@ int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
4502 + int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
4503 + bool mlx5_lag_is_roce(struct mlx5_core_dev *dev);
4504 + bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
4505 +-bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev);
4506 + bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
4507 + struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
4508 + u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
4509 +diff --git a/include/linux/secretmem.h b/include/linux/secretmem.h
4510 +index 21c3771e6a56b..988528b5da438 100644
4511 +--- a/include/linux/secretmem.h
4512 ++++ b/include/linux/secretmem.h
4513 +@@ -23,7 +23,7 @@ static inline bool page_is_secretmem(struct page *page)
4514 + mapping = (struct address_space *)
4515 + ((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
4516 +
4517 +- if (mapping != page->mapping)
4518 ++ if (!mapping || mapping != page->mapping)
4519 + return false;
4520 +
4521 + return mapping->a_ops == &secretmem_aops;
4522 +diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
4523 +index 97b8d12b5f2bb..5d80c6fd2a223 100644
4524 +--- a/include/linux/spi/spi.h
4525 ++++ b/include/linux/spi/spi.h
4526 +@@ -527,6 +527,9 @@ struct spi_controller {
4527 + /* I/O mutex */
4528 + struct mutex io_mutex;
4529 +
4530 ++ /* Used to avoid adding the same CS twice */
4531 ++ struct mutex add_lock;
4532 ++
4533 + /* lock and mutex for SPI bus locking */
4534 + spinlock_t bus_lock_spinlock;
4535 + struct mutex bus_lock_mutex;
4536 +diff --git a/include/linux/trace_recursion.h b/include/linux/trace_recursion.h
4537 +index a9f9c5714e650..fe95f09225266 100644
4538 +--- a/include/linux/trace_recursion.h
4539 ++++ b/include/linux/trace_recursion.h
4540 +@@ -16,23 +16,8 @@
4541 + * When function tracing occurs, the following steps are made:
4542 + * If arch does not support a ftrace feature:
4543 + * call internal function (uses INTERNAL bits) which calls...
4544 +- * If callback is registered to the "global" list, the list
4545 +- * function is called and recursion checks the GLOBAL bits.
4546 +- * then this function calls...
4547 + * The function callback, which can use the FTRACE bits to
4548 + * check for recursion.
4549 +- *
4550 +- * Now if the arch does not support a feature, and it calls
4551 +- * the global list function which calls the ftrace callback
4552 +- * all three of these steps will do a recursion protection.
4553 +- * There's no reason to do one if the previous caller already
4554 +- * did. The recursion that we are protecting against will
4555 +- * go through the same steps again.
4556 +- *
4557 +- * To prevent the multiple recursion checks, if a recursion
4558 +- * bit is set that is higher than the MAX bit of the current
4559 +- * check, then we know that the check was made by the previous
4560 +- * caller, and we can skip the current check.
4561 + */
4562 + enum {
4563 + /* Function recursion bits */
4564 +@@ -40,12 +25,14 @@ enum {
4565 + TRACE_FTRACE_NMI_BIT,
4566 + TRACE_FTRACE_IRQ_BIT,
4567 + TRACE_FTRACE_SIRQ_BIT,
4568 ++ TRACE_FTRACE_TRANSITION_BIT,
4569 +
4570 +- /* INTERNAL_BITs must be greater than FTRACE_BITs */
4571 ++ /* Internal use recursion bits */
4572 + TRACE_INTERNAL_BIT,
4573 + TRACE_INTERNAL_NMI_BIT,
4574 + TRACE_INTERNAL_IRQ_BIT,
4575 + TRACE_INTERNAL_SIRQ_BIT,
4576 ++ TRACE_INTERNAL_TRANSITION_BIT,
4577 +
4578 + TRACE_BRANCH_BIT,
4579 + /*
4580 +@@ -86,12 +73,6 @@ enum {
4581 + */
4582 + TRACE_GRAPH_NOTRACE_BIT,
4583 +
4584 +- /*
4585 +- * When transitioning between context, the preempt_count() may
4586 +- * not be correct. Allow for a single recursion to cover this case.
4587 +- */
4588 +- TRACE_TRANSITION_BIT,
4589 +-
4590 + /* Used to prevent recursion recording from recursing. */
4591 + TRACE_RECORD_RECURSION_BIT,
4592 + };
4593 +@@ -113,12 +94,10 @@ enum {
4594 + #define TRACE_CONTEXT_BITS 4
4595 +
4596 + #define TRACE_FTRACE_START TRACE_FTRACE_BIT
4597 +-#define TRACE_FTRACE_MAX ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)
4598 +
4599 + #define TRACE_LIST_START TRACE_INTERNAL_BIT
4600 +-#define TRACE_LIST_MAX ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
4601 +
4602 +-#define TRACE_CONTEXT_MASK TRACE_LIST_MAX
4603 ++#define TRACE_CONTEXT_MASK ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
4604 +
4605 + /*
4606 + * Used for setting context
4607 +@@ -132,6 +111,7 @@ enum {
4608 + TRACE_CTX_IRQ,
4609 + TRACE_CTX_SOFTIRQ,
4610 + TRACE_CTX_NORMAL,
4611 ++ TRACE_CTX_TRANSITION,
4612 + };
4613 +
4614 + static __always_inline int trace_get_context_bit(void)
4615 +@@ -160,45 +140,34 @@ extern void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip);
4616 + #endif
4617 +
4618 + static __always_inline int trace_test_and_set_recursion(unsigned long ip, unsigned long pip,
4619 +- int start, int max)
4620 ++ int start)
4621 + {
4622 + unsigned int val = READ_ONCE(current->trace_recursion);
4623 + int bit;
4624 +
4625 +- /* A previous recursion check was made */
4626 +- if ((val & TRACE_CONTEXT_MASK) > max)
4627 +- return 0;
4628 +-
4629 + bit = trace_get_context_bit() + start;
4630 + if (unlikely(val & (1 << bit))) {
4631 + /*
4632 + * It could be that preempt_count has not been updated during
4633 + * a switch between contexts. Allow for a single recursion.
4634 + */
4635 +- bit = TRACE_TRANSITION_BIT;
4636 ++ bit = TRACE_CTX_TRANSITION + start;
4637 + if (val & (1 << bit)) {
4638 + do_ftrace_record_recursion(ip, pip);
4639 + return -1;
4640 + }
4641 +- } else {
4642 +- /* Normal check passed, clear the transition to allow it again */
4643 +- val &= ~(1 << TRACE_TRANSITION_BIT);
4644 + }
4645 +
4646 + val |= 1 << bit;
4647 + current->trace_recursion = val;
4648 + barrier();
4649 +
4650 +- return bit + 1;
4651 ++ return bit;
4652 + }
4653 +
4654 + static __always_inline void trace_clear_recursion(int bit)
4655 + {
4656 +- if (!bit)
4657 +- return;
4658 +-
4659 + barrier();
4660 +- bit--;
4661 + trace_recursion_clear(bit);
4662 + }
4663 +
4664 +@@ -214,7 +183,7 @@ static __always_inline void trace_clear_recursion(int bit)
4665 + static __always_inline int ftrace_test_recursion_trylock(unsigned long ip,
4666 + unsigned long parent_ip)
4667 + {
4668 +- return trace_test_and_set_recursion(ip, parent_ip, TRACE_FTRACE_START, TRACE_FTRACE_MAX);
4669 ++ return trace_test_and_set_recursion(ip, parent_ip, TRACE_FTRACE_START);
4670 + }
4671 +
4672 + /**
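The trace_recursion.h rework drops the old "MAX bit" short-circuit and instead gives each recursion domain its own transition slot: the bit to test is simply the current context plus the domain's start offset, with one extra transition bit to tolerate a preempt_count that lags a context switch. The arithmetic can be sketched on a plain bitmask (everything simplified; only the enum ordering mirrors the hunk):

    #include <stdio.h>

    enum { CTX_NMI, CTX_IRQ, CTX_SOFTIRQ, CTX_NORMAL, CTX_TRANSITION };

    /* returns the bit taken, or -1 on genuine (disallowed) recursion */
    static int test_and_set_recursion(unsigned *mask, int ctx, int start)
    {
        int bit = ctx + start;

        if (*mask & (1u << bit)) {
            /* context bit already held: allow one transition retry */
            bit = CTX_TRANSITION + start;
            if (*mask & (1u << bit))
                return -1;
        }
        *mask |= 1u << bit;
        return bit;
    }

    static void clear_recursion(unsigned *mask, int bit)
    {
        *mask &= ~(1u << bit);
    }

    int main(void)
    {
        unsigned mask = 0;
        int a = test_and_set_recursion(&mask, CTX_NORMAL, 0); /* normal */
        int b = test_and_set_recursion(&mask, CTX_NORMAL, 0); /* transition */
        int c = test_and_set_recursion(&mask, CTX_NORMAL, 0); /* rejected */
        printf("%d %d %d\n", a, b, c);  /* 3 4 -1 */
        clear_recursion(&mask, b);
        clear_recursion(&mask, a);
        return 0;
    }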
4673 +diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
4674 +index eb70cabe6e7f2..33a4240e6a6f1 100644
4675 +--- a/include/linux/user_namespace.h
4676 ++++ b/include/linux/user_namespace.h
4677 +@@ -127,6 +127,8 @@ static inline long get_ucounts_value(struct ucounts *ucounts, enum ucount_type t
4678 +
4679 + long inc_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v);
4680 + bool dec_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v);
4681 ++long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum ucount_type type);
4682 ++void dec_rlimit_put_ucounts(struct ucounts *ucounts, enum ucount_type type);
4683 + bool is_ucounts_overlimit(struct ucounts *ucounts, enum ucount_type type, unsigned long max);
4684 +
4685 + static inline void set_rlimit_ucount_max(struct user_namespace *ns,
4686 +diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
4687 +index 2eb6d7c2c9310..f37c7a558d6dd 100644
4688 +--- a/include/net/sctp/sm.h
4689 ++++ b/include/net/sctp/sm.h
4690 +@@ -384,11 +384,11 @@ sctp_vtag_verify(const struct sctp_chunk *chunk,
4691 + * Verification Tag value does not match the receiver's own
4692 + * tag value, the receiver shall silently discard the packet...
4693 + */
4694 +- if (ntohl(chunk->sctp_hdr->vtag) == asoc->c.my_vtag)
4695 +- return 1;
4696 ++ if (ntohl(chunk->sctp_hdr->vtag) != asoc->c.my_vtag)
4697 ++ return 0;
4698 +
4699 + chunk->transport->encap_port = SCTP_INPUT_CB(chunk->skb)->encap_port;
4700 +- return 0;
4701 ++ return 1;
4702 + }
4703 +
4704 + /* Check VTAG of the packet matches the sender's own tag and the T bit is
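The sctp_vtag_verify() hunk inverts the comparison so the function bails out early on a tag mismatch; before, the encap_port copy sat on the discard path and never ran for packets that actually passed verification. The corrected control flow, reduced to stand-in types:

    #include <stdio.h>

    struct chunk { unsigned vtag; unsigned encap_port; };

    static int vtag_verify(struct chunk *c, unsigned my_vtag, unsigned port)
    {
        if (c->vtag != my_vtag)
            return 0;          /* silently discard, leave state alone */

        /* reached only on a match now */
        c->encap_port = port;
        return 1;
    }

    int main(void)
    {
        struct chunk c = { .vtag = 42, .encap_port = 0 };
        printf("mismatch: %d port %u\n", vtag_verify(&c, 7, 9899), c.encap_port);
        printf("match:    %d port %u\n", vtag_verify(&c, 42, 9899), c.encap_port);
        return 0;
    }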
4705 +diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
4706 +index 2e8d51937acdf..47d2cad21b56a 100644
4707 +--- a/include/sound/hda_codec.h
4708 ++++ b/include/sound/hda_codec.h
4709 +@@ -225,6 +225,7 @@ struct hda_codec {
4710 + #endif
4711 +
4712 + /* misc flags */
4713 ++ unsigned int configured:1; /* codec was configured */
4714 + unsigned int in_freeing:1; /* being released */
4715 + unsigned int registered:1; /* codec was registered */
4716 + unsigned int display_power_control:1; /* needs display power */
4717 +diff --git a/kernel/auditsc.c b/kernel/auditsc.c
4718 +index 8dd73a64f9216..b1cb1dbf7417f 100644
4719 +--- a/kernel/auditsc.c
4720 ++++ b/kernel/auditsc.c
4721 +@@ -657,7 +657,7 @@ static int audit_filter_rules(struct task_struct *tsk,
4722 + result = audit_comparator(audit_loginuid_set(tsk), f->op, f->val);
4723 + break;
4724 + case AUDIT_SADDR_FAM:
4725 +- if (ctx->sockaddr)
4726 ++ if (ctx && ctx->sockaddr)
4727 + result = audit_comparator(ctx->sockaddr->ss_family,
4728 + f->op, f->val);
4729 + break;
4730 +diff --git a/kernel/cred.c b/kernel/cred.c
4731 +index f784e08c2fbd6..1ae0b4948a5a8 100644
4732 +--- a/kernel/cred.c
4733 ++++ b/kernel/cred.c
4734 +@@ -225,8 +225,6 @@ struct cred *cred_alloc_blank(void)
4735 + #ifdef CONFIG_DEBUG_CREDENTIALS
4736 + new->magic = CRED_MAGIC;
4737 + #endif
4738 +- new->ucounts = get_ucounts(&init_ucounts);
4739 +-
4740 + if (security_cred_alloc_blank(new, GFP_KERNEL_ACCOUNT) < 0)
4741 + goto error;
4742 +
4743 +@@ -501,7 +499,7 @@ int commit_creds(struct cred *new)
4744 + inc_rlimit_ucounts(new->ucounts, UCOUNT_RLIMIT_NPROC, 1);
4745 + rcu_assign_pointer(task->real_cred, new);
4746 + rcu_assign_pointer(task->cred, new);
4747 +- if (new->user != old->user)
4748 ++ if (new->user != old->user || new->user_ns != old->user_ns)
4749 + dec_rlimit_ucounts(old->ucounts, UCOUNT_RLIMIT_NPROC, 1);
4750 + alter_cred_subscribers(old, -2);
4751 +
4752 +@@ -669,7 +667,7 @@ int set_cred_ucounts(struct cred *new)
4753 + {
4754 + struct task_struct *task = current;
4755 + const struct cred *old = task->real_cred;
4756 +- struct ucounts *old_ucounts = new->ucounts;
4757 ++ struct ucounts *new_ucounts, *old_ucounts = new->ucounts;
4758 +
4759 + if (new->user == old->user && new->user_ns == old->user_ns)
4760 + return 0;
4761 +@@ -681,9 +679,10 @@ int set_cred_ucounts(struct cred *new)
4762 + if (old_ucounts && old_ucounts->ns == new->user_ns && uid_eq(old_ucounts->uid, new->euid))
4763 + return 0;
4764 +
4765 +- if (!(new->ucounts = alloc_ucounts(new->user_ns, new->euid)))
4766 ++ if (!(new_ucounts = alloc_ucounts(new->user_ns, new->euid)))
4767 + return -EAGAIN;
4768 +
4769 ++ new->ucounts = new_ucounts;
4770 + if (old_ucounts)
4771 + put_ucounts(old_ucounts);
4772 +
4773 +diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
4774 +index 70519f67556f9..fad3c77c1da17 100644
4775 +--- a/kernel/dma/debug.c
4776 ++++ b/kernel/dma/debug.c
4777 +@@ -1299,6 +1299,12 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
4778 + if (unlikely(dma_debug_disabled()))
4779 + return;
4780 +
4781 ++ for_each_sg(sg, s, nents, i) {
4782 ++ check_for_stack(dev, sg_page(s), s->offset);
4783 ++ if (!PageHighMem(sg_page(s)))
4784 ++ check_for_illegal_area(dev, sg_virt(s), s->length);
4785 ++ }
4786 ++
4787 + for_each_sg(sg, s, mapped_ents, i) {
4788 + entry = dma_entry_alloc();
4789 + if (!entry)
4790 +@@ -1314,12 +1320,6 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
4791 + entry->sg_call_ents = nents;
4792 + entry->sg_mapped_ents = mapped_ents;
4793 +
4794 +- check_for_stack(dev, sg_page(s), s->offset);
4795 +-
4796 +- if (!PageHighMem(sg_page(s))) {
4797 +- check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
4798 +- }
4799 +-
4800 + check_sg_segment(dev, s);
4801 +
4802 + add_dma_entry(entry);
4803 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
4804 +index 399c37c95392e..e165d28cf73be 100644
4805 +--- a/kernel/sched/core.c
4806 ++++ b/kernel/sched/core.c
4807 +@@ -8495,6 +8495,7 @@ void idle_task_exit(void)
4808 + finish_arch_post_lock_switch();
4809 + }
4810 +
4811 ++ scs_task_reset(current);
4812 + /* finish_cpu(), as ran on the BP, will clean up the active_mm state */
4813 + }
4814 +
4815 +diff --git a/kernel/signal.c b/kernel/signal.c
4816 +index a3229add44554..13d2505a14a0e 100644
4817 +--- a/kernel/signal.c
4818 ++++ b/kernel/signal.c
4819 +@@ -425,22 +425,10 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
4820 + */
4821 + rcu_read_lock();
4822 + ucounts = task_ucounts(t);
4823 +- sigpending = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1);
4824 +- switch (sigpending) {
4825 +- case 1:
4826 +- if (likely(get_ucounts(ucounts)))
4827 +- break;
4828 +- fallthrough;
4829 +- case LONG_MAX:
4830 +- /*
4831 +- * we need to decrease the ucount in the userns tree on any
4832 +- * failure to avoid counts leaking.
4833 +- */
4834 +- dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1);
4835 +- rcu_read_unlock();
4836 +- return NULL;
4837 +- }
4838 ++ sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
4839 + rcu_read_unlock();
4840 ++ if (!sigpending)
4841 ++ return NULL;
4842 +
4843 + if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
4844 + q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
4845 +@@ -449,8 +437,7 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
4846 + }
4847 +
4848 + if (unlikely(q == NULL)) {
4849 +- if (dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1))
4850 +- put_ucounts(ucounts);
4851 ++ dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
4852 + } else {
4853 + INIT_LIST_HEAD(&q->list);
4854 + q->flags = sigqueue_flags;
4855 +@@ -463,8 +450,8 @@ static void __sigqueue_free(struct sigqueue *q)
4856 + {
4857 + if (q->flags & SIGQUEUE_PREALLOC)
4858 + return;
4859 +- if (q->ucounts && dec_rlimit_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING, 1)) {
4860 +- put_ucounts(q->ucounts);
4861 ++ if (q->ucounts) {
4862 ++ dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
4863 + q->ucounts = NULL;
4864 + }
4865 + kmem_cache_free(sigqueue_cachep, q);
4866 +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
4867 +index 7b180f61e6d3c..06700d5b11717 100644
4868 +--- a/kernel/trace/ftrace.c
4869 ++++ b/kernel/trace/ftrace.c
4870 +@@ -6977,7 +6977,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4871 + struct ftrace_ops *op;
4872 + int bit;
4873 +
4874 +- bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START, TRACE_LIST_MAX);
4875 ++ bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
4876 + if (bit < 0)
4877 + return;
4878 +
4879 +@@ -7052,7 +7052,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
4880 + {
4881 + int bit;
4882 +
4883 +- bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START, TRACE_LIST_MAX);
4884 ++ bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
4885 + if (bit < 0)
4886 + return;
4887 +
4888 +diff --git a/kernel/ucount.c b/kernel/ucount.c
4889 +index bb51849e63752..eb03f3c68375d 100644
4890 +--- a/kernel/ucount.c
4891 ++++ b/kernel/ucount.c
4892 +@@ -284,6 +284,55 @@ bool dec_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v)
4893 + return (new == 0);
4894 + }
4895 +
4896 ++static void do_dec_rlimit_put_ucounts(struct ucounts *ucounts,
4897 ++ struct ucounts *last, enum ucount_type type)
4898 ++{
4899 ++ struct ucounts *iter, *next;
4900 ++ for (iter = ucounts; iter != last; iter = next) {
4901 ++ long dec = atomic_long_add_return(-1, &iter->ucount[type]);
4902 ++ WARN_ON_ONCE(dec < 0);
4903 ++ next = iter->ns->ucounts;
4904 ++ if (dec == 0)
4905 ++ put_ucounts(iter);
4906 ++ }
4907 ++}
4908 ++
4909 ++void dec_rlimit_put_ucounts(struct ucounts *ucounts, enum ucount_type type)
4910 ++{
4911 ++ do_dec_rlimit_put_ucounts(ucounts, NULL, type);
4912 ++}
4913 ++
4914 ++long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum ucount_type type)
4915 ++{
4916 ++ /* Caller must hold a reference to ucounts */
4917 ++ struct ucounts *iter;
4918 ++ long dec, ret = 0;
4919 ++
4920 ++ for (iter = ucounts; iter; iter = iter->ns->ucounts) {
4921 ++ long max = READ_ONCE(iter->ns->ucount_max[type]);
4922 ++ long new = atomic_long_add_return(1, &iter->ucount[type]);
4923 ++ if (new < 0 || new > max)
4924 ++ goto unwind;
4925 ++ if (iter == ucounts)
4926 ++ ret = new;
4927 ++ /*
4928 ++ * Grab an extra ucount reference for the caller when
4929 ++ * the rlimit count was previously 0.
4930 ++ */
4931 ++ if (new != 1)
4932 ++ continue;
4933 ++ if (!get_ucounts(iter))
4934 ++ goto dec_unwind;
4935 ++ }
4936 ++ return ret;
4937 ++dec_unwind:
4938 ++ dec = atomic_long_add_return(-1, &iter->ucount[type]);
4939 ++ WARN_ON_ONCE(dec < 0);
4940 ++unwind:
4941 ++ do_dec_rlimit_put_ucounts(ucounts, iter, type);
4942 ++ return 0;
4943 ++}
4944 ++
4945 + bool is_ucounts_overlimit(struct ucounts *ucounts, enum ucount_type type, unsigned long max)
4946 + {
4947 + struct ucounts *iter;
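inc_rlimit_get_ucounts() above walks the namespace chain bumping one counter per level, and on any overflow unwinds every increment it already made (do_dec_rlimit_put_ucounts() stops at the failing level). The same increment-with-rollback shape over a parent-linked list, with invented limits:

    #include <stdio.h>

    struct level { long count, max; struct level *parent; };

    /* bump every level up to the root; undo everything on overflow */
    static long inc_with_unwind(struct level *uc)
    {
        struct level *iter, *fail = NULL;
        long ret = 0;

        for (iter = uc; iter; iter = iter->parent) {
            if (++iter->count > iter->max) {
                fail = iter;
                break;
            }
            if (iter == uc)
                ret = iter->count;
        }
        if (!fail)
            return ret;

        fail->count--;                      /* the failed increment */
        for (iter = uc; iter != fail; iter = iter->parent)
            iter->count--;                  /* all levels below it */
        return 0;                           /* 0 means failure, as above */
    }

    int main(void)
    {
        struct level root = { 0, 1, NULL }, child = { 0, 10, &root };
        printf("%ld\n", inc_with_unwind(&child));   /* 1 */
        printf("%ld\n", inc_with_unwind(&child));   /* 0: root at max */
        printf("child=%ld root=%ld\n", child.count, root.count); /* 1 1 */
        return 0;
    }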
4948 +diff --git a/lib/Makefile b/lib/Makefile
4949 +index 5efd1b435a37c..a841be5244ac6 100644
4950 +--- a/lib/Makefile
4951 ++++ b/lib/Makefile
4952 +@@ -351,7 +351,7 @@ obj-$(CONFIG_OBJAGG) += objagg.o
4953 + obj-$(CONFIG_PLDMFW) += pldmfw/
4954 +
4955 + # KUnit tests
4956 +-CFLAGS_bitfield_kunit.o := $(call cc-option,-Wframe-larger-than=10240)
4957 ++CFLAGS_bitfield_kunit.o := $(DISABLE_STRUCTLEAK_PLUGIN)
4958 + obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o
4959 + obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
4960 + obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o
4961 +diff --git a/lib/kunit/executor_test.c b/lib/kunit/executor_test.c
4962 +index cdbe54b165017..e14a18af573dd 100644
4963 +--- a/lib/kunit/executor_test.c
4964 ++++ b/lib/kunit/executor_test.c
4965 +@@ -116,8 +116,8 @@ static void kfree_at_end(struct kunit *test, const void *to_free)
4966 + /* kfree() handles NULL already, but avoid allocating a no-op cleanup. */
4967 + if (IS_ERR_OR_NULL(to_free))
4968 + return;
4969 +- kunit_alloc_and_get_resource(test, NULL, kfree_res_free, GFP_KERNEL,
4970 +- (void *)to_free);
4971 ++ kunit_alloc_resource(test, NULL, kfree_res_free, GFP_KERNEL,
4972 ++ (void *)to_free);
4973 + }
4974 +
4975 + static struct kunit_suite *alloc_fake_suite(struct kunit *test,
4976 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
4977 +index afff3ac870673..163c2da2a6548 100644
4978 +--- a/mm/huge_memory.c
4979 ++++ b/mm/huge_memory.c
4980 +@@ -2724,12 +2724,14 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
4981 + if (mapping) {
4982 + int nr = thp_nr_pages(head);
4983 +
4984 +- if (PageSwapBacked(head))
4985 ++ if (PageSwapBacked(head)) {
4986 + __mod_lruvec_page_state(head, NR_SHMEM_THPS,
4987 + -nr);
4988 +- else
4989 ++ } else {
4990 + __mod_lruvec_page_state(head, NR_FILE_THPS,
4991 + -nr);
4992 ++ filemap_nr_thps_dec(mapping);
4993 ++ }
4994 + }
4995 +
4996 + __split_huge_page(page, list, end);
4997 +diff --git a/mm/mempolicy.c b/mm/mempolicy.c
4998 +index 54f6eaff18c52..aeebe8d11ad1f 100644
4999 +--- a/mm/mempolicy.c
5000 ++++ b/mm/mempolicy.c
5001 +@@ -857,16 +857,6 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags,
5002 + goto out;
5003 + }
5004 +
5005 +- if (flags & MPOL_F_NUMA_BALANCING) {
5006 +- if (new && new->mode == MPOL_BIND) {
5007 +- new->flags |= (MPOL_F_MOF | MPOL_F_MORON);
5008 +- } else {
5009 +- ret = -EINVAL;
5010 +- mpol_put(new);
5011 +- goto out;
5012 +- }
5013 +- }
5014 +-
5015 + ret = mpol_set_nodemask(new, nodes, scratch);
5016 + if (ret) {
5017 + mpol_put(new);
5018 +@@ -1450,7 +1440,11 @@ static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
5019 + return -EINVAL;
5020 + if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
5021 + return -EINVAL;
5022 +-
5023 ++ if (*flags & MPOL_F_NUMA_BALANCING) {
5024 ++ if (*mode != MPOL_BIND)
5025 ++ return -EINVAL;
5026 ++ *flags |= (MPOL_F_MOF | MPOL_F_MORON);
5027 ++ }
5028 + return 0;
5029 + }
5030 +
5031 +diff --git a/mm/slub.c b/mm/slub.c
5032 +index f77d8cd79ef7f..1d2587d9dbfda 100644
5033 +--- a/mm/slub.c
5034 ++++ b/mm/slub.c
5035 +@@ -1629,7 +1629,8 @@ static __always_inline bool slab_free_hook(struct kmem_cache *s,
5036 + }
5037 +
5038 + static inline bool slab_free_freelist_hook(struct kmem_cache *s,
5039 +- void **head, void **tail)
5040 ++ void **head, void **tail,
5041 ++ int *cnt)
5042 + {
5043 +
5044 + void *object;
5045 +@@ -1656,6 +1657,12 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
5046 + *head = object;
5047 + if (!*tail)
5048 + *tail = object;
5049 ++ } else {
5050 ++ /*
5051 ++ * Adjust the reconstructed freelist depth
5052 ++ * accordingly if object's reuse is delayed.
5053 ++ */
5054 ++ --(*cnt);
5055 + }
5056 + } while (object != old_tail);
5057 +
5058 +@@ -3166,7 +3173,9 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
5059 + struct kmem_cache_cpu *c;
5060 + unsigned long tid;
5061 +
5062 +- memcg_slab_free_hook(s, &head, 1);
5063 ++ /* memcg_slab_free_hook() is already called for bulk free. */
5064 ++ if (!tail)
5065 ++ memcg_slab_free_hook(s, &head, 1);
5066 + redo:
5067 + /*
5068 + * Determine the currently cpus per cpu slab.
5069 +@@ -3210,7 +3219,7 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
5070 + * With KASAN enabled slab_free_freelist_hook modifies the freelist
5071 + * to remove objects, whose reuse must be delayed.
5072 + */
5073 +- if (slab_free_freelist_hook(s, &head, &tail))
5074 ++ if (slab_free_freelist_hook(s, &head, &tail, &cnt))
5075 + do_slab_free(s, page, head, tail, cnt, addr);
5076 + }
5077 +
5078 +@@ -3928,8 +3937,8 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
5079 + if (alloc_kmem_cache_cpus(s))
5080 + return 0;
5081 +
5082 +- free_kmem_cache_nodes(s);
5083 + error:
5084 ++ __kmem_cache_release(s);
5085 + return -EINVAL;
5086 + }
5087 +
5088 +@@ -4597,13 +4606,15 @@ int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
5089 + return 0;
5090 +
5091 + err = sysfs_slab_add(s);
5092 +- if (err)
5093 ++ if (err) {
5094 + __kmem_cache_release(s);
5095 ++ return err;
5096 ++ }
5097 +
5098 + if (s->flags & SLAB_STORE_USER)
5099 + debugfs_slab_add(s);
5100 +
5101 +- return err;
5102 ++ return 0;
5103 + }
5104 +
5105 + void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
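The SLUB change threads a cnt pointer into slab_free_freelist_hook() so that when an object's reuse is delayed (e.g. KASAN quarantine) it is dropped from the rebuilt freelist and the bulk-free count shrinks to match; otherwise do_slab_free() would be told to free more objects than the list holds. A userspace sketch of filtering a singly linked list while keeping the count honest (the "delayed" predicate is a stand-in):

    #include <stdio.h>
    #include <stddef.h>

    struct obj { struct obj *next; int delayed; };

    /* rebuild the list, skipping delayed objects and fixing *cnt */
    static struct obj *filter_freelist(struct obj *old_head, int *cnt)
    {
        struct obj *head = NULL, *o = old_head;

        while (o) {
            struct obj *next = o->next;
            if (!o->delayed) {
                o->next = head;   /* push onto the rebuilt list */
                head = o;
            } else {
                --(*cnt);         /* reuse delayed: shrink the count */
            }
            o = next;
        }
        return head;
    }

    int main(void)
    {
        struct obj c = { NULL, 0 }, b = { &c, 1 }, a = { &b, 0 };
        int cnt = 3;
        struct obj *head = filter_freelist(&a, &cnt);
        printf("cnt = %d\n", cnt);          /* 2: one object quarantined */
        for (; head; head = head->next)
            printf("kept, delayed=%d\n", head->delayed);
        return 0;
    }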
5106 +diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
5107 +index caa16bf30fb55..a63f1cc8568bc 100644
5108 +--- a/net/bpf/test_run.c
5109 ++++ b/net/bpf/test_run.c
5110 +@@ -552,6 +552,12 @@ static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
5111 + __skb->gso_segs = skb_shinfo(skb)->gso_segs;
5112 + }
5113 +
5114 ++static struct proto bpf_dummy_proto = {
5115 ++ .name = "bpf_dummy",
5116 ++ .owner = THIS_MODULE,
5117 ++ .obj_size = sizeof(struct sock),
5118 ++};
5119 ++
5120 + int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
5121 + union bpf_attr __user *uattr)
5122 + {
5123 +@@ -596,20 +602,19 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
5124 + break;
5125 + }
5126 +
5127 +- sk = kzalloc(sizeof(struct sock), GFP_USER);
5128 ++ sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
5129 + if (!sk) {
5130 + kfree(data);
5131 + kfree(ctx);
5132 + return -ENOMEM;
5133 + }
5134 +- sock_net_set(sk, net);
5135 + sock_init_data(NULL, sk);
5136 +
5137 + skb = build_skb(data, 0);
5138 + if (!skb) {
5139 + kfree(data);
5140 + kfree(ctx);
5141 +- kfree(sk);
5142 ++ sk_free(sk);
5143 + return -ENOMEM;
5144 + }
5145 + skb->sk = sk;
5146 +@@ -682,8 +687,7 @@ out:
5147 + if (dev && dev != net->loopback_dev)
5148 + dev_put(dev);
5149 + kfree_skb(skb);
5150 +- bpf_sk_storage_free(sk);
5151 +- kfree(sk);
5152 ++ sk_free(sk);
5153 + kfree(ctx);
5154 + return ret;
5155 + }
5156 +diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
5157 +index 2b48b204205e6..7d3155283af93 100644
5158 +--- a/net/bridge/br_private.h
5159 ++++ b/net/bridge/br_private.h
5160 +@@ -1002,9 +1002,7 @@ static inline unsigned long br_multicast_lmqt(const struct net_bridge *br)
5161 +
5162 + static inline unsigned long br_multicast_gmi(const struct net_bridge *br)
5163 + {
5164 +- /* use the RFC default of 2 for QRV */
5165 +- return 2 * br->multicast_query_interval +
5166 +- br->multicast_query_response_interval;
5167 ++ return br->multicast_membership_interval;
5168 + }
5169 + #else
5170 + static inline int br_multicast_rcv(struct net_bridge *br,
5171 +diff --git a/net/can/isotp.c b/net/can/isotp.c
5172 +index caaa532ece949..df6968b28bf41 100644
5173 +--- a/net/can/isotp.c
5174 ++++ b/net/can/isotp.c
5175 +@@ -121,7 +121,7 @@ enum {
5176 + struct tpcon {
5177 + int idx;
5178 + int len;
5179 +- u8 state;
5180 ++ u32 state;
5181 + u8 bs;
5182 + u8 sn;
5183 + u8 ll_dl;
5184 +@@ -848,6 +848,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
5185 + {
5186 + struct sock *sk = sock->sk;
5187 + struct isotp_sock *so = isotp_sk(sk);
5188 ++ u32 old_state = so->tx.state;
5189 + struct sk_buff *skb;
5190 + struct net_device *dev;
5191 + struct canfd_frame *cf;
5192 +@@ -860,45 +861,55 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
5193 + return -EADDRNOTAVAIL;
5194 +
5195 + /* we do not support multiple buffers - for now */
5196 +- if (so->tx.state != ISOTP_IDLE || wq_has_sleeper(&so->wait)) {
5197 +- if (msg->msg_flags & MSG_DONTWAIT)
5198 +- return -EAGAIN;
5199 ++ if (cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING) != ISOTP_IDLE ||
5200 ++ wq_has_sleeper(&so->wait)) {
5201 ++ if (msg->msg_flags & MSG_DONTWAIT) {
5202 ++ err = -EAGAIN;
5203 ++ goto err_out;
5204 ++ }
5205 +
5206 + /* wait for complete transmission of current pdu */
5207 +- wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
5208 ++ err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
5209 ++ if (err)
5210 ++ goto err_out;
5211 + }
5212 +
5213 +- if (!size || size > MAX_MSG_LENGTH)
5214 +- return -EINVAL;
5215 ++ if (!size || size > MAX_MSG_LENGTH) {
5216 ++ err = -EINVAL;
5217 ++ goto err_out;
5218 ++ }
5219 +
5220 + /* take care of a potential SF_DL ESC offset for TX_DL > 8 */
5221 + off = (so->tx.ll_dl > CAN_MAX_DLEN) ? 1 : 0;
5222 +
5223 + /* does the given data fit into a single frame for SF_BROADCAST? */
5224 + if ((so->opt.flags & CAN_ISOTP_SF_BROADCAST) &&
5225 +- (size > so->tx.ll_dl - SF_PCI_SZ4 - ae - off))
5226 +- return -EINVAL;
5227 ++ (size > so->tx.ll_dl - SF_PCI_SZ4 - ae - off)) {
5228 ++ err = -EINVAL;
5229 ++ goto err_out;
5230 ++ }
5231 +
5232 + err = memcpy_from_msg(so->tx.buf, msg, size);
5233 + if (err < 0)
5234 +- return err;
5235 ++ goto err_out;
5236 +
5237 + dev = dev_get_by_index(sock_net(sk), so->ifindex);
5238 +- if (!dev)
5239 +- return -ENXIO;
5240 ++ if (!dev) {
5241 ++ err = -ENXIO;
5242 ++ goto err_out;
5243 ++ }
5244 +
5245 + skb = sock_alloc_send_skb(sk, so->ll.mtu + sizeof(struct can_skb_priv),
5246 + msg->msg_flags & MSG_DONTWAIT, &err);
5247 + if (!skb) {
5248 + dev_put(dev);
5249 +- return err;
5250 ++ goto err_out;
5251 + }
5252 +
5253 + can_skb_reserve(skb);
5254 + can_skb_prv(skb)->ifindex = dev->ifindex;
5255 + can_skb_prv(skb)->skbcnt = 0;
5256 +
5257 +- so->tx.state = ISOTP_SENDING;
5258 + so->tx.len = size;
5259 + so->tx.idx = 0;
5260 +
5261 +@@ -954,15 +965,25 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
5262 + if (err) {
5263 + pr_notice_once("can-isotp: %s: can_send_ret %pe\n",
5264 + __func__, ERR_PTR(err));
5265 +- return err;
5266 ++ goto err_out;
5267 + }
5268 +
5269 + if (wait_tx_done) {
5270 + /* wait for complete transmission of current pdu */
5271 + wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
5272 ++
5273 ++ if (sk->sk_err)
5274 ++ return -sk->sk_err;
5275 + }
5276 +
5277 + return size;
5278 ++
5279 ++err_out:
5280 ++ so->tx.state = old_state;
5281 ++ if (so->tx.state == ISOTP_IDLE)
5282 ++ wake_up_interruptible(&so->wait);
5283 ++
5284 ++ return err;
5285 + }
5286 +
5287 + static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
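isotp_sendmsg() now claims the transmit path with cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING), so two concurrent senders can no longer both pass the idle test, and every error path restores the previous state and wakes waiters. The claim-by-compare-exchange shape in C11 (states and rollback are simplified stand-ins):

    #include <stdatomic.h>
    #include <stdio.h>

    enum { IDLE, SENDING };

    static atomic_int tx_state = IDLE;

    static int send_one(void)
    {
        int expected = IDLE;

        /* atomically IDLE -> SENDING; fail if someone else is sending */
        if (!atomic_compare_exchange_strong(&tx_state, &expected, SENDING))
            return -1;                  /* busy: -EAGAIN-style result */

        /* ... build/queue the frame; on error restore the old state ... */
        atomic_store(&tx_state, IDLE);  /* transmission finished */
        return 0;
    }

    int main(void)
    {
        printf("first: %d\n", send_one());   /* 0 */
        atomic_store(&tx_state, SENDING);    /* simulate a racing sender */
        printf("busy:  %d\n", send_one());   /* -1 */
        return 0;
    }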
5288 +diff --git a/net/can/j1939/j1939-priv.h b/net/can/j1939/j1939-priv.h
5289 +index 12369b604ce95..cea712fb2a9e0 100644
5290 +--- a/net/can/j1939/j1939-priv.h
5291 ++++ b/net/can/j1939/j1939-priv.h
5292 +@@ -326,6 +326,7 @@ int j1939_session_activate(struct j1939_session *session);
5293 + void j1939_tp_schedule_txtimer(struct j1939_session *session, int msec);
5294 + void j1939_session_timers_cancel(struct j1939_session *session);
5295 +
5296 ++#define J1939_MIN_TP_PACKET_SIZE 9
5297 + #define J1939_MAX_TP_PACKET_SIZE (7 * 0xff)
5298 + #define J1939_MAX_ETP_PACKET_SIZE (7 * 0x00ffffff)
5299 +
5300 +diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c
5301 +index 08c8606cfd9c7..9bc55ecb37f9f 100644
5302 +--- a/net/can/j1939/main.c
5303 ++++ b/net/can/j1939/main.c
5304 +@@ -249,11 +249,14 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
5305 + struct j1939_priv *priv, *priv_new;
5306 + int ret;
5307 +
5308 +- priv = j1939_priv_get_by_ndev(ndev);
5309 ++ spin_lock(&j1939_netdev_lock);
5310 ++ priv = j1939_priv_get_by_ndev_locked(ndev);
5311 + if (priv) {
5312 + kref_get(&priv->rx_kref);
5313 ++ spin_unlock(&j1939_netdev_lock);
5314 + return priv;
5315 + }
5316 ++ spin_unlock(&j1939_netdev_lock);
5317 +
5318 + priv = j1939_priv_create(ndev);
5319 + if (!priv)
5320 +@@ -269,10 +272,10 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
5321 + /* Someone was faster than us, use their priv and roll
5322 + * back our's.
5323 + */
5324 ++ kref_get(&priv_new->rx_kref);
5325 + spin_unlock(&j1939_netdev_lock);
5326 + dev_put(ndev);
5327 + kfree(priv);
5328 +- kref_get(&priv_new->rx_kref);
5329 + return priv_new;
5330 + }
5331 + j1939_priv_set(ndev, priv);
5332 +diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
5333 +index bdc95bd7a851f..e59fbbffa31ce 100644
5334 +--- a/net/can/j1939/transport.c
5335 ++++ b/net/can/j1939/transport.c
5336 +@@ -1230,12 +1230,11 @@ static enum hrtimer_restart j1939_tp_rxtimer(struct hrtimer *hrtimer)
5337 + session->err = -ETIME;
5338 + j1939_session_deactivate(session);
5339 + } else {
5340 +- netdev_alert(priv->ndev, "%s: 0x%p: rx timeout, send abort\n",
5341 +- __func__, session);
5342 +-
5343 + j1939_session_list_lock(session->priv);
5344 + if (session->state >= J1939_SESSION_ACTIVE &&
5345 + session->state < J1939_SESSION_ACTIVE_MAX) {
5346 ++ netdev_alert(priv->ndev, "%s: 0x%p: rx timeout, send abort\n",
5347 ++ __func__, session);
5348 + j1939_session_get(session);
5349 + hrtimer_start(&session->rxtimer,
5350 + ms_to_ktime(J1939_XTP_ABORT_TIMEOUT_MS),
5351 +@@ -1597,6 +1596,8 @@ j1939_session *j1939_xtp_rx_rts_session_new(struct j1939_priv *priv,
5352 + abort = J1939_XTP_ABORT_FAULT;
5353 + else if (len > priv->tp_max_packet_size)
5354 + abort = J1939_XTP_ABORT_RESOURCE;
5355 ++ else if (len < J1939_MIN_TP_PACKET_SIZE)
5356 ++ abort = J1939_XTP_ABORT_FAULT;
5357 + }
5358 +
5359 + if (abort != J1939_XTP_NO_ABORT) {
5360 +@@ -1771,6 +1772,7 @@ static void j1939_xtp_rx_dpo(struct j1939_priv *priv, struct sk_buff *skb,
5361 + static void j1939_xtp_rx_dat_one(struct j1939_session *session,
5362 + struct sk_buff *skb)
5363 + {
5364 ++ enum j1939_xtp_abort abort = J1939_XTP_ABORT_FAULT;
5365 + struct j1939_priv *priv = session->priv;
5366 + struct j1939_sk_buff_cb *skcb;
5367 + struct sk_buff *se_skb = NULL;
5368 +@@ -1785,9 +1787,11 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
5369 +
5370 + skcb = j1939_skb_to_cb(skb);
5371 + dat = skb->data;
5372 +- if (skb->len <= 1)
5373 ++ if (skb->len != 8) {
5374 + /* makes no sense */
5375 ++ abort = J1939_XTP_ABORT_UNEXPECTED_DATA;
5376 + goto out_session_cancel;
5377 ++ }
5378 +
5379 + switch (session->last_cmd) {
5380 + case 0xff:
5381 +@@ -1885,7 +1889,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
5382 + out_session_cancel:
5383 + kfree_skb(se_skb);
5384 + j1939_session_timers_cancel(session);
5385 +- j1939_session_cancel(session, J1939_XTP_ABORT_FAULT);
5386 ++ j1939_session_cancel(session, abort);
5387 + j1939_session_put(session);
5388 + }
5389 +
5390 +diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
5391 +index 76ed5ef0e36a8..28326ca34b523 100644
5392 +--- a/net/dsa/dsa2.c
5393 ++++ b/net/dsa/dsa2.c
5394 +@@ -1283,12 +1283,15 @@ static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
5395 +
5396 + for_each_available_child_of_node(ports, port) {
5397 + err = of_property_read_u32(port, "reg", &reg);
5398 +- if (err)
5399 ++ if (err) {
5400 ++ of_node_put(port);
5401 + goto out_put_node;
5402 ++ }
5403 +
5404 + if (reg >= ds->num_ports) {
5405 + dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%zu)\n",
5406 + port, reg, ds->num_ports);
5407 ++ of_node_put(port);
5408 + err = -EINVAL;
5409 + goto out_put_node;
5410 + }
5411 +@@ -1296,8 +1299,10 @@ static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
5412 + dp = dsa_to_port(ds, reg);
5413 +
5414 + err = dsa_port_parse_of(dp, port);
5415 +- if (err)
5416 ++ if (err) {
5417 ++ of_node_put(port);
5418 + goto out_put_node;
5419 ++ }
5420 + }
5421 +
5422 + out_put_node:
5423 +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
5424 +index db07c05736b25..609d7048d04de 100644
5425 +--- a/net/ipv4/tcp_ipv4.c
5426 ++++ b/net/ipv4/tcp_ipv4.c
5427 +@@ -1037,6 +1037,20 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
5428 + DEFINE_STATIC_KEY_FALSE(tcp_md5_needed);
5429 + EXPORT_SYMBOL(tcp_md5_needed);
5430 +
5431 ++static bool better_md5_match(struct tcp_md5sig_key *old, struct tcp_md5sig_key *new)
5432 ++{
5433 ++ if (!old)
5434 ++ return true;
5435 ++
5436 ++ /* l3index always overrides non-l3index */
5437 ++ if (old->l3index && new->l3index == 0)
5438 ++ return false;
5439 ++ if (old->l3index == 0 && new->l3index)
5440 ++ return true;
5441 ++
5442 ++ return old->prefixlen < new->prefixlen;
5443 ++}
5444 ++
5445 + /* Find the Key structure for an address. */
5446 + struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
5447 + const union tcp_md5_addr *addr,
5448 +@@ -1074,8 +1088,7 @@ struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
5449 + match = false;
5450 + }
5451 +
5452 +- if (match && (!best_match ||
5453 +- key->prefixlen > best_match->prefixlen))
5454 ++ if (match && better_md5_match(best_match, key))
5455 + best_match = key;
5456 + }
5457 + return best_match;
5458 +@@ -1105,7 +1118,7 @@ static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
5459 + lockdep_sock_is_held(sk)) {
5460 + if (key->family != family)
5461 + continue;
5462 +- if (key->l3index && key->l3index != l3index)
5463 ++ if (key->l3index != l3index)
5464 + continue;
5465 + if (!memcmp(&key->addr, addr, size) &&
5466 + key->prefixlen == prefixlen)
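better_md5_match() above encodes the new ordering for MD5 key lookup: a key bound to an L3 interface (non-zero l3index) always beats an unbound one, and prefix length only decides between keys of the same kind. The helper is self-contained enough to exercise directly; this restatement uses a local struct and invented values:

    #include <stdbool.h>
    #include <stdio.h>

    struct md5_key { int l3index; unsigned char prefixlen; };

    static bool better_md5_match(const struct md5_key *old,
                                 const struct md5_key *new)
    {
        if (!old)
            return true;
        /* l3index always overrides non-l3index */
        if (old->l3index && new->l3index == 0)
            return false;
        if (old->l3index == 0 && new->l3index)
            return true;
        return old->prefixlen < new->prefixlen;
    }

    int main(void)
    {
        struct md5_key bound = { 5, 16 }, unbound = { 0, 32 };
        printf("%d\n", better_md5_match(&unbound, &bound)); /* 1: l3 wins */
        printf("%d\n", better_md5_match(&bound, &unbound)); /* 0 */
        return 0;
    }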
5467 +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
5468 +index 8e6ca9ad68121..80b1a7838cff7 100644
5469 +--- a/net/ipv6/ip6_output.c
5470 ++++ b/net/ipv6/ip6_output.c
5471 +@@ -488,13 +488,14 @@ static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
5472 +
5473 + int ip6_forward(struct sk_buff *skb)
5474 + {
5475 +- struct inet6_dev *idev = __in6_dev_get_safely(skb->dev);
5476 + struct dst_entry *dst = skb_dst(skb);
5477 + struct ipv6hdr *hdr = ipv6_hdr(skb);
5478 + struct inet6_skb_parm *opt = IP6CB(skb);
5479 + struct net *net = dev_net(dst->dev);
5480 ++ struct inet6_dev *idev;
5481 + u32 mtu;
5482 +
5483 ++ idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
5484 + if (net->ipv6.devconf_all->forwarding == 0)
5485 + goto error;
5486 +
5487 +diff --git a/net/ipv6/netfilter/ip6t_rt.c b/net/ipv6/netfilter/ip6t_rt.c
5488 +index 733c83d38b308..4ad8b2032f1f9 100644
5489 +--- a/net/ipv6/netfilter/ip6t_rt.c
5490 ++++ b/net/ipv6/netfilter/ip6t_rt.c
5491 +@@ -25,12 +25,7 @@ MODULE_AUTHOR("Andras Kis-Szabo <kisza@×××××××.hu>");
5492 + static inline bool
5493 + segsleft_match(u_int32_t min, u_int32_t max, u_int32_t id, bool invert)
5494 + {
5495 +- bool r;
5496 +- pr_debug("segsleft_match:%c 0x%x <= 0x%x <= 0x%x\n",
5497 +- invert ? '!' : ' ', min, id, max);
5498 +- r = (id >= min && id <= max) ^ invert;
5499 +- pr_debug(" result %s\n", r ? "PASS" : "FAILED");
5500 +- return r;
5501 ++ return (id >= min && id <= max) ^ invert;
5502 + }
5503 +
5504 + static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
5505 +@@ -65,30 +60,6 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
5506 + return false;
5507 + }
5508 +
5509 +- pr_debug("IPv6 RT LEN %u %u ", hdrlen, rh->hdrlen);
5510 +- pr_debug("TYPE %04X ", rh->type);
5511 +- pr_debug("SGS_LEFT %u %02X\n", rh->segments_left, rh->segments_left);
5512 +-
5513 +- pr_debug("IPv6 RT segsleft %02X ",
5514 +- segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1],
5515 +- rh->segments_left,
5516 +- !!(rtinfo->invflags & IP6T_RT_INV_SGS)));
5517 +- pr_debug("type %02X %02X %02X ",
5518 +- rtinfo->rt_type, rh->type,
5519 +- (!(rtinfo->flags & IP6T_RT_TYP) ||
5520 +- ((rtinfo->rt_type == rh->type) ^
5521 +- !!(rtinfo->invflags & IP6T_RT_INV_TYP))));
5522 +- pr_debug("len %02X %04X %02X ",
5523 +- rtinfo->hdrlen, hdrlen,
5524 +- !(rtinfo->flags & IP6T_RT_LEN) ||
5525 +- ((rtinfo->hdrlen == hdrlen) ^
5526 +- !!(rtinfo->invflags & IP6T_RT_INV_LEN)));
5527 +- pr_debug("res %02X %02X %02X ",
5528 +- rtinfo->flags & IP6T_RT_RES,
5529 +- ((const struct rt0_hdr *)rh)->reserved,
5530 +- !((rtinfo->flags & IP6T_RT_RES) &&
5531 +- (((const struct rt0_hdr *)rh)->reserved)));
5532 +-
5533 + ret = (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1],
5534 + rh->segments_left,
5535 + !!(rtinfo->invflags & IP6T_RT_INV_SGS))) &&
5536 +@@ -107,22 +78,22 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
5537 + reserved),
5538 + sizeof(_reserved),
5539 + &_reserved);
5540 ++ if (!rp) {
5541 ++ par->hotdrop = true;
5542 ++ return false;
5543 ++ }
5544 +
5545 + ret = (*rp == 0);
5546 + }
5547 +
5548 +- pr_debug("#%d ", rtinfo->addrnr);
5549 + if (!(rtinfo->flags & IP6T_RT_FST)) {
5550 + return ret;
5551 + } else if (rtinfo->flags & IP6T_RT_FST_NSTRICT) {
5552 +- pr_debug("Not strict ");
5553 + if (rtinfo->addrnr > (unsigned int)((hdrlen - 8) / 16)) {
5554 +- pr_debug("There isn't enough space\n");
5555 + return false;
5556 + } else {
5557 + unsigned int i = 0;
5558 +
5559 +- pr_debug("#%d ", rtinfo->addrnr);
5560 + for (temp = 0;
5561 + temp < (unsigned int)((hdrlen - 8) / 16);
5562 + temp++) {
5563 +@@ -138,26 +109,20 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
5564 + return false;
5565 + }
5566 +
5567 +- if (ipv6_addr_equal(ap, &rtinfo->addrs[i])) {
5568 +- pr_debug("i=%d temp=%d;\n", i, temp);
5569 ++ if (ipv6_addr_equal(ap, &rtinfo->addrs[i]))
5570 + i++;
5571 +- }
5572 + if (i == rtinfo->addrnr)
5573 + break;
5574 + }
5575 +- pr_debug("i=%d #%d\n", i, rtinfo->addrnr);
5576 + if (i == rtinfo->addrnr)
5577 + return ret;
5578 + else
5579 + return false;
5580 + }
5581 + } else {
5582 +- pr_debug("Strict ");
5583 + if (rtinfo->addrnr > (unsigned int)((hdrlen - 8) / 16)) {
5584 +- pr_debug("There isn't enough space\n");
5585 + return false;
5586 + } else {
5587 +- pr_debug("#%d ", rtinfo->addrnr);
5588 + for (temp = 0; temp < rtinfo->addrnr; temp++) {
5589 + ap = skb_header_pointer(skb,
5590 + ptr
5591 +@@ -173,7 +138,6 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
5592 + if (!ipv6_addr_equal(ap, &rtinfo->addrs[temp]))
5593 + break;
5594 + }
5595 +- pr_debug("temp=%d #%d\n", temp, rtinfo->addrnr);
5596 + if (temp == rtinfo->addrnr &&
5597 + temp == (unsigned int)((hdrlen - 8) / 16))
5598 + return ret;
5599 +diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
5600 +index 54395266339d7..92a747896f808 100644
5601 +--- a/net/netfilter/Kconfig
5602 ++++ b/net/netfilter/Kconfig
5603 +@@ -109,7 +109,7 @@ config NF_CONNTRACK_MARK
5604 + config NF_CONNTRACK_SECMARK
5605 + bool 'Connection tracking security mark support'
5606 + depends on NETWORK_SECMARK
5607 +- default m if NETFILTER_ADVANCED=n
5608 ++ default y if NETFILTER_ADVANCED=n
5609 + help
5610 + This option enables security markings to be applied to
5611 + connections. Typically they are copied to connections from
5612 +diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
5613 +index c25097092a060..29ec3ef63edc7 100644
5614 +--- a/net/netfilter/ipvs/ip_vs_ctl.c
5615 ++++ b/net/netfilter/ipvs/ip_vs_ctl.c
5616 +@@ -4090,6 +4090,11 @@ static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs)
5617 + tbl[idx++].data = &ipvs->sysctl_conn_reuse_mode;
5618 + tbl[idx++].data = &ipvs->sysctl_schedule_icmp;
5619 + tbl[idx++].data = &ipvs->sysctl_ignore_tunneled;
5620 ++#ifdef CONFIG_IP_VS_DEBUG
5621 ++ /* Global sysctls must be ro in non-init netns */
5622 ++ if (!net_eq(net, &init_net))
5623 ++ tbl[idx++].mode = 0444;
5624 ++#endif
5625 +
5626 + ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl);
5627 + if (ipvs->sysctl_hdr == NULL) {
5628 +diff --git a/net/netfilter/nft_chain_filter.c b/net/netfilter/nft_chain_filter.c
5629 +index 5b02408a920bf..3ced0eb6b7c3b 100644
5630 +--- a/net/netfilter/nft_chain_filter.c
5631 ++++ b/net/netfilter/nft_chain_filter.c
5632 +@@ -342,12 +342,6 @@ static void nft_netdev_event(unsigned long event, struct net_device *dev,
5633 + return;
5634 + }
5635 +
5636 +- /* UNREGISTER events are also happening on netns exit.
5637 +- *
5638 +- * Although nf_tables core releases all tables/chains, only this event
5639 +- * handler provides guarantee that hook->ops.dev is still accessible,
5640 +- * so we cannot skip exiting net namespaces.
5641 +- */
5642 + __nft_release_basechain(ctx);
5643 + }
5644 +
5645 +@@ -366,6 +360,9 @@ static int nf_tables_netdev_event(struct notifier_block *this,
5646 + event != NETDEV_CHANGENAME)
5647 + return NOTIFY_DONE;
5648 +
5649 ++ if (!check_net(ctx.net))
5650 ++ return NOTIFY_DONE;
5651 ++
5652 + nft_net = nft_pernet(ctx.net);
5653 + mutex_lock(&nft_net->commit_mutex);
5654 + list_for_each_entry(table, &nft_net->tables, list) {
5655 +diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
5656 +index 7b2f359bfce46..2f7cf5ecebf4f 100644
5657 +--- a/net/netfilter/xt_IDLETIMER.c
5658 ++++ b/net/netfilter/xt_IDLETIMER.c
5659 +@@ -137,7 +137,7 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
5660 + {
5661 + int ret;
5662 +
5663 +- info->timer = kmalloc(sizeof(*info->timer), GFP_KERNEL);
5664 ++ info->timer = kzalloc(sizeof(*info->timer), GFP_KERNEL);
5665 + if (!info->timer) {
5666 + ret = -ENOMEM;
5667 + goto out;
5668 +diff --git a/net/nfc/nci/rsp.c b/net/nfc/nci/rsp.c
5669 +index e9605922a3228..49cbc44e075d5 100644
5670 +--- a/net/nfc/nci/rsp.c
5671 ++++ b/net/nfc/nci/rsp.c
5672 +@@ -330,6 +330,8 @@ static void nci_core_conn_close_rsp_packet(struct nci_dev *ndev,
5673 + ndev->cur_conn_id);
5674 + if (conn_info) {
5675 + list_del(&conn_info->list);
5676 ++ if (conn_info == ndev->rf_conn_info)
5677 ++ ndev->rf_conn_info = NULL;
5678 + devm_kfree(&ndev->nfc_dev->dev, conn_info);
5679 + }
5680 + }
5681 +diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
5682 +index 1b4b3514c94f2..07f4dce7b5352 100644
5683 +--- a/net/sched/act_ct.c
5684 ++++ b/net/sched/act_ct.c
5685 +@@ -960,6 +960,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
5686 + tmpl = p->tmpl;
5687 +
5688 + tcf_lastuse_update(&c->tcf_tm);
5689 ++ tcf_action_update_bstats(&c->common, skb);
5690 +
5691 + if (clear) {
5692 + qdisc_skb_cb(skb)->post_ct = false;
5693 +@@ -1049,7 +1050,6 @@ out_push:
5694 +
5695 + qdisc_skb_cb(skb)->post_ct = true;
5696 + out_clear:
5697 +- tcf_action_update_bstats(&c->common, skb);
5698 + if (defrag)
5699 + qdisc_skb_cb(skb)->pkt_len = skb->len;
5700 + return retval;
5701 +diff --git a/scripts/Makefile.gcc-plugins b/scripts/Makefile.gcc-plugins
5702 +index 952e46876329a..4aad284800355 100644
5703 +--- a/scripts/Makefile.gcc-plugins
5704 ++++ b/scripts/Makefile.gcc-plugins
5705 +@@ -19,6 +19,10 @@ gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF) \
5706 + += -fplugin-arg-structleak_plugin-byref
5707 + gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL) \
5708 + += -fplugin-arg-structleak_plugin-byref-all
5709 ++ifdef CONFIG_GCC_PLUGIN_STRUCTLEAK
5710 ++ DISABLE_STRUCTLEAK_PLUGIN += -fplugin-arg-structleak_plugin-disable
5711 ++endif
5712 ++export DISABLE_STRUCTLEAK_PLUGIN
5713 + gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STRUCTLEAK) \
5714 + += -DSTRUCTLEAK_PLUGIN
5715 +
5716 +diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
5717 +index e3d79a7b6db66..b5d5333ab3300 100644
5718 +--- a/security/keys/process_keys.c
5719 ++++ b/security/keys/process_keys.c
5720 +@@ -918,6 +918,13 @@ void key_change_session_keyring(struct callback_head *twork)
5721 + return;
5722 + }
5723 +
5724 ++ /* If get_ucounts fails more bits are needed in the refcount */
5725 ++ if (unlikely(!get_ucounts(old->ucounts))) {
5726 ++ WARN_ONCE(1, "In %s get_ucounts failed\n", __func__);
5727 ++ put_cred(new);
5728 ++ return;
5729 ++ }
5730 ++
5731 + new-> uid = old-> uid;
5732 + new-> euid = old-> euid;
5733 + new-> suid = old-> suid;
5734 +@@ -927,6 +934,7 @@ void key_change_session_keyring(struct callback_head *twork)
5735 + new-> sgid = old-> sgid;
5736 + new->fsgid = old->fsgid;
5737 + new->user = get_uid(old->user);
5738 ++ new->ucounts = old->ucounts;
5739 + new->user_ns = get_user_ns(old->user_ns);
5740 + new->group_info = get_group_info(old->group_info);
5741 +
5742 +diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
5743 +index 062da7a7a5861..f7bd6e2db085b 100644
5744 +--- a/sound/hda/hdac_controller.c
5745 ++++ b/sound/hda/hdac_controller.c
5746 +@@ -421,8 +421,9 @@ int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset)
5747 + if (!full_reset)
5748 + goto skip_reset;
5749 +
5750 +- /* clear STATESTS */
5751 +- snd_hdac_chip_writew(bus, STATESTS, STATESTS_INT_MASK);
5752 ++ /* clear STATESTS if not in reset */
5753 ++ if (snd_hdac_chip_readb(bus, GCTL) & AZX_GCTL_RESET)
5754 ++ snd_hdac_chip_writew(bus, STATESTS, STATESTS_INT_MASK);
5755 +
5756 + /* reset controller */
5757 + snd_hdac_bus_enter_link_reset(bus);
5758 +diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
5759 +index e8dee24c309da..50a58fb5ad9c4 100644
5760 +--- a/sound/pci/hda/hda_bind.c
5761 ++++ b/sound/pci/hda/hda_bind.c
5762 +@@ -304,29 +304,31 @@ int snd_hda_codec_configure(struct hda_codec *codec)
5763 + {
5764 + int err;
5765 +
5766 ++ if (codec->configured)
5767 ++ return 0;
5768 ++
5769 + if (is_generic_config(codec))
5770 + codec->probe_id = HDA_CODEC_ID_GENERIC;
5771 + else
5772 + codec->probe_id = 0;
5773 +
5774 +- err = snd_hdac_device_register(&codec->core);
5775 +- if (err < 0)
5776 +- return err;
5777 ++ if (!device_is_registered(&codec->core.dev)) {
5778 ++ err = snd_hdac_device_register(&codec->core);
5779 ++ if (err < 0)
5780 ++ return err;
5781 ++ }
5782 +
5783 + if (!codec->preset)
5784 + codec_bind_module(codec);
5785 + if (!codec->preset) {
5786 + err = codec_bind_generic(codec);
5787 + if (err < 0) {
5788 +- codec_err(codec, "Unable to bind the codec\n");
5789 +- goto error;
5790 ++ codec_dbg(codec, "Unable to bind the codec\n");
5791 ++ return err;
5792 + }
5793 + }
5794 +
5795 ++ codec->configured = 1;
5796 + return 0;
5797 +-
5798 +- error:
5799 +- snd_hdac_device_unregister(&codec->core);
5800 +- return err;
5801 + }
5802 + EXPORT_SYMBOL_GPL(snd_hda_codec_configure);
5803 +diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
5804 +index 7a717e1511569..8afcce6478cdd 100644
5805 +--- a/sound/pci/hda/hda_codec.c
5806 ++++ b/sound/pci/hda/hda_codec.c
5807 +@@ -791,6 +791,7 @@ void snd_hda_codec_cleanup_for_unbind(struct hda_codec *codec)
5808 + snd_array_free(&codec->nids);
5809 + remove_conn_list(codec);
5810 + snd_hdac_regmap_exit(&codec->core);
5811 ++ codec->configured = 0;
5812 + }
5813 + EXPORT_SYMBOL_GPL(snd_hda_codec_cleanup_for_unbind);
5814 +
5815 +diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
5816 +index ca2f2ecd14888..5a49ee4f6ce03 100644
5817 +--- a/sound/pci/hda/hda_controller.c
5818 ++++ b/sound/pci/hda/hda_controller.c
5819 +@@ -25,6 +25,7 @@
5820 + #include <sound/core.h>
5821 + #include <sound/initval.h>
5822 + #include "hda_controller.h"
5823 ++#include "hda_local.h"
5824 +
5825 + #define CREATE_TRACE_POINTS
5826 + #include "hda_controller_trace.h"
5827 +@@ -1259,17 +1260,24 @@ EXPORT_SYMBOL_GPL(azx_probe_codecs);
5828 + int azx_codec_configure(struct azx *chip)
5829 + {
5830 + struct hda_codec *codec, *next;
5831 ++ int success = 0;
5832 +
5833 +- /* use _safe version here since snd_hda_codec_configure() deregisters
5834 +- * the device upon error and deletes itself from the bus list.
5835 +- */
5836 +- list_for_each_codec_safe(codec, next, &chip->bus) {
5837 +- snd_hda_codec_configure(codec);
5838 ++ list_for_each_codec(codec, &chip->bus) {
5839 ++ if (!snd_hda_codec_configure(codec))
5840 ++ success++;
5841 + }
5842 +
5843 +- if (!azx_bus(chip)->num_codecs)
5844 +- return -ENODEV;
5845 +- return 0;
5846 ++ if (success) {
5847 ++ /* unregister failed codecs if any codec has been probed */
5848 ++ list_for_each_codec_safe(codec, next, &chip->bus) {
5849 ++ if (!codec->configured) {
5850 ++ codec_err(codec, "Unable to configure, disabling\n");
5851 ++ snd_hdac_device_unregister(&codec->core);
5852 ++ }
5853 ++ }
5854 ++ }
5855 ++
5856 ++ return success ? 0 : -ENODEV;
5857 + }
5858 + EXPORT_SYMBOL_GPL(azx_codec_configure);
5859 +
5860 +diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
5861 +index 68f9668788ea2..324cba13c7bac 100644
5862 +--- a/sound/pci/hda/hda_controller.h
5863 ++++ b/sound/pci/hda/hda_controller.h
5864 +@@ -41,7 +41,7 @@
5865 + /* 24 unused */
5866 + #define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */
5867 + #define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */
5868 +-/* 27 unused */
5869 ++#define AZX_DCAPS_RETRY_PROBE (1 << 27) /* retry probe if no codec is configured */
5870 + #define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */
5871 + #define AZX_DCAPS_NO_MSI64 (1 << 29) /* Stick to 32-bit MSIs */
5872 + #define AZX_DCAPS_SEPARATE_STREAM_TAG (1 << 30) /* capture and playback use separate stream tag */
5873 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
5874 +index 0062c18b646af..89f135a6a1f6d 100644
5875 +--- a/sound/pci/hda/hda_intel.c
5876 ++++ b/sound/pci/hda/hda_intel.c
5877 +@@ -307,7 +307,8 @@ enum {
5878 + /* quirks for AMD SB */
5879 + #define AZX_DCAPS_PRESET_AMD_SB \
5880 + (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_AMD_WORKAROUND |\
5881 +- AZX_DCAPS_SNOOP_TYPE(ATI) | AZX_DCAPS_PM_RUNTIME)
5882 ++ AZX_DCAPS_SNOOP_TYPE(ATI) | AZX_DCAPS_PM_RUNTIME |\
5883 ++ AZX_DCAPS_RETRY_PROBE)
5884 +
5885 + /* quirks for Nvidia */
5886 + #define AZX_DCAPS_PRESET_NVIDIA \
5887 +@@ -1730,7 +1731,7 @@ static void azx_check_snoop_available(struct azx *chip)
5888 +
5889 + static void azx_probe_work(struct work_struct *work)
5890 + {
5891 +- struct hda_intel *hda = container_of(work, struct hda_intel, probe_work);
5892 ++ struct hda_intel *hda = container_of(work, struct hda_intel, probe_work.work);
5893 + azx_probe_continue(&hda->chip);
5894 + }
5895 +
5896 +@@ -1839,7 +1840,7 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
5897 + }
5898 +
5899 + /* continue probing in work context as may trigger request module */
5900 +- INIT_WORK(&hda->probe_work, azx_probe_work);
5901 ++ INIT_DELAYED_WORK(&hda->probe_work, azx_probe_work);
5902 +
5903 + *rchip = chip;
5904 +
5905 +@@ -2170,7 +2171,7 @@ static int azx_probe(struct pci_dev *pci,
5906 + #endif
5907 +
5908 + if (schedule_probe)
5909 +- schedule_work(&hda->probe_work);
5910 ++ schedule_delayed_work(&hda->probe_work, 0);
5911 +
5912 + dev++;
5913 + if (chip->disabled)
5914 +@@ -2256,6 +2257,11 @@ static int azx_probe_continue(struct azx *chip)
5915 + int dev = chip->dev_index;
5916 + int err;
5917 +
5918 ++ if (chip->disabled || hda->init_failed)
5919 ++ return -EIO;
5920 ++ if (hda->probe_retry)
5921 ++ goto probe_retry;
5922 ++
5923 + to_hda_bus(bus)->bus_probing = 1;
5924 + hda->probe_continued = 1;
5925 +
5926 +@@ -2317,10 +2323,20 @@ static int azx_probe_continue(struct azx *chip)
5927 + #endif
5928 + }
5929 + #endif
5930 ++
5931 ++ probe_retry:
5932 + if (bus->codec_mask && !(probe_only[dev] & 1)) {
5933 + err = azx_codec_configure(chip);
5934 +- if (err < 0)
5935 ++ if (err) {
5936 ++ if ((chip->driver_caps & AZX_DCAPS_RETRY_PROBE) &&
5937 ++ ++hda->probe_retry < 60) {
5938 ++ schedule_delayed_work(&hda->probe_work,
5939 ++ msecs_to_jiffies(1000));
5940 ++ return 0; /* keep things up */
5941 ++ }
5942 ++ dev_err(chip->card->dev, "Cannot probe codecs, giving up\n");
5943 + goto out_free;
5944 ++ }
5945 + }
5946 +
5947 + err = snd_card_register(chip->card);
5948 +@@ -2350,6 +2366,7 @@ out_free:
5949 + display_power(chip, false);
5950 + complete_all(&hda->probe_wait);
5951 + to_hda_bus(bus)->bus_probing = 0;
5952 ++ hda->probe_retry = 0;
5953 + return 0;
5954 + }
5955 +
5956 +@@ -2375,7 +2392,7 @@ static void azx_remove(struct pci_dev *pci)
5957 + * device during cancel_work_sync() call.
5958 + */
5959 + device_unlock(&pci->dev);
5960 +- cancel_work_sync(&hda->probe_work);
5961 ++ cancel_delayed_work_sync(&hda->probe_work);
5962 + device_lock(&pci->dev);
5963 +
5964 + snd_card_free(card);
5965 +diff --git a/sound/pci/hda/hda_intel.h b/sound/pci/hda/hda_intel.h
5966 +index 3fb119f090408..0f39418f9328b 100644
5967 +--- a/sound/pci/hda/hda_intel.h
5968 ++++ b/sound/pci/hda/hda_intel.h
5969 +@@ -14,7 +14,7 @@ struct hda_intel {
5970 +
5971 + /* sync probing */
5972 + struct completion probe_wait;
5973 +- struct work_struct probe_work;
5974 ++ struct delayed_work probe_work;
5975 +
5976 + /* card list (for power_save trigger) */
5977 + struct list_head list;
5978 +@@ -30,6 +30,8 @@ struct hda_intel {
5979 + unsigned int freed:1; /* resources already released */
5980 +
5981 + bool need_i915_power:1; /* the hda controller needs i915 power */
5982 ++
5983 ++ int probe_retry; /* being probe-retry */
5984 + };
5985 +
5986 + #endif
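The hda_intel changes convert probe_work to a delayed_work so a probe whose codecs fail to configure (AZX_DCAPS_RETRY_PROBE) can be re-queued once a second, up to 60 attempts, before giving up. Stripped of the workqueue machinery, the retry policy is just a bounded loop; the success condition below is a fake stand-in:

    #include <stdio.h>

    #define MAX_RETRIES 60   /* mirrors the 60 x 1000 ms bound in the hunk */

    static int configure(int attempt)
    {
        return attempt >= 3 ? 0 : -1;   /* pretend try 3 succeeds */
    }

    int main(void)
    {
        int retry;

        /* stand-in for rescheduling the delayed work every 1000 ms */
        for (retry = 0; retry < MAX_RETRIES; retry++) {
            if (configure(retry) == 0) {
                printf("configured after %d retries\n", retry);
                return 0;
            }
        }
        printf("Cannot probe codecs, giving up\n");
        return 1;
    }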
5987 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5988 +index 8e6ff50f0f94f..b30e1843273bf 100644
5989 +--- a/sound/pci/hda/patch_realtek.c
5990 ++++ b/sound/pci/hda/patch_realtek.c
5991 +@@ -2547,6 +2547,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
5992 + SND_PCI_QUIRK(0x1558, 0x65d2, "Clevo PB51R[CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
5993 + SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
5994 + SND_PCI_QUIRK(0x1558, 0x65e5, "Clevo PC50D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
5995 ++ SND_PCI_QUIRK(0x1558, 0x65f1, "Clevo PC50HS", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
5996 + SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
5997 + SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
5998 + SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
5999 +diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
6000 +index db16071205ba9..dd1ae611fc2a5 100644
6001 +--- a/sound/soc/codecs/Kconfig
6002 ++++ b/sound/soc/codecs/Kconfig
6003 +@@ -1564,6 +1564,7 @@ config SND_SOC_WCD938X
6004 + config SND_SOC_WCD938X_SDW
6005 + tristate "WCD9380/WCD9385 Codec - SDW"
6006 + select SND_SOC_WCD938X
6007 ++ select REGMAP_IRQ
6008 + depends on SOUNDWIRE
6009 + select REGMAP_SOUNDWIRE
6010 + help
6011 +diff --git a/sound/soc/codecs/cs4341.c b/sound/soc/codecs/cs4341.c
6012 +index 7d3e54d8eef36..29d05e32d3417 100644
6013 +--- a/sound/soc/codecs/cs4341.c
6014 ++++ b/sound/soc/codecs/cs4341.c
6015 +@@ -305,12 +305,19 @@ static int cs4341_spi_probe(struct spi_device *spi)
6016 + return cs4341_probe(&spi->dev);
6017 + }
6018 +
6019 ++static const struct spi_device_id cs4341_spi_ids[] = {
6020 ++ { "cs4341a" },
6021 ++ { }
6022 ++};
6023 ++MODULE_DEVICE_TABLE(spi, cs4341_spi_ids);
6024 ++
6025 + static struct spi_driver cs4341_spi_driver = {
6026 + .driver = {
6027 + .name = "cs4341-spi",
6028 + .of_match_table = of_match_ptr(cs4341_dt_ids),
6029 + },
6030 + .probe = cs4341_spi_probe,
6031 ++ .id_table = cs4341_spi_ids,
6032 + };
6033 + #endif
6034 +
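The cs4341 change reflects a general SPI-driver rule: matching and module autoloading by plain device name generally require an spi_device_id table hooked up through .id_table and exported with MODULE_DEVICE_TABLE(), even when an OF match table is present. The generic shape of the pattern, with hypothetical driver and chip names:

#include <linux/module.h>
#include <linux/spi/spi.h>

static const struct spi_device_id mychip_spi_ids[] = {
	{ "mychip" },
	{ }
};
MODULE_DEVICE_TABLE(spi, mychip_spi_ids);	/* emits modalias info for autoload */

static int mychip_probe(struct spi_device *spi)
{
	return 0;
}

static struct spi_driver mychip_driver = {
	.driver = {
		.name = "mychip",
	},
	.probe = mychip_probe,
	.id_table = mychip_spi_ids,	/* enables name-based matching */
};
module_spi_driver(mychip_driver);

MODULE_LICENSE("GPL");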
6035 +diff --git a/sound/soc/codecs/nau8824.c b/sound/soc/codecs/nau8824.c
6036 +index db88be48c9980..f946ef65a4c19 100644
6037 +--- a/sound/soc/codecs/nau8824.c
6038 ++++ b/sound/soc/codecs/nau8824.c
6039 +@@ -867,8 +867,8 @@ static void nau8824_jdet_work(struct work_struct *work)
6040 + struct regmap *regmap = nau8824->regmap;
6041 + int adc_value, event = 0, event_mask = 0;
6042 +
6043 +- snd_soc_dapm_enable_pin(dapm, "MICBIAS");
6044 +- snd_soc_dapm_enable_pin(dapm, "SAR");
6045 ++ snd_soc_dapm_force_enable_pin(dapm, "MICBIAS");
6046 ++ snd_soc_dapm_force_enable_pin(dapm, "SAR");
6047 + snd_soc_dapm_sync(dapm);
6048 +
6049 + msleep(100);
6050 +diff --git a/sound/soc/codecs/pcm179x-spi.c b/sound/soc/codecs/pcm179x-spi.c
6051 +index 0a542924ec5f9..ebf63ea90a1c4 100644
6052 +--- a/sound/soc/codecs/pcm179x-spi.c
6053 ++++ b/sound/soc/codecs/pcm179x-spi.c
6054 +@@ -36,6 +36,7 @@ static const struct of_device_id pcm179x_of_match[] = {
6055 + MODULE_DEVICE_TABLE(of, pcm179x_of_match);
6056 +
6057 + static const struct spi_device_id pcm179x_spi_ids[] = {
6058 ++ { "pcm1792a", 0 },
6059 + { "pcm179x", 0 },
6060 + { },
6061 + };
6062 +diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
6063 +index 4dc844f3c1fc0..60dee41816dc2 100644
6064 +--- a/sound/soc/codecs/pcm512x.c
6065 ++++ b/sound/soc/codecs/pcm512x.c
6066 +@@ -116,6 +116,8 @@ static const struct reg_default pcm512x_reg_defaults[] = {
6067 + { PCM512x_FS_SPEED_MODE, 0x00 },
6068 + { PCM512x_IDAC_1, 0x01 },
6069 + { PCM512x_IDAC_2, 0x00 },
6070 ++ { PCM512x_I2S_1, 0x02 },
6071 ++ { PCM512x_I2S_2, 0x00 },
6072 + };
6073 +
6074 + static bool pcm512x_readable(struct device *dev, unsigned int reg)
6075 +diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
6076 +index 9e621a254392c..499604f1e1789 100644
6077 +--- a/sound/soc/codecs/wm8960.c
6078 ++++ b/sound/soc/codecs/wm8960.c
6079 +@@ -742,9 +742,16 @@ static int wm8960_configure_clocking(struct snd_soc_component *component)
6080 + int i, j, k;
6081 + int ret;
6082 +
6083 +- if (!(iface1 & (1<<6))) {
6084 +- dev_dbg(component->dev,
6085 +- "Codec is slave mode, no need to configure clock\n");
6086 ++ /*
6087 ++	 * Clocking should still be configured even in slave mode, so
6088 ++	 * ideally this if statement would be removed entirely. However,
6089 ++	 * some platforms may break if the sysclk is not configured; to
6090 ++	 * avoid such compatibility issues, the '!wm8960->sysclk'
6091 ++	 * condition keeps the early return only for that case.
6092 ++ */
6093 ++ if (!(iface1 & (1 << 6)) && !wm8960->sysclk) {
6094 ++ dev_warn(component->dev,
6095 ++ "slave mode, but proceeding with no clock configuration\n");
6096 + return 0;
6097 + }
6098 +
6099 +diff --git a/sound/soc/fsl/fsl_xcvr.c b/sound/soc/fsl/fsl_xcvr.c
6100 +index 477d16713e72e..a9b6c2b0c871b 100644
6101 +--- a/sound/soc/fsl/fsl_xcvr.c
6102 ++++ b/sound/soc/fsl/fsl_xcvr.c
6103 +@@ -487,8 +487,9 @@ static int fsl_xcvr_prepare(struct snd_pcm_substream *substream,
6104 + return ret;
6105 + }
6106 +
6107 +- /* clear DPATH RESET */
6108 ++ /* set DPATH RESET */
6109 + m_ctl |= FSL_XCVR_EXT_CTRL_DPTH_RESET(tx);
6110 ++ v_ctl |= FSL_XCVR_EXT_CTRL_DPTH_RESET(tx);
6111 + ret = regmap_update_bits(xcvr->regmap, FSL_XCVR_EXT_CTRL, m_ctl, v_ctl);
6112 + if (ret < 0) {
6113 + dev_err(dai->dev, "Error while setting EXT_CTRL: %d\n", ret);
6114 +@@ -590,10 +591,6 @@ static void fsl_xcvr_shutdown(struct snd_pcm_substream *substream,
6115 + val |= FSL_XCVR_EXT_CTRL_CMDC_RESET(tx);
6116 + }
6117 +
6118 +- /* set DPATH RESET */
6119 +- mask |= FSL_XCVR_EXT_CTRL_DPTH_RESET(tx);
6120 +- val |= FSL_XCVR_EXT_CTRL_DPTH_RESET(tx);
6121 +-
6122 + ret = regmap_update_bits(xcvr->regmap, FSL_XCVR_EXT_CTRL, mask, val);
6123 + if (ret < 0) {
6124 + dev_err(dai->dev, "Err setting DPATH RESET: %d\n", ret);
6125 +@@ -643,6 +640,16 @@ static int fsl_xcvr_trigger(struct snd_pcm_substream *substream, int cmd,
6126 + dev_err(dai->dev, "Failed to enable DMA: %d\n", ret);
6127 + return ret;
6128 + }
6129 ++
6130 ++ /* clear DPATH RESET */
6131 ++ ret = regmap_update_bits(xcvr->regmap, FSL_XCVR_EXT_CTRL,
6132 ++ FSL_XCVR_EXT_CTRL_DPTH_RESET(tx),
6133 ++ 0);
6134 ++ if (ret < 0) {
6135 ++ dev_err(dai->dev, "Failed to clear DPATH RESET: %d\n", ret);
6136 ++ return ret;
6137 ++ }
6138 ++
6139 + break;
6140 + case SNDRV_PCM_TRIGGER_STOP:
6141 + case SNDRV_PCM_TRIGGER_SUSPEND:
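Note the split here: the datapath reset is now asserted in fsl_xcvr_prepare() and only deasserted in the trigger-start path after DMA is enabled. Both halves use regmap_update_bits(), which rewrites only the bits selected by the mask, so assert and deassert are the same call with different values. A hedged sketch (register and bit names invented for illustration):

/* Assert the reset: write ones under the mask. */
regmap_update_bits(map, EXT_CTRL_REG, DPATH_RESET_MASK, DPATH_RESET_MASK);

/* ... enable DMA, start the stream ... */

/* Deassert the reset: write zeroes under the same mask. */
regmap_update_bits(map, EXT_CTRL_REG, DPATH_RESET_MASK, 0);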
6142 +diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
6143 +index 91bf939d5233e..8477071141e28 100644
6144 +--- a/sound/soc/soc-dapm.c
6145 ++++ b/sound/soc/soc-dapm.c
6146 +@@ -2559,6 +2559,7 @@ static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
6147 + const char *pin, int status)
6148 + {
6149 + struct snd_soc_dapm_widget *w = dapm_find_widget(dapm, pin, true);
6150 ++ int ret = 0;
6151 +
6152 + dapm_assert_locked(dapm);
6153 +
6154 +@@ -2571,13 +2572,14 @@ static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
6155 + dapm_mark_dirty(w, "pin configuration");
6156 + dapm_widget_invalidate_input_paths(w);
6157 + dapm_widget_invalidate_output_paths(w);
6158 ++ ret = 1;
6159 + }
6160 +
6161 + w->connected = status;
6162 + if (status == 0)
6163 + w->force = 0;
6164 +
6165 +- return 0;
6166 ++ return ret;
6167 + }
6168 +
6169 + /**
6170 +@@ -3582,14 +3584,15 @@ int snd_soc_dapm_put_pin_switch(struct snd_kcontrol *kcontrol,
6171 + {
6172 + struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
6173 + const char *pin = (const char *)kcontrol->private_value;
6174 ++ int ret;
6175 +
6176 + if (ucontrol->value.integer.value[0])
6177 +- snd_soc_dapm_enable_pin(&card->dapm, pin);
6178 ++ ret = snd_soc_dapm_enable_pin(&card->dapm, pin);
6179 + else
6180 +- snd_soc_dapm_disable_pin(&card->dapm, pin);
6181 ++ ret = snd_soc_dapm_disable_pin(&card->dapm, pin);
6182 +
6183 + snd_soc_dapm_sync(&card->dapm);
6184 +- return 0;
6185 ++ return ret;
6186 + }
6187 + EXPORT_SYMBOL_GPL(snd_soc_dapm_put_pin_switch);
6188 +
6189 +@@ -4023,7 +4026,7 @@ static int snd_soc_dapm_dai_link_put(struct snd_kcontrol *kcontrol,
6190 +
6191 + rtd->params_select = ucontrol->value.enumerated.item[0];
6192 +
6193 +- return 0;
6194 ++ return 1;
6195 + }
6196 +
6197 + static void
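The three soc-dapm changes above all enforce the same ALSA convention: a kcontrol .put handler should return 1 when it actually changed the value (so that a notification event is generated for userspace) and 0 when the write was a no-op. A minimal compliant handler looks roughly like this (a sketch only; struct my_priv and its level field are invented):

#include <sound/control.h>

struct my_priv {
	long level;
};

static int my_level_put(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct my_priv *priv = snd_kcontrol_chip(kcontrol);
	long val = ucontrol->value.integer.value[0];

	if (priv->level == val)
		return 0;	/* unchanged: suppress the notification */

	priv->level = val;
	/* ... push the new value to the hardware here ... */
	return 1;		/* changed: let ALSA notify listeners */
}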
6198 +diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
6199 +index 147b831e1a82d..91d40b4c851c1 100644
6200 +--- a/sound/usb/quirks-table.h
6201 ++++ b/sound/usb/quirks-table.h
6202 +@@ -4080,6 +4080,38 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
6203 + }
6204 + }
6205 + },
6206 ++{
6207 ++ /*
6208 ++ * Sennheiser GSP670
6209 ++	 * Change the order in which the interfaces are loaded
6210 ++ */
6211 ++ USB_DEVICE(0x1395, 0x0300),
6212 ++ .bInterfaceClass = USB_CLASS_PER_INTERFACE,
6213 ++ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
6214 ++ .ifnum = QUIRK_ANY_INTERFACE,
6215 ++ .type = QUIRK_COMPOSITE,
6216 ++ .data = &(const struct snd_usb_audio_quirk[]) {
6217 ++ // Communication
6218 ++ {
6219 ++ .ifnum = 3,
6220 ++ .type = QUIRK_AUDIO_STANDARD_INTERFACE
6221 ++ },
6222 ++ // Recording
6223 ++ {
6224 ++ .ifnum = 4,
6225 ++ .type = QUIRK_AUDIO_STANDARD_INTERFACE
6226 ++ },
6227 ++ // Main
6228 ++ {
6229 ++ .ifnum = 1,
6230 ++ .type = QUIRK_AUDIO_STANDARD_INTERFACE
6231 ++ },
6232 ++ {
6233 ++ .ifnum = -1
6234 ++ }
6235 ++ }
6236 ++ }
6237 ++},
6238 +
6239 + #undef USB_DEVICE_VENDOR_SPEC
6240 + #undef USB_AUDIO_DEVICE
6241 +diff --git a/tools/lib/perf/tests/test-evlist.c b/tools/lib/perf/tests/test-evlist.c
6242 +index c67c833991708..ce91a582f0e41 100644
6243 +--- a/tools/lib/perf/tests/test-evlist.c
6244 ++++ b/tools/lib/perf/tests/test-evlist.c
6245 +@@ -40,7 +40,7 @@ static int test_stat_cpu(void)
6246 + .type = PERF_TYPE_SOFTWARE,
6247 + .config = PERF_COUNT_SW_TASK_CLOCK,
6248 + };
6249 +- int err, cpu, tmp;
6250 ++ int err, idx;
6251 +
6252 + cpus = perf_cpu_map__new(NULL);
6253 + __T("failed to create cpus", cpus);
6254 +@@ -70,10 +70,10 @@ static int test_stat_cpu(void)
6255 + perf_evlist__for_each_evsel(evlist, evsel) {
6256 + cpus = perf_evsel__cpus(evsel);
6257 +
6258 +- perf_cpu_map__for_each_cpu(cpu, tmp, cpus) {
6259 ++ for (idx = 0; idx < perf_cpu_map__nr(cpus); idx++) {
6260 + struct perf_counts_values counts = { .val = 0 };
6261 +
6262 +- perf_evsel__read(evsel, cpu, 0, &counts);
6263 ++ perf_evsel__read(evsel, idx, 0, &counts);
6264 + __T("failed to read value for evsel", counts.val != 0);
6265 + }
6266 + }
6267 +diff --git a/tools/lib/perf/tests/test-evsel.c b/tools/lib/perf/tests/test-evsel.c
6268 +index a184e4861627e..33ae9334861a2 100644
6269 +--- a/tools/lib/perf/tests/test-evsel.c
6270 ++++ b/tools/lib/perf/tests/test-evsel.c
6271 +@@ -22,7 +22,7 @@ static int test_stat_cpu(void)
6272 + .type = PERF_TYPE_SOFTWARE,
6273 + .config = PERF_COUNT_SW_CPU_CLOCK,
6274 + };
6275 +- int err, cpu, tmp;
6276 ++ int err, idx;
6277 +
6278 + cpus = perf_cpu_map__new(NULL);
6279 + __T("failed to create cpus", cpus);
6280 +@@ -33,10 +33,10 @@ static int test_stat_cpu(void)
6281 + err = perf_evsel__open(evsel, cpus, NULL);
6282 + __T("failed to open evsel", err == 0);
6283 +
6284 +- perf_cpu_map__for_each_cpu(cpu, tmp, cpus) {
6285 ++ for (idx = 0; idx < perf_cpu_map__nr(cpus); idx++) {
6286 + struct perf_counts_values counts = { .val = 0 };
6287 +
6288 +- perf_evsel__read(evsel, cpu, 0, &counts);
6289 ++ perf_evsel__read(evsel, idx, 0, &counts);
6290 + __T("failed to read value for evsel", counts.val != 0);
6291 + }
6292 +
6293 +@@ -148,6 +148,7 @@ static int test_stat_user_read(int event)
6294 + __T("failed to mmap evsel", err == 0);
6295 +
6296 + pc = perf_evsel__mmap_base(evsel, 0, 0);
6297 ++ __T("failed to get mmapped address", pc);
6298 +
6299 + #if defined(__i386__) || defined(__x86_64__)
6300 + __T("userspace counter access not supported", pc->cap_user_rdpmc);
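Both perf test fixes replace iteration over CPU numbers with iteration over CPU-map indexes, because perf_evsel__read() takes an index into the evsel's CPU map rather than a CPU number; on sparse CPU maps the two diverge and reads would target the wrong slot. Condensed from the corrected tests (same libperf calls, error handling elided):

int idx;

for (idx = 0; idx < perf_cpu_map__nr(cpus); idx++) {
	struct perf_counts_values counts = { .val = 0 };

	/* second and third args are map indexes (cpu, thread) */
	perf_evsel__read(evsel, idx, 0, &counts);
}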
6301 +diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
6302 +index 8676c75987281..a9c2bebd7576e 100644
6303 +--- a/tools/objtool/elf.c
6304 ++++ b/tools/objtool/elf.c
6305 +@@ -509,6 +509,7 @@ int elf_add_reloc(struct elf *elf, struct section *sec, unsigned long offset,
6306 + list_add_tail(&reloc->list, &sec->reloc->reloc_list);
6307 + elf_hash_add(reloc, &reloc->hash, reloc_hash(reloc));
6308 +
6309 ++ sec->reloc->sh.sh_size += sec->reloc->sh.sh_entsize;
6310 + sec->reloc->changed = true;
6311 +
6312 + return 0;
6313 +@@ -979,63 +980,63 @@ static struct section *elf_create_reloc_section(struct elf *elf,
6314 + }
6315 + }
6316 +
6317 +-static int elf_rebuild_rel_reloc_section(struct section *sec, int nr)
6318 ++static int elf_rebuild_rel_reloc_section(struct section *sec)
6319 + {
6320 + struct reloc *reloc;
6321 +- int idx = 0, size;
6322 ++ int idx = 0;
6323 + void *buf;
6324 +
6325 + /* Allocate a buffer for relocations */
6326 +- size = nr * sizeof(GElf_Rel);
6327 +- buf = malloc(size);
6328 ++ buf = malloc(sec->sh.sh_size);
6329 + if (!buf) {
6330 + perror("malloc");
6331 + return -1;
6332 + }
6333 +
6334 + sec->data->d_buf = buf;
6335 +- sec->data->d_size = size;
6336 ++ sec->data->d_size = sec->sh.sh_size;
6337 + sec->data->d_type = ELF_T_REL;
6338 +
6339 +- sec->sh.sh_size = size;
6340 +-
6341 + idx = 0;
6342 + list_for_each_entry(reloc, &sec->reloc_list, list) {
6343 + reloc->rel.r_offset = reloc->offset;
6344 + reloc->rel.r_info = GELF_R_INFO(reloc->sym->idx, reloc->type);
6345 +- gelf_update_rel(sec->data, idx, &reloc->rel);
6346 ++ if (!gelf_update_rel(sec->data, idx, &reloc->rel)) {
6347 ++ WARN_ELF("gelf_update_rel");
6348 ++ return -1;
6349 ++ }
6350 + idx++;
6351 + }
6352 +
6353 + return 0;
6354 + }
6355 +
6356 +-static int elf_rebuild_rela_reloc_section(struct section *sec, int nr)
6357 ++static int elf_rebuild_rela_reloc_section(struct section *sec)
6358 + {
6359 + struct reloc *reloc;
6360 +- int idx = 0, size;
6361 ++ int idx = 0;
6362 + void *buf;
6363 +
6364 + /* Allocate a buffer for relocations with addends */
6365 +- size = nr * sizeof(GElf_Rela);
6366 +- buf = malloc(size);
6367 ++ buf = malloc(sec->sh.sh_size);
6368 + if (!buf) {
6369 + perror("malloc");
6370 + return -1;
6371 + }
6372 +
6373 + sec->data->d_buf = buf;
6374 +- sec->data->d_size = size;
6375 ++ sec->data->d_size = sec->sh.sh_size;
6376 + sec->data->d_type = ELF_T_RELA;
6377 +
6378 +- sec->sh.sh_size = size;
6379 +-
6380 + idx = 0;
6381 + list_for_each_entry(reloc, &sec->reloc_list, list) {
6382 + reloc->rela.r_offset = reloc->offset;
6383 + reloc->rela.r_addend = reloc->addend;
6384 + reloc->rela.r_info = GELF_R_INFO(reloc->sym->idx, reloc->type);
6385 +- gelf_update_rela(sec->data, idx, &reloc->rela);
6386 ++ if (!gelf_update_rela(sec->data, idx, &reloc->rela)) {
6387 ++ WARN_ELF("gelf_update_rela");
6388 ++ return -1;
6389 ++ }
6390 + idx++;
6391 + }
6392 +
6393 +@@ -1044,16 +1045,9 @@ static int elf_rebuild_rela_reloc_section(struct section *sec, int nr)
6394 +
6395 + static int elf_rebuild_reloc_section(struct elf *elf, struct section *sec)
6396 + {
6397 +- struct reloc *reloc;
6398 +- int nr;
6399 +-
6400 +- nr = 0;
6401 +- list_for_each_entry(reloc, &sec->reloc_list, list)
6402 +- nr++;
6403 +-
6404 + switch (sec->sh.sh_type) {
6405 +- case SHT_REL: return elf_rebuild_rel_reloc_section(sec, nr);
6406 +- case SHT_RELA: return elf_rebuild_rela_reloc_section(sec, nr);
6407 ++ case SHT_REL: return elf_rebuild_rel_reloc_section(sec);
6408 ++ case SHT_RELA: return elf_rebuild_rela_reloc_section(sec);
6409 + default: return -1;
6410 + }
6411 + }
6412 +@@ -1113,12 +1107,6 @@ int elf_write(struct elf *elf)
6413 + /* Update changed relocation sections and section headers: */
6414 + list_for_each_entry(sec, &elf->sections, list) {
6415 + if (sec->changed) {
6416 +- if (sec->base &&
6417 +- elf_rebuild_reloc_section(elf, sec)) {
6418 +- WARN("elf_rebuild_reloc_section");
6419 +- return -1;
6420 +- }
6421 +-
6422 + s = elf_getscn(elf->elf, sec->idx);
6423 + if (!s) {
6424 + WARN_ELF("elf_getscn");
6425 +@@ -1129,6 +1117,12 @@ int elf_write(struct elf *elf)
6426 + return -1;
6427 + }
6428 +
6429 ++ if (sec->base &&
6430 ++ elf_rebuild_reloc_section(elf, sec)) {
6431 ++ WARN("elf_rebuild_reloc_section");
6432 ++ return -1;
6433 ++ }
6434 ++
6435 + sec->changed = false;
6436 + elf->changed = true;
6437 + }
6438 +diff --git a/tools/testing/selftests/net/forwarding/Makefile b/tools/testing/selftests/net/forwarding/Makefile
6439 +index d97bd6889446d..72ee644d47bfa 100644
6440 +--- a/tools/testing/selftests/net/forwarding/Makefile
6441 ++++ b/tools/testing/selftests/net/forwarding/Makefile
6442 +@@ -9,6 +9,7 @@ TEST_PROGS = bridge_igmp.sh \
6443 + gre_inner_v4_multipath.sh \
6444 + gre_inner_v6_multipath.sh \
6445 + gre_multipath.sh \
6446 ++ ip6_forward_instats_vrf.sh \
6447 + ip6gre_inner_v4_multipath.sh \
6448 + ip6gre_inner_v6_multipath.sh \
6449 + ipip_flat_gre_key.sh \
6450 +diff --git a/tools/testing/selftests/net/forwarding/forwarding.config.sample b/tools/testing/selftests/net/forwarding/forwarding.config.sample
6451 +index b802c14d29509..e5e2fbeca22ec 100644
6452 +--- a/tools/testing/selftests/net/forwarding/forwarding.config.sample
6453 ++++ b/tools/testing/selftests/net/forwarding/forwarding.config.sample
6454 +@@ -39,3 +39,5 @@ NETIF_CREATE=yes
6455 + # Timeout (in seconds) before ping exits regardless of how many packets have
6456 + # been sent or received
6457 + PING_TIMEOUT=5
6458 ++# IPv6 traceroute utility name.
6459 ++TROUTE6=traceroute6
6460 +diff --git a/tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh b/tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh
6461 +new file mode 100755
6462 +index 0000000000000..9f5b3e2e5e954
6463 +--- /dev/null
6464 ++++ b/tools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh
6465 +@@ -0,0 +1,172 @@
6466 ++#!/bin/bash
6467 ++# SPDX-License-Identifier: GPL-2.0
6468 ++
6469 ++# Test IPv6 stats on the incoming interface when forwarding with VRF
6470 ++
6471 ++ALL_TESTS="
6472 ++ ipv6_ping
6473 ++ ipv6_in_too_big_err
6474 ++ ipv6_in_hdr_err
6475 ++ ipv6_in_addr_err
6476 ++ ipv6_in_discard
6477 ++"
6478 ++
6479 ++NUM_NETIFS=4
6480 ++source lib.sh
6481 ++
6482 ++h1_create()
6483 ++{
6484 ++ simple_if_init $h1 2001:1:1::2/64
6485 ++ ip -6 route add vrf v$h1 2001:1:2::/64 via 2001:1:1::1
6486 ++}
6487 ++
6488 ++h1_destroy()
6489 ++{
6490 ++ ip -6 route del vrf v$h1 2001:1:2::/64 via 2001:1:1::1
6491 ++ simple_if_fini $h1 2001:1:1::2/64
6492 ++}
6493 ++
6494 ++router_create()
6495 ++{
6496 ++ vrf_create router
6497 ++ __simple_if_init $rtr1 router 2001:1:1::1/64
6498 ++ __simple_if_init $rtr2 router 2001:1:2::1/64
6499 ++ mtu_set $rtr2 1280
6500 ++}
6501 ++
6502 ++router_destroy()
6503 ++{
6504 ++ mtu_restore $rtr2
6505 ++ __simple_if_fini $rtr2 2001:1:2::1/64
6506 ++ __simple_if_fini $rtr1 2001:1:1::1/64
6507 ++ vrf_destroy router
6508 ++}
6509 ++
6510 ++h2_create()
6511 ++{
6512 ++ simple_if_init $h2 2001:1:2::2/64
6513 ++ ip -6 route add vrf v$h2 2001:1:1::/64 via 2001:1:2::1
6514 ++ mtu_set $h2 1280
6515 ++}
6516 ++
6517 ++h2_destroy()
6518 ++{
6519 ++ mtu_restore $h2
6520 ++ ip -6 route del vrf v$h2 2001:1:1::/64 via 2001:1:2::1
6521 ++ simple_if_fini $h2 2001:1:2::2/64
6522 ++}
6523 ++
6524 ++setup_prepare()
6525 ++{
6526 ++ h1=${NETIFS[p1]}
6527 ++ rtr1=${NETIFS[p2]}
6528 ++
6529 ++ rtr2=${NETIFS[p3]}
6530 ++ h2=${NETIFS[p4]}
6531 ++
6532 ++ vrf_prepare
6533 ++ h1_create
6534 ++ router_create
6535 ++ h2_create
6536 ++
6537 ++ forwarding_enable
6538 ++}
6539 ++
6540 ++cleanup()
6541 ++{
6542 ++ pre_cleanup
6543 ++
6544 ++ forwarding_restore
6545 ++
6546 ++ h2_destroy
6547 ++ router_destroy
6548 ++ h1_destroy
6549 ++ vrf_cleanup
6550 ++}
6551 ++
6552 ++ipv6_in_too_big_err()
6553 ++{
6554 ++ RET=0
6555 ++
6556 ++ local t0=$(ipv6_stats_get $rtr1 Ip6InTooBigErrors)
6557 ++ local vrf_name=$(master_name_get $h1)
6558 ++
6559 ++ # Send too big packets
6560 ++ ip vrf exec $vrf_name \
6561 ++ $PING6 -s 1300 2001:1:2::2 -c 1 -w $PING_TIMEOUT &> /dev/null
6562 ++
6563 ++ local t1=$(ipv6_stats_get $rtr1 Ip6InTooBigErrors)
6564 ++ test "$((t1 - t0))" -ne 0
6565 ++ check_err $?
6566 ++ log_test "Ip6InTooBigErrors"
6567 ++}
6568 ++
6569 ++ipv6_in_hdr_err()
6570 ++{
6571 ++ RET=0
6572 ++
6573 ++ local t0=$(ipv6_stats_get $rtr1 Ip6InHdrErrors)
6574 ++ local vrf_name=$(master_name_get $h1)
6575 ++
6576 ++	# Send packets with a hop limit of 1. traceroute6 is the easiest way,
6577 ++	# as some ping6 implementations don't allow the hop limit to be set
6578 ++ ip vrf exec $vrf_name \
6579 ++ $TROUTE6 2001:1:2::2 &> /dev/null
6580 ++
6581 ++ local t1=$(ipv6_stats_get $rtr1 Ip6InHdrErrors)
6582 ++ test "$((t1 - t0))" -ne 0
6583 ++ check_err $?
6584 ++ log_test "Ip6InHdrErrors"
6585 ++}
6586 ++
6587 ++ipv6_in_addr_err()
6588 ++{
6589 ++ RET=0
6590 ++
6591 ++ local t0=$(ipv6_stats_get $rtr1 Ip6InAddrErrors)
6592 ++ local vrf_name=$(master_name_get $h1)
6593 ++
6594 ++	# Disable forwarding temporarily while sending the packet
6595 ++ sysctl -qw net.ipv6.conf.all.forwarding=0
6596 ++ ip vrf exec $vrf_name \
6597 ++ $PING6 2001:1:2::2 -c 1 -w $PING_TIMEOUT &> /dev/null
6598 ++ sysctl -qw net.ipv6.conf.all.forwarding=1
6599 ++
6600 ++ local t1=$(ipv6_stats_get $rtr1 Ip6InAddrErrors)
6601 ++ test "$((t1 - t0))" -ne 0
6602 ++ check_err $?
6603 ++ log_test "Ip6InAddrErrors"
6604 ++}
6605 ++
6606 ++ipv6_in_discard()
6607 ++{
6608 ++ RET=0
6609 ++
6610 ++ local t0=$(ipv6_stats_get $rtr1 Ip6InDiscards)
6611 ++ local vrf_name=$(master_name_get $h1)
6612 ++
6613 ++ # Add a policy to discard
6614 ++ ip xfrm policy add dst 2001:1:2::2/128 dir fwd action block
6615 ++ ip vrf exec $vrf_name \
6616 ++ $PING6 2001:1:2::2 -c 1 -w $PING_TIMEOUT &> /dev/null
6617 ++ ip xfrm policy del dst 2001:1:2::2/128 dir fwd
6618 ++
6619 ++ local t1=$(ipv6_stats_get $rtr1 Ip6InDiscards)
6620 ++ test "$((t1 - t0))" -ne 0
6621 ++ check_err $?
6622 ++ log_test "Ip6InDiscards"
6623 ++}
6624 ++ipv6_ping()
6625 ++{
6626 ++ RET=0
6627 ++
6628 ++ ping6_test $h1 2001:1:2::2
6629 ++}
6630 ++
6631 ++trap cleanup EXIT
6632 ++
6633 ++setup_prepare
6634 ++setup_wait
6635 ++tests_run
6636 ++
6637 ++exit $EXIT_STATUS
6638 +diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
6639 +index 42e28c983d41b..5140980f54758 100644
6640 +--- a/tools/testing/selftests/net/forwarding/lib.sh
6641 ++++ b/tools/testing/selftests/net/forwarding/lib.sh
6642 +@@ -748,6 +748,14 @@ qdisc_parent_stats_get()
6643 + | jq '.[] | select(.parent == "'"$parent"'") | '"$selector"
6644 + }
6645 +
6646 ++ipv6_stats_get()
6647 ++{
6648 ++ local dev=$1; shift
6649 ++ local stat=$1; shift
6650 ++
6651 ++ cat /proc/net/dev_snmp6/$dev | grep "^$stat" | cut -f2
6652 ++}
6653 ++
6654 + humanize()
6655 + {
6656 + local speed=$1; shift
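The new ipv6_stats_get() helper reads /proc/net/dev_snmp6/<dev>, where the kernel exposes per-interface IPv6 SNMP counters as "Name<tab>value" lines. For reference, the same lookup in C (a minimal sketch; no claim that any tool in the tree does it this way):

#include <stdio.h>
#include <string.h>

/* Return the named counter for an interface, or -1 if not found. */
static long long ipv6_stat_get(const char *dev, const char *stat)
{
	char path[256], name[64];
	long long val = -1, v;
	FILE *f;

	snprintf(path, sizeof(path), "/proc/net/dev_snmp6/%s", dev);
	f = fopen(path, "r");
	if (!f)
		return -1;

	while (fscanf(f, "%63s %lld", name, &v) == 2) {
		if (!strcmp(name, stat)) {
			val = v;
			break;
		}
	}

	fclose(f);
	return val;
}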
6657 +diff --git a/tools/testing/selftests/netfilter/nft_flowtable.sh b/tools/testing/selftests/netfilter/nft_flowtable.sh
6658 +index 427d94816f2d6..d4ffebb989f88 100755
6659 +--- a/tools/testing/selftests/netfilter/nft_flowtable.sh
6660 ++++ b/tools/testing/selftests/netfilter/nft_flowtable.sh
6661 +@@ -199,7 +199,6 @@ fi
6662 + # test basic connectivity
6663 + if ! ip netns exec ns1 ping -c 1 -q 10.0.2.99 > /dev/null; then
6664 + echo "ERROR: ns1 cannot reach ns2" 1>&2
6665 +- bash
6666 + exit 1
6667 + fi
6668 +
6669 +diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
6670 +index 2ea438e6b8b1f..a7e512a81d82c 100644
6671 +--- a/tools/testing/selftests/vm/userfaultfd.c
6672 ++++ b/tools/testing/selftests/vm/userfaultfd.c
6673 +@@ -414,9 +414,6 @@ static void uffd_test_ctx_init_ext(uint64_t *features)
6674 + uffd_test_ops->allocate_area((void **)&area_src);
6675 + uffd_test_ops->allocate_area((void **)&area_dst);
6676 +
6677 +- uffd_test_ops->release_pages(area_src);
6678 +- uffd_test_ops->release_pages(area_dst);
6679 +-
6680 + userfaultfd_open(features);
6681 +
6682 + count_verify = malloc(nr_pages * sizeof(unsigned long long));
6683 +@@ -437,6 +434,26 @@ static void uffd_test_ctx_init_ext(uint64_t *features)
6684 + *(area_count(area_src, nr) + 1) = 1;
6685 + }
6686 +
6687 ++ /*
6688 ++	 * After initialization of area_src, we must explicitly release pages
6689 ++	 * for area_dst to make sure it's fully empty. Otherwise some
6690 ++	 * area_dst pages could be erroneously initialized with zero pages,
6691 ++	 * and we could hit memory corruption later in the test.
6692 ++	 *
6693 ++	 * One example: when THP is globally enabled, the allocate_area()
6694 ++	 * calls above could have the two areas merged into a single VMA (as
6695 ++	 * they will have the same VMA flags, so they're mergeable). When we
6696 ++	 * initialize area_src above, it's possible that some part of
6697 ++	 * area_dst could have been faulted in via one huge THP that is
6698 ++	 * shared between area_src and area_dst. That could leave parts of
6699 ++	 * area_dst unable to be trapped by missing userfaults.
6700 ++	 *
6701 ++	 * This release_pages() guarantees that even if that happened, we
6702 ++	 * proactively split the THP and drop any accidentally initialized
6703 ++	 * pages within area_dst.
6704 ++ */
6705 ++ uffd_test_ops->release_pages(area_dst);
6706 ++
6707 + pipefd = malloc(sizeof(int) * nr_cpus * 2);
6708 + if (!pipefd)
6709 + err("pipefd");