Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.0 commit in: /
Date: Wed, 08 May 2019 10:08:00
Message-Id: 1557310057.25848a16762409a137897779ef10e7684c59c4b5.mpagano@gentoo
commit: 25848a16762409a137897779ef10e7684c59c4b5
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed May 8 10:07:37 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed May 8 10:07:37 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=25848a16

Linux patch 5.0.14

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1013_linux-5.0.14.patch | 4322 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4326 insertions(+)

16 diff --git a/0000_README b/0000_README
17 index dcd9694..b2a5389 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -95,6 +95,10 @@ Patch: 1012_linux-5.0.13.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.0.13
23
24 +Patch: 1013_linux-5.0.14.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.0.14
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1013_linux-5.0.14.patch b/1013_linux-5.0.14.patch
33 new file mode 100644
34 index 0000000..133615f
35 --- /dev/null
36 +++ b/1013_linux-5.0.14.patch
37 @@ -0,0 +1,4322 @@
38 +diff --git a/Documentation/driver-api/usb/power-management.rst b/Documentation/driver-api/usb/power-management.rst
39 +index 79beb807996b..4a74cf6f2797 100644
40 +--- a/Documentation/driver-api/usb/power-management.rst
41 ++++ b/Documentation/driver-api/usb/power-management.rst
42 +@@ -370,11 +370,15 @@ autosuspend the interface's device. When the usage counter is = 0
43 + then the interface is considered to be idle, and the kernel may
44 + autosuspend the device.
45 +
46 +-Drivers need not be concerned about balancing changes to the usage
47 +-counter; the USB core will undo any remaining "get"s when a driver
48 +-is unbound from its interface. As a corollary, drivers must not call
49 +-any of the ``usb_autopm_*`` functions after their ``disconnect``
50 +-routine has returned.
51 ++Drivers must be careful to balance their overall changes to the usage
52 ++counter. Unbalanced "get"s will remain in effect when a driver is
53 ++unbound from its interface, preventing the device from going into
54 ++runtime suspend should the interface be bound to a driver again. On
55 ++the other hand, drivers are allowed to achieve this balance by calling
56 ++the ``usb_autopm_*`` functions even after their ``disconnect`` routine
57 ++has returned -- say from within a work-queue routine -- provided they
58 ++retain an active reference to the interface (via ``usb_get_intf`` and
59 ++``usb_put_intf``).
60 +
61 + Drivers using the async routines are responsible for their own
62 + synchronization and mutual exclusion.
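For illustration only -- the following sketch is not part of the patch. It shows the pattern the revised documentation above describes: a driver balancing its autosuspend usage counter from a work-queue routine after its disconnect routine has returned, while keeping the interface valid with usb_get_intf()/usb_put_intf(). The my_* names are hypothetical, and error handling plus the INIT_WORK() call in probe() are omitted.

#include <linux/usb.h>
#include <linux/workqueue.h>

struct my_dev {
	struct usb_interface *intf;
	struct work_struct balance_work;	/* set up with INIT_WORK() in probe() */
};

static void my_balance_work_fn(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, balance_work);

	/* Undo a usb_autopm_get_interface() taken while the driver was bound. */
	usb_autopm_put_interface(dev->intf);

	/* Drop the reference that kept the interface valid until this point. */
	usb_put_intf(dev->intf);
}

static void my_disconnect(struct usb_interface *intf)
{
	struct my_dev *dev = usb_get_intfdata(intf);

	/* Hold an active reference so the deferred work may still use intf. */
	usb_get_intf(intf);
	schedule_work(&dev->balance_work);
}

Without the final usb_autopm_put_interface(), the leftover "get" would keep the device from runtime-suspending if the interface were later bound to a driver again, which is exactly what the reworded documentation warns about.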
63 +diff --git a/Makefile b/Makefile
64 +index 51a819544505..5ce29665eeed 100644
65 +--- a/Makefile
66 ++++ b/Makefile
67 +@@ -1,7 +1,7 @@
68 + # SPDX-License-Identifier: GPL-2.0
69 + VERSION = 5
70 + PATCHLEVEL = 0
71 +-SUBLEVEL = 13
72 ++SUBLEVEL = 14
73 + EXTRAVERSION =
74 + NAME = Shy Crocodile
75 +
76 +diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S
77 +index f230bb7092fd..b3373f5c88e0 100644
78 +--- a/arch/arc/lib/memset-archs.S
79 ++++ b/arch/arc/lib/memset-archs.S
80 +@@ -30,10 +30,10 @@
81 +
82 + #else
83 +
84 +-.macro PREALLOC_INSTR
85 ++.macro PREALLOC_INSTR reg, off
86 + .endm
87 +
88 +-.macro PREFETCHW_INSTR
89 ++.macro PREFETCHW_INSTR reg, off
90 + .endm
91 +
92 + #endif
93 +diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi
94 +index 7b818d9d2eab..8396faa9ac28 100644
95 +--- a/arch/arm/boot/dts/am33xx-l4.dtsi
96 ++++ b/arch/arm/boot/dts/am33xx-l4.dtsi
97 +@@ -1763,7 +1763,7 @@
98 + reg = <0xcc000 0x4>;
99 + reg-names = "rev";
100 + /* Domains (P, C): per_pwrdm, l4ls_clkdm */
101 +- clocks = <&l4ls_clkctrl AM3_D_CAN0_CLKCTRL 0>;
102 ++ clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN0_CLKCTRL 0>;
103 + clock-names = "fck";
104 + #address-cells = <1>;
105 + #size-cells = <1>;
106 +@@ -1786,7 +1786,7 @@
107 + reg = <0xd0000 0x4>;
108 + reg-names = "rev";
109 + /* Domains (P, C): per_pwrdm, l4ls_clkdm */
110 +- clocks = <&l4ls_clkctrl AM3_D_CAN1_CLKCTRL 0>;
111 ++ clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN1_CLKCTRL 0>;
112 + clock-names = "fck";
113 + #address-cells = <1>;
114 + #size-cells = <1>;
115 +diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
116 +index 09868dcee34b..df0c5456c94f 100644
117 +--- a/arch/arm/boot/dts/rk3288.dtsi
118 ++++ b/arch/arm/boot/dts/rk3288.dtsi
119 +@@ -1282,27 +1282,27 @@
120 + gpu_opp_table: gpu-opp-table {
121 + compatible = "operating-points-v2";
122 +
123 +- opp@100000000 {
124 ++ opp-100000000 {
125 + opp-hz = /bits/ 64 <100000000>;
126 + opp-microvolt = <950000>;
127 + };
128 +- opp@200000000 {
129 ++ opp-200000000 {
130 + opp-hz = /bits/ 64 <200000000>;
131 + opp-microvolt = <950000>;
132 + };
133 +- opp@300000000 {
134 ++ opp-300000000 {
135 + opp-hz = /bits/ 64 <300000000>;
136 + opp-microvolt = <1000000>;
137 + };
138 +- opp@400000000 {
139 ++ opp-400000000 {
140 + opp-hz = /bits/ 64 <400000000>;
141 + opp-microvolt = <1100000>;
142 + };
143 +- opp@500000000 {
144 ++ opp-500000000 {
145 + opp-hz = /bits/ 64 <500000000>;
146 + opp-microvolt = <1200000>;
147 + };
148 +- opp@600000000 {
149 ++ opp-600000000 {
150 + opp-hz = /bits/ 64 <600000000>;
151 + opp-microvolt = <1250000>;
152 + };
153 +diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
154 +index 51e808adb00c..2a757dcaa1a5 100644
155 +--- a/arch/arm/mach-at91/pm.c
156 ++++ b/arch/arm/mach-at91/pm.c
157 +@@ -591,13 +591,13 @@ static int __init at91_pm_backup_init(void)
158 +
159 + np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-securam");
160 + if (!np)
161 +- goto securam_fail;
162 ++ goto securam_fail_no_ref_dev;
163 +
164 + pdev = of_find_device_by_node(np);
165 + of_node_put(np);
166 + if (!pdev) {
167 + pr_warn("%s: failed to find securam device!\n", __func__);
168 +- goto securam_fail;
169 ++ goto securam_fail_no_ref_dev;
170 + }
171 +
172 + sram_pool = gen_pool_get(&pdev->dev, NULL);
173 +@@ -620,6 +620,8 @@ static int __init at91_pm_backup_init(void)
174 + return 0;
175 +
176 + securam_fail:
177 ++ put_device(&pdev->dev);
178 ++securam_fail_no_ref_dev:
179 + iounmap(pm_data.sfrbu);
180 + pm_data.sfrbu = NULL;
181 + return ret;
182 +diff --git a/arch/arm/mach-iop13xx/setup.c b/arch/arm/mach-iop13xx/setup.c
183 +index 53c316f7301e..fe4932fda01d 100644
184 +--- a/arch/arm/mach-iop13xx/setup.c
185 ++++ b/arch/arm/mach-iop13xx/setup.c
186 +@@ -300,7 +300,7 @@ static struct resource iop13xx_adma_2_resources[] = {
187 + }
188 + };
189 +
190 +-static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(64);
191 ++static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(32);
192 + static struct iop_adma_platform_data iop13xx_adma_0_data = {
193 + .hw_id = 0,
194 + .pool_size = PAGE_SIZE,
195 +@@ -324,7 +324,7 @@ static struct platform_device iop13xx_adma_0_channel = {
196 + .resource = iop13xx_adma_0_resources,
197 + .dev = {
198 + .dma_mask = &iop13xx_adma_dmamask,
199 +- .coherent_dma_mask = DMA_BIT_MASK(64),
200 ++ .coherent_dma_mask = DMA_BIT_MASK(32),
201 + .platform_data = (void *) &iop13xx_adma_0_data,
202 + },
203 + };
204 +@@ -336,7 +336,7 @@ static struct platform_device iop13xx_adma_1_channel = {
205 + .resource = iop13xx_adma_1_resources,
206 + .dev = {
207 + .dma_mask = &iop13xx_adma_dmamask,
208 +- .coherent_dma_mask = DMA_BIT_MASK(64),
209 ++ .coherent_dma_mask = DMA_BIT_MASK(32),
210 + .platform_data = (void *) &iop13xx_adma_1_data,
211 + },
212 + };
213 +@@ -348,7 +348,7 @@ static struct platform_device iop13xx_adma_2_channel = {
214 + .resource = iop13xx_adma_2_resources,
215 + .dev = {
216 + .dma_mask = &iop13xx_adma_dmamask,
217 +- .coherent_dma_mask = DMA_BIT_MASK(64),
218 ++ .coherent_dma_mask = DMA_BIT_MASK(32),
219 + .platform_data = (void *) &iop13xx_adma_2_data,
220 + },
221 + };
222 +diff --git a/arch/arm/mach-iop13xx/tpmi.c b/arch/arm/mach-iop13xx/tpmi.c
223 +index db511ec2b1df..116feb6b261e 100644
224 +--- a/arch/arm/mach-iop13xx/tpmi.c
225 ++++ b/arch/arm/mach-iop13xx/tpmi.c
226 +@@ -152,7 +152,7 @@ static struct resource iop13xx_tpmi_3_resources[] = {
227 + }
228 + };
229 +
230 +-u64 iop13xx_tpmi_mask = DMA_BIT_MASK(64);
231 ++u64 iop13xx_tpmi_mask = DMA_BIT_MASK(32);
232 + static struct platform_device iop13xx_tpmi_0_device = {
233 + .name = "iop-tpmi",
234 + .id = 0,
235 +@@ -160,7 +160,7 @@ static struct platform_device iop13xx_tpmi_0_device = {
236 + .resource = iop13xx_tpmi_0_resources,
237 + .dev = {
238 + .dma_mask = &iop13xx_tpmi_mask,
239 +- .coherent_dma_mask = DMA_BIT_MASK(64),
240 ++ .coherent_dma_mask = DMA_BIT_MASK(32),
241 + },
242 + };
243 +
244 +@@ -171,7 +171,7 @@ static struct platform_device iop13xx_tpmi_1_device = {
245 + .resource = iop13xx_tpmi_1_resources,
246 + .dev = {
247 + .dma_mask = &iop13xx_tpmi_mask,
248 +- .coherent_dma_mask = DMA_BIT_MASK(64),
249 ++ .coherent_dma_mask = DMA_BIT_MASK(32),
250 + },
251 + };
252 +
253 +@@ -182,7 +182,7 @@ static struct platform_device iop13xx_tpmi_2_device = {
254 + .resource = iop13xx_tpmi_2_resources,
255 + .dev = {
256 + .dma_mask = &iop13xx_tpmi_mask,
257 +- .coherent_dma_mask = DMA_BIT_MASK(64),
258 ++ .coherent_dma_mask = DMA_BIT_MASK(32),
259 + },
260 + };
261 +
262 +@@ -193,7 +193,7 @@ static struct platform_device iop13xx_tpmi_3_device = {
263 + .resource = iop13xx_tpmi_3_resources,
264 + .dev = {
265 + .dma_mask = &iop13xx_tpmi_mask,
266 +- .coherent_dma_mask = DMA_BIT_MASK(64),
267 ++ .coherent_dma_mask = DMA_BIT_MASK(32),
268 + },
269 + };
270 +
271 +diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
272 +index 1444b4b4bd9f..439e143cad7b 100644
273 +--- a/arch/arm/mach-omap2/display.c
274 ++++ b/arch/arm/mach-omap2/display.c
275 +@@ -250,8 +250,10 @@ static int __init omapdss_init_of(void)
276 + if (!node)
277 + return 0;
278 +
279 +- if (!of_device_is_available(node))
280 ++ if (!of_device_is_available(node)) {
281 ++ of_node_put(node);
282 + return 0;
283 ++ }
284 +
285 + pdev = of_find_device_by_node(node);
286 +
287 +diff --git a/arch/arm/plat-iop/adma.c b/arch/arm/plat-iop/adma.c
288 +index a4d1f8de3b5b..d9612221e484 100644
289 +--- a/arch/arm/plat-iop/adma.c
290 ++++ b/arch/arm/plat-iop/adma.c
291 +@@ -143,7 +143,7 @@ struct platform_device iop3xx_dma_0_channel = {
292 + .resource = iop3xx_dma_0_resources,
293 + .dev = {
294 + .dma_mask = &iop3xx_adma_dmamask,
295 +- .coherent_dma_mask = DMA_BIT_MASK(64),
296 ++ .coherent_dma_mask = DMA_BIT_MASK(32),
297 + .platform_data = (void *) &iop3xx_dma_0_data,
298 + },
299 + };
300 +@@ -155,7 +155,7 @@ struct platform_device iop3xx_dma_1_channel = {
301 + .resource = iop3xx_dma_1_resources,
302 + .dev = {
303 + .dma_mask = &iop3xx_adma_dmamask,
304 +- .coherent_dma_mask = DMA_BIT_MASK(64),
305 ++ .coherent_dma_mask = DMA_BIT_MASK(32),
306 + .platform_data = (void *) &iop3xx_dma_1_data,
307 + },
308 + };
309 +@@ -167,7 +167,7 @@ struct platform_device iop3xx_aau_channel = {
310 + .resource = iop3xx_aau_resources,
311 + .dev = {
312 + .dma_mask = &iop3xx_adma_dmamask,
313 +- .coherent_dma_mask = DMA_BIT_MASK(64),
314 ++ .coherent_dma_mask = DMA_BIT_MASK(32),
315 + .platform_data = (void *) &iop3xx_aau_data,
316 + },
317 + };
318 +diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
319 +index a2399fd66e97..1e970873439c 100644
320 +--- a/arch/arm/plat-orion/common.c
321 ++++ b/arch/arm/plat-orion/common.c
322 +@@ -622,7 +622,7 @@ static struct platform_device orion_xor0_shared = {
323 + .resource = orion_xor0_shared_resources,
324 + .dev = {
325 + .dma_mask = &orion_xor_dmamask,
326 +- .coherent_dma_mask = DMA_BIT_MASK(64),
327 ++ .coherent_dma_mask = DMA_BIT_MASK(32),
328 + .platform_data = &orion_xor0_pdata,
329 + },
330 + };
331 +@@ -683,7 +683,7 @@ static struct platform_device orion_xor1_shared = {
332 + .resource = orion_xor1_shared_resources,
333 + .dev = {
334 + .dma_mask = &orion_xor_dmamask,
335 +- .coherent_dma_mask = DMA_BIT_MASK(64),
336 ++ .coherent_dma_mask = DMA_BIT_MASK(32),
337 + .platform_data = &orion_xor1_pdata,
338 + },
339 + };
340 +diff --git a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
341 +index 99d0d9912950..a91f87df662e 100644
342 +--- a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
343 ++++ b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
344 +@@ -107,8 +107,8 @@
345 + snps,reset-gpio = <&gpio1 RK_PC2 GPIO_ACTIVE_LOW>;
346 + snps,reset-active-low;
347 + snps,reset-delays-us = <0 10000 50000>;
348 +- tx_delay = <0x25>;
349 +- rx_delay = <0x11>;
350 ++ tx_delay = <0x24>;
351 ++ rx_delay = <0x18>;
352 + status = "okay";
353 + };
354 +
355 +diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c
356 +index 5ba4465e44f0..ea94cf8f9dc6 100644
357 +--- a/arch/arm64/kernel/sdei.c
358 ++++ b/arch/arm64/kernel/sdei.c
359 +@@ -94,6 +94,9 @@ static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info)
360 + unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
361 + unsigned long high = low + SDEI_STACK_SIZE;
362 +
363 ++ if (!low)
364 ++ return false;
365 ++
366 + if (sp < low || sp >= high)
367 + return false;
368 +
369 +@@ -111,6 +114,9 @@ static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info)
370 + unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
371 + unsigned long high = low + SDEI_STACK_SIZE;
372 +
373 ++ if (!low)
374 ++ return false;
375 ++
376 + if (sp < low || sp >= high)
377 + return false;
378 +
379 +diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
380 +index 683b5b3805bd..cd381e2291df 100644
381 +--- a/arch/powerpc/kernel/kvm.c
382 ++++ b/arch/powerpc/kernel/kvm.c
383 +@@ -22,6 +22,7 @@
384 + #include <linux/kvm_host.h>
385 + #include <linux/init.h>
386 + #include <linux/export.h>
387 ++#include <linux/kmemleak.h>
388 + #include <linux/kvm_para.h>
389 + #include <linux/slab.h>
390 + #include <linux/of.h>
391 +@@ -712,6 +713,12 @@ static void kvm_use_magic_page(void)
392 +
393 + static __init void kvm_free_tmp(void)
394 + {
395 ++ /*
396 ++ * Inform kmemleak about the hole in the .bss section since the
397 ++ * corresponding pages will be unmapped with DEBUG_PAGEALLOC=y.
398 ++ */
399 ++ kmemleak_free_part(&kvm_tmp[kvm_tmp_index],
400 ++ ARRAY_SIZE(kvm_tmp) - kvm_tmp_index);
401 + free_reserved_area(&kvm_tmp[kvm_tmp_index],
402 + &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL);
403 + }
404 +diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
405 +index 06898c13901d..aec91dbcdc0b 100644
406 +--- a/arch/powerpc/mm/slice.c
407 ++++ b/arch/powerpc/mm/slice.c
408 +@@ -32,6 +32,7 @@
409 + #include <linux/export.h>
410 + #include <linux/hugetlb.h>
411 + #include <linux/sched/mm.h>
412 ++#include <linux/security.h>
413 + #include <asm/mman.h>
414 + #include <asm/mmu.h>
415 + #include <asm/copro.h>
416 +@@ -377,6 +378,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
417 + int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
418 + unsigned long addr, found, prev;
419 + struct vm_unmapped_area_info info;
420 ++ unsigned long min_addr = max(PAGE_SIZE, mmap_min_addr);
421 +
422 + info.flags = VM_UNMAPPED_AREA_TOPDOWN;
423 + info.length = len;
424 +@@ -393,7 +395,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
425 + if (high_limit > DEFAULT_MAP_WINDOW)
426 + addr += mm->context.slb_addr_limit - DEFAULT_MAP_WINDOW;
427 +
428 +- while (addr > PAGE_SIZE) {
429 ++ while (addr > min_addr) {
430 + info.high_limit = addr;
431 + if (!slice_scan_available(addr - 1, available, 0, &addr))
432 + continue;
433 +@@ -405,8 +407,8 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
434 + * Check if we need to reduce the range, or if we can
435 + * extend it to cover the previous available slice.
436 + */
437 +- if (addr < PAGE_SIZE)
438 +- addr = PAGE_SIZE;
439 ++ if (addr < min_addr)
440 ++ addr = min_addr;
441 + else if (slice_scan_available(addr - 1, available, 0, &prev)) {
442 + addr = prev;
443 + goto prev_slice;
444 +@@ -528,7 +530,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
445 + addr = _ALIGN_UP(addr, page_size);
446 + slice_dbg(" aligned addr=%lx\n", addr);
447 + /* Ignore hint if it's too large or overlaps a VMA */
448 +- if (addr > high_limit - len ||
449 ++ if (addr > high_limit - len || addr < mmap_min_addr ||
450 + !slice_area_is_free(mm, addr, len))
451 + addr = 0;
452 + }
453 +diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
454 +index 637b896894fc..aa82df30e38a 100644
455 +--- a/arch/riscv/include/asm/uaccess.h
456 ++++ b/arch/riscv/include/asm/uaccess.h
457 +@@ -301,7 +301,7 @@ do { \
458 + " .balign 4\n" \
459 + "4:\n" \
460 + " li %0, %6\n" \
461 +- " jump 2b, %1\n" \
462 ++ " jump 3b, %1\n" \
463 + " .previous\n" \
464 + " .section __ex_table,\"a\"\n" \
465 + " .balign " RISCV_SZPTR "\n" \
466 +diff --git a/arch/sh/boards/of-generic.c b/arch/sh/boards/of-generic.c
467 +index 958f46da3a79..d91065e81a4e 100644
468 +--- a/arch/sh/boards/of-generic.c
469 ++++ b/arch/sh/boards/of-generic.c
470 +@@ -164,10 +164,10 @@ static struct sh_machine_vector __initmv sh_of_generic_mv = {
471 +
472 + struct sh_clk_ops;
473 +
474 +-void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
475 ++void __init __weak arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
476 + {
477 + }
478 +
479 +-void __init plat_irq_setup(void)
480 ++void __init __weak plat_irq_setup(void)
481 + {
482 + }
483 +diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
484 +index d45f3fbd232e..f15441b07dad 100644
485 +--- a/arch/x86/events/amd/core.c
486 ++++ b/arch/x86/events/amd/core.c
487 +@@ -116,6 +116,110 @@ static __initconst const u64 amd_hw_cache_event_ids
488 + },
489 + };
490 +
491 ++static __initconst const u64 amd_hw_cache_event_ids_f17h
492 ++ [PERF_COUNT_HW_CACHE_MAX]
493 ++ [PERF_COUNT_HW_CACHE_OP_MAX]
494 ++ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
495 ++[C(L1D)] = {
496 ++ [C(OP_READ)] = {
497 ++ [C(RESULT_ACCESS)] = 0x0040, /* Data Cache Accesses */
498 ++ [C(RESULT_MISS)] = 0xc860, /* L2$ access from DC Miss */
499 ++ },
500 ++ [C(OP_WRITE)] = {
501 ++ [C(RESULT_ACCESS)] = 0,
502 ++ [C(RESULT_MISS)] = 0,
503 ++ },
504 ++ [C(OP_PREFETCH)] = {
505 ++ [C(RESULT_ACCESS)] = 0xff5a, /* h/w prefetch DC Fills */
506 ++ [C(RESULT_MISS)] = 0,
507 ++ },
508 ++},
509 ++[C(L1I)] = {
510 ++ [C(OP_READ)] = {
511 ++ [C(RESULT_ACCESS)] = 0x0080, /* Instruction cache fetches */
512 ++ [C(RESULT_MISS)] = 0x0081, /* Instruction cache misses */
513 ++ },
514 ++ [C(OP_WRITE)] = {
515 ++ [C(RESULT_ACCESS)] = -1,
516 ++ [C(RESULT_MISS)] = -1,
517 ++ },
518 ++ [C(OP_PREFETCH)] = {
519 ++ [C(RESULT_ACCESS)] = 0,
520 ++ [C(RESULT_MISS)] = 0,
521 ++ },
522 ++},
523 ++[C(LL)] = {
524 ++ [C(OP_READ)] = {
525 ++ [C(RESULT_ACCESS)] = 0,
526 ++ [C(RESULT_MISS)] = 0,
527 ++ },
528 ++ [C(OP_WRITE)] = {
529 ++ [C(RESULT_ACCESS)] = 0,
530 ++ [C(RESULT_MISS)] = 0,
531 ++ },
532 ++ [C(OP_PREFETCH)] = {
533 ++ [C(RESULT_ACCESS)] = 0,
534 ++ [C(RESULT_MISS)] = 0,
535 ++ },
536 ++},
537 ++[C(DTLB)] = {
538 ++ [C(OP_READ)] = {
539 ++ [C(RESULT_ACCESS)] = 0xff45, /* All L2 DTLB accesses */
540 ++ [C(RESULT_MISS)] = 0xf045, /* L2 DTLB misses (PT walks) */
541 ++ },
542 ++ [C(OP_WRITE)] = {
543 ++ [C(RESULT_ACCESS)] = 0,
544 ++ [C(RESULT_MISS)] = 0,
545 ++ },
546 ++ [C(OP_PREFETCH)] = {
547 ++ [C(RESULT_ACCESS)] = 0,
548 ++ [C(RESULT_MISS)] = 0,
549 ++ },
550 ++},
551 ++[C(ITLB)] = {
552 ++ [C(OP_READ)] = {
553 ++ [C(RESULT_ACCESS)] = 0x0084, /* L1 ITLB misses, L2 ITLB hits */
554 ++ [C(RESULT_MISS)] = 0xff85, /* L1 ITLB misses, L2 misses */
555 ++ },
556 ++ [C(OP_WRITE)] = {
557 ++ [C(RESULT_ACCESS)] = -1,
558 ++ [C(RESULT_MISS)] = -1,
559 ++ },
560 ++ [C(OP_PREFETCH)] = {
561 ++ [C(RESULT_ACCESS)] = -1,
562 ++ [C(RESULT_MISS)] = -1,
563 ++ },
564 ++},
565 ++[C(BPU)] = {
566 ++ [C(OP_READ)] = {
567 ++ [C(RESULT_ACCESS)] = 0x00c2, /* Retired Branch Instr. */
568 ++ [C(RESULT_MISS)] = 0x00c3, /* Retired Mispredicted BI */
569 ++ },
570 ++ [C(OP_WRITE)] = {
571 ++ [C(RESULT_ACCESS)] = -1,
572 ++ [C(RESULT_MISS)] = -1,
573 ++ },
574 ++ [C(OP_PREFETCH)] = {
575 ++ [C(RESULT_ACCESS)] = -1,
576 ++ [C(RESULT_MISS)] = -1,
577 ++ },
578 ++},
579 ++[C(NODE)] = {
580 ++ [C(OP_READ)] = {
581 ++ [C(RESULT_ACCESS)] = 0,
582 ++ [C(RESULT_MISS)] = 0,
583 ++ },
584 ++ [C(OP_WRITE)] = {
585 ++ [C(RESULT_ACCESS)] = -1,
586 ++ [C(RESULT_MISS)] = -1,
587 ++ },
588 ++ [C(OP_PREFETCH)] = {
589 ++ [C(RESULT_ACCESS)] = -1,
590 ++ [C(RESULT_MISS)] = -1,
591 ++ },
592 ++},
593 ++};
594 ++
595 + /*
596 + * AMD Performance Monitor K7 and later, up to and including Family 16h:
597 + */
598 +@@ -865,9 +969,10 @@ __init int amd_pmu_init(void)
599 + x86_pmu.amd_nb_constraints = 0;
600 + }
601 +
602 +- /* Events are common for all AMDs */
603 +- memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
604 +- sizeof(hw_cache_event_ids));
605 ++ if (boot_cpu_data.x86 >= 0x17)
606 ++ memcpy(hw_cache_event_ids, amd_hw_cache_event_ids_f17h, sizeof(hw_cache_event_ids));
607 ++ else
608 ++ memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, sizeof(hw_cache_event_ids));
609 +
610 + return 0;
611 + }
612 +diff --git a/arch/x86/kernel/cpu/mce/severity.c b/arch/x86/kernel/cpu/mce/severity.c
613 +index dc3e26e905a3..65201e180fe0 100644
614 +--- a/arch/x86/kernel/cpu/mce/severity.c
615 ++++ b/arch/x86/kernel/cpu/mce/severity.c
616 +@@ -165,6 +165,11 @@ static struct severity {
617 + SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
618 + KERNEL
619 + ),
620 ++ MCESEV(
621 ++ PANIC, "Instruction fetch error in kernel",
622 ++ SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
623 ++ KERNEL
624 ++ ),
625 + #endif
626 + MCESEV(
627 + PANIC, "Action required: unknown MCACOD",
628 +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
629 +index 4b6c2da7265c..3339697de6e5 100644
630 +--- a/arch/x86/kvm/lapic.c
631 ++++ b/arch/x86/kvm/lapic.c
632 +@@ -70,7 +70,6 @@
633 + #define APIC_BROADCAST 0xFF
634 + #define X2APIC_BROADCAST 0xFFFFFFFFul
635 +
636 +-static bool lapic_timer_advance_adjust_done = false;
637 + #define LAPIC_TIMER_ADVANCE_ADJUST_DONE 100
638 + /* step-by-step approximation to mitigate fluctuation */
639 + #define LAPIC_TIMER_ADVANCE_ADJUST_STEP 8
640 +@@ -1479,14 +1478,32 @@ static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
641 + return false;
642 + }
643 +
644 ++static inline void __wait_lapic_expire(struct kvm_vcpu *vcpu, u64 guest_cycles)
645 ++{
646 ++ u64 timer_advance_ns = vcpu->arch.apic->lapic_timer.timer_advance_ns;
647 ++
648 ++ /*
649 ++ * If the guest TSC is running at a different ratio than the host, then
650 ++ * convert the delay to nanoseconds to achieve an accurate delay. Note
651 ++ * that __delay() uses delay_tsc whenever the hardware has TSC, thus
652 ++ * always for VMX enabled hardware.
653 ++ */
654 ++ if (vcpu->arch.tsc_scaling_ratio == kvm_default_tsc_scaling_ratio) {
655 ++ __delay(min(guest_cycles,
656 ++ nsec_to_cycles(vcpu, timer_advance_ns)));
657 ++ } else {
658 ++ u64 delay_ns = guest_cycles * 1000000ULL;
659 ++ do_div(delay_ns, vcpu->arch.virtual_tsc_khz);
660 ++ ndelay(min_t(u32, delay_ns, timer_advance_ns));
661 ++ }
662 ++}
663 ++
664 + void wait_lapic_expire(struct kvm_vcpu *vcpu)
665 + {
666 + struct kvm_lapic *apic = vcpu->arch.apic;
667 ++ u32 timer_advance_ns = apic->lapic_timer.timer_advance_ns;
668 + u64 guest_tsc, tsc_deadline, ns;
669 +
670 +- if (!lapic_in_kernel(vcpu))
671 +- return;
672 +-
673 + if (apic->lapic_timer.expired_tscdeadline == 0)
674 + return;
675 +
676 +@@ -1498,33 +1515,37 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu)
677 + guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
678 + trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);
679 +
680 +- /* __delay is delay_tsc whenever the hardware has TSC, thus always. */
681 + if (guest_tsc < tsc_deadline)
682 +- __delay(min(tsc_deadline - guest_tsc,
683 +- nsec_to_cycles(vcpu, lapic_timer_advance_ns)));
684 ++ __wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
685 +
686 +- if (!lapic_timer_advance_adjust_done) {
687 ++ if (!apic->lapic_timer.timer_advance_adjust_done) {
688 + /* too early */
689 + if (guest_tsc < tsc_deadline) {
690 + ns = (tsc_deadline - guest_tsc) * 1000000ULL;
691 + do_div(ns, vcpu->arch.virtual_tsc_khz);
692 +- lapic_timer_advance_ns -= min((unsigned int)ns,
693 +- lapic_timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP);
694 ++ timer_advance_ns -= min((u32)ns,
695 ++ timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP);
696 + } else {
697 + /* too late */
698 + ns = (guest_tsc - tsc_deadline) * 1000000ULL;
699 + do_div(ns, vcpu->arch.virtual_tsc_khz);
700 +- lapic_timer_advance_ns += min((unsigned int)ns,
701 +- lapic_timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP);
702 ++ timer_advance_ns += min((u32)ns,
703 ++ timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP);
704 + }
705 + if (abs(guest_tsc - tsc_deadline) < LAPIC_TIMER_ADVANCE_ADJUST_DONE)
706 +- lapic_timer_advance_adjust_done = true;
707 ++ apic->lapic_timer.timer_advance_adjust_done = true;
708 ++ if (unlikely(timer_advance_ns > 5000)) {
709 ++ timer_advance_ns = 0;
710 ++ apic->lapic_timer.timer_advance_adjust_done = true;
711 ++ }
712 ++ apic->lapic_timer.timer_advance_ns = timer_advance_ns;
713 + }
714 + }
715 +
716 + static void start_sw_tscdeadline(struct kvm_lapic *apic)
717 + {
718 +- u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline;
719 ++ struct kvm_timer *ktimer = &apic->lapic_timer;
720 ++ u64 guest_tsc, tscdeadline = ktimer->tscdeadline;
721 + u64 ns = 0;
722 + ktime_t expire;
723 + struct kvm_vcpu *vcpu = apic->vcpu;
724 +@@ -1539,13 +1560,15 @@ static void start_sw_tscdeadline(struct kvm_lapic *apic)
725 +
726 + now = ktime_get();
727 + guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
728 +- if (likely(tscdeadline > guest_tsc)) {
729 +- ns = (tscdeadline - guest_tsc) * 1000000ULL;
730 +- do_div(ns, this_tsc_khz);
731 ++
732 ++ ns = (tscdeadline - guest_tsc) * 1000000ULL;
733 ++ do_div(ns, this_tsc_khz);
734 ++
735 ++ if (likely(tscdeadline > guest_tsc) &&
736 ++ likely(ns > apic->lapic_timer.timer_advance_ns)) {
737 + expire = ktime_add_ns(now, ns);
738 +- expire = ktime_sub_ns(expire, lapic_timer_advance_ns);
739 +- hrtimer_start(&apic->lapic_timer.timer,
740 +- expire, HRTIMER_MODE_ABS_PINNED);
741 ++ expire = ktime_sub_ns(expire, ktimer->timer_advance_ns);
742 ++ hrtimer_start(&ktimer->timer, expire, HRTIMER_MODE_ABS_PINNED);
743 + } else
744 + apic_timer_expired(apic);
745 +
746 +@@ -2252,7 +2275,7 @@ static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
747 + return HRTIMER_NORESTART;
748 + }
749 +
750 +-int kvm_create_lapic(struct kvm_vcpu *vcpu)
751 ++int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
752 + {
753 + struct kvm_lapic *apic;
754 +
755 +@@ -2276,6 +2299,14 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
756 + hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
757 + HRTIMER_MODE_ABS_PINNED);
758 + apic->lapic_timer.timer.function = apic_timer_fn;
759 ++ if (timer_advance_ns == -1) {
760 ++ apic->lapic_timer.timer_advance_ns = 1000;
761 ++ apic->lapic_timer.timer_advance_adjust_done = false;
762 ++ } else {
763 ++ apic->lapic_timer.timer_advance_ns = timer_advance_ns;
764 ++ apic->lapic_timer.timer_advance_adjust_done = true;
765 ++ }
766 ++
767 +
768 + /*
769 + * APIC is created enabled. This will prevent kvm_lapic_set_base from
770 +diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
771 +index ff6ef9c3d760..d6d049ba3045 100644
772 +--- a/arch/x86/kvm/lapic.h
773 ++++ b/arch/x86/kvm/lapic.h
774 +@@ -31,8 +31,10 @@ struct kvm_timer {
775 + u32 timer_mode_mask;
776 + u64 tscdeadline;
777 + u64 expired_tscdeadline;
778 ++ u32 timer_advance_ns;
779 + atomic_t pending; /* accumulated triggered timers */
780 + bool hv_timer_in_use;
781 ++ bool timer_advance_adjust_done;
782 + };
783 +
784 + struct kvm_lapic {
785 +@@ -62,7 +64,7 @@ struct kvm_lapic {
786 +
787 + struct dest_map;
788 +
789 +-int kvm_create_lapic(struct kvm_vcpu *vcpu);
790 ++int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns);
791 + void kvm_free_lapic(struct kvm_vcpu *vcpu);
792 +
793 + int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu);
794 +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
795 +index e544cec812f9..2a07e43ee666 100644
796 +--- a/arch/x86/kvm/svm.c
797 ++++ b/arch/x86/kvm/svm.c
798 +@@ -6815,7 +6815,8 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
799 + struct page **src_p, **dst_p;
800 + struct kvm_sev_dbg debug;
801 + unsigned long n;
802 +- int ret, size;
803 ++ unsigned int size;
804 ++ int ret;
805 +
806 + if (!sev_guest(kvm))
807 + return -ENOTTY;
808 +@@ -6823,6 +6824,11 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
809 + if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
810 + return -EFAULT;
811 +
812 ++ if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
813 ++ return -EINVAL;
814 ++ if (!debug.dst_uaddr)
815 ++ return -EINVAL;
816 ++
817 + vaddr = debug.src_uaddr;
818 + size = debug.len;
819 + vaddr_end = vaddr + size;
820 +@@ -6873,8 +6879,8 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
821 + dst_vaddr,
822 + len, &argp->error);
823 +
824 +- sev_unpin_memory(kvm, src_p, 1);
825 +- sev_unpin_memory(kvm, dst_p, 1);
826 ++ sev_unpin_memory(kvm, src_p, n);
827 ++ sev_unpin_memory(kvm, dst_p, n);
828 +
829 + if (ret)
830 + goto err;
831 +diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
832 +index a4bcac94392c..8f8c42b04875 100644
833 +--- a/arch/x86/kvm/vmx/nested.c
834 ++++ b/arch/x86/kvm/vmx/nested.c
835 +@@ -2793,7 +2793,7 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
836 + [fail]"i"(offsetof(struct vcpu_vmx, fail)),
837 + [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)),
838 + [wordsize]"i"(sizeof(ulong))
839 +- : "rax", "cc", "memory"
840 ++ : "cc", "memory"
841 + );
842 +
843 + preempt_enable();
844 +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
845 +index e7fe8c692362..da6fdd5434a1 100644
846 +--- a/arch/x86/kvm/vmx/vmx.c
847 ++++ b/arch/x86/kvm/vmx/vmx.c
848 +@@ -6465,7 +6465,7 @@ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
849 + "xor %%edi, %%edi \n\t"
850 + "xor %%ebp, %%ebp \n\t"
851 + "pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t"
852 +- : ASM_CALL_CONSTRAINT
853 ++ : ASM_CALL_CONSTRAINT, "=S"((int){0})
854 + : "c"(vmx), "d"((unsigned long)HOST_RSP), "S"(evmcs_rsp),
855 + [launched]"i"(offsetof(struct vcpu_vmx, __launched)),
856 + [fail]"i"(offsetof(struct vcpu_vmx, fail)),
857 +@@ -7133,6 +7133,7 @@ static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
858 + {
859 + struct vcpu_vmx *vmx;
860 + u64 tscl, guest_tscl, delta_tsc, lapic_timer_advance_cycles;
861 ++ struct kvm_timer *ktimer = &vcpu->arch.apic->lapic_timer;
862 +
863 + if (kvm_mwait_in_guest(vcpu->kvm))
864 + return -EOPNOTSUPP;
865 +@@ -7141,7 +7142,8 @@ static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
866 + tscl = rdtsc();
867 + guest_tscl = kvm_read_l1_tsc(vcpu, tscl);
868 + delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl;
869 +- lapic_timer_advance_cycles = nsec_to_cycles(vcpu, lapic_timer_advance_ns);
870 ++ lapic_timer_advance_cycles = nsec_to_cycles(vcpu,
871 ++ ktimer->timer_advance_ns);
872 +
873 + if (delta_tsc > lapic_timer_advance_cycles)
874 + delta_tsc -= lapic_timer_advance_cycles;
875 +diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
876 +index 1abae731c3e4..b26cb680ba38 100644
877 +--- a/arch/x86/kvm/vmx/vmx.h
878 ++++ b/arch/x86/kvm/vmx/vmx.h
879 +@@ -444,7 +444,8 @@ static inline u32 vmx_vmentry_ctrl(void)
880 + {
881 + u32 vmentry_ctrl = vmcs_config.vmentry_ctrl;
882 + if (pt_mode == PT_MODE_SYSTEM)
883 +- vmentry_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP | VM_EXIT_CLEAR_IA32_RTIT_CTL);
884 ++ vmentry_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP |
885 ++ VM_ENTRY_LOAD_IA32_RTIT_CTL);
886 + /* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
887 + return vmentry_ctrl &
888 + ~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VM_ENTRY_LOAD_IA32_EFER);
889 +@@ -454,9 +455,10 @@ static inline u32 vmx_vmexit_ctrl(void)
890 + {
891 + u32 vmexit_ctrl = vmcs_config.vmexit_ctrl;
892 + if (pt_mode == PT_MODE_SYSTEM)
893 +- vmexit_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP | VM_ENTRY_LOAD_IA32_RTIT_CTL);
894 ++ vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP |
895 ++ VM_EXIT_CLEAR_IA32_RTIT_CTL);
896 + /* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
897 +- return vmcs_config.vmexit_ctrl &
898 ++ return vmexit_ctrl &
899 + ~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER);
900 + }
901 +
902 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
903 +index 7e413ea19a9a..3eeb7183fc09 100644
904 +--- a/arch/x86/kvm/x86.c
905 ++++ b/arch/x86/kvm/x86.c
906 +@@ -136,10 +136,14 @@ EXPORT_SYMBOL_GPL(kvm_default_tsc_scaling_ratio);
907 + static u32 __read_mostly tsc_tolerance_ppm = 250;
908 + module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
909 +
910 +-/* lapic timer advance (tscdeadline mode only) in nanoseconds */
911 +-unsigned int __read_mostly lapic_timer_advance_ns = 1000;
912 ++/*
913 ++ * lapic timer advance (tscdeadline mode only) in nanoseconds. '-1' enables
914 ++ * adaptive tuning starting from default advancment of 1000ns. '0' disables
915 ++ * advancement entirely. Any other value is used as-is and disables adaptive
916 ++ * tuning, i.e. allows priveleged userspace to set an exact advancement time.
917 ++ */
918 ++static int __read_mostly lapic_timer_advance_ns = -1;
919 + module_param(lapic_timer_advance_ns, uint, S_IRUGO | S_IWUSR);
920 +-EXPORT_SYMBOL_GPL(lapic_timer_advance_ns);
921 +
922 + static bool __read_mostly vector_hashing = true;
923 + module_param(vector_hashing, bool, S_IRUGO);
924 +@@ -7882,7 +7886,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
925 + }
926 +
927 + trace_kvm_entry(vcpu->vcpu_id);
928 +- if (lapic_timer_advance_ns)
929 ++ if (lapic_in_kernel(vcpu) &&
930 ++ vcpu->arch.apic->lapic_timer.timer_advance_ns)
931 + wait_lapic_expire(vcpu);
932 + guest_enter_irqoff();
933 +
934 +@@ -9070,7 +9075,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
935 + goto fail_free_pio_data;
936 +
937 + if (irqchip_in_kernel(vcpu->kvm)) {
938 +- r = kvm_create_lapic(vcpu);
939 ++ r = kvm_create_lapic(vcpu, lapic_timer_advance_ns);
940 + if (r < 0)
941 + goto fail_mmu_destroy;
942 + } else
943 +diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
944 +index de3d46769ee3..b457160dc7ba 100644
945 +--- a/arch/x86/kvm/x86.h
946 ++++ b/arch/x86/kvm/x86.h
947 +@@ -294,8 +294,6 @@ extern u64 kvm_supported_xcr0(void);
948 +
949 + extern unsigned int min_timer_period_us;
950 +
951 +-extern unsigned int lapic_timer_advance_ns;
952 +-
953 + extern bool enable_vmware_backdoor;
954 +
955 + extern struct static_key kvm_no_apic_vcpu;
956 +diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
957 +index f905a2371080..8dacdb96899e 100644
958 +--- a/arch/x86/mm/init.c
959 ++++ b/arch/x86/mm/init.c
960 +@@ -5,6 +5,7 @@
961 + #include <linux/memblock.h>
962 + #include <linux/swapfile.h>
963 + #include <linux/swapops.h>
964 ++#include <linux/kmemleak.h>
965 +
966 + #include <asm/set_memory.h>
967 + #include <asm/e820/api.h>
968 +@@ -766,6 +767,11 @@ void free_init_pages(const char *what, unsigned long begin, unsigned long end)
969 + if (debug_pagealloc_enabled()) {
970 + pr_info("debug: unmapping init [mem %#010lx-%#010lx]\n",
971 + begin, end - 1);
972 ++ /*
973 ++ * Inform kmemleak about the hole in the memory since the
974 ++ * corresponding pages will be unmapped.
975 ++ */
976 ++ kmemleak_free_part((void *)begin, end - begin);
977 + set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
978 + } else {
979 + /*
980 +diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
981 +index 3f452ffed7e9..d669c5e797e0 100644
982 +--- a/arch/x86/mm/kaslr.c
983 ++++ b/arch/x86/mm/kaslr.c
984 +@@ -94,7 +94,7 @@ void __init kernel_randomize_memory(void)
985 + if (!kaslr_memory_enabled())
986 + return;
987 +
988 +- kaslr_regions[0].size_tb = 1 << (__PHYSICAL_MASK_SHIFT - TB_SHIFT);
989 ++ kaslr_regions[0].size_tb = 1 << (MAX_PHYSMEM_BITS - TB_SHIFT);
990 + kaslr_regions[1].size_tb = VMALLOC_SIZE_TB;
991 +
992 + /*
993 +diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
994 +index 999d6d8f0bef..9a49335e717a 100644
995 +--- a/arch/x86/mm/tlb.c
996 ++++ b/arch/x86/mm/tlb.c
997 +@@ -731,7 +731,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
998 + {
999 + int cpu;
1000 +
1001 +- struct flush_tlb_info info __aligned(SMP_CACHE_BYTES) = {
1002 ++ struct flush_tlb_info info = {
1003 + .mm = mm,
1004 + .stride_shift = stride_shift,
1005 + .freed_tables = freed_tables,
1006 +diff --git a/block/blk-mq.c b/block/blk-mq.c
1007 +index 16f9675c57e6..5a2585d69c81 100644
1008 +--- a/block/blk-mq.c
1009 ++++ b/block/blk-mq.c
1010 +@@ -1716,11 +1716,12 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1011 + unsigned int depth;
1012 +
1013 + list_splice_init(&plug->mq_list, &list);
1014 +- plug->rq_count = 0;
1015 +
1016 + if (plug->rq_count > 2 && plug->multiple_queues)
1017 + list_sort(NULL, &list, plug_rq_cmp);
1018 +
1019 ++ plug->rq_count = 0;
1020 ++
1021 + this_q = NULL;
1022 + this_hctx = NULL;
1023 + this_ctx = NULL;
1024 +@@ -2341,7 +2342,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
1025 + return 0;
1026 +
1027 + free_fq:
1028 +- kfree(hctx->fq);
1029 ++ blk_free_flush_queue(hctx->fq);
1030 + exit_hctx:
1031 + if (set->ops->exit_hctx)
1032 + set->ops->exit_hctx(hctx, hctx_idx);
1033 +diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
1034 +index 62c9654b9ce8..fd7a9be54595 100644
1035 +--- a/drivers/block/null_blk_main.c
1036 ++++ b/drivers/block/null_blk_main.c
1037 +@@ -1749,6 +1749,11 @@ static int __init null_init(void)
1038 + return -EINVAL;
1039 + }
1040 +
1041 ++ if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) {
1042 ++ pr_err("null_blk: invalid home_node value\n");
1043 ++ g_home_node = NUMA_NO_NODE;
1044 ++ }
1045 ++
1046 + if (g_queue_mode == NULL_Q_RQ) {
1047 + pr_err("null_blk: legacy IO path no longer available\n");
1048 + return -EINVAL;
1049 +diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
1050 +index 87ccef4bd69e..32a21b8d1d85 100644
1051 +--- a/drivers/block/xsysace.c
1052 ++++ b/drivers/block/xsysace.c
1053 +@@ -1090,6 +1090,8 @@ static int ace_setup(struct ace_device *ace)
1054 + return 0;
1055 +
1056 + err_read:
1057 ++ /* prevent double queue cleanup */
1058 ++ ace->gd->queue = NULL;
1059 + put_disk(ace->gd);
1060 + err_alloc_disk:
1061 + blk_cleanup_queue(ace->queue);
1062 +diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
1063 +index 4593baff2bc9..19eecf198321 100644
1064 +--- a/drivers/bluetooth/btmtkuart.c
1065 ++++ b/drivers/bluetooth/btmtkuart.c
1066 +@@ -115,11 +115,13 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev, u8 op, u8 flag, u16 plen,
1067 + TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);
1068 + if (err == -EINTR) {
1069 + bt_dev_err(hdev, "Execution of wmt command interrupted");
1070 ++ clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
1071 + return err;
1072 + }
1073 +
1074 + if (err) {
1075 + bt_dev_err(hdev, "Execution of wmt command timed out");
1076 ++ clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
1077 + return -ETIMEDOUT;
1078 + }
1079 +
1080 +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
1081 +index 4761499db9ee..470ee68555d9 100644
1082 +--- a/drivers/bluetooth/btusb.c
1083 ++++ b/drivers/bluetooth/btusb.c
1084 +@@ -2885,6 +2885,7 @@ static int btusb_config_oob_wake(struct hci_dev *hdev)
1085 + return 0;
1086 + }
1087 +
1088 ++ irq_set_status_flags(irq, IRQ_NOAUTOEN);
1089 + ret = devm_request_irq(&hdev->dev, irq, btusb_oob_wake_handler,
1090 + 0, "OOB Wake-on-BT", data);
1091 + if (ret) {
1092 +@@ -2899,7 +2900,6 @@ static int btusb_config_oob_wake(struct hci_dev *hdev)
1093 + }
1094 +
1095 + data->oob_wake_irq = irq;
1096 +- disable_irq(irq);
1097 + bt_dev_info(hdev, "OOB Wake-on-BT configured at IRQ %u", irq);
1098 + return 0;
1099 + }
1100 +diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
1101 +index 1b779396e04f..42de947173f8 100644
1102 +--- a/drivers/clk/qcom/gcc-msm8998.c
1103 ++++ b/drivers/clk/qcom/gcc-msm8998.c
1104 +@@ -1112,6 +1112,7 @@ static struct clk_rcg2 ufs_axi_clk_src = {
1105 +
1106 + static const struct freq_tbl ftbl_usb30_master_clk_src[] = {
1107 + F(19200000, P_XO, 1, 0, 0),
1108 ++ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
1109 + F(120000000, P_GPLL0_OUT_MAIN, 5, 0, 0),
1110 + F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
1111 + { }
1112 +diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c
1113 +index d977193842df..19174835693b 100644
1114 +--- a/drivers/clk/x86/clk-pmc-atom.c
1115 ++++ b/drivers/clk/x86/clk-pmc-atom.c
1116 +@@ -165,7 +165,7 @@ static const struct clk_ops plt_clk_ops = {
1117 + };
1118 +
1119 + static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
1120 +- void __iomem *base,
1121 ++ const struct pmc_clk_data *pmc_data,
1122 + const char **parent_names,
1123 + int num_parents)
1124 + {
1125 +@@ -184,9 +184,17 @@ static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
1126 + init.num_parents = num_parents;
1127 +
1128 + pclk->hw.init = &init;
1129 +- pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
1130 ++ pclk->reg = pmc_data->base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
1131 + spin_lock_init(&pclk->lock);
1132 +
1133 ++ /*
1134 ++ * On some systems, the pmc_plt_clocks already enabled by the
1135 ++ * firmware are being marked as critical to avoid them being
1136 ++ * gated by the clock framework.
1137 ++ */
1138 ++ if (pmc_data->critical && plt_clk_is_enabled(&pclk->hw))
1139 ++ init.flags |= CLK_IS_CRITICAL;
1140 ++
1141 + ret = devm_clk_hw_register(&pdev->dev, &pclk->hw);
1142 + if (ret) {
1143 + pclk = ERR_PTR(ret);
1144 +@@ -332,7 +340,7 @@ static int plt_clk_probe(struct platform_device *pdev)
1145 + return PTR_ERR(parent_names);
1146 +
1147 + for (i = 0; i < PMC_CLK_NUM; i++) {
1148 +- data->clks[i] = plt_clk_register(pdev, i, pmc_data->base,
1149 ++ data->clks[i] = plt_clk_register(pdev, i, pmc_data,
1150 + parent_names, data->nparents);
1151 + if (IS_ERR(data->clks[i])) {
1152 + err = PTR_ERR(data->clks[i]);
1153 +diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
1154 +index 2d1dfa1e0745..e86e61dda4b7 100644
1155 +--- a/drivers/gpio/gpio-mxc.c
1156 ++++ b/drivers/gpio/gpio-mxc.c
1157 +@@ -438,8 +438,11 @@ static int mxc_gpio_probe(struct platform_device *pdev)
1158 +
1159 + /* the controller clock is optional */
1160 + port->clk = devm_clk_get(&pdev->dev, NULL);
1161 +- if (IS_ERR(port->clk))
1162 ++ if (IS_ERR(port->clk)) {
1163 ++ if (PTR_ERR(port->clk) == -EPROBE_DEFER)
1164 ++ return -EPROBE_DEFER;
1165 + port->clk = NULL;
1166 ++ }
1167 +
1168 + err = clk_prepare_enable(port->clk);
1169 + if (err) {
1170 +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
1171 +index 9993b692598f..860e21ec6a49 100644
1172 +--- a/drivers/hid/hid-core.c
1173 ++++ b/drivers/hid/hid-core.c
1174 +@@ -1301,10 +1301,10 @@ static u32 __extract(u8 *report, unsigned offset, int n)
1175 + u32 hid_field_extract(const struct hid_device *hid, u8 *report,
1176 + unsigned offset, unsigned n)
1177 + {
1178 +- if (n > 32) {
1179 +- hid_warn(hid, "hid_field_extract() called with n (%d) > 32! (%s)\n",
1180 ++ if (n > 256) {
1181 ++ hid_warn(hid, "hid_field_extract() called with n (%d) > 256! (%s)\n",
1182 + n, current->comm);
1183 +- n = 32;
1184 ++ n = 256;
1185 + }
1186 +
1187 + return __extract(report, offset, n);
1188 +diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
1189 +index ac9fda1b5a72..1384e57182af 100644
1190 +--- a/drivers/hid/hid-debug.c
1191 ++++ b/drivers/hid/hid-debug.c
1192 +@@ -1060,10 +1060,15 @@ static int hid_debug_rdesc_show(struct seq_file *f, void *p)
1193 + seq_printf(f, "\n\n");
1194 +
1195 + /* dump parsed data and input mappings */
1196 ++ if (down_interruptible(&hdev->driver_input_lock))
1197 ++ return 0;
1198 ++
1199 + hid_dump_device(hdev, f);
1200 + seq_printf(f, "\n");
1201 + hid_dump_input_mapping(hdev, f);
1202 +
1203 ++ up(&hdev->driver_input_lock);
1204 ++
1205 + return 0;
1206 + }
1207 +
1208 +diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
1209 +index 59a5608b8dc0..ff92a7b2fc89 100644
1210 +--- a/drivers/hid/hid-input.c
1211 ++++ b/drivers/hid/hid-input.c
1212 +@@ -995,6 +995,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
1213 + case 0x1b8: map_key_clear(KEY_VIDEO); break;
1214 + case 0x1bc: map_key_clear(KEY_MESSENGER); break;
1215 + case 0x1bd: map_key_clear(KEY_INFO); break;
1216 ++ case 0x1cb: map_key_clear(KEY_ASSISTANT); break;
1217 + case 0x201: map_key_clear(KEY_NEW); break;
1218 + case 0x202: map_key_clear(KEY_OPEN); break;
1219 + case 0x203: map_key_clear(KEY_CLOSE); break;
1220 +diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
1221 +index f040c8a7f9a9..199cc256e9d9 100644
1222 +--- a/drivers/hid/hid-logitech-hidpp.c
1223 ++++ b/drivers/hid/hid-logitech-hidpp.c
1224 +@@ -2111,6 +2111,13 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
1225 + kfree(data);
1226 + return -ENOMEM;
1227 + }
1228 ++ data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
1229 ++ if (!data->wq) {
1230 ++ kfree(data->effect_ids);
1231 ++ kfree(data);
1232 ++ return -ENOMEM;
1233 ++ }
1234 ++
1235 + data->hidpp = hidpp;
1236 + data->feature_index = feature_index;
1237 + data->version = version;
1238 +@@ -2155,7 +2162,6 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
1239 + /* ignore boost value at response.fap.params[2] */
1240 +
1241 + /* init the hardware command queue */
1242 +- data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
1243 + atomic_set(&data->workqueue_size, 0);
1244 +
1245 + /* initialize with zero autocenter to get wheel in usable state */
1246 +diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
1247 +index 94088c0ed68a..e24790c988c0 100644
1248 +--- a/drivers/hid/hid-quirks.c
1249 ++++ b/drivers/hid/hid-quirks.c
1250 +@@ -744,7 +744,6 @@ static const struct hid_device_id hid_ignore_list[] = {
1251 + { HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) },
1252 + { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
1253 + { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
1254 +- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) },
1255 + { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
1256 + { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
1257 + { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
1258 +@@ -1025,6 +1024,10 @@ bool hid_ignore(struct hid_device *hdev)
1259 + if (hdev->product == 0x0401 &&
1260 + strncmp(hdev->name, "ELAN0800", 8) != 0)
1261 + return true;
1262 ++ /* Same with product id 0x0400 */
1263 ++ if (hdev->product == 0x0400 &&
1264 ++ strncmp(hdev->name, "QTEC0001", 8) != 0)
1265 ++ return true;
1266 + break;
1267 + }
1268 +
1269 +diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
1270 +index fa9ad53845d9..d4b72e4ffd71 100644
1271 +--- a/drivers/i2c/busses/i2c-imx.c
1272 ++++ b/drivers/i2c/busses/i2c-imx.c
1273 +@@ -510,9 +510,9 @@ static int i2c_imx_clk_notifier_call(struct notifier_block *nb,
1274 + unsigned long action, void *data)
1275 + {
1276 + struct clk_notifier_data *ndata = data;
1277 +- struct imx_i2c_struct *i2c_imx = container_of(&ndata->clk,
1278 ++ struct imx_i2c_struct *i2c_imx = container_of(nb,
1279 + struct imx_i2c_struct,
1280 +- clk);
1281 ++ clk_change_nb);
1282 +
1283 + if (action & POST_RATE_CHANGE)
1284 + i2c_imx_set_clk(i2c_imx, ndata->new_rate);
1285 +diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
1286 +index 13e1213561d4..4284fc991cfd 100644
1287 +--- a/drivers/i2c/busses/i2c-stm32f7.c
1288 ++++ b/drivers/i2c/busses/i2c-stm32f7.c
1289 +@@ -432,7 +432,7 @@ static int stm32f7_i2c_compute_timing(struct stm32f7_i2c_dev *i2c_dev,
1290 + STM32F7_I2C_ANALOG_FILTER_DELAY_MAX : 0);
1291 + dnf_delay = setup->dnf * i2cclk;
1292 +
1293 +- sdadel_min = setup->fall_time - i2c_specs[setup->speed].hddat_min -
1294 ++ sdadel_min = i2c_specs[setup->speed].hddat_min + setup->fall_time -
1295 + af_delay_min - (setup->dnf + 3) * i2cclk;
1296 +
1297 + sdadel_max = i2c_specs[setup->speed].vddat_max - setup->rise_time -
1298 +diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c
1299 +index 2184b7c3580e..6b8d803bd30e 100644
1300 +--- a/drivers/i2c/busses/i2c-synquacer.c
1301 ++++ b/drivers/i2c/busses/i2c-synquacer.c
1302 +@@ -602,6 +602,8 @@ static int synquacer_i2c_probe(struct platform_device *pdev)
1303 + i2c->adapter = synquacer_i2c_ops;
1304 + i2c_set_adapdata(&i2c->adapter, i2c);
1305 + i2c->adapter.dev.parent = &pdev->dev;
1306 ++ i2c->adapter.dev.of_node = pdev->dev.of_node;
1307 ++ ACPI_COMPANION_SET(&i2c->adapter.dev, ACPI_COMPANION(&pdev->dev));
1308 + i2c->adapter.nr = pdev->id;
1309 + init_completion(&i2c->completion);
1310 +
1311 +diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
1312 +index af87a16ac3a5..60fb2afc0e50 100644
1313 +--- a/drivers/i2c/i2c-core-base.c
1314 ++++ b/drivers/i2c/i2c-core-base.c
1315 +@@ -327,6 +327,8 @@ static int i2c_device_probe(struct device *dev)
1316 +
1317 + if (client->flags & I2C_CLIENT_HOST_NOTIFY) {
1318 + dev_dbg(dev, "Using Host Notify IRQ\n");
1319 ++ /* Keep adapter active when Host Notify is required */
1320 ++ pm_runtime_get_sync(&client->adapter->dev);
1321 + irq = i2c_smbus_host_notify_to_irq(client);
1322 + } else if (dev->of_node) {
1323 + irq = of_irq_get_byname(dev->of_node, "irq");
1324 +@@ -431,6 +433,8 @@ static int i2c_device_remove(struct device *dev)
1325 + device_init_wakeup(&client->dev, false);
1326 +
1327 + client->irq = client->init_irq;
1328 ++ if (client->flags & I2C_CLIENT_HOST_NOTIFY)
1329 ++ pm_runtime_put(&client->adapter->dev);
1330 +
1331 + return status;
1332 + }
1333 +diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
1334 +index 1efadbccf394..7662e9347238 100644
1335 +--- a/drivers/infiniband/core/security.c
1336 ++++ b/drivers/infiniband/core/security.c
1337 +@@ -710,16 +710,20 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
1338 + dev_name(&agent->device->dev),
1339 + agent->port_num);
1340 + if (ret)
1341 +- return ret;
1342 ++ goto free_security;
1343 +
1344 + agent->lsm_nb.notifier_call = ib_mad_agent_security_change;
1345 + ret = register_lsm_notifier(&agent->lsm_nb);
1346 + if (ret)
1347 +- return ret;
1348 ++ goto free_security;
1349 +
1350 + agent->smp_allowed = true;
1351 + agent->lsm_nb_reg = true;
1352 + return 0;
1353 ++
1354 ++free_security:
1355 ++ security_ib_free_security(agent->security);
1356 ++ return ret;
1357 + }
1358 +
1359 + void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
1360 +@@ -727,9 +731,10 @@ void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
1361 + if (!rdma_protocol_ib(agent->device, agent->port_num))
1362 + return;
1363 +
1364 +- security_ib_free_security(agent->security);
1365 + if (agent->lsm_nb_reg)
1366 + unregister_lsm_notifier(&agent->lsm_nb);
1367 ++
1368 ++ security_ib_free_security(agent->security);
1369 + }
1370 +
1371 + int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
1372 +diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
1373 +index ac011836bb54..3220fb42ecce 100644
1374 +--- a/drivers/infiniband/core/verbs.c
1375 ++++ b/drivers/infiniband/core/verbs.c
1376 +@@ -1106,8 +1106,8 @@ struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
1377 + }
1378 + EXPORT_SYMBOL(ib_open_qp);
1379 +
1380 +-static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp,
1381 +- struct ib_qp_init_attr *qp_init_attr)
1382 ++static struct ib_qp *create_xrc_qp(struct ib_qp *qp,
1383 ++ struct ib_qp_init_attr *qp_init_attr)
1384 + {
1385 + struct ib_qp *real_qp = qp;
1386 +
1387 +@@ -1122,10 +1122,10 @@ static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp,
1388 +
1389 + qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
1390 + qp_init_attr->qp_context);
1391 +- if (!IS_ERR(qp))
1392 +- __ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
1393 +- else
1394 +- real_qp->device->ops.destroy_qp(real_qp);
1395 ++ if (IS_ERR(qp))
1396 ++ return qp;
1397 ++
1398 ++ __ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
1399 + return qp;
1400 + }
1401 +
1402 +@@ -1156,10 +1156,8 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
1403 + return qp;
1404 +
1405 + ret = ib_create_qp_security(qp, device);
1406 +- if (ret) {
1407 +- ib_destroy_qp(qp);
1408 +- return ERR_PTR(ret);
1409 +- }
1410 ++ if (ret)
1411 ++ goto err;
1412 +
1413 + qp->real_qp = qp;
1414 + qp->qp_type = qp_init_attr->qp_type;
1415 +@@ -1172,8 +1170,15 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
1416 + INIT_LIST_HEAD(&qp->sig_mrs);
1417 + qp->port = 0;
1418 +
1419 +- if (qp_init_attr->qp_type == IB_QPT_XRC_TGT)
1420 +- return ib_create_xrc_qp(qp, qp_init_attr);
1421 ++ if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
1422 ++ struct ib_qp *xrc_qp = create_xrc_qp(qp, qp_init_attr);
1423 ++
1424 ++ if (IS_ERR(xrc_qp)) {
1425 ++ ret = PTR_ERR(xrc_qp);
1426 ++ goto err;
1427 ++ }
1428 ++ return xrc_qp;
1429 ++ }
1430 +
1431 + qp->event_handler = qp_init_attr->event_handler;
1432 + qp->qp_context = qp_init_attr->qp_context;
1433 +@@ -1200,11 +1205,8 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
1434 +
1435 + if (qp_init_attr->cap.max_rdma_ctxs) {
1436 + ret = rdma_rw_init_mrs(qp, qp_init_attr);
1437 +- if (ret) {
1438 +- pr_err("failed to init MR pool ret= %d\n", ret);
1439 +- ib_destroy_qp(qp);
1440 +- return ERR_PTR(ret);
1441 +- }
1442 ++ if (ret)
1443 ++ goto err;
1444 + }
1445 +
1446 + /*
1447 +@@ -1217,6 +1219,11 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
1448 + device->attrs.max_sge_rd);
1449 +
1450 + return qp;
1451 ++
1452 ++err:
1453 ++ ib_destroy_qp(qp);
1454 ++ return ERR_PTR(ret);
1455 ++
1456 + }
1457 + EXPORT_SYMBOL(ib_create_qp);
1458 +
1459 +diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
1460 +index e9c336cff8f5..f367f3db7ff8 100644
1461 +--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
1462 ++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
1463 +@@ -2887,8 +2887,19 @@ static void srpt_queue_tm_rsp(struct se_cmd *cmd)
1464 + srpt_queue_response(cmd);
1465 + }
1466 +
1467 ++/*
1468 ++ * This function is called for aborted commands if no response is sent to the
1469 ++ * initiator. Make sure that the credits freed by aborting a command are
1470 ++ * returned to the initiator the next time a response is sent by incrementing
1471 ++ * ch->req_lim_delta.
1472 ++ */
1473 + static void srpt_aborted_task(struct se_cmd *cmd)
1474 + {
1475 ++ struct srpt_send_ioctx *ioctx = container_of(cmd,
1476 ++ struct srpt_send_ioctx, cmd);
1477 ++ struct srpt_rdma_ch *ch = ioctx->ch;
1478 ++
1479 ++ atomic_inc(&ch->req_lim_delta);
1480 + }
1481 +
1482 + static int srpt_queue_status(struct se_cmd *cmd)
1483 +diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c
1484 +index effb63205d3d..4c67cf30a5d9 100644
1485 +--- a/drivers/input/keyboard/snvs_pwrkey.c
1486 ++++ b/drivers/input/keyboard/snvs_pwrkey.c
1487 +@@ -148,6 +148,9 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
1488 + return error;
1489 + }
1490 +
1491 ++ pdata->input = input;
1492 ++ platform_set_drvdata(pdev, pdata);
1493 ++
1494 + error = devm_request_irq(&pdev->dev, pdata->irq,
1495 + imx_snvs_pwrkey_interrupt,
1496 + 0, pdev->name, pdev);
1497 +@@ -163,9 +166,6 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
1498 + return error;
1499 + }
1500 +
1501 +- pdata->input = input;
1502 +- platform_set_drvdata(pdev, pdata);
1503 +-
1504 + device_init_wakeup(&pdev->dev, pdata->wakeup);
1505 +
1506 + return 0;
1507 +diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
1508 +index 704e99046916..b6f95f20f924 100644
1509 +--- a/drivers/input/touchscreen/stmfts.c
1510 ++++ b/drivers/input/touchscreen/stmfts.c
1511 +@@ -106,27 +106,29 @@ struct stmfts_data {
1512 + bool running;
1513 + };
1514 +
1515 +-static void stmfts_brightness_set(struct led_classdev *led_cdev,
1516 ++static int stmfts_brightness_set(struct led_classdev *led_cdev,
1517 + enum led_brightness value)
1518 + {
1519 + struct stmfts_data *sdata = container_of(led_cdev,
1520 + struct stmfts_data, led_cdev);
1521 + int err;
1522 +
1523 +- if (value == sdata->led_status || !sdata->ledvdd)
1524 +- return;
1525 +-
1526 +- if (!value) {
1527 +- regulator_disable(sdata->ledvdd);
1528 +- } else {
1529 +- err = regulator_enable(sdata->ledvdd);
1530 +- if (err)
1531 +- dev_warn(&sdata->client->dev,
1532 +- "failed to disable ledvdd regulator: %d\n",
1533 +- err);
1534 ++ if (value != sdata->led_status && sdata->ledvdd) {
1535 ++ if (!value) {
1536 ++ regulator_disable(sdata->ledvdd);
1537 ++ } else {
1538 ++ err = regulator_enable(sdata->ledvdd);
1539 ++ if (err) {
1540 ++ dev_warn(&sdata->client->dev,
1541 ++ "failed to disable ledvdd regulator: %d\n",
1542 ++ err);
1543 ++ return err;
1544 ++ }
1545 ++ }
1546 ++ sdata->led_status = value;
1547 + }
1548 +
1549 +- sdata->led_status = value;
1550 ++ return 0;
1551 + }
1552 +
1553 + static enum led_brightness stmfts_brightness_get(struct led_classdev *led_cdev)
1554 +@@ -608,7 +610,7 @@ static int stmfts_enable_led(struct stmfts_data *sdata)
1555 + sdata->led_cdev.name = STMFTS_DEV_NAME;
1556 + sdata->led_cdev.max_brightness = LED_ON;
1557 + sdata->led_cdev.brightness = LED_OFF;
1558 +- sdata->led_cdev.brightness_set = stmfts_brightness_set;
1559 ++ sdata->led_cdev.brightness_set_blocking = stmfts_brightness_set;
1560 + sdata->led_cdev.brightness_get = stmfts_brightness_get;
1561 +
1562 + err = devm_led_classdev_register(&sdata->client->dev, &sdata->led_cdev);
1563 +diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
1564 +index a70a6ff7b36e..4939a83b50e4 100644
1565 +--- a/drivers/media/i2c/ov7670.c
1566 ++++ b/drivers/media/i2c/ov7670.c
1567 +@@ -160,10 +160,10 @@ MODULE_PARM_DESC(debug, "Debug level (0-1)");
1568 + #define REG_GFIX 0x69 /* Fix gain control */
1569 +
1570 + #define REG_DBLV 0x6b /* PLL control an debugging */
1571 +-#define DBLV_BYPASS 0x00 /* Bypass PLL */
1572 +-#define DBLV_X4 0x01 /* clock x4 */
1573 +-#define DBLV_X6 0x10 /* clock x6 */
1574 +-#define DBLV_X8 0x11 /* clock x8 */
1575 ++#define DBLV_BYPASS 0x0a /* Bypass PLL */
1576 ++#define DBLV_X4 0x4a /* clock x4 */
1577 ++#define DBLV_X6 0x8a /* clock x6 */
1578 ++#define DBLV_X8 0xca /* clock x8 */
1579 +
1580 + #define REG_SCALING_XSC 0x70 /* Test pattern and horizontal scale factor */
1581 + #define TEST_PATTTERN_0 0x80
1582 +@@ -863,7 +863,7 @@ static int ov7675_set_framerate(struct v4l2_subdev *sd,
1583 + if (ret < 0)
1584 + return ret;
1585 +
1586 +- return ov7670_write(sd, REG_DBLV, DBLV_X4);
1587 ++ return 0;
1588 + }
1589 +
1590 + static void ov7670_get_framerate_legacy(struct v4l2_subdev *sd,
1591 +@@ -1801,11 +1801,7 @@ static int ov7670_probe(struct i2c_client *client,
1592 + if (config->clock_speed)
1593 + info->clock_speed = config->clock_speed;
1594 +
1595 +- /*
1596 +- * It should be allowed for ov7670 too when it is migrated to
1597 +- * the new frame rate formula.
1598 +- */
1599 +- if (config->pll_bypass && id->driver_data != MODEL_OV7670)
1600 ++ if (config->pll_bypass)
1601 + info->pll_bypass = true;
1602 +
1603 + if (config->pclk_hb_disable)
1604 +diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
1605 +index 299016bc46d9..104477b512a2 100644
1606 +--- a/drivers/mfd/twl-core.c
1607 ++++ b/drivers/mfd/twl-core.c
1608 +@@ -1245,6 +1245,28 @@ free:
1609 + return status;
1610 + }
1611 +
1612 ++static int __maybe_unused twl_suspend(struct device *dev)
1613 ++{
1614 ++ struct i2c_client *client = to_i2c_client(dev);
1615 ++
1616 ++ if (client->irq)
1617 ++ disable_irq(client->irq);
1618 ++
1619 ++ return 0;
1620 ++}
1621 ++
1622 ++static int __maybe_unused twl_resume(struct device *dev)
1623 ++{
1624 ++ struct i2c_client *client = to_i2c_client(dev);
1625 ++
1626 ++ if (client->irq)
1627 ++ enable_irq(client->irq);
1628 ++
1629 ++ return 0;
1630 ++}
1631 ++
1632 ++static SIMPLE_DEV_PM_OPS(twl_dev_pm_ops, twl_suspend, twl_resume);
1633 ++
1634 + static const struct i2c_device_id twl_ids[] = {
1635 + { "twl4030", TWL4030_VAUX2 }, /* "Triton 2" */
1636 + { "twl5030", 0 }, /* T2 updated */
1637 +@@ -1262,6 +1284,7 @@ static const struct i2c_device_id twl_ids[] = {
1638 + /* One Client Driver , 4 Clients */
1639 + static struct i2c_driver twl_driver = {
1640 + .driver.name = DRIVER_NAME,
1641 ++ .driver.pm = &twl_dev_pm_ops,
1642 + .id_table = twl_ids,
1643 + .probe = twl_probe,
1644 + .remove = twl_remove,
1645 +diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
1646 +index 84283c6bb0ff..66a161b8f745 100644
1647 +--- a/drivers/mtd/nand/raw/marvell_nand.c
1648 ++++ b/drivers/mtd/nand/raw/marvell_nand.c
1649 +@@ -722,12 +722,6 @@ static void marvell_nfc_select_target(struct nand_chip *chip,
1650 + struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
1651 + u32 ndcr_generic;
1652 +
1653 +- if (chip == nfc->selected_chip && die_nr == marvell_nand->selected_die)
1654 +- return;
1655 +-
1656 +- writel_relaxed(marvell_nand->ndtr0, nfc->regs + NDTR0);
1657 +- writel_relaxed(marvell_nand->ndtr1, nfc->regs + NDTR1);
1658 +-
1659 + /*
1660 + * Reset the NDCR register to a clean state for this particular chip,
1661 + * also clear ND_RUN bit.
1662 +@@ -739,6 +733,12 @@ static void marvell_nfc_select_target(struct nand_chip *chip,
1663 + /* Also reset the interrupt status register */
1664 + marvell_nfc_clear_int(nfc, NDCR_ALL_INT);
1665 +
1666 ++ if (chip == nfc->selected_chip && die_nr == marvell_nand->selected_die)
1667 ++ return;
1668 ++
1669 ++ writel_relaxed(marvell_nand->ndtr0, nfc->regs + NDTR0);
1670 ++ writel_relaxed(marvell_nand->ndtr1, nfc->regs + NDTR1);
1671 ++
1672 + nfc->selected_chip = chip;
1673 + marvell_nand->selected_die = die_nr;
1674 + }
1675 +diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
1676 +index 2f120b2ffef0..4985268e2273 100644
1677 +--- a/drivers/net/bonding/bond_sysfs_slave.c
1678 ++++ b/drivers/net/bonding/bond_sysfs_slave.c
1679 +@@ -55,7 +55,9 @@ static SLAVE_ATTR_RO(link_failure_count);
1680 +
1681 + static ssize_t perm_hwaddr_show(struct slave *slave, char *buf)
1682 + {
1683 +- return sprintf(buf, "%pM\n", slave->perm_hwaddr);
1684 ++ return sprintf(buf, "%*phC\n",
1685 ++ slave->dev->addr_len,
1686 ++ slave->perm_hwaddr);
1687 + }
1688 + static SLAVE_ATTR_RO(perm_hwaddr);
1689 +
1690 +diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
1691 +index 74849be5f004..e2919005ead3 100644
1692 +--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
1693 ++++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
1694 +@@ -354,7 +354,10 @@ static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total,
1695 + ppmax = max;
1696 +
1697 + /* pool size must be multiple of unsigned long */
1698 +- bmap = BITS_TO_LONGS(ppmax);
1699 ++ bmap = ppmax / BITS_PER_TYPE(unsigned long);
1700 ++ if (!bmap)
1701 ++ return NULL;
1702 ++
1703 + ppmax = (bmap * sizeof(unsigned long)) << 3;
1704 +
1705 + alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap;
1706 +@@ -402,6 +405,10 @@ int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev,
1707 + if (reserve_factor) {
1708 + ppmax_pool = ppmax / reserve_factor;
1709 + pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max);
1710 ++ if (!pool) {
1711 ++ ppmax_pool = 0;
1712 ++ reserve_factor = 0;
1713 ++ }
1714 +
1715 + pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n",
1716 + ndev->name, ppmax, ppmax_pool, pool_index_max);
1717 +diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
1718 +index 79d03f8ee7b1..c7fa97a7e1f4 100644
1719 +--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
1720 ++++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
1721 +@@ -150,7 +150,6 @@ out_buffer_fail:
1722 + /* free desc along with its attached buffer */
1723 + static void hnae_free_desc(struct hnae_ring *ring)
1724 + {
1725 +- hnae_free_buffers(ring);
1726 + dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
1727 + ring->desc_num * sizeof(ring->desc[0]),
1728 + ring_to_dma_dir(ring));
1729 +@@ -183,6 +182,9 @@ static int hnae_alloc_desc(struct hnae_ring *ring)
1730 + /* fini ring, also free the buffer for the ring */
1731 + static void hnae_fini_ring(struct hnae_ring *ring)
1732 + {
1733 ++ if (is_rx_ring(ring))
1734 ++ hnae_free_buffers(ring);
1735 ++
1736 + hnae_free_desc(ring);
1737 + kfree(ring->desc_cb);
1738 + ring->desc_cb = NULL;
1739 +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
1740 +index ac55db065f16..f5ff07cb2b72 100644
1741 +--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
1742 ++++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
1743 +@@ -2750,6 +2750,17 @@ int hns_dsaf_get_regs_count(void)
1744 + return DSAF_DUMP_REGS_NUM;
1745 + }
1746 +
1747 ++static int hns_dsaf_get_port_id(u8 port)
1748 ++{
1749 ++ if (port < DSAF_SERVICE_NW_NUM)
1750 ++ return port;
1751 ++
1752 ++ if (port >= DSAF_BASE_INNER_PORT_NUM)
1753 ++ return port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
1754 ++
1755 ++ return -EINVAL;
1756 ++}
1757 ++
1758 + static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
1759 + {
1760 + struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80};
1761 +@@ -2815,23 +2826,33 @@ static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
1762 + memset(&temp_key, 0x0, sizeof(temp_key));
1763 + mask_entry.addr[0] = 0x01;
1764 + hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id,
1765 +- port, mask_entry.addr);
1766 ++ 0xf, mask_entry.addr);
1767 + tbl_tcam_mcast.tbl_mcast_item_vld = 1;
1768 + tbl_tcam_mcast.tbl_mcast_old_en = 0;
1769 +
1770 +- if (port < DSAF_SERVICE_NW_NUM) {
1771 +- mskid = port;
1772 +- } else if (port >= DSAF_BASE_INNER_PORT_NUM) {
1773 +- mskid = port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
1774 +- } else {
1775 ++ /* set MAC port to handle multicast */
1776 ++ mskid = hns_dsaf_get_port_id(port);
1777 ++ if (mskid == -EINVAL) {
1778 + dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n",
1779 + dsaf_dev->ae_dev.name, port,
1780 + mask_key.high.val, mask_key.low.val);
1781 + return;
1782 + }
1783 ++ dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
1784 ++ mskid % 32, 1);
1785 +
1786 ++ /* set pool bit map to handle multicast */
1787 ++ mskid = hns_dsaf_get_port_id(port_num);
1788 ++ if (mskid == -EINVAL) {
1789 ++ dev_err(dsaf_dev->dev,
1790 ++ "%s, pool bit map pnum(%d)error,key(%#x:%#x)\n",
1791 ++ dsaf_dev->ae_dev.name, port_num,
1792 ++ mask_key.high.val, mask_key.low.val);
1793 ++ return;
1794 ++ }
1795 + dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
1796 + mskid % 32, 1);
1797 ++
1798 + memcpy(&temp_key, &mask_key, sizeof(mask_key));
1799 + hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
1800 + (struct dsaf_tbl_tcam_data *)(&mask_key),
1801 +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
1802 +index ba4316910dea..a60f207768fc 100644
1803 +--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
1804 ++++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
1805 +@@ -129,7 +129,7 @@ static void hns_xgmac_lf_rf_control_init(struct mac_driver *mac_drv)
1806 + dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0);
1807 + dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1);
1808 + dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0);
1809 +- dsaf_write_reg(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val);
1810 ++ dsaf_write_dev(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val);
1811 + }
1812 +
1813 + /**
1814 +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
1815 +index 60e7d7ae3787..4cd86ba1f050 100644
1816 +--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
1817 ++++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
1818 +@@ -29,9 +29,6 @@
1819 +
1820 + #define SERVICE_TIMER_HZ (1 * HZ)
1821 +
1822 +-#define NIC_TX_CLEAN_MAX_NUM 256
1823 +-#define NIC_RX_CLEAN_MAX_NUM 64
1824 +-
1825 + #define RCB_IRQ_NOT_INITED 0
1826 + #define RCB_IRQ_INITED 1
1827 + #define HNS_BUFFER_SIZE_2048 2048
1828 +@@ -376,8 +373,6 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
1829 + wmb(); /* commit all data before submit */
1830 + assert(skb->queue_mapping < priv->ae_handle->q_num);
1831 + hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
1832 +- ring->stats.tx_pkts++;
1833 +- ring->stats.tx_bytes += skb->len;
1834 +
1835 + return NETDEV_TX_OK;
1836 +
1837 +@@ -999,6 +994,9 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
1838 + /* issue prefetch for next Tx descriptor */
1839 + prefetch(&ring->desc_cb[ring->next_to_clean]);
1840 + }
1841 ++ /* update tx ring statistics. */
1842 ++ ring->stats.tx_pkts += pkts;
1843 ++ ring->stats.tx_bytes += bytes;
1844 +
1845 + NETIF_TX_UNLOCK(ring);
1846 +
1847 +@@ -2152,7 +2150,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
1848 + hns_nic_tx_fini_pro_v2;
1849 +
1850 + netif_napi_add(priv->netdev, &rd->napi,
1851 +- hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
1852 ++ hns_nic_common_poll, NAPI_POLL_WEIGHT);
1853 + rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
1854 + }
1855 + for (i = h->q_num; i < h->q_num * 2; i++) {
1856 +@@ -2165,7 +2163,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
1857 + hns_nic_rx_fini_pro_v2;
1858 +
1859 + netif_napi_add(priv->netdev, &rd->napi,
1860 +- hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
1861 ++ hns_nic_common_poll, NAPI_POLL_WEIGHT);
1862 + rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
1863 + }
1864 +
1865 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
1866 +index fffe8c1c45d3..0fb61d440d3b 100644
1867 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
1868 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
1869 +@@ -3,7 +3,7 @@
1870 + # Makefile for the HISILICON network device drivers.
1871 + #
1872 +
1873 +-ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
1874 ++ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
1875 +
1876 + obj-$(CONFIG_HNS3_HCLGE) += hclge.o
1877 + hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o
1878 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
1879 +index fb93bbd35845..6193f8fa7cf3 100644
1880 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
1881 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
1882 +@@ -3,7 +3,7 @@
1883 + # Makefile for the HISILICON network device drivers.
1884 + #
1885 +
1886 +-ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
1887 ++ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
1888 +
1889 + obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
1890 + hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o
1891 +\ No newline at end of file
1892 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
1893 +index a6bc7847346b..5d544e661445 100644
1894 +--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
1895 ++++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
1896 +@@ -2378,8 +2378,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1897 + return -EOPNOTSUPP;
1898 +
1899 + /* only magic packet is supported */
1900 +- if (wol->wolopts && (wol->wolopts != WAKE_MAGIC)
1901 +- | (wol->wolopts != WAKE_FILTER))
1902 ++ if (wol->wolopts & ~WAKE_MAGIC)
1903 + return -EOPNOTSUPP;
1904 +
1905 + /* is this a new value? */
1906 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
1907 +index 5fb4353c742b..31575c0bb884 100644
1908 +--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
1909 ++++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
1910 +@@ -146,12 +146,13 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
1911 + static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
1912 + {
1913 + struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
1914 +- struct timespec64 now;
1915 ++ struct timespec64 now, then;
1916 +
1917 ++ then = ns_to_timespec64(delta);
1918 + mutex_lock(&pf->tmreg_lock);
1919 +
1920 + i40e_ptp_read(pf, &now, NULL);
1921 +- timespec64_add_ns(&now, delta);
1922 ++ now = timespec64_add(now, then);
1923 + i40e_ptp_write(pf, (const struct timespec64 *)&now);
1924 +
1925 + mutex_unlock(&pf->tmreg_lock);
1926 +diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
1927 +index 01fcfc6f3415..d2e2c50ce257 100644
1928 +--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
1929 ++++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
1930 +@@ -194,6 +194,8 @@
1931 + /* enable link status from external LINK_0 and LINK_1 pins */
1932 + #define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
1933 + #define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
1934 ++#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
1935 ++#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */
1936 + #define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */
1937 + #define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */
1938 + #define E1000_CTRL_RST 0x04000000 /* Global reset */
1939 +diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
1940 +index 7137e7f9c7f3..21ccadb720d1 100644
1941 +--- a/drivers/net/ethernet/intel/igb/igb_main.c
1942 ++++ b/drivers/net/ethernet/intel/igb/igb_main.c
1943 +@@ -8755,9 +8755,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
1944 + struct e1000_hw *hw = &adapter->hw;
1945 + u32 ctrl, rctl, status;
1946 + u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
1947 +-#ifdef CONFIG_PM
1948 +- int retval = 0;
1949 +-#endif
1950 ++ bool wake;
1951 +
1952 + rtnl_lock();
1953 + netif_device_detach(netdev);
1954 +@@ -8770,14 +8768,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
1955 + igb_clear_interrupt_scheme(adapter);
1956 + rtnl_unlock();
1957 +
1958 +-#ifdef CONFIG_PM
1959 +- if (!runtime) {
1960 +- retval = pci_save_state(pdev);
1961 +- if (retval)
1962 +- return retval;
1963 +- }
1964 +-#endif
1965 +-
1966 + status = rd32(E1000_STATUS);
1967 + if (status & E1000_STATUS_LU)
1968 + wufc &= ~E1000_WUFC_LNKC;
1969 +@@ -8794,10 +8784,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
1970 + }
1971 +
1972 + ctrl = rd32(E1000_CTRL);
1973 +- /* advertise wake from D3Cold */
1974 +- #define E1000_CTRL_ADVD3WUC 0x00100000
1975 +- /* phy power management enable */
1976 +- #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
1977 + ctrl |= E1000_CTRL_ADVD3WUC;
1978 + wr32(E1000_CTRL, ctrl);
1979 +
1980 +@@ -8811,12 +8797,15 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
1981 + wr32(E1000_WUFC, 0);
1982 + }
1983 +
1984 +- *enable_wake = wufc || adapter->en_mng_pt;
1985 +- if (!*enable_wake)
1986 ++ wake = wufc || adapter->en_mng_pt;
1987 ++ if (!wake)
1988 + igb_power_down_link(adapter);
1989 + else
1990 + igb_power_up_link(adapter);
1991 +
1992 ++ if (enable_wake)
1993 ++ *enable_wake = wake;
1994 ++
1995 + /* Release control of h/w to f/w. If f/w is AMT enabled, this
1996 + * would have already happened in close and is redundant.
1997 + */
1998 +@@ -8859,22 +8848,7 @@ static void igb_deliver_wake_packet(struct net_device *netdev)
1999 +
2000 + static int __maybe_unused igb_suspend(struct device *dev)
2001 + {
2002 +- int retval;
2003 +- bool wake;
2004 +- struct pci_dev *pdev = to_pci_dev(dev);
2005 +-
2006 +- retval = __igb_shutdown(pdev, &wake, 0);
2007 +- if (retval)
2008 +- return retval;
2009 +-
2010 +- if (wake) {
2011 +- pci_prepare_to_sleep(pdev);
2012 +- } else {
2013 +- pci_wake_from_d3(pdev, false);
2014 +- pci_set_power_state(pdev, PCI_D3hot);
2015 +- }
2016 +-
2017 +- return 0;
2018 ++ return __igb_shutdown(to_pci_dev(dev), NULL, 0);
2019 + }
2020 +
2021 + static int __maybe_unused igb_resume(struct device *dev)
2022 +@@ -8945,22 +8919,7 @@ static int __maybe_unused igb_runtime_idle(struct device *dev)
2023 +
2024 + static int __maybe_unused igb_runtime_suspend(struct device *dev)
2025 + {
2026 +- struct pci_dev *pdev = to_pci_dev(dev);
2027 +- int retval;
2028 +- bool wake;
2029 +-
2030 +- retval = __igb_shutdown(pdev, &wake, 1);
2031 +- if (retval)
2032 +- return retval;
2033 +-
2034 +- if (wake) {
2035 +- pci_prepare_to_sleep(pdev);
2036 +- } else {
2037 +- pci_wake_from_d3(pdev, false);
2038 +- pci_set_power_state(pdev, PCI_D3hot);
2039 +- }
2040 +-
2041 +- return 0;
2042 ++ return __igb_shutdown(to_pci_dev(dev), NULL, 1);
2043 + }
2044 +
2045 + static int __maybe_unused igb_runtime_resume(struct device *dev)
2046 +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
2047 +index cc4907f9ff02..2fb97967961c 100644
2048 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
2049 ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
2050 +@@ -905,13 +905,12 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
2051 + struct pci_dev *pdev = adapter->pdev;
2052 + struct device *dev = &adapter->netdev->dev;
2053 + struct mii_bus *bus;
2054 ++ int err = -ENODEV;
2055 +
2056 +- adapter->mii_bus = devm_mdiobus_alloc(dev);
2057 +- if (!adapter->mii_bus)
2058 ++ bus = devm_mdiobus_alloc(dev);
2059 ++ if (!bus)
2060 + return -ENOMEM;
2061 +
2062 +- bus = adapter->mii_bus;
2063 +-
2064 + switch (hw->device_id) {
2065 + /* C3000 SoCs */
2066 + case IXGBE_DEV_ID_X550EM_A_KR:
2067 +@@ -949,12 +948,15 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
2068 + */
2069 + hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22;
2070 +
2071 +- return mdiobus_register(bus);
2072 ++ err = mdiobus_register(bus);
2073 ++ if (!err) {
2074 ++ adapter->mii_bus = bus;
2075 ++ return 0;
2076 ++ }
2077 +
2078 + ixgbe_no_mii_bus:
2079 + devm_mdiobus_free(dev, bus);
2080 +- adapter->mii_bus = NULL;
2081 +- return -ENODEV;
2082 ++ return err;
2083 + }
2084 +
2085 + /**
2086 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
2087 +index 13c48883ed61..619f96940b65 100644
2088 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
2089 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
2090 +@@ -81,8 +81,7 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
2091 + opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
2092 + MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
2093 + MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
2094 +- if (vport)
2095 +- MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
2096 ++ MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
2097 + nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
2098 + in, nic_vport_context);
2099 +
2100 +@@ -110,8 +109,7 @@ static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
2101 + MLX5_SET(modify_esw_vport_context_in, in, opcode,
2102 + MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
2103 + MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
2104 +- if (vport)
2105 +- MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
2106 ++ MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
2107 + return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
2108 + }
2109 +
2110 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
2111 +index d4e6fe5b9300..ce5766a26baa 100644
2112 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
2113 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
2114 +@@ -1402,6 +1402,7 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
2115 + {
2116 + int err;
2117 +
2118 ++ memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
2119 + mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
2120 +
2121 + err = esw_create_offloads_fdb_tables(esw, nvports);
2122 +diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
2123 +index 40d6356a7e73..3dfb07a78952 100644
2124 +--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
2125 ++++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
2126 +@@ -29,11 +29,13 @@
2127 + /* Specific functions used for Ring mode */
2128 +
2129 + /* Enhanced descriptors */
2130 +-static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
2131 ++static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end,
2132 ++ int bfsize)
2133 + {
2134 +- p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
2135 +- << ERDES1_BUFFER2_SIZE_SHIFT)
2136 +- & ERDES1_BUFFER2_SIZE_MASK);
2137 ++ if (bfsize == BUF_SIZE_16KiB)
2138 ++ p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
2139 ++ << ERDES1_BUFFER2_SIZE_SHIFT)
2140 ++ & ERDES1_BUFFER2_SIZE_MASK);
2141 +
2142 + if (end)
2143 + p->des1 |= cpu_to_le32(ERDES1_END_RING);
2144 +@@ -59,11 +61,15 @@ static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
2145 + }
2146 +
2147 + /* Normal descriptors */
2148 +-static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end)
2149 ++static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end, int bfsize)
2150 + {
2151 +- p->des1 |= cpu_to_le32(((BUF_SIZE_2KiB - 1)
2152 +- << RDES1_BUFFER2_SIZE_SHIFT)
2153 +- & RDES1_BUFFER2_SIZE_MASK);
2154 ++ if (bfsize >= BUF_SIZE_2KiB) {
2155 ++ int bfsize2;
2156 ++
2157 ++ bfsize2 = min(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1);
2158 ++ p->des1 |= cpu_to_le32((bfsize2 << RDES1_BUFFER2_SIZE_SHIFT)
2159 ++ & RDES1_BUFFER2_SIZE_MASK);
2160 ++ }
2161 +
2162 + if (end)
2163 + p->des1 |= cpu_to_le32(RDES1_END_RING);
2164 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
2165 +index 736e29635b77..313a58b68fee 100644
2166 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
2167 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
2168 +@@ -296,7 +296,7 @@ exit:
2169 + }
2170 +
2171 + static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
2172 +- int mode, int end)
2173 ++ int mode, int end, int bfsize)
2174 + {
2175 + dwmac4_set_rx_owner(p, disable_rx_ic);
2176 + }
2177 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
2178 +index 1d858fdec997..98fa471da7c0 100644
2179 +--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
2180 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
2181 +@@ -123,7 +123,7 @@ static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc,
2182 + }
2183 +
2184 + static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
2185 +- int mode, int end)
2186 ++ int mode, int end, int bfsize)
2187 + {
2188 + dwxgmac2_set_rx_owner(p, disable_rx_ic);
2189 + }
2190 +diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
2191 +index 5ef91a790f9d..5202d6ad7919 100644
2192 +--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
2193 ++++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
2194 +@@ -201,6 +201,11 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
2195 + if (unlikely(rdes0 & RDES0_OWN))
2196 + return dma_own;
2197 +
2198 ++ if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
2199 ++ stats->rx_length_errors++;
2200 ++ return discard_frame;
2201 ++ }
2202 ++
2203 + if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
2204 + if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) {
2205 + x->rx_desc++;
2206 +@@ -231,9 +236,10 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
2207 + * It doesn't match with the information reported into the databook.
2208 + * At any rate, we need to understand if the CSUM hw computation is ok
2209 + * and report this info to the upper layers. */
2210 +- ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
2211 +- !!(rdes0 & RDES0_FRAME_TYPE),
2212 +- !!(rdes0 & ERDES0_RX_MAC_ADDR));
2213 ++ if (likely(ret == good_frame))
2214 ++ ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
2215 ++ !!(rdes0 & RDES0_FRAME_TYPE),
2216 ++ !!(rdes0 & ERDES0_RX_MAC_ADDR));
2217 +
2218 + if (unlikely(rdes0 & RDES0_DRIBBLING))
2219 + x->dribbling_bit++;
2220 +@@ -259,15 +265,19 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
2221 + }
2222 +
2223 + static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
2224 +- int mode, int end)
2225 ++ int mode, int end, int bfsize)
2226 + {
2227 ++ int bfsize1;
2228 ++
2229 + p->des0 |= cpu_to_le32(RDES0_OWN);
2230 +- p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK);
2231 ++
2232 ++ bfsize1 = min(bfsize, BUF_SIZE_8KiB);
2233 ++ p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK);
2234 +
2235 + if (mode == STMMAC_CHAIN_MODE)
2236 + ehn_desc_rx_set_on_chain(p);
2237 + else
2238 +- ehn_desc_rx_set_on_ring(p, end);
2239 ++ ehn_desc_rx_set_on_ring(p, end, bfsize);
2240 +
2241 + if (disable_rx_ic)
2242 + p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC);
2243 +diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
2244 +index 92b8944f26e3..5bb00234d961 100644
2245 +--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
2246 ++++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
2247 +@@ -33,7 +33,7 @@ struct dma_extended_desc;
2248 + struct stmmac_desc_ops {
2249 + /* DMA RX descriptor ring initialization */
2250 + void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode,
2251 +- int end);
2252 ++ int end, int bfsize);
2253 + /* DMA TX descriptor ring initialization */
2254 + void (*init_tx_desc)(struct dma_desc *p, int mode, int end);
2255 + /* Invoked by the xmit function to prepare the tx descriptor */
2256 +diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
2257 +index de65bb29feba..b7dd4e3c760d 100644
2258 +--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
2259 ++++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
2260 +@@ -91,8 +91,6 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
2261 + return dma_own;
2262 +
2263 + if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
2264 +- pr_warn("%s: Oversized frame spanned multiple buffers\n",
2265 +- __func__);
2266 + stats->rx_length_errors++;
2267 + return discard_frame;
2268 + }
2269 +@@ -135,15 +133,19 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
2270 + }
2271 +
2272 + static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
2273 +- int end)
2274 ++ int end, int bfsize)
2275 + {
2276 ++ int bfsize1;
2277 ++
2278 + p->des0 |= cpu_to_le32(RDES0_OWN);
2279 +- p->des1 |= cpu_to_le32((BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK);
2280 ++
2281 ++ bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1);
2282 ++ p->des1 |= cpu_to_le32(bfsize1 & RDES1_BUFFER1_SIZE_MASK);
2283 +
2284 + if (mode == STMMAC_CHAIN_MODE)
2285 + ndesc_rx_set_on_chain(p, end);
2286 + else
2287 +- ndesc_rx_set_on_ring(p, end);
2288 ++ ndesc_rx_set_on_ring(p, end, bfsize);
2289 +
2290 + if (disable_rx_ic)
2291 + p->des1 |= cpu_to_le32(RDES1_DISABLE_IC);
2292 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2293 +index 0bc3632880b5..f0e0593e54f3 100644
2294 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2295 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2296 +@@ -1114,11 +1114,13 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
2297 + if (priv->extend_desc)
2298 + stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
2299 + priv->use_riwt, priv->mode,
2300 +- (i == DMA_RX_SIZE - 1));
2301 ++ (i == DMA_RX_SIZE - 1),
2302 ++ priv->dma_buf_sz);
2303 + else
2304 + stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
2305 + priv->use_riwt, priv->mode,
2306 +- (i == DMA_RX_SIZE - 1));
2307 ++ (i == DMA_RX_SIZE - 1),
2308 ++ priv->dma_buf_sz);
2309 + }
2310 +
2311 + /**
2312 +@@ -3326,9 +3328,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
2313 + {
2314 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
2315 + struct stmmac_channel *ch = &priv->channel[queue];
2316 +- unsigned int entry = rx_q->cur_rx;
2317 ++ unsigned int next_entry = rx_q->cur_rx;
2318 + int coe = priv->hw->rx_csum;
2319 +- unsigned int next_entry;
2320 + unsigned int count = 0;
2321 + bool xmac;
2322 +
2323 +@@ -3346,10 +3347,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
2324 + stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
2325 + }
2326 + while (count < limit) {
2327 +- int status;
2328 ++ int entry, status;
2329 + struct dma_desc *p;
2330 + struct dma_desc *np;
2331 +
2332 ++ entry = next_entry;
2333 ++
2334 + if (priv->extend_desc)
2335 + p = (struct dma_desc *)(rx_q->dma_erx + entry);
2336 + else
2337 +@@ -3405,11 +3408,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
2338 + * ignored
2339 + */
2340 + if (frame_len > priv->dma_buf_sz) {
2341 +- netdev_err(priv->dev,
2342 +- "len %d larger than size (%d)\n",
2343 +- frame_len, priv->dma_buf_sz);
2344 ++ if (net_ratelimit())
2345 ++ netdev_err(priv->dev,
2346 ++ "len %d larger than size (%d)\n",
2347 ++ frame_len, priv->dma_buf_sz);
2348 + priv->dev->stats.rx_length_errors++;
2349 +- break;
2350 ++ continue;
2351 + }
2352 +
2353 + /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
2354 +@@ -3444,7 +3448,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
2355 + dev_warn(priv->device,
2356 + "packet dropped\n");
2357 + priv->dev->stats.rx_dropped++;
2358 +- break;
2359 ++ continue;
2360 + }
2361 +
2362 + dma_sync_single_for_cpu(priv->device,
2363 +@@ -3464,11 +3468,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
2364 + } else {
2365 + skb = rx_q->rx_skbuff[entry];
2366 + if (unlikely(!skb)) {
2367 +- netdev_err(priv->dev,
2368 +- "%s: Inconsistent Rx chain\n",
2369 +- priv->dev->name);
2370 ++ if (net_ratelimit())
2371 ++ netdev_err(priv->dev,
2372 ++ "%s: Inconsistent Rx chain\n",
2373 ++ priv->dev->name);
2374 + priv->dev->stats.rx_dropped++;
2375 +- break;
2376 ++ continue;
2377 + }
2378 + prefetch(skb->data - NET_IP_ALIGN);
2379 + rx_q->rx_skbuff[entry] = NULL;
2380 +@@ -3503,7 +3508,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
2381 + priv->dev->stats.rx_packets++;
2382 + priv->dev->stats.rx_bytes += frame_len;
2383 + }
2384 +- entry = next_entry;
2385 + }
2386 +
2387 + stmmac_rx_refill(priv, queue);
2388 +diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
2389 +index 575a7022d045..3846064d51a5 100644
2390 +--- a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
2391 ++++ b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
2392 +@@ -1,7 +1,7 @@
2393 + /******************************************************************************
2394 + *
2395 + * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
2396 +- * Copyright(c) 2018 Intel Corporation
2397 ++ * Copyright(c) 2018 - 2019 Intel Corporation
2398 + *
2399 + * This program is free software; you can redistribute it and/or modify it
2400 + * under the terms of version 2 of the GNU General Public License as
2401 +@@ -136,6 +136,7 @@ const struct iwl_cfg iwl5350_agn_cfg = {
2402 + .ht_params = &iwl5000_ht_params,
2403 + .led_mode = IWL_LED_BLINK,
2404 + .internal_wimax_coex = true,
2405 ++ .csr = &iwl_csr_v1,
2406 + };
2407 +
2408 + #define IWL_DEVICE_5150 \
2409 +diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
2410 +index d49fbd58afa7..bfbe3aa058d9 100644
2411 +--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
2412 ++++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
2413 +@@ -181,7 +181,7 @@ static int mwifiex_sdio_resume(struct device *dev)
2414 +
2415 + adapter = card->adapter;
2416 +
2417 +- if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
2418 ++ if (!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
2419 + mwifiex_dbg(adapter, WARN,
2420 + "device already resumed\n");
2421 + return 0;
2422 +diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
2423 +index a9cbe5be277b..e88e183914af 100644
2424 +--- a/drivers/platform/x86/intel_pmc_core.c
2425 ++++ b/drivers/platform/x86/intel_pmc_core.c
2426 +@@ -205,7 +205,7 @@ static const struct pmc_bit_map cnp_pfear_map[] = {
2427 + {"CNVI", BIT(3)},
2428 + {"UFS0", BIT(4)},
2429 + {"EMMC", BIT(5)},
2430 +- {"Res_6", BIT(6)},
2431 ++ {"SPF", BIT(6)},
2432 + {"SBR6", BIT(7)},
2433 +
2434 + {"SBR7", BIT(0)},
2435 +@@ -802,7 +802,7 @@ static int __init pmc_core_probe(void)
2436 + * Sunrisepoint PCH regmap can't be used. Use Cannonlake PCH regmap
2437 + * in this case.
2438 + */
2439 +- if (!pci_dev_present(pmc_pci_ids))
2440 ++ if (pmcdev->map == &spt_reg_map && !pci_dev_present(pmc_pci_ids))
2441 + pmcdev->map = &cnp_reg_map;
2442 +
2443 + if (lpit_read_residency_count_address(&slp_s0_addr))
2444 +diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
2445 +index 8f018b3f3cd4..eaec2d306481 100644
2446 +--- a/drivers/platform/x86/pmc_atom.c
2447 ++++ b/drivers/platform/x86/pmc_atom.c
2448 +@@ -17,6 +17,7 @@
2449 +
2450 + #include <linux/debugfs.h>
2451 + #include <linux/device.h>
2452 ++#include <linux/dmi.h>
2453 + #include <linux/init.h>
2454 + #include <linux/io.h>
2455 + #include <linux/platform_data/x86/clk-pmc-atom.h>
2456 +@@ -391,11 +392,27 @@ static int pmc_dbgfs_register(struct pmc_dev *pmc)
2457 + }
2458 + #endif /* CONFIG_DEBUG_FS */
2459 +
2460 ++/*
2461 ++ * Some systems need one or more of their pmc_plt_clks to be
2462 ++ * marked as critical.
2463 ++ */
2464 ++static const struct dmi_system_id critclk_systems[] __initconst = {
2465 ++ {
2466 ++ .ident = "MPL CEC1x",
2467 ++ .matches = {
2468 ++ DMI_MATCH(DMI_SYS_VENDOR, "MPL AG"),
2469 ++ DMI_MATCH(DMI_PRODUCT_NAME, "CEC10 Family"),
2470 ++ },
2471 ++ },
2472 ++ { /*sentinel*/ }
2473 ++};
2474 ++
2475 + static int pmc_setup_clks(struct pci_dev *pdev, void __iomem *pmc_regmap,
2476 + const struct pmc_data *pmc_data)
2477 + {
2478 + struct platform_device *clkdev;
2479 + struct pmc_clk_data *clk_data;
2480 ++ const struct dmi_system_id *d = dmi_first_match(critclk_systems);
2481 +
2482 + clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
2483 + if (!clk_data)
2484 +@@ -403,6 +420,10 @@ static int pmc_setup_clks(struct pci_dev *pdev, void __iomem *pmc_regmap,
2485 +
2486 + clk_data->base = pmc_regmap; /* offset is added by client */
2487 + clk_data->clks = pmc_data->clks;
2488 ++ if (d) {
2489 ++ clk_data->critical = true;
2490 ++ pr_info("%s critclks quirk enabled\n", d->ident);
2491 ++ }
2492 +
2493 + clkdev = platform_device_register_data(&pdev->dev, "clk-pmc-atom",
2494 + PLATFORM_DEVID_NONE,
2495 +diff --git a/drivers/reset/reset-meson-audio-arb.c b/drivers/reset/reset-meson-audio-arb.c
2496 +index 91751617b37a..c53a2185a039 100644
2497 +--- a/drivers/reset/reset-meson-audio-arb.c
2498 ++++ b/drivers/reset/reset-meson-audio-arb.c
2499 +@@ -130,6 +130,7 @@ static int meson_audio_arb_probe(struct platform_device *pdev)
2500 + arb->rstc.nr_resets = ARRAY_SIZE(axg_audio_arb_reset_bits);
2501 + arb->rstc.ops = &meson_audio_arb_rstc_ops;
2502 + arb->rstc.of_node = dev->of_node;
2503 ++ arb->rstc.owner = THIS_MODULE;
2504 +
2505 + /*
2506 + * Enable general :
2507 +diff --git a/drivers/rtc/rtc-cros-ec.c b/drivers/rtc/rtc-cros-ec.c
2508 +index e5444296075e..4d6bf9304ceb 100644
2509 +--- a/drivers/rtc/rtc-cros-ec.c
2510 ++++ b/drivers/rtc/rtc-cros-ec.c
2511 +@@ -298,7 +298,7 @@ static int cros_ec_rtc_suspend(struct device *dev)
2512 + struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev);
2513 +
2514 + if (device_may_wakeup(dev))
2515 +- enable_irq_wake(cros_ec_rtc->cros_ec->irq);
2516 ++ return enable_irq_wake(cros_ec_rtc->cros_ec->irq);
2517 +
2518 + return 0;
2519 + }
2520 +@@ -309,7 +309,7 @@ static int cros_ec_rtc_resume(struct device *dev)
2521 + struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev);
2522 +
2523 + if (device_may_wakeup(dev))
2524 +- disable_irq_wake(cros_ec_rtc->cros_ec->irq);
2525 ++ return disable_irq_wake(cros_ec_rtc->cros_ec->irq);
2526 +
2527 + return 0;
2528 + }
2529 +diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c
2530 +index b4e054c64bad..69b54e5556c0 100644
2531 +--- a/drivers/rtc/rtc-da9063.c
2532 ++++ b/drivers/rtc/rtc-da9063.c
2533 +@@ -480,6 +480,13 @@ static int da9063_rtc_probe(struct platform_device *pdev)
2534 + da9063_data_to_tm(data, &rtc->alarm_time, rtc);
2535 + rtc->rtc_sync = false;
2536 +
2537 ++ /*
2538 ++ * TODO: some models have alarms on a minute boundary but still support
2539 ++ * real hardware interrupts. Add this once the core supports it.
2540 ++ */
2541 ++ if (config->rtc_data_start != RTC_SEC)
2542 ++ rtc->rtc_dev->uie_unsupported = 1;
2543 ++
2544 + irq_alarm = platform_get_irq_byname(pdev, "ALARM");
2545 + ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL,
2546 + da9063_alarm_event,
2547 +diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
2548 +index d417b203cbc5..1d3de2a3d1a4 100644
2549 +--- a/drivers/rtc/rtc-sh.c
2550 ++++ b/drivers/rtc/rtc-sh.c
2551 +@@ -374,7 +374,7 @@ static int sh_rtc_set_time(struct device *dev, struct rtc_time *tm)
2552 + static inline int sh_rtc_read_alarm_value(struct sh_rtc *rtc, int reg_off)
2553 + {
2554 + unsigned int byte;
2555 +- int value = 0xff; /* return 0xff for ignored values */
2556 ++ int value = -1; /* return -1 for ignored values */
2557 +
2558 + byte = readb(rtc->regbase + reg_off);
2559 + if (byte & AR_ENB) {
2560 +diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2561 +index e0570fd8466e..45e52dd870c8 100644
2562 +--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2563 ++++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
2564 +@@ -1033,8 +1033,8 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
2565 + struct sas_ssp_task *ssp_task = &task->ssp_task;
2566 + struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
2567 + struct hisi_sas_tmf_task *tmf = slot->tmf;
2568 +- unsigned char prot_op = scsi_get_prot_op(scsi_cmnd);
2569 + int has_data = 0, priority = !!tmf;
2570 ++ unsigned char prot_op;
2571 + u8 *buf_cmd;
2572 + u32 dw1 = 0, dw2 = 0, len = 0;
2573 +
2574 +@@ -1049,6 +1049,7 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
2575 + dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF;
2576 + dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF;
2577 + } else {
2578 ++ prot_op = scsi_get_prot_op(scsi_cmnd);
2579 + dw1 |= 1 << CMD_HDR_FRAME_TYPE_OFF;
2580 + switch (scsi_cmnd->sc_data_direction) {
2581 + case DMA_TO_DEVICE:
2582 +diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
2583 +index c4cbfd07b916..a08ff3bd6310 100644
2584 +--- a/drivers/scsi/scsi_devinfo.c
2585 ++++ b/drivers/scsi/scsi_devinfo.c
2586 +@@ -238,6 +238,7 @@ static struct {
2587 + {"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
2588 + {"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
2589 + {"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
2590 ++ {"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
2591 + {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
2592 + {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
2593 + {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */
2594 +diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
2595 +index 5a58cbf3a75d..c14006ac98f9 100644
2596 +--- a/drivers/scsi/scsi_dh.c
2597 ++++ b/drivers/scsi/scsi_dh.c
2598 +@@ -75,6 +75,7 @@ static const struct scsi_dh_blist scsi_dh_blist[] = {
2599 + {"NETAPP", "INF-01-00", "rdac", },
2600 + {"LSI", "INF-01-00", "rdac", },
2601 + {"ENGENIO", "INF-01-00", "rdac", },
2602 ++ {"LENOVO", "DE_Series", "rdac", },
2603 + {NULL, NULL, NULL },
2604 + };
2605 +
2606 +diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
2607 +index 84380bae20f1..e186743033f4 100644
2608 +--- a/drivers/scsi/storvsc_drv.c
2609 ++++ b/drivers/scsi/storvsc_drv.c
2610 +@@ -668,13 +668,22 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns)
2611 + {
2612 + struct device *dev = &device->device;
2613 + struct storvsc_device *stor_device;
2614 +- int num_cpus = num_online_cpus();
2615 + int num_sc;
2616 + struct storvsc_cmd_request *request;
2617 + struct vstor_packet *vstor_packet;
2618 + int ret, t;
2619 +
2620 +- num_sc = ((max_chns > num_cpus) ? num_cpus : max_chns);
2621 ++ /*
2622 ++ * If the number of CPUs is artificially restricted, such as
2623 ++ * with maxcpus=1 on the kernel boot line, Hyper-V could offer
2624 ++ * sub-channels >= the number of CPUs. These sub-channels
2625 ++ * should not be created. The primary channel is already created
2626 ++ * and assigned to one CPU, so check against # CPUs - 1.
2627 ++ */
2628 ++ num_sc = min((int)(num_online_cpus() - 1), max_chns);
2629 ++ if (!num_sc)
2630 ++ return;
2631 ++
2632 + stor_device = get_out_stor_device(device);
2633 + if (!stor_device)
2634 + return;
2635 +diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
2636 +index 7839d869d25d..ecbb29e2153e 100644
2637 +--- a/drivers/staging/iio/addac/adt7316.c
2638 ++++ b/drivers/staging/iio/addac/adt7316.c
2639 +@@ -47,6 +47,8 @@
2640 + #define ADT7516_MSB_AIN3 0xA
2641 + #define ADT7516_MSB_AIN4 0xB
2642 + #define ADT7316_DA_DATA_BASE 0x10
2643 ++#define ADT7316_DA_10_BIT_LSB_SHIFT 6
2644 ++#define ADT7316_DA_12_BIT_LSB_SHIFT 4
2645 + #define ADT7316_DA_MSB_DATA_REGS 4
2646 + #define ADT7316_LSB_DAC_A 0x10
2647 + #define ADT7316_MSB_DAC_A 0x11
2648 +@@ -632,9 +634,7 @@ static ssize_t adt7316_show_da_high_resolution(struct device *dev,
2649 + struct adt7316_chip_info *chip = iio_priv(dev_info);
2650 +
2651 + if (chip->config3 & ADT7316_DA_HIGH_RESOLUTION) {
2652 +- if (chip->id == ID_ADT7316 || chip->id == ID_ADT7516)
2653 +- return sprintf(buf, "1 (12 bits)\n");
2654 +- if (chip->id == ID_ADT7317 || chip->id == ID_ADT7517)
2655 ++ if (chip->id != ID_ADT7318 && chip->id != ID_ADT7519)
2656 + return sprintf(buf, "1 (10 bits)\n");
2657 + }
2658 +
2659 +@@ -651,10 +651,12 @@ static ssize_t adt7316_store_da_high_resolution(struct device *dev,
2660 + u8 config3;
2661 + int ret;
2662 +
2663 ++ if (chip->id == ID_ADT7318 || chip->id == ID_ADT7519)
2664 ++ return -EPERM;
2665 ++
2666 ++ config3 = chip->config3 & (~ADT7316_DA_HIGH_RESOLUTION);
2667 + if (buf[0] == '1')
2668 +- config3 = chip->config3 | ADT7316_DA_HIGH_RESOLUTION;
2669 +- else
2670 +- config3 = chip->config3 & (~ADT7316_DA_HIGH_RESOLUTION);
2671 ++ config3 |= ADT7316_DA_HIGH_RESOLUTION;
2672 +
2673 + ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG3, config3);
2674 + if (ret)
2675 +@@ -1079,7 +1081,7 @@ static ssize_t adt7316_store_DAC_internal_Vref(struct device *dev,
2676 + ldac_config = chip->ldac_config & (~ADT7516_DAC_IN_VREF_MASK);
2677 + if (data & 0x1)
2678 + ldac_config |= ADT7516_DAC_AB_IN_VREF;
2679 +- else if (data & 0x2)
2680 ++ if (data & 0x2)
2681 + ldac_config |= ADT7516_DAC_CD_IN_VREF;
2682 + } else {
2683 + ret = kstrtou8(buf, 16, &data);
2684 +@@ -1403,7 +1405,7 @@ static IIO_DEVICE_ATTR(ex_analog_temp_offset, 0644,
2685 + static ssize_t adt7316_show_DAC(struct adt7316_chip_info *chip,
2686 + int channel, char *buf)
2687 + {
2688 +- u16 data;
2689 ++ u16 data = 0;
2690 + u8 msb, lsb, offset;
2691 + int ret;
2692 +
2693 +@@ -1428,7 +1430,11 @@ static ssize_t adt7316_show_DAC(struct adt7316_chip_info *chip,
2694 + if (ret)
2695 + return -EIO;
2696 +
2697 +- data = (msb << offset) + (lsb & ((1 << offset) - 1));
2698 ++ if (chip->dac_bits == 12)
2699 ++ data = lsb >> ADT7316_DA_12_BIT_LSB_SHIFT;
2700 ++ else if (chip->dac_bits == 10)
2701 ++ data = lsb >> ADT7316_DA_10_BIT_LSB_SHIFT;
2702 ++ data |= msb << offset;
2703 +
2704 + return sprintf(buf, "%d\n", data);
2705 + }
2706 +@@ -1436,7 +1442,7 @@ static ssize_t adt7316_show_DAC(struct adt7316_chip_info *chip,
2707 + static ssize_t adt7316_store_DAC(struct adt7316_chip_info *chip,
2708 + int channel, const char *buf, size_t len)
2709 + {
2710 +- u8 msb, lsb, offset;
2711 ++ u8 msb, lsb, lsb_reg, offset;
2712 + u16 data;
2713 + int ret;
2714 +
2715 +@@ -1454,9 +1460,13 @@ static ssize_t adt7316_store_DAC(struct adt7316_chip_info *chip,
2716 + return -EINVAL;
2717 +
2718 + if (chip->dac_bits > 8) {
2719 +- lsb = data & (1 << offset);
2720 ++ lsb = data & ((1 << offset) - 1);
2721 ++ if (chip->dac_bits == 12)
2722 ++ lsb_reg = lsb << ADT7316_DA_12_BIT_LSB_SHIFT;
2723 ++ else
2724 ++ lsb_reg = lsb << ADT7316_DA_10_BIT_LSB_SHIFT;
2725 + ret = chip->bus.write(chip->bus.client,
2726 +- ADT7316_DA_DATA_BASE + channel * 2, lsb);
2727 ++ ADT7316_DA_DATA_BASE + channel * 2, lsb_reg);
2728 + if (ret)
2729 + return -EIO;
2730 + }
2731 +diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
2732 +index 8987cec9549d..ebcadaad89d1 100644
2733 +--- a/drivers/usb/core/driver.c
2734 ++++ b/drivers/usb/core/driver.c
2735 +@@ -473,11 +473,6 @@ static int usb_unbind_interface(struct device *dev)
2736 + pm_runtime_disable(dev);
2737 + pm_runtime_set_suspended(dev);
2738 +
2739 +- /* Undo any residual pm_autopm_get_interface_* calls */
2740 +- for (r = atomic_read(&intf->pm_usage_cnt); r > 0; --r)
2741 +- usb_autopm_put_interface_no_suspend(intf);
2742 +- atomic_set(&intf->pm_usage_cnt, 0);
2743 +-
2744 + if (!error)
2745 + usb_autosuspend_device(udev);
2746 +
2747 +@@ -1633,7 +1628,6 @@ void usb_autopm_put_interface(struct usb_interface *intf)
2748 + int status;
2749 +
2750 + usb_mark_last_busy(udev);
2751 +- atomic_dec(&intf->pm_usage_cnt);
2752 + status = pm_runtime_put_sync(&intf->dev);
2753 + dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
2754 + __func__, atomic_read(&intf->dev.power.usage_count),
2755 +@@ -1662,7 +1656,6 @@ void usb_autopm_put_interface_async(struct usb_interface *intf)
2756 + int status;
2757 +
2758 + usb_mark_last_busy(udev);
2759 +- atomic_dec(&intf->pm_usage_cnt);
2760 + status = pm_runtime_put(&intf->dev);
2761 + dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
2762 + __func__, atomic_read(&intf->dev.power.usage_count),
2763 +@@ -1684,7 +1677,6 @@ void usb_autopm_put_interface_no_suspend(struct usb_interface *intf)
2764 + struct usb_device *udev = interface_to_usbdev(intf);
2765 +
2766 + usb_mark_last_busy(udev);
2767 +- atomic_dec(&intf->pm_usage_cnt);
2768 + pm_runtime_put_noidle(&intf->dev);
2769 + }
2770 + EXPORT_SYMBOL_GPL(usb_autopm_put_interface_no_suspend);
2771 +@@ -1715,8 +1707,6 @@ int usb_autopm_get_interface(struct usb_interface *intf)
2772 + status = pm_runtime_get_sync(&intf->dev);
2773 + if (status < 0)
2774 + pm_runtime_put_sync(&intf->dev);
2775 +- else
2776 +- atomic_inc(&intf->pm_usage_cnt);
2777 + dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
2778 + __func__, atomic_read(&intf->dev.power.usage_count),
2779 + status);
2780 +@@ -1750,8 +1740,6 @@ int usb_autopm_get_interface_async(struct usb_interface *intf)
2781 + status = pm_runtime_get(&intf->dev);
2782 + if (status < 0 && status != -EINPROGRESS)
2783 + pm_runtime_put_noidle(&intf->dev);
2784 +- else
2785 +- atomic_inc(&intf->pm_usage_cnt);
2786 + dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
2787 + __func__, atomic_read(&intf->dev.power.usage_count),
2788 + status);
2789 +@@ -1775,7 +1763,6 @@ void usb_autopm_get_interface_no_resume(struct usb_interface *intf)
2790 + struct usb_device *udev = interface_to_usbdev(intf);
2791 +
2792 + usb_mark_last_busy(udev);
2793 +- atomic_inc(&intf->pm_usage_cnt);
2794 + pm_runtime_get_noresume(&intf->dev);
2795 + }
2796 + EXPORT_SYMBOL_GPL(usb_autopm_get_interface_no_resume);
2797 +diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
2798 +index 4f33eb632a88..4020ce8db6ce 100644
2799 +--- a/drivers/usb/core/message.c
2800 ++++ b/drivers/usb/core/message.c
2801 +@@ -820,9 +820,11 @@ int usb_string(struct usb_device *dev, int index, char *buf, size_t size)
2802 +
2803 + if (dev->state == USB_STATE_SUSPENDED)
2804 + return -EHOSTUNREACH;
2805 +- if (size <= 0 || !buf || !index)
2806 ++ if (size <= 0 || !buf)
2807 + return -EINVAL;
2808 + buf[0] = 0;
2809 ++ if (index <= 0 || index >= 256)
2810 ++ return -EINVAL;
2811 + tbuf = kmalloc(256, GFP_NOIO);
2812 + if (!tbuf)
2813 + return -ENOMEM;
2814 +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
2815 +index 8d1dbe36db92..9f941cdb0691 100644
2816 +--- a/drivers/usb/dwc3/gadget.c
2817 ++++ b/drivers/usb/dwc3/gadget.c
2818 +@@ -1506,6 +1506,8 @@ static void dwc3_gadget_ep_skip_trbs(struct dwc3_ep *dep, struct dwc3_request *r
2819 + trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
2820 + dwc3_ep_inc_deq(dep);
2821 + }
2822 ++
2823 ++ req->num_trbs = 0;
2824 + }
2825 +
2826 + static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep)
2827 +diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
2828 +index baf72f95f0f1..213b52508621 100644
2829 +--- a/drivers/usb/gadget/udc/dummy_hcd.c
2830 ++++ b/drivers/usb/gadget/udc/dummy_hcd.c
2831 +@@ -979,8 +979,18 @@ static int dummy_udc_start(struct usb_gadget *g,
2832 + struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g);
2833 + struct dummy *dum = dum_hcd->dum;
2834 +
2835 +- if (driver->max_speed == USB_SPEED_UNKNOWN)
2836 ++ switch (g->speed) {
2837 ++ /* All the speeds we support */
2838 ++ case USB_SPEED_LOW:
2839 ++ case USB_SPEED_FULL:
2840 ++ case USB_SPEED_HIGH:
2841 ++ case USB_SPEED_SUPER:
2842 ++ break;
2843 ++ default:
2844 ++ dev_err(dummy_dev(dum_hcd), "Unsupported driver max speed %d\n",
2845 ++ driver->max_speed);
2846 + return -EINVAL;
2847 ++ }
2848 +
2849 + /*
2850 + * SLAVE side init ... the layer above hardware, which
2851 +@@ -1784,9 +1794,10 @@ static void dummy_timer(struct timer_list *t)
2852 + /* Bus speed is 500000 bytes/ms, so use a little less */
2853 + total = 490000;
2854 + break;
2855 +- default:
2856 ++ default: /* Can't happen */
2857 + dev_err(dummy_dev(dum_hcd), "bogus device speed\n");
2858 +- return;
2859 ++ total = 0;
2860 ++ break;
2861 + }
2862 +
2863 + /* FIXME if HZ != 1000 this will probably misbehave ... */
2864 +@@ -1828,7 +1839,7 @@ restart:
2865 +
2866 + /* Used up this frame's bandwidth? */
2867 + if (total <= 0)
2868 +- break;
2869 ++ continue;
2870 +
2871 + /* find the gadget's ep for this request (if configured) */
2872 + address = usb_pipeendpoint (urb->pipe);
2873 +diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
2874 +index 6d9fd5f64903..7b306aa22d25 100644
2875 +--- a/drivers/usb/misc/yurex.c
2876 ++++ b/drivers/usb/misc/yurex.c
2877 +@@ -314,6 +314,7 @@ static void yurex_disconnect(struct usb_interface *interface)
2878 + usb_deregister_dev(interface, &yurex_class);
2879 +
2880 + /* prevent more I/O from starting */
2881 ++ usb_poison_urb(dev->urb);
2882 + mutex_lock(&dev->io_mutex);
2883 + dev->interface = NULL;
2884 + mutex_unlock(&dev->io_mutex);
2885 +diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
2886 +index 31b024441938..cc794e25a0b6 100644
2887 +--- a/drivers/usb/storage/realtek_cr.c
2888 ++++ b/drivers/usb/storage/realtek_cr.c
2889 +@@ -763,18 +763,16 @@ static void rts51x_suspend_timer_fn(struct timer_list *t)
2890 + break;
2891 + case RTS51X_STAT_IDLE:
2892 + case RTS51X_STAT_SS:
2893 +- usb_stor_dbg(us, "RTS51X_STAT_SS, intf->pm_usage_cnt:%d, power.usage:%d\n",
2894 +- atomic_read(&us->pusb_intf->pm_usage_cnt),
2895 ++ usb_stor_dbg(us, "RTS51X_STAT_SS, power.usage:%d\n",
2896 + atomic_read(&us->pusb_intf->dev.power.usage_count));
2897 +
2898 +- if (atomic_read(&us->pusb_intf->pm_usage_cnt) > 0) {
2899 ++ if (atomic_read(&us->pusb_intf->dev.power.usage_count) > 0) {
2900 + usb_stor_dbg(us, "Ready to enter SS state\n");
2901 + rts51x_set_stat(chip, RTS51X_STAT_SS);
2902 + /* ignore mass storage interface's children */
2903 + pm_suspend_ignore_children(&us->pusb_intf->dev, true);
2904 + usb_autopm_put_interface_async(us->pusb_intf);
2905 +- usb_stor_dbg(us, "RTS51X_STAT_SS 01, intf->pm_usage_cnt:%d, power.usage:%d\n",
2906 +- atomic_read(&us->pusb_intf->pm_usage_cnt),
2907 ++ usb_stor_dbg(us, "RTS51X_STAT_SS 01, power.usage:%d\n",
2908 + atomic_read(&us->pusb_intf->dev.power.usage_count));
2909 + }
2910 + break;
2911 +@@ -807,11 +805,10 @@ static void rts51x_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
2912 + int ret;
2913 +
2914 + if (working_scsi(srb)) {
2915 +- usb_stor_dbg(us, "working scsi, intf->pm_usage_cnt:%d, power.usage:%d\n",
2916 +- atomic_read(&us->pusb_intf->pm_usage_cnt),
2917 ++ usb_stor_dbg(us, "working scsi, power.usage:%d\n",
2918 + atomic_read(&us->pusb_intf->dev.power.usage_count));
2919 +
2920 +- if (atomic_read(&us->pusb_intf->pm_usage_cnt) <= 0) {
2921 ++ if (atomic_read(&us->pusb_intf->dev.power.usage_count) <= 0) {
2922 + ret = usb_autopm_get_interface(us->pusb_intf);
2923 + usb_stor_dbg(us, "working scsi, ret=%d\n", ret);
2924 + }
2925 +diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
2926 +index 97b09a42a10c..dbfb2f24d71e 100644
2927 +--- a/drivers/usb/usbip/stub_rx.c
2928 ++++ b/drivers/usb/usbip/stub_rx.c
2929 +@@ -361,16 +361,10 @@ static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
2930 + }
2931 +
2932 + if (usb_endpoint_xfer_isoc(epd)) {
2933 +- /* validate packet size and number of packets */
2934 +- unsigned int maxp, packets, bytes;
2935 +-
2936 +- maxp = usb_endpoint_maxp(epd);
2937 +- maxp *= usb_endpoint_maxp_mult(epd);
2938 +- bytes = pdu->u.cmd_submit.transfer_buffer_length;
2939 +- packets = DIV_ROUND_UP(bytes, maxp);
2940 +-
2941 ++ /* validate number of packets */
2942 + if (pdu->u.cmd_submit.number_of_packets < 0 ||
2943 +- pdu->u.cmd_submit.number_of_packets > packets) {
2944 ++ pdu->u.cmd_submit.number_of_packets >
2945 ++ USBIP_MAX_ISO_PACKETS) {
2946 + dev_err(&sdev->udev->dev,
2947 + "CMD_SUBMIT: isoc invalid num packets %d\n",
2948 + pdu->u.cmd_submit.number_of_packets);
2949 +diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
2950 +index bf8afe9b5883..8be857a4fa13 100644
2951 +--- a/drivers/usb/usbip/usbip_common.h
2952 ++++ b/drivers/usb/usbip/usbip_common.h
2953 +@@ -121,6 +121,13 @@ extern struct device_attribute dev_attr_usbip_debug;
2954 + #define USBIP_DIR_OUT 0x00
2955 + #define USBIP_DIR_IN 0x01
2956 +
2957 ++/*
2958 ++ * Arbitrary limit for the maximum number of isochronous packets in an URB,
2959 ++ * compare for example the uhci_submit_isochronous function in
2960 ++ * drivers/usb/host/uhci-q.c
2961 ++ */
2962 ++#define USBIP_MAX_ISO_PACKETS 1024
2963 ++
2964 + /**
2965 + * struct usbip_header_basic - data pertinent to every request
2966 + * @command: the usbip request type
2967 +diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
2968 +index ff60bd1ea587..eb8fc8ccffc6 100644
2969 +--- a/drivers/vfio/pci/vfio_pci.c
2970 ++++ b/drivers/vfio/pci/vfio_pci.c
2971 +@@ -1597,11 +1597,11 @@ static void __init vfio_pci_fill_ids(void)
2972 + rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
2973 + subvendor, subdevice, class, class_mask, 0);
2974 + if (rc)
2975 +- pr_warn("failed to add dynamic id [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x (%d)\n",
2976 ++ pr_warn("failed to add dynamic id [%04x:%04x[%04x:%04x]] class %#08x/%08x (%d)\n",
2977 + vendor, device, subvendor, subdevice,
2978 + class, class_mask, rc);
2979 + else
2980 +- pr_info("add [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x\n",
2981 ++ pr_info("add [%04x:%04x[%04x:%04x]] class %#08x/%08x\n",
2982 + vendor, device, subvendor, subdevice,
2983 + class, class_mask);
2984 + }
2985 +diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c
2986 +index 0f4ecfcdb549..a9fb77585272 100644
2987 +--- a/drivers/w1/masters/ds2490.c
2988 ++++ b/drivers/w1/masters/ds2490.c
2989 +@@ -1016,15 +1016,15 @@ static int ds_probe(struct usb_interface *intf,
2990 + /* alternative 3, 1ms interrupt (greatly speeds search), 64 byte bulk */
2991 + alt = 3;
2992 + err = usb_set_interface(dev->udev,
2993 +- intf->altsetting[alt].desc.bInterfaceNumber, alt);
2994 ++ intf->cur_altsetting->desc.bInterfaceNumber, alt);
2995 + if (err) {
2996 + dev_err(&dev->udev->dev, "Failed to set alternative setting %d "
2997 + "for %d interface: err=%d.\n", alt,
2998 +- intf->altsetting[alt].desc.bInterfaceNumber, err);
2999 ++ intf->cur_altsetting->desc.bInterfaceNumber, err);
3000 + goto err_out_clear;
3001 + }
3002 +
3003 +- iface_desc = &intf->altsetting[alt];
3004 ++ iface_desc = intf->cur_altsetting;
3005 + if (iface_desc->desc.bNumEndpoints != NUM_EP-1) {
3006 + pr_info("Num endpoints=%d. It is not DS9490R.\n",
3007 + iface_desc->desc.bNumEndpoints);
3008 +diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
3009 +index c3e201025ef0..0782ff3c2273 100644
3010 +--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
3011 ++++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
3012 +@@ -622,9 +622,7 @@ static int xenbus_file_open(struct inode *inode, struct file *filp)
3013 + if (xen_store_evtchn == 0)
3014 + return -ENOENT;
3015 +
3016 +- nonseekable_open(inode, filp);
3017 +-
3018 +- filp->f_mode &= ~FMODE_ATOMIC_POS; /* cdev-style semantics */
3019 ++ stream_open(inode, filp);
3020 +
3021 + u = kzalloc(sizeof(*u), GFP_KERNEL);
3022 + if (u == NULL)
3023 +diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
3024 +index 29c68c5d44d5..c4a4fc6f1a95 100644
3025 +--- a/fs/debugfs/inode.c
3026 ++++ b/fs/debugfs/inode.c
3027 +@@ -163,19 +163,24 @@ static int debugfs_show_options(struct seq_file *m, struct dentry *root)
3028 + return 0;
3029 + }
3030 +
3031 +-static void debugfs_evict_inode(struct inode *inode)
3032 ++static void debugfs_i_callback(struct rcu_head *head)
3033 + {
3034 +- truncate_inode_pages_final(&inode->i_data);
3035 +- clear_inode(inode);
3036 ++ struct inode *inode = container_of(head, struct inode, i_rcu);
3037 + if (S_ISLNK(inode->i_mode))
3038 + kfree(inode->i_link);
3039 ++ free_inode_nonrcu(inode);
3040 ++}
3041 ++
3042 ++static void debugfs_destroy_inode(struct inode *inode)
3043 ++{
3044 ++ call_rcu(&inode->i_rcu, debugfs_i_callback);
3045 + }
3046 +
3047 + static const struct super_operations debugfs_super_operations = {
3048 + .statfs = simple_statfs,
3049 + .remount_fs = debugfs_remount,
3050 + .show_options = debugfs_show_options,
3051 +- .evict_inode = debugfs_evict_inode,
3052 ++ .destroy_inode = debugfs_destroy_inode,
3053 + };
3054 +
3055 + static void debugfs_release_dentry(struct dentry *dentry)
3056 +diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
3057 +index a7fa037b876b..a3a3d256fb0e 100644
3058 +--- a/fs/hugetlbfs/inode.c
3059 ++++ b/fs/hugetlbfs/inode.c
3060 +@@ -741,11 +741,17 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
3061 + umode_t mode, dev_t dev)
3062 + {
3063 + struct inode *inode;
3064 +- struct resv_map *resv_map;
3065 ++ struct resv_map *resv_map = NULL;
3066 +
3067 +- resv_map = resv_map_alloc();
3068 +- if (!resv_map)
3069 +- return NULL;
3070 ++ /*
3071 ++ * Reserve maps are only needed for inodes that can have associated
3072 ++ * page allocations.
3073 ++ */
3074 ++ if (S_ISREG(mode) || S_ISLNK(mode)) {
3075 ++ resv_map = resv_map_alloc();
3076 ++ if (!resv_map)
3077 ++ return NULL;
3078 ++ }
3079 +
3080 + inode = new_inode(sb);
3081 + if (inode) {
3082 +@@ -780,8 +786,10 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
3083 + break;
3084 + }
3085 + lockdep_annotate_inode_mutex_key(inode);
3086 +- } else
3087 +- kref_put(&resv_map->refs, resv_map_release);
3088 ++ } else {
3089 ++ if (resv_map)
3090 ++ kref_put(&resv_map->refs, resv_map_release);
3091 ++ }
3092 +
3093 + return inode;
3094 + }
3095 +diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
3096 +index 389ea53ea487..bccfc40b3a74 100644
3097 +--- a/fs/jffs2/readinode.c
3098 ++++ b/fs/jffs2/readinode.c
3099 +@@ -1414,11 +1414,6 @@ void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
3100 +
3101 + jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL);
3102 +
3103 +- if (f->target) {
3104 +- kfree(f->target);
3105 +- f->target = NULL;
3106 +- }
3107 +-
3108 + fds = f->dents;
3109 + while(fds) {
3110 + fd = fds;
3111 +diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
3112 +index bb6ae387469f..05d892c79339 100644
3113 +--- a/fs/jffs2/super.c
3114 ++++ b/fs/jffs2/super.c
3115 +@@ -47,7 +47,10 @@ static struct inode *jffs2_alloc_inode(struct super_block *sb)
3116 + static void jffs2_i_callback(struct rcu_head *head)
3117 + {
3118 + struct inode *inode = container_of(head, struct inode, i_rcu);
3119 +- kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode));
3120 ++ struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
3121 ++
3122 ++ kfree(f->target);
3123 ++ kmem_cache_free(jffs2_inode_cachep, f);
3124 + }
3125 +
3126 + static void jffs2_destroy_inode(struct inode *inode)
3127 +diff --git a/fs/open.c b/fs/open.c
3128 +index f1c2f855fd43..a00350018a47 100644
3129 +--- a/fs/open.c
3130 ++++ b/fs/open.c
3131 +@@ -1215,3 +1215,21 @@ int nonseekable_open(struct inode *inode, struct file *filp)
3132 + }
3133 +
3134 + EXPORT_SYMBOL(nonseekable_open);
3135 ++
3136 ++/*
3137 ++ * stream_open is used by subsystems that want stream-like file descriptors.
3138 ++ * Such file descriptors are not seekable and don't have notion of position
3139 ++ * (file.f_pos is always 0). Contrary to file descriptors of other regular
3140 ++ * files, .read() and .write() can run simultaneously.
3141 ++ *
3142 ++ * stream_open never fails and is marked to return int so that it could be
3143 ++ * directly used as file_operations.open .
3144 ++ */
3145 ++int stream_open(struct inode *inode, struct file *filp)
3146 ++{
3147 ++ filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE | FMODE_ATOMIC_POS);
3148 ++ filp->f_mode |= FMODE_STREAM;
3149 ++ return 0;
3150 ++}
3151 ++
3152 ++EXPORT_SYMBOL(stream_open);
3153 +diff --git a/fs/read_write.c b/fs/read_write.c
3154 +index 27b69b85d49f..3d3194e32201 100644
3155 +--- a/fs/read_write.c
3156 ++++ b/fs/read_write.c
3157 +@@ -560,12 +560,13 @@ ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_
3158 +
3159 + static inline loff_t file_pos_read(struct file *file)
3160 + {
3161 +- return file->f_pos;
3162 ++ return file->f_mode & FMODE_STREAM ? 0 : file->f_pos;
3163 + }
3164 +
3165 + static inline void file_pos_write(struct file *file, loff_t pos)
3166 + {
3167 +- file->f_pos = pos;
3168 ++ if ((file->f_mode & FMODE_STREAM) == 0)
3169 ++ file->f_pos = pos;
3170 + }
3171 +
3172 + ssize_t ksys_read(unsigned int fd, char __user *buf, size_t count)
3173 +diff --git a/include/linux/fs.h b/include/linux/fs.h
3174 +index fd423fec8d83..09ce2646c78a 100644
3175 +--- a/include/linux/fs.h
3176 ++++ b/include/linux/fs.h
3177 +@@ -153,6 +153,9 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
3178 + #define FMODE_OPENED ((__force fmode_t)0x80000)
3179 + #define FMODE_CREATED ((__force fmode_t)0x100000)
3180 +
3181 ++/* File is stream-like */
3182 ++#define FMODE_STREAM ((__force fmode_t)0x200000)
3183 ++
3184 + /* File was opened by fanotify and shouldn't generate fanotify events */
3185 + #define FMODE_NONOTIFY ((__force fmode_t)0x4000000)
3186 +
3187 +@@ -3074,6 +3077,7 @@ extern loff_t no_seek_end_llseek_size(struct file *, loff_t, int, loff_t);
3188 + extern loff_t no_seek_end_llseek(struct file *, loff_t, int);
3189 + extern int generic_file_open(struct inode * inode, struct file * filp);
3190 + extern int nonseekable_open(struct inode * inode, struct file * filp);
3191 ++extern int stream_open(struct inode * inode, struct file * filp);
3192 +
3193 + #ifdef CONFIG_BLOCK
3194 + typedef void (dio_submit_t)(struct bio *bio, struct inode *inode,
3195 +diff --git a/include/linux/platform_data/x86/clk-pmc-atom.h b/include/linux/platform_data/x86/clk-pmc-atom.h
3196 +index 3ab892208343..7a37ac27d0fb 100644
3197 +--- a/include/linux/platform_data/x86/clk-pmc-atom.h
3198 ++++ b/include/linux/platform_data/x86/clk-pmc-atom.h
3199 +@@ -35,10 +35,13 @@ struct pmc_clk {
3200 + *
3201 + * @base: PMC clock register base offset
3202 + * @clks: pointer to set of registered clocks, typically 0..5
3203 ++ * @critical: flag to indicate if firmware enabled pmc_plt_clks
3204 ++ * should be marked as critial or not
3205 + */
3206 + struct pmc_clk_data {
3207 + void __iomem *base;
3208 + const struct pmc_clk *clks;
3209 ++ bool critical;
3210 + };
3211 +
3212 + #endif /* __PLATFORM_DATA_X86_CLK_PMC_ATOM_H */
3213 +diff --git a/include/linux/usb.h b/include/linux/usb.h
3214 +index 5e49e82c4368..ff010d1fd1c7 100644
3215 +--- a/include/linux/usb.h
3216 ++++ b/include/linux/usb.h
3217 +@@ -200,7 +200,6 @@ usb_find_last_int_out_endpoint(struct usb_host_interface *alt,
3218 + * @dev: driver model's view of this device
3219 + * @usb_dev: if an interface is bound to the USB major, this will point
3220 + * to the sysfs representation for that device.
3221 +- * @pm_usage_cnt: PM usage counter for this interface
3222 + * @reset_ws: Used for scheduling resets from atomic context.
3223 + * @resetting_device: USB core reset the device, so use alt setting 0 as
3224 + * current; needs bandwidth alloc after reset.
3225 +@@ -257,7 +256,6 @@ struct usb_interface {
3226 +
3227 + struct device dev; /* interface specific device info */
3228 + struct device *usb_dev;
3229 +- atomic_t pm_usage_cnt; /* usage counter for autosuspend */
3230 + struct work_struct reset_ws; /* for resets in atomic context */
3231 + };
3232 + #define to_usb_interface(d) container_of(d, struct usb_interface, dev)
3233 +diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
3234 +index 8974b3755670..3c18260403dd 100644
3235 +--- a/kernel/bpf/cpumap.c
3236 ++++ b/kernel/bpf/cpumap.c
3237 +@@ -162,10 +162,14 @@ static void cpu_map_kthread_stop(struct work_struct *work)
3238 + static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
3239 + struct xdp_frame *xdpf)
3240 + {
3241 ++ unsigned int hard_start_headroom;
3242 + unsigned int frame_size;
3243 + void *pkt_data_start;
3244 + struct sk_buff *skb;
3245 +
3246 ++ /* Part of headroom was reserved to xdpf */
3247 ++ hard_start_headroom = sizeof(struct xdp_frame) + xdpf->headroom;
3248 ++
3249 + /* build_skb need to place skb_shared_info after SKB end, and
3250 + * also want to know the memory "truesize". Thus, need to
3251 + * know the memory frame size backing xdp_buff.
3252 +@@ -183,15 +187,15 @@ static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
3253 + * is not at a fixed memory location, with mixed length
3254 + * packets, which is bad for cache-line hotness.
3255 + */
3256 +- frame_size = SKB_DATA_ALIGN(xdpf->len + xdpf->headroom) +
3257 ++ frame_size = SKB_DATA_ALIGN(xdpf->len + hard_start_headroom) +
3258 + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3259 +
3260 +- pkt_data_start = xdpf->data - xdpf->headroom;
3261 ++ pkt_data_start = xdpf->data - hard_start_headroom;
3262 + skb = build_skb(pkt_data_start, frame_size);
3263 + if (!skb)
3264 + return NULL;
3265 +
3266 +- skb_reserve(skb, xdpf->headroom);
3267 ++ skb_reserve(skb, hard_start_headroom);
3268 + __skb_put(skb, xdpf->len);
3269 + if (xdpf->metasize)
3270 + skb_metadata_set(skb, xdpf->metasize);
3271 +@@ -205,6 +209,9 @@ static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
3272 + * - RX ring dev queue index (skb_record_rx_queue)
3273 + */
3274 +
3275 ++ /* Allow SKB to reuse area used by xdp_frame */
3276 ++ xdp_scrub_frame(xdpf);
3277 ++
3278 + return skb;
3279 + }
3280 +
3281 +diff --git a/kernel/seccomp.c b/kernel/seccomp.c
3282 +index e815781ed751..181e72718434 100644
3283 +--- a/kernel/seccomp.c
3284 ++++ b/kernel/seccomp.c
3285 +@@ -500,7 +500,10 @@ out:
3286 + *
3287 + * Caller must be holding current->sighand->siglock lock.
3288 + *
3289 +- * Returns 0 on success, -ve on error.
3290 ++ * Returns 0 on success, -ve on error, or
3291 ++ * - in TSYNC mode: the pid of a thread which was either not in the correct
3292 ++ * seccomp mode or did not have an ancestral seccomp filter
3293 ++ * - in NEW_LISTENER mode: the fd of the new listener
3294 + */
3295 + static long seccomp_attach_filter(unsigned int flags,
3296 + struct seccomp_filter *filter)
3297 +@@ -1256,6 +1259,16 @@ static long seccomp_set_mode_filter(unsigned int flags,
3298 + if (flags & ~SECCOMP_FILTER_FLAG_MASK)
3299 + return -EINVAL;
3300 +
3301 ++ /*
3302 ++ * In the successful case, NEW_LISTENER returns the new listener fd.
3303 ++ * But in the failure case, TSYNC returns the thread that died. If you
3304 ++ * combine these two flags, there's no way to tell whether something
3305 ++ * succeeded or failed. So, let's disallow this combination.
3306 ++ */
3307 ++ if ((flags & SECCOMP_FILTER_FLAG_TSYNC) &&
3308 ++ (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER))
3309 ++ return -EINVAL;
3310 ++
3311 + /* Prepare the new filter before holding any locks. */
3312 + prepared = seccomp_prepare_user_filter(filter);
3313 + if (IS_ERR(prepared))
3314 +@@ -1302,7 +1315,7 @@ out:
3315 + mutex_unlock(&current->signal->cred_guard_mutex);
3316 + out_put_fd:
3317 + if (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) {
3318 +- if (ret < 0) {
3319 ++ if (ret) {
3320 + listener_f->private_data = NULL;
3321 + fput(listener_f);
3322 + put_unused_fd(listener);
3323 +diff --git a/mm/kmemleak.c b/mm/kmemleak.c
3324 +index 707fa5579f66..2e435b8142e5 100644
3325 +--- a/mm/kmemleak.c
3326 ++++ b/mm/kmemleak.c
3327 +@@ -1401,6 +1401,7 @@ static void scan_block(void *_start, void *_end,
3328 + /*
3329 + * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
3330 + */
3331 ++#ifdef CONFIG_SMP
3332 + static void scan_large_block(void *start, void *end)
3333 + {
3334 + void *next;
3335 +@@ -1412,6 +1413,7 @@ static void scan_large_block(void *start, void *end)
3336 + cond_resched();
3337 + }
3338 + }
3339 ++#endif
3340 +
3341 + /*
3342 + * Scan a memory block corresponding to a kmemleak_object. A condition is
3343 +@@ -1529,11 +1531,6 @@ static void kmemleak_scan(void)
3344 + }
3345 + rcu_read_unlock();
3346 +
3347 +- /* data/bss scanning */
3348 +- scan_large_block(_sdata, _edata);
3349 +- scan_large_block(__bss_start, __bss_stop);
3350 +- scan_large_block(__start_ro_after_init, __end_ro_after_init);
3351 +-
3352 + #ifdef CONFIG_SMP
3353 + /* per-cpu sections scanning */
3354 + for_each_possible_cpu(i)
3355 +@@ -2071,6 +2068,17 @@ void __init kmemleak_init(void)
3356 + }
3357 + local_irq_restore(flags);
3358 +
3359 ++ /* register the data/bss sections */
3360 ++ create_object((unsigned long)_sdata, _edata - _sdata,
3361 ++ KMEMLEAK_GREY, GFP_ATOMIC);
3362 ++ create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
3363 ++ KMEMLEAK_GREY, GFP_ATOMIC);
3364 ++ /* only register .data..ro_after_init if not within .data */
3365 ++ if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata)
3366 ++ create_object((unsigned long)__start_ro_after_init,
3367 ++ __end_ro_after_init - __start_ro_after_init,
3368 ++ KMEMLEAK_GREY, GFP_ATOMIC);
3369 ++
3370 + /*
3371 + * This is the point where tracking allocations is safe. Automatic
3372 + * scanning is started during the late initcall. Add the early logged
3373 +diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
3374 +index ef0dec20c7d8..5da183b2f4c9 100644
3375 +--- a/net/batman-adv/bat_v_elp.c
3376 ++++ b/net/batman-adv/bat_v_elp.c
3377 +@@ -104,8 +104,10 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
3378 +
3379 + ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo);
3380 +
3381 +- /* free the TID stats immediately */
3382 +- cfg80211_sinfo_release_content(&sinfo);
3383 ++ if (!ret) {
3384 ++ /* free the TID stats immediately */
3385 ++ cfg80211_sinfo_release_content(&sinfo);
3386 ++ }
3387 +
3388 + dev_put(real_netdev);
3389 + if (ret == -ENOENT) {
3390 +diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
3391 +index 5fdde2947802..cf2bcea7df82 100644
3392 +--- a/net/batman-adv/bridge_loop_avoidance.c
3393 ++++ b/net/batman-adv/bridge_loop_avoidance.c
3394 +@@ -803,6 +803,8 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
3395 + const u8 *mac, const unsigned short vid)
3396 + {
3397 + struct batadv_bla_claim search_claim, *claim;
3398 ++ struct batadv_bla_claim *claim_removed_entry;
3399 ++ struct hlist_node *claim_removed_node;
3400 +
3401 + ether_addr_copy(search_claim.addr, mac);
3402 + search_claim.vid = vid;
3403 +@@ -813,10 +815,18 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
3404 + batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): %pM, vid %d\n", __func__,
3405 + mac, batadv_print_vid(vid));
3406 +
3407 +- batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
3408 +- batadv_choose_claim, claim);
3409 +- batadv_claim_put(claim); /* reference from the hash is gone */
3410 ++ claim_removed_node = batadv_hash_remove(bat_priv->bla.claim_hash,
3411 ++ batadv_compare_claim,
3412 ++ batadv_choose_claim, claim);
3413 ++ if (!claim_removed_node)
3414 ++ goto free_claim;
3415 +
3416 ++ /* reference from the hash is gone */
3417 ++ claim_removed_entry = hlist_entry(claim_removed_node,
3418 ++ struct batadv_bla_claim, hash_entry);
3419 ++ batadv_claim_put(claim_removed_entry);
3420 ++
3421 ++free_claim:
3422 + /* don't need the reference from hash_find() anymore */
3423 + batadv_claim_put(claim);
3424 + }
3425 +diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
3426 +index 8dcd4968cde7..6ec0e67be560 100644
3427 +--- a/net/batman-adv/translation-table.c
3428 ++++ b/net/batman-adv/translation-table.c
3429 +@@ -616,14 +616,26 @@ static void batadv_tt_global_free(struct batadv_priv *bat_priv,
3430 + struct batadv_tt_global_entry *tt_global,
3431 + const char *message)
3432 + {
3433 ++ struct batadv_tt_global_entry *tt_removed_entry;
3434 ++ struct hlist_node *tt_removed_node;
3435 ++
3436 + batadv_dbg(BATADV_DBG_TT, bat_priv,
3437 + "Deleting global tt entry %pM (vid: %d): %s\n",
3438 + tt_global->common.addr,
3439 + batadv_print_vid(tt_global->common.vid), message);
3440 +
3441 +- batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
3442 +- batadv_choose_tt, &tt_global->common);
3443 +- batadv_tt_global_entry_put(tt_global);
3444 ++ tt_removed_node = batadv_hash_remove(bat_priv->tt.global_hash,
3445 ++ batadv_compare_tt,
3446 ++ batadv_choose_tt,
3447 ++ &tt_global->common);
3448 ++ if (!tt_removed_node)
3449 ++ return;
3450 ++
3451 ++ /* drop reference of remove hash entry */
3452 ++ tt_removed_entry = hlist_entry(tt_removed_node,
3453 ++ struct batadv_tt_global_entry,
3454 ++ common.hash_entry);
3455 ++ batadv_tt_global_entry_put(tt_removed_entry);
3456 + }
3457 +
3458 + /**
3459 +@@ -1337,9 +1349,10 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
3460 + unsigned short vid, const char *message,
3461 + bool roaming)
3462 + {
3463 ++ struct batadv_tt_local_entry *tt_removed_entry;
3464 + struct batadv_tt_local_entry *tt_local_entry;
3465 + u16 flags, curr_flags = BATADV_NO_FLAGS;
3466 +- void *tt_entry_exists;
3467 ++ struct hlist_node *tt_removed_node;
3468 +
3469 + tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
3470 + if (!tt_local_entry)
3471 +@@ -1368,15 +1381,18 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
3472 + */
3473 + batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL);
3474 +
3475 +- tt_entry_exists = batadv_hash_remove(bat_priv->tt.local_hash,
3476 ++ tt_removed_node = batadv_hash_remove(bat_priv->tt.local_hash,
3477 + batadv_compare_tt,
3478 + batadv_choose_tt,
3479 + &tt_local_entry->common);
3480 +- if (!tt_entry_exists)
3481 ++ if (!tt_removed_node)
3482 + goto out;
3483 +
3484 +- /* extra call to free the local tt entry */
3485 +- batadv_tt_local_entry_put(tt_local_entry);
3486 ++ /* drop reference of remove hash entry */
3487 ++ tt_removed_entry = hlist_entry(tt_removed_node,
3488 ++ struct batadv_tt_local_entry,
3489 ++ common.hash_entry);
3490 ++ batadv_tt_local_entry_put(tt_removed_entry);
3491 +
3492 + out:
3493 + if (tt_local_entry)
3494 +diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
3495 +index cff0fb3578c9..deb3faf08337 100644
3496 +--- a/net/mac80211/debugfs_netdev.c
3497 ++++ b/net/mac80211/debugfs_netdev.c
3498 +@@ -841,7 +841,7 @@ void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
3499 +
3500 + dir = sdata->vif.debugfs_dir;
3501 +
3502 +- if (!dir)
3503 ++ if (IS_ERR_OR_NULL(dir))
3504 + return;
3505 +
3506 + sprintf(buf, "netdev:%s", sdata->name);
3507 +diff --git a/net/mac80211/key.c b/net/mac80211/key.c
3508 +index 4700718e010f..37e372896230 100644
3509 +--- a/net/mac80211/key.c
3510 ++++ b/net/mac80211/key.c
3511 +@@ -167,8 +167,10 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
3512 + * The driver doesn't know anything about VLAN interfaces.
3513 + * Hence, don't send GTKs for VLAN interfaces to the driver.
3514 + */
3515 +- if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE))
3516 ++ if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
3517 ++ ret = 1;
3518 + goto out_unsupported;
3519 ++ }
3520 + }
3521 +
3522 + ret = drv_set_key(key->local, SET_KEY, sdata,
3523 +@@ -213,11 +215,8 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
3524 + /* all of these we can do in software - if driver can */
3525 + if (ret == 1)
3526 + return 0;
3527 +- if (ieee80211_hw_check(&key->local->hw, SW_CRYPTO_CONTROL)) {
3528 +- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
3529 +- return 0;
3530 ++ if (ieee80211_hw_check(&key->local->hw, SW_CRYPTO_CONTROL))
3531 + return -EINVAL;
3532 +- }
3533 + return 0;
3534 + default:
3535 + return -EINVAL;
3536 +diff --git a/scripts/coccinelle/api/stream_open.cocci b/scripts/coccinelle/api/stream_open.cocci
3537 +new file mode 100644
3538 +index 000000000000..350145da7669
3539 +--- /dev/null
3540 ++++ b/scripts/coccinelle/api/stream_open.cocci
3541 +@@ -0,0 +1,363 @@
3542 ++// SPDX-License-Identifier: GPL-2.0
3543 ++// Author: Kirill Smelkov (kirr@××××××.com)
3544 ++//
3545 ++// Search for stream-like files that are using nonseekable_open and convert
3546 ++// them to stream_open. A stream-like file is a file that does not use ppos in
3547 ++// its read and write. Rationale for the conversion is to avoid deadlock in
3548 ++// between read and write.
3549 ++
3550 ++virtual report
3551 ++virtual patch
3552 ++virtual explain // explain decisions in the patch (SPFLAGS="-D explain")
3553 ++
3554 ++// stream-like reader & writer - ones that do not depend on f_pos.
3555 ++@ stream_reader @
3556 ++identifier readstream, ppos;
3557 ++identifier f, buf, len;
3558 ++type loff_t;
3559 ++@@
3560 ++ ssize_t readstream(struct file *f, char *buf, size_t len, loff_t *ppos)
3561 ++ {
3562 ++ ... when != ppos
3563 ++ }
3564 ++
3565 ++@ stream_writer @
3566 ++identifier writestream, ppos;
3567 ++identifier f, buf, len;
3568 ++type loff_t;
3569 ++@@
3570 ++ ssize_t writestream(struct file *f, const char *buf, size_t len, loff_t *ppos)
3571 ++ {
3572 ++ ... when != ppos
3573 ++ }
3574 ++
3575 ++
3576 ++// a function that blocks
3577 ++@ blocks @
3578 ++identifier block_f;
3579 ++identifier wait_event =~ "^wait_event_.*";
3580 ++@@
3581 ++ block_f(...) {
3582 ++ ... when exists
3583 ++ wait_event(...)
3584 ++ ... when exists
3585 ++ }
3586 ++
3587 ++// stream_reader that can block inside.
3588 ++//
3589 ++// XXX wait_* can be called not directly from current function (e.g. func -> f -> g -> wait())
3590 ++// XXX currently reader_blocks supports only direct and 1-level indirect cases.
3591 ++@ reader_blocks_direct @
3592 ++identifier stream_reader.readstream;
3593 ++identifier wait_event =~ "^wait_event_.*";
3594 ++@@
3595 ++ readstream(...)
3596 ++ {
3597 ++ ... when exists
3598 ++ wait_event(...)
3599 ++ ... when exists
3600 ++ }
3601 ++
3602 ++@ reader_blocks_1 @
3603 ++identifier stream_reader.readstream;
3604 ++identifier blocks.block_f;
3605 ++@@
3606 ++ readstream(...)
3607 ++ {
3608 ++ ... when exists
3609 ++ block_f(...)
3610 ++ ... when exists
3611 ++ }
3612 ++
3613 ++@ reader_blocks depends on reader_blocks_direct || reader_blocks_1 @
3614 ++identifier stream_reader.readstream;
3615 ++@@
3616 ++ readstream(...) {
3617 ++ ...
3618 ++ }
3619 ++
3620 ++
3621 ++// file_operations + whether they have _any_ .read, .write, .llseek ... at all.
3622 ++//
3623 ++// XXX add support for file_operations xxx[N] = ... (sound/core/pcm_native.c)
3624 ++@ fops0 @
3625 ++identifier fops;
3626 ++@@
3627 ++ struct file_operations fops = {
3628 ++ ...
3629 ++ };
3630 ++
3631 ++@ has_read @
3632 ++identifier fops0.fops;
3633 ++identifier read_f;
3634 ++@@
3635 ++ struct file_operations fops = {
3636 ++ .read = read_f,
3637 ++ };
3638 ++
3639 ++@ has_read_iter @
3640 ++identifier fops0.fops;
3641 ++identifier read_iter_f;
3642 ++@@
3643 ++ struct file_operations fops = {
3644 ++ .read_iter = read_iter_f,
3645 ++ };
3646 ++
3647 ++@ has_write @
3648 ++identifier fops0.fops;
3649 ++identifier write_f;
3650 ++@@
3651 ++ struct file_operations fops = {
3652 ++ .write = write_f,
3653 ++ };
3654 ++
3655 ++@ has_write_iter @
3656 ++identifier fops0.fops;
3657 ++identifier write_iter_f;
3658 ++@@
3659 ++ struct file_operations fops = {
3660 ++ .write_iter = write_iter_f,
3661 ++ };
3662 ++
3663 ++@ has_llseek @
3664 ++identifier fops0.fops;
3665 ++identifier llseek_f;
3666 ++@@
3667 ++ struct file_operations fops = {
3668 ++ .llseek = llseek_f,
3669 ++ };
3670 ++
3671 ++@ has_no_llseek @
3672 ++identifier fops0.fops;
3673 ++@@
3674 ++ struct file_operations fops = {
3675 ++ .llseek = no_llseek,
3676 ++ };
3677 ++
3678 ++@ has_mmap @
3679 ++identifier fops0.fops;
3680 ++identifier mmap_f;
3681 ++@@
3682 ++ struct file_operations fops = {
3683 ++ .mmap = mmap_f,
3684 ++ };
3685 ++
3686 ++@ has_copy_file_range @
3687 ++identifier fops0.fops;
3688 ++identifier copy_file_range_f;
3689 ++@@
3690 ++ struct file_operations fops = {
3691 ++ .copy_file_range = copy_file_range_f,
3692 ++ };
3693 ++
3694 ++@ has_remap_file_range @
3695 ++identifier fops0.fops;
3696 ++identifier remap_file_range_f;
3697 ++@@
3698 ++ struct file_operations fops = {
3699 ++ .remap_file_range = remap_file_range_f,
3700 ++ };
3701 ++
3702 ++@ has_splice_read @
3703 ++identifier fops0.fops;
3704 ++identifier splice_read_f;
3705 ++@@
3706 ++ struct file_operations fops = {
3707 ++ .splice_read = splice_read_f,
3708 ++ };
3709 ++
3710 ++@ has_splice_write @
3711 ++identifier fops0.fops;
3712 ++identifier splice_write_f;
3713 ++@@
3714 ++ struct file_operations fops = {
3715 ++ .splice_write = splice_write_f,
3716 ++ };
3717 ++
3718 ++
3719 ++// file_operations that is candidate for stream_open conversion - it does not
3720 ++// use mmap and other methods that assume @offset access to file.
3721 ++//
3722 ++// XXX for simplicity require no .{read/write}_iter and no .splice_{read/write} for now.
3723 ++// XXX maybe_steam.fops cannot be used in other rules - it gives "bad rule maybe_stream or bad variable fops".
3724 ++@ maybe_stream depends on (!has_llseek || has_no_llseek) && !has_mmap && !has_copy_file_range && !has_remap_file_range && !has_read_iter && !has_write_iter && !has_splice_read && !has_splice_write @
3725 ++identifier fops0.fops;
3726 ++@@
3727 ++ struct file_operations fops = {
3728 ++ };
3729 ++
3730 ++
3731 ++// ---- conversions ----
3732 ++
3733 ++// XXX .open = nonseekable_open -> .open = stream_open
3734 ++// XXX .open = func -> openfunc -> nonseekable_open
3735 ++
3736 ++// read & write
3737 ++//
3738 ++// if both are used in the same file_operations together with an opener -
3739 ++// under that conditions we can use stream_open instead of nonseekable_open.
3740 ++@ fops_rw depends on maybe_stream @
3741 ++identifier fops0.fops, openfunc;
3742 ++identifier stream_reader.readstream;
3743 ++identifier stream_writer.writestream;
3744 ++@@
3745 ++ struct file_operations fops = {
3746 ++ .open = openfunc,
3747 ++ .read = readstream,
3748 ++ .write = writestream,
3749 ++ };
3750 ++
3751 ++@ report_rw depends on report @
3752 ++identifier fops_rw.openfunc;
3753 ++position p1;
3754 ++@@
3755 ++ openfunc(...) {
3756 ++ <...
3757 ++ nonseekable_open@p1
3758 ++ ...>
3759 ++ }
3760 ++
3761 ++@ script:python depends on report && reader_blocks @
3762 ++fops << fops0.fops;
3763 ++p << report_rw.p1;
3764 ++@@
3765 ++coccilib.report.print_report(p[0],
3766 ++ "ERROR: %s: .read() can deadlock .write(); change nonseekable_open -> stream_open to fix." % (fops,))
3767 ++
3768 ++@ script:python depends on report && !reader_blocks @
3769 ++fops << fops0.fops;
3770 ++p << report_rw.p1;
3771 ++@@
3772 ++coccilib.report.print_report(p[0],
3773 ++ "WARNING: %s: .read() and .write() have stream semantic; safe to change nonseekable_open -> stream_open." % (fops,))
3774 ++
3775 ++
3776 ++@ explain_rw_deadlocked depends on explain && reader_blocks @
3777 ++identifier fops_rw.openfunc;
3778 ++@@
3779 ++ openfunc(...) {
3780 ++ <...
3781 ++- nonseekable_open
3782 +++ nonseekable_open /* read & write (was deadlock) */
3783 ++ ...>
3784 ++ }
3785 ++
3786 ++
3787 ++@ explain_rw_nodeadlock depends on explain && !reader_blocks @
3788 ++identifier fops_rw.openfunc;
3789 ++@@
3790 ++ openfunc(...) {
3791 ++ <...
3792 ++- nonseekable_open
3793 +++ nonseekable_open /* read & write (no direct deadlock) */
3794 ++ ...>
3795 ++ }
3796 ++
3797 ++@ patch_rw depends on patch @
3798 ++identifier fops_rw.openfunc;
3799 ++@@
3800 ++ openfunc(...) {
3801 ++ <...
3802 ++- nonseekable_open
3803 +++ stream_open
3804 ++ ...>
3805 ++ }
3806 ++
3807 ++
3808 ++// read, but not write
3809 ++@ fops_r depends on maybe_stream && !has_write @
3810 ++identifier fops0.fops, openfunc;
3811 ++identifier stream_reader.readstream;
3812 ++@@
3813 ++ struct file_operations fops = {
3814 ++ .open = openfunc,
3815 ++ .read = readstream,
3816 ++ };
3817 ++
3818 ++@ report_r depends on report @
3819 ++identifier fops_r.openfunc;
3820 ++position p1;
3821 ++@@
3822 ++ openfunc(...) {
3823 ++ <...
3824 ++ nonseekable_open@p1
3825 ++ ...>
3826 ++ }
3827 ++
3828 ++@ script:python depends on report @
3829 ++fops << fops0.fops;
3830 ++p << report_r.p1;
3831 ++@@
3832 ++coccilib.report.print_report(p[0],
3833 ++ "WARNING: %s: .read() has stream semantic; safe to change nonseekable_open -> stream_open." % (fops,))
3834 ++
3835 ++@ explain_r depends on explain @
3836 ++identifier fops_r.openfunc;
3837 ++@@
3838 ++ openfunc(...) {
3839 ++ <...
3840 ++- nonseekable_open
3841 +++ nonseekable_open /* read only */
3842 ++ ...>
3843 ++ }
3844 ++
3845 ++@ patch_r depends on patch @
3846 ++identifier fops_r.openfunc;
3847 ++@@
3848 ++ openfunc(...) {
3849 ++ <...
3850 ++- nonseekable_open
3851 +++ stream_open
3852 ++ ...>
3853 ++ }
3854 ++
3855 ++
3856 ++// write, but not read
3857 ++@ fops_w depends on maybe_stream && !has_read @
3858 ++identifier fops0.fops, openfunc;
3859 ++identifier stream_writer.writestream;
3860 ++@@
3861 ++ struct file_operations fops = {
3862 ++ .open = openfunc,
3863 ++ .write = writestream,
3864 ++ };
3865 ++
3866 ++@ report_w depends on report @
3867 ++identifier fops_w.openfunc;
3868 ++position p1;
3869 ++@@
3870 ++ openfunc(...) {
3871 ++ <...
3872 ++ nonseekable_open@p1
3873 ++ ...>
3874 ++ }
3875 ++
3876 ++@ script:python depends on report @
3877 ++fops << fops0.fops;
3878 ++p << report_w.p1;
3879 ++@@
3880 ++coccilib.report.print_report(p[0],
3881 ++ "WARNING: %s: .write() has stream semantic; safe to change nonseekable_open -> stream_open." % (fops,))
3882 ++
3883 ++@ explain_w depends on explain @
3884 ++identifier fops_w.openfunc;
3885 ++@@
3886 ++ openfunc(...) {
3887 ++ <...
3888 ++- nonseekable_open
3889 +++ nonseekable_open /* write only */
3890 ++ ...>
3891 ++ }
3892 ++
3893 ++@ patch_w depends on patch @
3894 ++identifier fops_w.openfunc;
3895 ++@@
3896 ++ openfunc(...) {
3897 ++ <...
3898 ++- nonseekable_open
3899 +++ stream_open
3900 ++ ...>
3901 ++ }
3902 ++
3903 ++
3904 ++// no read, no write - don't change anything
3905 +diff --git a/security/selinux/avc.c b/security/selinux/avc.c
3906 +index 635e5c1e3e48..5de18a6d5c3f 100644
3907 +--- a/security/selinux/avc.c
3908 ++++ b/security/selinux/avc.c
3909 +@@ -838,6 +838,7 @@ out:
3910 + * @ssid,@tsid,@tclass : identifier of an AVC entry
3911 + * @seqno : sequence number when decision was made
3912 + * @xpd: extended_perms_decision to be added to the node
3913 ++ * @flags: the AVC_* flags, e.g. AVC_NONBLOCKING, AVC_EXTENDED_PERMS, or 0.
3914 + *
3915 + * if a valid AVC entry doesn't exist,this function returns -ENOENT.
3916 + * if kmalloc() called internal returns NULL, this function returns -ENOMEM.
3917 +@@ -856,6 +857,23 @@ static int avc_update_node(struct selinux_avc *avc,
3918 + struct hlist_head *head;
3919 + spinlock_t *lock;
3920 +
3921 ++ /*
3922 ++ * If we are in a non-blocking code path, e.g. VFS RCU walk,
3923 ++ * then we must not add permissions to a cache entry
3924 ++ * because we cannot safely audit the denial. Otherwise,
3925 ++ * during the subsequent blocking retry (e.g. VFS ref walk), we
3926 ++ * will find the permissions already granted in the cache entry
3927 ++ * and won't audit anything at all, leading to silent denials in
3928 ++ * permissive mode that only appear when in enforcing mode.
3929 ++ *
3930 ++ * See the corresponding handling in slow_avc_audit(), and the
3931 ++ * logic in selinux_inode_follow_link and selinux_inode_permission
3932 ++ * for the VFS MAY_NOT_BLOCK flag, which is transliterated into
3933 ++ * AVC_NONBLOCKING for avc_has_perm_noaudit().
3934 ++ */
3935 ++ if (flags & AVC_NONBLOCKING)
3936 ++ return 0;
3937 ++
3938 + node = avc_alloc_node(avc);
3939 + if (!node) {
3940 + rc = -ENOMEM;
3941 +@@ -1115,7 +1133,7 @@ decision:
3942 + * @tsid: target security identifier
3943 + * @tclass: target security class
3944 + * @requested: requested permissions, interpreted based on @tclass
3945 +- * @flags: AVC_STRICT or 0
3946 ++ * @flags: AVC_STRICT, AVC_NONBLOCKING, or 0
3947 + * @avd: access vector decisions
3948 + *
3949 + * Check the AVC to determine whether the @requested permissions are granted
3950 +@@ -1199,7 +1217,8 @@ int avc_has_perm_flags(struct selinux_state *state,
3951 + struct av_decision avd;
3952 + int rc, rc2;
3953 +
3954 +- rc = avc_has_perm_noaudit(state, ssid, tsid, tclass, requested, 0,
3955 ++ rc = avc_has_perm_noaudit(state, ssid, tsid, tclass, requested,
3956 ++ (flags & MAY_NOT_BLOCK) ? AVC_NONBLOCKING : 0,
3957 + &avd);
3958 +
3959 + rc2 = avc_audit(state, ssid, tsid, tclass, requested, &avd, rc,
3960 +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
3961 +index 07b11b5aaf1f..b005283f0090 100644
3962 +--- a/security/selinux/hooks.c
3963 ++++ b/security/selinux/hooks.c
3964 +@@ -534,16 +534,10 @@ static int may_context_mount_inode_relabel(u32 sid,
3965 + return rc;
3966 + }
3967 +
3968 +-static int selinux_is_sblabel_mnt(struct super_block *sb)
3969 ++static int selinux_is_genfs_special_handling(struct super_block *sb)
3970 + {
3971 +- struct superblock_security_struct *sbsec = sb->s_security;
3972 +-
3973 +- return sbsec->behavior == SECURITY_FS_USE_XATTR ||
3974 +- sbsec->behavior == SECURITY_FS_USE_TRANS ||
3975 +- sbsec->behavior == SECURITY_FS_USE_TASK ||
3976 +- sbsec->behavior == SECURITY_FS_USE_NATIVE ||
3977 +- /* Special handling. Genfs but also in-core setxattr handler */
3978 +- !strcmp(sb->s_type->name, "sysfs") ||
3979 ++ /* Special handling. Genfs but also in-core setxattr handler */
3980 ++ return !strcmp(sb->s_type->name, "sysfs") ||
3981 + !strcmp(sb->s_type->name, "pstore") ||
3982 + !strcmp(sb->s_type->name, "debugfs") ||
3983 + !strcmp(sb->s_type->name, "tracefs") ||
3984 +@@ -553,6 +547,34 @@ static int selinux_is_sblabel_mnt(struct super_block *sb)
3985 + !strcmp(sb->s_type->name, "cgroup2")));
3986 + }
3987 +
3988 ++static int selinux_is_sblabel_mnt(struct super_block *sb)
3989 ++{
3990 ++ struct superblock_security_struct *sbsec = sb->s_security;
3991 ++
3992 ++ /*
3993 ++ * IMPORTANT: Double-check logic in this function when adding a new
3994 ++ * SECURITY_FS_USE_* definition!
3995 ++ */
3996 ++ BUILD_BUG_ON(SECURITY_FS_USE_MAX != 7);
3997 ++
3998 ++ switch (sbsec->behavior) {
3999 ++ case SECURITY_FS_USE_XATTR:
4000 ++ case SECURITY_FS_USE_TRANS:
4001 ++ case SECURITY_FS_USE_TASK:
4002 ++ case SECURITY_FS_USE_NATIVE:
4003 ++ return 1;
4004 ++
4005 ++ case SECURITY_FS_USE_GENFS:
4006 ++ return selinux_is_genfs_special_handling(sb);
4007 ++
4008 ++ /* Never allow relabeling on context mounts */
4009 ++ case SECURITY_FS_USE_MNTPOINT:
4010 ++ case SECURITY_FS_USE_NONE:
4011 ++ default:
4012 ++ return 0;
4013 ++ }
4014 ++}
4015 ++
4016 + static int sb_finish_set_opts(struct super_block *sb)
4017 + {
4018 + struct superblock_security_struct *sbsec = sb->s_security;
4019 +@@ -2985,7 +3007,9 @@ static int selinux_inode_permission(struct inode *inode, int mask)
4020 + return PTR_ERR(isec);
4021 +
4022 + rc = avc_has_perm_noaudit(&selinux_state,
4023 +- sid, isec->sid, isec->sclass, perms, 0, &avd);
4024 ++ sid, isec->sid, isec->sclass, perms,
4025 ++ (flags & MAY_NOT_BLOCK) ? AVC_NONBLOCKING : 0,
4026 ++ &avd);
4027 + audited = avc_audit_required(perms, &avd, rc,
4028 + from_access ? FILE__AUDIT_ACCESS : 0,
4029 + &denied);
4030 +diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
4031 +index ef899bcfd2cb..74ea50977c20 100644
4032 +--- a/security/selinux/include/avc.h
4033 ++++ b/security/selinux/include/avc.h
4034 +@@ -142,6 +142,7 @@ static inline int avc_audit(struct selinux_state *state,
4035 +
4036 + #define AVC_STRICT 1 /* Ignore permissive mode. */
4037 + #define AVC_EXTENDED_PERMS 2 /* update extended permissions */
4038 ++#define AVC_NONBLOCKING 4 /* non blocking */
4039 + int avc_has_perm_noaudit(struct selinux_state *state,
4040 + u32 ssid, u32 tsid,
4041 + u16 tclass, u32 requested,
4042 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4043 +index a9f69c3a3e0b..5ce28b4f0218 100644
4044 +--- a/sound/pci/hda/patch_realtek.c
4045 ++++ b/sound/pci/hda/patch_realtek.c
4046 +@@ -5448,6 +5448,8 @@ static void alc274_fixup_bind_dacs(struct hda_codec *codec,
4047 + return;
4048 +
4049 + spec->gen.preferred_dacs = preferred_pairs;
4050 ++ spec->gen.auto_mute_via_amp = 1;
4051 ++ codec->power_save_node = 0;
4052 + }
4053 +
4054 + /* The DAC of NID 0x3 will introduce click/pop noise on headphones, so invalidate it */
4055 +@@ -7266,6 +7268,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
4056 + {0x21, 0x02211020}),
4057 + SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
4058 + {0x21, 0x02211020}),
4059 ++ SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
4060 ++ {0x12, 0x40000000},
4061 ++ {0x14, 0x90170110},
4062 ++ {0x21, 0x02211020}),
4063 + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
4064 + {0x14, 0x90170110},
4065 + {0x21, 0x02211020}),
4066 +@@ -7539,6 +7545,13 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
4067 + {0x12, 0x90a60130},
4068 + {0x17, 0x90170110},
4069 + {0x21, 0x04211020}),
4070 ++ SND_HDA_PIN_QUIRK(0x10ec0295, 0x1043, "ASUS", ALC294_FIXUP_ASUS_SPK,
4071 ++ {0x12, 0x90a60130},
4072 ++ {0x17, 0x90170110},
4073 ++ {0x21, 0x03211020}),
4074 ++ SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
4075 ++ {0x14, 0x90170110},
4076 ++ {0x21, 0x04211020}),
4077 + SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
4078 + ALC295_STANDARD_PINS,
4079 + {0x17, 0x21014020},
4080 +diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
4081 +index 1dd291cebe67..0600e4404f90 100644
4082 +--- a/sound/soc/codecs/wm_adsp.c
4083 ++++ b/sound/soc/codecs/wm_adsp.c
4084 +@@ -3443,8 +3443,6 @@ int wm_adsp_compr_trigger(struct snd_compr_stream *stream, int cmd)
4085 + }
4086 + }
4087 +
4088 +- wm_adsp_buffer_clear(compr->buf);
4089 +-
4090 + /* Trigger the IRQ at one fragment of data */
4091 + ret = wm_adsp_buffer_write(compr->buf,
4092 + HOST_BUFFER_FIELD(high_water_mark),
4093 +@@ -3456,6 +3454,8 @@ int wm_adsp_compr_trigger(struct snd_compr_stream *stream, int cmd)
4094 + }
4095 + break;
4096 + case SNDRV_PCM_TRIGGER_STOP:
4097 ++ if (wm_adsp_compr_attached(compr))
4098 ++ wm_adsp_buffer_clear(compr->buf);
4099 + break;
4100 + default:
4101 + ret = -EINVAL;
4102 +diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
4103 +index e528995668b7..0ed844f2ad01 100644
4104 +--- a/sound/soc/intel/boards/bytcr_rt5651.c
4105 ++++ b/sound/soc/intel/boards/bytcr_rt5651.c
4106 +@@ -266,7 +266,7 @@ static const struct snd_soc_dapm_route byt_rt5651_audio_map[] = {
4107 + static const struct snd_soc_dapm_route byt_rt5651_intmic_dmic_map[] = {
4108 + {"DMIC L1", NULL, "Internal Mic"},
4109 + {"DMIC R1", NULL, "Internal Mic"},
4110 +- {"IN3P", NULL, "Headset Mic"},
4111 ++ {"IN2P", NULL, "Headset Mic"},
4112 + };
4113 +
4114 + static const struct snd_soc_dapm_route byt_rt5651_intmic_in1_map[] = {
4115 +diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c
4116 +index 7cda60188f41..af19010b9d88 100644
4117 +--- a/sound/soc/sh/rcar/gen.c
4118 ++++ b/sound/soc/sh/rcar/gen.c
4119 +@@ -255,6 +255,30 @@ static int rsnd_gen2_probe(struct rsnd_priv *priv)
4120 + RSND_GEN_M_REG(SSI_MODE, 0xc, 0x80),
4121 + RSND_GEN_M_REG(SSI_CTRL, 0x10, 0x80),
4122 + RSND_GEN_M_REG(SSI_INT_ENABLE, 0x18, 0x80),
4123 ++ RSND_GEN_S_REG(SSI9_BUSIF0_MODE, 0x48c),
4124 ++ RSND_GEN_S_REG(SSI9_BUSIF0_ADINR, 0x484),
4125 ++ RSND_GEN_S_REG(SSI9_BUSIF0_DALIGN, 0x488),
4126 ++ RSND_GEN_S_REG(SSI9_BUSIF1_MODE, 0x4a0),
4127 ++ RSND_GEN_S_REG(SSI9_BUSIF1_ADINR, 0x4a4),
4128 ++ RSND_GEN_S_REG(SSI9_BUSIF1_DALIGN, 0x4a8),
4129 ++ RSND_GEN_S_REG(SSI9_BUSIF2_MODE, 0x4c0),
4130 ++ RSND_GEN_S_REG(SSI9_BUSIF2_ADINR, 0x4c4),
4131 ++ RSND_GEN_S_REG(SSI9_BUSIF2_DALIGN, 0x4c8),
4132 ++ RSND_GEN_S_REG(SSI9_BUSIF3_MODE, 0x4e0),
4133 ++ RSND_GEN_S_REG(SSI9_BUSIF3_ADINR, 0x4e4),
4134 ++ RSND_GEN_S_REG(SSI9_BUSIF3_DALIGN, 0x4e8),
4135 ++ RSND_GEN_S_REG(SSI9_BUSIF4_MODE, 0xd80),
4136 ++ RSND_GEN_S_REG(SSI9_BUSIF4_ADINR, 0xd84),
4137 ++ RSND_GEN_S_REG(SSI9_BUSIF4_DALIGN, 0xd88),
4138 ++ RSND_GEN_S_REG(SSI9_BUSIF5_MODE, 0xda0),
4139 ++ RSND_GEN_S_REG(SSI9_BUSIF5_ADINR, 0xda4),
4140 ++ RSND_GEN_S_REG(SSI9_BUSIF5_DALIGN, 0xda8),
4141 ++ RSND_GEN_S_REG(SSI9_BUSIF6_MODE, 0xdc0),
4142 ++ RSND_GEN_S_REG(SSI9_BUSIF6_ADINR, 0xdc4),
4143 ++ RSND_GEN_S_REG(SSI9_BUSIF6_DALIGN, 0xdc8),
4144 ++ RSND_GEN_S_REG(SSI9_BUSIF7_MODE, 0xde0),
4145 ++ RSND_GEN_S_REG(SSI9_BUSIF7_ADINR, 0xde4),
4146 ++ RSND_GEN_S_REG(SSI9_BUSIF7_DALIGN, 0xde8),
4147 + };
4148 +
4149 + static const struct rsnd_regmap_field_conf conf_scu[] = {
4150 +diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
4151 +index 605e4b934982..90625c57847b 100644
4152 +--- a/sound/soc/sh/rcar/rsnd.h
4153 ++++ b/sound/soc/sh/rcar/rsnd.h
4154 +@@ -191,6 +191,30 @@ enum rsnd_reg {
4155 + SSI_SYS_STATUS7,
4156 + HDMI0_SEL,
4157 + HDMI1_SEL,
4158 ++ SSI9_BUSIF0_MODE,
4159 ++ SSI9_BUSIF1_MODE,
4160 ++ SSI9_BUSIF2_MODE,
4161 ++ SSI9_BUSIF3_MODE,
4162 ++ SSI9_BUSIF4_MODE,
4163 ++ SSI9_BUSIF5_MODE,
4164 ++ SSI9_BUSIF6_MODE,
4165 ++ SSI9_BUSIF7_MODE,
4166 ++ SSI9_BUSIF0_ADINR,
4167 ++ SSI9_BUSIF1_ADINR,
4168 ++ SSI9_BUSIF2_ADINR,
4169 ++ SSI9_BUSIF3_ADINR,
4170 ++ SSI9_BUSIF4_ADINR,
4171 ++ SSI9_BUSIF5_ADINR,
4172 ++ SSI9_BUSIF6_ADINR,
4173 ++ SSI9_BUSIF7_ADINR,
4174 ++ SSI9_BUSIF0_DALIGN,
4175 ++ SSI9_BUSIF1_DALIGN,
4176 ++ SSI9_BUSIF2_DALIGN,
4177 ++ SSI9_BUSIF3_DALIGN,
4178 ++ SSI9_BUSIF4_DALIGN,
4179 ++ SSI9_BUSIF5_DALIGN,
4180 ++ SSI9_BUSIF6_DALIGN,
4181 ++ SSI9_BUSIF7_DALIGN,
4182 +
4183 + /* SSI */
4184 + SSICR,
4185 +@@ -209,6 +233,9 @@ enum rsnd_reg {
4186 + #define SSI_BUSIF_MODE(i) (SSI_BUSIF0_MODE + (i))
4187 + #define SSI_BUSIF_ADINR(i) (SSI_BUSIF0_ADINR + (i))
4188 + #define SSI_BUSIF_DALIGN(i) (SSI_BUSIF0_DALIGN + (i))
4189 ++#define SSI9_BUSIF_MODE(i) (SSI9_BUSIF0_MODE + (i))
4190 ++#define SSI9_BUSIF_ADINR(i) (SSI9_BUSIF0_ADINR + (i))
4191 ++#define SSI9_BUSIF_DALIGN(i) (SSI9_BUSIF0_DALIGN + (i))
4192 + #define SSI_SYS_STATUS(i) (SSI_SYS_STATUS0 + (i))
4193 +
4194 +
4195 +diff --git a/sound/soc/sh/rcar/ssiu.c b/sound/soc/sh/rcar/ssiu.c
4196 +index c74991dd18ab..2347f3404c06 100644
4197 +--- a/sound/soc/sh/rcar/ssiu.c
4198 ++++ b/sound/soc/sh/rcar/ssiu.c
4199 +@@ -181,28 +181,26 @@ static int rsnd_ssiu_init_gen2(struct rsnd_mod *mod,
4200 + if (rsnd_ssi_use_busif(io)) {
4201 + int id = rsnd_mod_id(mod);
4202 + int busif = rsnd_mod_id_sub(mod);
4203 ++ enum rsnd_reg adinr_reg, mode_reg, dalign_reg;
4204 +
4205 +- /*
4206 +- * FIXME
4207 +- *
4208 +- * We can't support SSI9-4/5/6/7, because its address is
4209 +- * out of calculation rule
4210 +- */
4211 + if ((id == 9) && (busif >= 4)) {
4212 +- struct device *dev = rsnd_priv_to_dev(priv);
4213 +-
4214 +- dev_err(dev, "This driver doesn't support SSI%d-%d, so far",
4215 +- id, busif);
4216 ++ adinr_reg = SSI9_BUSIF_ADINR(busif);
4217 ++ mode_reg = SSI9_BUSIF_MODE(busif);
4218 ++ dalign_reg = SSI9_BUSIF_DALIGN(busif);
4219 ++ } else {
4220 ++ adinr_reg = SSI_BUSIF_ADINR(busif);
4221 ++ mode_reg = SSI_BUSIF_MODE(busif);
4222 ++ dalign_reg = SSI_BUSIF_DALIGN(busif);
4223 + }
4224 +
4225 +- rsnd_mod_write(mod, SSI_BUSIF_ADINR(busif),
4226 ++ rsnd_mod_write(mod, adinr_reg,
4227 + rsnd_get_adinr_bit(mod, io) |
4228 + (rsnd_io_is_play(io) ?
4229 + rsnd_runtime_channel_after_ctu(io) :
4230 + rsnd_runtime_channel_original(io)));
4231 +- rsnd_mod_write(mod, SSI_BUSIF_MODE(busif),
4232 ++ rsnd_mod_write(mod, mode_reg,
4233 + rsnd_get_busif_shift(io, mod) | 1);
4234 +- rsnd_mod_write(mod, SSI_BUSIF_DALIGN(busif),
4235 ++ rsnd_mod_write(mod, dalign_reg,
4236 + rsnd_get_dalign(mod, io));
4237 + }
4238 +
4239 +diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
4240 +index 03f36e534050..0c1dd6bd67ab 100644
4241 +--- a/sound/soc/soc-pcm.c
4242 ++++ b/sound/soc/soc-pcm.c
4243 +@@ -1895,10 +1895,15 @@ static int dpcm_apply_symmetry(struct snd_pcm_substream *fe_substream,
4244 + struct snd_soc_pcm_runtime *be = dpcm->be;
4245 + struct snd_pcm_substream *be_substream =
4246 + snd_soc_dpcm_get_substream(be, stream);
4247 +- struct snd_soc_pcm_runtime *rtd = be_substream->private_data;
4248 ++ struct snd_soc_pcm_runtime *rtd;
4249 + struct snd_soc_dai *codec_dai;
4250 + int i;
4251 +
4252 ++ /* A backend may not have the requested substream */
4253 ++ if (!be_substream)
4254 ++ continue;
4255 ++
4256 ++ rtd = be_substream->private_data;
4257 + if (rtd->dai_link->be_hw_params_fixup)
4258 + continue;
4259 +
4260 +diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c
4261 +index d4825700b63f..29a131e0569e 100644
4262 +--- a/sound/soc/stm/stm32_sai_sub.c
4263 ++++ b/sound/soc/stm/stm32_sai_sub.c
4264 +@@ -1394,7 +1394,6 @@ static int stm32_sai_sub_dais_init(struct platform_device *pdev,
4265 + if (!sai->cpu_dai_drv)
4266 + return -ENOMEM;
4267 +
4268 +- sai->cpu_dai_drv->name = dev_name(&pdev->dev);
4269 + if (STM_SAI_IS_PLAYBACK(sai)) {
4270 + memcpy(sai->cpu_dai_drv, &stm32_sai_playback_dai,
4271 + sizeof(stm32_sai_playback_dai));
4272 +@@ -1404,6 +1403,7 @@ static int stm32_sai_sub_dais_init(struct platform_device *pdev,
4273 + sizeof(stm32_sai_capture_dai));
4274 + sai->cpu_dai_drv->capture.stream_name = sai->cpu_dai_drv->name;
4275 + }
4276 ++ sai->cpu_dai_drv->name = dev_name(&pdev->dev);
4277 +
4278 + return 0;
4279 + }
4280 +diff --git a/sound/soc/sunxi/sun50i-codec-analog.c b/sound/soc/sunxi/sun50i-codec-analog.c
4281 +index df1fed0aa001..d105c90c3706 100644
4282 +--- a/sound/soc/sunxi/sun50i-codec-analog.c
4283 ++++ b/sound/soc/sunxi/sun50i-codec-analog.c
4284 +@@ -274,7 +274,7 @@ static const struct snd_soc_dapm_widget sun50i_a64_codec_widgets[] = {
4285 + * stream widgets at the card level.
4286 + */
4287 +
4288 +- SND_SOC_DAPM_REGULATOR_SUPPLY("hpvcc", 0, 0),
4289 ++ SND_SOC_DAPM_REGULATOR_SUPPLY("cpvdd", 0, 0),
4290 + SND_SOC_DAPM_MUX("Headphone Source Playback Route",
4291 + SND_SOC_NOPM, 0, 0, sun50i_codec_hp_src),
4292 + SND_SOC_DAPM_OUT_DRV("Headphone Amp", SUN50I_ADDA_HP_CTRL,
4293 +@@ -362,7 +362,7 @@ static const struct snd_soc_dapm_route sun50i_a64_codec_routes[] = {
4294 + { "Headphone Source Playback Route", "Mixer", "Left Mixer" },
4295 + { "Headphone Source Playback Route", "Mixer", "Right Mixer" },
4296 + { "Headphone Amp", NULL, "Headphone Source Playback Route" },
4297 +- { "Headphone Amp", NULL, "hpvcc" },
4298 ++ { "Headphone Amp", NULL, "cpvdd" },
4299 + { "HP", NULL, "Headphone Amp" },
4300 +
4301 + /* Microphone Routes */
4302 +diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
4303 +index 6d7a81306f8a..1c2509104924 100644
4304 +--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
4305 ++++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
4306 +@@ -2166,11 +2166,14 @@ TEST(detect_seccomp_filter_flags)
4307 + SECCOMP_FILTER_FLAG_LOG,
4308 + SECCOMP_FILTER_FLAG_SPEC_ALLOW,
4309 + SECCOMP_FILTER_FLAG_NEW_LISTENER };
4310 +- unsigned int flag, all_flags;
4311 ++ unsigned int exclusive[] = {
4312 ++ SECCOMP_FILTER_FLAG_TSYNC,
4313 ++ SECCOMP_FILTER_FLAG_NEW_LISTENER };
4314 ++ unsigned int flag, all_flags, exclusive_mask;
4315 + int i;
4316 + long ret;
4317 +
4318 +- /* Test detection of known-good filter flags */
4319 ++ /* Test detection of individual known-good filter flags */
4320 + for (i = 0, all_flags = 0; i < ARRAY_SIZE(flags); i++) {
4321 + int bits = 0;
4322 +
4323 +@@ -2197,16 +2200,29 @@ TEST(detect_seccomp_filter_flags)
4324 + all_flags |= flag;
4325 + }
4326 +
4327 +- /* Test detection of all known-good filter flags */
4328 +- ret = seccomp(SECCOMP_SET_MODE_FILTER, all_flags, NULL);
4329 +- EXPECT_EQ(-1, ret);
4330 +- EXPECT_EQ(EFAULT, errno) {
4331 +- TH_LOG("Failed to detect that all known-good filter flags (0x%X) are supported!",
4332 +- all_flags);
4333 ++ /*
4334 ++ * Test detection of all known-good filter flags combined. But
4335 ++ * for the exclusive flags we need to mask them out and try them
4336 ++ * individually for the "all flags" testing.
4337 ++ */
4338 ++ exclusive_mask = 0;
4339 ++ for (i = 0; i < ARRAY_SIZE(exclusive); i++)
4340 ++ exclusive_mask |= exclusive[i];
4341 ++ for (i = 0; i < ARRAY_SIZE(exclusive); i++) {
4342 ++ flag = all_flags & ~exclusive_mask;
4343 ++ flag |= exclusive[i];
4344 ++
4345 ++ ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
4346 ++ EXPECT_EQ(-1, ret);
4347 ++ EXPECT_EQ(EFAULT, errno) {
4348 ++ TH_LOG("Failed to detect that all known-good filter flags (0x%X) are supported!",
4349 ++ flag);
4350 ++ }
4351 + }
4352 +
4353 +- /* Test detection of an unknown filter flag */
4354 ++ /* Test detection of an unknown filter flags, without exclusives. */
4355 + flag = -1;
4356 ++ flag &= ~exclusive_mask;
4357 + ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
4358 + EXPECT_EQ(-1, ret);
4359 + EXPECT_EQ(EINVAL, errno) {