Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Tue, 27 Nov 2018 16:16:26
Message-Id: 1543335357.82476154d4461d52b9ba7f9b16db179d440f4c05.mpagano@gentoo
1 commit: 82476154d4461d52b9ba7f9b16db179d440f4c05
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Tue Nov 27 16:15:57 2018 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Tue Nov 27 16:15:57 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=82476154
7
8 proj/linux-patches: Linux patch 4.19.5
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1004_linux-4.19.5.patch | 4298 +++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 4302 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index f74e5e3..c0b6ddf 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -59,6 +59,10 @@ Patch: 1003_linux-4.19.4.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.19.4
23
24 +Patch: 1004_linux-4.19.5.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.19.5
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1004_linux-4.19.5.patch b/1004_linux-4.19.5.patch
33 new file mode 100644
34 index 0000000..008e859
35 --- /dev/null
36 +++ b/1004_linux-4.19.5.patch
37 @@ -0,0 +1,4298 @@
38 +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
39 +index 92eb1f42240d..fa4eec22816d 100644
40 +--- a/Documentation/admin-guide/kernel-parameters.txt
41 ++++ b/Documentation/admin-guide/kernel-parameters.txt
42 +@@ -1063,7 +1063,7 @@
43 + earlyprintk=serial[,0x...[,baudrate]]
44 + earlyprintk=ttySn[,baudrate]
45 + earlyprintk=dbgp[debugController#]
46 +- earlyprintk=pciserial,bus:device.function[,baudrate]
47 ++ earlyprintk=pciserial[,force],bus:device.function[,baudrate]
48 + earlyprintk=xdbc[xhciController#]
49 +
50 + earlyprintk is useful when the kernel crashes before
51 +@@ -1095,6 +1095,10 @@
52 +
53 + The sclp output can only be used on s390.
54 +
55 ++ The optional "force" to "pciserial" enables use of a
56 ++ PCI device even when its classcode is not of the
57 ++ UART class.
58 ++
59 + edac_report= [HW,EDAC] Control how to report EDAC event
60 + Format: {"on" | "off" | "force"}
61 + on: enable EDAC to report H/W event. May be overridden
62 +@@ -4683,6 +4687,8 @@
63 + prevent spurious wakeup);
64 + n = USB_QUIRK_DELAY_CTRL_MSG (Device needs a
65 + pause after every control message);
66 ++ o = USB_QUIRK_HUB_SLOW_RESET (Hub needs extra
67 ++ delay after resetting its port);
68 + Example: quirks=0781:5580:bk,0a5c:5834:gij
69 +
70 + usbhid.mousepoll=
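
As an illustration of the two options documented in the hunk above, the new pieces could be combined on a boot command line as sketched below. The PCI address 00:18.1 and the 2109:0813 vendor:product pair are hypothetical placeholders, not values taken from this patch:

	earlyprintk=pciserial,force,00:18.1,115200 usbcore.quirks=2109:0813:o

Here "force" lets the early console use the device at 00:18.1 even though its PCI class code is not the UART class, and the new "o" flag asks usbcore to apply USB_QUIRK_HUB_SLOW_RESET (an extra delay after that hub's ports are reset).
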
71 +diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
72 +index 5432a96d31ff..05ef53d83a41 100644
73 +--- a/Documentation/x86/x86_64/mm.txt
74 ++++ b/Documentation/x86/x86_64/mm.txt
75 +@@ -4,8 +4,9 @@ Virtual memory map with 4 level page tables:
76 + 0000000000000000 - 00007fffffffffff (=47 bits) user space, different per mm
77 + hole caused by [47:63] sign extension
78 + ffff800000000000 - ffff87ffffffffff (=43 bits) guard hole, reserved for hypervisor
79 +-ffff880000000000 - ffffc7ffffffffff (=64 TB) direct mapping of all phys. memory
80 +-ffffc80000000000 - ffffc8ffffffffff (=40 bits) hole
81 ++ffff880000000000 - ffff887fffffffff (=39 bits) LDT remap for PTI
82 ++ffff888000000000 - ffffc87fffffffff (=64 TB) direct mapping of all phys. memory
83 ++ffffc88000000000 - ffffc8ffffffffff (=39 bits) hole
84 + ffffc90000000000 - ffffe8ffffffffff (=45 bits) vmalloc/ioremap space
85 + ffffe90000000000 - ffffe9ffffffffff (=40 bits) hole
86 + ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB)
87 +@@ -30,8 +31,9 @@ Virtual memory map with 5 level page tables:
88 + 0000000000000000 - 00ffffffffffffff (=56 bits) user space, different per mm
89 + hole caused by [56:63] sign extension
90 + ff00000000000000 - ff0fffffffffffff (=52 bits) guard hole, reserved for hypervisor
91 +-ff10000000000000 - ff8fffffffffffff (=55 bits) direct mapping of all phys. memory
92 +-ff90000000000000 - ff9fffffffffffff (=52 bits) LDT remap for PTI
93 ++ff10000000000000 - ff10ffffffffffff (=48 bits) LDT remap for PTI
94 ++ff11000000000000 - ff90ffffffffffff (=55 bits) direct mapping of all phys. memory
95 ++ff91000000000000 - ff9fffffffffffff (=3840 TB) hole
96 + ffa0000000000000 - ffd1ffffffffffff (=54 bits) vmalloc/ioremap space (12800 TB)
97 + ffd2000000000000 - ffd3ffffffffffff (=49 bits) hole
98 + ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB)
99 +diff --git a/Makefile b/Makefile
100 +index 1f3c7adeea63..a07830185bdf 100644
101 +--- a/Makefile
102 ++++ b/Makefile
103 +@@ -1,7 +1,7 @@
104 + # SPDX-License-Identifier: GPL-2.0
105 + VERSION = 4
106 + PATCHLEVEL = 19
107 +-SUBLEVEL = 4
108 ++SUBLEVEL = 5
109 + EXTRAVERSION =
110 + NAME = "People's Front"
111 +
112 +diff --git a/arch/arm/boot/dts/imx53-ppd.dts b/arch/arm/boot/dts/imx53-ppd.dts
113 +index cdb90bee7b4a..f202396e3f2a 100644
114 +--- a/arch/arm/boot/dts/imx53-ppd.dts
115 ++++ b/arch/arm/boot/dts/imx53-ppd.dts
116 +@@ -55,7 +55,7 @@
117 + };
118 +
119 + chosen {
120 +- stdout-path = "&uart1:115200n8";
121 ++ stdout-path = "serial0:115200n8";
122 + };
123 +
124 + memory@70000000 {
125 +diff --git a/arch/arm/boot/dts/imx6sll.dtsi b/arch/arm/boot/dts/imx6sll.dtsi
126 +index 000e6136a9d6..3e6ffaf5f104 100644
127 +--- a/arch/arm/boot/dts/imx6sll.dtsi
128 ++++ b/arch/arm/boot/dts/imx6sll.dtsi
129 +@@ -709,7 +709,7 @@
130 + i2c1: i2c@21a0000 {
131 + #address-cells = <1>;
132 + #size-cells = <0>;
133 +- compatible = "fs,imx6sll-i2c", "fsl,imx21-i2c";
134 ++ compatible = "fsl,imx6sll-i2c", "fsl,imx21-i2c";
135 + reg = <0x021a0000 0x4000>;
136 + interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
137 + clocks = <&clks IMX6SLL_CLK_I2C1>;
138 +diff --git a/arch/arm/boot/dts/vf610m4-colibri.dts b/arch/arm/boot/dts/vf610m4-colibri.dts
139 +index 41ec66a96990..ca6249558760 100644
140 +--- a/arch/arm/boot/dts/vf610m4-colibri.dts
141 ++++ b/arch/arm/boot/dts/vf610m4-colibri.dts
142 +@@ -50,8 +50,8 @@
143 + compatible = "fsl,vf610m4";
144 +
145 + chosen {
146 +- bootargs = "console=ttyLP2,115200 clk_ignore_unused init=/linuxrc rw";
147 +- stdout-path = "&uart2";
148 ++ bootargs = "clk_ignore_unused init=/linuxrc rw";
149 ++ stdout-path = "serial2:115200";
150 + };
151 +
152 + memory@8c000000 {
153 +diff --git a/arch/arm64/boot/dts/renesas/r8a7795.dtsi b/arch/arm64/boot/dts/renesas/r8a7795.dtsi
154 +index fb9d08ad7659..c87eed77de2c 100644
155 +--- a/arch/arm64/boot/dts/renesas/r8a7795.dtsi
156 ++++ b/arch/arm64/boot/dts/renesas/r8a7795.dtsi
157 +@@ -662,7 +662,7 @@
158 + clock-names = "fck", "brg_int", "scif_clk";
159 + dmas = <&dmac1 0x35>, <&dmac1 0x34>,
160 + <&dmac2 0x35>, <&dmac2 0x34>;
161 +- dma-names = "tx", "rx";
162 ++ dma-names = "tx", "rx", "tx", "rx";
163 + power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
164 + resets = <&cpg 518>;
165 + status = "disabled";
166 +diff --git a/arch/arm64/boot/dts/renesas/r8a77980-condor.dts b/arch/arm64/boot/dts/renesas/r8a77980-condor.dts
167 +index 9f25c407dfd7..e830b6162375 100644
168 +--- a/arch/arm64/boot/dts/renesas/r8a77980-condor.dts
169 ++++ b/arch/arm64/boot/dts/renesas/r8a77980-condor.dts
170 +@@ -15,7 +15,7 @@
171 +
172 + aliases {
173 + serial0 = &scif0;
174 +- ethernet0 = &avb;
175 ++ ethernet0 = &gether;
176 + };
177 +
178 + chosen {
179 +@@ -47,23 +47,6 @@
180 + };
181 + };
182 +
183 +-&avb {
184 +- pinctrl-0 = <&avb_pins>;
185 +- pinctrl-names = "default";
186 +-
187 +- phy-mode = "rgmii-id";
188 +- phy-handle = <&phy0>;
189 +- renesas,no-ether-link;
190 +- status = "okay";
191 +-
192 +- phy0: ethernet-phy@0 {
193 +- rxc-skew-ps = <1500>;
194 +- reg = <0>;
195 +- interrupt-parent = <&gpio1>;
196 +- interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
197 +- };
198 +-};
199 +-
200 + &canfd {
201 + pinctrl-0 = <&canfd0_pins>;
202 + pinctrl-names = "default";
203 +@@ -82,6 +65,23 @@
204 + clock-frequency = <32768>;
205 + };
206 +
207 ++&gether {
208 ++ pinctrl-0 = <&gether_pins>;
209 ++ pinctrl-names = "default";
210 ++
211 ++ phy-mode = "rgmii-id";
212 ++ phy-handle = <&phy0>;
213 ++ renesas,no-ether-link;
214 ++ status = "okay";
215 ++
216 ++ phy0: ethernet-phy@0 {
217 ++ rxc-skew-ps = <1500>;
218 ++ reg = <0>;
219 ++ interrupt-parent = <&gpio4>;
220 ++ interrupts = <23 IRQ_TYPE_LEVEL_LOW>;
221 ++ };
222 ++};
223 ++
224 + &i2c0 {
225 + pinctrl-0 = <&i2c0_pins>;
226 + pinctrl-names = "default";
227 +@@ -118,16 +118,17 @@
228 + };
229 +
230 + &pfc {
231 +- avb_pins: avb {
232 +- groups = "avb_mdio", "avb_rgmii";
233 +- function = "avb";
234 +- };
235 +-
236 + canfd0_pins: canfd0 {
237 + groups = "canfd0_data_a";
238 + function = "canfd0";
239 + };
240 +
241 ++ gether_pins: gether {
242 ++ groups = "gether_mdio_a", "gether_rgmii",
243 ++ "gether_txcrefclk", "gether_txcrefclk_mega";
244 ++ function = "gether";
245 ++ };
246 ++
247 + i2c0_pins: i2c0 {
248 + groups = "i2c0";
249 + function = "i2c0";
250 +diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
251 +index 9234013e759e..21a81b59a0cc 100644
252 +--- a/arch/arm64/include/asm/percpu.h
253 ++++ b/arch/arm64/include/asm/percpu.h
254 +@@ -96,6 +96,7 @@ static inline unsigned long __percpu_##op(void *ptr, \
255 + : [val] "Ir" (val)); \
256 + break; \
257 + default: \
258 ++ ret = 0; \
259 + BUILD_BUG(); \
260 + } \
261 + \
262 +@@ -125,6 +126,7 @@ static inline unsigned long __percpu_read(void *ptr, int size)
263 + ret = READ_ONCE(*(u64 *)ptr);
264 + break;
265 + default:
266 ++ ret = 0;
267 + BUILD_BUG();
268 + }
269 +
270 +@@ -194,6 +196,7 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
271 + : [val] "r" (val));
272 + break;
273 + default:
274 ++ ret = 0;
275 + BUILD_BUG();
276 + }
277 +
278 +diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
279 +index e78c3ef04d95..b5a367d4bba6 100644
280 +--- a/arch/arm64/kernel/probes/kprobes.c
281 ++++ b/arch/arm64/kernel/probes/kprobes.c
282 +@@ -23,7 +23,9 @@
283 + #include <linux/slab.h>
284 + #include <linux/stop_machine.h>
285 + #include <linux/sched/debug.h>
286 ++#include <linux/set_memory.h>
287 + #include <linux/stringify.h>
288 ++#include <linux/vmalloc.h>
289 + #include <asm/traps.h>
290 + #include <asm/ptrace.h>
291 + #include <asm/cacheflush.h>
292 +@@ -42,10 +44,21 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
293 + static void __kprobes
294 + post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
295 +
296 ++static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
297 ++{
298 ++ void *addrs[1];
299 ++ u32 insns[1];
300 ++
301 ++ addrs[0] = addr;
302 ++ insns[0] = opcode;
303 ++
304 ++ return aarch64_insn_patch_text(addrs, insns, 1);
305 ++}
306 ++
307 + static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
308 + {
309 + /* prepare insn slot */
310 +- p->ainsn.api.insn[0] = cpu_to_le32(p->opcode);
311 ++ patch_text(p->ainsn.api.insn, p->opcode);
312 +
313 + flush_icache_range((uintptr_t) (p->ainsn.api.insn),
314 + (uintptr_t) (p->ainsn.api.insn) +
315 +@@ -118,15 +131,15 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
316 + return 0;
317 + }
318 +
319 +-static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
320 ++void *alloc_insn_page(void)
321 + {
322 +- void *addrs[1];
323 +- u32 insns[1];
324 ++ void *page;
325 +
326 +- addrs[0] = (void *)addr;
327 +- insns[0] = (u32)opcode;
328 ++ page = vmalloc_exec(PAGE_SIZE);
329 ++ if (page)
330 ++ set_memory_ro((unsigned long)page, 1);
331 +
332 +- return aarch64_insn_patch_text(addrs, insns, 1);
333 ++ return page;
334 + }
335 +
336 + /* arm kprobe: install breakpoint in text */
337 +diff --git a/arch/mips/configs/cavium_octeon_defconfig b/arch/mips/configs/cavium_octeon_defconfig
338 +index 490b12af103c..c52d0efacd14 100644
339 +--- a/arch/mips/configs/cavium_octeon_defconfig
340 ++++ b/arch/mips/configs/cavium_octeon_defconfig
341 +@@ -140,6 +140,7 @@ CONFIG_RTC_CLASS=y
342 + CONFIG_RTC_DRV_DS1307=y
343 + CONFIG_STAGING=y
344 + CONFIG_OCTEON_ETHERNET=y
345 ++CONFIG_OCTEON_USB=y
346 + # CONFIG_IOMMU_SUPPORT is not set
347 + CONFIG_RAS=y
348 + CONFIG_EXT4_FS=y
349 +diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
350 +index 473cfc84e412..8c3e3e3c8be1 100644
351 +--- a/arch/riscv/include/asm/uaccess.h
352 ++++ b/arch/riscv/include/asm/uaccess.h
353 +@@ -400,13 +400,13 @@ extern unsigned long __must_check __asm_copy_from_user(void *to,
354 + static inline unsigned long
355 + raw_copy_from_user(void *to, const void __user *from, unsigned long n)
356 + {
357 +- return __asm_copy_to_user(to, from, n);
358 ++ return __asm_copy_from_user(to, from, n);
359 + }
360 +
361 + static inline unsigned long
362 + raw_copy_to_user(void __user *to, const void *from, unsigned long n)
363 + {
364 +- return __asm_copy_from_user(to, from, n);
365 ++ return __asm_copy_to_user(to, from, n);
366 + }
367 +
368 + extern long strncpy_from_user(char *dest, const char __user *src, long count);
369 +diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
370 +index 04609478d18b..b375c6c5ae7b 100644
371 +--- a/arch/s390/boot/compressed/Makefile
372 ++++ b/arch/s390/boot/compressed/Makefile
373 +@@ -20,7 +20,7 @@ KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR)
374 + OBJECTS := $(addprefix $(obj)/,$(obj-y))
375 +
376 + LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T
377 +-$(obj)/vmlinux: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS)
378 ++$(obj)/vmlinux: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS) FORCE
379 + $(call if_changed,ld)
380 +
381 + # extract required uncompressed vmlinux symbols and adjust them to reflect offsets inside vmlinux.bin
382 +@@ -51,17 +51,17 @@ suffix-$(CONFIG_KERNEL_LZMA) := .lzma
383 + suffix-$(CONFIG_KERNEL_LZO) := .lzo
384 + suffix-$(CONFIG_KERNEL_XZ) := .xz
385 +
386 +-$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y)
387 ++$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) FORCE
388 + $(call if_changed,gzip)
389 +-$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y)
390 ++$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) FORCE
391 + $(call if_changed,bzip2)
392 +-$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y)
393 ++$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y) FORCE
394 + $(call if_changed,lz4)
395 +-$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y)
396 ++$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) FORCE
397 + $(call if_changed,lzma)
398 +-$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y)
399 ++$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE
400 + $(call if_changed,lzo)
401 +-$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y)
402 ++$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) FORCE
403 + $(call if_changed,xzkern)
404 +
405 + LDFLAGS_piggy.o := -r --format binary --oformat $(LD_BFD) -T
406 +diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
407 +index 0717ee76885d..f1ab9420ccfb 100644
408 +--- a/arch/s390/include/asm/mmu_context.h
409 ++++ b/arch/s390/include/asm/mmu_context.h
410 +@@ -45,8 +45,6 @@ static inline int init_new_context(struct task_struct *tsk,
411 + mm->context.asce_limit = STACK_TOP_MAX;
412 + mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
413 + _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
414 +- /* pgd_alloc() did not account this pud */
415 +- mm_inc_nr_puds(mm);
416 + break;
417 + case -PAGE_SIZE:
418 + /* forked 5-level task, set new asce with new_mm->pgd */
419 +@@ -62,9 +60,6 @@ static inline int init_new_context(struct task_struct *tsk,
420 + /* forked 2-level compat task, set new asce with new mm->pgd */
421 + mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
422 + _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
423 +- /* pgd_alloc() did not account this pmd */
424 +- mm_inc_nr_pmds(mm);
425 +- mm_inc_nr_puds(mm);
426 + }
427 + crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
428 + return 0;
429 +diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
430 +index f0f9bcf94c03..5ee733720a57 100644
431 +--- a/arch/s390/include/asm/pgalloc.h
432 ++++ b/arch/s390/include/asm/pgalloc.h
433 +@@ -36,11 +36,11 @@ static inline void crst_table_init(unsigned long *crst, unsigned long entry)
434 +
435 + static inline unsigned long pgd_entry_type(struct mm_struct *mm)
436 + {
437 +- if (mm->context.asce_limit <= _REGION3_SIZE)
438 ++ if (mm_pmd_folded(mm))
439 + return _SEGMENT_ENTRY_EMPTY;
440 +- if (mm->context.asce_limit <= _REGION2_SIZE)
441 ++ if (mm_pud_folded(mm))
442 + return _REGION3_ENTRY_EMPTY;
443 +- if (mm->context.asce_limit <= _REGION1_SIZE)
444 ++ if (mm_p4d_folded(mm))
445 + return _REGION2_ENTRY_EMPTY;
446 + return _REGION1_ENTRY_EMPTY;
447 + }
448 +diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
449 +index 0e7cb0dc9c33..de05466ce50c 100644
450 +--- a/arch/s390/include/asm/pgtable.h
451 ++++ b/arch/s390/include/asm/pgtable.h
452 +@@ -485,6 +485,24 @@ static inline int is_module_addr(void *addr)
453 + _REGION_ENTRY_PROTECT | \
454 + _REGION_ENTRY_NOEXEC)
455 +
456 ++static inline bool mm_p4d_folded(struct mm_struct *mm)
457 ++{
458 ++ return mm->context.asce_limit <= _REGION1_SIZE;
459 ++}
460 ++#define mm_p4d_folded(mm) mm_p4d_folded(mm)
461 ++
462 ++static inline bool mm_pud_folded(struct mm_struct *mm)
463 ++{
464 ++ return mm->context.asce_limit <= _REGION2_SIZE;
465 ++}
466 ++#define mm_pud_folded(mm) mm_pud_folded(mm)
467 ++
468 ++static inline bool mm_pmd_folded(struct mm_struct *mm)
469 ++{
470 ++ return mm->context.asce_limit <= _REGION3_SIZE;
471 ++}
472 ++#define mm_pmd_folded(mm) mm_pmd_folded(mm)
473 ++
474 + static inline int mm_has_pgste(struct mm_struct *mm)
475 + {
476 + #ifdef CONFIG_PGSTE
477 +diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
478 +index 457b7ba0fbb6..b31c779cf581 100644
479 +--- a/arch/s390/include/asm/tlb.h
480 ++++ b/arch/s390/include/asm/tlb.h
481 +@@ -136,7 +136,7 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
482 + static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
483 + unsigned long address)
484 + {
485 +- if (tlb->mm->context.asce_limit <= _REGION3_SIZE)
486 ++ if (mm_pmd_folded(tlb->mm))
487 + return;
488 + pgtable_pmd_page_dtor(virt_to_page(pmd));
489 + tlb_remove_table(tlb, pmd);
490 +@@ -152,7 +152,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
491 + static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
492 + unsigned long address)
493 + {
494 +- if (tlb->mm->context.asce_limit <= _REGION1_SIZE)
495 ++ if (mm_p4d_folded(tlb->mm))
496 + return;
497 + tlb_remove_table(tlb, p4d);
498 + }
499 +@@ -167,7 +167,7 @@ static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
500 + static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
501 + unsigned long address)
502 + {
503 +- if (tlb->mm->context.asce_limit <= _REGION2_SIZE)
504 ++ if (mm_pud_folded(tlb->mm))
505 + return;
506 + tlb_remove_table(tlb, pud);
507 + }
508 +diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
509 +index cc085e2d2ce9..74091fd3101e 100644
510 +--- a/arch/s390/kernel/perf_cpum_cf.c
511 ++++ b/arch/s390/kernel/perf_cpum_cf.c
512 +@@ -373,7 +373,7 @@ static int __hw_perf_event_init(struct perf_event *event)
513 + return -ENOENT;
514 +
515 + if (ev > PERF_CPUM_CF_MAX_CTR)
516 +- return -EINVAL;
517 ++ return -ENOENT;
518 +
519 + /* Obtain the counter set to which the specified counter belongs */
520 + set = get_counter_set(ev);
521 +diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
522 +index c5c856f320bc..04dd3e2c3bd9 100644
523 +--- a/arch/s390/kernel/vdso32/Makefile
524 ++++ b/arch/s390/kernel/vdso32/Makefile
525 +@@ -36,7 +36,7 @@ UBSAN_SANITIZE := n
526 + $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
527 +
528 + # link rule for the .so file, .lds has to be first
529 +-$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32)
530 ++$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) FORCE
531 + $(call if_changed,vdso32ld)
532 +
533 + # strip rule for the .so file
534 +@@ -45,12 +45,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
535 + $(call if_changed,objcopy)
536 +
537 + # assembly rules for the .S files
538 +-$(obj-vdso32): %.o: %.S
539 ++$(obj-vdso32): %.o: %.S FORCE
540 + $(call if_changed_dep,vdso32as)
541 +
542 + # actual build commands
543 + quiet_cmd_vdso32ld = VDSO32L $@
544 +- cmd_vdso32ld = $(CC) $(c_flags) -Wl,-T $^ -o $@
545 ++ cmd_vdso32ld = $(CC) $(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@
546 + quiet_cmd_vdso32as = VDSO32A $@
547 + cmd_vdso32as = $(CC) $(a_flags) -c -o $@ $<
548 +
549 +diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
550 +index 15b1ceafc4c1..ddebc26cd949 100644
551 +--- a/arch/s390/kernel/vdso64/Makefile
552 ++++ b/arch/s390/kernel/vdso64/Makefile
553 +@@ -36,7 +36,7 @@ UBSAN_SANITIZE := n
554 + $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
555 +
556 + # link rule for the .so file, .lds has to be first
557 +-$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64)
558 ++$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) FORCE
559 + $(call if_changed,vdso64ld)
560 +
561 + # strip rule for the .so file
562 +@@ -45,12 +45,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
563 + $(call if_changed,objcopy)
564 +
565 + # assembly rules for the .S files
566 +-$(obj-vdso64): %.o: %.S
567 ++$(obj-vdso64): %.o: %.S FORCE
568 + $(call if_changed_dep,vdso64as)
569 +
570 + # actual build commands
571 + quiet_cmd_vdso64ld = VDSO64L $@
572 +- cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@
573 ++ cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@
574 + quiet_cmd_vdso64as = VDSO64A $@
575 + cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<
576 +
577 +diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
578 +index 76d89ee8b428..814f26520aa2 100644
579 +--- a/arch/s390/mm/pgalloc.c
580 ++++ b/arch/s390/mm/pgalloc.c
581 +@@ -101,6 +101,7 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
582 + mm->context.asce_limit = _REGION1_SIZE;
583 + mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
584 + _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
585 ++ mm_inc_nr_puds(mm);
586 + } else {
587 + crst_table_init(table, _REGION1_ENTRY_EMPTY);
588 + pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd);
589 +diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c
590 +index 5bd374491f94..6c151b42e65d 100644
591 +--- a/arch/s390/numa/numa.c
592 ++++ b/arch/s390/numa/numa.c
593 +@@ -54,6 +54,7 @@ int __node_distance(int a, int b)
594 + {
595 + return mode->distance ? mode->distance(a, b) : 0;
596 + }
597 ++EXPORT_SYMBOL(__node_distance);
598 +
599 + int numa_debug_enabled;
600 +
601 +diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
602 +index c94c3bd70ccd..df4a985716eb 100644
603 +--- a/arch/um/os-Linux/skas/process.c
604 ++++ b/arch/um/os-Linux/skas/process.c
605 +@@ -610,6 +610,11 @@ int start_idle_thread(void *stack, jmp_buf *switch_buf)
606 + fatal_sigsegv();
607 + }
608 + longjmp(*switch_buf, 1);
609 ++
610 ++ /* unreachable */
611 ++ printk(UM_KERN_ERR "impossible long jump!");
612 ++ fatal_sigsegv();
613 ++ return 0;
614 + }
615 +
616 + void initial_thread_cb_skas(void (*proc)(void *), void *arg)
617 +diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
618 +index 6afac386a434..b99d497e342d 100644
619 +--- a/arch/x86/include/asm/page_64_types.h
620 ++++ b/arch/x86/include/asm/page_64_types.h
621 +@@ -33,12 +33,14 @@
622 +
623 + /*
624 + * Set __PAGE_OFFSET to the most negative possible address +
625 +- * PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a
626 +- * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's
627 +- * what Xen requires.
628 ++ * PGDIR_SIZE*17 (pgd slot 273).
629 ++ *
630 ++ * The gap is to allow a space for LDT remap for PTI (1 pgd slot) and space for
631 ++ * a hypervisor (16 slots). Choosing 16 slots for a hypervisor is arbitrary,
632 ++ * but it's what Xen requires.
633 + */
634 +-#define __PAGE_OFFSET_BASE_L5 _AC(0xff10000000000000, UL)
635 +-#define __PAGE_OFFSET_BASE_L4 _AC(0xffff880000000000, UL)
636 ++#define __PAGE_OFFSET_BASE_L5 _AC(0xff11000000000000, UL)
637 ++#define __PAGE_OFFSET_BASE_L4 _AC(0xffff888000000000, UL)
638 +
639 + #ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
640 + #define __PAGE_OFFSET page_offset_base
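
A quick check of the arithmetic in the new comment, using only the constants shown in this hunk (PGDIR_SIZE is 1 << 39 = 512 GiB with 4-level paging and 1 << 48 = 256 TiB with 5-level paging):

	4-level: 0xffff800000000000 + 17 * 0x8000000000    = 0xffff888000000000  (__PAGE_OFFSET_BASE_L4)
	5-level: 0xff00000000000000 + 17 * 0x1000000000000 = 0xff11000000000000  (__PAGE_OFFSET_BASE_L5)

One of the 17 reserved pgd slots now holds the LDT remap for PTI and the other 16 stay free for a hypervisor, matching the updated Documentation/x86/x86_64/mm.txt tables earlier in this patch.
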
641 +diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
642 +index 04edd2d58211..84bd9bdc1987 100644
643 +--- a/arch/x86/include/asm/pgtable_64_types.h
644 ++++ b/arch/x86/include/asm/pgtable_64_types.h
645 +@@ -111,9 +111,7 @@ extern unsigned int ptrs_per_p4d;
646 + */
647 + #define MAXMEM (1UL << MAX_PHYSMEM_BITS)
648 +
649 +-#define LDT_PGD_ENTRY_L4 -3UL
650 +-#define LDT_PGD_ENTRY_L5 -112UL
651 +-#define LDT_PGD_ENTRY (pgtable_l5_enabled() ? LDT_PGD_ENTRY_L5 : LDT_PGD_ENTRY_L4)
652 ++#define LDT_PGD_ENTRY -240UL
653 + #define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT)
654 + #define LDT_END_ADDR (LDT_BASE_ADDR + PGDIR_SIZE)
655 +
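
As a sanity check derived only from this hunk: because -240UL << PGDIR_SHIFT wraps modulo 2^64, the single LDT_PGD_ENTRY constant yields the right base address for both page-table layouts:

	4-level (PGDIR_SHIFT = 39): LDT_BASE_ADDR = 0xffff880000000000
	5-level (PGDIR_SHIFT = 48): LDT_BASE_ADDR = 0xff10000000000000

That is pgd slot 272 in both cases (512 - 240 = 272), directly below the direct mapping that now starts in slot 273, and it matches the "LDT remap for PTI" ranges in the mm.txt update above.
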
656 +diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
657 +index 5e801c8c8ce7..374a52fa5296 100644
658 +--- a/arch/x86/kernel/early_printk.c
659 ++++ b/arch/x86/kernel/early_printk.c
660 +@@ -213,8 +213,9 @@ static unsigned int mem32_serial_in(unsigned long addr, int offset)
661 + * early_pci_serial_init()
662 + *
663 + * This function is invoked when the early_printk param starts with "pciserial"
664 +- * The rest of the param should be ",B:D.F,baud" where B, D & F describe the
665 +- * location of a PCI device that must be a UART device.
666 ++ * The rest of the param should be "[force],B:D.F,baud", where B, D & F describe
667 ++ * the location of a PCI device that must be a UART device. "force" is optional
668 ++ * and overrides the use of an UART device with a wrong PCI class code.
669 + */
670 + static __init void early_pci_serial_init(char *s)
671 + {
672 +@@ -224,17 +225,23 @@ static __init void early_pci_serial_init(char *s)
673 + u32 classcode, bar0;
674 + u16 cmdreg;
675 + char *e;
676 ++ int force = 0;
677 +
678 +-
679 +- /*
680 +- * First, part the param to get the BDF values
681 +- */
682 + if (*s == ',')
683 + ++s;
684 +
685 + if (*s == 0)
686 + return;
687 +
688 ++ /* Force the use of an UART device with wrong class code */
689 ++ if (!strncmp(s, "force,", 6)) {
690 ++ force = 1;
691 ++ s += 6;
692 ++ }
693 ++
694 ++ /*
695 ++ * Part the param to get the BDF values
696 ++ */
697 + bus = (u8)simple_strtoul(s, &e, 16);
698 + s = e;
699 + if (*s != ':')
700 +@@ -253,7 +260,7 @@ static __init void early_pci_serial_init(char *s)
701 + s++;
702 +
703 + /*
704 +- * Second, find the device from the BDF
705 ++ * Find the device from the BDF
706 + */
707 + cmdreg = read_pci_config(bus, slot, func, PCI_COMMAND);
708 + classcode = read_pci_config(bus, slot, func, PCI_CLASS_REVISION);
709 +@@ -264,8 +271,10 @@ static __init void early_pci_serial_init(char *s)
710 + */
711 + if (((classcode >> 16 != PCI_CLASS_COMMUNICATION_MODEM) &&
712 + (classcode >> 16 != PCI_CLASS_COMMUNICATION_SERIAL)) ||
713 +- (((classcode >> 8) & 0xff) != 0x02)) /* 16550 I/F at BAR0 */
714 +- return;
715 ++ (((classcode >> 8) & 0xff) != 0x02)) /* 16550 I/F at BAR0 */ {
716 ++ if (!force)
717 ++ return;
718 ++ }
719 +
720 + /*
721 + * Determine if it is IO or memory mapped
722 +@@ -289,7 +298,7 @@ static __init void early_pci_serial_init(char *s)
723 + }
724 +
725 + /*
726 +- * Lastly, initialize the hardware
727 ++ * Initialize the hardware
728 + */
729 + if (*s) {
730 + if (strcmp(s, "nocfg") == 0)
731 +diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
732 +index 733e6ace0fa4..65590eee6289 100644
733 +--- a/arch/x86/kernel/ldt.c
734 ++++ b/arch/x86/kernel/ldt.c
735 +@@ -199,14 +199,6 @@ static void sanity_check_ldt_mapping(struct mm_struct *mm)
736 + /*
737 + * If PTI is enabled, this maps the LDT into the kernelmode and
738 + * usermode tables for the given mm.
739 +- *
740 +- * There is no corresponding unmap function. Even if the LDT is freed, we
741 +- * leave the PTEs around until the slot is reused or the mm is destroyed.
742 +- * This is harmless: the LDT is always in ordinary memory, and no one will
743 +- * access the freed slot.
744 +- *
745 +- * If we wanted to unmap freed LDTs, we'd also need to do a flush to make
746 +- * it useful, and the flush would slow down modify_ldt().
747 + */
748 + static int
749 + map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
750 +@@ -214,8 +206,7 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
751 + unsigned long va;
752 + bool is_vmalloc;
753 + spinlock_t *ptl;
754 +- pgd_t *pgd;
755 +- int i;
756 ++ int i, nr_pages;
757 +
758 + if (!static_cpu_has(X86_FEATURE_PTI))
759 + return 0;
760 +@@ -229,16 +220,11 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
761 + /* Check if the current mappings are sane */
762 + sanity_check_ldt_mapping(mm);
763 +
764 +- /*
765 +- * Did we already have the top level entry allocated? We can't
766 +- * use pgd_none() for this because it doens't do anything on
767 +- * 4-level page table kernels.
768 +- */
769 +- pgd = pgd_offset(mm, LDT_BASE_ADDR);
770 +-
771 + is_vmalloc = is_vmalloc_addr(ldt->entries);
772 +
773 +- for (i = 0; i * PAGE_SIZE < ldt->nr_entries * LDT_ENTRY_SIZE; i++) {
774 ++ nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);
775 ++
776 ++ for (i = 0; i < nr_pages; i++) {
777 + unsigned long offset = i << PAGE_SHIFT;
778 + const void *src = (char *)ldt->entries + offset;
779 + unsigned long pfn;
780 +@@ -272,13 +258,39 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
781 + /* Propagate LDT mapping to the user page-table */
782 + map_ldt_struct_to_user(mm);
783 +
784 +- va = (unsigned long)ldt_slot_va(slot);
785 +- flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, 0);
786 +-
787 + ldt->slot = slot;
788 + return 0;
789 + }
790 +
791 ++static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
792 ++{
793 ++ unsigned long va;
794 ++ int i, nr_pages;
795 ++
796 ++ if (!ldt)
797 ++ return;
798 ++
799 ++ /* LDT map/unmap is only required for PTI */
800 ++ if (!static_cpu_has(X86_FEATURE_PTI))
801 ++ return;
802 ++
803 ++ nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);
804 ++
805 ++ for (i = 0; i < nr_pages; i++) {
806 ++ unsigned long offset = i << PAGE_SHIFT;
807 ++ spinlock_t *ptl;
808 ++ pte_t *ptep;
809 ++
810 ++ va = (unsigned long)ldt_slot_va(ldt->slot) + offset;
811 ++ ptep = get_locked_pte(mm, va, &ptl);
812 ++ pte_clear(mm, va, ptep);
813 ++ pte_unmap_unlock(ptep, ptl);
814 ++ }
815 ++
816 ++ va = (unsigned long)ldt_slot_va(ldt->slot);
817 ++ flush_tlb_mm_range(mm, va, va + nr_pages * PAGE_SIZE, 0);
818 ++}
819 ++
820 + #else /* !CONFIG_PAGE_TABLE_ISOLATION */
821 +
822 + static int
823 +@@ -286,6 +298,10 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
824 + {
825 + return 0;
826 + }
827 ++
828 ++static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
829 ++{
830 ++}
831 + #endif /* CONFIG_PAGE_TABLE_ISOLATION */
832 +
833 + static void free_ldt_pgtables(struct mm_struct *mm)
834 +@@ -524,6 +540,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
835 + }
836 +
837 + install_ldt(mm, new_ldt);
838 ++ unmap_ldt_struct(mm, old_ldt);
839 + free_ldt_struct(old_ldt);
840 + error = 0;
841 +
842 +diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
843 +index dd461c0167ef..2c84c6ad8b50 100644
844 +--- a/arch/x86/xen/mmu_pv.c
845 ++++ b/arch/x86/xen/mmu_pv.c
846 +@@ -1897,7 +1897,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
847 + init_top_pgt[0] = __pgd(0);
848 +
849 + /* Pre-constructed entries are in pfn, so convert to mfn */
850 +- /* L4[272] -> level3_ident_pgt */
851 ++ /* L4[273] -> level3_ident_pgt */
852 + /* L4[511] -> level3_kernel_pgt */
853 + convert_pfn_mfn(init_top_pgt);
854 +
855 +@@ -1917,8 +1917,8 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
856 + addr[0] = (unsigned long)pgd;
857 + addr[1] = (unsigned long)l3;
858 + addr[2] = (unsigned long)l2;
859 +- /* Graft it onto L4[272][0]. Note that we creating an aliasing problem:
860 +- * Both L4[272][0] and L4[511][510] have entries that point to the same
861 ++ /* Graft it onto L4[273][0]. Note that we creating an aliasing problem:
862 ++ * Both L4[273][0] and L4[511][510] have entries that point to the same
863 + * L2 (PMD) tables. Meaning that if you modify it in __va space
864 + * it will be also modified in the __ka space! (But if you just
865 + * modify the PMD table to point to other PTE's or none, then you
866 +diff --git a/block/bio.c b/block/bio.c
867 +index 0093bed81c0e..41173710430c 100644
868 +--- a/block/bio.c
869 ++++ b/block/bio.c
870 +@@ -1261,6 +1261,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
871 + if (ret)
872 + goto cleanup;
873 + } else {
874 ++ zero_fill_bio(bio);
875 + iov_iter_advance(iter, bio->bi_iter.bi_size);
876 + }
877 +
878 +diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
879 +index eaa60c94205a..1f32caa87686 100644
880 +--- a/drivers/acpi/acpi_platform.c
881 ++++ b/drivers/acpi/acpi_platform.c
882 +@@ -30,6 +30,7 @@ static const struct acpi_device_id forbidden_id_list[] = {
883 + {"PNP0200", 0}, /* AT DMA Controller */
884 + {"ACPI0009", 0}, /* IOxAPIC */
885 + {"ACPI000A", 0}, /* IOAPIC */
886 ++ {"SMB0001", 0}, /* ACPI SMBUS virtual device */
887 + {"", 0},
888 + };
889 +
890 +diff --git a/drivers/block/brd.c b/drivers/block/brd.c
891 +index df8103dd40ac..c18586fccb6f 100644
892 +--- a/drivers/block/brd.c
893 ++++ b/drivers/block/brd.c
894 +@@ -396,15 +396,14 @@ static struct brd_device *brd_alloc(int i)
895 + disk->first_minor = i * max_part;
896 + disk->fops = &brd_fops;
897 + disk->private_data = brd;
898 +- disk->queue = brd->brd_queue;
899 + disk->flags = GENHD_FL_EXT_DEVT;
900 + sprintf(disk->disk_name, "ram%d", i);
901 + set_capacity(disk, rd_size * 2);
902 +- disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
903 ++ brd->brd_queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
904 +
905 + /* Tell the block layer that this is not a rotational device */
906 +- blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
907 +- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
908 ++ blk_queue_flag_set(QUEUE_FLAG_NONROT, brd->brd_queue);
909 ++ blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, brd->brd_queue);
910 +
911 + return brd;
912 +
913 +@@ -436,6 +435,7 @@ static struct brd_device *brd_init_one(int i, bool *new)
914 +
915 + brd = brd_alloc(i);
916 + if (brd) {
917 ++ brd->brd_disk->queue = brd->brd_queue;
918 + add_disk(brd->brd_disk);
919 + list_add_tail(&brd->brd_list, &brd_devices);
920 + }
921 +@@ -503,8 +503,14 @@ static int __init brd_init(void)
922 +
923 + /* point of no return */
924 +
925 +- list_for_each_entry(brd, &brd_devices, brd_list)
926 ++ list_for_each_entry(brd, &brd_devices, brd_list) {
927 ++ /*
928 ++ * associate with queue just before adding disk for
929 ++ * avoiding to mess up failure path
930 ++ */
931 ++ brd->brd_disk->queue = brd->brd_queue;
932 + add_disk(brd->brd_disk);
933 ++ }
934 +
935 + blk_register_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS,
936 + THIS_MODULE, brd_probe, NULL, NULL);
937 +diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
938 +index 20724abd38bd..7df6b5b1e7ee 100644
939 +--- a/drivers/clk/clk-fixed-factor.c
940 ++++ b/drivers/clk/clk-fixed-factor.c
941 +@@ -210,6 +210,7 @@ static int of_fixed_factor_clk_remove(struct platform_device *pdev)
942 + {
943 + struct clk *clk = platform_get_drvdata(pdev);
944 +
945 ++ of_clk_del_provider(pdev->dev.of_node);
946 + clk_unregister_fixed_factor(clk);
947 +
948 + return 0;
949 +diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
950 +index b5c46b3f8764..6d6475c32ee5 100644
951 +--- a/drivers/clk/clk-fixed-rate.c
952 ++++ b/drivers/clk/clk-fixed-rate.c
953 +@@ -200,6 +200,7 @@ static int of_fixed_clk_remove(struct platform_device *pdev)
954 + {
955 + struct clk *clk = platform_get_drvdata(pdev);
956 +
957 ++ of_clk_del_provider(pdev->dev.of_node);
958 + clk_unregister_fixed_rate(clk);
959 +
960 + return 0;
961 +diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
962 +index 8cf74fc423e6..02229d051d77 100644
963 +--- a/drivers/clk/meson/axg.c
964 ++++ b/drivers/clk/meson/axg.c
965 +@@ -96,7 +96,6 @@ static struct clk_regmap axg_sys_pll = {
966 + .ops = &meson_clk_pll_ro_ops,
967 + .parent_names = (const char *[]){ "xtal" },
968 + .num_parents = 1,
969 +- .flags = CLK_GET_RATE_NOCACHE,
970 + },
971 + };
972 +
973 +@@ -713,12 +712,14 @@ static struct clk_regmap axg_pcie_mux = {
974 + .offset = HHI_PCIE_PLL_CNTL6,
975 + .mask = 0x1,
976 + .shift = 2,
977 ++ /* skip the parent mpll3, reserved for debug */
978 ++ .table = (u32[]){ 1 },
979 + },
980 + .hw.init = &(struct clk_init_data){
981 + .name = "pcie_mux",
982 + .ops = &clk_regmap_mux_ops,
983 +- .parent_names = (const char *[]){ "mpll3", "pcie_pll" },
984 +- .num_parents = 2,
985 ++ .parent_names = (const char *[]){ "pcie_pll" },
986 ++ .num_parents = 1,
987 + .flags = CLK_SET_RATE_PARENT,
988 + },
989 + };
990 +diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
991 +index 6435d86118f1..6628ffa31383 100644
992 +--- a/drivers/clk/meson/gxbb.c
993 ++++ b/drivers/clk/meson/gxbb.c
994 +@@ -213,7 +213,6 @@ static struct clk_regmap gxbb_fixed_pll = {
995 + .ops = &meson_clk_pll_ro_ops,
996 + .parent_names = (const char *[]){ "xtal" },
997 + .num_parents = 1,
998 +- .flags = CLK_GET_RATE_NOCACHE,
999 + },
1000 + };
1001 +
1002 +@@ -276,6 +275,10 @@ static struct clk_regmap gxbb_hdmi_pll = {
1003 + .ops = &meson_clk_pll_ro_ops,
1004 + .parent_names = (const char *[]){ "hdmi_pll_pre_mult" },
1005 + .num_parents = 1,
1006 ++ /*
1007 ++ * Display directly handle hdmi pll registers ATM, we need
1008 ++ * NOCACHE to keep our view of the clock as accurate as possible
1009 ++ */
1010 + .flags = CLK_GET_RATE_NOCACHE,
1011 + },
1012 + };
1013 +@@ -334,6 +337,10 @@ static struct clk_regmap gxl_hdmi_pll = {
1014 + .ops = &meson_clk_pll_ro_ops,
1015 + .parent_names = (const char *[]){ "xtal" },
1016 + .num_parents = 1,
1017 ++ /*
1018 ++ * Display directly handle hdmi pll registers ATM, we need
1019 ++ * NOCACHE to keep our view of the clock as accurate as possible
1020 ++ */
1021 + .flags = CLK_GET_RATE_NOCACHE,
1022 + },
1023 + };
1024 +@@ -371,7 +378,6 @@ static struct clk_regmap gxbb_sys_pll = {
1025 + .ops = &meson_clk_pll_ro_ops,
1026 + .parent_names = (const char *[]){ "xtal" },
1027 + .num_parents = 1,
1028 +- .flags = CLK_GET_RATE_NOCACHE,
1029 + },
1030 + };
1031 +
1032 +@@ -418,7 +424,6 @@ static struct clk_regmap gxbb_gp0_pll = {
1033 + .ops = &meson_clk_pll_ops,
1034 + .parent_names = (const char *[]){ "xtal" },
1035 + .num_parents = 1,
1036 +- .flags = CLK_GET_RATE_NOCACHE,
1037 + },
1038 + };
1039 +
1040 +@@ -472,7 +477,6 @@ static struct clk_regmap gxl_gp0_pll = {
1041 + .ops = &meson_clk_pll_ops,
1042 + .parent_names = (const char *[]){ "xtal" },
1043 + .num_parents = 1,
1044 +- .flags = CLK_GET_RATE_NOCACHE,
1045 + },
1046 + };
1047 +
1048 +diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
1049 +index 7447d96a265f..74697e145dde 100644
1050 +--- a/drivers/clk/meson/meson8b.c
1051 ++++ b/drivers/clk/meson/meson8b.c
1052 +@@ -132,7 +132,6 @@ static struct clk_regmap meson8b_fixed_pll = {
1053 + .ops = &meson_clk_pll_ro_ops,
1054 + .parent_names = (const char *[]){ "xtal" },
1055 + .num_parents = 1,
1056 +- .flags = CLK_GET_RATE_NOCACHE,
1057 + },
1058 + };
1059 +
1060 +@@ -169,7 +168,6 @@ static struct clk_regmap meson8b_vid_pll = {
1061 + .ops = &meson_clk_pll_ro_ops,
1062 + .parent_names = (const char *[]){ "xtal" },
1063 + .num_parents = 1,
1064 +- .flags = CLK_GET_RATE_NOCACHE,
1065 + },
1066 + };
1067 +
1068 +@@ -207,7 +205,6 @@ static struct clk_regmap meson8b_sys_pll = {
1069 + .ops = &meson_clk_pll_ro_ops,
1070 + .parent_names = (const char *[]){ "xtal" },
1071 + .num_parents = 1,
1072 +- .flags = CLK_GET_RATE_NOCACHE,
1073 + },
1074 + };
1075 +
1076 +diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
1077 +index a0b6ecdc63dd..6d2b56891559 100644
1078 +--- a/drivers/clk/renesas/r9a06g032-clocks.c
1079 ++++ b/drivers/clk/renesas/r9a06g032-clocks.c
1080 +@@ -539,7 +539,8 @@ r9a06g032_div_round_rate(struct clk_hw *hw,
1081 + * several uarts attached to this divider, and changing this impacts
1082 + * everyone.
1083 + */
1084 +- if (clk->index == R9A06G032_DIV_UART) {
1085 ++ if (clk->index == R9A06G032_DIV_UART ||
1086 ++ clk->index == R9A06G032_DIV_P2_PG) {
1087 + pr_devel("%s div uart hack!\n", __func__);
1088 + return clk_get_rate(hw->clk);
1089 + }
1090 +diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
1091 +index 95e1bf69449b..d4f77c4eb277 100644
1092 +--- a/drivers/clk/samsung/clk-exynos5420.c
1093 ++++ b/drivers/clk/samsung/clk-exynos5420.c
1094 +@@ -281,6 +281,7 @@ static const struct samsung_clk_reg_dump exynos5420_set_clksrc[] = {
1095 + { .offset = GATE_BUS_TOP, .value = 0xffffffff, },
1096 + { .offset = GATE_BUS_DISP1, .value = 0xffffffff, },
1097 + { .offset = GATE_IP_PERIC, .value = 0xffffffff, },
1098 ++ { .offset = GATE_IP_PERIS, .value = 0xffffffff, },
1099 + };
1100 +
1101 + static int exynos5420_clk_suspend(void)
1102 +diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
1103 +index 7d22e1af2247..27e0979b3158 100644
1104 +--- a/drivers/clk/ti/clk.c
1105 ++++ b/drivers/clk/ti/clk.c
1106 +@@ -129,7 +129,7 @@ int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops)
1107 + void __init ti_dt_clocks_register(struct ti_dt_clk oclks[])
1108 + {
1109 + struct ti_dt_clk *c;
1110 +- struct device_node *node;
1111 ++ struct device_node *node, *parent;
1112 + struct clk *clk;
1113 + struct of_phandle_args clkspec;
1114 + char buf[64];
1115 +@@ -164,8 +164,12 @@ void __init ti_dt_clocks_register(struct ti_dt_clk oclks[])
1116 + continue;
1117 +
1118 + node = of_find_node_by_name(NULL, buf);
1119 +- if (num_args)
1120 +- node = of_find_node_by_name(node, "clk");
1121 ++ if (num_args) {
1122 ++ parent = node;
1123 ++ node = of_get_child_by_name(parent, "clk");
1124 ++ of_node_put(parent);
1125 ++ }
1126 ++
1127 + clkspec.np = node;
1128 + clkspec.args_count = num_args;
1129 + for (i = 0; i < num_args; i++) {
1130 +@@ -173,11 +177,12 @@ void __init ti_dt_clocks_register(struct ti_dt_clk oclks[])
1131 + if (ret) {
1132 + pr_warn("Bad tag in %s at %d: %s\n",
1133 + c->node_name, i, tags[i]);
1134 ++ of_node_put(node);
1135 + return;
1136 + }
1137 + }
1138 + clk = of_clk_get_from_provider(&clkspec);
1139 +-
1140 ++ of_node_put(node);
1141 + if (!IS_ERR(clk)) {
1142 + c->lk.clk = clk;
1143 + clkdev_add(&c->lk);
1144 +diff --git a/drivers/gnss/serial.c b/drivers/gnss/serial.c
1145 +index b01ba4438501..31e891f00175 100644
1146 +--- a/drivers/gnss/serial.c
1147 ++++ b/drivers/gnss/serial.c
1148 +@@ -13,6 +13,7 @@
1149 + #include <linux/of.h>
1150 + #include <linux/pm.h>
1151 + #include <linux/pm_runtime.h>
1152 ++#include <linux/sched.h>
1153 + #include <linux/serdev.h>
1154 + #include <linux/slab.h>
1155 +
1156 +@@ -63,7 +64,7 @@ static int gnss_serial_write_raw(struct gnss_device *gdev,
1157 + int ret;
1158 +
1159 + /* write is only buffered synchronously */
1160 +- ret = serdev_device_write(serdev, buf, count, 0);
1161 ++ ret = serdev_device_write(serdev, buf, count, MAX_SCHEDULE_TIMEOUT);
1162 + if (ret < 0)
1163 + return ret;
1164 +
1165 +diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c
1166 +index 79cb98950013..71d014edd167 100644
1167 +--- a/drivers/gnss/sirf.c
1168 ++++ b/drivers/gnss/sirf.c
1169 +@@ -16,6 +16,7 @@
1170 + #include <linux/pm.h>
1171 + #include <linux/pm_runtime.h>
1172 + #include <linux/regulator/consumer.h>
1173 ++#include <linux/sched.h>
1174 + #include <linux/serdev.h>
1175 + #include <linux/slab.h>
1176 + #include <linux/wait.h>
1177 +@@ -83,7 +84,7 @@ static int sirf_write_raw(struct gnss_device *gdev, const unsigned char *buf,
1178 + int ret;
1179 +
1180 + /* write is only buffered synchronously */
1181 +- ret = serdev_device_write(serdev, buf, count, 0);
1182 ++ ret = serdev_device_write(serdev, buf, count, MAX_SCHEDULE_TIMEOUT);
1183 + if (ret < 0)
1184 + return ret;
1185 +
1186 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1187 +index 6903fe6c894b..ef5c6af4d964 100644
1188 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1189 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1190 +@@ -3167,7 +3167,7 @@ void dm_drm_plane_destroy_state(struct drm_plane *plane,
1191 + static const struct drm_plane_funcs dm_plane_funcs = {
1192 + .update_plane = drm_atomic_helper_update_plane,
1193 + .disable_plane = drm_atomic_helper_disable_plane,
1194 +- .destroy = drm_plane_cleanup,
1195 ++ .destroy = drm_primary_helper_destroy,
1196 + .reset = dm_drm_plane_reset,
1197 + .atomic_duplicate_state = dm_drm_plane_duplicate_state,
1198 + .atomic_destroy_state = dm_drm_plane_destroy_state,
1199 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
1200 +index a29dc35954c9..aba2c5c1d2f8 100644
1201 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
1202 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
1203 +@@ -173,8 +173,6 @@ struct amdgpu_dm_connector {
1204 + struct mutex hpd_lock;
1205 +
1206 + bool fake_enable;
1207 +-
1208 +- bool mst_connected;
1209 + };
1210 +
1211 + #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
1212 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
1213 +index 9a300732ba37..4cc45a1d21db 100644
1214 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
1215 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
1216 +@@ -317,12 +317,7 @@ dm_dp_create_fake_mst_encoder(struct amdgpu_dm_connector *connector)
1217 + struct amdgpu_device *adev = dev->dev_private;
1218 + struct amdgpu_encoder *amdgpu_encoder;
1219 + struct drm_encoder *encoder;
1220 +- const struct drm_connector_helper_funcs *connector_funcs =
1221 +- connector->base.helper_private;
1222 +- struct drm_encoder *enc_master =
1223 +- connector_funcs->best_encoder(&connector->base);
1224 +
1225 +- DRM_DEBUG_KMS("enc master is %p\n", enc_master);
1226 + amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL);
1227 + if (!amdgpu_encoder)
1228 + return NULL;
1229 +@@ -352,25 +347,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
1230 + struct amdgpu_device *adev = dev->dev_private;
1231 + struct amdgpu_dm_connector *aconnector;
1232 + struct drm_connector *connector;
1233 +- struct drm_connector_list_iter conn_iter;
1234 +-
1235 +- drm_connector_list_iter_begin(dev, &conn_iter);
1236 +- drm_for_each_connector_iter(connector, &conn_iter) {
1237 +- aconnector = to_amdgpu_dm_connector(connector);
1238 +- if (aconnector->mst_port == master
1239 +- && !aconnector->port) {
1240 +- DRM_INFO("DM_MST: reusing connector: %p [id: %d] [master: %p]\n",
1241 +- aconnector, connector->base.id, aconnector->mst_port);
1242 +-
1243 +- aconnector->port = port;
1244 +- drm_connector_set_path_property(connector, pathprop);
1245 +-
1246 +- drm_connector_list_iter_end(&conn_iter);
1247 +- aconnector->mst_connected = true;
1248 +- return &aconnector->base;
1249 +- }
1250 +- }
1251 +- drm_connector_list_iter_end(&conn_iter);
1252 +
1253 + aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
1254 + if (!aconnector)
1255 +@@ -419,8 +395,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
1256 + */
1257 + amdgpu_dm_connector_funcs_reset(connector);
1258 +
1259 +- aconnector->mst_connected = true;
1260 +-
1261 + DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
1262 + aconnector, connector->base.id, aconnector->mst_port);
1263 +
1264 +@@ -432,6 +406,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
1265 + static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
1266 + struct drm_connector *connector)
1267 + {
1268 ++ struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
1269 ++ struct drm_device *dev = master->base.dev;
1270 ++ struct amdgpu_device *adev = dev->dev_private;
1271 + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
1272 +
1273 + DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n",
1274 +@@ -445,7 +422,10 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
1275 + aconnector->dc_sink = NULL;
1276 + }
1277 +
1278 +- aconnector->mst_connected = false;
1279 ++ drm_connector_unregister(connector);
1280 ++ if (adev->mode_info.rfbdev)
1281 ++ drm_fb_helper_remove_one_connector(&adev->mode_info.rfbdev->helper, connector);
1282 ++ drm_connector_put(connector);
1283 + }
1284 +
1285 + static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
1286 +@@ -456,18 +436,10 @@ static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
1287 + drm_kms_helper_hotplug_event(dev);
1288 + }
1289 +
1290 +-static void dm_dp_mst_link_status_reset(struct drm_connector *connector)
1291 +-{
1292 +- mutex_lock(&connector->dev->mode_config.mutex);
1293 +- drm_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD);
1294 +- mutex_unlock(&connector->dev->mode_config.mutex);
1295 +-}
1296 +-
1297 + static void dm_dp_mst_register_connector(struct drm_connector *connector)
1298 + {
1299 + struct drm_device *dev = connector->dev;
1300 + struct amdgpu_device *adev = dev->dev_private;
1301 +- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
1302 +
1303 + if (adev->mode_info.rfbdev)
1304 + drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
1305 +@@ -475,9 +447,6 @@ static void dm_dp_mst_register_connector(struct drm_connector *connector)
1306 + DRM_ERROR("adev->mode_info.rfbdev is NULL\n");
1307 +
1308 + drm_connector_register(connector);
1309 +-
1310 +- if (aconnector->mst_connected)
1311 +- dm_dp_mst_link_status_reset(connector);
1312 + }
1313 +
1314 + static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
1315 +diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
1316 +index ff0bfc65a8c1..b506e3622b08 100644
1317 +--- a/drivers/gpu/drm/drm_edid.c
1318 ++++ b/drivers/gpu/drm/drm_edid.c
1319 +@@ -122,6 +122,9 @@ static const struct edid_quirk {
1320 + /* SDC panel of Lenovo B50-80 reports 8 bpc, but is a 6 bpc panel */
1321 + { "SDC", 0x3652, EDID_QUIRK_FORCE_6BPC },
1322 +
1323 ++ /* BOE model 0x0771 reports 8 bpc, but is a 6 bpc panel */
1324 ++ { "BOE", 0x0771, EDID_QUIRK_FORCE_6BPC },
1325 ++
1326 + /* Belinea 10 15 55 */
1327 + { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
1328 + { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
1329 +diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
1330 +index 94529aa82339..aef487dd8731 100644
1331 +--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
1332 ++++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
1333 +@@ -164,13 +164,6 @@ static u32 decon_get_frame_count(struct decon_context *ctx, bool end)
1334 + return frm;
1335 + }
1336 +
1337 +-static u32 decon_get_vblank_counter(struct exynos_drm_crtc *crtc)
1338 +-{
1339 +- struct decon_context *ctx = crtc->ctx;
1340 +-
1341 +- return decon_get_frame_count(ctx, false);
1342 +-}
1343 +-
1344 + static void decon_setup_trigger(struct decon_context *ctx)
1345 + {
1346 + if (!ctx->crtc->i80_mode && !(ctx->out_type & I80_HW_TRG))
1347 +@@ -536,7 +529,6 @@ static const struct exynos_drm_crtc_ops decon_crtc_ops = {
1348 + .disable = decon_disable,
1349 + .enable_vblank = decon_enable_vblank,
1350 + .disable_vblank = decon_disable_vblank,
1351 +- .get_vblank_counter = decon_get_vblank_counter,
1352 + .atomic_begin = decon_atomic_begin,
1353 + .update_plane = decon_update_plane,
1354 + .disable_plane = decon_disable_plane,
1355 +@@ -554,7 +546,6 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
1356 + int ret;
1357 +
1358 + ctx->drm_dev = drm_dev;
1359 +- drm_dev->max_vblank_count = 0xffffffff;
1360 +
1361 + for (win = ctx->first_win; win < WINDOWS_NR; win++) {
1362 + ctx->configs[win].pixel_formats = decon_formats;
1363 +diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
1364 +index eea90251808f..2696289ecc78 100644
1365 +--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
1366 ++++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
1367 +@@ -162,16 +162,6 @@ static void exynos_drm_crtc_disable_vblank(struct drm_crtc *crtc)
1368 + exynos_crtc->ops->disable_vblank(exynos_crtc);
1369 + }
1370 +
1371 +-static u32 exynos_drm_crtc_get_vblank_counter(struct drm_crtc *crtc)
1372 +-{
1373 +- struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
1374 +-
1375 +- if (exynos_crtc->ops->get_vblank_counter)
1376 +- return exynos_crtc->ops->get_vblank_counter(exynos_crtc);
1377 +-
1378 +- return 0;
1379 +-}
1380 +-
1381 + static const struct drm_crtc_funcs exynos_crtc_funcs = {
1382 + .set_config = drm_atomic_helper_set_config,
1383 + .page_flip = drm_atomic_helper_page_flip,
1384 +@@ -181,7 +171,6 @@ static const struct drm_crtc_funcs exynos_crtc_funcs = {
1385 + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
1386 + .enable_vblank = exynos_drm_crtc_enable_vblank,
1387 + .disable_vblank = exynos_drm_crtc_disable_vblank,
1388 +- .get_vblank_counter = exynos_drm_crtc_get_vblank_counter,
1389 + };
1390 +
1391 + struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
1392 +diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
1393 +index c737c4bd2c19..630f1edc5de2 100644
1394 +--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
1395 ++++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
1396 +@@ -133,7 +133,6 @@ struct exynos_drm_crtc_ops {
1397 + void (*disable)(struct exynos_drm_crtc *crtc);
1398 + int (*enable_vblank)(struct exynos_drm_crtc *crtc);
1399 + void (*disable_vblank)(struct exynos_drm_crtc *crtc);
1400 +- u32 (*get_vblank_counter)(struct exynos_drm_crtc *crtc);
1401 + enum drm_mode_status (*mode_valid)(struct exynos_drm_crtc *crtc,
1402 + const struct drm_display_mode *mode);
1403 + bool (*mode_fixup)(struct exynos_drm_crtc *crtc,
1404 +diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
1405 +index 4aca5344863d..d6c25bea4382 100644
1406 +--- a/drivers/gpu/drm/i915/i915_drv.h
1407 ++++ b/drivers/gpu/drm/i915/i915_drv.h
1408 +@@ -2248,7 +2248,7 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
1409 + #define for_each_sgt_dma(__dmap, __iter, __sgt) \
1410 + for ((__iter) = __sgt_iter((__sgt)->sgl, true); \
1411 + ((__dmap) = (__iter).dma + (__iter).curr); \
1412 +- (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \
1413 ++ (((__iter).curr += I915_GTT_PAGE_SIZE) >= (__iter).max) ? \
1414 + (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)
1415 +
1416 + /**
1417 +diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
1418 +index 294a143b85f5..5f57f4e1fbc8 100644
1419 +--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
1420 ++++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
1421 +@@ -1058,7 +1058,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
1422 + do {
1423 + vaddr[idx->pte] = pte_encode | iter->dma;
1424 +
1425 +- iter->dma += PAGE_SIZE;
1426 ++ iter->dma += I915_GTT_PAGE_SIZE;
1427 + if (iter->dma >= iter->max) {
1428 + iter->sg = __sg_next(iter->sg);
1429 + if (!iter->sg) {
1430 +@@ -1770,7 +1770,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
1431 +
1432 + seq_printf(m, "\t\t(%03d, %04d) %08llx: ",
1433 + pde, pte,
1434 +- (pde * GEN6_PTES + pte) * PAGE_SIZE);
1435 ++ (pde * GEN6_PTES + pte) * I915_GTT_PAGE_SIZE);
1436 + for (i = 0; i < 4; i++) {
1437 + if (vaddr[pte + i] != scratch_pte)
1438 + seq_printf(m, " %08x", vaddr[pte + i]);
1439 +@@ -1910,7 +1910,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
1440 + do {
1441 + vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
1442 +
1443 +- iter.dma += PAGE_SIZE;
1444 ++ iter.dma += I915_GTT_PAGE_SIZE;
1445 + if (iter.dma == iter.max) {
1446 + iter.sg = __sg_next(iter.sg);
1447 + if (!iter.sg)
1448 +@@ -2048,7 +2048,7 @@ static int pd_vma_bind(struct i915_vma *vma,
1449 + {
1450 + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
1451 + struct gen6_hw_ppgtt *ppgtt = vma->private;
1452 +- u32 ggtt_offset = i915_ggtt_offset(vma) / PAGE_SIZE;
1453 ++ u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;
1454 + struct i915_page_table *pt;
1455 + unsigned int pde;
1456 +
1457 +@@ -2174,7 +2174,7 @@ static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
1458 + ppgtt->base.vm.i915 = i915;
1459 + ppgtt->base.vm.dma = &i915->drm.pdev->dev;
1460 +
1461 +- ppgtt->base.vm.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
1462 ++ ppgtt->base.vm.total = I915_PDES * GEN6_PTES * I915_GTT_PAGE_SIZE;
1463 +
1464 + i915_address_space_init(&ppgtt->base.vm, i915);
1465 +
1466 +@@ -3031,7 +3031,7 @@ static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
1467 + bdw_gmch_ctl = 1 << bdw_gmch_ctl;
1468 +
1469 + #ifdef CONFIG_X86_32
1470 +- /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
1471 ++ /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
1472 + if (bdw_gmch_ctl > 4)
1473 + bdw_gmch_ctl = 4;
1474 + #endif
1475 +@@ -3729,9 +3729,9 @@ rotate_pages(const dma_addr_t *in, unsigned int offset,
1476 + * the entries so the sg list can be happily traversed.
1477 + * The only thing we need are DMA addresses.
1478 + */
1479 +- sg_set_page(sg, NULL, PAGE_SIZE, 0);
1480 ++ sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
1481 + sg_dma_address(sg) = in[offset + src_idx];
1482 +- sg_dma_len(sg) = PAGE_SIZE;
1483 ++ sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
1484 + sg = sg_next(sg);
1485 + src_idx -= stride;
1486 + }
1487 +@@ -3744,7 +3744,7 @@ static noinline struct sg_table *
1488 + intel_rotate_pages(struct intel_rotation_info *rot_info,
1489 + struct drm_i915_gem_object *obj)
1490 + {
1491 +- const unsigned long n_pages = obj->base.size / PAGE_SIZE;
1492 ++ const unsigned long n_pages = obj->base.size / I915_GTT_PAGE_SIZE;
1493 + unsigned int size = intel_rotation_info_size(rot_info);
1494 + struct sgt_iter sgt_iter;
1495 + dma_addr_t dma_addr;
1496 +diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c
1497 +index aec253b44156..3cd7229b6e54 100644
1498 +--- a/drivers/hid/hid-alps.c
1499 ++++ b/drivers/hid/hid-alps.c
1500 +@@ -660,6 +660,20 @@ exit:
1501 + return ret;
1502 + }
1503 +
1504 ++static int alps_sp_open(struct input_dev *dev)
1505 ++{
1506 ++ struct hid_device *hid = input_get_drvdata(dev);
1507 ++
1508 ++ return hid_hw_open(hid);
1509 ++}
1510 ++
1511 ++static void alps_sp_close(struct input_dev *dev)
1512 ++{
1513 ++ struct hid_device *hid = input_get_drvdata(dev);
1514 ++
1515 ++ hid_hw_close(hid);
1516 ++}
1517 ++
1518 + static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
1519 + {
1520 + struct alps_dev *data = hid_get_drvdata(hdev);
1521 +@@ -733,6 +747,10 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
1522 + input2->id.version = input->id.version;
1523 + input2->dev.parent = input->dev.parent;
1524 +
1525 ++ input_set_drvdata(input2, hdev);
1526 ++ input2->open = alps_sp_open;
1527 ++ input2->close = alps_sp_close;
1528 ++
1529 + __set_bit(EV_KEY, input2->evbit);
1530 + data->sp_btn_cnt = (data->sp_btn_info & 0x0F);
1531 + for (i = 0; i < data->sp_btn_cnt; i++)
1532 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
1533 +index bc49909aba8e..501c05cbec7e 100644
1534 +--- a/drivers/hid/hid-ids.h
1535 ++++ b/drivers/hid/hid-ids.h
1536 +@@ -799,6 +799,7 @@
1537 + #define USB_DEVICE_ID_MS_TOUCH_COVER_2 0x07a7
1538 + #define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9
1539 + #define USB_DEVICE_ID_MS_POWER_COVER 0x07da
1540 ++#define USB_DEVICE_ID_MS_PIXART_MOUSE 0x00cb
1541 +
1542 + #define USB_VENDOR_ID_MOJO 0x8282
1543 + #define USB_DEVICE_ID_RETRO_ADAPTER 0x3201
1544 +@@ -921,6 +922,9 @@
1545 + #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003 0x3003
1546 + #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008 0x3008
1547 +
1548 ++#define I2C_VENDOR_ID_RAYDIUM 0x2386
1549 ++#define I2C_PRODUCT_ID_RAYDIUM_4B33 0x4b33
1550 ++
1551 + #define USB_VENDOR_ID_RAZER 0x1532
1552 + #define USB_DEVICE_ID_RAZER_BLADE_14 0x011D
1553 +
1554 +@@ -1195,6 +1199,8 @@
1555 + #define USB_DEVICE_ID_PRIMAX_MOUSE_4D22 0x4d22
1556 + #define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05
1557 + #define USB_DEVICE_ID_PRIMAX_REZEL 0x4e72
1558 ++#define USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D0F 0x4d0f
1559 ++#define USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4E22 0x4e22
1560 +
1561 +
1562 + #define USB_VENDOR_ID_RISO_KAGAKU 0x1294 /* Riso Kagaku Corp. */
1563 +diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
1564 +index 249d49b6b16c..0a0605a7e481 100644
1565 +--- a/drivers/hid/hid-quirks.c
1566 ++++ b/drivers/hid/hid-quirks.c
1567 +@@ -106,7 +106,7 @@ static const struct hid_device_id hid_quirks[] = {
1568 + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A), HID_QUIRK_ALWAYS_POLL },
1569 + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A), HID_QUIRK_ALWAYS_POLL },
1570 + { HID_USB_DEVICE(USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK), HID_QUIRK_MULTI_INPUT },
1571 +- { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS), HID_QUIRK_NOGET },
1572 ++ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PIXART_MOUSE), HID_QUIRK_ALWAYS_POLL },
1573 + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER), HID_QUIRK_NO_INIT_REPORTS },
1574 + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2), HID_QUIRK_NO_INIT_REPORTS },
1575 + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2), HID_QUIRK_NO_INIT_REPORTS },
1576 +@@ -129,6 +129,8 @@ static const struct hid_device_id hid_quirks[] = {
1577 + { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN), HID_QUIRK_NO_INIT_REPORTS },
1578 + { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
1579 + { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4D22), HID_QUIRK_ALWAYS_POLL },
1580 ++ { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D0F), HID_QUIRK_ALWAYS_POLL },
1581 ++ { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4E22), HID_QUIRK_ALWAYS_POLL },
1582 + { HID_USB_DEVICE(USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS), HID_QUIRK_NOGET },
1583 + { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001), HID_QUIRK_NOGET },
1584 + { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003), HID_QUIRK_NOGET },
1585 +diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
1586 +index 4e3592e7a3f7..88daa388e1f6 100644
1587 +--- a/drivers/hid/i2c-hid/i2c-hid.c
1588 ++++ b/drivers/hid/i2c-hid/i2c-hid.c
1589 +@@ -48,6 +48,7 @@
1590 + #define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
1591 + #define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1)
1592 + #define I2C_HID_QUIRK_NO_RUNTIME_PM BIT(2)
1593 ++#define I2C_HID_QUIRK_DELAY_AFTER_SLEEP BIT(3)
1594 +
1595 + /* flags */
1596 + #define I2C_HID_STARTED 0
1597 +@@ -157,6 +158,8 @@ struct i2c_hid {
1598 +
1599 + bool irq_wake_enabled;
1600 + struct mutex reset_lock;
1601 ++
1602 ++ unsigned long sleep_delay;
1603 + };
1604 +
1605 + static const struct i2c_hid_quirks {
1606 +@@ -171,6 +174,8 @@ static const struct i2c_hid_quirks {
1607 + { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
1608 + I2C_HID_QUIRK_NO_IRQ_AFTER_RESET |
1609 + I2C_HID_QUIRK_NO_RUNTIME_PM },
1610 ++ { I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_4B33,
1611 ++ I2C_HID_QUIRK_DELAY_AFTER_SLEEP },
1612 + { 0, 0 }
1613 + };
1614 +
1615 +@@ -386,6 +391,7 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state)
1616 + {
1617 + struct i2c_hid *ihid = i2c_get_clientdata(client);
1618 + int ret;
1619 ++ unsigned long now, delay;
1620 +
1621 + i2c_hid_dbg(ihid, "%s\n", __func__);
1622 +
1623 +@@ -403,9 +409,22 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state)
1624 + goto set_pwr_exit;
1625 + }
1626 +
1627 ++ if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP &&
1628 ++ power_state == I2C_HID_PWR_ON) {
1629 ++ now = jiffies;
1630 ++ if (time_after(ihid->sleep_delay, now)) {
1631 ++ delay = jiffies_to_usecs(ihid->sleep_delay - now);
1632 ++ usleep_range(delay, delay + 1);
1633 ++ }
1634 ++ }
1635 ++
1636 + ret = __i2c_hid_command(client, &hid_set_power_cmd, power_state,
1637 + 0, NULL, 0, NULL, 0);
1638 +
1639 ++ if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP &&
1640 ++ power_state == I2C_HID_PWR_SLEEP)
1641 ++ ihid->sleep_delay = jiffies + msecs_to_jiffies(20);
1642 ++
1643 + if (ret)
1644 + dev_err(&client->dev, "failed to change power setting.\n");
1645 +
1646 +diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
1647 +index 3c5507313606..051639c09f72 100644
1648 +--- a/drivers/hid/uhid.c
1649 ++++ b/drivers/hid/uhid.c
1650 +@@ -12,6 +12,7 @@
1651 +
1652 + #include <linux/atomic.h>
1653 + #include <linux/compat.h>
1654 ++#include <linux/cred.h>
1655 + #include <linux/device.h>
1656 + #include <linux/fs.h>
1657 + #include <linux/hid.h>
1658 +@@ -722,6 +723,17 @@ static ssize_t uhid_char_write(struct file *file, const char __user *buffer,
1659 +
1660 + switch (uhid->input_buf.type) {
1661 + case UHID_CREATE:
1662 ++ /*
1663 ++ * 'struct uhid_create_req' contains a __user pointer which is
1664 ++ * copied from, so it's unsafe to allow this with elevated
1665 ++ * privileges (e.g. from a setuid binary) or via kernel_write().
1666 ++ */
1667 ++ if (file->f_cred != current_cred() || uaccess_kernel()) {
1668 ++ pr_err_once("UHID_CREATE from different security context by process %d (%s), this is not allowed.\n",
1669 ++ task_tgid_vnr(current), current->comm);
1670 ++ ret = -EACCES;
1671 ++ goto unlock;
1672 ++ }
1673 + ret = uhid_dev_create(uhid, &uhid->input_buf);
1674 + break;
1675 + case UHID_CREATE2:
1676 +diff --git a/drivers/hwmon/ibmpowernv.c b/drivers/hwmon/ibmpowernv.c
1677 +index 83472808c816..64d05edff130 100644
1678 +--- a/drivers/hwmon/ibmpowernv.c
1679 ++++ b/drivers/hwmon/ibmpowernv.c
1680 +@@ -181,7 +181,7 @@ static ssize_t show_label(struct device *dev, struct device_attribute *devattr,
1681 + return sprintf(buf, "%s\n", sdata->label);
1682 + }
1683 +
1684 +-static int __init get_logical_cpu(int hwcpu)
1685 ++static int get_logical_cpu(int hwcpu)
1686 + {
1687 + int cpu;
1688 +
1689 +@@ -192,9 +192,8 @@ static int __init get_logical_cpu(int hwcpu)
1690 + return -ENOENT;
1691 + }
1692 +
1693 +-static void __init make_sensor_label(struct device_node *np,
1694 +- struct sensor_data *sdata,
1695 +- const char *label)
1696 ++static void make_sensor_label(struct device_node *np,
1697 ++ struct sensor_data *sdata, const char *label)
1698 + {
1699 + u32 id;
1700 + size_t n;
1701 +diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
1702 +index 451d4ae50e66..ac4b09642f63 100644
1703 +--- a/drivers/i2c/busses/Kconfig
1704 ++++ b/drivers/i2c/busses/Kconfig
1705 +@@ -751,7 +751,7 @@ config I2C_OCORES
1706 +
1707 + config I2C_OMAP
1708 + tristate "OMAP I2C adapter"
1709 +- depends on ARCH_OMAP
1710 ++ depends on ARCH_OMAP || ARCH_K3
1711 + default y if MACH_OMAP_H3 || MACH_OMAP_OSK
1712 + help
1713 + If you say yes to this option, support will be included for the
1714 +diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
1715 +index 9f2eb02481d3..d7329177b0ea 100644
1716 +--- a/drivers/i2c/busses/i2c-qcom-geni.c
1717 ++++ b/drivers/i2c/busses/i2c-qcom-geni.c
1718 +@@ -590,18 +590,19 @@ static int geni_i2c_probe(struct platform_device *pdev)
1719 +
1720 + dev_dbg(&pdev->dev, "i2c fifo/se-dma mode. fifo depth:%d\n", tx_depth);
1721 +
1722 +- ret = i2c_add_adapter(&gi2c->adap);
1723 +- if (ret) {
1724 +- dev_err(&pdev->dev, "Error adding i2c adapter %d\n", ret);
1725 +- return ret;
1726 +- }
1727 +-
1728 + gi2c->suspended = 1;
1729 + pm_runtime_set_suspended(gi2c->se.dev);
1730 + pm_runtime_set_autosuspend_delay(gi2c->se.dev, I2C_AUTO_SUSPEND_DELAY);
1731 + pm_runtime_use_autosuspend(gi2c->se.dev);
1732 + pm_runtime_enable(gi2c->se.dev);
1733 +
1734 ++ ret = i2c_add_adapter(&gi2c->adap);
1735 ++ if (ret) {
1736 ++ dev_err(&pdev->dev, "Error adding i2c adapter %d\n", ret);
1737 ++ pm_runtime_disable(gi2c->se.dev);
1738 ++ return ret;
1739 ++ }
1740 ++
1741 + return 0;
1742 + }
1743 +
1744 +@@ -609,8 +610,8 @@ static int geni_i2c_remove(struct platform_device *pdev)
1745 + {
1746 + struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);
1747 +
1748 +- pm_runtime_disable(gi2c->se.dev);
1749 + i2c_del_adapter(&gi2c->adap);
1750 ++ pm_runtime_disable(gi2c->se.dev);
1751 + return 0;
1752 + }
1753 +
1754 +diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
1755 +index a3ef1f50a4b3..481e3c65cf97 100644
1756 +--- a/drivers/media/v4l2-core/v4l2-event.c
1757 ++++ b/drivers/media/v4l2-core/v4l2-event.c
1758 +@@ -193,6 +193,22 @@ int v4l2_event_pending(struct v4l2_fh *fh)
1759 + }
1760 + EXPORT_SYMBOL_GPL(v4l2_event_pending);
1761 +
1762 ++static void __v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev)
1763 ++{
1764 ++ struct v4l2_fh *fh = sev->fh;
1765 ++ unsigned int i;
1766 ++
1767 ++ lockdep_assert_held(&fh->subscribe_lock);
1768 ++ assert_spin_locked(&fh->vdev->fh_lock);
1769 ++
1770 ++ /* Remove any pending events for this subscription */
1771 ++ for (i = 0; i < sev->in_use; i++) {
1772 ++ list_del(&sev->events[sev_pos(sev, i)].list);
1773 ++ fh->navailable--;
1774 ++ }
1775 ++ list_del(&sev->list);
1776 ++}
1777 ++
1778 + int v4l2_event_subscribe(struct v4l2_fh *fh,
1779 + const struct v4l2_event_subscription *sub, unsigned elems,
1780 + const struct v4l2_subscribed_event_ops *ops)
1781 +@@ -224,27 +240,23 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
1782 +
1783 + spin_lock_irqsave(&fh->vdev->fh_lock, flags);
1784 + found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
1785 ++ if (!found_ev)
1786 ++ list_add(&sev->list, &fh->subscribed);
1787 + spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
1788 +
1789 + if (found_ev) {
1790 + /* Already listening */
1791 + kvfree(sev);
1792 +- goto out_unlock;
1793 +- }
1794 +-
1795 +- if (sev->ops && sev->ops->add) {
1796 ++ } else if (sev->ops && sev->ops->add) {
1797 + ret = sev->ops->add(sev, elems);
1798 + if (ret) {
1799 ++ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
1800 ++ __v4l2_event_unsubscribe(sev);
1801 ++ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
1802 + kvfree(sev);
1803 +- goto out_unlock;
1804 + }
1805 + }
1806 +
1807 +- spin_lock_irqsave(&fh->vdev->fh_lock, flags);
1808 +- list_add(&sev->list, &fh->subscribed);
1809 +- spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
1810 +-
1811 +-out_unlock:
1812 + mutex_unlock(&fh->subscribe_lock);
1813 +
1814 + return ret;
1815 +@@ -279,7 +291,6 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
1816 + {
1817 + struct v4l2_subscribed_event *sev;
1818 + unsigned long flags;
1819 +- int i;
1820 +
1821 + if (sub->type == V4L2_EVENT_ALL) {
1822 + v4l2_event_unsubscribe_all(fh);
1823 +@@ -291,14 +302,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
1824 + spin_lock_irqsave(&fh->vdev->fh_lock, flags);
1825 +
1826 + sev = v4l2_event_subscribed(fh, sub->type, sub->id);
1827 +- if (sev != NULL) {
1828 +- /* Remove any pending events for this subscription */
1829 +- for (i = 0; i < sev->in_use; i++) {
1830 +- list_del(&sev->events[sev_pos(sev, i)].list);
1831 +- fh->navailable--;
1832 +- }
1833 +- list_del(&sev->list);
1834 +- }
1835 ++ if (sev != NULL)
1836 ++ __v4l2_event_unsubscribe(sev);
1837 +
1838 + spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
1839 +
1840 +diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
1841 +index b2a0340f277e..d8e3cc2dc747 100644
1842 +--- a/drivers/misc/atmel-ssc.c
1843 ++++ b/drivers/misc/atmel-ssc.c
1844 +@@ -132,7 +132,7 @@ static const struct of_device_id atmel_ssc_dt_ids[] = {
1845 + MODULE_DEVICE_TABLE(of, atmel_ssc_dt_ids);
1846 + #endif
1847 +
1848 +-static inline const struct atmel_ssc_platform_data * __init
1849 ++static inline const struct atmel_ssc_platform_data *
1850 + atmel_ssc_get_driver_data(struct platform_device *pdev)
1851 + {
1852 + if (pdev->dev.of_node) {
1853 +diff --git a/drivers/misc/sgi-gru/grukdump.c b/drivers/misc/sgi-gru/grukdump.c
1854 +index 313da3150262..1540a7785e14 100644
1855 +--- a/drivers/misc/sgi-gru/grukdump.c
1856 ++++ b/drivers/misc/sgi-gru/grukdump.c
1857 +@@ -27,6 +27,9 @@
1858 + #include <linux/delay.h>
1859 + #include <linux/bitops.h>
1860 + #include <asm/uv/uv_hub.h>
1861 ++
1862 ++#include <linux/nospec.h>
1863 ++
1864 + #include "gru.h"
1865 + #include "grutables.h"
1866 + #include "gruhandles.h"
1867 +@@ -196,6 +199,7 @@ int gru_dump_chiplet_request(unsigned long arg)
1868 + /* Currently, only dump by gid is implemented */
1869 + if (req.gid >= gru_max_gids)
1870 + return -EINVAL;
1871 ++ req.gid = array_index_nospec(req.gid, gru_max_gids);
1872 +
1873 + gru = GID_TO_GRU(req.gid);
1874 + ubuf = req.buf;
1875 +diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
1876 +index a594fb1e9a99..32e95af486a2 100644
1877 +--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
1878 ++++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
1879 +@@ -2061,8 +2061,7 @@ atmel_hsmc_nand_controller_legacy_init(struct atmel_hsmc_nand_controller *nc)
1880 + int ret;
1881 +
1882 + nand_np = dev->of_node;
1883 +- nfc_np = of_find_compatible_node(dev->of_node, NULL,
1884 +- "atmel,sama5d3-nfc");
1885 ++ nfc_np = of_get_compatible_child(dev->of_node, "atmel,sama5d3-nfc");
1886 + if (!nfc_np) {
1887 + dev_err(dev, "Could not find device node for sama5d3-nfc\n");
1888 + return -ENODEV;
1889 +@@ -2476,15 +2475,19 @@ static int atmel_nand_controller_probe(struct platform_device *pdev)
1890 + }
1891 +
1892 + if (caps->legacy_of_bindings) {
1893 ++ struct device_node *nfc_node;
1894 + u32 ale_offs = 21;
1895 +
1896 + /*
1897 + * If we are parsing legacy DT props and the DT contains a
1898 + * valid NFC node, forward the request to the sama5 logic.
1899 + */
1900 +- if (of_find_compatible_node(pdev->dev.of_node, NULL,
1901 +- "atmel,sama5d3-nfc"))
1902 ++ nfc_node = of_get_compatible_child(pdev->dev.of_node,
1903 ++ "atmel,sama5d3-nfc");
1904 ++ if (nfc_node) {
1905 + caps = &atmel_sama5_nand_caps;
1906 ++ of_node_put(nfc_node);
1907 ++ }
1908 +
1909 + /*
1910 + * Even if the compatible says we are dealing with an
1911 +diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
1912 +index b939a4c10b84..c89c7d4900d7 100644
1913 +--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
1914 ++++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
1915 +@@ -528,7 +528,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1916 + context = &priv->tx_contexts[i];
1917 +
1918 + context->echo_index = i;
1919 +- can_put_echo_skb(skb, netdev, context->echo_index);
1920 + ++priv->active_tx_contexts;
1921 + if (priv->active_tx_contexts >= (int)dev->max_tx_urbs)
1922 + netif_stop_queue(netdev);
1923 +@@ -553,7 +552,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1924 + dev_kfree_skb(skb);
1925 + spin_lock_irqsave(&priv->tx_contexts_lock, flags);
1926 +
1927 +- can_free_echo_skb(netdev, context->echo_index);
1928 + context->echo_index = dev->max_tx_urbs;
1929 + --priv->active_tx_contexts;
1930 + netif_wake_queue(netdev);
1931 +@@ -564,6 +562,8 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1932 +
1933 + context->priv = priv;
1934 +
1935 ++ can_put_echo_skb(skb, netdev, context->echo_index);
1936 ++
1937 + usb_fill_bulk_urb(urb, dev->udev,
1938 + usb_sndbulkpipe(dev->udev,
1939 + dev->bulk_out->bEndpointAddress),
1940 +diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
1941 +index c084bae5ec0a..5fc0be564274 100644
1942 +--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
1943 ++++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
1944 +@@ -1019,6 +1019,11 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
1945 + new_state : CAN_STATE_ERROR_ACTIVE;
1946 +
1947 + can_change_state(netdev, cf, tx_state, rx_state);
1948 ++
1949 ++ if (priv->can.restart_ms &&
1950 ++ old_state >= CAN_STATE_BUS_OFF &&
1951 ++ new_state < CAN_STATE_BUS_OFF)
1952 ++ cf->can_id |= CAN_ERR_RESTARTED;
1953 + }
1954 +
1955 + if (new_state == CAN_STATE_BUS_OFF) {
1956 +@@ -1028,11 +1033,6 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
1957 +
1958 + can_bus_off(netdev);
1959 + }
1960 +-
1961 +- if (priv->can.restart_ms &&
1962 +- old_state >= CAN_STATE_BUS_OFF &&
1963 +- new_state < CAN_STATE_BUS_OFF)
1964 +- cf->can_id |= CAN_ERR_RESTARTED;
1965 + }
1966 +
1967 + if (!skb) {
1968 +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
1969 +index 26dc6782b475..4f34808f1e06 100644
1970 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
1971 ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
1972 +@@ -590,7 +590,7 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
1973 + }
1974 + }
1975 +
1976 +- if (i > 0 && i < AQ_HW_MULTICAST_ADDRESS_MAX) {
1977 ++ if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
1978 + packet_filter |= IFF_MULTICAST;
1979 + self->mc_list.count = i;
1980 + self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
1981 +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
1982 +index d1e1a0ba8615..7134d0d4cdf7 100644
1983 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
1984 ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
1985 +@@ -172,6 +172,27 @@ bool aq_ring_tx_clean(struct aq_ring_s *self)
1986 + return !!budget;
1987 + }
1988 +
1989 ++static void aq_rx_checksum(struct aq_ring_s *self,
1990 ++ struct aq_ring_buff_s *buff,
1991 ++ struct sk_buff *skb)
1992 ++{
1993 ++ if (!(self->aq_nic->ndev->features & NETIF_F_RXCSUM))
1994 ++ return;
1995 ++
1996 ++ if (unlikely(buff->is_cso_err)) {
1997 ++ ++self->stats.rx.errors;
1998 ++ skb->ip_summed = CHECKSUM_NONE;
1999 ++ return;
2000 ++ }
2001 ++ if (buff->is_ip_cso) {
2002 ++ __skb_incr_checksum_unnecessary(skb);
2003 ++ if (buff->is_udp_cso || buff->is_tcp_cso)
2004 ++ __skb_incr_checksum_unnecessary(skb);
2005 ++ } else {
2006 ++ skb->ip_summed = CHECKSUM_NONE;
2007 ++ }
2008 ++}
2009 ++
2010 + #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
2011 + int aq_ring_rx_clean(struct aq_ring_s *self,
2012 + struct napi_struct *napi,
2013 +@@ -267,18 +288,8 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
2014 + }
2015 +
2016 + skb->protocol = eth_type_trans(skb, ndev);
2017 +- if (unlikely(buff->is_cso_err)) {
2018 +- ++self->stats.rx.errors;
2019 +- skb->ip_summed = CHECKSUM_NONE;
2020 +- } else {
2021 +- if (buff->is_ip_cso) {
2022 +- __skb_incr_checksum_unnecessary(skb);
2023 +- if (buff->is_udp_cso || buff->is_tcp_cso)
2024 +- __skb_incr_checksum_unnecessary(skb);
2025 +- } else {
2026 +- skb->ip_summed = CHECKSUM_NONE;
2027 +- }
2028 +- }
2029 ++
2030 ++ aq_rx_checksum(self, buff, skb);
2031 +
2032 + skb_set_hash(skb, buff->rss_hash,
2033 + buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
2034 +diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
2035 +index 1d44a386e7d3..88705dee5b95 100644
2036 +--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
2037 ++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
2038 +@@ -655,9 +655,9 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
2039 + struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
2040 + &ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE];
2041 +
2042 +- unsigned int is_err = 1U;
2043 + unsigned int is_rx_check_sum_enabled = 0U;
2044 + unsigned int pkt_type = 0U;
2045 ++ u8 rx_stat = 0U;
2046 +
2047 + if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */
2048 + break;
2049 +@@ -665,35 +665,35 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
2050 +
2051 + buff = &ring->buff_ring[ring->hw_head];
2052 +
2053 +- is_err = (0x0000003CU & rxd_wb->status);
2054 ++ rx_stat = (0x0000003CU & rxd_wb->status) >> 2;
2055 +
2056 + is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19);
2057 +- is_err &= ~0x20U; /* exclude validity bit */
2058 +
2059 + pkt_type = 0xFFU & (rxd_wb->type >> 4);
2060 +
2061 +- if (is_rx_check_sum_enabled) {
2062 +- if (0x0U == (pkt_type & 0x3U))
2063 +- buff->is_ip_cso = (is_err & 0x08U) ? 0U : 1U;
2064 ++ if (is_rx_check_sum_enabled & BIT(0) &&
2065 ++ (0x0U == (pkt_type & 0x3U)))
2066 ++ buff->is_ip_cso = (rx_stat & BIT(1)) ? 0U : 1U;
2067 +
2068 ++ if (is_rx_check_sum_enabled & BIT(1)) {
2069 + if (0x4U == (pkt_type & 0x1CU))
2070 +- buff->is_udp_cso = buff->is_cso_err ? 0U : 1U;
2071 ++ buff->is_udp_cso = (rx_stat & BIT(2)) ? 0U :
2072 ++ !!(rx_stat & BIT(3));
2073 + else if (0x0U == (pkt_type & 0x1CU))
2074 +- buff->is_tcp_cso = buff->is_cso_err ? 0U : 1U;
2075 +-
2076 +- /* Checksum offload workaround for small packets */
2077 +- if (rxd_wb->pkt_len <= 60) {
2078 +- buff->is_ip_cso = 0U;
2079 +- buff->is_cso_err = 0U;
2080 +- }
2081 ++ buff->is_tcp_cso = (rx_stat & BIT(2)) ? 0U :
2082 ++ !!(rx_stat & BIT(3));
2083 ++ }
2084 ++ buff->is_cso_err = !!(rx_stat & 0x6);
2085 ++ /* Checksum offload workaround for small packets */
2086 ++ if (unlikely(rxd_wb->pkt_len <= 60)) {
2087 ++ buff->is_ip_cso = 0U;
2088 ++ buff->is_cso_err = 0U;
2089 + }
2090 +-
2091 +- is_err &= ~0x18U;
2092 +
2093 + dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE);
2094 +
2095 +- if (is_err || rxd_wb->type & 0x1000U) {
2096 +- /* status error or DMA error */
2097 ++ if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) {
2098 ++ /* MAC error or DMA error */
2099 + buff->is_error = 1U;
2100 + } else {
2101 + if (self->aq_nic_cfg->is_rss) {
2102 +@@ -915,6 +915,12 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
2103 + static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
2104 + {
2105 + hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
2106 ++
2107 ++ /* Invalidate Descriptor Cache to prevent writing to the cached
2108 ++ * descriptors and to the data pointer of those descriptors
2109 ++ */
2110 ++ hw_atl_rdm_rx_dma_desc_cache_init_set(self, 1);
2111 ++
2112 + return aq_hw_err_from_flags(self);
2113 + }
2114 +
2115 +diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
2116 +index 10ba035dadb1..10ec5dc88e24 100644
2117 +--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
2118 ++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
2119 +@@ -619,6 +619,14 @@ void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode
2120 + HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode);
2121 + }
2122 +
2123 ++void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init)
2124 ++{
2125 ++ aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR,
2126 ++ HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK,
2127 ++ HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT,
2128 ++ init);
2129 ++}
2130 ++
2131 + void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
2132 + u32 rx_pkt_buff_size_per_tc, u32 buffer)
2133 + {
2134 +diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
2135 +index dfb426f2dc2c..b3bf64b48b93 100644
2136 +--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
2137 ++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
2138 +@@ -325,6 +325,9 @@ void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
2139 + u32 rx_pkt_buff_size_per_tc,
2140 + u32 buffer);
2141 +
2142 ++/* set rdm rx dma descriptor cache init */
2143 ++void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init);
2144 ++
2145 + /* set rx xoff enable (per tc) */
2146 + void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
2147 + u32 buffer);
2148 +diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
2149 +index e0cf70120f1d..e2ecdb1c5a5c 100644
2150 +--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
2151 ++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
2152 +@@ -293,6 +293,24 @@
2153 + /* default value of bitfield desc{d}_reset */
2154 + #define HW_ATL_RDM_DESCDRESET_DEFAULT 0x0
2155 +
2156 ++/* rdm_desc_init_i bitfield definitions
2157 ++ * preprocessor definitions for the bitfield rdm_desc_init_i.
2158 ++ * port="pif_rdm_desc_init_i"
2159 ++ */
2160 ++
2161 ++/* register address for bitfield rdm_desc_init_i */
2162 ++#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR 0x00005a00
2163 ++/* bitmask for bitfield rdm_desc_init_i */
2164 ++#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK 0xffffffff
2165 ++/* inverted bitmask for bitfield rdm_desc_init_i */
2166 ++#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSKN 0x00000000
2167 ++/* lower bit position of bitfield rdm_desc_init_i */
2168 ++#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT 0
2169 ++/* width of bitfield rdm_desc_init_i */
2170 ++#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_WIDTH 32
2171 ++/* default value of bitfield rdm_desc_init_i */
2172 ++#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_DEFAULT 0x0
2173 ++
2174 + /* rx int_desc_wrb_en bitfield definitions
2175 + * preprocessor definitions for the bitfield "int_desc_wrb_en".
2176 + * port="pif_rdm_int_desc_wrb_en_i"
2177 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
2178 +index ac13cb2b168e..68026a5ad7e7 100644
2179 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
2180 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
2181 +@@ -304,6 +304,10 @@ int hclge_cmd_queue_init(struct hclge_dev *hdev)
2182 + {
2183 + int ret;
2184 +
2185 ++ /* Setup the lock for command queue */
2186 ++ spin_lock_init(&hdev->hw.cmq.csq.lock);
2187 ++ spin_lock_init(&hdev->hw.cmq.crq.lock);
2188 ++
2189 + /* Setup the queue entries for use cmd queue */
2190 + hdev->hw.cmq.csq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
2191 + hdev->hw.cmq.crq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
2192 +@@ -337,18 +341,20 @@ int hclge_cmd_init(struct hclge_dev *hdev)
2193 + u32 version;
2194 + int ret;
2195 +
2196 ++ spin_lock_bh(&hdev->hw.cmq.csq.lock);
2197 ++ spin_lock_bh(&hdev->hw.cmq.crq.lock);
2198 ++
2199 + hdev->hw.cmq.csq.next_to_clean = 0;
2200 + hdev->hw.cmq.csq.next_to_use = 0;
2201 + hdev->hw.cmq.crq.next_to_clean = 0;
2202 + hdev->hw.cmq.crq.next_to_use = 0;
2203 +
2204 +- /* Setup the lock for command queue */
2205 +- spin_lock_init(&hdev->hw.cmq.csq.lock);
2206 +- spin_lock_init(&hdev->hw.cmq.crq.lock);
2207 +-
2208 + hclge_cmd_init_regs(&hdev->hw);
2209 + clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2210 +
2211 ++ spin_unlock_bh(&hdev->hw.cmq.crq.lock);
2212 ++ spin_unlock_bh(&hdev->hw.cmq.csq.lock);
2213 ++
2214 + ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
2215 + if (ret) {
2216 + dev_err(&hdev->pdev->dev,
2217 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
2218 +index db763450e5e3..340baf6a470c 100644
2219 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
2220 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
2221 +@@ -2804,14 +2804,17 @@ static void hclge_reset(struct hclge_dev *hdev)
2222 + handle = &hdev->vport[0].nic;
2223 + rtnl_lock();
2224 + hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2225 ++ rtnl_unlock();
2226 +
2227 + if (!hclge_reset_wait(hdev)) {
2228 ++ rtnl_lock();
2229 + hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
2230 + hclge_reset_ae_dev(hdev->ae_dev);
2231 + hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
2232 +
2233 + hclge_clear_reset_cause(hdev);
2234 + } else {
2235 ++ rtnl_lock();
2236 + /* schedule again to check pending resets later */
2237 + set_bit(hdev->reset_type, &hdev->reset_pending);
2238 + hclge_reset_task_schedule(hdev);
2239 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
2240 +index f34851c91eb3..e08e82020402 100644
2241 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
2242 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
2243 +@@ -458,6 +458,12 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
2244 +
2245 + /* handle all the mailbox requests in the queue */
2246 + while (!hclge_cmd_crq_empty(&hdev->hw)) {
2247 ++ if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
2248 ++ dev_warn(&hdev->pdev->dev,
2249 ++ "command queue needs re-initializing\n");
2250 ++ return;
2251 ++ }
2252 ++
2253 + desc = &crq->desc[crq->next_to_use];
2254 + req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;
2255 +
2256 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
2257 +index 5db70a1451c5..48235dc2dd56 100644
2258 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
2259 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
2260 +@@ -1167,14 +1167,14 @@ static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
2261 + */
2262 + static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
2263 + {
2264 +- struct hclge_vport *vport = hdev->vport;
2265 +- u32 i, k, qs_bitmap;
2266 +- int ret;
2267 ++ int i;
2268 +
2269 + for (i = 0; i < HCLGE_BP_GRP_NUM; i++) {
2270 +- qs_bitmap = 0;
2271 ++ u32 qs_bitmap = 0;
2272 ++ int k, ret;
2273 +
2274 + for (k = 0; k < hdev->num_alloc_vport; k++) {
2275 ++ struct hclge_vport *vport = &hdev->vport[k];
2276 + u16 qs_id = vport->qs_offset + tc;
2277 + u8 grp, sub_grp;
2278 +
2279 +@@ -1184,8 +1184,6 @@ static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
2280 + HCLGE_BP_SUB_GRP_ID_S);
2281 + if (i == grp)
2282 + qs_bitmap |= (1 << sub_grp);
2283 +-
2284 +- vport++;
2285 + }
2286 +
2287 + ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
2288 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
2289 +index 320043e87fc6..5570fb5dc2eb 100644
2290 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
2291 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
2292 +@@ -1065,6 +1065,8 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
2293 + /* bring down the nic to stop any ongoing TX/RX */
2294 + hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
2295 +
2296 ++ rtnl_unlock();
2297 ++
2298 + /* check if VF could successfully fetch the hardware reset completion
2299 + * status from the hardware
2300 + */
2301 +@@ -1076,12 +1078,15 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
2302 + ret);
2303 +
2304 + dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n");
2305 ++ rtnl_lock();
2306 + hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
2307 +
2308 + rtnl_unlock();
2309 + return ret;
2310 + }
2311 +
2312 ++ rtnl_lock();
2313 ++
2314 + /* now, re-initialize the nic client and ae device*/
2315 + ret = hclgevf_reset_stack(hdev);
2316 + if (ret)
2317 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
2318 +index ac685ad4d877..6eccfa82ca94 100644
2319 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
2320 ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
2321 +@@ -11926,6 +11926,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
2322 + NETIF_F_GSO_GRE |
2323 + NETIF_F_GSO_GRE_CSUM |
2324 + NETIF_F_GSO_PARTIAL |
2325 ++ NETIF_F_GSO_IPXIP4 |
2326 ++ NETIF_F_GSO_IPXIP6 |
2327 + NETIF_F_GSO_UDP_TUNNEL |
2328 + NETIF_F_GSO_UDP_TUNNEL_CSUM |
2329 + NETIF_F_SCTP_CRC |
2330 +diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
2331 +index 9d6754f65a1a..4c5c87b158f5 100644
2332 +--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
2333 ++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
2334 +@@ -797,10 +797,15 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
2335 + }
2336 +
2337 + if (!test_bit(__ICE_DOWN, pf->state)) {
2338 +- /* Give it a little more time to try to come back */
2339 ++ /* Give it a little more time to try to come back. If still
2340 ++ * down, restart autoneg link or reinitialize the interface.
2341 ++ */
2342 + msleep(75);
2343 + if (!test_bit(__ICE_DOWN, pf->state))
2344 + return ice_nway_reset(netdev);
2345 ++
2346 ++ ice_down(vsi);
2347 ++ ice_up(vsi);
2348 + }
2349 +
2350 + return err;
2351 +diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
2352 +index 6481e3d86374..0c95c8f83432 100644
2353 +--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
2354 ++++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
2355 +@@ -1519,7 +1519,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
2356 +
2357 + /* update gso_segs and bytecount */
2358 + first->gso_segs = skb_shinfo(skb)->gso_segs;
2359 +- first->bytecount = (first->gso_segs - 1) * off->header_len;
2360 ++ first->bytecount += (first->gso_segs - 1) * off->header_len;
2361 +
2362 + cd_tso_len = skb->len - off->header_len;
2363 + cd_mss = skb_shinfo(skb)->gso_size;
2364 +diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
2365 +index 97c366e0ca59..ba11b5898833 100644
2366 +--- a/drivers/net/ethernet/intel/ice/ice_type.h
2367 ++++ b/drivers/net/ethernet/intel/ice/ice_type.h
2368 +@@ -83,12 +83,12 @@ struct ice_link_status {
2369 + u64 phy_type_low;
2370 + u16 max_frame_size;
2371 + u16 link_speed;
2372 ++ u16 req_speeds;
2373 + u8 lse_ena; /* Link Status Event notification */
2374 + u8 link_info;
2375 + u8 an_info;
2376 + u8 ext_info;
2377 + u8 pacing;
2378 +- u8 req_speeds;
2379 + /* Refer to #define from module_type[ICE_MODULE_TYPE_TOTAL_BYTE] of
2380 + * ice_aqc_get_phy_caps structure
2381 + */
2382 +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
2383 +index 3c6f01c41b78..eea63a99f29c 100644
2384 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
2385 ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
2386 +@@ -721,8 +721,10 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
2387 + ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
2388 + adapter->default_up, vf);
2389 +
2390 +- if (vfinfo->spoofchk_enabled)
2391 ++ if (vfinfo->spoofchk_enabled) {
2392 + hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
2393 ++ hw->mac.ops.set_mac_anti_spoofing(hw, true, vf);
2394 ++ }
2395 + }
2396 +
2397 + /* reset multicast table array for vf */
2398 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
2399 +index cc1b373c0ace..46dc93d3b9b5 100644
2400 +--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
2401 ++++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
2402 +@@ -147,7 +147,8 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
2403 + "Cannot satisfy CQ amount. CQs requested %d, CQs available %d. Aborting function start\n",
2404 + fcoe_pf_params->num_cqs,
2405 + p_hwfn->hw_info.feat_num[QED_FCOE_CQ]);
2406 +- return -EINVAL;
2407 ++ rc = -EINVAL;
2408 ++ goto err;
2409 + }
2410 +
2411 + p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu);
2412 +@@ -156,14 +157,14 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
2413 +
2414 + rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid);
2415 + if (rc)
2416 +- return rc;
2417 ++ goto err;
2418 +
2419 + cxt_info.iid = dummy_cid;
2420 + rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
2421 + if (rc) {
2422 + DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n",
2423 + dummy_cid);
2424 +- return rc;
2425 ++ goto err;
2426 + }
2427 + p_cxt = cxt_info.p_cxt;
2428 + SET_FIELD(p_cxt->tstorm_ag_context.flags3,
2429 +@@ -240,6 +241,10 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
2430 + rc = qed_spq_post(p_hwfn, p_ent, NULL);
2431 +
2432 + return rc;
2433 ++
2434 ++err:
2435 ++ qed_sp_destroy_request(p_hwfn, p_ent);
2436 ++ return rc;
2437 + }
2438 +
2439 + static int
2440 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
2441 +index 1135387bd99d..4f8a685d1a55 100644
2442 +--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
2443 ++++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
2444 +@@ -200,6 +200,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
2445 + "Cannot satisfy CQ amount. Queues requested %d, CQs available %d. Aborting function start\n",
2446 + p_params->num_queues,
2447 + p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]);
2448 ++ qed_sp_destroy_request(p_hwfn, p_ent);
2449 + return -EINVAL;
2450 + }
2451 +
2452 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
2453 +index 82a1bd1f8a8c..67c02ea93906 100644
2454 +--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
2455 ++++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
2456 +@@ -740,8 +740,7 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
2457 +
2458 + rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
2459 + if (rc) {
2460 +- /* Return spq entry which is taken in qed_sp_init_request()*/
2461 +- qed_spq_return_entry(p_hwfn, p_ent);
2462 ++ qed_sp_destroy_request(p_hwfn, p_ent);
2463 + return rc;
2464 + }
2465 +
2466 +@@ -1355,6 +1354,7 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
2467 + DP_NOTICE(p_hwfn,
2468 + "%d is not supported yet\n",
2469 + p_filter_cmd->opcode);
2470 ++ qed_sp_destroy_request(p_hwfn, *pp_ent);
2471 + return -EINVAL;
2472 + }
2473 +
2474 +@@ -2056,13 +2056,13 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
2475 + } else {
2476 + rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
2477 + if (rc)
2478 +- return rc;
2479 ++ goto err;
2480 +
2481 + if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
2482 + rc = qed_fw_l2_queue(p_hwfn, p_params->qid,
2483 + &abs_rx_q_id);
2484 + if (rc)
2485 +- return rc;
2486 ++ goto err;
2487 +
2488 + p_ramrod->rx_qid_valid = 1;
2489 + p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id);
2490 +@@ -2083,6 +2083,10 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
2491 + (u64)p_params->addr, p_params->length);
2492 +
2493 + return qed_spq_post(p_hwfn, p_ent, NULL);
2494 ++
2495 ++err:
2496 ++ qed_sp_destroy_request(p_hwfn, p_ent);
2497 ++ return rc;
2498 + }
2499 +
2500 + int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
2501 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
2502 +index c71391b9c757..62113438c880 100644
2503 +--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
2504 ++++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
2505 +@@ -1514,6 +1514,7 @@ qed_rdma_register_tid(void *rdma_cxt,
2506 + default:
2507 + rc = -EINVAL;
2508 + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
2509 ++ qed_sp_destroy_request(p_hwfn, p_ent);
2510 + return rc;
2511 + }
2512 + SET_FIELD(p_ramrod->flags1,
2513 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
2514 +index f9167d1354bb..e49fada85410 100644
2515 +--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
2516 ++++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
2517 +@@ -745,6 +745,7 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
2518 + DP_NOTICE(p_hwfn,
2519 + "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
2520 + rc);
2521 ++ qed_sp_destroy_request(p_hwfn, p_ent);
2522 + return rc;
2523 + }
2524 +
2525 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
2526 +index e95431f6acd4..3157c0d99441 100644
2527 +--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
2528 ++++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
2529 +@@ -167,6 +167,9 @@ struct qed_spq_entry {
2530 + enum spq_mode comp_mode;
2531 + struct qed_spq_comp_cb comp_cb;
2532 + struct qed_spq_comp_done comp_done; /* SPQ_MODE_EBLOCK */
2533 ++
2534 ++ /* Posted entry for unlimited list entry in EBLOCK mode */
2535 ++ struct qed_spq_entry *post_ent;
2536 + };
2537 +
2538 + struct qed_eq {
2539 +@@ -396,6 +399,17 @@ struct qed_sp_init_data {
2540 + struct qed_spq_comp_cb *p_comp_data;
2541 + };
2542 +
2543 ++/**
2544 ++ * @brief Returns a SPQ entry to the pool / frees the entry if allocated.
2545 ++ * Should be called on in error flows after initializing the SPQ entry
2546 ++ * and before posting it.
2547 ++ *
2548 ++ * @param p_hwfn
2549 ++ * @param p_ent
2550 ++ */
2551 ++void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
2552 ++ struct qed_spq_entry *p_ent);
2553 ++
2554 + int qed_sp_init_request(struct qed_hwfn *p_hwfn,
2555 + struct qed_spq_entry **pp_ent,
2556 + u8 cmd,
2557 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
2558 +index 77b6248ad3b9..888274fa208b 100644
2559 +--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
2560 ++++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
2561 +@@ -47,6 +47,19 @@
2562 + #include "qed_sp.h"
2563 + #include "qed_sriov.h"
2564 +
2565 ++void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
2566 ++ struct qed_spq_entry *p_ent)
2567 ++{
2568 ++ /* qed_spq_get_entry() can either get an entry from the free_pool,
2569 ++ * or, if no entries are left, allocate a new entry and add it to
2570 ++ * the unlimited_pending list.
2571 ++ */
2572 ++ if (p_ent->queue == &p_hwfn->p_spq->unlimited_pending)
2573 ++ kfree(p_ent);
2574 ++ else
2575 ++ qed_spq_return_entry(p_hwfn, p_ent);
2576 ++}
2577 ++
2578 + int qed_sp_init_request(struct qed_hwfn *p_hwfn,
2579 + struct qed_spq_entry **pp_ent,
2580 + u8 cmd, u8 protocol, struct qed_sp_init_data *p_data)
2581 +@@ -80,7 +93,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
2582 +
2583 + case QED_SPQ_MODE_BLOCK:
2584 + if (!p_data->p_comp_data)
2585 +- return -EINVAL;
2586 ++ goto err;
2587 +
2588 + p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
2589 + break;
2590 +@@ -95,7 +108,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
2591 + default:
2592 + DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
2593 + p_ent->comp_mode);
2594 +- return -EINVAL;
2595 ++ goto err;
2596 + }
2597 +
2598 + DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
2599 +@@ -109,6 +122,11 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
2600 + memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));
2601 +
2602 + return 0;
2603 ++
2604 ++err:
2605 ++ qed_sp_destroy_request(p_hwfn, p_ent);
2606 ++
2607 ++ return -EINVAL;
2608 + }
2609 +
2610 + static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type)
2611 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
2612 +index 1673fc90027f..7106ad17afe2 100644
2613 +--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
2614 ++++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
2615 +@@ -142,6 +142,7 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
2616 +
2617 + DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
2618 + rc = qed_mcp_drain(p_hwfn, p_ptt);
2619 ++ qed_ptt_release(p_hwfn, p_ptt);
2620 + if (rc) {
2621 + DP_NOTICE(p_hwfn, "MCP drain failed\n");
2622 + goto err;
2623 +@@ -150,18 +151,15 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
2624 + /* Retry after drain */
2625 + rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
2626 + if (!rc)
2627 +- goto out;
2628 ++ return 0;
2629 +
2630 + comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
2631 +- if (comp_done->done == 1)
2632 ++ if (comp_done->done == 1) {
2633 + if (p_fw_ret)
2634 + *p_fw_ret = comp_done->fw_return_code;
2635 +-out:
2636 +- qed_ptt_release(p_hwfn, p_ptt);
2637 +- return 0;
2638 +-
2639 ++ return 0;
2640 ++ }
2641 + err:
2642 +- qed_ptt_release(p_hwfn, p_ptt);
2643 + DP_NOTICE(p_hwfn,
2644 + "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
2645 + le32_to_cpu(p_ent->elem.hdr.cid),
2646 +@@ -685,6 +683,8 @@ static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
2647 + /* EBLOCK responsible to free the allocated p_ent */
2648 + if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
2649 + kfree(p_ent);
2650 ++ else
2651 ++ p_ent->post_ent = p_en2;
2652 +
2653 + p_ent = p_en2;
2654 + }
2655 +@@ -768,6 +768,25 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
2656 + SPQ_HIGH_PRI_RESERVE_DEFAULT);
2657 + }
2658 +
2659 ++/* Avoid overriding of SPQ entries when getting out-of-order completions, by
2660 ++ * marking the completions in a bitmap and increasing the chain consumer only
2661 ++ * for the first successive completed entries.
2662 ++ */
2663 ++static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo)
2664 ++{
2665 ++ u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
2666 ++ struct qed_spq *p_spq = p_hwfn->p_spq;
2667 ++
2668 ++ __set_bit(pos, p_spq->p_comp_bitmap);
2669 ++ while (test_bit(p_spq->comp_bitmap_idx,
2670 ++ p_spq->p_comp_bitmap)) {
2671 ++ __clear_bit(p_spq->comp_bitmap_idx,
2672 ++ p_spq->p_comp_bitmap);
2673 ++ p_spq->comp_bitmap_idx++;
2674 ++ qed_chain_return_produced(&p_spq->chain);
2675 ++ }
2676 ++}
2677 ++
2678 + int qed_spq_post(struct qed_hwfn *p_hwfn,
2679 + struct qed_spq_entry *p_ent, u8 *fw_return_code)
2680 + {
2681 +@@ -825,11 +844,12 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
2682 + p_ent->queue == &p_spq->unlimited_pending);
2683 +
2684 + if (p_ent->queue == &p_spq->unlimited_pending) {
2685 +- /* This is an allocated p_ent which does not need to
2686 +- * return to pool.
2687 +- */
2688 ++ struct qed_spq_entry *p_post_ent = p_ent->post_ent;
2689 ++
2690 + kfree(p_ent);
2691 +- return rc;
2692 ++
2693 ++ /* Return the entry which was actually posted */
2694 ++ p_ent = p_post_ent;
2695 + }
2696 +
2697 + if (rc)
2698 +@@ -843,7 +863,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
2699 + spq_post_fail2:
2700 + spin_lock_bh(&p_spq->lock);
2701 + list_del(&p_ent->list);
2702 +- qed_chain_return_produced(&p_spq->chain);
2703 ++ qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo);
2704 +
2705 + spq_post_fail:
2706 + /* return to the free pool */
2707 +@@ -875,25 +895,8 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
2708 + spin_lock_bh(&p_spq->lock);
2709 + list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
2710 + if (p_ent->elem.hdr.echo == echo) {
2711 +- u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
2712 +-
2713 + list_del(&p_ent->list);
2714 +-
2715 +- /* Avoid overriding of SPQ entries when getting
2716 +- * out-of-order completions, by marking the completions
2717 +- * in a bitmap and increasing the chain consumer only
2718 +- * for the first successive completed entries.
2719 +- */
2720 +- __set_bit(pos, p_spq->p_comp_bitmap);
2721 +-
2722 +- while (test_bit(p_spq->comp_bitmap_idx,
2723 +- p_spq->p_comp_bitmap)) {
2724 +- __clear_bit(p_spq->comp_bitmap_idx,
2725 +- p_spq->p_comp_bitmap);
2726 +- p_spq->comp_bitmap_idx++;
2727 +- qed_chain_return_produced(&p_spq->chain);
2728 +- }
2729 +-
2730 ++ qed_spq_comp_bmap_update(p_hwfn, echo);
2731 + p_spq->comp_count++;
2732 + found = p_ent;
2733 + break;
2734 +@@ -932,11 +935,9 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
2735 + QED_MSG_SPQ,
2736 + "Got a completion without a callback function\n");
2737 +
2738 +- if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
2739 +- (found->queue == &p_spq->unlimited_pending))
2740 ++ if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
2741 + /* EBLOCK is responsible for returning its own entry into the
2742 +- * free list, unless it originally added the entry into the
2743 +- * unlimited pending list.
2744 ++ * free list.
2745 + */
2746 + qed_spq_return_entry(p_hwfn, found);
2747 +
2748 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
2749 +index 9b08a9d9e151..ca6290fa0f30 100644
2750 +--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
2751 ++++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
2752 +@@ -101,6 +101,7 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
2753 + default:
2754 + DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
2755 + p_hwfn->hw_info.personality);
2756 ++ qed_sp_destroy_request(p_hwfn, p_ent);
2757 + return -EINVAL;
2758 + }
2759 +
2760 +diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
2761 +index b1b305f8f414..272b9ca66314 100644
2762 +--- a/drivers/net/ethernet/stmicro/stmmac/common.h
2763 ++++ b/drivers/net/ethernet/stmicro/stmmac/common.h
2764 +@@ -365,7 +365,8 @@ struct dma_features {
2765 +
2766 + /* GMAC TX FIFO is 8K, Rx FIFO is 16K */
2767 + #define BUF_SIZE_16KiB 16384
2768 +-#define BUF_SIZE_8KiB 8192
2769 ++/* RX Buffer size must be < 8191 and multiple of 4/8/16 bytes */
2770 ++#define BUF_SIZE_8KiB 8188
2771 + #define BUF_SIZE_4KiB 4096
2772 + #define BUF_SIZE_2KiB 2048
2773 +
2774 +diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
2775 +index ca9d7e48034c..40d6356a7e73 100644
2776 +--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
2777 ++++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
2778 +@@ -31,7 +31,7 @@
2779 + /* Enhanced descriptors */
2780 + static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
2781 + {
2782 +- p->des1 |= cpu_to_le32(((BUF_SIZE_8KiB - 1)
2783 ++ p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
2784 + << ERDES1_BUFFER2_SIZE_SHIFT)
2785 + & ERDES1_BUFFER2_SIZE_MASK);
2786 +
2787 +diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
2788 +index 77914c89d749..5ef91a790f9d 100644
2789 +--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
2790 ++++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
2791 +@@ -262,7 +262,7 @@ static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
2792 + int mode, int end)
2793 + {
2794 + p->des0 |= cpu_to_le32(RDES0_OWN);
2795 +- p->des1 |= cpu_to_le32((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK);
2796 ++ p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK);
2797 +
2798 + if (mode == STMMAC_CHAIN_MODE)
2799 + ehn_desc_rx_set_on_chain(p);
2800 +diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
2801 +index a7ffc73fffe8..bc83ced94e1b 100644
2802 +--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
2803 ++++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
2804 +@@ -140,7 +140,7 @@ static void clean_desc3(void *priv_ptr, struct dma_desc *p)
2805 + static int set_16kib_bfsize(int mtu)
2806 + {
2807 + int ret = 0;
2808 +- if (unlikely(mtu >= BUF_SIZE_8KiB))
2809 ++ if (unlikely(mtu > BUF_SIZE_8KiB))
2810 + ret = BUF_SIZE_16KiB;
2811 + return ret;
2812 + }
2813 +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
2814 +index 5da7bfbe907f..14e8c575f6c3 100644
2815 +--- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
2816 ++++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
2817 +@@ -757,10 +757,10 @@ __mt76x0_phy_set_channel(struct mt76x0_dev *dev,
2818 + /* Vendor driver don't do it */
2819 + /* mt76x0_phy_set_tx_power(dev, channel, rf_bw_band); */
2820 +
2821 ++ mt76x0_vco_cal(dev, channel);
2822 + if (scan)
2823 +- mt76x0_vco_cal(dev, channel);
2824 ++ mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1);
2825 +
2826 +- mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1);
2827 + mt76x0_phy_set_chan_pwr(dev, channel);
2828 +
2829 + dev->mt76.chandef = *chandef;
2830 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
2831 +index 6bb9908bf46f..0ba301f7e8b4 100644
2832 +--- a/drivers/nvme/host/core.c
2833 ++++ b/drivers/nvme/host/core.c
2834 +@@ -1519,8 +1519,10 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
2835 + if (ns->ndev)
2836 + nvme_nvm_update_nvm_info(ns);
2837 + #ifdef CONFIG_NVME_MULTIPATH
2838 +- if (ns->head->disk)
2839 ++ if (ns->head->disk) {
2840 + nvme_update_disk_info(ns->head->disk, ns, id);
2841 ++ blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
2842 ++ }
2843 + #endif
2844 + }
2845 +
2846 +diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
2847 +index b71c9ad1bf45..c27af277e14e 100644
2848 +--- a/drivers/nvme/host/multipath.c
2849 ++++ b/drivers/nvme/host/multipath.c
2850 +@@ -257,6 +257,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
2851 + blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
2852 + /* set to a default value for 512 until disk is validated */
2853 + blk_queue_logical_block_size(q, 512);
2854 ++ blk_set_stacking_limits(&q->limits);
2855 +
2856 + /* we need to propagate up the VMC settings */
2857 + if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
2858 +diff --git a/drivers/of/device.c b/drivers/of/device.c
2859 +index 5957cd4fa262..40b9051a7fce 100644
2860 +--- a/drivers/of/device.c
2861 ++++ b/drivers/of/device.c
2862 +@@ -149,9 +149,11 @@ int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma)
2863 + * set by the driver.
2864 + */
2865 + mask = DMA_BIT_MASK(ilog2(dma_addr + size - 1) + 1);
2866 +- dev->bus_dma_mask = mask;
2867 + dev->coherent_dma_mask &= mask;
2868 + *dev->dma_mask &= mask;
2869 ++ /* ...but only set bus mask if we found valid dma-ranges earlier */
2870 ++ if (!ret)
2871 ++ dev->bus_dma_mask = mask;
2872 +
2873 + coherent = of_dma_is_coherent(np);
2874 + dev_dbg(dev, "device is%sdma coherent\n",
2875 +diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
2876 +index ea22591ee66f..53dfe67807e3 100644
2877 +--- a/drivers/platform/x86/acerhdf.c
2878 ++++ b/drivers/platform/x86/acerhdf.c
2879 +@@ -233,6 +233,7 @@ static const struct bios_settings bios_tbl[] = {
2880 + {"Gateway", "LT31", "v1.3201", 0x55, 0x58, {0x9e, 0x00}, 0},
2881 + {"Gateway", "LT31", "v1.3302", 0x55, 0x58, {0x9e, 0x00}, 0},
2882 + {"Gateway", "LT31", "v1.3303t", 0x55, 0x58, {0x9e, 0x00}, 0},
2883 ++ {"Gateway", "LT31", "v1.3307", 0x55, 0x58, {0x9e, 0x00}, 0},
2884 + /* Packard Bell */
2885 + {"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x00}, 0},
2886 + {"Packard Bell", "DOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00}, 0},
2887 +diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c b/drivers/platform/x86/intel_telemetry_debugfs.c
2888 +index ffd0474b0531..1423fa8710fd 100644
2889 +--- a/drivers/platform/x86/intel_telemetry_debugfs.c
2890 ++++ b/drivers/platform/x86/intel_telemetry_debugfs.c
2891 +@@ -951,12 +951,16 @@ static int __init telemetry_debugfs_init(void)
2892 + debugfs_conf = (struct telemetry_debugfs_conf *)id->driver_data;
2893 +
2894 + err = telemetry_pltconfig_valid();
2895 +- if (err < 0)
2896 ++ if (err < 0) {
2897 ++ pr_info("Invalid pltconfig, ensure IPC1 device is enabled in BIOS\n");
2898 + return -ENODEV;
2899 ++ }
2900 +
2901 + err = telemetry_debugfs_check_evts();
2902 +- if (err < 0)
2903 ++ if (err < 0) {
2904 ++ pr_info("telemetry_debugfs_check_evts failed\n");
2905 + return -EINVAL;
2906 ++ }
2907 +
2908 + register_pm_notifier(&pm_notifier);
2909 +
2910 +diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
2911 +index 34e0d476c5c6..970654fcc48d 100644
2912 +--- a/drivers/s390/net/qeth_core.h
2913 ++++ b/drivers/s390/net/qeth_core.h
2914 +@@ -826,6 +826,11 @@ struct qeth_trap_id {
2915 + /*some helper functions*/
2916 + #define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
2917 +
2918 ++static inline bool qeth_netdev_is_registered(struct net_device *dev)
2919 ++{
2920 ++ return dev->netdev_ops != NULL;
2921 ++}
2922 ++
2923 + static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
2924 + unsigned int elements)
2925 + {
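
The new qeth_netdev_is_registered() helper above treats the presence of netdev_ops as "this net_device was actually set up", and the l2/l3 remove paths further down only call unregister_netdev() when that is true. A small standalone sketch of the same pointer-field-as-flag idiom; the struct names here are invented for illustration:

/* Pointer-field-as-registration-flag idiom, mirrored in plain C. */
#include <stdio.h>
#include <stddef.h>

struct toy_ops { void (*start)(void); };

struct toy_device {
        const struct toy_ops *ops;   /* set only once setup succeeded */
};

static inline int toy_is_registered(const struct toy_device *dev)
{
        return dev->ops != NULL;
}

static void toy_remove(struct toy_device *dev)
{
        /* Only tear down what was actually set up. */
        if (toy_is_registered(dev))
                printf("unregistering device\n");
        else
                printf("device never registered, nothing to do\n");
}

int main(void)
{
        static const struct toy_ops ops = { .start = NULL };
        struct toy_device never_setup = { .ops = NULL };
        struct toy_device registered = { .ops = &ops };

        toy_remove(&never_setup);
        toy_remove(&registered);
        return 0;
}
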
2926 +diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
2927 +index b5e38531733f..76b2fba5fba2 100644
2928 +--- a/drivers/s390/net/qeth_l2_main.c
2929 ++++ b/drivers/s390/net/qeth_l2_main.c
2930 +@@ -854,7 +854,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
2931 +
2932 + if (cgdev->state == CCWGROUP_ONLINE)
2933 + qeth_l2_set_offline(cgdev);
2934 +- unregister_netdev(card->dev);
2935 ++ if (qeth_netdev_is_registered(card->dev))
2936 ++ unregister_netdev(card->dev);
2937 + }
2938 +
2939 + static const struct ethtool_ops qeth_l2_ethtool_ops = {
2940 +@@ -894,7 +895,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
2941 + {
2942 + int rc;
2943 +
2944 +- if (card->dev->netdev_ops)
2945 ++ if (qeth_netdev_is_registered(card->dev))
2946 + return 0;
2947 +
2948 + card->dev->priv_flags |= IFF_UNICAST_FLT;
2949 +diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
2950 +index ada258c01a08..b7f6a8384543 100644
2951 +--- a/drivers/s390/net/qeth_l3_main.c
2952 ++++ b/drivers/s390/net/qeth_l3_main.c
2953 +@@ -279,9 +279,6 @@ static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
2954 +
2955 + QETH_CARD_TEXT(card, 4, "clearip");
2956 +
2957 +- if (recover && card->options.sniffer)
2958 +- return;
2959 +-
2960 + spin_lock_bh(&card->ip_lock);
2961 +
2962 + hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
2963 +@@ -664,6 +661,8 @@ static int qeth_l3_register_addr_entry(struct qeth_card *card,
2964 + int rc = 0;
2965 + int cnt = 3;
2966 +
2967 ++ if (card->options.sniffer)
2968 ++ return 0;
2969 +
2970 + if (addr->proto == QETH_PROT_IPV4) {
2971 + QETH_CARD_TEXT(card, 2, "setaddr4");
2972 +@@ -698,6 +697,9 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
2973 + {
2974 + int rc = 0;
2975 +
2976 ++ if (card->options.sniffer)
2977 ++ return 0;
2978 ++
2979 + if (addr->proto == QETH_PROT_IPV4) {
2980 + QETH_CARD_TEXT(card, 2, "deladdr4");
2981 + QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int));
2982 +@@ -2512,7 +2514,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
2983 + {
2984 + int rc;
2985 +
2986 +- if (card->dev->netdev_ops)
2987 ++ if (qeth_netdev_is_registered(card->dev))
2988 + return 0;
2989 +
2990 + if (card->info.type == QETH_CARD_TYPE_OSD ||
2991 +@@ -2609,7 +2611,8 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
2992 + if (cgdev->state == CCWGROUP_ONLINE)
2993 + qeth_l3_set_offline(cgdev);
2994 +
2995 +- unregister_netdev(card->dev);
2996 ++ if (qeth_netdev_is_registered(card->dev))
2997 ++ unregister_netdev(card->dev);
2998 + qeth_l3_clear_ip_htable(card, 0);
2999 + qeth_l3_clear_ipato_list(card);
3000 + }
3001 +diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
3002 +index 3649b83ef863..effba6ce0caa 100644
3003 +--- a/drivers/tty/serial/sh-sci.c
3004 ++++ b/drivers/tty/serial/sh-sci.c
3005 +@@ -1614,10 +1614,10 @@ static void sci_request_dma(struct uart_port *port)
3006 + hrtimer_init(&s->rx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3007 + s->rx_timer.function = rx_timer_fn;
3008 +
3009 ++ s->chan_rx_saved = s->chan_rx = chan;
3010 ++
3011 + if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
3012 + sci_submit_rx(s);
3013 +-
3014 +- s->chan_rx_saved = s->chan_rx = chan;
3015 + }
3016 + }
3017 +
3018 +diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
3019 +index 9916edda5271..2762148c169d 100644
3020 +--- a/drivers/uio/uio.c
3021 ++++ b/drivers/uio/uio.c
3022 +@@ -959,6 +959,8 @@ int __uio_register_device(struct module *owner,
3023 + if (ret)
3024 + goto err_uio_dev_add_attributes;
3025 +
3026 ++ info->uio_dev = idev;
3027 ++
3028 + if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
3029 + /*
3030 + * Note that we deliberately don't use devm_request_irq
3031 +@@ -970,11 +972,12 @@ int __uio_register_device(struct module *owner,
3032 + */
3033 + ret = request_irq(info->irq, uio_interrupt,
3034 + info->irq_flags, info->name, idev);
3035 +- if (ret)
3036 ++ if (ret) {
3037 ++ info->uio_dev = NULL;
3038 + goto err_request_irq;
3039 ++ }
3040 + }
3041 +
3042 +- info->uio_dev = idev;
3043 + return 0;
3044 +
3045 + err_request_irq:
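
The uio hunk above publishes info->uio_dev before request_irq() and clears it again if the request fails, so an interrupt that fires immediately after registration already sees a valid back-pointer. A minimal sketch of the pattern, with an immediately-invoked callback standing in for the IRQ; all names are illustrative:

/* Publish shared state before registering a handler that may fire at once;
 * roll the publication back if registration fails. Illustrative only. */
#include <stdio.h>
#include <stddef.h>

struct device { const char *name; };

struct info {
        struct device *dev;          /* what the handler dereferences */
};

static struct info *registered_info;

static void fake_irq_handler(void)
{
        /* Would crash if ->dev had not been published before registration. */
        printf("irq handled for %s\n", registered_info->dev->name);
}

/* Simulates request_irq() with the interrupt already pending: the handler
 * can run before this function even returns. */
static int register_handler(struct info *inf, int force_failure)
{
        registered_info = inf;
        fake_irq_handler();
        return force_failure ? -1 : 0;
}

int main(void)
{
        struct device d = { "uio0" };
        struct info inf = { .dev = &d };     /* publish before registering */

        if (register_handler(&inf, 0) != 0)
                inf.dev = NULL;              /* roll back on failure */
        printf("registration done\n");
        return 0;
}
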
3046 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
3047 +index 9ede35cecb12..40c64c7ab5e4 100644
3048 +--- a/drivers/usb/class/cdc-acm.c
3049 ++++ b/drivers/usb/class/cdc-acm.c
3050 +@@ -1711,6 +1711,9 @@ static const struct usb_device_id acm_ids[] = {
3051 + { USB_DEVICE(0x0572, 0x1328), /* Shiro / Aztech USB MODEM UM-3100 */
3052 + .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
3053 + },
3054 ++ { USB_DEVICE(0x0572, 0x1349), /* Hiro (Conexant) USB MODEM H50228 */
3055 ++ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
3056 ++ },
3057 + { USB_DEVICE(0x20df, 0x0001), /* Simtec Electronics Entropy Key */
3058 + .driver_info = QUIRK_CONTROL_LINE_STATE, },
3059 + { USB_DEVICE(0x2184, 0x001c) }, /* GW Instek AFG-2225 */
3060 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
3061 +index 462ce49f683a..6e0823790bee 100644
3062 +--- a/drivers/usb/core/hub.c
3063 ++++ b/drivers/usb/core/hub.c
3064 +@@ -2791,6 +2791,7 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
3065 + int i, status;
3066 + u16 portchange, portstatus;
3067 + struct usb_port *port_dev = hub->ports[port1 - 1];
3068 ++ int reset_recovery_time;
3069 +
3070 + if (!hub_is_superspeed(hub->hdev)) {
3071 + if (warm) {
3072 +@@ -2882,11 +2883,18 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
3073 +
3074 + done:
3075 + if (status == 0) {
3076 +- /* TRSTRCY = 10 ms; plus some extra */
3077 + if (port_dev->quirks & USB_PORT_QUIRK_FAST_ENUM)
3078 + usleep_range(10000, 12000);
3079 +- else
3080 +- msleep(10 + 40);
3081 ++ else {
3082 ++ /* TRSTRCY = 10 ms; plus some extra */
3083 ++ reset_recovery_time = 10 + 40;
3084 ++
3085 ++ /* Hub needs extra delay after resetting its port. */
3086 ++ if (hub->hdev->quirks & USB_QUIRK_HUB_SLOW_RESET)
3087 ++ reset_recovery_time += 100;
3088 ++
3089 ++ msleep(reset_recovery_time);
3090 ++ }
3091 +
3092 + if (udev) {
3093 + struct usb_hcd *hcd = bus_to_hcd(udev->bus);
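
The hub.c hunk replaces the fixed msleep(10 + 40) with a computed recovery time, so hubs flagged with the new slow-reset quirk get an extra 100 ms on top of the TRSTRCY margin noted in the comment. The arithmetic, pulled out as a tiny standalone function:

/* Recovery delay after a port reset, mirroring the hunk above. */
#include <stdio.h>
#include <stdbool.h>

static int reset_recovery_ms(bool hub_slow_reset)
{
        int ms = 10 + 40;            /* TRSTRCY = 10 ms, plus some extra */

        if (hub_slow_reset)
                ms += 100;           /* hub needs extra delay after reset */
        return ms;
}

int main(void)
{
        printf("normal hub: %d ms\n", reset_recovery_ms(false));
        printf("slow hub:   %d ms\n", reset_recovery_ms(true));
        return 0;
}
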
3094 +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
3095 +index 178d6c6063c0..f9ff03e6af93 100644
3096 +--- a/drivers/usb/core/quirks.c
3097 ++++ b/drivers/usb/core/quirks.c
3098 +@@ -128,6 +128,9 @@ static int quirks_param_set(const char *val, const struct kernel_param *kp)
3099 + case 'n':
3100 + flags |= USB_QUIRK_DELAY_CTRL_MSG;
3101 + break;
3102 ++ case 'o':
3103 ++ flags |= USB_QUIRK_HUB_SLOW_RESET;
3104 ++ break;
3105 + /* Ignore unrecognized flag characters */
3106 + }
3107 + }
3108 +@@ -380,6 +383,9 @@ static const struct usb_device_id usb_quirk_list[] = {
3109 + { USB_DEVICE(0x1a0a, 0x0200), .driver_info =
3110 + USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
3111 +
3112 ++ /* Terminus Technology Inc. Hub */
3113 ++ { USB_DEVICE(0x1a40, 0x0101), .driver_info = USB_QUIRK_HUB_SLOW_RESET },
3114 ++
3115 + /* Corsair K70 RGB */
3116 + { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
3117 +
3118 +@@ -391,6 +397,9 @@ static const struct usb_device_id usb_quirk_list[] = {
3119 + { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT |
3120 + USB_QUIRK_DELAY_CTRL_MSG },
3121 +
3122 ++ /* Corsair K70 LUX RGB */
3123 ++ { USB_DEVICE(0x1b1c, 0x1b33), .driver_info = USB_QUIRK_DELAY_INIT },
3124 ++
3125 + /* Corsair K70 LUX */
3126 + { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT },
3127 +
3128 +@@ -411,6 +420,11 @@ static const struct usb_device_id usb_quirk_list[] = {
3129 + { USB_DEVICE(0x2040, 0x7200), .driver_info =
3130 + USB_QUIRK_CONFIG_INTF_STRINGS },
3131 +
3132 ++ /* Raydium Touchscreen */
3133 ++ { USB_DEVICE(0x2386, 0x3114), .driver_info = USB_QUIRK_NO_LPM },
3134 ++
3135 ++ { USB_DEVICE(0x2386, 0x3119), .driver_info = USB_QUIRK_NO_LPM },
3136 ++
3137 + /* DJI CineSSD */
3138 + { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
3139 +
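
The quirks.c hunk above extends the usbcore.quirks= parser with an 'o' letter that maps to USB_QUIRK_HUB_SLOW_RESET, alongside the new static table entries. A reduced sketch of that letter-to-bitmask mapping; only two of the real flag letters are shown, with the bit values visible in this patch's quirks.h hunk:

/* Sketch of the quirk flag-letter parser: each letter sets a bit. */
#include <stdio.h>
#include <stdint.h>

#define BIT(n)                     (1u << (n))
#define USB_QUIRK_DELAY_CTRL_MSG   BIT(13)
#define USB_QUIRK_HUB_SLOW_RESET   BIT(14)

static uint32_t parse_quirk_flags(const char *s)
{
        uint32_t flags = 0;

        for (; *s; s++) {
                switch (*s) {
                case 'n':
                        flags |= USB_QUIRK_DELAY_CTRL_MSG;
                        break;
                case 'o':
                        flags |= USB_QUIRK_HUB_SLOW_RESET;
                        break;
                default:
                        break;  /* unrecognized letters are ignored */
                }
        }
        return flags;
}

int main(void)
{
        printf("\"no\" -> 0x%x\n", parse_quirk_flags("no"));
        printf("\"o\"  -> 0x%x\n", parse_quirk_flags("o"));
        return 0;
}
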
3140 +diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
3141 +index d746c26a8055..6a0c60badfa0 100644
3142 +--- a/drivers/usb/misc/appledisplay.c
3143 ++++ b/drivers/usb/misc/appledisplay.c
3144 +@@ -50,6 +50,7 @@ static const struct usb_device_id appledisplay_table[] = {
3145 + { APPLEDISPLAY_DEVICE(0x9219) },
3146 + { APPLEDISPLAY_DEVICE(0x921c) },
3147 + { APPLEDISPLAY_DEVICE(0x921d) },
3148 ++ { APPLEDISPLAY_DEVICE(0x9222) },
3149 + { APPLEDISPLAY_DEVICE(0x9236) },
3150 +
3151 + /* Terminating entry */
3152 +diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
3153 +index 84575baceebc..97341fa75458 100644
3154 +--- a/drivers/xen/grant-table.c
3155 ++++ b/drivers/xen/grant-table.c
3156 +@@ -914,7 +914,7 @@ int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
3157 +
3158 + ret = xenmem_reservation_increase(args->nr_pages, args->frames);
3159 + if (ret != args->nr_pages) {
3160 +- pr_debug("Failed to decrease reservation for DMA buffer\n");
3161 ++ pr_debug("Failed to increase reservation for DMA buffer\n");
3162 + ret = -EFAULT;
3163 + } else {
3164 + ret = 0;
3165 +diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
3166 +index 77a83790a31f..2543f24d23f8 100644
3167 +--- a/fs/afs/rxrpc.c
3168 ++++ b/fs/afs/rxrpc.c
3169 +@@ -500,7 +500,6 @@ static void afs_deliver_to_call(struct afs_call *call)
3170 + case -EINPROGRESS:
3171 + case -EAGAIN:
3172 + goto out;
3173 +- case -EIO:
3174 + case -ECONNABORTED:
3175 + ASSERTCMP(state, ==, AFS_CALL_COMPLETE);
3176 + goto done;
3177 +@@ -509,6 +508,10 @@ static void afs_deliver_to_call(struct afs_call *call)
3178 + rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
3179 + abort_code, ret, "KIV");
3180 + goto local_abort;
3181 ++ case -EIO:
3182 ++ pr_err("kAFS: Call %u in bad state %u\n",
3183 ++ call->debug_id, state);
3184 ++ /* Fall through */
3185 + case -ENODATA:
3186 + case -EBADMSG:
3187 + case -EMSGSIZE:
3188 +diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c
3189 +index 32d4f13784ba..03f4d24db8fe 100644
3190 +--- a/fs/ceph/quota.c
3191 ++++ b/fs/ceph/quota.c
3192 +@@ -237,7 +237,8 @@ static bool check_quota_exceeded(struct inode *inode, enum quota_check_op op,
3193 + ceph_put_snap_realm(mdsc, realm);
3194 + realm = next;
3195 + }
3196 +- ceph_put_snap_realm(mdsc, realm);
3197 ++ if (realm)
3198 ++ ceph_put_snap_realm(mdsc, realm);
3199 + up_read(&mdsc->snap_rwsem);
3200 +
3201 + return exceeded;
3202 +diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
3203 +index 7065426b3280..fb32f3d6925e 100644
3204 +--- a/fs/cifs/cifsfs.c
3205 ++++ b/fs/cifs/cifsfs.c
3206 +@@ -981,8 +981,8 @@ static int cifs_clone_file_range(struct file *src_file, loff_t off,
3207 + struct inode *src_inode = file_inode(src_file);
3208 + struct inode *target_inode = file_inode(dst_file);
3209 + struct cifsFileInfo *smb_file_src = src_file->private_data;
3210 +- struct cifsFileInfo *smb_file_target = dst_file->private_data;
3211 +- struct cifs_tcon *target_tcon = tlink_tcon(smb_file_target->tlink);
3212 ++ struct cifsFileInfo *smb_file_target;
3213 ++ struct cifs_tcon *target_tcon;
3214 + unsigned int xid;
3215 + int rc;
3216 +
3217 +@@ -996,6 +996,9 @@ static int cifs_clone_file_range(struct file *src_file, loff_t off,
3218 + goto out;
3219 + }
3220 +
3221 ++ smb_file_target = dst_file->private_data;
3222 ++ target_tcon = tlink_tcon(smb_file_target->tlink);
3223 ++
3224 + /*
3225 + * Note: cifs case is easier than btrfs since server responsible for
3226 + * checks for proper open modes and file type and if it wants
3227 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
3228 +index 89985a0a6819..812da3e56a22 100644
3229 +--- a/fs/cifs/smb2ops.c
3230 ++++ b/fs/cifs/smb2ops.c
3231 +@@ -686,6 +686,7 @@ move_smb2_ea_to_cifs(char *dst, size_t dst_size,
3232 + int rc = 0;
3233 + unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0;
3234 + char *name, *value;
3235 ++ size_t buf_size = dst_size;
3236 + size_t name_len, value_len, user_name_len;
3237 +
3238 + while (src_size > 0) {
3239 +@@ -721,9 +722,10 @@ move_smb2_ea_to_cifs(char *dst, size_t dst_size,
3240 + /* 'user.' plus a terminating null */
3241 + user_name_len = 5 + 1 + name_len;
3242 +
3243 +- rc += user_name_len;
3244 +-
3245 +- if (dst_size >= user_name_len) {
3246 ++ if (buf_size == 0) {
3247 ++ /* skip copy - calc size only */
3248 ++ rc += user_name_len;
3249 ++ } else if (dst_size >= user_name_len) {
3250 + dst_size -= user_name_len;
3251 + memcpy(dst, "user.", 5);
3252 + dst += 5;
3253 +@@ -731,8 +733,7 @@ move_smb2_ea_to_cifs(char *dst, size_t dst_size,
3254 + dst += name_len;
3255 + *dst = 0;
3256 + ++dst;
3257 +- } else if (dst_size == 0) {
3258 +- /* skip copy - calc size only */
3259 ++ rc += user_name_len;
3260 + } else {
3261 + /* stop before overrun buffer */
3262 + rc = -ERANGE;
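
The smb2ops.c hunk fixes move_smb2_ea_to_cifs() so that a zero-sized destination buffer means "report how many bytes are needed", while a non-zero buffer is copied into and only then counted, instead of adding to rc before checking the space. This is the usual listxattr-style convention; a standalone sketch with an invented function name:

/* listxattr-style convention: size 0 -> return required size only,
 * otherwise copy and fail with -ERANGE if the buffer is too small. */
#include <stdio.h>
#include <string.h>

#define ERANGE_ERR (-34)

static int list_names(const char *const *names, int n, char *dst, size_t dst_size)
{
        size_t buf_size = dst_size;
        int rc = 0;

        for (int i = 0; i < n; i++) {
                size_t len = strlen(names[i]) + 1;   /* name plus NUL */

                if (buf_size == 0) {
                        rc += len;                   /* size query only */
                } else if (dst_size >= len) {
                        memcpy(dst, names[i], len);
                        dst += len;
                        dst_size -= len;
                        rc += len;                   /* count what was copied */
                } else {
                        return ERANGE_ERR;           /* stop before overrun */
                }
        }
        return rc;
}

int main(void)
{
        const char *names[] = { "user.one", "user.two" };
        char buf[64];

        printf("need %d bytes\n", list_names(names, 2, NULL, 0));
        printf("copied %d bytes\n", list_names(names, 2, buf, sizeof(buf)));
        printf("tiny buffer -> %d\n", list_names(names, 2, buf, 4));
        return 0;
}
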
3263 +diff --git a/fs/exofs/super.c b/fs/exofs/super.c
3264 +index 41cf2fbee50d..7d61e3fa378c 100644
3265 +--- a/fs/exofs/super.c
3266 ++++ b/fs/exofs/super.c
3267 +@@ -101,6 +101,7 @@ static int parse_options(char *options, struct exofs_mountopt *opts)
3268 + token = match_token(p, tokens, args);
3269 + switch (token) {
3270 + case Opt_name:
3271 ++ kfree(opts->dev_name);
3272 + opts->dev_name = match_strdup(&args[0]);
3273 + if (unlikely(!opts->dev_name)) {
3274 + EXOFS_ERR("Error allocating dev_name");
3275 +@@ -866,8 +867,10 @@ static struct dentry *exofs_mount(struct file_system_type *type,
3276 + int ret;
3277 +
3278 + ret = parse_options(data, &opts);
3279 +- if (ret)
3280 ++ if (ret) {
3281 ++ kfree(opts.dev_name);
3282 + return ERR_PTR(ret);
3283 ++ }
3284 +
3285 + if (!opts.dev_name)
3286 + opts.dev_name = dev_name;
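
The exofs hunks free the previously strdup'ed dev_name before parsing another Opt_name token and on the mount error path, so repeated or malformed options cannot leak the allocation. The shape of the fix as a tiny standalone loop (option handling simplified, not the exofs parser itself):

/* Free the previous value before overwriting it when an option repeats. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct opts { char *dev_name; };

static int handle_name_opt(struct opts *o, const char *value)
{
        free(o->dev_name);              /* drop earlier occurrence, if any */
        o->dev_name = strdup(value);
        return o->dev_name ? 0 : -1;
}

int main(void)
{
        struct opts o = { NULL };
        const char *given[] = { "osd0", "osd1" };  /* option passed twice */

        for (size_t i = 0; i < 2; i++)
                if (handle_name_opt(&o, given[i]) < 0) {
                        free(o.dev_name);
                        return 1;
                }
        printf("dev_name = %s\n", o.dev_name);
        free(o.dev_name);
        return 0;
}
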
3287 +diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c
3288 +index 9a8772465a90..da25c49203cc 100644
3289 +--- a/fs/hfs/brec.c
3290 ++++ b/fs/hfs/brec.c
3291 +@@ -425,6 +425,10 @@ skip:
3292 + if (new_node) {
3293 + __be32 cnid;
3294 +
3295 ++ if (!new_node->parent) {
3296 ++ hfs_btree_inc_height(tree);
3297 ++ new_node->parent = tree->root;
3298 ++ }
3299 + fd->bnode = hfs_bnode_find(tree, new_node->parent);
3300 + /* create index key and entry */
3301 + hfs_bnode_read_key(new_node, fd->search_key, 14);
3302 +diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c
3303 +index ed8eacb34452..aa17a392b414 100644
3304 +--- a/fs/hfsplus/brec.c
3305 ++++ b/fs/hfsplus/brec.c
3306 +@@ -429,6 +429,10 @@ skip:
3307 + if (new_node) {
3308 + __be32 cnid;
3309 +
3310 ++ if (!new_node->parent) {
3311 ++ hfs_btree_inc_height(tree);
3312 ++ new_node->parent = tree->root;
3313 ++ }
3314 + fd->bnode = hfs_bnode_find(tree, new_node->parent);
3315 + /* create index key and entry */
3316 + hfs_bnode_read_key(new_node, fd->search_key, 14);
3317 +diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
3318 +index 48cdfc81fe10..32d8986c26fb 100644
3319 +--- a/fs/reiserfs/xattr.c
3320 ++++ b/fs/reiserfs/xattr.c
3321 +@@ -185,6 +185,7 @@ struct reiserfs_dentry_buf {
3322 + struct dir_context ctx;
3323 + struct dentry *xadir;
3324 + int count;
3325 ++ int err;
3326 + struct dentry *dentries[8];
3327 + };
3328 +
3329 +@@ -207,6 +208,7 @@ fill_with_dentries(struct dir_context *ctx, const char *name, int namelen,
3330 +
3331 + dentry = lookup_one_len(name, dbuf->xadir, namelen);
3332 + if (IS_ERR(dentry)) {
3333 ++ dbuf->err = PTR_ERR(dentry);
3334 + return PTR_ERR(dentry);
3335 + } else if (d_really_is_negative(dentry)) {
3336 + /* A directory entry exists, but no file? */
3337 +@@ -215,6 +217,7 @@ fill_with_dentries(struct dir_context *ctx, const char *name, int namelen,
3338 + "not found for file %pd.\n",
3339 + dentry, dbuf->xadir);
3340 + dput(dentry);
3341 ++ dbuf->err = -EIO;
3342 + return -EIO;
3343 + }
3344 +
3345 +@@ -262,6 +265,10 @@ static int reiserfs_for_each_xattr(struct inode *inode,
3346 + err = reiserfs_readdir_inode(d_inode(dir), &buf.ctx);
3347 + if (err)
3348 + break;
3349 ++ if (buf.err) {
3350 ++ err = buf.err;
3351 ++ break;
3352 ++ }
3353 + if (!buf.count)
3354 + break;
3355 + for (i = 0; !err && i < buf.count && buf.dentries[i]; i++) {
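
The reiserfs hunks add an err field to the dentry buffer so a failure inside fill_with_dentries() is not lost: the readdir-style iteration may complete without surfacing the callback's return value, so the callback records the error in its context and the caller checks it after each pass. A stripped-down sketch of the pattern; the iteration and types are invented for illustration:

/* Propagate a callback failure through the iteration context, since the
 * iterator itself may not return the callback's error. */
#include <stdio.h>

struct fill_ctx {
        int count;
        int err;                        /* callback stores its failure here */
};

static int fill_one(struct fill_ctx *ctx, const char *name)
{
        if (name[0] == '\0') {          /* pretend the lookup failed */
                ctx->err = -5;          /* -EIO */
                return ctx->err;
        }
        ctx->count++;
        return 0;
}

/* An iterator that swallows the callback's return value and only reports
 * its own status. */
static int iterate(const char *const *names, int n, struct fill_ctx *ctx)
{
        for (int i = 0; i < n; i++)
                if (fill_one(ctx, names[i]) != 0)
                        break;
        return 0;
}

int main(void)
{
        const char *names[] = { "xattr1", "", "xattr2" };
        struct fill_ctx ctx = { 0, 0 };
        int err = iterate(names, 3, &ctx);

        if (!err && ctx.err)
                err = ctx.err;          /* recover the callback's error */
        printf("filled %d entries, err=%d\n", ctx.count, err);
        return 0;
}
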
3356 +diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
3357 +index 78b86dea2f29..7f53ece2c039 100644
3358 +--- a/include/linux/mtd/nand.h
3359 ++++ b/include/linux/mtd/nand.h
3360 +@@ -568,7 +568,7 @@ static inline void nanddev_pos_next_eraseblock(struct nand_device *nand,
3361 + }
3362 +
3363 + /**
3364 +- * nanddev_pos_next_eraseblock() - Move a position to the next page
3365 ++ * nanddev_pos_next_page() - Move a position to the next page
3366 + * @nand: NAND device
3367 + * @pos: the position to update
3368 + *
3369 +diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
3370 +index 34fc80f3eb90..1d100efe74ec 100644
3371 +--- a/include/linux/netfilter/ipset/ip_set.h
3372 ++++ b/include/linux/netfilter/ipset/ip_set.h
3373 +@@ -314,7 +314,7 @@ enum {
3374 + extern ip_set_id_t ip_set_get_byname(struct net *net,
3375 + const char *name, struct ip_set **set);
3376 + extern void ip_set_put_byindex(struct net *net, ip_set_id_t index);
3377 +-extern const char *ip_set_name_byindex(struct net *net, ip_set_id_t index);
3378 ++extern void ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name);
3379 + extern ip_set_id_t ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index);
3380 + extern void ip_set_nfnl_put(struct net *net, ip_set_id_t index);
3381 +
3382 +diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h
3383 +index 8e2bab1e8e90..70877f8de7e9 100644
3384 +--- a/include/linux/netfilter/ipset/ip_set_comment.h
3385 ++++ b/include/linux/netfilter/ipset/ip_set_comment.h
3386 +@@ -43,11 +43,11 @@ ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment,
3387 + rcu_assign_pointer(comment->c, c);
3388 + }
3389 +
3390 +-/* Used only when dumping a set, protected by rcu_read_lock_bh() */
3391 ++/* Used only when dumping a set, protected by rcu_read_lock() */
3392 + static inline int
3393 + ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment)
3394 + {
3395 +- struct ip_set_comment_rcu *c = rcu_dereference_bh(comment->c);
3396 ++ struct ip_set_comment_rcu *c = rcu_dereference(comment->c);
3397 +
3398 + if (!c)
3399 + return 0;
3400 +diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
3401 +index b7a99ce56bc9..a1be64c9940f 100644
3402 +--- a/include/linux/usb/quirks.h
3403 ++++ b/include/linux/usb/quirks.h
3404 +@@ -66,4 +66,7 @@
3405 + /* Device needs a pause after every control message. */
3406 + #define USB_QUIRK_DELAY_CTRL_MSG BIT(13)
3407 +
3408 ++/* Hub needs extra delay after resetting its port. */
3409 ++#define USB_QUIRK_HUB_SLOW_RESET BIT(14)
3410 ++
3411 + #endif /* __LINUX_USB_QUIRKS_H */
3412 +diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
3413 +index e23290ffdc77..325ec6ef0a76 100644
3414 +--- a/include/uapi/linux/netfilter/nf_tables.h
3415 ++++ b/include/uapi/linux/netfilter/nf_tables.h
3416 +@@ -1581,8 +1581,8 @@ enum nft_ng_attributes {
3417 + NFTA_NG_MODULUS,
3418 + NFTA_NG_TYPE,
3419 + NFTA_NG_OFFSET,
3420 +- NFTA_NG_SET_NAME,
3421 +- NFTA_NG_SET_ID,
3422 ++ NFTA_NG_SET_NAME, /* deprecated */
3423 ++ NFTA_NG_SET_ID, /* deprecated */
3424 + __NFTA_NG_MAX
3425 + };
3426 + #define NFTA_NG_MAX (__NFTA_NG_MAX - 1)
3427 +diff --git a/include/uapi/linux/netfilter_bridge.h b/include/uapi/linux/netfilter_bridge.h
3428 +index 156ccd089df1..1610fdbab98d 100644
3429 +--- a/include/uapi/linux/netfilter_bridge.h
3430 ++++ b/include/uapi/linux/netfilter_bridge.h
3431 +@@ -11,6 +11,10 @@
3432 + #include <linux/if_vlan.h>
3433 + #include <linux/if_pppox.h>
3434 +
3435 ++#ifndef __KERNEL__
3436 ++#include <limits.h> /* for INT_MIN, INT_MAX */
3437 ++#endif
3438 ++
3439 + /* Bridge Hooks */
3440 + /* After promisc drops, checksum checks. */
3441 + #define NF_BR_PRE_ROUTING 0
3442 +diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
3443 +index 675eb6d36e47..382c09dddf93 100644
3444 +--- a/kernel/bpf/syscall.c
3445 ++++ b/kernel/bpf/syscall.c
3446 +@@ -1974,6 +1974,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
3447 + info.jited_prog_len = 0;
3448 + info.xlated_prog_len = 0;
3449 + info.nr_jited_ksyms = 0;
3450 ++ info.nr_jited_func_lens = 0;
3451 + goto done;
3452 + }
3453 +
3454 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
3455 +index ad97f3ba5ec5..35551110d277 100644
3456 +--- a/kernel/sched/core.c
3457 ++++ b/kernel/sched/core.c
3458 +@@ -5854,11 +5854,14 @@ void __init sched_init_smp(void)
3459 + /*
3460 + * There's no userspace yet to cause hotplug operations; hence all the
3461 + * CPU masks are stable and all blatant races in the below code cannot
3462 +- * happen.
3463 ++ * happen. The hotplug lock is nevertheless taken to satisfy lockdep,
3464 ++ * but there won't be any contention on it.
3465 + */
3466 ++ cpus_read_lock();
3467 + mutex_lock(&sched_domains_mutex);
3468 + sched_init_domains(cpu_active_mask);
3469 + mutex_unlock(&sched_domains_mutex);
3470 ++ cpus_read_unlock();
3471 +
3472 + /* Move init over to a non-isolated CPU */
3473 + if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0)
3474 +diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
3475 +index 5d73f5cb4d8a..79777645cac9 100644
3476 +--- a/lib/raid6/test/Makefile
3477 ++++ b/lib/raid6/test/Makefile
3478 +@@ -27,7 +27,7 @@ ifeq ($(ARCH),arm)
3479 + CFLAGS += -I../../../arch/arm/include -mfpu=neon
3480 + HAS_NEON = yes
3481 + endif
3482 +-ifeq ($(ARCH),arm64)
3483 ++ifeq ($(ARCH),aarch64)
3484 + CFLAGS += -I../../../arch/arm64/include
3485 + HAS_NEON = yes
3486 + endif
3487 +@@ -41,7 +41,7 @@ ifeq ($(IS_X86),yes)
3488 + gcc -c -x assembler - >&/dev/null && \
3489 + rm ./-.o && echo -DCONFIG_AS_AVX512=1)
3490 + else ifeq ($(HAS_NEON),yes)
3491 +- OBJS += neon.o neon1.o neon2.o neon4.o neon8.o
3492 ++ OBJS += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
3493 + CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1
3494 + else
3495 + HAS_ALTIVEC := $(shell printf '\#include <altivec.h>\nvector int a;\n' |\
3496 +diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
3497 +index 0a187196aeed..9a1c27c61de8 100644
3498 +--- a/net/ceph/messenger.c
3499 ++++ b/net/ceph/messenger.c
3500 +@@ -593,9 +593,15 @@ static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
3501 + struct bio_vec bvec;
3502 + int ret;
3503 +
3504 +- /* sendpage cannot properly handle pages with page_count == 0,
3505 +- * we need to fallback to sendmsg if that's the case */
3506 +- if (page_count(page) >= 1)
3507 ++ /*
3508 ++ * sendpage cannot properly handle pages with page_count == 0,
3509 ++ * we need to fall back to sendmsg if that's the case.
3510 ++ *
3511 ++ * Same goes for slab pages: skb_can_coalesce() allows
3512 ++ * coalescing neighboring slab objects into a single frag which
3513 ++ * triggers one of hardened usercopy checks.
3514 ++ */
3515 ++ if (page_count(page) >= 1 && !PageSlab(page))
3516 + return __ceph_tcp_sendpage(sock, page, offset, size, more);
3517 +
3518 + bvec.bv_page = page;
3519 +diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
3520 +index 8f68a518d9db..f76bd4d15704 100644
3521 +--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
3522 ++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
3523 +@@ -587,11 +587,16 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
3524 + */
3525 + ret = -EINPROGRESS;
3526 + if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
3527 +- fq->q.meat == fq->q.len &&
3528 +- nf_ct_frag6_reasm(fq, skb, dev))
3529 +- ret = 0;
3530 +- else
3531 ++ fq->q.meat == fq->q.len) {
3532 ++ unsigned long orefdst = skb->_skb_refdst;
3533 ++
3534 ++ skb->_skb_refdst = 0UL;
3535 ++ if (nf_ct_frag6_reasm(fq, skb, dev))
3536 ++ ret = 0;
3537 ++ skb->_skb_refdst = orefdst;
3538 ++ } else {
3539 + skb_dst_drop(skb);
3540 ++ }
3541 +
3542 + out_unlock:
3543 + spin_unlock_bh(&fq->q.lock);
3544 +diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
3545 +index bc4bd247bb7d..1577f2f76060 100644
3546 +--- a/net/netfilter/ipset/ip_set_core.c
3547 ++++ b/net/netfilter/ipset/ip_set_core.c
3548 +@@ -55,11 +55,15 @@ MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@××××××××××××××.hu>");
3549 + MODULE_DESCRIPTION("core IP set support");
3550 + MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
3551 +
3552 +-/* When the nfnl mutex is held: */
3553 ++/* When the nfnl mutex or ip_set_ref_lock is held: */
3554 + #define ip_set_dereference(p) \
3555 +- rcu_dereference_protected(p, lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET))
3556 ++ rcu_dereference_protected(p, \
3557 ++ lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET) || \
3558 ++ lockdep_is_held(&ip_set_ref_lock))
3559 + #define ip_set(inst, id) \
3560 + ip_set_dereference((inst)->ip_set_list)[id]
3561 ++#define ip_set_ref_netlink(inst,id) \
3562 ++ rcu_dereference_raw((inst)->ip_set_list)[id]
3563 +
3564 + /* The set types are implemented in modules and registered set types
3565 + * can be found in ip_set_type_list. Adding/deleting types is
3566 +@@ -693,21 +697,20 @@ ip_set_put_byindex(struct net *net, ip_set_id_t index)
3567 + EXPORT_SYMBOL_GPL(ip_set_put_byindex);
3568 +
3569 + /* Get the name of a set behind a set index.
3570 +- * We assume the set is referenced, so it does exist and
3571 +- * can't be destroyed. The set cannot be renamed due to
3572 +- * the referencing either.
3573 +- *
3574 ++ * Set itself is protected by RCU, but its name isn't: to protect against
3575 ++ * renaming, grab ip_set_ref_lock as reader (see ip_set_rename()) and copy the
3576 ++ * name.
3577 + */
3578 +-const char *
3579 +-ip_set_name_byindex(struct net *net, ip_set_id_t index)
3580 ++void
3581 ++ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name)
3582 + {
3583 +- const struct ip_set *set = ip_set_rcu_get(net, index);
3584 ++ struct ip_set *set = ip_set_rcu_get(net, index);
3585 +
3586 + BUG_ON(!set);
3587 +- BUG_ON(set->ref == 0);
3588 +
3589 +- /* Referenced, so it's safe */
3590 +- return set->name;
3591 ++ read_lock_bh(&ip_set_ref_lock);
3592 ++ strncpy(name, set->name, IPSET_MAXNAMELEN);
3593 ++ read_unlock_bh(&ip_set_ref_lock);
3594 + }
3595 + EXPORT_SYMBOL_GPL(ip_set_name_byindex);
3596 +
3597 +@@ -961,7 +964,7 @@ static int ip_set_create(struct net *net, struct sock *ctnl,
3598 + /* Wraparound */
3599 + goto cleanup;
3600 +
3601 +- list = kcalloc(i, sizeof(struct ip_set *), GFP_KERNEL);
3602 ++ list = kvcalloc(i, sizeof(struct ip_set *), GFP_KERNEL);
3603 + if (!list)
3604 + goto cleanup;
3605 + /* nfnl mutex is held, both lists are valid */
3606 +@@ -973,7 +976,7 @@ static int ip_set_create(struct net *net, struct sock *ctnl,
3607 + /* Use new list */
3608 + index = inst->ip_set_max;
3609 + inst->ip_set_max = i;
3610 +- kfree(tmp);
3611 ++ kvfree(tmp);
3612 + ret = 0;
3613 + } else if (ret) {
3614 + goto cleanup;
3615 +@@ -1153,7 +1156,7 @@ static int ip_set_rename(struct net *net, struct sock *ctnl,
3616 + if (!set)
3617 + return -ENOENT;
3618 +
3619 +- read_lock_bh(&ip_set_ref_lock);
3620 ++ write_lock_bh(&ip_set_ref_lock);
3621 + if (set->ref != 0) {
3622 + ret = -IPSET_ERR_REFERENCED;
3623 + goto out;
3624 +@@ -1170,7 +1173,7 @@ static int ip_set_rename(struct net *net, struct sock *ctnl,
3625 + strncpy(set->name, name2, IPSET_MAXNAMELEN);
3626 +
3627 + out:
3628 +- read_unlock_bh(&ip_set_ref_lock);
3629 ++ write_unlock_bh(&ip_set_ref_lock);
3630 + return ret;
3631 + }
3632 +
3633 +@@ -1252,7 +1255,7 @@ ip_set_dump_done(struct netlink_callback *cb)
3634 + struct ip_set_net *inst =
3635 + (struct ip_set_net *)cb->args[IPSET_CB_NET];
3636 + ip_set_id_t index = (ip_set_id_t)cb->args[IPSET_CB_INDEX];
3637 +- struct ip_set *set = ip_set(inst, index);
3638 ++ struct ip_set *set = ip_set_ref_netlink(inst, index);
3639 +
3640 + if (set->variant->uref)
3641 + set->variant->uref(set, cb, false);
3642 +@@ -1441,7 +1444,7 @@ next_set:
3643 + release_refcount:
3644 + /* If there was an error or set is done, release set */
3645 + if (ret || !cb->args[IPSET_CB_ARG0]) {
3646 +- set = ip_set(inst, index);
3647 ++ set = ip_set_ref_netlink(inst, index);
3648 + if (set->variant->uref)
3649 + set->variant->uref(set, cb, false);
3650 + pr_debug("release set %s\n", set->name);
3651 +@@ -2059,7 +2062,7 @@ ip_set_net_init(struct net *net)
3652 + if (inst->ip_set_max >= IPSET_INVALID_ID)
3653 + inst->ip_set_max = IPSET_INVALID_ID - 1;
3654 +
3655 +- list = kcalloc(inst->ip_set_max, sizeof(struct ip_set *), GFP_KERNEL);
3656 ++ list = kvcalloc(inst->ip_set_max, sizeof(struct ip_set *), GFP_KERNEL);
3657 + if (!list)
3658 + return -ENOMEM;
3659 + inst->is_deleted = false;
3660 +@@ -2087,7 +2090,7 @@ ip_set_net_exit(struct net *net)
3661 + }
3662 + }
3663 + nfnl_unlock(NFNL_SUBSYS_IPSET);
3664 +- kfree(rcu_dereference_protected(inst->ip_set_list, 1));
3665 ++ kvfree(rcu_dereference_protected(inst->ip_set_list, 1));
3666 + }
3667 +
3668 + static struct pernet_operations ip_set_net_ops = {
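
The ip_set core hunks change ip_set_name_byindex() from returning a pointer into the set's name field to copying the name into a caller buffer while holding ip_set_ref_lock as a reader, and make ip_set_rename() take the same lock as a writer, so a dump can no longer observe a half-updated name. A userspace sketch of copy-under-rwlock versus handing out the internal pointer; a pthread rwlock stands in for the kernel lock and all sizes are illustrative:

/* Copy a name out under a reader lock instead of returning a pointer to
 * storage that a writer may overwrite. */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define MAXNAMELEN 32

static char set_name[MAXNAMELEN] = "old_set";
static pthread_rwlock_t name_lock = PTHREAD_RWLOCK_INITIALIZER;

static void name_byindex(char *out)          /* safe: copy under read lock */
{
        pthread_rwlock_rdlock(&name_lock);
        strncpy(out, set_name, MAXNAMELEN);
        out[MAXNAMELEN - 1] = '\0';
        pthread_rwlock_unlock(&name_lock);
}

static void rename_set(const char *newname)  /* writer side */
{
        pthread_rwlock_wrlock(&name_lock);
        strncpy(set_name, newname, MAXNAMELEN);
        set_name[MAXNAMELEN - 1] = '\0';
        pthread_rwlock_unlock(&name_lock);
}

int main(void)
{
        char copy[MAXNAMELEN];

        name_byindex(copy);
        rename_set("new_set");
        /* "copy" stays valid and consistent even though the name changed. */
        printf("dumped name: %s, current name: %s\n", copy, set_name);
        return 0;
}
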
3669 +diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c
3670 +index d391485a6acd..613e18e720a4 100644
3671 +--- a/net/netfilter/ipset/ip_set_hash_netportnet.c
3672 ++++ b/net/netfilter/ipset/ip_set_hash_netportnet.c
3673 +@@ -213,13 +213,13 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
3674 +
3675 + if (tb[IPSET_ATTR_CIDR]) {
3676 + e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
3677 +- if (!e.cidr[0] || e.cidr[0] > HOST_MASK)
3678 ++ if (e.cidr[0] > HOST_MASK)
3679 + return -IPSET_ERR_INVALID_CIDR;
3680 + }
3681 +
3682 + if (tb[IPSET_ATTR_CIDR2]) {
3683 + e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
3684 +- if (!e.cidr[1] || e.cidr[1] > HOST_MASK)
3685 ++ if (e.cidr[1] > HOST_MASK)
3686 + return -IPSET_ERR_INVALID_CIDR;
3687 + }
3688 +
3689 +@@ -493,13 +493,13 @@ hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
3690 +
3691 + if (tb[IPSET_ATTR_CIDR]) {
3692 + e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
3693 +- if (!e.cidr[0] || e.cidr[0] > HOST_MASK)
3694 ++ if (e.cidr[0] > HOST_MASK)
3695 + return -IPSET_ERR_INVALID_CIDR;
3696 + }
3697 +
3698 + if (tb[IPSET_ATTR_CIDR2]) {
3699 + e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
3700 +- if (!e.cidr[1] || e.cidr[1] > HOST_MASK)
3701 ++ if (e.cidr[1] > HOST_MASK)
3702 + return -IPSET_ERR_INVALID_CIDR;
3703 + }
3704 +
3705 +diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
3706 +index 072a658fde04..4eef55da0878 100644
3707 +--- a/net/netfilter/ipset/ip_set_list_set.c
3708 ++++ b/net/netfilter/ipset/ip_set_list_set.c
3709 +@@ -148,9 +148,7 @@ __list_set_del_rcu(struct rcu_head * rcu)
3710 + {
3711 + struct set_elem *e = container_of(rcu, struct set_elem, rcu);
3712 + struct ip_set *set = e->set;
3713 +- struct list_set *map = set->data;
3714 +
3715 +- ip_set_put_byindex(map->net, e->id);
3716 + ip_set_ext_destroy(set, e);
3717 + kfree(e);
3718 + }
3719 +@@ -158,15 +156,21 @@ __list_set_del_rcu(struct rcu_head * rcu)
3720 + static inline void
3721 + list_set_del(struct ip_set *set, struct set_elem *e)
3722 + {
3723 ++ struct list_set *map = set->data;
3724 ++
3725 + set->elements--;
3726 + list_del_rcu(&e->list);
3727 ++ ip_set_put_byindex(map->net, e->id);
3728 + call_rcu(&e->rcu, __list_set_del_rcu);
3729 + }
3730 +
3731 + static inline void
3732 +-list_set_replace(struct set_elem *e, struct set_elem *old)
3733 ++list_set_replace(struct ip_set *set, struct set_elem *e, struct set_elem *old)
3734 + {
3735 ++ struct list_set *map = set->data;
3736 ++
3737 + list_replace_rcu(&old->list, &e->list);
3738 ++ ip_set_put_byindex(map->net, old->id);
3739 + call_rcu(&old->rcu, __list_set_del_rcu);
3740 + }
3741 +
3742 +@@ -298,7 +302,7 @@ list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
3743 + INIT_LIST_HEAD(&e->list);
3744 + list_set_init_extensions(set, ext, e);
3745 + if (n)
3746 +- list_set_replace(e, n);
3747 ++ list_set_replace(set, e, n);
3748 + else if (next)
3749 + list_add_tail_rcu(&e->list, &next->list);
3750 + else if (prev)
3751 +@@ -486,6 +490,7 @@ list_set_list(const struct ip_set *set,
3752 + const struct list_set *map = set->data;
3753 + struct nlattr *atd, *nested;
3754 + u32 i = 0, first = cb->args[IPSET_CB_ARG0];
3755 ++ char name[IPSET_MAXNAMELEN];
3756 + struct set_elem *e;
3757 + int ret = 0;
3758 +
3759 +@@ -504,8 +509,8 @@ list_set_list(const struct ip_set *set,
3760 + nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
3761 + if (!nested)
3762 + goto nla_put_failure;
3763 +- if (nla_put_string(skb, IPSET_ATTR_NAME,
3764 +- ip_set_name_byindex(map->net, e->id)))
3765 ++ ip_set_name_byindex(map->net, e->id, name);
3766 ++ if (nla_put_string(skb, IPSET_ATTR_NAME, name))
3767 + goto nla_put_failure;
3768 + if (ip_set_put_extensions(skb, set, e, true))
3769 + goto nla_put_failure;
3770 +diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
3771 +index 32535eea51b2..ad2fe6a7e47d 100644
3772 +--- a/net/netfilter/nft_compat.c
3773 ++++ b/net/netfilter/nft_compat.c
3774 +@@ -54,9 +54,11 @@ static bool nft_xt_put(struct nft_xt *xt)
3775 + return false;
3776 + }
3777 +
3778 +-static int nft_compat_chain_validate_dependency(const char *tablename,
3779 +- const struct nft_chain *chain)
3780 ++static int nft_compat_chain_validate_dependency(const struct nft_ctx *ctx,
3781 ++ const char *tablename)
3782 + {
3783 ++ enum nft_chain_types type = NFT_CHAIN_T_DEFAULT;
3784 ++ const struct nft_chain *chain = ctx->chain;
3785 + const struct nft_base_chain *basechain;
3786 +
3787 + if (!tablename ||
3788 +@@ -64,9 +66,12 @@ static int nft_compat_chain_validate_dependency(const char *tablename,
3789 + return 0;
3790 +
3791 + basechain = nft_base_chain(chain);
3792 +- if (strcmp(tablename, "nat") == 0 &&
3793 +- basechain->type->type != NFT_CHAIN_T_NAT)
3794 +- return -EINVAL;
3795 ++ if (strcmp(tablename, "nat") == 0) {
3796 ++ if (ctx->family != NFPROTO_BRIDGE)
3797 ++ type = NFT_CHAIN_T_NAT;
3798 ++ if (basechain->type->type != type)
3799 ++ return -EINVAL;
3800 ++ }
3801 +
3802 + return 0;
3803 + }
3804 +@@ -323,8 +328,7 @@ static int nft_target_validate(const struct nft_ctx *ctx,
3805 + if (target->hooks && !(hook_mask & target->hooks))
3806 + return -EINVAL;
3807 +
3808 +- ret = nft_compat_chain_validate_dependency(target->table,
3809 +- ctx->chain);
3810 ++ ret = nft_compat_chain_validate_dependency(ctx, target->table);
3811 + if (ret < 0)
3812 + return ret;
3813 + }
3814 +@@ -570,8 +574,7 @@ static int nft_match_validate(const struct nft_ctx *ctx,
3815 + if (match->hooks && !(hook_mask & match->hooks))
3816 + return -EINVAL;
3817 +
3818 +- ret = nft_compat_chain_validate_dependency(match->table,
3819 +- ctx->chain);
3820 ++ ret = nft_compat_chain_validate_dependency(ctx, match->table);
3821 + if (ret < 0)
3822 + return ret;
3823 + }
3824 +diff --git a/net/netfilter/nft_numgen.c b/net/netfilter/nft_numgen.c
3825 +index 649d1700ec5b..3cc1b3dc3c3c 100644
3826 +--- a/net/netfilter/nft_numgen.c
3827 ++++ b/net/netfilter/nft_numgen.c
3828 +@@ -24,7 +24,6 @@ struct nft_ng_inc {
3829 + u32 modulus;
3830 + atomic_t counter;
3831 + u32 offset;
3832 +- struct nft_set *map;
3833 + };
3834 +
3835 + static u32 nft_ng_inc_gen(struct nft_ng_inc *priv)
3836 +@@ -48,34 +47,11 @@ static void nft_ng_inc_eval(const struct nft_expr *expr,
3837 + regs->data[priv->dreg] = nft_ng_inc_gen(priv);
3838 + }
3839 +
3840 +-static void nft_ng_inc_map_eval(const struct nft_expr *expr,
3841 +- struct nft_regs *regs,
3842 +- const struct nft_pktinfo *pkt)
3843 +-{
3844 +- struct nft_ng_inc *priv = nft_expr_priv(expr);
3845 +- const struct nft_set *map = priv->map;
3846 +- const struct nft_set_ext *ext;
3847 +- u32 result;
3848 +- bool found;
3849 +-
3850 +- result = nft_ng_inc_gen(priv);
3851 +- found = map->ops->lookup(nft_net(pkt), map, &result, &ext);
3852 +-
3853 +- if (!found)
3854 +- return;
3855 +-
3856 +- nft_data_copy(&regs->data[priv->dreg],
3857 +- nft_set_ext_data(ext), map->dlen);
3858 +-}
3859 +-
3860 + static const struct nla_policy nft_ng_policy[NFTA_NG_MAX + 1] = {
3861 + [NFTA_NG_DREG] = { .type = NLA_U32 },
3862 + [NFTA_NG_MODULUS] = { .type = NLA_U32 },
3863 + [NFTA_NG_TYPE] = { .type = NLA_U32 },
3864 + [NFTA_NG_OFFSET] = { .type = NLA_U32 },
3865 +- [NFTA_NG_SET_NAME] = { .type = NLA_STRING,
3866 +- .len = NFT_SET_MAXNAMELEN - 1 },
3867 +- [NFTA_NG_SET_ID] = { .type = NLA_U32 },
3868 + };
3869 +
3870 + static int nft_ng_inc_init(const struct nft_ctx *ctx,
3871 +@@ -101,22 +77,6 @@ static int nft_ng_inc_init(const struct nft_ctx *ctx,
3872 + NFT_DATA_VALUE, sizeof(u32));
3873 + }
3874 +
3875 +-static int nft_ng_inc_map_init(const struct nft_ctx *ctx,
3876 +- const struct nft_expr *expr,
3877 +- const struct nlattr * const tb[])
3878 +-{
3879 +- struct nft_ng_inc *priv = nft_expr_priv(expr);
3880 +- u8 genmask = nft_genmask_next(ctx->net);
3881 +-
3882 +- nft_ng_inc_init(ctx, expr, tb);
3883 +-
3884 +- priv->map = nft_set_lookup_global(ctx->net, ctx->table,
3885 +- tb[NFTA_NG_SET_NAME],
3886 +- tb[NFTA_NG_SET_ID], genmask);
3887 +-
3888 +- return PTR_ERR_OR_ZERO(priv->map);
3889 +-}
3890 +-
3891 + static int nft_ng_dump(struct sk_buff *skb, enum nft_registers dreg,
3892 + u32 modulus, enum nft_ng_types type, u32 offset)
3893 + {
3894 +@@ -143,27 +103,10 @@ static int nft_ng_inc_dump(struct sk_buff *skb, const struct nft_expr *expr)
3895 + priv->offset);
3896 + }
3897 +
3898 +-static int nft_ng_inc_map_dump(struct sk_buff *skb,
3899 +- const struct nft_expr *expr)
3900 +-{
3901 +- const struct nft_ng_inc *priv = nft_expr_priv(expr);
3902 +-
3903 +- if (nft_ng_dump(skb, priv->dreg, priv->modulus,
3904 +- NFT_NG_INCREMENTAL, priv->offset) ||
3905 +- nla_put_string(skb, NFTA_NG_SET_NAME, priv->map->name))
3906 +- goto nla_put_failure;
3907 +-
3908 +- return 0;
3909 +-
3910 +-nla_put_failure:
3911 +- return -1;
3912 +-}
3913 +-
3914 + struct nft_ng_random {
3915 + enum nft_registers dreg:8;
3916 + u32 modulus;
3917 + u32 offset;
3918 +- struct nft_set *map;
3919 + };
3920 +
3921 + static u32 nft_ng_random_gen(struct nft_ng_random *priv)
3922 +@@ -183,25 +126,6 @@ static void nft_ng_random_eval(const struct nft_expr *expr,
3923 + regs->data[priv->dreg] = nft_ng_random_gen(priv);
3924 + }
3925 +
3926 +-static void nft_ng_random_map_eval(const struct nft_expr *expr,
3927 +- struct nft_regs *regs,
3928 +- const struct nft_pktinfo *pkt)
3929 +-{
3930 +- struct nft_ng_random *priv = nft_expr_priv(expr);
3931 +- const struct nft_set *map = priv->map;
3932 +- const struct nft_set_ext *ext;
3933 +- u32 result;
3934 +- bool found;
3935 +-
3936 +- result = nft_ng_random_gen(priv);
3937 +- found = map->ops->lookup(nft_net(pkt), map, &result, &ext);
3938 +- if (!found)
3939 +- return;
3940 +-
3941 +- nft_data_copy(&regs->data[priv->dreg],
3942 +- nft_set_ext_data(ext), map->dlen);
3943 +-}
3944 +-
3945 + static int nft_ng_random_init(const struct nft_ctx *ctx,
3946 + const struct nft_expr *expr,
3947 + const struct nlattr * const tb[])
3948 +@@ -226,21 +150,6 @@ static int nft_ng_random_init(const struct nft_ctx *ctx,
3949 + NFT_DATA_VALUE, sizeof(u32));
3950 + }
3951 +
3952 +-static int nft_ng_random_map_init(const struct nft_ctx *ctx,
3953 +- const struct nft_expr *expr,
3954 +- const struct nlattr * const tb[])
3955 +-{
3956 +- struct nft_ng_random *priv = nft_expr_priv(expr);
3957 +- u8 genmask = nft_genmask_next(ctx->net);
3958 +-
3959 +- nft_ng_random_init(ctx, expr, tb);
3960 +- priv->map = nft_set_lookup_global(ctx->net, ctx->table,
3961 +- tb[NFTA_NG_SET_NAME],
3962 +- tb[NFTA_NG_SET_ID], genmask);
3963 +-
3964 +- return PTR_ERR_OR_ZERO(priv->map);
3965 +-}
3966 +-
3967 + static int nft_ng_random_dump(struct sk_buff *skb, const struct nft_expr *expr)
3968 + {
3969 + const struct nft_ng_random *priv = nft_expr_priv(expr);
3970 +@@ -249,22 +158,6 @@ static int nft_ng_random_dump(struct sk_buff *skb, const struct nft_expr *expr)
3971 + priv->offset);
3972 + }
3973 +
3974 +-static int nft_ng_random_map_dump(struct sk_buff *skb,
3975 +- const struct nft_expr *expr)
3976 +-{
3977 +- const struct nft_ng_random *priv = nft_expr_priv(expr);
3978 +-
3979 +- if (nft_ng_dump(skb, priv->dreg, priv->modulus,
3980 +- NFT_NG_RANDOM, priv->offset) ||
3981 +- nla_put_string(skb, NFTA_NG_SET_NAME, priv->map->name))
3982 +- goto nla_put_failure;
3983 +-
3984 +- return 0;
3985 +-
3986 +-nla_put_failure:
3987 +- return -1;
3988 +-}
3989 +-
3990 + static struct nft_expr_type nft_ng_type;
3991 + static const struct nft_expr_ops nft_ng_inc_ops = {
3992 + .type = &nft_ng_type,
3993 +@@ -274,14 +167,6 @@ static const struct nft_expr_ops nft_ng_inc_ops = {
3994 + .dump = nft_ng_inc_dump,
3995 + };
3996 +
3997 +-static const struct nft_expr_ops nft_ng_inc_map_ops = {
3998 +- .type = &nft_ng_type,
3999 +- .size = NFT_EXPR_SIZE(sizeof(struct nft_ng_inc)),
4000 +- .eval = nft_ng_inc_map_eval,
4001 +- .init = nft_ng_inc_map_init,
4002 +- .dump = nft_ng_inc_map_dump,
4003 +-};
4004 +-
4005 + static const struct nft_expr_ops nft_ng_random_ops = {
4006 + .type = &nft_ng_type,
4007 + .size = NFT_EXPR_SIZE(sizeof(struct nft_ng_random)),
4008 +@@ -290,14 +175,6 @@ static const struct nft_expr_ops nft_ng_random_ops = {
4009 + .dump = nft_ng_random_dump,
4010 + };
4011 +
4012 +-static const struct nft_expr_ops nft_ng_random_map_ops = {
4013 +- .type = &nft_ng_type,
4014 +- .size = NFT_EXPR_SIZE(sizeof(struct nft_ng_random)),
4015 +- .eval = nft_ng_random_map_eval,
4016 +- .init = nft_ng_random_map_init,
4017 +- .dump = nft_ng_random_map_dump,
4018 +-};
4019 +-
4020 + static const struct nft_expr_ops *
4021 + nft_ng_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
4022 + {
4023 +@@ -312,12 +189,8 @@ nft_ng_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
4024 +
4025 + switch (type) {
4026 + case NFT_NG_INCREMENTAL:
4027 +- if (tb[NFTA_NG_SET_NAME])
4028 +- return &nft_ng_inc_map_ops;
4029 + return &nft_ng_inc_ops;
4030 + case NFT_NG_RANDOM:
4031 +- if (tb[NFTA_NG_SET_NAME])
4032 +- return &nft_ng_random_map_ops;
4033 + return &nft_ng_random_ops;
4034 + }
4035 +
4036 +diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
4037 +index 5ee859193783..25453a16385e 100644
4038 +--- a/net/netfilter/xt_IDLETIMER.c
4039 ++++ b/net/netfilter/xt_IDLETIMER.c
4040 +@@ -116,6 +116,22 @@ static void idletimer_tg_expired(struct timer_list *t)
4041 + schedule_work(&timer->work);
4042 + }
4043 +
4044 ++static int idletimer_check_sysfs_name(const char *name, unsigned int size)
4045 ++{
4046 ++ int ret;
4047 ++
4048 ++ ret = xt_check_proc_name(name, size);
4049 ++ if (ret < 0)
4050 ++ return ret;
4051 ++
4052 ++ if (!strcmp(name, "power") ||
4053 ++ !strcmp(name, "subsystem") ||
4054 ++ !strcmp(name, "uevent"))
4055 ++ return -EINVAL;
4056 ++
4057 ++ return 0;
4058 ++}
4059 ++
4060 + static int idletimer_tg_create(struct idletimer_tg_info *info)
4061 + {
4062 + int ret;
4063 +@@ -126,6 +142,10 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
4064 + goto out;
4065 + }
4066 +
4067 ++ ret = idletimer_check_sysfs_name(info->label, sizeof(info->label));
4068 ++ if (ret < 0)
4069 ++ goto out_free_timer;
4070 ++
4071 + sysfs_attr_init(&info->timer->attr.attr);
4072 + info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL);
4073 + if (!info->timer->attr.attr.name) {
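
The IDLETIMER hunk validates the user-supplied label before it becomes a sysfs attribute name: besides the generic proc-name check it rejects "power", "subsystem" and "uevent", names that already exist in a device's sysfs directory and would collide. The reserved-name part of the check reduces to a small helper:

/* Reject timer labels that would collide with attributes every sysfs
 * device directory already has; the generic length/character validation
 * is omitted in this sketch. */
#include <stdio.h>
#include <string.h>

static int check_timer_label(const char *name)
{
        if (!strcmp(name, "power") ||
            !strcmp(name, "subsystem") ||
            !strcmp(name, "uevent"))
                return -22;             /* -EINVAL */
        return 0;
}

int main(void)
{
        const char *labels[] = { "my_timer", "uevent", "power" };

        for (int i = 0; i < 3; i++)
                printf("%-10s -> %d\n", labels[i], check_timer_label(labels[i]));
        return 0;
}
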
4074 +diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
4075 +index b53cc0960b5d..ac16f509c95c 100644
4076 +--- a/net/sunrpc/xdr.c
4077 ++++ b/net/sunrpc/xdr.c
4078 +@@ -512,7 +512,7 @@ EXPORT_SYMBOL_GPL(xdr_commit_encode);
4079 + static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
4080 + size_t nbytes)
4081 + {
4082 +- static __be32 *p;
4083 ++ __be32 *p;
4084 + int space_left;
4085 + int frag1bytes, frag2bytes;
4086 +
4087 +diff --git a/scripts/package/mkdebian b/scripts/package/mkdebian
4088 +index 663a7f343b42..edcad61fe3cd 100755
4089 +--- a/scripts/package/mkdebian
4090 ++++ b/scripts/package/mkdebian
4091 +@@ -88,6 +88,7 @@ set_debarch() {
4092 + version=$KERNELRELEASE
4093 + if [ -n "$KDEB_PKGVERSION" ]; then
4094 + packageversion=$KDEB_PKGVERSION
4095 ++ revision=${packageversion##*-}
4096 + else
4097 + revision=$(cat .version 2>/dev/null||echo 1)
4098 + packageversion=$version-$revision
4099 +@@ -205,10 +206,12 @@ cat <<EOF > debian/rules
4100 + #!$(command -v $MAKE) -f
4101 +
4102 + build:
4103 +- \$(MAKE) KERNELRELEASE=${version} ARCH=${ARCH} KBUILD_SRC=
4104 ++ \$(MAKE) KERNELRELEASE=${version} ARCH=${ARCH} \
4105 ++ KBUILD_BUILD_VERSION=${revision} KBUILD_SRC=
4106 +
4107 + binary-arch:
4108 +- \$(MAKE) KERNELRELEASE=${version} ARCH=${ARCH} KBUILD_SRC= intdeb-pkg
4109 ++ \$(MAKE) KERNELRELEASE=${version} ARCH=${ARCH} \
4110 ++ KBUILD_BUILD_VERSION=${revision} KBUILD_SRC= intdeb-pkg
4111 +
4112 + clean:
4113 + rm -rf debian/*tmp debian/files
4114 +diff --git a/scripts/setlocalversion b/scripts/setlocalversion
4115 +index 79f7dd57d571..71f39410691b 100755
4116 +--- a/scripts/setlocalversion
4117 ++++ b/scripts/setlocalversion
4118 +@@ -74,7 +74,7 @@ scm_version()
4119 + fi
4120 +
4121 + # Check for uncommitted changes
4122 +- if git status -uno --porcelain | grep -qv '^.. scripts/package'; then
4123 ++ if git diff-index --name-only HEAD | grep -qv "^scripts/package"; then
4124 + printf '%s' -dirty
4125 + fi
4126 +
4127 +diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
4128 +index 974affe50531..76491e7f4177 100644
4129 +--- a/security/apparmor/lib.c
4130 ++++ b/security/apparmor/lib.c
4131 +@@ -90,10 +90,12 @@ const char *aa_splitn_fqname(const char *fqname, size_t n, const char **ns_name,
4132 + const char *end = fqname + n;
4133 + const char *name = skipn_spaces(fqname, n);
4134 +
4135 +- if (!name)
4136 +- return NULL;
4137 + *ns_name = NULL;
4138 + *ns_len = 0;
4139 ++
4140 ++ if (!name)
4141 ++ return NULL;
4142 ++
4143 + if (name[0] == ':') {
4144 + char *split = strnchr(&name[1], end - &name[1], ':');
4145 + *ns_name = skipn_spaces(&name[1], end - &name[1]);
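
The apparmor hunk moves the *ns_name/*ns_len initialisation above the early return NULL, so callers that only check the return value still see the output parameters in a defined state. The general shape of the fix, with invented parser logic:

/* Initialise out-parameters before any early return so callers never read
 * indeterminate values. */
#include <stdio.h>
#include <stddef.h>

static const char *split_name(const char *fqname, const char **ns_name,
                              size_t *ns_len)
{
        /* defined defaults first, before any bail-out */
        *ns_name = NULL;
        *ns_len = 0;

        if (!fqname || fqname[0] == '\0')
                return NULL;            /* early return is now safe */

        if (fqname[0] == ':') {
                *ns_name = fqname + 1;
                *ns_len = 3;            /* pretend the namespace is 3 chars */
                return fqname + 4;
        }
        return fqname;
}

int main(void)
{
        const char *ns;
        size_t len;
        const char *name = split_name("", &ns, &len);

        printf("name=%p ns=%p len=%zu\n", (void *)name, (void *)ns, len);
        return 0;
}
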
4146 +diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
4147 +index 0166a3d7cd55..dffd60cebc31 100644
4148 +--- a/sound/pci/hda/patch_ca0132.c
4149 ++++ b/sound/pci/hda/patch_ca0132.c
4150 +@@ -7395,7 +7395,7 @@ static void ca0132_free(struct hda_codec *codec)
4151 +
4152 + snd_hda_power_down(codec);
4153 + if (spec->mem_base)
4154 +- iounmap(spec->mem_base);
4155 ++ pci_iounmap(codec->bus->pci, spec->mem_base);
4156 + kfree(spec->spec_init_verbs);
4157 + kfree(codec->spec);
4158 + }
4159 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4160 +index 1070749c3cf4..e58537e13ad3 100644
4161 +--- a/sound/pci/hda/patch_realtek.c
4162 ++++ b/sound/pci/hda/patch_realtek.c
4163 +@@ -6481,6 +6481,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4164 + SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4165 + SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4166 + SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
4167 ++ SND_PCI_QUIRK(0x103c, 0x820d, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
4168 + SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
4169 + SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360),
4170 + SND_PCI_QUIRK(0x103c, 0x82bf, "HP", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
4171 +diff --git a/tools/perf/jvmti/jvmti_agent.c b/tools/perf/jvmti/jvmti_agent.c
4172 +index ac1bcdc17dae..f7eb63cbbc65 100644
4173 +--- a/tools/perf/jvmti/jvmti_agent.c
4174 ++++ b/tools/perf/jvmti/jvmti_agent.c
4175 +@@ -125,7 +125,7 @@ perf_get_timestamp(void)
4176 + }
4177 +
4178 + static int
4179 +-debug_cache_init(void)
4180 ++create_jit_cache_dir(void)
4181 + {
4182 + char str[32];
4183 + char *base, *p;
4184 +@@ -144,8 +144,13 @@ debug_cache_init(void)
4185 +
4186 + strftime(str, sizeof(str), JIT_LANG"-jit-%Y%m%d", &tm);
4187 +
4188 +- snprintf(jit_path, PATH_MAX - 1, "%s/.debug/", base);
4189 +-
4190 ++ ret = snprintf(jit_path, PATH_MAX, "%s/.debug/", base);
4191 ++ if (ret >= PATH_MAX) {
4192 ++ warnx("jvmti: cannot generate jit cache dir because %s/.debug/"
4193 ++ " is too long, please check the cwd, JITDUMPDIR, and"
4194 ++ " HOME variables", base);
4195 ++ return -1;
4196 ++ }
4197 + ret = mkdir(jit_path, 0755);
4198 + if (ret == -1) {
4199 + if (errno != EEXIST) {
4200 +@@ -154,20 +159,32 @@ debug_cache_init(void)
4201 + }
4202 + }
4203 +
4204 +- snprintf(jit_path, PATH_MAX - 1, "%s/.debug/jit", base);
4205 ++ ret = snprintf(jit_path, PATH_MAX, "%s/.debug/jit", base);
4206 ++ if (ret >= PATH_MAX) {
4207 ++ warnx("jvmti: cannot generate jit cache dir because"
4208 ++ " %s/.debug/jit is too long, please check the cwd,"
4209 ++ " JITDUMPDIR, and HOME variables", base);
4210 ++ return -1;
4211 ++ }
4212 + ret = mkdir(jit_path, 0755);
4213 + if (ret == -1) {
4214 + if (errno != EEXIST) {
4215 +- warn("cannot create jit cache dir %s", jit_path);
4216 ++ warn("jvmti: cannot create jit cache dir %s", jit_path);
4217 + return -1;
4218 + }
4219 + }
4220 +
4221 +- snprintf(jit_path, PATH_MAX - 1, "%s/.debug/jit/%s.XXXXXXXX", base, str);
4222 +-
4223 ++ ret = snprintf(jit_path, PATH_MAX, "%s/.debug/jit/%s.XXXXXXXX", base, str);
4224 ++ if (ret >= PATH_MAX) {
4225 ++ warnx("jvmti: cannot generate jit cache dir because"
4226 ++ " %s/.debug/jit/%s.XXXXXXXX is too long, please check"
4227 ++ " the cwd, JITDUMPDIR, and HOME variables",
4228 ++ base, str);
4229 ++ return -1;
4230 ++ }
4231 + p = mkdtemp(jit_path);
4232 + if (p != jit_path) {
4233 +- warn("cannot create jit cache dir %s", jit_path);
4234 ++ warn("jvmti: cannot create jit cache dir %s", jit_path);
4235 + return -1;
4236 + }
4237 +
4238 +@@ -228,7 +245,7 @@ void *jvmti_open(void)
4239 + {
4240 + char dump_path[PATH_MAX];
4241 + struct jitheader header;
4242 +- int fd;
4243 ++ int fd, ret;
4244 + FILE *fp;
4245 +
4246 + init_arch_timestamp();
4247 +@@ -245,12 +262,22 @@ void *jvmti_open(void)
4248 +
4249 + memset(&header, 0, sizeof(header));
4250 +
4251 +- debug_cache_init();
4252 ++ /*
4253 ++ * jitdump file dir
4254 ++ */
4255 ++ if (create_jit_cache_dir() < 0)
4256 ++ return NULL;
4257 +
4258 + /*
4259 + * jitdump file name
4260 + */
4261 +- scnprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid());
4262 ++ ret = snprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid());
4263 ++ if (ret >= PATH_MAX) {
4264 ++ warnx("jvmti: cannot generate jitdump file full path because"
4265 ++ " %s/jit-%i.dump is too long, please check the cwd,"
4266 ++ " JITDUMPDIR, and HOME variables", jit_path, getpid());
4267 ++ return NULL;
4268 ++ }
4269 +
4270 + fd = open(dump_path, O_CREAT|O_TRUNC|O_RDWR, 0666);
4271 + if (fd == -1)
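
The jvmti hunks replace the unchecked snprintf(..., PATH_MAX - 1, ...) calls with checks of the return value against PATH_MAX, since snprintf() reports the length the full string would have needed; a result at or above the buffer size means the path was truncated and must not be used. The pattern, standalone with a deliberately small buffer:

/* snprintf truncation check: return value >= sizeof(buffer) means the
 * string did not fit. */
#include <stdio.h>
#include <string.h>

#define PATH_MAX_DEMO 32   /* small on purpose, to force truncation */

static int build_path(char *out, size_t out_size, const char *base)
{
        int ret = snprintf(out, out_size, "%s/.debug/jit", base);

        if (ret < 0 || (size_t)ret >= out_size) {
                fprintf(stderr, "path for %s is too long, not using it\n", base);
                return -1;
        }
        return 0;
}

int main(void)
{
        char path[PATH_MAX_DEMO];

        if (build_path(path, sizeof(path), "/home/user") == 0)
                printf("ok: %s\n", path);

        /* A long base directory trips the check instead of silently
         * producing a truncated, wrong path. */
        build_path(path, sizeof(path),
                   "/a/very/deeply/nested/home/directory/for/some/user");
        return 0;
}
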
4272 +diff --git a/tools/perf/tests/attr/test-record-group-sampling b/tools/perf/tests/attr/test-record-group-sampling
4273 +index 8a33ca4f9e1f..f0729c454f16 100644
4274 +--- a/tools/perf/tests/attr/test-record-group-sampling
4275 ++++ b/tools/perf/tests/attr/test-record-group-sampling
4276 +@@ -37,4 +37,3 @@ sample_freq=0
4277 + sample_period=0
4278 + freq=0
4279 + write_backward=0
4280 +-sample_id_all=0
4281 +diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
4282 +index e596ae358c4d..03a72310315f 100644
4283 +--- a/tools/perf/util/evsel.c
4284 ++++ b/tools/perf/util/evsel.c
4285 +@@ -952,7 +952,6 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
4286 + attr->sample_freq = 0;
4287 + attr->sample_period = 0;
4288 + attr->write_backward = 0;
4289 +- attr->sample_id_all = 0;
4290 + }
4291 +
4292 + if (opts->no_samples)
4293 +diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
4294 +index 29770ea61768..6e70cc00c161 100644
4295 +--- a/tools/perf/util/symbol-elf.c
4296 ++++ b/tools/perf/util/symbol-elf.c
4297 +@@ -324,7 +324,17 @@ int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss)
4298 + plt_entry_size = 16;
4299 + break;
4300 +
4301 +- default: /* FIXME: s390/alpha/mips/parisc/poperpc/sh/sparc/xtensa need to be checked */
4302 ++ case EM_SPARC:
4303 ++ plt_header_size = 48;
4304 ++ plt_entry_size = 12;
4305 ++ break;
4306 ++
4307 ++ case EM_SPARCV9:
4308 ++ plt_header_size = 128;
4309 ++ plt_entry_size = 32;
4310 ++ break;
4311 ++
4312 ++ default: /* FIXME: s390/alpha/mips/parisc/poperpc/sh/xtensa need to be checked */
4313 + plt_header_size = shdr_plt.sh_entsize;
4314 + plt_entry_size = shdr_plt.sh_entsize;
4315 + break;
4316 +diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
4317 +index 6f318b15950e..5eff9bfc5758 100644
4318 +--- a/tools/perf/util/unwind-libdw.c
4319 ++++ b/tools/perf/util/unwind-libdw.c
4320 +@@ -45,13 +45,13 @@ static int __report_module(struct addr_location *al, u64 ip,
4321 + Dwarf_Addr s;
4322 +
4323 + dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL);
4324 +- if (s != al->map->start)
4325 ++ if (s != al->map->start - al->map->pgoff)
4326 + mod = 0;
4327 + }
4328 +
4329 + if (!mod)
4330 + mod = dwfl_report_elf(ui->dwfl, dso->short_name,
4331 +- (dso->symsrc_filename ? dso->symsrc_filename : dso->long_name), -1, al->map->start,
4332 ++ (dso->symsrc_filename ? dso->symsrc_filename : dso->long_name), -1, al->map->start - al->map->pgoff,
4333 + false);
4334 +
4335 + return mod && dwfl_addrmodule(ui->dwfl, ip) == mod ? 0 : -1;