Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.1 commit in: /
Date: Sun, 26 May 2019 17:07:06
Message-Id: 1558890388.9d3a433e9965ee8c3dd94f446ea6598439d3a362.mpagano@gentoo
commit: 9d3a433e9965ee8c3dd94f446ea6598439d3a362
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun May 26 17:06:28 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun May 26 17:06:28 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9d3a433e

Linux patch 5.1.5

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1004_linux-5.1.5.patch | 4948 ++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 4952 insertions(+)
diff --git a/0000_README b/0000_README
index 7dd0866..2431699 100644
--- a/0000_README
+++ b/0000_README
@@ -59,6 +59,10 @@ Patch: 1003_linux-5.1.4.patch
From: http://www.kernel.org
Desc: Linux 5.1.4

+Patch: 1004_linux-5.1.5.patch
+From: http://www.kernel.org
+Desc: Linux 5.1.5
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1004_linux-5.1.5.patch b/1004_linux-5.1.5.patch
new file mode 100644
index 0000000..db0c77c
--- /dev/null
+++ b/1004_linux-5.1.5.patch
@@ -0,0 +1,4948 @@
+diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
+index cf43bc4dbf31..a60fa516d4cb 100644
+--- a/Documentation/filesystems/porting
++++ b/Documentation/filesystems/porting
+@@ -638,3 +638,8 @@ in your dentry operations instead.
+ inode to d_splice_alias() will also do the right thing (equivalent of
+ d_add(dentry, NULL); return NULL;), so that kind of special cases
+ also doesn't need a separate treatment.
++--
++[mandatory]
++ DCACHE_RCUACCESS is gone; having an RCU delay on dentry freeing is the
++ default. DCACHE_NORCU opts out, and only d_alloc_pseudo() has any
++ business doing so.
+diff --git a/Makefile b/Makefile
+index acab93537f63..24a16a544ffd 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 1
+-SUBLEVEL = 4
++SUBLEVEL = 5
+ EXTRAVERSION =
+ NAME = Shy Crocodile
+
+diff --git a/arch/Kconfig b/arch/Kconfig
+index 33687dddd86a..9092e0ffe4d3 100644
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -764,7 +764,7 @@ config COMPAT_OLD_SIGACTION
+ bool
+
+ config 64BIT_TIME
+- def_bool ARCH_HAS_64BIT_TIME
++ def_bool y
+ help
+ This should be selected by all architectures that need to support
+ new system calls with a 64-bit time_t. This is relevant on all 32-bit
+diff --git a/arch/arm/boot/dts/imx6-logicpd-baseboard.dtsi b/arch/arm/boot/dts/imx6-logicpd-baseboard.dtsi
+index 3cae139e6396..c40a7af6ebee 100644
+--- a/arch/arm/boot/dts/imx6-logicpd-baseboard.dtsi
++++ b/arch/arm/boot/dts/imx6-logicpd-baseboard.dtsi
+@@ -88,6 +88,7 @@
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio7 12 GPIO_ACTIVE_HIGH>;
++ startup-delay-us = <70000>;
+ enable-active-high;
+ };
+
+@@ -99,6 +100,7 @@
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio1 26 GPIO_ACTIVE_HIGH>;
++ startup-delay-us = <70000>;
+ enable-active-high;
+ regulator-always-on;
+ };
+diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
+index 413863508f6f..d67fb64e908c 100644
+--- a/arch/mips/kernel/perf_event_mipsxx.c
++++ b/arch/mips/kernel/perf_event_mipsxx.c
+@@ -64,17 +64,11 @@ struct mips_perf_event {
+ #define CNTR_EVEN 0x55555555
+ #define CNTR_ODD 0xaaaaaaaa
+ #define CNTR_ALL 0xffffffff
+-#ifdef CONFIG_MIPS_MT_SMP
+ enum {
+ T = 0,
+ V = 1,
+ P = 2,
+ } range;
+-#else
+- #define T
+- #define V
+- #define P
+-#endif
+ };
+
+ static struct mips_perf_event raw_event;
+@@ -325,9 +319,7 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
+ {
+ struct perf_event *event = container_of(evt, struct perf_event, hw);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+-#ifdef CONFIG_MIPS_MT_SMP
+ unsigned int range = evt->event_base >> 24;
+-#endif /* CONFIG_MIPS_MT_SMP */
+
+ WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
+
+@@ -336,21 +328,15 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
+ /* Make sure interrupt enabled. */
+ MIPS_PERFCTRL_IE;
+
+-#ifdef CONFIG_CPU_BMIPS5000
+- {
++ if (IS_ENABLED(CONFIG_CPU_BMIPS5000)) {
+ /* enable the counter for the calling thread */
+ cpuc->saved_ctrl[idx] |=
+ (1 << (12 + vpe_id())) | BRCM_PERFCTRL_TC;
+- }
+-#else
+-#ifdef CONFIG_MIPS_MT_SMP
+- if (range > V) {
++ } else if (IS_ENABLED(CONFIG_MIPS_MT_SMP) && range > V) {
+ /* The counter is processor wide. Set it up to count all TCs. */
+ pr_debug("Enabling perf counter for all TCs\n");
+ cpuc->saved_ctrl[idx] |= M_TC_EN_ALL;
+- } else
+-#endif /* CONFIG_MIPS_MT_SMP */
+- {
++ } else {
+ unsigned int cpu, ctrl;
+
+ /*
+@@ -365,7 +351,6 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
+ cpuc->saved_ctrl[idx] |= ctrl;
+ pr_debug("Enabling perf counter for CPU%d\n", cpu);
+ }
+-#endif /* CONFIG_CPU_BMIPS5000 */
+ /*
+ * We do not actually let the counter run. Leave it until start().
+ */
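
The MIPS hunk above replaces #ifdef blocks with IS_ENABLED(), so both branches are always parsed and type-checked and the disabled one is discarded as dead code. A minimal userspace sketch of the idiom; CONFIG_FOO and the one-line IS_ENABLED() are illustrative stand-ins for Kconfig output and the kernel's <linux/kconfig.h> helper, which also copes with undefined symbols:

    #include <stdio.h>

    /* Hypothetical symbol; Kconfig defines enabled options to 1. */
    #define CONFIG_FOO 1

    /* Simplified stand-in for the kernel helper: yields an ordinary C
     * expression, so disabled code still gets compiled and type-checked,
     * then eliminated as dead code at -O1 and above. */
    #define IS_ENABLED(option) (option)

    int main(void)
    {
        if (IS_ENABLED(CONFIG_FOO))
            printf("FOO built in\n");
        else
            printf("FOO compiled out\n"); /* dead, but still checked */
        return 0;
    }

This is also why the patch can drop the #else stubs for T/V/P: the enum fields now exist in every configuration.
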
+diff --git a/arch/parisc/boot/compressed/head.S b/arch/parisc/boot/compressed/head.S
+index 5aba20fa48aa..e8b798fd0cf0 100644
+--- a/arch/parisc/boot/compressed/head.S
++++ b/arch/parisc/boot/compressed/head.S
+@@ -22,7 +22,7 @@
+ __HEAD
+
+ ENTRY(startup)
+- .level LEVEL
++ .level PA_ASM_LEVEL
+
+ #define PSW_W_SM 0x200
+ #define PSW_W_BIT 36
+@@ -63,7 +63,7 @@ $bss_loop:
+ load32 BOOTADDR(decompress_kernel),%r3
+
+ #ifdef CONFIG_64BIT
+- .level LEVEL
++ .level PA_ASM_LEVEL
+ ssm PSW_W_SM, %r0 /* set W-bit */
+ depdi 0, 31, 32, %r3
+ #endif
+@@ -72,7 +72,7 @@ $bss_loop:
+
+ startup_continue:
+ #ifdef CONFIG_64BIT
+- .level LEVEL
++ .level PA_ASM_LEVEL
+ rsm PSW_W_SM, %r0 /* clear W-bit */
+ #endif
+
+diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
+index c17ec0ee6e7c..d85738a7bbe6 100644
+--- a/arch/parisc/include/asm/assembly.h
++++ b/arch/parisc/include/asm/assembly.h
+@@ -61,14 +61,14 @@
+ #define LDCW ldcw,co
+ #define BL b,l
+ # ifdef CONFIG_64BIT
+-# define LEVEL 2.0w
++# define PA_ASM_LEVEL 2.0w
+ # else
+-# define LEVEL 2.0
++# define PA_ASM_LEVEL 2.0
+ # endif
+ #else
+ #define LDCW ldcw
+ #define BL bl
+-#define LEVEL 1.1
++#define PA_ASM_LEVEL 1.1
+ #endif
+
+ #ifdef __ASSEMBLY__
+diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
+index 006fb939cac8..4016fe1c65a9 100644
+--- a/arch/parisc/include/asm/cache.h
++++ b/arch/parisc/include/asm/cache.h
+@@ -44,22 +44,22 @@ void parisc_setup_cache_timing(void);
+
+ #define pdtlb(addr) asm volatile("pdtlb 0(%%sr1,%0)" \
+ ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \
+- : : "r" (addr))
++ : : "r" (addr) : "memory")
+ #define pitlb(addr) asm volatile("pitlb 0(%%sr1,%0)" \
+ ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \
+ ALTERNATIVE(ALT_COND_NO_SPLIT_TLB, INSN_NOP) \
+- : : "r" (addr))
++ : : "r" (addr) : "memory")
+ #define pdtlb_kernel(addr) asm volatile("pdtlb 0(%0)" \
+ ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \
+- : : "r" (addr))
++ : : "r" (addr) : "memory")
+
+ #define asm_io_fdc(addr) asm volatile("fdc %%r0(%0)" \
+ ALTERNATIVE(ALT_COND_NO_DCACHE, INSN_NOP) \
+ ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) \
+- : : "r" (addr))
++ : : "r" (addr) : "memory")
+ #define asm_io_sync() asm volatile("sync" \
+ ALTERNATIVE(ALT_COND_NO_DCACHE, INSN_NOP) \
+- ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) :: )
++ ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) :::"memory")

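The cache.h hunk above adds "memory" clobbers so the compiler treats each TLB purge or cache flush as a barrier: it may not keep memory contents cached in registers across the statement, nor reorder memory accesses around it. A hedged userspace illustration of what the clobber buys, using standard gcc/clang inline asm with nothing parisc-specific:

    /* A compiler-only barrier: the empty asm with a "memory" clobber is
     * what forces 'flag' to be reloaded on every loop pass below. Without
     * the clobber, -O2 may hoist the load out of the loop and spin on a
     * stale register copy. */
    static inline void compiler_barrier(void)
    {
        asm volatile("" ::: "memory");
    }

    int flag; /* deliberately not volatile */

    int wait_then_read(const int *data)
    {
        while (!flag)
            compiler_barrier();
        return *data;
    }

    int main(void)
    {
        int d = 42;
        flag = 1; /* single-threaded demo: set before waiting */
        return wait_then_read(&d) == 42 ? 0 : 1;
    }
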
+diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
+index fbb4e43fda05..f56cbab64ac1 100644
+--- a/arch/parisc/kernel/head.S
++++ b/arch/parisc/kernel/head.S
+@@ -22,7 +22,7 @@
+ #include <linux/linkage.h>
+ #include <linux/init.h>
+
+- .level LEVEL
++ .level PA_ASM_LEVEL
+
+ __INITDATA
+ ENTRY(boot_args)
+@@ -258,7 +258,7 @@ stext_pdc_ret:
+ ldo R%PA(fault_vector_11)(%r10),%r10
+
+ $is_pa20:
+- .level LEVEL /* restore 1.1 || 2.0w */
++ .level PA_ASM_LEVEL /* restore 1.1 || 2.0w */
+ #endif /*!CONFIG_64BIT*/
+ load32 PA(fault_vector_20),%r10
+
+diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
+index 841db71958cd..97c206734e24 100644
+--- a/arch/parisc/kernel/process.c
++++ b/arch/parisc/kernel/process.c
+@@ -193,6 +193,7 @@ int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
+ */
+
+ int running_on_qemu __read_mostly;
++EXPORT_SYMBOL(running_on_qemu);
+
+ void __cpuidle arch_cpu_idle_dead(void)
+ {
+diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
+index 4f77bd9be66b..93cc36d98875 100644
+--- a/arch/parisc/kernel/syscall.S
++++ b/arch/parisc/kernel/syscall.S
+@@ -48,7 +48,7 @@ registers).
+ */
+ #define KILL_INSN break 0,0
+
+- .level LEVEL
++ .level PA_ASM_LEVEL
+
+ .text
+
+diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
+index d0b166256f1a..14147eb7a142 100644
+--- a/arch/parisc/mm/init.c
++++ b/arch/parisc/mm/init.c
+@@ -495,7 +495,7 @@ static void __init map_pages(unsigned long start_vaddr,
+
+ void __init set_kernel_text_rw(int enable_read_write)
+ {
+- unsigned long start = (unsigned long) _text;
++ unsigned long start = (unsigned long) __init_begin;
+ unsigned long end = (unsigned long) &data_start;
+
+ map_pages(start, __pa(start), end-start,
+diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
+index 6ee8195a2ffb..4a6dd3ba0b0b 100644
+--- a/arch/powerpc/include/asm/mmu_context.h
++++ b/arch/powerpc/include/asm/mmu_context.h
+@@ -237,7 +237,6 @@ extern void arch_exit_mmap(struct mm_struct *mm);
+ #endif
+
+ static inline void arch_unmap(struct mm_struct *mm,
+- struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+ {
+ if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
+diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
+index fca34b2177e2..9f4b4bb78120 100644
+--- a/arch/um/include/asm/mmu_context.h
++++ b/arch/um/include/asm/mmu_context.h
+@@ -22,7 +22,6 @@ static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
+ }
+ extern void arch_exit_mmap(struct mm_struct *mm);
+ static inline void arch_unmap(struct mm_struct *mm,
+- struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+ {
+ }
+diff --git a/arch/unicore32/include/asm/mmu_context.h b/arch/unicore32/include/asm/mmu_context.h
+index 5c205a9cb5a6..9f06ea5466dd 100644
+--- a/arch/unicore32/include/asm/mmu_context.h
++++ b/arch/unicore32/include/asm/mmu_context.h
+@@ -88,7 +88,6 @@ static inline int arch_dup_mmap(struct mm_struct *oldmm,
+ }
+
+ static inline void arch_unmap(struct mm_struct *mm,
+- struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+ {
+ }
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index 4fe27b67d7e2..b1d59a7c556e 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -881,7 +881,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
+ * @paranoid == 2 is special: the stub will never switch stacks. This is for
+ * #DF: if the thread stack is somehow unusable, we'll still get a useful OOPS.
+ */
+-.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
++.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 create_gap=0
+ ENTRY(\sym)
+ UNWIND_HINT_IRET_REGS offset=\has_error_code*8
+
+@@ -901,6 +901,20 @@ ENTRY(\sym)
+ jnz .Lfrom_usermode_switch_stack_\@
+ .endif
+
++ .if \create_gap == 1
++ /*
++ * If coming from kernel space, create a 6-word gap to allow the
++ * int3 handler to emulate a call instruction.
++ */
++ testb $3, CS-ORIG_RAX(%rsp)
++ jnz .Lfrom_usermode_no_gap_\@
++ .rept 6
++ pushq 5*8(%rsp)
++ .endr
++ UNWIND_HINT_IRET_REGS offset=8
++.Lfrom_usermode_no_gap_\@:
++ .endif
++
+ .if \paranoid
+ call paranoid_entry
+ .else
+@@ -1132,7 +1146,7 @@ apicinterrupt3 HYPERV_STIMER0_VECTOR \
+ #endif /* CONFIG_HYPERV */
+
+ idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
+-idtentry int3 do_int3 has_error_code=0
++idtentry int3 do_int3 has_error_code=0 create_gap=1
+ idtentry stack_segment do_stack_segment has_error_code=1
+
+ #ifdef CONFIG_XEN_PV
+diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
+index 19d18fae6ec6..41019af68adf 100644
+--- a/arch/x86/include/asm/mmu_context.h
++++ b/arch/x86/include/asm/mmu_context.h
+@@ -277,8 +277,8 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
+ mpx_mm_init(mm);
+ }
+
+-static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
+- unsigned long start, unsigned long end)
++static inline void arch_unmap(struct mm_struct *mm, unsigned long start,
++ unsigned long end)
+ {
+ /*
+ * mpx_notify_unmap() goes and reads a rarely-hot
+@@ -298,7 +298,7 @@ static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
+ * consistently wrong.
+ */
+ if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
+- mpx_notify_unmap(mm, vma, start, end);
++ mpx_notify_unmap(mm, start, end);
+ }
+
+ /*
+diff --git a/arch/x86/include/asm/mpx.h b/arch/x86/include/asm/mpx.h
+index d0b1434fb0b6..143a5c193ed3 100644
+--- a/arch/x86/include/asm/mpx.h
++++ b/arch/x86/include/asm/mpx.h
+@@ -64,12 +64,15 @@ struct mpx_fault_info {
+ };
+
+ #ifdef CONFIG_X86_INTEL_MPX
+-int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs);
+-int mpx_handle_bd_fault(void);
++
++extern int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs);
++extern int mpx_handle_bd_fault(void);
++
+ static inline int kernel_managing_mpx_tables(struct mm_struct *mm)
+ {
+ return (mm->context.bd_addr != MPX_INVALID_BOUNDS_DIR);
+ }
++
+ static inline void mpx_mm_init(struct mm_struct *mm)
+ {
+ /*
+@@ -78,11 +81,10 @@ static inline void mpx_mm_init(struct mm_struct *mm)
+ */
+ mm->context.bd_addr = MPX_INVALID_BOUNDS_DIR;
+ }
+-void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
+- unsigned long start, unsigned long end);
+
+-unsigned long mpx_unmapped_area_check(unsigned long addr, unsigned long len,
+- unsigned long flags);
++extern void mpx_notify_unmap(struct mm_struct *mm, unsigned long start, unsigned long end);
++extern unsigned long mpx_unmapped_area_check(unsigned long addr, unsigned long len, unsigned long flags);
++
+ #else
+ static inline int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs)
+ {
+@@ -100,7 +102,6 @@ static inline void mpx_mm_init(struct mm_struct *mm)
+ {
+ }
+ static inline void mpx_notify_unmap(struct mm_struct *mm,
+- struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+ {
+ }
+diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
+index e85ff65c43c3..05861cc08787 100644
+--- a/arch/x86/include/asm/text-patching.h
++++ b/arch/x86/include/asm/text-patching.h
+@@ -39,4 +39,32 @@ extern int poke_int3_handler(struct pt_regs *regs);
+ extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
+ extern int after_bootmem;
+
++static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
++{
++ regs->ip = ip;
++}
++
++#define INT3_INSN_SIZE 1
++#define CALL_INSN_SIZE 5
++
++#ifdef CONFIG_X86_64
++static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val)
++{
++ /*
++ * The int3 handler in entry_64.S adds a gap between the
++ * stack where the break point happened, and the saving of
++ * pt_regs. We can extend the original stack because of
++ * this gap. See the idtentry macro's create_gap option.
++ */
++ regs->sp -= sizeof(unsigned long);
++ *(unsigned long *)regs->sp = val;
++}
++
++static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func)
++{
++ int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
++ int3_emulate_jmp(regs, func);
++}
++#endif
++
+ #endif /* _ASM_X86_TEXT_PATCHING_H */
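
The int3_emulate_*() helpers above, together with the create_gap change to entry_64.S earlier in this patch, let a CPU that hits a transient int3 in the middle of a call-site rewrite behave as if the call had executed: push a return address into the gap, then redirect ip. A rough userspace model of just that pointer arithmetic, with fake registers, a fake stack, and made-up addresses:

    #include <stdio.h>

    /* Minimal model of int3_emulate_push()/int3_emulate_call(): 'sp'
     * and 'ip' stand in for regs->sp and regs->ip saved at the trap. */
    struct fake_regs { unsigned long sp, ip; };

    #define INT3_INSN_SIZE 1
    #define CALL_INSN_SIZE 5

    static void emulate_push(struct fake_regs *regs, unsigned long *stack,
                             unsigned long val)
    {
        regs->sp -= sizeof(unsigned long);      /* grow into the gap */
        stack[regs->sp / sizeof(unsigned long)] = val;
    }

    static void emulate_call(struct fake_regs *regs, unsigned long *stack,
                             unsigned long func)
    {
        /* return address = first byte after the 5-byte call being patched */
        emulate_push(regs, stack, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
        regs->ip = func;                        /* "jump" to the target */
    }

    int main(void)
    {
        unsigned long stack[16] = { 0 };
        struct fake_regs regs = { .sp = 8 * sizeof(unsigned long),
                                  .ip = 0x1001 }; /* int3 was at 0x1000 */

        emulate_call(&regs, stack, 0x2000);
        printf("ip=%#lx ret=%#lx\n", regs.ip,
               stack[regs.sp / sizeof(unsigned long)]); /* 0x2000, 0x1005 */
        return 0;
    }
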
+diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
+index ef49517f6bb2..bd553b3af22e 100644
+--- a/arch/x86/kernel/ftrace.c
++++ b/arch/x86/kernel/ftrace.c
+@@ -29,6 +29,7 @@
+ #include <asm/kprobes.h>
+ #include <asm/ftrace.h>
+ #include <asm/nops.h>
++#include <asm/text-patching.h>
+
+ #ifdef CONFIG_DYNAMIC_FTRACE
+
+@@ -231,6 +232,7 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+ }
+
+ static unsigned long ftrace_update_func;
++static unsigned long ftrace_update_func_call;
+
+ static int update_ftrace_func(unsigned long ip, void *new)
+ {
+@@ -259,6 +261,8 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
+ unsigned char *new;
+ int ret;
+
++ ftrace_update_func_call = (unsigned long)func;
++
+ new = ftrace_call_replace(ip, (unsigned long)func);
+ ret = update_ftrace_func(ip, new);
+
+@@ -294,13 +298,28 @@ int ftrace_int3_handler(struct pt_regs *regs)
+ if (WARN_ON_ONCE(!regs))
+ return 0;
+
+- ip = regs->ip - 1;
+- if (!ftrace_location(ip) && !is_ftrace_caller(ip))
+- return 0;
++ ip = regs->ip - INT3_INSN_SIZE;
+
+- regs->ip += MCOUNT_INSN_SIZE - 1;
++#ifdef CONFIG_X86_64
++ if (ftrace_location(ip)) {
++ int3_emulate_call(regs, (unsigned long)ftrace_regs_caller);
++ return 1;
++ } else if (is_ftrace_caller(ip)) {
++ if (!ftrace_update_func_call) {
++ int3_emulate_jmp(regs, ip + CALL_INSN_SIZE);
++ return 1;
++ }
++ int3_emulate_call(regs, ftrace_update_func_call);
++ return 1;
++ }
++#else
++ if (ftrace_location(ip) || is_ftrace_caller(ip)) {
++ int3_emulate_jmp(regs, ip + CALL_INSN_SIZE);
++ return 1;
++ }
++#endif
+
+- return 1;
++ return 0;
+ }
+ NOKPROBE_SYMBOL(ftrace_int3_handler);
+
+@@ -859,6 +878,8 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
+
+ func = ftrace_ops_get_func(ops);
+
++ ftrace_update_func_call = (unsigned long)func;
++
+ /* Do a safe modify in case the trampoline is executing */
+ new = ftrace_call_replace(ip, (unsigned long)func);
+ ret = update_ftrace_func(ip, new);
+@@ -960,6 +981,7 @@ static int ftrace_mod_jmp(unsigned long ip, void *func)
+ {
+ unsigned char *new;
+
++ ftrace_update_func_call = 0UL;
+ new = ftrace_jmp_replace(ip, (unsigned long)func);
+
+ return update_ftrace_func(ip, new);
+diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
+index c805db6236b4..7aeb9fe2955f 100644
+--- a/arch/x86/mm/mpx.c
++++ b/arch/x86/mm/mpx.c
+@@ -881,9 +881,10 @@ static int mpx_unmap_tables(struct mm_struct *mm,
+ * the virtual address region start...end have already been split if
+ * necessary, and the 'vma' is the first vma in this range (start -> end).
+ */
+-void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
+- unsigned long start, unsigned long end)
++void mpx_notify_unmap(struct mm_struct *mm, unsigned long start,
++ unsigned long end)
+ {
++ struct vm_area_struct *vma;
+ int ret;
+
+ /*
+@@ -902,11 +903,12 @@ void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
+ * which should not occur normally. Being strict about it here
+ * helps ensure that we do not have an exploitable stack overflow.
+ */
+- do {
++ vma = find_vma(mm, start);
++ while (vma && vma->vm_start < end) {
+ if (vma->vm_flags & VM_MPX)
+ return;
+ vma = vma->vm_next;
+- } while (vma && vma->vm_start < end);
++ }
+
+ ret = mpx_unmap_tables(mm, start, end);
+ if (ret)
+diff --git a/block/blk-core.c b/block/blk-core.c
+index a55389ba8779..b375cfea024c 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -375,7 +375,7 @@ void blk_cleanup_queue(struct request_queue *q)
+ blk_exit_queue(q);
+
+ if (queue_is_mq(q))
+- blk_mq_free_queue(q);
++ blk_mq_exit_queue(q);
+
+ percpu_ref_exit(&q->q_usage_counter);
+
+diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
+index 3f9c3f4ac44c..4040e62c3737 100644
+--- a/block/blk-mq-sysfs.c
++++ b/block/blk-mq-sysfs.c
+@@ -10,6 +10,7 @@
+ #include <linux/smp.h>
+
+ #include <linux/blk-mq.h>
++#include "blk.h"
+ #include "blk-mq.h"
+ #include "blk-mq-tag.h"
+
+@@ -33,6 +34,11 @@ static void blk_mq_hw_sysfs_release(struct kobject *kobj)
+ {
+ struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
+ kobj);
++
++ if (hctx->flags & BLK_MQ_F_BLOCKING)
++ cleanup_srcu_struct(hctx->srcu);
++ blk_free_flush_queue(hctx->fq);
++ sbitmap_free(&hctx->ctx_map);
+ free_cpumask_var(hctx->cpumask);
+ kfree(hctx->ctxs);
+ kfree(hctx);
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index fc60ed7e940e..b0e5e67e20a2 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -2267,12 +2267,7 @@ static void blk_mq_exit_hctx(struct request_queue *q,
+ if (set->ops->exit_hctx)
+ set->ops->exit_hctx(hctx, hctx_idx);
+
+- if (hctx->flags & BLK_MQ_F_BLOCKING)
+- cleanup_srcu_struct(hctx->srcu);
+-
+ blk_mq_remove_cpuhp(hctx);
+- blk_free_flush_queue(hctx->fq);
+- sbitmap_free(&hctx->ctx_map);
+ }
+
+ static void blk_mq_exit_hw_queues(struct request_queue *q,
+@@ -2905,7 +2900,8 @@ err_exit:
+ }
+ EXPORT_SYMBOL(blk_mq_init_allocated_queue);
+
+-void blk_mq_free_queue(struct request_queue *q)
++/* tags can _not_ be used after returning from blk_mq_exit_queue */
++void blk_mq_exit_queue(struct request_queue *q)
+ {
+ struct blk_mq_tag_set *set = q->tag_set;
+
+diff --git a/block/blk-mq.h b/block/blk-mq.h
+index 423ea88ab6fb..633a5a77ee8b 100644
+--- a/block/blk-mq.h
++++ b/block/blk-mq.h
+@@ -37,7 +37,7 @@ struct blk_mq_ctx {
+ struct kobject kobj;
+ } ____cacheline_aligned_in_smp;
+
+-void blk_mq_free_queue(struct request_queue *q);
++void blk_mq_exit_queue(struct request_queue *q);
+ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
+ void blk_mq_wake_waiters(struct request_queue *q);
+ bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
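
The three block-layer hunks above move the hctx frees from blk_mq_exit_hctx() into the sysfs kobject release callback, so the memory lives until the last reference drops instead of being freed while a sysfs file may still hold it. A tiny userspace sketch of that refcount-owns-the-memory pattern, with a plain counter standing in for the kobject:

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for a kobject-style refcounted object: the release
     * function, not the teardown path, is what frees the memory. */
    struct hctx_like {
        int refs;
        int *ctx_map;        /* resource freed only in release() */
    };

    static void release(struct hctx_like *h)
    {
        free(h->ctx_map);
        free(h);
    }

    static void put(struct hctx_like *h)
    {
        if (--h->refs == 0)  /* last reference gone -> release */
            release(h);
    }

    int main(void)
    {
        struct hctx_like *h = calloc(1, sizeof(*h));
        h->refs = 2;                     /* queue + an open sysfs file */
        h->ctx_map = calloc(4, sizeof(int));

        put(h);  /* teardown drops its reference; nothing freed yet */
        put(h);  /* "sysfs file" closes: last ref, release() frees */
        puts("freed after last reference, not during teardown");
        return 0;
    }
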
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index a823f469e53f..0df9b4461766 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -490,7 +490,7 @@ re_probe:
+ if (dev->bus->dma_configure) {
+ ret = dev->bus->dma_configure(dev);
+ if (ret)
+- goto dma_failed;
++ goto probe_failed;
+ }
+
+ if (driver_sysfs_add(dev)) {
+@@ -546,14 +546,13 @@ re_probe:
+ goto done;
+
+ probe_failed:
+- arch_teardown_dma_ops(dev);
+-dma_failed:
+ if (dev->bus)
+ blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+ BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
+ pinctrl_bind_failed:
+ device_links_no_driver(dev);
+ devres_release_all(dev);
++ arch_teardown_dma_ops(dev);
+ driver_sysfs_remove(dev);
+ dev->driver = NULL;
+ dev_set_drvdata(dev, NULL);
+diff --git a/drivers/block/brd.c b/drivers/block/brd.c
+index c18586fccb6f..17defbf4f332 100644
+--- a/drivers/block/brd.c
++++ b/drivers/block/brd.c
+@@ -96,13 +96,8 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
+ /*
+ * Must use NOIO because we don't want to recurse back into the
+ * block or filesystem layers from page reclaim.
+- *
+- * Cannot support DAX and highmem, because our ->direct_access
+- * routine for DAX must return memory that is always addressable.
+- * If DAX was reworked to use pfns and kmap throughout, this
+- * restriction might be able to be lifted.
+ */
+- gfp_flags = GFP_NOIO | __GFP_ZERO;
++ gfp_flags = GFP_NOIO | __GFP_ZERO | __GFP_HIGHMEM;
+ page = alloc_page(gfp_flags);
+ if (!page)
+ return NULL;
+diff --git a/drivers/clk/hisilicon/clk-hi3660.c b/drivers/clk/hisilicon/clk-hi3660.c
+index f40419959656..794eeff0d5d2 100644
+--- a/drivers/clk/hisilicon/clk-hi3660.c
++++ b/drivers/clk/hisilicon/clk-hi3660.c
+@@ -163,8 +163,12 @@ static const struct hisi_gate_clock hi3660_crgctrl_gate_sep_clks[] = {
+ "clk_isp_snclk_mux", CLK_SET_RATE_PARENT, 0x50, 17, 0, },
+ { HI3660_CLK_GATE_ISP_SNCLK2, "clk_gate_isp_snclk2",
+ "clk_isp_snclk_mux", CLK_SET_RATE_PARENT, 0x50, 18, 0, },
++ /*
++ * clk_gate_ufs_subsys is a system bus clock, mark it as critical
++ * clock and keep it on for system suspend and resume.
++ */
+ { HI3660_CLK_GATE_UFS_SUBSYS, "clk_gate_ufs_subsys", "clk_div_sysbus",
+- CLK_SET_RATE_PARENT, 0x50, 21, 0, },
++ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0x50, 21, 0, },
+ { HI3660_PCLK_GATE_DSI0, "pclk_gate_dsi0", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x50, 28, 0, },
+ { HI3660_PCLK_GATE_DSI1, "pclk_gate_dsi1", "clk_div_cfgbus",
+diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c
+index f54e4015b0b1..18842d660317 100644
+--- a/drivers/clk/mediatek/clk-pll.c
++++ b/drivers/clk/mediatek/clk-pll.c
+@@ -88,6 +88,32 @@ static unsigned long __mtk_pll_recalc_rate(struct mtk_clk_pll *pll, u32 fin,
+ return ((unsigned long)vco + postdiv - 1) / postdiv;
+ }
+
++static void __mtk_pll_tuner_enable(struct mtk_clk_pll *pll)
++{
++ u32 r;
++
++ if (pll->tuner_en_addr) {
++ r = readl(pll->tuner_en_addr) | BIT(pll->data->tuner_en_bit);
++ writel(r, pll->tuner_en_addr);
++ } else if (pll->tuner_addr) {
++ r = readl(pll->tuner_addr) | AUDPLL_TUNER_EN;
++ writel(r, pll->tuner_addr);
++ }
++}
++
++static void __mtk_pll_tuner_disable(struct mtk_clk_pll *pll)
++{
++ u32 r;
++
++ if (pll->tuner_en_addr) {
++ r = readl(pll->tuner_en_addr) & ~BIT(pll->data->tuner_en_bit);
++ writel(r, pll->tuner_en_addr);
++ } else if (pll->tuner_addr) {
++ r = readl(pll->tuner_addr) & ~AUDPLL_TUNER_EN;
++ writel(r, pll->tuner_addr);
++ }
++}
++
+ static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw,
+ int postdiv)
+ {
+@@ -96,6 +122,9 @@ static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw,
+
+ pll_en = readl(pll->base_addr + REG_CON0) & CON0_BASE_EN;
+
++ /* disable tuner */
++ __mtk_pll_tuner_disable(pll);
++
+ /* set postdiv */
+ val = readl(pll->pd_addr);
+ val &= ~(POSTDIV_MASK << pll->data->pd_shift);
+@@ -122,6 +151,9 @@ static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw,
+ if (pll->tuner_addr)
+ writel(con1 + 1, pll->tuner_addr);
+
++ /* restore tuner_en */
++ __mtk_pll_tuner_enable(pll);
++
+ if (pll_en)
+ udelay(20);
+ }
+@@ -228,13 +260,7 @@ static int mtk_pll_prepare(struct clk_hw *hw)
+ r |= pll->data->en_mask;
+ writel(r, pll->base_addr + REG_CON0);
+
+- if (pll->tuner_en_addr) {
+- r = readl(pll->tuner_en_addr) | BIT(pll->data->tuner_en_bit);
+- writel(r, pll->tuner_en_addr);
+- } else if (pll->tuner_addr) {
+- r = readl(pll->tuner_addr) | AUDPLL_TUNER_EN;
+- writel(r, pll->tuner_addr);
+- }
++ __mtk_pll_tuner_enable(pll);
+
+ udelay(20);
+
+@@ -258,13 +284,7 @@ static void mtk_pll_unprepare(struct clk_hw *hw)
+ writel(r, pll->base_addr + REG_CON0);
+ }
+
+- if (pll->tuner_en_addr) {
+- r = readl(pll->tuner_en_addr) & ~BIT(pll->data->tuner_en_bit);
+- writel(r, pll->tuner_en_addr);
+- } else if (pll->tuner_addr) {
+- r = readl(pll->tuner_addr) & ~AUDPLL_TUNER_EN;
+- writel(r, pll->tuner_addr);
+- }
++ __mtk_pll_tuner_disable(pll);
+
+ r = readl(pll->base_addr + REG_CON0);
+ r &= ~CON0_BASE_EN;
+diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c
+index 65ab5c2f48b0..f12142d9cea2 100644
+--- a/drivers/clk/rockchip/clk-rk3328.c
++++ b/drivers/clk/rockchip/clk-rk3328.c
+@@ -458,7 +458,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
+ RK3328_CLKSEL_CON(35), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ RK3328_CLKGATE_CON(2), 12, GFLAGS),
+ COMPOSITE(SCLK_CRYPTO, "clk_crypto", mux_2plls_p, 0,
+- RK3328_CLKSEL_CON(20), 7, 1, MFLAGS, 0, 7, DFLAGS,
++ RK3328_CLKSEL_CON(20), 7, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3328_CLKGATE_CON(2), 4, GFLAGS),
+ COMPOSITE_NOMUX(SCLK_TSADC, "clk_tsadc", "clk_24m", 0,
+ RK3328_CLKSEL_CON(22), 0, 10, DFLAGS,
+@@ -550,15 +550,15 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
+ GATE(0, "hclk_rkvenc_niu", "hclk_rkvenc", 0,
+ RK3328_CLKGATE_CON(25), 1, GFLAGS),
+ GATE(ACLK_H265, "aclk_h265", "aclk_rkvenc", 0,
+- RK3328_CLKGATE_CON(25), 0, GFLAGS),
++ RK3328_CLKGATE_CON(25), 2, GFLAGS),
+ GATE(PCLK_H265, "pclk_h265", "hclk_rkvenc", 0,
+- RK3328_CLKGATE_CON(25), 1, GFLAGS),
++ RK3328_CLKGATE_CON(25), 3, GFLAGS),
+ GATE(ACLK_H264, "aclk_h264", "aclk_rkvenc", 0,
+- RK3328_CLKGATE_CON(25), 0, GFLAGS),
++ RK3328_CLKGATE_CON(25), 4, GFLAGS),
+ GATE(HCLK_H264, "hclk_h264", "hclk_rkvenc", 0,
+- RK3328_CLKGATE_CON(25), 1, GFLAGS),
++ RK3328_CLKGATE_CON(25), 5, GFLAGS),
+ GATE(ACLK_AXISRAM, "aclk_axisram", "aclk_rkvenc", CLK_IGNORE_UNUSED,
+- RK3328_CLKGATE_CON(25), 0, GFLAGS),
++ RK3328_CLKGATE_CON(25), 6, GFLAGS),
+
+ COMPOSITE(SCLK_VENC_CORE, "sclk_venc_core", mux_4plls_p, 0,
+ RK3328_CLKSEL_CON(51), 14, 2, MFLAGS, 8, 5, DFLAGS,
+@@ -663,7 +663,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
+
+ /* PD_GMAC */
+ COMPOSITE(ACLK_GMAC, "aclk_gmac", mux_2plls_hdmiphy_p, 0,
+- RK3328_CLKSEL_CON(35), 6, 2, MFLAGS, 0, 5, DFLAGS,
++ RK3328_CLKSEL_CON(25), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3328_CLKGATE_CON(3), 2, GFLAGS),
+ COMPOSITE_NOMUX(PCLK_GMAC, "pclk_gmac", "aclk_gmac", 0,
+ RK3328_CLKSEL_CON(25), 8, 3, DFLAGS,
+@@ -733,7 +733,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
+
+ /* PD_PERI */
+ GATE(0, "aclk_peri_noc", "aclk_peri", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(19), 11, GFLAGS),
+- GATE(ACLK_USB3OTG, "aclk_usb3otg", "aclk_peri", 0, RK3328_CLKGATE_CON(19), 4, GFLAGS),
++ GATE(ACLK_USB3OTG, "aclk_usb3otg", "aclk_peri", 0, RK3328_CLKGATE_CON(19), 14, GFLAGS),
+
+ GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_peri", 0, RK3328_CLKGATE_CON(19), 0, GFLAGS),
+ GATE(HCLK_SDIO, "hclk_sdio", "hclk_peri", 0, RK3328_CLKGATE_CON(19), 1, GFLAGS),
+@@ -913,7 +913,7 @@ static void __init rk3328_clk_init(struct device_node *np)
+ &rk3328_cpuclk_data, rk3328_cpuclk_rates,
+ ARRAY_SIZE(rk3328_cpuclk_rates));
+
+- rockchip_register_softrst(np, 11, reg_base + RK3328_SOFTRST_CON(0),
++ rockchip_register_softrst(np, 12, reg_base + RK3328_SOFTRST_CON(0),
+ ROCKCHIP_SOFTRST_HIWORD_MASK);
+
+ rockchip_register_restart_notifier(ctx, RK3328_GLB_SRST_FST, NULL);
+diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
+index b50b7460014b..3e67cbcd80da 100644
+--- a/drivers/clk/tegra/clk-pll.c
++++ b/drivers/clk/tegra/clk-pll.c
+@@ -663,8 +663,8 @@ static void _update_pll_mnp(struct tegra_clk_pll *pll,
+ pll_override_writel(val, params->pmc_divp_reg, pll);
+
+ val = pll_override_readl(params->pmc_divnm_reg, pll);
+- val &= ~(divm_mask(pll) << div_nmp->override_divm_shift) |
+- ~(divn_mask(pll) << div_nmp->override_divn_shift);
++ val &= ~((divm_mask(pll) << div_nmp->override_divm_shift) |
++ (divn_mask(pll) << div_nmp->override_divn_shift));
+ val |= (cfg->m << div_nmp->override_divm_shift) |
+ (cfg->n << div_nmp->override_divn_shift);
+ pll_override_writel(val, params->pmc_divnm_reg, pll);
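
The tegra clk-pll change above is a precedence fix: for two disjoint masks A and B, ~A | ~B equals ~(A & B), which is all ones, so the old val &= ~A | ~B cleared nothing and stale divider bits leaked into the new value. A tiny self-check; the field widths and shifts here are hypothetical, not Tegra's real layout:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t val = 0xffffffff;
        uint32_t divm_mask = 0x1fu << 0;    /* hypothetical M field */
        uint32_t divn_mask = 0xffu << 8;    /* hypothetical N field */

        uint32_t broken = val & (~divm_mask | ~divn_mask);  /* old code */
        uint32_t fixed  = val & ~(divm_mask | divn_mask);   /* this patch */

        assert(broken == 0xffffffff);   /* De Morgan: nothing cleared */
        assert(fixed  == 0xffff00e0);   /* both fields really cleared */
        return 0;
    }
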
+diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
+index 5f3c1378b90e..99d9f431ae2c 100644
+--- a/drivers/dma/imx-sdma.c
++++ b/drivers/dma/imx-sdma.c
+@@ -419,6 +419,7 @@ struct sdma_driver_data {
+ int chnenbl0;
+ int num_events;
+ struct sdma_script_start_addrs *script_addrs;
++ bool check_ratio;
+ };
+
+ struct sdma_engine {
+@@ -557,6 +558,13 @@ static struct sdma_driver_data sdma_imx7d = {
+ .script_addrs = &sdma_script_imx7d,
+ };
+
++static struct sdma_driver_data sdma_imx8mq = {
++ .chnenbl0 = SDMA_CHNENBL0_IMX35,
++ .num_events = 48,
++ .script_addrs = &sdma_script_imx7d,
++ .check_ratio = 1,
++};
++
+ static const struct platform_device_id sdma_devtypes[] = {
+ {
+ .name = "imx25-sdma",
+@@ -579,6 +587,9 @@ static const struct platform_device_id sdma_devtypes[] = {
+ }, {
+ .name = "imx7d-sdma",
+ .driver_data = (unsigned long)&sdma_imx7d,
++ }, {
++ .name = "imx8mq-sdma",
++ .driver_data = (unsigned long)&sdma_imx8mq,
+ }, {
+ /* sentinel */
+ }
+@@ -593,6 +604,7 @@ static const struct of_device_id sdma_dt_ids[] = {
+ { .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
+ { .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
+ { .compatible = "fsl,imx7d-sdma", .data = &sdma_imx7d, },
++ { .compatible = "fsl,imx8mq-sdma", .data = &sdma_imx8mq, },
+ { /* sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(of, sdma_dt_ids);
+@@ -1852,7 +1864,8 @@ static int sdma_init(struct sdma_engine *sdma)
+ if (ret)
+ goto disable_clk_ipg;
+
+- if (clk_get_rate(sdma->clk_ahb) == clk_get_rate(sdma->clk_ipg))
++ if (sdma->drvdata->check_ratio &&
++ (clk_get_rate(sdma->clk_ahb) == clk_get_rate(sdma->clk_ipg)))
+ sdma->clk_ratio = 1;
+
+ /* Be sure SDMA has not started yet */
+diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
+index ba7aaf421f36..8ff326c0c406 100644
+--- a/drivers/hwtracing/intel_th/msu.c
++++ b/drivers/hwtracing/intel_th/msu.c
+@@ -84,6 +84,7 @@ struct msc_iter {
+ * @reg_base: register window base address
+ * @thdev: intel_th_device pointer
+ * @win_list: list of windows in multiblock mode
++ * @single_sgt: single mode buffer
+ * @nr_pages: total number of pages allocated for this buffer
+ * @single_sz: amount of data in single mode
+ * @single_wrap: single mode wrap occurred
+@@ -104,6 +105,7 @@ struct msc {
+ struct intel_th_device *thdev;
+
+ struct list_head win_list;
++ struct sg_table single_sgt;
+ unsigned long nr_pages;
+ unsigned long single_sz;
+ unsigned int single_wrap : 1;
+@@ -617,22 +619,45 @@ static void intel_th_msc_deactivate(struct intel_th_device *thdev)
+ */
+ static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
+ {
++ unsigned long nr_pages = size >> PAGE_SHIFT;
+ unsigned int order = get_order(size);
+ struct page *page;
++ int ret;
+
+ if (!size)
+ return 0;
+
++ ret = sg_alloc_table(&msc->single_sgt, 1, GFP_KERNEL);
++ if (ret)
++ goto err_out;
++
++ ret = -ENOMEM;
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!page)
+- return -ENOMEM;
++ goto err_free_sgt;
+
+ split_page(page, order);
+- msc->nr_pages = size >> PAGE_SHIFT;
++ sg_set_buf(msc->single_sgt.sgl, page_address(page), size);
++
++ ret = dma_map_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, 1,
++ DMA_FROM_DEVICE);
++ if (ret < 0)
++ goto err_free_pages;
++
++ msc->nr_pages = nr_pages;
+ msc->base = page_address(page);
+- msc->base_addr = page_to_phys(page);
++ msc->base_addr = sg_dma_address(msc->single_sgt.sgl);
+
+ return 0;
++
++err_free_pages:
++ __free_pages(page, order);
++
++err_free_sgt:
++ sg_free_table(&msc->single_sgt);
++
++err_out:
++ return ret;
+ }
+
+ /**
+@@ -643,6 +668,10 @@ static void msc_buffer_contig_free(struct msc *msc)
+ {
+ unsigned long off;
+
++ dma_unmap_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl,
++ 1, DMA_FROM_DEVICE);
++ sg_free_table(&msc->single_sgt);
++
+ for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
+ struct page *page = virt_to_page(msc->base + off);
+
+diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
+index c7ba8acfd4d5..e55b902560de 100644
+--- a/drivers/hwtracing/stm/core.c
++++ b/drivers/hwtracing/stm/core.c
+@@ -166,11 +166,10 @@ stm_master(struct stm_device *stm, unsigned int idx)
+ static int stp_master_alloc(struct stm_device *stm, unsigned int idx)
+ {
+ struct stp_master *master;
+- size_t size;
+
+- size = ALIGN(stm->data->sw_nchannels, 8) / 8;
+- size += sizeof(struct stp_master);
+- master = kzalloc(size, GFP_ATOMIC);
++ master = kzalloc(struct_size(master, chan_map,
++ BITS_TO_LONGS(stm->data->sw_nchannels)),
++ GFP_ATOMIC);
+ if (!master)
+ return -ENOMEM;
+
+@@ -218,8 +217,8 @@ stm_output_disclaim(struct stm_device *stm, struct stm_output *output)
+ bitmap_release_region(&master->chan_map[0], output->channel,
+ ilog2(output->nr_chans));
+
+- output->nr_chans = 0;
+ master->nr_free += output->nr_chans;
++ output->nr_chans = 0;
+ }
+
+ /*
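
The stm hunk above swaps a hand-rolled size calculation for struct_size(), which sizes a struct plus its flexible-array tail (the kernel version, from <linux/overflow.h>, also saturates on overflow). A hedged userspace equivalent without the overflow handling; the struct and macro names here are illustrative:

    #include <stdio.h>
    #include <stdlib.h>

    struct stp_master_like {
        unsigned int nr_free;
        unsigned long chan_map[];        /* flexible array member */
    };

    /* Simplified struct_size(): sizeof is unevaluated, so using the
     * pointer in its own initializer below is legal C. */
    #define struct_size_simple(p, member, count) \
        (sizeof(*(p)) + sizeof((p)->member[0]) * (count))

    #define BITS_TO_LONGS(n) \
        (((n) + 8 * sizeof(long) - 1) / (8 * sizeof(long)))

    int main(void)
    {
        unsigned int nchannels = 127;
        struct stp_master_like *m =
            calloc(1, struct_size_simple(m, chan_map,
                                         BITS_TO_LONGS(nchannels)));
        if (!m)
            return 1;
        printf("allocated %zu bytes for %u channels\n",
               struct_size_simple(m, chan_map, BITS_TO_LONGS(nchannels)),
               nchannels);
        free(m);
        return 0;
    }
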
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index d3dd290ae1b1..da81402992bc 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -2070,11 +2070,12 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
+ return -EPERM;
+ vma->vm_flags &= ~VM_MAYWRITE;
+
+- if (!dev->mdev->clock_info_page)
++ if (!dev->mdev->clock_info)
+ return -EOPNOTSUPP;
+
+ return rdma_user_mmap_page(&context->ibucontext, vma,
+- dev->mdev->clock_info_page, PAGE_SIZE);
++ virt_to_page(dev->mdev->clock_info),
++ PAGE_SIZE);
+ }
+
+ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+index 48eda16db1a7..9b5e11d3fb85 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+@@ -2402,7 +2402,18 @@ static ssize_t dev_id_show(struct device *dev,
+ {
+ struct net_device *ndev = to_net_dev(dev);
+
+- if (ndev->dev_id == ndev->dev_port)
++ /*
++ * ndev->dev_port will be equal to 0 in old kernel prior to commit
++ * 9b8b2a323008 ("IB/ipoib: Use dev_port to expose network interface
++ * port numbers") Zero was chosen as special case for user space
++ * applications to fallback and query dev_id to check if it has
++ * different value or not.
++ *
++ * Don't print warning in such scenario.
++ *
++ * https://github.com/systemd/systemd/blob/master/src/udev/udev-builtin-net_id.c#L358
++ */
++ if (ndev->dev_port && ndev->dev_id == ndev->dev_port)
+ netdev_info_once(ndev,
+ "\"%s\" wants to know my dev_id. Should it look at dev_port instead? See Documentation/ABI/testing/sysfs-class-net for more info.\n",
+ current->comm);
+diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
+index 5182c7d6171e..8d30653cd13a 100644
+--- a/drivers/iommu/tegra-smmu.c
++++ b/drivers/iommu/tegra-smmu.c
+@@ -102,7 +102,6 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
+ #define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0)
+ #define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
+ #define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0)
+-#define SMMU_TLB_FLUSH_ASID(x) (((x) & 0x7f) << 24)
+ #define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
+ SMMU_TLB_FLUSH_VA_MATCH_SECTION)
+ #define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \
+@@ -205,8 +204,12 @@ static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
+ {
+ u32 value;
+
+- value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
+- SMMU_TLB_FLUSH_VA_MATCH_ALL;
++ if (smmu->soc->num_asids == 4)
++ value = (asid & 0x3) << 29;
++ else
++ value = (asid & 0x7f) << 24;
++
++ value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL;
+ smmu_writel(smmu, value, SMMU_TLB_FLUSH);
+ }
+
+@@ -216,8 +219,12 @@ static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
+ {
+ u32 value;
+
+- value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
+- SMMU_TLB_FLUSH_VA_SECTION(iova);
++ if (smmu->soc->num_asids == 4)
++ value = (asid & 0x3) << 29;
++ else
++ value = (asid & 0x7f) << 24;
++
++ value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova);
+ smmu_writel(smmu, value, SMMU_TLB_FLUSH);
+ }
+
+@@ -227,8 +234,12 @@ static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
+ {
+ u32 value;
+
+- value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
+- SMMU_TLB_FLUSH_VA_GROUP(iova);
++ if (smmu->soc->num_asids == 4)
++ value = (asid & 0x3) << 29;
++ else
++ value = (asid & 0x7f) << 24;
++
++ value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova);
+ smmu_writel(smmu, value, SMMU_TLB_FLUSH);
+ }
+
+diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
+index 6fc93834da44..151aa95775be 100644
+--- a/drivers/md/dm-cache-metadata.c
++++ b/drivers/md/dm-cache-metadata.c
+@@ -1167,11 +1167,18 @@ static int __load_discards(struct dm_cache_metadata *cmd,
+ if (r)
+ return r;
+
+- for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) {
++ for (b = 0; ; b++) {
+ r = fn(context, cmd->discard_block_size, to_dblock(b),
+ dm_bitset_cursor_get_value(&c));
+ if (r)
+ break;
++
++ if (b >= (from_dblock(cmd->discard_nr_blocks) - 1))
++ break;
++
++ r = dm_bitset_cursor_next(&c);
++ if (r)
++ break;
+ }
+
+ dm_bitset_cursor_end(&c);
1198 +index dd6565798778..86fd2d0fa975 100644
1199 +--- a/drivers/md/dm-crypt.c
1200 ++++ b/drivers/md/dm-crypt.c
1201 +@@ -949,6 +949,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
1202 + {
1203 + #ifdef CONFIG_BLK_DEV_INTEGRITY
1204 + struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk);
1205 ++ struct mapped_device *md = dm_table_get_md(ti->table);
1206 +
1207 + /* From now we require underlying device with our integrity profile */
1208 + if (!bi || strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) {
1209 +@@ -968,7 +969,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
1210 +
1211 + if (crypt_integrity_aead(cc)) {
1212 + cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size;
1213 +- DMINFO("Integrity AEAD, tag size %u, IV size %u.",
1214 ++ DMDEBUG("%s: Integrity AEAD, tag size %u, IV size %u.", dm_device_name(md),
1215 + cc->integrity_tag_size, cc->integrity_iv_size);
1216 +
1217 + if (crypto_aead_setauthsize(any_tfm_aead(cc), cc->integrity_tag_size)) {
1218 +@@ -976,7 +977,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
1219 + return -EINVAL;
1220 + }
1221 + } else if (cc->integrity_iv_size)
1222 +- DMINFO("Additional per-sector space %u bytes for IV.",
1223 ++ DMDEBUG("%s: Additional per-sector space %u bytes for IV.", dm_device_name(md),
1224 + cc->integrity_iv_size);
1225 +
1226 + if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) {
1227 +@@ -1891,7 +1892,7 @@ static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
1228 + * algorithm implementation is used. Help people debug performance
1229 + * problems by logging the ->cra_driver_name.
1230 + */
1231 +- DMINFO("%s using implementation \"%s\"", ciphermode,
1232 ++ DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
1233 + crypto_skcipher_alg(any_tfm(cc))->base.cra_driver_name);
1234 + return 0;
1235 + }
1236 +@@ -1911,7 +1912,7 @@ static int crypt_alloc_tfms_aead(struct crypt_config *cc, char *ciphermode)
1237 + return err;
1238 + }
1239 +
1240 +- DMINFO("%s using implementation \"%s\"", ciphermode,
1241 ++ DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
1242 + crypto_aead_alg(any_tfm_aead(cc))->base.cra_driver_name);
1243 + return 0;
1244 + }
1245 +diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
1246 +index fddffe251bf6..f496213f8b67 100644
1247 +--- a/drivers/md/dm-delay.c
1248 ++++ b/drivers/md/dm-delay.c
1249 +@@ -121,7 +121,8 @@ static void delay_dtr(struct dm_target *ti)
1250 + {
1251 + struct delay_c *dc = ti->private;
1252 +
1253 +- destroy_workqueue(dc->kdelayd_wq);
1254 ++ if (dc->kdelayd_wq)
1255 ++ destroy_workqueue(dc->kdelayd_wq);
1256 +
1257 + if (dc->read.dev)
1258 + dm_put_device(ti, dc->read.dev);
1259 +diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c
1260 +index 4b76f84424c3..352e803f566e 100644
1261 +--- a/drivers/md/dm-init.c
1262 ++++ b/drivers/md/dm-init.c
1263 +@@ -160,7 +160,7 @@ static int __init dm_parse_table(struct dm_device *dev, char *str)
1264 +
1265 + while (table_entry) {
1266 + DMDEBUG("parsing table \"%s\"", str);
1267 +- if (++dev->dmi.target_count >= DM_MAX_TARGETS) {
1268 ++ if (++dev->dmi.target_count > DM_MAX_TARGETS) {
1269 + DMERR("too many targets %u > %d",
1270 + dev->dmi.target_count, DM_MAX_TARGETS);
1271 + return -EINVAL;
1272 +@@ -242,9 +242,9 @@ static int __init dm_parse_devices(struct list_head *devices, char *str)
1273 + return -ENOMEM;
1274 + list_add_tail(&dev->list, devices);
1275 +
1276 +- if (++ndev >= DM_MAX_DEVICES) {
1277 +- DMERR("too many targets %u > %d",
1278 +- dev->dmi.target_count, DM_MAX_TARGETS);
1279 ++ if (++ndev > DM_MAX_DEVICES) {
1280 ++ DMERR("too many devices %lu > %d",
1281 ++ ndev, DM_MAX_DEVICES);
1282 + return -EINVAL;
1283 + }
1284 +
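The dm-init hunks above fix a pre-increment off-by-one: with if (++n >= MAX) the MAX-th entry is rejected, so only MAX - 1 targets or devices were ever accepted, while > MAX admits exactly MAX. The second hunk also makes the message report devices rather than targets. A short demonstration with a made-up limit:

    #include <assert.h>

    #define DM_MAX 3   /* stand-in for DM_MAX_TARGETS / DM_MAX_DEVICES */

    static int add_items_ge(int items)   /* old check: ++n >= DM_MAX */
    {
        int n = 0;
        for (int i = 0; i < items; i++)
            if (++n >= DM_MAX)
                return -1;               /* rejects the DM_MAX-th item */
        return n;
    }

    static int add_items_gt(int items)   /* fixed check: ++n > DM_MAX */
    {
        int n = 0;
        for (int i = 0; i < items; i++)
            if (++n > DM_MAX)
                return -1;
        return n;
    }

    int main(void)
    {
        assert(add_items_ge(DM_MAX) == -1);      /* old: DM_MAX refused */
        assert(add_items_gt(DM_MAX) == DM_MAX);  /* fixed: exactly fits */
        assert(add_items_gt(DM_MAX + 1) == -1);
        return 0;
    }
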
1285 +diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
1286 +index 7c678f50aaa3..7848ef019880 100644
1287 +--- a/drivers/md/dm-integrity.c
1288 ++++ b/drivers/md/dm-integrity.c
1289 +@@ -2568,7 +2568,7 @@ static int calculate_device_limits(struct dm_integrity_c *ic)
1290 + if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
1291 + return -EINVAL;
1292 + } else {
1293 +- __u64 meta_size = ic->provided_data_sectors * ic->tag_size;
1294 ++ __u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
1295 + meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
1296 + >> (ic->log2_buffer_sectors + SECTOR_SHIFT);
1297 + meta_size <<= ic->log2_buffer_sectors;
1298 +@@ -3439,7 +3439,7 @@ try_smaller_buffer:
1299 + DEBUG_print(" journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
1300 + DEBUG_print(" journal_entries %u\n", ic->journal_entries);
1301 + DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
1302 +- DEBUG_print(" device_sectors 0x%llx\n", (unsigned long long)ic->device_sectors);
1303 ++ DEBUG_print(" data_device_sectors 0x%llx\n", (unsigned long long)ic->data_device_sectors);
1304 + DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors);
1305 + DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run);
1306 + DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run);
1307 +diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
1308 +index c740153b4e52..1e03bc89e20f 100644
1309 +--- a/drivers/md/dm-ioctl.c
1310 ++++ b/drivers/md/dm-ioctl.c
1311 +@@ -2069,7 +2069,7 @@ int __init dm_early_create(struct dm_ioctl *dmi,
1312 + /* alloc table */
1313 + r = dm_table_create(&t, get_mode(dmi), dmi->target_count, md);
1314 + if (r)
1315 +- goto err_destroy_dm;
1316 ++ goto err_hash_remove;
1317 +
1318 + /* add targets */
1319 + for (i = 0; i < dmi->target_count; i++) {
1320 +@@ -2116,6 +2116,10 @@ int __init dm_early_create(struct dm_ioctl *dmi,
1321 +
1322 + err_destroy_table:
1323 + dm_table_destroy(t);
1324 ++err_hash_remove:
1325 ++ (void) __hash_remove(__get_name_cell(dmi->name));
1326 ++ /* release reference from __get_name_cell */
1327 ++ dm_put(md);
1328 + err_destroy_dm:
1329 + dm_put(md);
1330 + dm_destroy(md);
1331 +diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
1332 +index 2ee5e357a0a7..cc5173dfd466 100644
1333 +--- a/drivers/md/dm-mpath.c
1334 ++++ b/drivers/md/dm-mpath.c
1335 +@@ -882,6 +882,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
1336 + if (attached_handler_name || m->hw_handler_name) {
1337 + INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
1338 + r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
1339 ++ kfree(attached_handler_name);
1340 + if (r) {
1341 + dm_put_device(ti, p->path.dev);
1342 + goto bad;
1343 +@@ -896,7 +897,6 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
1344 +
1345 + return p;
1346 + bad:
1347 +- kfree(attached_handler_name);
1348 + free_pgpath(p);
1349 + return ERR_PTR(r);
1350 + }
1351 +diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
1352 +index fa68336560c3..d8334cd45d7c 100644
1353 +--- a/drivers/md/dm-zoned-metadata.c
1354 ++++ b/drivers/md/dm-zoned-metadata.c
1355 +@@ -1169,6 +1169,9 @@ static int dmz_init_zones(struct dmz_metadata *zmd)
1356 + goto out;
1357 + }
1358 +
1359 ++ if (!nr_blkz)
1360 ++ break;
1361 ++
1362 + /* Process report */
1363 + for (i = 0; i < nr_blkz; i++) {
1364 + ret = dmz_init_zone(zmd, zone, &blkz[i]);
1365 +@@ -1204,6 +1207,8 @@ static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
1366 + /* Get zone information from disk */
1367 + ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone),
1368 + &blkz, &nr_blkz, GFP_NOIO);
1369 ++ if (!nr_blkz)
1370 ++ ret = -EIO;
1371 + if (ret) {
1372 + dmz_dev_err(zmd->dev, "Get zone %u report failed",
1373 + dmz_id(zmd, zone));
1374 +diff --git a/drivers/md/dm.c b/drivers/md/dm.c
1375 +index 043f0761e4a0..08e7d412af95 100644
1376 +--- a/drivers/md/dm.c
1377 ++++ b/drivers/md/dm.c
1378 +@@ -1467,7 +1467,7 @@ static unsigned get_num_write_zeroes_bios(struct dm_target *ti)
1379 + static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
1380 + unsigned num_bios)
1381 + {
1382 +- unsigned len = ci->sector_count;
1383 ++ unsigned len;
1384 +
1385 + /*
1386 + * Even though the device advertised support for this type of
1387 +@@ -1478,6 +1478,8 @@ static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *
1388 + if (!num_bios)
1389 + return -EOPNOTSUPP;
1390 +
1391 ++ len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
1392 ++
1393 + __send_duplicate_bios(ci, ti, num_bios, &len);
1394 +
1395 + ci->sector += len;
1396 +diff --git a/drivers/md/md.c b/drivers/md/md.c
1397 +index 05ffffb8b769..295ff09cff4c 100644
1398 +--- a/drivers/md/md.c
1399 ++++ b/drivers/md/md.c
1400 +@@ -132,24 +132,6 @@ static inline int speed_max(struct mddev *mddev)
1401 + mddev->sync_speed_max : sysctl_speed_limit_max;
1402 + }
1403 +
1404 +-static void * flush_info_alloc(gfp_t gfp_flags, void *data)
1405 +-{
1406 +- return kzalloc(sizeof(struct flush_info), gfp_flags);
1407 +-}
1408 +-static void flush_info_free(void *flush_info, void *data)
1409 +-{
1410 +- kfree(flush_info);
1411 +-}
1412 +-
1413 +-static void * flush_bio_alloc(gfp_t gfp_flags, void *data)
1414 +-{
1415 +- return kzalloc(sizeof(struct flush_bio), gfp_flags);
1416 +-}
1417 +-static void flush_bio_free(void *flush_bio, void *data)
1418 +-{
1419 +- kfree(flush_bio);
1420 +-}
1421 +-
1422 + static struct ctl_table_header *raid_table_header;
1423 +
1424 + static struct ctl_table raid_table[] = {
1425 +@@ -423,54 +405,31 @@ static int md_congested(void *data, int bits)
1426 + /*
1427 + * Generic flush handling for md
1428 + */
1429 +-static void submit_flushes(struct work_struct *ws)
1430 +-{
1431 +- struct flush_info *fi = container_of(ws, struct flush_info, flush_work);
1432 +- struct mddev *mddev = fi->mddev;
1433 +- struct bio *bio = fi->bio;
1434 +-
1435 +- bio->bi_opf &= ~REQ_PREFLUSH;
1436 +- md_handle_request(mddev, bio);
1437 +-
1438 +- mempool_free(fi, mddev->flush_pool);
1439 +-}
1440 +
1441 +-static void md_end_flush(struct bio *fbio)
1442 ++static void md_end_flush(struct bio *bio)
1443 + {
1444 +- struct flush_bio *fb = fbio->bi_private;
1445 +- struct md_rdev *rdev = fb->rdev;
1446 +- struct flush_info *fi = fb->fi;
1447 +- struct bio *bio = fi->bio;
1448 +- struct mddev *mddev = fi->mddev;
1449 ++ struct md_rdev *rdev = bio->bi_private;
1450 ++ struct mddev *mddev = rdev->mddev;
1451 +
1452 + rdev_dec_pending(rdev, mddev);
1453 +
1454 +- if (atomic_dec_and_test(&fi->flush_pending)) {
1455 +- if (bio->bi_iter.bi_size == 0) {
1456 +- /* an empty barrier - all done */
1457 +- bio_endio(bio);
1458 +- mempool_free(fi, mddev->flush_pool);
1459 +- } else {
1460 +- INIT_WORK(&fi->flush_work, submit_flushes);
1461 +- queue_work(md_wq, &fi->flush_work);
1462 +- }
1463 ++ if (atomic_dec_and_test(&mddev->flush_pending)) {
1464 ++ /* The pre-request flush has finished */
1465 ++ queue_work(md_wq, &mddev->flush_work);
1466 + }
1467 +-
1468 +- mempool_free(fb, mddev->flush_bio_pool);
1469 +- bio_put(fbio);
1470 ++ bio_put(bio);
1471 + }
1472 +
1473 +-void md_flush_request(struct mddev *mddev, struct bio *bio)
1474 ++static void md_submit_flush_data(struct work_struct *ws);
1475 ++
1476 ++static void submit_flushes(struct work_struct *ws)
1477 + {
1478 ++ struct mddev *mddev = container_of(ws, struct mddev, flush_work);
1479 + struct md_rdev *rdev;
1480 +- struct flush_info *fi;
1481 +-
1482 +- fi = mempool_alloc(mddev->flush_pool, GFP_NOIO);
1483 +-
1484 +- fi->bio = bio;
1485 +- fi->mddev = mddev;
1486 +- atomic_set(&fi->flush_pending, 1);
1487 +
1488 ++ mddev->start_flush = ktime_get_boottime();
1489 ++ INIT_WORK(&mddev->flush_work, md_submit_flush_data);
1490 ++ atomic_set(&mddev->flush_pending, 1);
1491 + rcu_read_lock();
1492 + rdev_for_each_rcu(rdev, mddev)
1493 + if (rdev->raid_disk >= 0 &&
1494 +@@ -480,37 +439,74 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
1495 + * we reclaim rcu_read_lock
1496 + */
1497 + struct bio *bi;
1498 +- struct flush_bio *fb;
1499 + atomic_inc(&rdev->nr_pending);
1500 + atomic_inc(&rdev->nr_pending);
1501 + rcu_read_unlock();
1502 +-
1503 +- fb = mempool_alloc(mddev->flush_bio_pool, GFP_NOIO);
1504 +- fb->fi = fi;
1505 +- fb->rdev = rdev;
1506 +-
1507 + bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
1508 +- bio_set_dev(bi, rdev->bdev);
1509 + bi->bi_end_io = md_end_flush;
1510 +- bi->bi_private = fb;
1511 ++ bi->bi_private = rdev;
1512 ++ bio_set_dev(bi, rdev->bdev);
1513 + bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
1514 +-
1515 +- atomic_inc(&fi->flush_pending);
1516 ++ atomic_inc(&mddev->flush_pending);
1517 + submit_bio(bi);
1518 +-
1519 + rcu_read_lock();
1520 + rdev_dec_pending(rdev, mddev);
1521 + }
1522 + rcu_read_unlock();
1523 ++ if (atomic_dec_and_test(&mddev->flush_pending))
1524 ++ queue_work(md_wq, &mddev->flush_work);
1525 ++}
1526 ++
1527 ++static void md_submit_flush_data(struct work_struct *ws)
1528 ++{
1529 ++ struct mddev *mddev = container_of(ws, struct mddev, flush_work);
1530 ++ struct bio *bio = mddev->flush_bio;
1531 ++
1532 ++	/*
1533 ++	 * must reset flush_bio before calling into md_handle_request to avoid a
1534 ++	 * deadlock: other bios that passed the md_handle_request suspend check
1535 ++	 * could wait for this one, while the md_handle_request call below could
1536 ++	 * in turn wait for those bios because of that same suspend check
1537 ++	 */
1538 ++ mddev->last_flush = mddev->start_flush;
1539 ++ mddev->flush_bio = NULL;
1540 ++ wake_up(&mddev->sb_wait);
1541 ++
1542 ++ if (bio->bi_iter.bi_size == 0) {
1543 ++ /* an empty barrier - all done */
1544 ++ bio_endio(bio);
1545 ++ } else {
1546 ++ bio->bi_opf &= ~REQ_PREFLUSH;
1547 ++ md_handle_request(mddev, bio);
1548 ++ }
1549 ++}
1550 +
1551 +- if (atomic_dec_and_test(&fi->flush_pending)) {
1552 +- if (bio->bi_iter.bi_size == 0) {
1553 ++void md_flush_request(struct mddev *mddev, struct bio *bio)
1554 ++{
1555 ++ ktime_t start = ktime_get_boottime();
1556 ++ spin_lock_irq(&mddev->lock);
1557 ++ wait_event_lock_irq(mddev->sb_wait,
1558 ++ !mddev->flush_bio ||
1559 ++ ktime_after(mddev->last_flush, start),
1560 ++ mddev->lock);
1561 ++ if (!ktime_after(mddev->last_flush, start)) {
1562 ++ WARN_ON(mddev->flush_bio);
1563 ++ mddev->flush_bio = bio;
1564 ++ bio = NULL;
1565 ++ }
1566 ++ spin_unlock_irq(&mddev->lock);
1567 ++
1568 ++ if (!bio) {
1569 ++ INIT_WORK(&mddev->flush_work, submit_flushes);
1570 ++ queue_work(md_wq, &mddev->flush_work);
1571 ++ } else {
1572 ++ /* flush was performed for some other bio while we waited. */
1573 ++ if (bio->bi_iter.bi_size == 0)
1574 + /* an empty barrier - all done */
1575 + bio_endio(bio);
1576 +- mempool_free(fi, mddev->flush_pool);
1577 +- } else {
1578 +- INIT_WORK(&fi->flush_work, submit_flushes);
1579 +- queue_work(md_wq, &fi->flush_work);
1580 ++ else {
1581 ++ bio->bi_opf &= ~REQ_PREFLUSH;
1582 ++ mddev->pers->make_request(mddev, bio);
1583 + }
1584 + }
1585 + }
1586 +@@ -560,6 +556,7 @@ void mddev_init(struct mddev *mddev)
1587 + atomic_set(&mddev->openers, 0);
1588 + atomic_set(&mddev->active_io, 0);
1589 + spin_lock_init(&mddev->lock);
1590 ++ atomic_set(&mddev->flush_pending, 0);
1591 + init_waitqueue_head(&mddev->sb_wait);
1592 + init_waitqueue_head(&mddev->recovery_wait);
1593 + mddev->reshape_position = MaxSector;
1594 +@@ -2855,8 +2852,10 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
1595 + err = 0;
1596 + }
1597 + } else if (cmd_match(buf, "re-add")) {
1598 +- if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
1599 +- rdev->saved_raid_disk >= 0) {
1600 ++ if (!rdev->mddev->pers)
1601 ++ err = -EINVAL;
1602 ++ else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
1603 ++ rdev->saved_raid_disk >= 0) {
1604 + /* clear_bit is performed _after_ all the devices
1605 + * have their local Faulty bit cleared. If any writes
1606 + * happen in the meantime in the local node, they
1607 +@@ -5511,22 +5510,6 @@ int md_run(struct mddev *mddev)
1608 + if (err)
1609 + return err;
1610 + }
1611 +- if (mddev->flush_pool == NULL) {
1612 +- mddev->flush_pool = mempool_create(NR_FLUSH_INFOS, flush_info_alloc,
1613 +- flush_info_free, mddev);
1614 +- if (!mddev->flush_pool) {
1615 +- err = -ENOMEM;
1616 +- goto abort;
1617 +- }
1618 +- }
1619 +- if (mddev->flush_bio_pool == NULL) {
1620 +- mddev->flush_bio_pool = mempool_create(NR_FLUSH_BIOS, flush_bio_alloc,
1621 +- flush_bio_free, mddev);
1622 +- if (!mddev->flush_bio_pool) {
1623 +- err = -ENOMEM;
1624 +- goto abort;
1625 +- }
1626 +- }
1627 +
1628 + spin_lock(&pers_lock);
1629 + pers = find_pers(mddev->level, mddev->clevel);
1630 +@@ -5686,11 +5669,8 @@ int md_run(struct mddev *mddev)
1631 + return 0;
1632 +
1633 + abort:
1634 +- mempool_destroy(mddev->flush_bio_pool);
1635 +- mddev->flush_bio_pool = NULL;
1636 +- mempool_destroy(mddev->flush_pool);
1637 +- mddev->flush_pool = NULL;
1638 +-
1639 ++ bioset_exit(&mddev->bio_set);
1640 ++ bioset_exit(&mddev->sync_set);
1641 + return err;
1642 + }
1643 + EXPORT_SYMBOL_GPL(md_run);
1644 +@@ -5894,14 +5874,6 @@ static void __md_stop(struct mddev *mddev)
1645 + mddev->to_remove = &md_redundancy_group;
1646 + module_put(pers->owner);
1647 + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
1648 +- if (mddev->flush_bio_pool) {
1649 +- mempool_destroy(mddev->flush_bio_pool);
1650 +- mddev->flush_bio_pool = NULL;
1651 +- }
1652 +- if (mddev->flush_pool) {
1653 +- mempool_destroy(mddev->flush_pool);
1654 +- mddev->flush_pool = NULL;
1655 +- }
1656 + }
1657 +
1658 + void md_stop(struct mddev *mddev)
1659 +@@ -9257,7 +9229,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
1660 + * reshape is happening in the remote node, we need to
1661 + * update reshape_position and call start_reshape.
1662 + */
1663 +- mddev->reshape_position = sb->reshape_position;
1664 ++ mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1665 + if (mddev->pers->update_reshape_pos)
1666 + mddev->pers->update_reshape_pos(mddev);
1667 + if (mddev->pers->start_reshape)
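The md.c rework above drops the per-flush mempool machinery in favour of one pending flush per mddev plus a timestamp check: a request may skip issuing its own flush when some flush that started after the request arrived has already completed. A rough userspace analogue of that deduplication idea, using pthreads and illustrative names (a sketch of the concept, not the kernel code):

#include <pthread.h>
#include <stdbool.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool flush_in_progress;		/* models mddev->flush_bio != NULL */
static struct timespec last_flush;	/* start time of the last *completed* flush */

static bool after(struct timespec a, struct timespec b)
{
	return a.tv_sec > b.tv_sec ||
	       (a.tv_sec == b.tv_sec && a.tv_nsec > b.tv_nsec);
}

static void do_hardware_flush(void) { /* stand-in for submitting the flush bios */ }

static void flush_request(void)
{
	struct timespec start, flush_start;

	clock_gettime(CLOCK_MONOTONIC, &start);

	pthread_mutex_lock(&lock);
	/* wait until no flush is pending, or until a newer flush has completed */
	while (flush_in_progress && !after(last_flush, start))
		pthread_cond_wait(&cond, &lock);

	if (after(last_flush, start)) {
		/* a flush that started after us already finished: skip ours */
		pthread_mutex_unlock(&lock);
		return;
	}

	flush_in_progress = true;
	clock_gettime(CLOCK_MONOTONIC, &flush_start);
	pthread_mutex_unlock(&lock);

	do_hardware_flush();

	pthread_mutex_lock(&lock);
	last_flush = flush_start;	/* when the just-completed flush started */
	flush_in_progress = false;
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	flush_request();
	return 0;
}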
1668 +diff --git a/drivers/md/md.h b/drivers/md/md.h
1669 +index c52afb52c776..257cb4c9e22b 100644
1670 +--- a/drivers/md/md.h
1671 ++++ b/drivers/md/md.h
1672 +@@ -252,19 +252,6 @@ enum mddev_sb_flags {
1673 + MD_SB_NEED_REWRITE, /* metadata write needs to be repeated */
1674 + };
1675 +
1676 +-#define NR_FLUSH_INFOS 8
1677 +-#define NR_FLUSH_BIOS 64
1678 +-struct flush_info {
1679 +- struct bio *bio;
1680 +- struct mddev *mddev;
1681 +- struct work_struct flush_work;
1682 +- atomic_t flush_pending;
1683 +-};
1684 +-struct flush_bio {
1685 +- struct flush_info *fi;
1686 +- struct md_rdev *rdev;
1687 +-};
1688 +-
1689 + struct mddev {
1690 + void *private;
1691 + struct md_personality *pers;
1692 +@@ -470,8 +457,16 @@ struct mddev {
1693 + * metadata and bitmap writes
1694 + */
1695 +
1696 +- mempool_t *flush_pool;
1697 +- mempool_t *flush_bio_pool;
1698 ++	/* Generic flush handling.
1699 ++	 * The last preflush to finish schedules a worker to submit
1700 ++	 * the rest of the request (without the REQ_PREFLUSH flag).
1701 ++	 */
1702 ++ struct bio *flush_bio;
1703 ++ atomic_t flush_pending;
1704 ++ ktime_t start_flush, last_flush; /* last_flush is when the last completed
1705 ++ * flush was started.
1706 ++ */
1707 ++ struct work_struct flush_work;
1708 + struct work_struct event_work; /* used by dm to report failure event */
1709 + void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
1710 + struct md_cluster_info *cluster_info;
1711 +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
1712 +index 364dd2f6fa1b..a4d2f552c8ab 100644
1713 +--- a/drivers/md/raid5.c
1714 ++++ b/drivers/md/raid5.c
1715 +@@ -4187,7 +4187,7 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
1716 + /* now write out any block on a failed drive,
1717 + * or P or Q if they were recomputed
1718 + */
1719 +- BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
1720 ++ dev = NULL;
1721 + if (s->failed == 2) {
1722 + dev = &sh->dev[s->failed_num[1]];
1723 + s->locked++;
1724 +@@ -4212,6 +4212,14 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
1725 + set_bit(R5_LOCKED, &dev->flags);
1726 + set_bit(R5_Wantwrite, &dev->flags);
1727 + }
1728 ++ if (WARN_ONCE(dev && !test_bit(R5_UPTODATE, &dev->flags),
1729 ++ "%s: disk%td not up to date\n",
1730 ++ mdname(conf->mddev),
1731 ++ dev - (struct r5dev *) &sh->dev)) {
1732 ++ clear_bit(R5_LOCKED, &dev->flags);
1733 ++ clear_bit(R5_Wantwrite, &dev->flags);
1734 ++ s->locked--;
1735 ++ }
1736 + clear_bit(STRIPE_DEGRADED, &sh->state);
1737 +
1738 + set_bit(STRIPE_INSYNC, &sh->state);
1739 +@@ -4223,15 +4231,26 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
1740 + case check_state_check_result:
1741 + sh->check_state = check_state_idle;
1742 +
1743 +- if (s->failed > 1)
1744 +- break;
1745 + /* handle a successful check operation, if parity is correct
1746 + * we are done. Otherwise update the mismatch count and repair
1747 + * parity if !MD_RECOVERY_CHECK
1748 + */
1749 + if (sh->ops.zero_sum_result == 0) {
1750 +- /* Any parity checked was correct */
1751 +- set_bit(STRIPE_INSYNC, &sh->state);
1752 ++ /* both parities are correct */
1753 ++ if (!s->failed)
1754 ++ set_bit(STRIPE_INSYNC, &sh->state);
1755 ++ else {
1756 ++ /* in contrast to the raid5 case we can validate
1757 ++ * parity, but still have a failure to write
1758 ++ * back
1759 ++ */
1760 ++ sh->check_state = check_state_compute_result;
1761 ++	/* Returning at this point means that we may go
1762 ++	 * off and bring p and/or q uptodate again, so
1763 ++	 * we make sure to check zero_sum_result again
1764 ++	 * to verify whether p or q need writeback
1765 ++ */
1766 ++ }
1767 + } else {
1768 + atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
1769 + if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) {
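The raid5.c hunk above trades a BUG_ON() for a WARN_ONCE() plus an unwind of the speculatively taken state, so a "can't happen" condition degrades gracefully instead of crashing the box. A small standalone sketch of that defensive pattern (the struct and function names are made up for illustration):

#include <stdbool.h>
#include <stdio.h>

struct r5dev_sketch { bool locked, want_write, uptodate; };

static void warn_once(const char *msg)
{
	static bool warned;

	if (!warned) {
		fprintf(stderr, "WARNING: %s\n", msg);
		warned = true;
	}
}

static void schedule_parity_writeback(struct r5dev_sketch *dev, int *locked)
{
	dev->locked = true;
	dev->want_write = true;
	(*locked)++;

	if (!dev->uptodate) {
		/* "can't happen": warn once and undo, instead of crashing */
		warn_once("device not up to date, dropping writeback");
		dev->locked = false;
		dev->want_write = false;
		(*locked)--;
	}
}

int main(void)
{
	struct r5dev_sketch dev = { .uptodate = false };
	int locked = 0;

	schedule_parity_writeback(&dev, &locked);
	return locked;	/* 0: the speculative state was rolled back */
}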
1770 +diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
1771 +index c33fd584cb44..f9359b11fa5c 100644
1772 +--- a/drivers/media/i2c/ov6650.c
1773 ++++ b/drivers/media/i2c/ov6650.c
1774 +@@ -814,6 +814,8 @@ static int ov6650_video_probe(struct i2c_client *client)
1775 + if (ret < 0)
1776 + return ret;
1777 +
1778 ++ msleep(20);
1779 ++
1780 + /*
1781 + * check and show product ID and manufacturer ID
1782 + */
1783 +diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
1784 +index 4acbed189644..67e48ff10532 100644
1785 +--- a/drivers/media/platform/Kconfig
1786 ++++ b/drivers/media/platform/Kconfig
1787 +@@ -649,7 +649,7 @@ config VIDEO_SECO_CEC
1788 + config VIDEO_SECO_RC
1789 + bool "SECO Boards IR RC5 support"
1790 + depends on VIDEO_SECO_CEC
1791 +- depends on RC_CORE
1792 ++ depends on RC_CORE=y || RC_CORE = VIDEO_SECO_CEC
1793 + help
1794 + If you say yes here you will get support for the
1795 + SECO Boards Consumer-IR in seco-cec driver.
1796 +diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
1797 +index 0a53598d982f..5bd8df926052 100644
1798 +--- a/drivers/memory/tegra/mc.c
1799 ++++ b/drivers/memory/tegra/mc.c
1800 +@@ -282,7 +282,7 @@ static int tegra_mc_setup_latency_allowance(struct tegra_mc *mc)
1801 + u32 value;
1802 +
1803 + /* compute the number of MC clock cycles per tick */
1804 +- tick = mc->tick * clk_get_rate(mc->clk);
1805 ++ tick = (unsigned long long)mc->tick * clk_get_rate(mc->clk);
1806 + do_div(tick, NSEC_PER_SEC);
1807 +
1808 + value = readl(mc->regs + MC_EMEM_ARB_CFG);
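The tegra/mc.c fix above widens the multiplication before it can overflow. A tiny standalone demonstration, assuming a 32-bit unsigned long as on ARM32 (the example values are illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t tick_ns = 30;		/* stands in for mc->tick */
	uint32_t clk_hz = 400000000;	/* stands in for clk_get_rate(mc->clk) */

	uint32_t wrong = tick_ns * clk_hz;		/* wraps to 32 bits */
	uint64_t right = (uint64_t)tick_ns * clk_hz;	/* widened before multiply */

	printf("wrong: %u\n", wrong);				/* 3410065408 */
	printf("right: %llu\n", (unsigned long long)right);	/* 12000000000 */
	return 0;
}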
1809 +diff --git a/drivers/net/Makefile b/drivers/net/Makefile
1810 +index 21cde7e78621..0d3ba056cda3 100644
1811 +--- a/drivers/net/Makefile
1812 ++++ b/drivers/net/Makefile
1813 +@@ -40,7 +40,7 @@ obj-$(CONFIG_ARCNET) += arcnet/
1814 + obj-$(CONFIG_DEV_APPLETALK) += appletalk/
1815 + obj-$(CONFIG_CAIF) += caif/
1816 + obj-$(CONFIG_CAN) += can/
1817 +-obj-$(CONFIG_NET_DSA) += dsa/
1818 ++obj-y += dsa/
1819 + obj-$(CONFIG_ETHERNET) += ethernet/
1820 + obj-$(CONFIG_FDDI) += fddi/
1821 + obj-$(CONFIG_HIPPI) += hippi/
1822 +diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
1823 +index ffed2d4c9403..9c481823b3e8 100644
1824 +--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
1825 ++++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
1826 +@@ -1492,7 +1492,7 @@ int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port,
1827 + rule.port = port;
1828 + rule.qpn = qpn;
1829 + INIT_LIST_HEAD(&rule.list);
1830 +- mlx4_err(dev, "going promisc on %x\n", port);
1831 ++ mlx4_info(dev, "going promisc on %x\n", port);
1832 +
1833 + return mlx4_flow_attach(dev, &rule, regid_p);
1834 + }
1835 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
1836 +index 6debffb8336b..430c2eab6fc3 100644
1837 +--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
1838 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
1839 +@@ -7,6 +7,7 @@ config MLX5_CORE
1840 + depends on PCI
1841 + imply PTP_1588_CLOCK
1842 + imply VXLAN
1843 ++ imply MLXFW
1844 + default n
1845 + ---help---
1846 + Core driver for low level functionality of the ConnectX-4 and
1847 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
1848 +index 4746f2d28fb6..0ccd6d40baf7 100644
1849 +--- a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
1850 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
1851 +@@ -26,7 +26,7 @@ static int mlx5_peer_pf_disable_hca(struct mlx5_core_dev *dev)
1852 +
1853 + MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
1854 + MLX5_SET(disable_hca_in, in, function_id, 0);
1855 +- MLX5_SET(enable_hca_in, in, embedded_cpu_function, 0);
1856 ++ MLX5_SET(disable_hca_in, in, embedded_cpu_function, 0);
1857 + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
1858 + }
1859 +
1860 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
1861 +index 78dc8fe2a83c..2821208119c0 100644
1862 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
1863 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
1864 +@@ -1901,6 +1901,22 @@ static int mlx5e_flash_device(struct net_device *dev,
1865 + return mlx5e_ethtool_flash_device(priv, flash);
1866 + }
1867 +
1868 ++#ifndef CONFIG_MLX5_EN_RXNFC
1869 ++/* When CONFIG_MLX5_EN_RXNFC=n we only support ETHTOOL_GRXRINGS
1870 ++ * otherwise this function will be defined from en_fs_ethtool.c
1871 ++ */
1872 ++static int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
1873 ++{
1874 ++ struct mlx5e_priv *priv = netdev_priv(dev);
1875 ++
1876 ++ if (info->cmd != ETHTOOL_GRXRINGS)
1877 ++ return -EOPNOTSUPP;
1878 ++ /* ring_count is needed by ethtool -x */
1879 ++ info->data = priv->channels.params.num_channels;
1880 ++ return 0;
1881 ++}
1882 ++#endif
1883 ++
1884 + const struct ethtool_ops mlx5e_ethtool_ops = {
1885 + .get_drvinfo = mlx5e_get_drvinfo,
1886 + .get_link = ethtool_op_get_link,
1887 +@@ -1919,8 +1935,8 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
1888 + .get_rxfh_indir_size = mlx5e_get_rxfh_indir_size,
1889 + .get_rxfh = mlx5e_get_rxfh,
1890 + .set_rxfh = mlx5e_set_rxfh,
1891 +-#ifdef CONFIG_MLX5_EN_RXNFC
1892 + .get_rxnfc = mlx5e_get_rxnfc,
1893 ++#ifdef CONFIG_MLX5_EN_RXNFC
1894 + .set_rxnfc = mlx5e_set_rxnfc,
1895 + #endif
1896 + .flash_device = mlx5e_flash_device,
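The ethtool hunk above keeps .get_rxnfc wired up even when CONFIG_MLX5_EN_RXNFC is off, by supplying a stub that handles only ETHTOOL_GRXRINGS. A standalone sketch of that compile-out-stub idiom (HAVE_FULL_RXNFC and the other names are stand-ins, not the driver's symbols):

#include <errno.h>
#include <stdio.h>

struct rxnfc_query { int cmd; int data; };
#define CMD_GET_RING_COUNT 1

#ifndef HAVE_FULL_RXNFC
/* minimal fallback: support only the command every caller relies on */
static int get_rxnfc_sketch(struct rxnfc_query *q, int ring_count)
{
	if (q->cmd != CMD_GET_RING_COUNT)
		return -EOPNOTSUPP;
	q->data = ring_count;
	return 0;
}
#endif

int main(void)
{
	struct rxnfc_query q = { .cmd = CMD_GET_RING_COUNT };

	if (get_rxnfc_sketch(&q, 4) == 0)
		printf("rings: %d\n", q.data);
	return 0;
}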
1897 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
1898 +index a66b6ed80b30..0b09fa91019d 100644
1899 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
1900 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
1901 +@@ -65,9 +65,26 @@ static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
1902 + static void mlx5e_rep_get_drvinfo(struct net_device *dev,
1903 + struct ethtool_drvinfo *drvinfo)
1904 + {
1905 ++ struct mlx5e_priv *priv = netdev_priv(dev);
1906 ++ struct mlx5_core_dev *mdev = priv->mdev;
1907 ++
1908 + strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
1909 + sizeof(drvinfo->driver));
1910 + strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
1911 ++ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
1912 ++ "%d.%d.%04d (%.16s)",
1913 ++ fw_rev_maj(mdev), fw_rev_min(mdev),
1914 ++ fw_rev_sub(mdev), mdev->board_id);
1915 ++}
1916 ++
1917 ++static void mlx5e_uplink_rep_get_drvinfo(struct net_device *dev,
1918 ++ struct ethtool_drvinfo *drvinfo)
1919 ++{
1920 ++ struct mlx5e_priv *priv = netdev_priv(dev);
1921 ++
1922 ++ mlx5e_rep_get_drvinfo(dev, drvinfo);
1923 ++ strlcpy(drvinfo->bus_info, pci_name(priv->mdev->pdev),
1924 ++ sizeof(drvinfo->bus_info));
1925 + }
1926 +
1927 + static const struct counter_desc sw_rep_stats_desc[] = {
1928 +@@ -363,7 +380,7 @@ static const struct ethtool_ops mlx5e_vf_rep_ethtool_ops = {
1929 + };
1930 +
1931 + static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
1932 +- .get_drvinfo = mlx5e_rep_get_drvinfo,
1933 ++ .get_drvinfo = mlx5e_uplink_rep_get_drvinfo,
1934 + .get_link = ethtool_op_get_link,
1935 + .get_strings = mlx5e_rep_get_strings,
1936 + .get_sset_count = mlx5e_rep_get_sset_count,
1937 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
1938 +index d75dc44eb2ff..4cb23631616b 100644
1939 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
1940 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
1941 +@@ -1561,7 +1561,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
1942 + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
1943 + struct flow_match_vlan match;
1944 +
1945 +- flow_rule_match_vlan(rule, &match);
1946 ++ flow_rule_match_cvlan(rule, &match);
1947 + if (match.mask->vlan_id ||
1948 + match.mask->vlan_priority ||
1949 + match.mask->vlan_tpid) {
1950 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1951 +index 0be3eb86dd84..581cc145795d 100644
1952 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1953 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1954 +@@ -1386,6 +1386,8 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
1955 + if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
1956 + d1->vport.num == d2->vport.num &&
1957 + d1->vport.flags == d2->vport.flags &&
1958 ++ ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
1959 ++ (d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
1960 + ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
1961 + (d1->vport.reformat_id == d2->vport.reformat_id) : true)) ||
1962 + (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
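The fs_core.c fix above only compares vhca_id when the flag declaring it valid is set on the destination. A standalone sketch of that flags-guarded comparison (names are illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

#define DEST_HAS_VHCA_ID (1u << 0)

struct dest {
	uint32_t flags;
	uint16_t vport;
	uint16_t vhca_id;
};

static bool dest_equal(const struct dest *a, const struct dest *b)
{
	return a->vport == b->vport &&
	       a->flags == b->flags &&
	       /* compare vhca_id only when it is declared valid */
	       ((a->flags & DEST_HAS_VHCA_ID) ? a->vhca_id == b->vhca_id : true);
}

int main(void)
{
	struct dest a = { .flags = DEST_HAS_VHCA_ID, .vport = 1, .vhca_id = 7 };
	struct dest b = { .flags = DEST_HAS_VHCA_ID, .vport = 1, .vhca_id = 9 };

	printf("equal: %d\n", dest_equal(&a, &b));	/* 0: vhca_id now counts */
	return 0;
}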
1963 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
1964 +index ca0ee9916e9e..0059b290e095 100644
1965 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
1966 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
1967 +@@ -535,23 +535,16 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
1968 + do_div(ns, NSEC_PER_SEC / HZ);
1969 + clock->overflow_period = ns;
1970 +
1971 +- mdev->clock_info_page = alloc_page(GFP_KERNEL);
1972 +- if (mdev->clock_info_page) {
1973 +- mdev->clock_info = kmap(mdev->clock_info_page);
1974 +- if (!mdev->clock_info) {
1975 +- __free_page(mdev->clock_info_page);
1976 +- mlx5_core_warn(mdev, "failed to map clock page\n");
1977 +- } else {
1978 +- mdev->clock_info->sign = 0;
1979 +- mdev->clock_info->nsec = clock->tc.nsec;
1980 +- mdev->clock_info->cycles = clock->tc.cycle_last;
1981 +- mdev->clock_info->mask = clock->cycles.mask;
1982 +- mdev->clock_info->mult = clock->nominal_c_mult;
1983 +- mdev->clock_info->shift = clock->cycles.shift;
1984 +- mdev->clock_info->frac = clock->tc.frac;
1985 +- mdev->clock_info->overflow_period =
1986 +- clock->overflow_period;
1987 +- }
1988 ++ mdev->clock_info =
1989 ++ (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL);
1990 ++ if (mdev->clock_info) {
1991 ++ mdev->clock_info->nsec = clock->tc.nsec;
1992 ++ mdev->clock_info->cycles = clock->tc.cycle_last;
1993 ++ mdev->clock_info->mask = clock->cycles.mask;
1994 ++ mdev->clock_info->mult = clock->nominal_c_mult;
1995 ++ mdev->clock_info->shift = clock->cycles.shift;
1996 ++ mdev->clock_info->frac = clock->tc.frac;
1997 ++ mdev->clock_info->overflow_period = clock->overflow_period;
1998 + }
1999 +
2000 + INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
2001 +@@ -599,8 +592,7 @@ void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
2002 + cancel_delayed_work_sync(&clock->overflow_work);
2003 +
2004 + if (mdev->clock_info) {
2005 +- kunmap(mdev->clock_info_page);
2006 +- __free_page(mdev->clock_info_page);
2007 ++ free_page((unsigned long)mdev->clock_info);
2008 + mdev->clock_info = NULL;
2009 + }
2010 +
2011 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
2012 +index f26a4ca29363..0b56291d22c6 100644
2013 +--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
2014 ++++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
2015 +@@ -122,6 +122,12 @@ void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
2016 + }
2017 + EXPORT_SYMBOL(mlxsw_core_driver_priv);
2018 +
2019 ++bool mlxsw_core_res_query_enabled(const struct mlxsw_core *mlxsw_core)
2020 ++{
2021 ++ return mlxsw_core->driver->res_query_enabled;
2022 ++}
2023 ++EXPORT_SYMBOL(mlxsw_core_res_query_enabled);
2024 ++
2025 + struct mlxsw_rx_listener_item {
2026 + struct list_head list;
2027 + struct mlxsw_rx_listener rxl;
2028 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
2029 +index 8ec53f027575..62b8de9305af 100644
2030 +--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
2031 ++++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
2032 +@@ -28,6 +28,8 @@ unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core);
2033 +
2034 + void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core);
2035 +
2036 ++bool mlxsw_core_res_query_enabled(const struct mlxsw_core *mlxsw_core);
2037 ++
2038 + int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver);
2039 + void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver);
2040 +
2041 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
2042 +index c1c1965d7acc..72539a9a3847 100644
2043 +--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
2044 ++++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
2045 +@@ -3,6 +3,7 @@
2046 +
2047 + #include <linux/kernel.h>
2048 + #include <linux/err.h>
2049 ++#include <linux/sfp.h>
2050 +
2051 + #include "core.h"
2052 + #include "core_env.h"
2053 +@@ -162,7 +163,7 @@ int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module,
2054 + {
2055 + u8 module_info[MLXSW_REG_MCIA_EEPROM_MODULE_INFO_SIZE];
2056 + u16 offset = MLXSW_REG_MCIA_EEPROM_MODULE_INFO_SIZE;
2057 +- u8 module_rev_id, module_id;
2058 ++ u8 module_rev_id, module_id, diag_mon;
2059 + unsigned int read_size;
2060 + int err;
2061 +
2062 +@@ -195,8 +196,21 @@ int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module,
2063 + }
2064 + break;
2065 + case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_SFP:
2066 ++ /* Verify if transceiver provides diagnostic monitoring page */
2067 ++ err = mlxsw_env_query_module_eeprom(mlxsw_core, module,
2068 ++ SFP_DIAGMON, 1, &diag_mon,
2069 ++ &read_size);
2070 ++ if (err)
2071 ++ return err;
2072 ++
2073 ++ if (read_size < 1)
2074 ++ return -EIO;
2075 ++
2076 + modinfo->type = ETH_MODULE_SFF_8472;
2077 +- modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
2078 ++ if (diag_mon)
2079 ++ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
2080 ++ else
2081 ++ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN / 2;
2082 + break;
2083 + default:
2084 + return -EINVAL;
2085 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
2086 +index 6956bbebe2f1..496dc904c5ed 100644
2087 +--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
2088 ++++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
2089 +@@ -518,6 +518,9 @@ static int mlxsw_hwmon_module_init(struct mlxsw_hwmon *mlxsw_hwmon)
2090 + u8 width;
2091 + int err;
2092 +
2093 ++ if (!mlxsw_core_res_query_enabled(mlxsw_hwmon->core))
2094 ++ return 0;
2095 ++
2096 + /* Add extra attributes for module temperature. Sensor index is
2097 + * assigned to sensor_count value, while all indexed before
2098 + * sensor_count are already utilized by the sensors connected through
2099 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
2100 +index 472f63f9fac5..d3e851e7ca72 100644
2101 +--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
2102 ++++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
2103 +@@ -740,6 +740,9 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core,
2104 + struct mlxsw_thermal_module *module_tz;
2105 + int i, err;
2106 +
2107 ++ if (!mlxsw_core_res_query_enabled(core))
2108 ++ return 0;
2109 ++
2110 + thermal->tz_module_arr = kcalloc(module_count,
2111 + sizeof(*thermal->tz_module_arr),
2112 + GFP_KERNEL);
2113 +@@ -776,6 +779,9 @@ mlxsw_thermal_modules_fini(struct mlxsw_thermal *thermal)
2114 + unsigned int module_count = mlxsw_core_max_ports(thermal->core);
2115 + int i;
2116 +
2117 ++ if (!mlxsw_core_res_query_enabled(thermal->core))
2118 ++ return;
2119 ++
2120 + for (i = module_count - 1; i >= 0; i--)
2121 + mlxsw_thermal_module_fini(&thermal->tz_module_arr[i]);
2122 + kfree(thermal->tz_module_arr);
2123 +diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
2124 +index 4d78be4ec4e9..843ddf548f26 100644
2125 +--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
2126 ++++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
2127 +@@ -168,6 +168,7 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
2128 + return;
2129 + }
2130 +
2131 ++ rcu_read_lock();
2132 + for (i = 0; i < count; i++) {
2133 + ipv4_addr = payload->tun_info[i].ipv4;
2134 + port = be32_to_cpu(payload->tun_info[i].egress_port);
2135 +@@ -183,6 +184,7 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
2136 + neigh_event_send(n, NULL);
2137 + neigh_release(n);
2138 + }
2139 ++ rcu_read_unlock();
2140 + }
2141 +
2142 + static int
2143 +@@ -366,9 +368,10 @@ void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
2144 +
2145 + payload = nfp_flower_cmsg_get_data(skb);
2146 +
2147 ++ rcu_read_lock();
2148 + netdev = nfp_app_repr_get(app, be32_to_cpu(payload->ingress_port));
2149 + if (!netdev)
2150 +- goto route_fail_warning;
2151 ++ goto fail_rcu_unlock;
2152 +
2153 + flow.daddr = payload->ipv4_addr;
2154 + flow.flowi4_proto = IPPROTO_UDP;
2155 +@@ -378,21 +381,23 @@ void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
2156 + rt = ip_route_output_key(dev_net(netdev), &flow);
2157 + err = PTR_ERR_OR_ZERO(rt);
2158 + if (err)
2159 +- goto route_fail_warning;
2160 ++ goto fail_rcu_unlock;
2161 + #else
2162 +- goto route_fail_warning;
2163 ++ goto fail_rcu_unlock;
2164 + #endif
2165 +
2166 + /* Get the neighbour entry for the lookup */
2167 + n = dst_neigh_lookup(&rt->dst, &flow.daddr);
2168 + ip_rt_put(rt);
2169 + if (!n)
2170 +- goto route_fail_warning;
2171 +- nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_KERNEL);
2172 ++ goto fail_rcu_unlock;
2173 ++ nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);
2174 + neigh_release(n);
2175 ++ rcu_read_unlock();
2176 + return;
2177 +
2178 +-route_fail_warning:
2179 ++fail_rcu_unlock:
2180 ++ rcu_read_unlock();
2181 + nfp_flower_cmsg_warn(app, "Requested route not found.\n");
2182 + }
2183 +
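The nfp hunks above wrap the lookups in rcu_read_lock()/rcu_read_unlock() and, accordingly, switch the allocation to GFP_ATOMIC, since sleeping is not allowed inside an RCU read-side critical section. A self-contained sketch that only models the shape of that rule with userspace stubs (none of these are the real kernel APIs):

#include <stdio.h>
#include <stdlib.h>

#define GFP_KERNEL 0	/* may sleep: forbidden under rcu_read_lock() */
#define GFP_ATOMIC 1	/* never sleeps */

static void rcu_read_lock_sketch(void)   { /* begins the no-sleep region */ }
static void rcu_read_unlock_sketch(void) { /* ends it */ }

static void *kmalloc_sketch(size_t n, int gfp)
{
	if (gfp == GFP_KERNEL)
		fprintf(stderr, "BUG: sleeping allocation in atomic context\n");
	return malloc(n);
}

static void handle_route_request(void)
{
	rcu_read_lock_sketch();		/* the lookups return RCU-protected data */
	void *neigh = kmalloc_sketch(64, GFP_ATOMIC);	/* GFP_KERNEL would be a bug here */
	if (neigh) {
		/* ... program the neighbour entry ... */
		free(neigh);
	}
	rcu_read_unlock_sketch();
}

int main(void)
{
	handle_route_request();
	return 0;
}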
2184 +diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c
2185 +index b5edc7f96a39..685e875f5164 100644
2186 +--- a/drivers/net/ppp/ppp_deflate.c
2187 ++++ b/drivers/net/ppp/ppp_deflate.c
2188 +@@ -610,12 +610,20 @@ static struct compressor ppp_deflate_draft = {
2189 +
2190 + static int __init deflate_init(void)
2191 + {
2192 +- int answer = ppp_register_compressor(&ppp_deflate);
2193 +- if (answer == 0)
2194 +- printk(KERN_INFO
2195 +- "PPP Deflate Compression module registered\n");
2196 +- ppp_register_compressor(&ppp_deflate_draft);
2197 +- return answer;
2198 ++ int rc;
2199 ++
2200 ++ rc = ppp_register_compressor(&ppp_deflate);
2201 ++ if (rc)
2202 ++ return rc;
2203 ++
2204 ++ rc = ppp_register_compressor(&ppp_deflate_draft);
2205 ++ if (rc) {
2206 ++ ppp_unregister_compressor(&ppp_deflate);
2207 ++ return rc;
2208 ++ }
2209 ++
2210 ++ pr_info("PPP Deflate Compression module registered\n");
2211 ++ return 0;
2212 + }
2213 +
2214 + static void __exit deflate_cleanup(void)
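The ppp_deflate fix above makes init undo the first registration when the second one fails, instead of ignoring the second return code. A standalone sketch of that rollback pattern (register_a/register_b are stand-ins):

#include <stdio.h>

static int register_a(void)    { return 0; }	/* succeeds */
static void unregister_a(void) { }
static int register_b(void)    { return -1; }	/* simulate failure */

static int init_sketch(void)
{
	int rc;

	rc = register_a();
	if (rc)
		return rc;

	rc = register_b();
	if (rc) {
		unregister_a();	/* roll back the earlier success */
		return rc;
	}

	printf("both registered\n");
	return 0;
}

int main(void)
{
	return init_sketch() ? 1 : 0;
}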
2215 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
2216 +index 679e404a5224..366217263d70 100644
2217 +--- a/drivers/net/usb/qmi_wwan.c
2218 ++++ b/drivers/net/usb/qmi_wwan.c
2219 +@@ -1250,6 +1250,8 @@ static const struct usb_device_id products[] = {
2220 + {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
2221 + {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
2222 + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
2223 ++ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1260, 2)}, /* Telit LE910Cx */
2224 ++ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1261, 2)}, /* Telit LE910Cx */
2225 + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */
2226 + {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */
2227 + {QMI_FIXED_INTF(0x1c9e, 0x9803, 4)}, /* Telewell TW-3G HSPA+ */
2228 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
2229 +index 7535cb0d4ac0..9f1417e00073 100644
2230 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
2231 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
2232 +@@ -31,6 +31,10 @@ struct brcmf_dmi_data {
2233 +
2234 + /* NOTE: Please keep all entries sorted alphabetically */
2235 +
2236 ++static const struct brcmf_dmi_data acepc_t8_data = {
2237 ++ BRCM_CC_4345_CHIP_ID, 6, "acepc-t8"
2238 ++};
2239 ++
2240 + static const struct brcmf_dmi_data gpd_win_pocket_data = {
2241 + BRCM_CC_4356_CHIP_ID, 2, "gpd-win-pocket"
2242 + };
2243 +@@ -48,6 +52,28 @@ static const struct brcmf_dmi_data pov_tab_p1006w_data = {
2244 + };
2245 +
2246 + static const struct dmi_system_id dmi_platform_data[] = {
2247 ++ {
2248 ++ /* ACEPC T8 Cherry Trail Z8350 mini PC */
2249 ++ .matches = {
2250 ++ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."),
2251 ++ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
2252 ++ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "T8"),
2253 ++ /* also match on somewhat unique bios-version */
2254 ++ DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"),
2255 ++ },
2256 ++ .driver_data = (void *)&acepc_t8_data,
2257 ++ },
2258 ++ {
2259 ++ /* ACEPC T11 Cherry Trail Z8350 mini PC, same wifi as the T8 */
2260 ++ .matches = {
2261 ++ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."),
2262 ++ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
2263 ++ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "T11"),
2264 ++ /* also match on somewhat unique bios-version */
2265 ++ DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"),
2266 ++ },
2267 ++ .driver_data = (void *)&acepc_t8_data,
2268 ++ },
2269 + {
2270 + /* Match for the GPDwin which unfortunately uses somewhat
2271 + * generic dmi strings, which is why we test for 4 strings.
2272 +diff --git a/drivers/net/wireless/intersil/p54/p54pci.c b/drivers/net/wireless/intersil/p54/p54pci.c
2273 +index 27a49068d32d..57ad56435dda 100644
2274 +--- a/drivers/net/wireless/intersil/p54/p54pci.c
2275 ++++ b/drivers/net/wireless/intersil/p54/p54pci.c
2276 +@@ -554,7 +554,7 @@ static int p54p_probe(struct pci_dev *pdev,
2277 + err = pci_enable_device(pdev);
2278 + if (err) {
2279 + dev_err(&pdev->dev, "Cannot enable new PCI device\n");
2280 +- return err;
2281 ++ goto err_put;
2282 + }
2283 +
2284 + mem_addr = pci_resource_start(pdev, 0);
2285 +@@ -639,6 +639,7 @@ static int p54p_probe(struct pci_dev *pdev,
2286 + pci_release_regions(pdev);
2287 + err_disable_dev:
2288 + pci_disable_device(pdev);
2289 ++err_put:
2290 + pci_dev_put(pdev);
2291 + return err;
2292 + }
2293 +diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
2294 +index 0c6e8b44b4ed..c60b465f6fe4 100644
2295 +--- a/drivers/parisc/led.c
2296 ++++ b/drivers/parisc/led.c
2297 +@@ -568,6 +568,9 @@ int __init register_led_driver(int model, unsigned long cmd_reg, unsigned long d
2298 + break;
2299 +
2300 + case DISPLAY_MODEL_LASI:
2301 ++		/* Skip LED registration when running on QEMU */
2302 ++ if (running_on_qemu)
2303 ++ return 1;
2304 + LED_DATA_REG = data_reg;
2305 + led_func_ptr = led_LASI_driver;
2306 + printk(KERN_INFO "LED display at %lx registered\n", LED_DATA_REG);
2307 +diff --git a/drivers/pci/controller/pcie-rcar.c b/drivers/pci/controller/pcie-rcar.c
2308 +index c8febb009454..6a4e435bd35f 100644
2309 +--- a/drivers/pci/controller/pcie-rcar.c
2310 ++++ b/drivers/pci/controller/pcie-rcar.c
2311 +@@ -46,6 +46,7 @@
2312 +
2313 + /* Transfer control */
2314 + #define PCIETCTLR 0x02000
2315 ++#define DL_DOWN BIT(3)
2316 + #define CFINIT 1
2317 + #define PCIETSTR 0x02004
2318 + #define DATA_LINK_ACTIVE 1
2319 +@@ -94,6 +95,7 @@
2320 + #define MACCTLR 0x011058
2321 + #define SPEED_CHANGE BIT(24)
2322 + #define SCRAMBLE_DISABLE BIT(27)
2323 ++#define PMSR 0x01105c
2324 + #define MACS2R 0x011078
2325 + #define MACCGSPSETR 0x011084
2326 + #define SPCNGRSN BIT(31)
2327 +@@ -1130,6 +1132,7 @@ static int rcar_pcie_probe(struct platform_device *pdev)
2328 + pcie = pci_host_bridge_priv(bridge);
2329 +
2330 + pcie->dev = dev;
2331 ++ platform_set_drvdata(pdev, pcie);
2332 +
2333 + err = pci_parse_request_of_pci_ranges(dev, &pcie->resources, NULL);
2334 + if (err)
2335 +@@ -1221,10 +1224,28 @@ err_free_bridge:
2336 + return err;
2337 + }
2338 +
2339 ++static int rcar_pcie_resume_noirq(struct device *dev)
2340 ++{
2341 ++ struct rcar_pcie *pcie = dev_get_drvdata(dev);
2342 ++
2343 ++ if (rcar_pci_read_reg(pcie, PMSR) &&
2344 ++ !(rcar_pci_read_reg(pcie, PCIETCTLR) & DL_DOWN))
2345 ++ return 0;
2346 ++
2347 ++ /* Re-establish the PCIe link */
2348 ++ rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
2349 ++ return rcar_pcie_wait_for_dl(pcie);
2350 ++}
2351 ++
2352 ++static const struct dev_pm_ops rcar_pcie_pm_ops = {
2353 ++ .resume_noirq = rcar_pcie_resume_noirq,
2354 ++};
2355 ++
2356 + static struct platform_driver rcar_pcie_driver = {
2357 + .driver = {
2358 + .name = "rcar-pcie",
2359 + .of_match_table = rcar_pcie_of_match,
2360 ++ .pm = &rcar_pcie_pm_ops,
2361 + .suppress_bind_attrs = true,
2362 + },
2363 + .probe = rcar_pcie_probe,
2364 +diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
2365 +index d994839a3e24..9cb99380c61e 100644
2366 +--- a/drivers/pci/pci.h
2367 ++++ b/drivers/pci/pci.h
2368 +@@ -597,7 +597,7 @@ void pci_aer_clear_fatal_status(struct pci_dev *dev);
2369 + void pci_aer_clear_device_status(struct pci_dev *dev);
2370 + #else
2371 + static inline void pci_no_aer(void) { }
2372 +-static inline int pci_aer_init(struct pci_dev *d) { return -ENODEV; }
2373 ++static inline void pci_aer_init(struct pci_dev *d) { }
2374 + static inline void pci_aer_exit(struct pci_dev *d) { }
2375 + static inline void pci_aer_clear_fatal_status(struct pci_dev *dev) { }
2376 + static inline void pci_aer_clear_device_status(struct pci_dev *dev) { }
2377 +diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
2378 +index 727e3c1ef9a4..38e7017478b5 100644
2379 +--- a/drivers/pci/pcie/aspm.c
2380 ++++ b/drivers/pci/pcie/aspm.c
2381 +@@ -196,6 +196,38 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
2382 + link->clkpm_capable = (blacklist) ? 0 : capable;
2383 + }
2384 +
2385 ++static bool pcie_retrain_link(struct pcie_link_state *link)
2386 ++{
2387 ++ struct pci_dev *parent = link->pdev;
2388 ++ unsigned long start_jiffies;
2389 ++ u16 reg16;
2390 ++
2391 ++ pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
2392 ++ reg16 |= PCI_EXP_LNKCTL_RL;
2393 ++ pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
2394 ++ if (parent->clear_retrain_link) {
2395 ++ /*
2396 ++ * Due to an erratum in some devices the Retrain Link bit
2397 ++ * needs to be cleared again manually to allow the link
2398 ++ * training to succeed.
2399 ++ */
2400 ++ reg16 &= ~PCI_EXP_LNKCTL_RL;
2401 ++ pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
2402 ++ }
2403 ++
2404 ++ /* Wait for link training end. Break out after waiting for timeout */
2405 ++ start_jiffies = jiffies;
2406 ++ for (;;) {
2407 ++ pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
2408 ++ if (!(reg16 & PCI_EXP_LNKSTA_LT))
2409 ++ break;
2410 ++ if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
2411 ++ break;
2412 ++ msleep(1);
2413 ++ }
2414 ++ return !(reg16 & PCI_EXP_LNKSTA_LT);
2415 ++}
2416 ++
2417 + /*
2418 + * pcie_aspm_configure_common_clock: check if the 2 ends of a link
2419 + * could use common clock. If they are, configure them to use the
2420 +@@ -205,7 +237,6 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
2421 + {
2422 + int same_clock = 1;
2423 + u16 reg16, parent_reg, child_reg[8];
2424 +- unsigned long start_jiffies;
2425 + struct pci_dev *child, *parent = link->pdev;
2426 + struct pci_bus *linkbus = parent->subordinate;
2427 + /*
2428 +@@ -263,21 +294,7 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
2429 + reg16 &= ~PCI_EXP_LNKCTL_CCC;
2430 + pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
2431 +
2432 +- /* Retrain link */
2433 +- reg16 |= PCI_EXP_LNKCTL_RL;
2434 +- pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
2435 +-
2436 +- /* Wait for link training end. Break out after waiting for timeout */
2437 +- start_jiffies = jiffies;
2438 +- for (;;) {
2439 +- pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
2440 +- if (!(reg16 & PCI_EXP_LNKSTA_LT))
2441 +- break;
2442 +- if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
2443 +- break;
2444 +- msleep(1);
2445 +- }
2446 +- if (!(reg16 & PCI_EXP_LNKSTA_LT))
2447 ++ if (pcie_retrain_link(link))
2448 + return;
2449 +
2450 + /* Training failed. Restore common clock configurations */
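The new pcie_retrain_link() above is a bounded poll: set the retrain bit (clearing it again on the quirky Pericom parts), then poll the link-training status until it clears or the timeout expires. A userspace analogue of that loop (read_status() and the constants are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <time.h>
#include <unistd.h>

#define LINK_TRAINING	0x0800		/* models PCI_EXP_LNKSTA_LT */
#define TIMEOUT_MS	1000

static uint16_t read_status(void) { return 0; }	/* pretend training is done */

static bool wait_link_trained(void)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (!(read_status() & LINK_TRAINING))
			return true;	/* training finished */
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000 +
		    (now.tv_nsec - start.tv_nsec) / 1000000 > TIMEOUT_MS)
			return false;	/* gave up, link still training */
		usleep(1000);		/* models msleep(1) */
	}
}

int main(void)
{
	return wait_link_trained() ? 0 : 1;
}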
2451 +diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
2452 +index 7e12d0163863..eea78477d311 100644
2453 +--- a/drivers/pci/probe.c
2454 ++++ b/drivers/pci/probe.c
2455 +@@ -586,16 +586,9 @@ static void pci_release_host_bridge_dev(struct device *dev)
2456 + kfree(to_pci_host_bridge(dev));
2457 + }
2458 +
2459 +-struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
2460 ++static void pci_init_host_bridge(struct pci_host_bridge *bridge)
2461 + {
2462 +- struct pci_host_bridge *bridge;
2463 +-
2464 +- bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL);
2465 +- if (!bridge)
2466 +- return NULL;
2467 +-
2468 + INIT_LIST_HEAD(&bridge->windows);
2469 +- bridge->dev.release = pci_release_host_bridge_dev;
2470 +
2471 + /*
2472 + * We assume we can manage these PCIe features. Some systems may
2473 +@@ -608,6 +601,18 @@ struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
2474 + bridge->native_shpc_hotplug = 1;
2475 + bridge->native_pme = 1;
2476 + bridge->native_ltr = 1;
2477 ++}
2478 ++
2479 ++struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
2480 ++{
2481 ++ struct pci_host_bridge *bridge;
2482 ++
2483 ++ bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL);
2484 ++ if (!bridge)
2485 ++ return NULL;
2486 ++
2487 ++ pci_init_host_bridge(bridge);
2488 ++ bridge->dev.release = pci_release_host_bridge_dev;
2489 +
2490 + return bridge;
2491 + }
2492 +@@ -622,7 +627,7 @@ struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
2493 + if (!bridge)
2494 + return NULL;
2495 +
2496 +- INIT_LIST_HEAD(&bridge->windows);
2497 ++ pci_init_host_bridge(bridge);
2498 + bridge->dev.release = devm_pci_release_host_bridge_dev;
2499 +
2500 + return bridge;
2501 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
2502 +index a077f67fe1da..cc616a5f6a8f 100644
2503 +--- a/drivers/pci/quirks.c
2504 ++++ b/drivers/pci/quirks.c
2505 +@@ -2245,6 +2245,23 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
2506 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
2507 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
2508 +
2509 ++/*
2510 ++ * Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain
2511 ++ * Link bit cleared after starting the link retrain process to allow this
2512 ++ * process to finish.
2513 ++ *
2514 ++ * Affected devices: PI7C9X110, PI7C9X111SL, PI7C9X130. See also the
2515 ++ * Pericom Errata Sheet PI7C9X111SLB_errata_rev1.2_102711.pdf.
2516 ++ */
2517 ++static void quirk_enable_clear_retrain_link(struct pci_dev *dev)
2518 ++{
2519 ++ dev->clear_retrain_link = 1;
2520 ++ pci_info(dev, "Enable PCIe Retrain Link quirk\n");
2521 ++}
2522 ++DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe110, quirk_enable_clear_retrain_link);
2523 ++DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe111, quirk_enable_clear_retrain_link);
2524 ++DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe130, quirk_enable_clear_retrain_link);
2525 ++
2526 + static void fixup_rev1_53c810(struct pci_dev *dev)
2527 + {
2528 + u32 class = dev->class;
2529 +@@ -3408,6 +3425,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
2530 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
2531 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
2532 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
2533 ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset);
2534 +
2535 + /*
2536 + * Root port on some Cavium CN8xxx chips do not successfully complete a bus
2537 +@@ -4905,6 +4923,7 @@ static void quirk_no_ats(struct pci_dev *pdev)
2538 +
2539 + /* AMD Stoney platform GPU */
2540 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats);
2541 ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_no_ats);
2542 + #endif /* CONFIG_PCI_ATS */
2543 +
2544 + /* Freescale PCIe doesn't support MSI in RC mode */
2545 +@@ -5122,3 +5141,61 @@ SWITCHTEC_QUIRK(0x8573); /* PFXI 48XG3 */
2546 + SWITCHTEC_QUIRK(0x8574); /* PFXI 64XG3 */
2547 + SWITCHTEC_QUIRK(0x8575); /* PFXI 80XG3 */
2548 + SWITCHTEC_QUIRK(0x8576); /* PFXI 96XG3 */
2549 ++
2550 ++/*
2551 ++ * On Lenovo Thinkpad P50 SKUs with an Nvidia Quadro M1000M, the BIOS does
2552 ++ * not always reset the secondary Nvidia GPU between reboots if the system
2553 ++ * is configured to use Hybrid Graphics mode. This results in the GPU
2554 ++ * being left in whatever state it was in during the *previous* boot, which
2555 ++ * causes spurious interrupts from the GPU, which in turn causes us to
2556 ++ * disable the wrong IRQ and end up breaking the touchpad. Unsurprisingly,
2557 ++ * this also completely breaks nouveau.
2558 ++ *
2559 ++ * Luckily, it seems a simple reset of the Nvidia GPU brings it back to a
2560 ++ * clean state and fixes all these issues.
2561 ++ *
2562 ++ * When the machine is configured in Dedicated display mode, the issue
2563 ++ * doesn't occur. Fortunately the GPU advertises NoReset+ when in this
2564 ++ * mode, so we can detect that and avoid resetting it.
2565 ++ */
2566 ++static void quirk_reset_lenovo_thinkpad_p50_nvgpu(struct pci_dev *pdev)
2567 ++{
2568 ++ void __iomem *map;
2569 ++ int ret;
2570 ++
2571 ++ if (pdev->subsystem_vendor != PCI_VENDOR_ID_LENOVO ||
2572 ++ pdev->subsystem_device != 0x222e ||
2573 ++ !pdev->reset_fn)
2574 ++ return;
2575 ++
2576 ++ if (pci_enable_device_mem(pdev))
2577 ++ return;
2578 ++
2579 ++ /*
2580 ++ * Based on nvkm_device_ctor() in
2581 ++ * drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
2582 ++ */
2583 ++ map = pci_iomap(pdev, 0, 0x23000);
2584 ++ if (!map) {
2585 ++ pci_err(pdev, "Can't map MMIO space\n");
2586 ++ goto out_disable;
2587 ++ }
2588 ++
2589 ++ /*
2590 ++ * Make sure the GPU looks like it's been POSTed before resetting
2591 ++ * it.
2592 ++ */
2593 ++ if (ioread32(map + 0x2240c) & 0x2) {
2594 ++ pci_info(pdev, FW_BUG "GPU left initialized by EFI, resetting\n");
2595 ++ ret = pci_reset_function(pdev);
2596 ++ if (ret < 0)
2597 ++ pci_err(pdev, "Failed to reset GPU: %d\n", ret);
2598 ++ }
2599 ++
2600 ++ iounmap(map);
2601 ++out_disable:
2602 ++ pci_disable_device(pdev);
2603 ++}
2604 ++DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, 0x13b1,
2605 ++ PCI_CLASS_DISPLAY_VGA, 8,
2606 ++ quirk_reset_lenovo_thinkpad_p50_nvgpu);
2607 +diff --git a/drivers/phy/ti/phy-ti-pipe3.c b/drivers/phy/ti/phy-ti-pipe3.c
2608 +index 68ce4a082b9b..693acc167351 100644
2609 +--- a/drivers/phy/ti/phy-ti-pipe3.c
2610 ++++ b/drivers/phy/ti/phy-ti-pipe3.c
2611 +@@ -303,7 +303,7 @@ static void ti_pipe3_calibrate(struct ti_pipe3 *phy)
2612 +
2613 + val = ti_pipe3_readl(phy->phy_rx, PCIEPHYRX_ANA_PROGRAMMABILITY);
2614 + val &= ~(INTERFACE_MASK | LOSD_MASK | MEM_PLLDIV);
2615 +- val = (0x1 << INTERFACE_SHIFT | 0xA << LOSD_SHIFT);
2616 ++ val |= (0x1 << INTERFACE_SHIFT | 0xA << LOSD_SHIFT);
2617 + ti_pipe3_writel(phy->phy_rx, PCIEPHYRX_ANA_PROGRAMMABILITY, val);
2618 +
2619 + val = ti_pipe3_readl(phy->phy_rx, PCIEPHYRX_DIGITAL_MODES);
2620 +diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
2621 +index 68473d0cc57e..968dcd9d7a07 100644
2622 +--- a/drivers/regulator/core.c
2623 ++++ b/drivers/regulator/core.c
2624 +@@ -3322,15 +3322,12 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
2625 +
2626 + /* for not coupled regulators this will just set the voltage */
2627 + ret = regulator_balance_voltage(rdev, state);
2628 +- if (ret < 0)
2629 +- goto out2;
2630 ++ if (ret < 0) {
2631 ++ voltage->min_uV = old_min_uV;
2632 ++ voltage->max_uV = old_max_uV;
2633 ++ }
2634 +
2635 + out:
2636 +- return 0;
2637 +-out2:
2638 +- voltage->min_uV = old_min_uV;
2639 +- voltage->max_uV = old_max_uV;
2640 +-
2641 + return ret;
2642 + }
2643 +
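The regulator fix above collapses the out2 label into the normal path: restore the saved request inline on failure, then return ret through a single exit. A standalone sketch of that single-exit shape (names and values are illustrative):

#include <stdio.h>

struct voltage { int min_uV, max_uV; };

static int balance_voltage(void) { return -1; }	/* simulate failure */

static int set_voltage_sketch(struct voltage *v, int min_uV, int max_uV)
{
	int old_min = v->min_uV, old_max = v->max_uV;
	int ret;

	v->min_uV = min_uV;
	v->max_uV = max_uV;

	ret = balance_voltage();
	if (ret < 0) {
		/* restore the previous request inline, no second label */
		v->min_uV = old_min;
		v->max_uV = old_max;
	}
	return ret;	/* single exit: 0 on success, error code otherwise */
}

int main(void)
{
	struct voltage v = { .min_uV = 1000000, .max_uV = 1200000 };
	int ret = set_voltage_sketch(&v, 900000, 1100000);

	printf("ret=%d min=%d max=%d\n", ret, v.min_uV, v.max_uV);
	return 0;	/* prints the restored 1000000/1200000 */
}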
2644 +diff --git a/drivers/staging/media/imx/imx-ic-common.c b/drivers/staging/media/imx/imx-ic-common.c
2645 +index 765919487a73..90a926891eb9 100644
2646 +--- a/drivers/staging/media/imx/imx-ic-common.c
2647 ++++ b/drivers/staging/media/imx/imx-ic-common.c
2648 +@@ -26,7 +26,7 @@ static struct imx_ic_ops *ic_ops[IC_NUM_OPS] = {
2649 +
2650 + static int imx_ic_probe(struct platform_device *pdev)
2651 + {
2652 +- struct imx_media_internal_sd_platformdata *pdata;
2653 ++ struct imx_media_ipu_internal_sd_pdata *pdata;
2654 + struct imx_ic_priv *priv;
2655 + int ret;
2656 +
2657 +diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
2658 +index 3b7517348666..41965d8b56c4 100644
2659 +--- a/drivers/staging/media/imx/imx-media-csi.c
2660 ++++ b/drivers/staging/media/imx/imx-media-csi.c
2661 +@@ -154,9 +154,10 @@ static inline bool requires_passthrough(struct v4l2_fwnode_endpoint *ep,
2662 + /*
2663 + * Parses the fwnode endpoint from the source pad of the entity
2664 + * connected to this CSI. This will either be the entity directly
2665 +- * upstream from the CSI-2 receiver, or directly upstream from the
2666 +- * video mux. The endpoint is needed to determine the bus type and
2667 +- * bus config coming into the CSI.
2668 ++ * upstream from the CSI-2 receiver, directly upstream from the
2669 ++ * video mux, or directly upstream from the CSI itself. The endpoint
2670 ++ * is needed to determine the bus type and bus config coming into
2671 ++ * the CSI.
2672 + */
2673 + static int csi_get_upstream_endpoint(struct csi_priv *priv,
2674 + struct v4l2_fwnode_endpoint *ep)
2675 +@@ -172,7 +173,8 @@ static int csi_get_upstream_endpoint(struct csi_priv *priv,
2676 + if (!priv->src_sd)
2677 + return -EPIPE;
2678 +
2679 +- src = &priv->src_sd->entity;
2680 ++ sd = priv->src_sd;
2681 ++ src = &sd->entity;
2682 +
2683 + if (src->function == MEDIA_ENT_F_VID_MUX) {
2684 + /*
2685 +@@ -186,6 +188,14 @@ static int csi_get_upstream_endpoint(struct csi_priv *priv,
2686 + src = &sd->entity;
2687 + }
2688 +
2689 ++ /*
2690 ++ * If the source is neither the video mux nor the CSI-2 receiver,
2691 ++ * get the source pad directly upstream from CSI itself.
2692 ++ */
2693 ++ if (src->function != MEDIA_ENT_F_VID_MUX &&
2694 ++ sd->grp_id != IMX_MEDIA_GRP_ID_CSI2)
2695 ++ src = &priv->sd.entity;
2696 ++
2697 + /* get source pad of entity directly upstream from src */
2698 + pad = imx_media_find_upstream_pad(priv->md, src, 0);
2699 + if (IS_ERR(pad))
2700 +diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c
2701 +index 28a3d23aad5b..10a63a4fa90b 100644
2702 +--- a/drivers/staging/media/imx/imx-media-dev.c
2703 ++++ b/drivers/staging/media/imx/imx-media-dev.c
2704 +@@ -477,13 +477,6 @@ static int imx_media_probe(struct platform_device *pdev)
2705 + goto cleanup;
2706 + }
2707 +
2708 +- ret = imx_media_add_internal_subdevs(imxmd);
2709 +- if (ret) {
2710 +- v4l2_err(&imxmd->v4l2_dev,
2711 +- "add_internal_subdevs failed with %d\n", ret);
2712 +- goto cleanup;
2713 +- }
2714 +-
2715 + ret = imx_media_dev_notifier_register(imxmd);
2716 + if (ret)
2717 + goto del_int;
2718 +@@ -491,7 +484,7 @@ static int imx_media_probe(struct platform_device *pdev)
2719 + return 0;
2720 +
2721 + del_int:
2722 +- imx_media_remove_internal_subdevs(imxmd);
2723 ++ imx_media_remove_ipu_internal_subdevs(imxmd);
2724 + cleanup:
2725 + v4l2_async_notifier_cleanup(&imxmd->notifier);
2726 + v4l2_device_unregister(&imxmd->v4l2_dev);
2727 +@@ -508,7 +501,7 @@ static int imx_media_remove(struct platform_device *pdev)
2728 + v4l2_info(&imxmd->v4l2_dev, "Removing imx-media\n");
2729 +
2730 + v4l2_async_notifier_unregister(&imxmd->notifier);
2731 +- imx_media_remove_internal_subdevs(imxmd);
2732 ++ imx_media_remove_ipu_internal_subdevs(imxmd);
2733 + v4l2_async_notifier_cleanup(&imxmd->notifier);
2734 + media_device_unregister(&imxmd->md);
2735 + v4l2_device_unregister(&imxmd->v4l2_dev);
2736 +diff --git a/drivers/staging/media/imx/imx-media-internal-sd.c b/drivers/staging/media/imx/imx-media-internal-sd.c
2737 +index 5e10d95e5529..dc510dcfe160 100644
2738 +--- a/drivers/staging/media/imx/imx-media-internal-sd.c
2739 ++++ b/drivers/staging/media/imx/imx-media-internal-sd.c
2740 +@@ -1,7 +1,7 @@
2741 + /*
2742 + * Media driver for Freescale i.MX5/6 SOC
2743 + *
2744 +- * Adds the internal subdevices and the media links between them.
2745 ++ * Adds the IPU internal subdevices and the media links between them.
2746 + *
2747 + * Copyright (c) 2016 Mentor Graphics Inc.
2748 + *
2749 +@@ -192,7 +192,7 @@ static struct v4l2_subdev *find_sink(struct imx_media_dev *imxmd,
2750 +
2751 + /*
2752 + * retrieve IPU id from subdev name, note: can't get this from
2753 +- * struct imx_media_internal_sd_platformdata because if src is
2754 ++ * struct imx_media_ipu_internal_sd_pdata because if src is
2755 + * a CSI, it has different struct ipu_client_platformdata which
2756 + * does not contain IPU id.
2757 + */
2758 +@@ -270,7 +270,7 @@ static int add_internal_subdev(struct imx_media_dev *imxmd,
2759 + const struct internal_subdev *isd,
2760 + int ipu_id)
2761 + {
2762 +- struct imx_media_internal_sd_platformdata pdata;
2763 ++ struct imx_media_ipu_internal_sd_pdata pdata;
2764 + struct platform_device_info pdevinfo = {};
2765 + struct platform_device *pdev;
2766 +
2767 +@@ -298,13 +298,14 @@ static int add_internal_subdev(struct imx_media_dev *imxmd,
2768 + }
2769 +
2770 + /* adds the internal subdevs in one ipu */
2771 +-static int add_ipu_internal_subdevs(struct imx_media_dev *imxmd, int ipu_id)
2772 ++int imx_media_add_ipu_internal_subdevs(struct imx_media_dev *imxmd,
2773 ++ int ipu_id)
2774 + {
2775 + enum isd_enum i;
2776 ++ int ret;
2777 +
2778 + for (i = 0; i < num_isd; i++) {
2779 + const struct internal_subdev *isd = &int_subdev[i];
2780 +- int ret;
2781 +
2782 + /*
2783 + * the CSIs are represented in the device-tree, so those
2784 +@@ -322,32 +323,17 @@ static int add_ipu_internal_subdevs(struct imx_media_dev *imxmd, int ipu_id)
2785 + }
2786 +
2787 + if (ret)
2788 +- return ret;
2789 ++ goto remove;
2790 + }
2791 +
2792 + return 0;
2793 +-}
2794 +-
2795 +-int imx_media_add_internal_subdevs(struct imx_media_dev *imxmd)
2796 +-{
2797 +- int ret;
2798 +-
2799 +- ret = add_ipu_internal_subdevs(imxmd, 0);
2800 +- if (ret)
2801 +- goto remove;
2802 +-
2803 +- ret = add_ipu_internal_subdevs(imxmd, 1);
2804 +- if (ret)
2805 +- goto remove;
2806 +-
2807 +- return 0;
2808 +
2809 + remove:
2810 +- imx_media_remove_internal_subdevs(imxmd);
2811 ++ imx_media_remove_ipu_internal_subdevs(imxmd);
2812 + return ret;
2813 + }
2814 +
2815 +-void imx_media_remove_internal_subdevs(struct imx_media_dev *imxmd)
2816 ++void imx_media_remove_ipu_internal_subdevs(struct imx_media_dev *imxmd)
2817 + {
2818 + struct imx_media_async_subdev *imxasd;
2819 + struct v4l2_async_subdev *asd;
2820 +diff --git a/drivers/staging/media/imx/imx-media-of.c b/drivers/staging/media/imx/imx-media-of.c
2821 +index 03446335ac03..12383f4785ad 100644
2822 +--- a/drivers/staging/media/imx/imx-media-of.c
2823 ++++ b/drivers/staging/media/imx/imx-media-of.c
2824 +@@ -23,36 +23,25 @@
2825 + int imx_media_of_add_csi(struct imx_media_dev *imxmd,
2826 + struct device_node *csi_np)
2827 + {
2828 +- int ret;
2829 +-
2830 + if (!of_device_is_available(csi_np)) {
2831 + dev_dbg(imxmd->md.dev, "%s: %pOFn not enabled\n", __func__,
2832 + csi_np);
2833 +- /* unavailable is not an error */
2834 +- return 0;
2835 ++ return -ENODEV;
2836 + }
2837 +
2838 + /* add CSI fwnode to async notifier */
2839 +- ret = imx_media_add_async_subdev(imxmd, of_fwnode_handle(csi_np), NULL);
2840 +- if (ret) {
2841 +- if (ret == -EEXIST) {
2842 +- /* already added, everything is fine */
2843 +- return 0;
2844 +- }
2845 +-
2846 +- /* other error, can't continue */
2847 +- return ret;
2848 +- }
2849 +-
2850 +- return 0;
2851 ++ return imx_media_add_async_subdev(imxmd, of_fwnode_handle(csi_np),
2852 ++ NULL);
2853 + }
2854 + EXPORT_SYMBOL_GPL(imx_media_of_add_csi);
2855 +
2856 + int imx_media_add_of_subdevs(struct imx_media_dev *imxmd,
2857 + struct device_node *np)
2858 + {
2859 ++ bool ipu_found[2] = {false, false};
2860 + struct device_node *csi_np;
2861 + int i, ret;
2862 ++ u32 ipu_id;
2863 +
2864 + for (i = 0; ; i++) {
2865 + csi_np = of_parse_phandle(np, "ports", i);
2866 +@@ -60,12 +49,43 @@ int imx_media_add_of_subdevs(struct imx_media_dev *imxmd,
2867 + break;
2868 +
2869 + ret = imx_media_of_add_csi(imxmd, csi_np);
2870 +- of_node_put(csi_np);
2871 +- if (ret)
2872 +- return ret;
2873 ++ if (ret) {
2874 ++ /* unavailable or already added is not an error */
2875 ++ if (ret == -ENODEV || ret == -EEXIST) {
2876 ++ of_node_put(csi_np);
2877 ++ continue;
2878 ++ }
2879 ++
2880 ++ /* other error, can't continue */
2881 ++ goto err_out;
2882 ++ }
2883 ++
2884 ++ ret = of_alias_get_id(csi_np->parent, "ipu");
2885 ++ if (ret < 0)
2886 ++ goto err_out;
2887 ++ if (ret > 1) {
2888 ++ ret = -EINVAL;
2889 ++ goto err_out;
2890 ++ }
2891 ++
2892 ++ ipu_id = ret;
2893 ++
2894 ++ if (!ipu_found[ipu_id]) {
2895 ++ ret = imx_media_add_ipu_internal_subdevs(imxmd,
2896 ++ ipu_id);
2897 ++ if (ret)
2898 ++ goto err_out;
2899 ++ }
2900 ++
2901 ++ ipu_found[ipu_id] = true;
2902 + }
2903 +
2904 + return 0;
2905 ++
2906 ++err_out:
2907 ++ imx_media_remove_ipu_internal_subdevs(imxmd);
2908 ++ of_node_put(csi_np);
2909 ++ return ret;
2910 + }
2911 +
2912 + /*
2913 +@@ -145,15 +165,18 @@ int imx_media_create_csi_of_links(struct imx_media_dev *imxmd,
2914 + struct v4l2_subdev *csi)
2915 + {
2916 + struct device_node *csi_np = csi->dev->of_node;
2917 +- struct fwnode_handle *fwnode, *csi_ep;
2918 +- struct v4l2_fwnode_link link;
2919 + struct device_node *ep;
2920 +- int ret;
2921 +-
2922 +- link.local_node = of_fwnode_handle(csi_np);
2923 +- link.local_port = CSI_SINK_PAD;
2924 +
2925 + for_each_child_of_node(csi_np, ep) {
2926 ++ struct fwnode_handle *fwnode, *csi_ep;
2927 ++ struct v4l2_fwnode_link link;
2928 ++ int ret;
2929 ++
2930 ++ memset(&link, 0, sizeof(link));
2931 ++
2932 ++ link.local_node = of_fwnode_handle(csi_np);
2933 ++ link.local_port = CSI_SINK_PAD;
2934 ++
2935 + csi_ep = of_fwnode_handle(ep);
2936 +
2937 + fwnode = fwnode_graph_get_remote_endpoint(csi_ep);
2938 +diff --git a/drivers/staging/media/imx/imx-media-vdic.c b/drivers/staging/media/imx/imx-media-vdic.c
2939 +index 2808662e2597..8a9af4688fd4 100644
2940 +--- a/drivers/staging/media/imx/imx-media-vdic.c
2941 ++++ b/drivers/staging/media/imx/imx-media-vdic.c
2942 +@@ -934,7 +934,7 @@ static const struct v4l2_subdev_internal_ops vdic_internal_ops = {
2943 +
2944 + static int imx_vdic_probe(struct platform_device *pdev)
2945 + {
2946 +- struct imx_media_internal_sd_platformdata *pdata;
2947 ++ struct imx_media_ipu_internal_sd_pdata *pdata;
2948 + struct vdic_priv *priv;
2949 + int ret;
2950 +
2951 +diff --git a/drivers/staging/media/imx/imx-media.h b/drivers/staging/media/imx/imx-media.h
2952 +index ae964c8d5be1..dd603a6b3a70 100644
2953 +--- a/drivers/staging/media/imx/imx-media.h
2954 ++++ b/drivers/staging/media/imx/imx-media.h
2955 +@@ -115,7 +115,7 @@ struct imx_media_pad_vdev {
2956 + struct list_head list;
2957 + };
2958 +
2959 +-struct imx_media_internal_sd_platformdata {
2960 ++struct imx_media_ipu_internal_sd_pdata {
2961 + char sd_name[V4L2_SUBDEV_NAME_SIZE];
2962 + u32 grp_id;
2963 + int ipu_id;
2964 +@@ -252,10 +252,11 @@ struct imx_media_fim *imx_media_fim_init(struct v4l2_subdev *sd);
2965 + void imx_media_fim_free(struct imx_media_fim *fim);
2966 +
2967 + /* imx-media-internal-sd.c */
2968 +-int imx_media_add_internal_subdevs(struct imx_media_dev *imxmd);
2969 ++int imx_media_add_ipu_internal_subdevs(struct imx_media_dev *imxmd,
2970 ++ int ipu_id);
2971 + int imx_media_create_ipu_internal_links(struct imx_media_dev *imxmd,
2972 + struct v4l2_subdev *sd);
2973 +-void imx_media_remove_internal_subdevs(struct imx_media_dev *imxmd);
2974 ++void imx_media_remove_ipu_internal_subdevs(struct imx_media_dev *imxmd);
2975 +
2976 + /* imx-media-of.c */
2977 + int imx_media_add_of_subdevs(struct imx_media_dev *dev,
2978 +diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c
2979 +index 3fba7c27c0ec..1ba62fcdcae8 100644
2980 +--- a/drivers/staging/media/imx/imx7-media-csi.c
2981 ++++ b/drivers/staging/media/imx/imx7-media-csi.c
2982 +@@ -1271,7 +1271,7 @@ static int imx7_csi_probe(struct platform_device *pdev)
2983 + platform_set_drvdata(pdev, &csi->sd);
2984 +
2985 + ret = imx_media_of_add_csi(imxmd, node);
2986 +- if (ret < 0)
2987 ++ if (ret < 0 && ret != -ENODEV && ret != -EEXIST)
2988 + goto cleanup;
2989 +
2990 + ret = imx_media_dev_notifier_register(imxmd);
2991 +diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
2992 +index ba906876cc45..fd02e8a4841d 100644
2993 +--- a/drivers/video/fbdev/efifb.c
2994 ++++ b/drivers/video/fbdev/efifb.c
2995 +@@ -476,8 +476,12 @@ static int efifb_probe(struct platform_device *dev)
2996 + * If the UEFI memory map covers the efifb region, we may only
2997 + * remap it using the attributes the memory map prescribes.
2998 + */
2999 +- mem_flags |= EFI_MEMORY_WT | EFI_MEMORY_WB;
3000 +- mem_flags &= md.attribute;
3001 ++ md.attribute &= EFI_MEMORY_UC | EFI_MEMORY_WC |
3002 ++ EFI_MEMORY_WT | EFI_MEMORY_WB;
3003 ++ if (md.attribute) {
3004 ++ mem_flags |= EFI_MEMORY_WT | EFI_MEMORY_WB;
3005 ++ mem_flags &= md.attribute;
3006 ++ }
3007 + }
3008 + if (mem_flags & EFI_MEMORY_WC)
3009 + info->screen_base = ioremap_wc(efifb_fix.smem_start,
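
The efifb hunk guards against firmware whose memory map entry advertises none of the architectural cacheability bits: md.attribute is masked down to UC/WC/WT/WB first, and the WT/WB narrowing of mem_flags is applied only if something survives. A sketch of that guard, with made-up flag values standing in for the real EFI_MEMORY_* constants:

#include <stdio.h>

/* illustrative values; the real EFI_MEMORY_* constants live in efi.h */
#define EFI_MEMORY_UC 0x1u
#define EFI_MEMORY_WC 0x2u
#define EFI_MEMORY_WT 0x4u
#define EFI_MEMORY_WB 0x8u

static unsigned int narrow(unsigned int mem_flags, unsigned int attribute)
{
	attribute &= EFI_MEMORY_UC | EFI_MEMORY_WC |
		     EFI_MEMORY_WT | EFI_MEMORY_WB;
	if (attribute) {	/* only trust a non-empty description */
		mem_flags |= EFI_MEMORY_WT | EFI_MEMORY_WB;
		mem_flags &= attribute;
	}
	return mem_flags;
}

int main(void)
{
	/* bogus map entry: mem_flags passes through untouched */
	printf("%#x\n", narrow(EFI_MEMORY_WC, 0));
	/* WB-capable entry: request narrowed down to WB */
	printf("%#x\n", narrow(EFI_MEMORY_WC, EFI_MEMORY_WB));
	return 0;
}

Without the non-zero check, a zeroed attribute field would wipe mem_flags entirely and leave the driver with no usable caching mode for the framebuffer.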
3010 +diff --git a/drivers/video/fbdev/sm712.h b/drivers/video/fbdev/sm712.h
3011 +index aad1cc4be34a..c7ebf03b8d53 100644
3012 +--- a/drivers/video/fbdev/sm712.h
3013 ++++ b/drivers/video/fbdev/sm712.h
3014 +@@ -15,14 +15,10 @@
3015 +
3016 + #define FB_ACCEL_SMI_LYNX 88
3017 +
3018 +-#define SCREEN_X_RES 1024
3019 +-#define SCREEN_Y_RES 600
3020 +-#define SCREEN_BPP 16
3021 +-
3022 +-/*Assume SM712 graphics chip has 4MB VRAM */
3023 +-#define SM712_VIDEOMEMORYSIZE 0x00400000
3024 +-/*Assume SM722 graphics chip has 8MB VRAM */
3025 +-#define SM722_VIDEOMEMORYSIZE 0x00800000
3026 ++#define SCREEN_X_RES 1024
3027 ++#define SCREEN_Y_RES_PC 768
3028 ++#define SCREEN_Y_RES_NETBOOK 600
3029 ++#define SCREEN_BPP 16
3030 +
3031 + #define dac_reg (0x3c8)
3032 + #define dac_val (0x3c9)
3033 +diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c
3034 +index 502d0de2feec..f1dcc6766d1e 100644
3035 +--- a/drivers/video/fbdev/sm712fb.c
3036 ++++ b/drivers/video/fbdev/sm712fb.c
3037 +@@ -530,6 +530,65 @@ static const struct modeinit vgamode[] = {
3038 + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
3039 + },
3040 + },
3041 ++ { /* 1024 x 768 16Bpp 60Hz */
3042 ++ 1024, 768, 16, 60,
3043 ++ /* Init_MISC */
3044 ++ 0xEB,
3045 ++ { /* Init_SR0_SR4 */
3046 ++ 0x03, 0x01, 0x0F, 0x03, 0x0E,
3047 ++ },
3048 ++ { /* Init_SR10_SR24 */
3049 ++ 0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
3050 ++ 0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
3051 ++ 0xC4, 0x30, 0x02, 0x01, 0x01,
3052 ++ },
3053 ++ { /* Init_SR30_SR75 */
3054 ++ 0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
3055 ++ 0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
3056 ++ 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
3057 ++ 0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
3058 ++ 0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
3059 ++ 0x0F, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
3060 ++ 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
3061 ++ 0x50, 0x03, 0x74, 0x14, 0x3B, 0x0D, 0x09, 0x02,
3062 ++ 0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
3063 ++ },
3064 ++ { /* Init_SR80_SR93 */
3065 ++ 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
3066 ++ 0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
3067 ++ 0x00, 0x00, 0x00, 0x00,
3068 ++ },
3069 ++ { /* Init_SRA0_SRAF */
3070 ++ 0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
3071 ++ 0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
3072 ++ },
3073 ++ { /* Init_GR00_GR08 */
3074 ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
3075 ++ 0xFF,
3076 ++ },
3077 ++ { /* Init_AR00_AR14 */
3078 ++ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
3079 ++ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
3080 ++ 0x41, 0x00, 0x0F, 0x00, 0x00,
3081 ++ },
3082 ++ { /* Init_CR00_CR18 */
3083 ++ 0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
3084 ++ 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
3085 ++ 0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
3086 ++ 0xFF,
3087 ++ },
3088 ++ { /* Init_CR30_CR4D */
3089 ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
3090 ++ 0x00, 0x00, 0x00, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
3091 ++ 0xA3, 0x7F, 0x00, 0x86, 0x15, 0x24, 0xFF, 0x00,
3092 ++ 0x01, 0x07, 0xE5, 0x20, 0x7F, 0xFF,
3093 ++ },
3094 ++ { /* Init_CR90_CRA7 */
3095 ++ 0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
3096 ++ 0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
3097 ++ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
3098 ++ },
3099 ++ },
3100 + { /* mode#5: 1024 x 768 24Bpp 60Hz */
3101 + 1024, 768, 24, 60,
3102 + /* Init_MISC */
3103 +@@ -827,67 +886,80 @@ static inline unsigned int chan_to_field(unsigned int chan,
3104 +
3105 + static int smtc_blank(int blank_mode, struct fb_info *info)
3106 + {
3107 ++ struct smtcfb_info *sfb = info->par;
3108 ++
3109 + /* clear DPMS setting */
3110 + switch (blank_mode) {
3111 + case FB_BLANK_UNBLANK:
3112 + /* Screen On: HSync: On, VSync : On */
3113 ++
3114 ++ switch (sfb->chip_id) {
3115 ++ case 0x710:
3116 ++ case 0x712:
3117 ++ smtc_seqw(0x6a, 0x16);
3118 ++ smtc_seqw(0x6b, 0x02);
3119 ++ break;
3120 ++ case 0x720:
3121 ++ smtc_seqw(0x6a, 0x0d);
3122 ++ smtc_seqw(0x6b, 0x02);
3123 ++ break;
3124 ++ }
3125 ++
3126 ++ smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
3127 + smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20)));
3128 +- smtc_seqw(0x6a, 0x16);
3129 +- smtc_seqw(0x6b, 0x02);
3130 + smtc_seqw(0x21, (smtc_seqr(0x21) & 0x77));
3131 + smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
3132 +- smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
3133 +- smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
3134 + smtc_seqw(0x31, (smtc_seqr(0x31) | 0x03));
3135 ++ smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
3136 + break;
3137 + case FB_BLANK_NORMAL:
3138 + /* Screen Off: HSync: On, VSync : On Soft blank */
3139 ++ smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
3140 ++ smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
3141 ++ smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
3142 + smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20)));
3143 ++ smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
3144 + smtc_seqw(0x6a, 0x16);
3145 + smtc_seqw(0x6b, 0x02);
3146 +- smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
3147 +- smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
3148 +- smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
3149 +- smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
3150 + break;
3151 + case FB_BLANK_VSYNC_SUSPEND:
3152 + /* Screen On: HSync: On, VSync : Off */
3153 ++ smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
3154 ++ smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
3155 ++ smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0x20));
3156 + smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
3157 +- smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
3158 +- smtc_seqw(0x6a, 0x0c);
3159 +- smtc_seqw(0x6b, 0x02);
3160 + smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
3161 ++ smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
3162 + smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x20));
3163 +- smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0x20));
3164 +- smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
3165 +- smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
3166 + smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
3167 ++ smtc_seqw(0x6a, 0x0c);
3168 ++ smtc_seqw(0x6b, 0x02);
3169 + break;
3170 + case FB_BLANK_HSYNC_SUSPEND:
3171 + /* Screen On: HSync: Off, VSync : On */
3172 ++ smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
3173 ++ smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
3174 ++ smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
3175 + smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
3176 +- smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
3177 +- smtc_seqw(0x6a, 0x0c);
3178 +- smtc_seqw(0x6b, 0x02);
3179 + smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
3180 ++ smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
3181 + smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x10));
3182 +- smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
3183 +- smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
3184 +- smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
3185 + smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
3186 ++ smtc_seqw(0x6a, 0x0c);
3187 ++ smtc_seqw(0x6b, 0x02);
3188 + break;
3189 + case FB_BLANK_POWERDOWN:
3190 + /* Screen On: HSync: Off, VSync : Off */
3191 ++ smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
3192 ++ smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
3193 ++ smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
3194 + smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
3195 +- smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
3196 +- smtc_seqw(0x6a, 0x0c);
3197 +- smtc_seqw(0x6b, 0x02);
3198 + smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
3199 ++ smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
3200 + smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x30));
3201 +- smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
3202 +- smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
3203 +- smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
3204 + smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
3205 ++ smtc_seqw(0x6a, 0x0c);
3206 ++ smtc_seqw(0x6b, 0x02);
3207 + break;
3208 + default:
3209 + return -EINVAL;
3210 +@@ -1145,8 +1217,10 @@ static void sm7xx_set_timing(struct smtcfb_info *sfb)
3211 +
3212 + /* init SEQ register SR30 - SR75 */
3213 + for (i = 0; i < SIZE_SR30_SR75; i++)
3214 +- if ((i + 0x30) != 0x62 && (i + 0x30) != 0x6a &&
3215 +- (i + 0x30) != 0x6b)
3216 ++ if ((i + 0x30) != 0x30 && (i + 0x30) != 0x62 &&
3217 ++ (i + 0x30) != 0x6a && (i + 0x30) != 0x6b &&
3218 ++ (i + 0x30) != 0x70 && (i + 0x30) != 0x71 &&
3219 ++ (i + 0x30) != 0x74 && (i + 0x30) != 0x75)
3220 + smtc_seqw(i + 0x30,
3221 + vgamode[j].init_sr30_sr75[i]);
3222 +
3223 +@@ -1171,8 +1245,12 @@ static void sm7xx_set_timing(struct smtcfb_info *sfb)
3224 + smtc_crtcw(i, vgamode[j].init_cr00_cr18[i]);
3225 +
3226 + /* init CRTC register CR30 - CR4D */
3227 +- for (i = 0; i < SIZE_CR30_CR4D; i++)
3228 ++ for (i = 0; i < SIZE_CR30_CR4D; i++) {
3229 ++ if ((i + 0x30) >= 0x3B && (i + 0x30) <= 0x3F)
3230 ++ /* side-effect, don't write to CR3B-CR3F */
3231 ++ continue;
3232 + smtc_crtcw(i + 0x30, vgamode[j].init_cr30_cr4d[i]);
3233 ++ }
3234 +
3235 + /* init CRTC register CR90 - CRA7 */
3236 + for (i = 0; i < SIZE_CR90_CRA7; i++)
3237 +@@ -1323,6 +1401,11 @@ static int smtc_map_smem(struct smtcfb_info *sfb,
3238 + {
3239 + sfb->fb->fix.smem_start = pci_resource_start(pdev, 0);
3240 +
3241 ++ if (sfb->chip_id == 0x720)
3242 ++ /* on SM720, the framebuffer starts at the 1 MB offset */
3243 ++ sfb->fb->fix.smem_start += 0x00200000;
3244 ++
3245 ++ /* XXX: is it safe for SM720 on Big-Endian? */
3246 + if (sfb->fb->var.bits_per_pixel == 32)
3247 + sfb->fb->fix.smem_start += big_addr;
3248 +
3249 +@@ -1360,12 +1443,82 @@ static inline void sm7xx_init_hw(void)
3250 + outb_p(0x11, 0x3c5);
3251 + }
3252 +
3253 ++static u_long sm7xx_vram_probe(struct smtcfb_info *sfb)
3254 ++{
3255 ++ u8 vram;
3256 ++
3257 ++ switch (sfb->chip_id) {
3258 ++ case 0x710:
3259 ++ case 0x712:
3260 ++ /*
3261 ++ * Assume SM712 graphics chip has 4MB VRAM.
3262 ++ *
3263 ++ * FIXME: SM712 can have 2MB VRAM, which is used on earlier
3264 ++ * laptops, such as IBM Thinkpad 240X. This driver would
3265 ++ * probably crash on those machines. If anyone gets one of
3266 ++ * those and is willing to help, run "git blame" and send me
3267 ++ * an E-mail.
3268 ++ */
3269 ++ return 0x00400000;
3270 ++ case 0x720:
3271 ++ outb_p(0x76, 0x3c4);
3272 ++ vram = inb_p(0x3c5) >> 6;
3273 ++
3274 ++ if (vram == 0x00)
3275 ++ return 0x00800000; /* 8 MB */
3276 ++ else if (vram == 0x01)
3277 ++ return 0x01000000; /* 16 MB */
3278 ++ else if (vram == 0x02)
3279 ++ return 0x00400000; /* illegal, fallback to 4 MB */
3280 ++ else if (vram == 0x03)
3281 ++ return 0x00400000; /* 4 MB */
3282 ++ }
3283 ++ return 0; /* unknown hardware */
3284 ++}
3285 ++
3286 ++static void sm7xx_resolution_probe(struct smtcfb_info *sfb)
3287 ++{
3288 ++ /* get mode parameter from smtc_scr_info */
3289 ++ if (smtc_scr_info.lfb_width != 0) {
3290 ++ sfb->fb->var.xres = smtc_scr_info.lfb_width;
3291 ++ sfb->fb->var.yres = smtc_scr_info.lfb_height;
3292 ++ sfb->fb->var.bits_per_pixel = smtc_scr_info.lfb_depth;
3293 ++ goto final;
3294 ++ }
3295 ++
3296 ++ /*
3297 ++ * No parameter, default resolution is 1024x768-16.
3298 ++ *
3299 ++ * FIXME: earlier laptops, such as the IBM Thinkpad 240X, have an 800x600
3300 ++ * panel; also see the comments about the Thinkpad 240X above.
3301 ++ */
3302 ++ sfb->fb->var.xres = SCREEN_X_RES;
3303 ++ sfb->fb->var.yres = SCREEN_Y_RES_PC;
3304 ++ sfb->fb->var.bits_per_pixel = SCREEN_BPP;
3305 ++
3306 ++#ifdef CONFIG_MIPS
3307 ++ /*
3308 ++ * Loongson MIPS netbooks use 1024x600 LCD panels, which is the original
3309 ++ * target platform of this driver, but nearly all old x86 laptops have
3310 ++ * 1024x768. Lighting 768 panels using 600's timings would partially
3311 ++ * garble the display, so we don't want that. But it's not possible to
3312 ++ * distinguish them reliably.
3313 ++ *
3314 ++ * So we change the default to 768, but keep 600 as-is on MIPS.
3315 ++ */
3316 ++ sfb->fb->var.yres = SCREEN_Y_RES_NETBOOK;
3317 ++#endif
3318 ++
3319 ++final:
3320 ++ big_pixel_depth(sfb->fb->var.bits_per_pixel, smtc_scr_info.lfb_depth);
3321 ++}
3322 ++
3323 + static int smtcfb_pci_probe(struct pci_dev *pdev,
3324 + const struct pci_device_id *ent)
3325 + {
3326 + struct smtcfb_info *sfb;
3327 + struct fb_info *info;
3328 +- u_long smem_size = 0x00800000; /* default 8MB */
3329 ++ u_long smem_size;
3330 + int err;
3331 + unsigned long mmio_base;
3332 +
3333 +@@ -1405,29 +1558,19 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
3334 +
3335 + sm7xx_init_hw();
3336 +
3337 +- /* get mode parameter from smtc_scr_info */
3338 +- if (smtc_scr_info.lfb_width != 0) {
3339 +- sfb->fb->var.xres = smtc_scr_info.lfb_width;
3340 +- sfb->fb->var.yres = smtc_scr_info.lfb_height;
3341 +- sfb->fb->var.bits_per_pixel = smtc_scr_info.lfb_depth;
3342 +- } else {
3343 +- /* default resolution 1024x600 16bit mode */
3344 +- sfb->fb->var.xres = SCREEN_X_RES;
3345 +- sfb->fb->var.yres = SCREEN_Y_RES;
3346 +- sfb->fb->var.bits_per_pixel = SCREEN_BPP;
3347 +- }
3348 +-
3349 +- big_pixel_depth(sfb->fb->var.bits_per_pixel, smtc_scr_info.lfb_depth);
3350 + /* Map address and memory detection */
3351 + mmio_base = pci_resource_start(pdev, 0);
3352 + pci_read_config_byte(pdev, PCI_REVISION_ID, &sfb->chip_rev_id);
3353 +
3354 ++ smem_size = sm7xx_vram_probe(sfb);
3355 ++ dev_info(&pdev->dev, "%lu MiB of VRAM detected.\n",
3356 ++ smem_size / 1048576);
3357 ++
3358 + switch (sfb->chip_id) {
3359 + case 0x710:
3360 + case 0x712:
3361 + sfb->fb->fix.mmio_start = mmio_base + 0x00400000;
3362 + sfb->fb->fix.mmio_len = 0x00400000;
3363 +- smem_size = SM712_VIDEOMEMORYSIZE;
3364 + sfb->lfb = ioremap(mmio_base, mmio_addr);
3365 + if (!sfb->lfb) {
3366 + dev_err(&pdev->dev,
3367 +@@ -1459,8 +1602,7 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
3368 + case 0x720:
3369 + sfb->fb->fix.mmio_start = mmio_base;
3370 + sfb->fb->fix.mmio_len = 0x00200000;
3371 +- smem_size = SM722_VIDEOMEMORYSIZE;
3372 +- sfb->dp_regs = ioremap(mmio_base, 0x00a00000);
3373 ++ sfb->dp_regs = ioremap(mmio_base, 0x00200000 + smem_size);
3374 + sfb->lfb = sfb->dp_regs + 0x00200000;
3375 + sfb->mmio = (smtc_regbaseaddress =
3376 + sfb->dp_regs + 0x000c0000);
3377 +@@ -1477,6 +1619,9 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
3378 + goto failed_fb;
3379 + }
3380 +
3381 ++ /* probe and decide resolution */
3382 ++ sm7xx_resolution_probe(sfb);
3383 ++
3384 + /* can support 32 bpp */
3385 + if (sfb->fb->var.bits_per_pixel == 15)
3386 + sfb->fb->var.bits_per_pixel = 16;
3387 +@@ -1487,7 +1632,11 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
3388 + if (err)
3389 + goto failed;
3390 +
3391 +- smtcfb_setmode(sfb);
3392 ++ /*
3393 ++ * The screen would be temporarily garbled when sm712fb takes over
3394 ++ * vesafb or VGA text mode. Zero the framebuffer.
3395 ++ */
3396 ++ memset_io(sfb->lfb, 0, sfb->fb->fix.smem_len);
3397 +
3398 + err = register_framebuffer(info);
3399 + if (err < 0)
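
sm7xx_vram_probe() above replaces the old hard-coded SM712_VIDEOMEMORYSIZE/SM722_VIDEOMEMORYSIZE constants: on the SM720 it reads sequencer register SR76 and decodes bits 7:6 into the installed VRAM size, treating the reserved encoding as 4 MB. The decode is a two-bit lookup, roughly:

#include <stdio.h>

/* decode the SM720 SR76[7:6] VRAM field, per the hunk above */
static unsigned long sm720_vram_size(unsigned char sr76)
{
	switch (sr76 >> 6) {
	case 0x00: return 0x00800000;	/*  8 MB */
	case 0x01: return 0x01000000;	/* 16 MB */
	case 0x02: return 0x00400000;	/* illegal, fall back to 4 MB */
	default:   return 0x00400000;	/*  4 MB */
	}
}

int main(void)
{
	unsigned int v;

	for (v = 0; v < 4; v++)
		printf("SR76[7:6]=%u -> %lu MiB\n", v,
		       sm720_vram_size((unsigned char)(v << 6)) / 1048576);
	return 0;
}

Sizing the SM720 ioremap() from this probe (0x00200000 + smem_size) is what lets 16 MB parts work where the old fixed 0x00a00000 mapping fell short.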
3400 +diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
3401 +index 1d034dddc556..5a0d6fb02bbc 100644
3402 +--- a/drivers/video/fbdev/udlfb.c
3403 ++++ b/drivers/video/fbdev/udlfb.c
3404 +@@ -594,8 +594,7 @@ static int dlfb_render_hline(struct dlfb_data *dlfb, struct urb **urb_ptr,
3405 + return 0;
3406 + }
3407 +
3408 +-static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y,
3409 +- int width, int height, char *data)
3410 ++static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y, int width, int height)
3411 + {
3412 + int i, ret;
3413 + char *cmd;
3414 +@@ -607,21 +606,29 @@ static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y,
3415 +
3416 + start_cycles = get_cycles();
3417 +
3418 ++ mutex_lock(&dlfb->render_mutex);
3419 ++
3420 + aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
3421 + width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
3422 + x = aligned_x;
3423 +
3424 + if ((width <= 0) ||
3425 + (x + width > dlfb->info->var.xres) ||
3426 +- (y + height > dlfb->info->var.yres))
3427 +- return -EINVAL;
3428 ++ (y + height > dlfb->info->var.yres)) {
3429 ++ ret = -EINVAL;
3430 ++ goto unlock_ret;
3431 ++ }
3432 +
3433 +- if (!atomic_read(&dlfb->usb_active))
3434 +- return 0;
3435 ++ if (!atomic_read(&dlfb->usb_active)) {
3436 ++ ret = 0;
3437 ++ goto unlock_ret;
3438 ++ }
3439 +
3440 + urb = dlfb_get_urb(dlfb);
3441 +- if (!urb)
3442 +- return 0;
3443 ++ if (!urb) {
3444 ++ ret = 0;
3445 ++ goto unlock_ret;
3446 ++ }
3447 + cmd = urb->transfer_buffer;
3448 +
3449 + for (i = y; i < y + height ; i++) {
3450 +@@ -641,7 +648,7 @@ static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y,
3451 + *cmd++ = 0xAF;
3452 + /* Send partial buffer remaining before exiting */
3453 + len = cmd - (char *) urb->transfer_buffer;
3454 +- ret = dlfb_submit_urb(dlfb, urb, len);
3455 ++ dlfb_submit_urb(dlfb, urb, len);
3456 + bytes_sent += len;
3457 + } else
3458 + dlfb_urb_completion(urb);
3459 +@@ -655,7 +662,55 @@ error:
3460 + >> 10)), /* Kcycles */
3461 + &dlfb->cpu_kcycles_used);
3462 +
3463 +- return 0;
3464 ++ ret = 0;
3465 ++
3466 ++unlock_ret:
3467 ++ mutex_unlock(&dlfb->render_mutex);
3468 ++ return ret;
3469 ++}
3470 ++
3471 ++static void dlfb_init_damage(struct dlfb_data *dlfb)
3472 ++{
3473 ++ dlfb->damage_x = INT_MAX;
3474 ++ dlfb->damage_x2 = 0;
3475 ++ dlfb->damage_y = INT_MAX;
3476 ++ dlfb->damage_y2 = 0;
3477 ++}
3478 ++
3479 ++static void dlfb_damage_work(struct work_struct *w)
3480 ++{
3481 ++ struct dlfb_data *dlfb = container_of(w, struct dlfb_data, damage_work);
3482 ++ int x, x2, y, y2;
3483 ++
3484 ++ spin_lock_irq(&dlfb->damage_lock);
3485 ++ x = dlfb->damage_x;
3486 ++ x2 = dlfb->damage_x2;
3487 ++ y = dlfb->damage_y;
3488 ++ y2 = dlfb->damage_y2;
3489 ++ dlfb_init_damage(dlfb);
3490 ++ spin_unlock_irq(&dlfb->damage_lock);
3491 ++
3492 ++ if (x < x2 && y < y2)
3493 ++ dlfb_handle_damage(dlfb, x, y, x2 - x, y2 - y);
3494 ++}
3495 ++
3496 ++static void dlfb_offload_damage(struct dlfb_data *dlfb, int x, int y, int width, int height)
3497 ++{
3498 ++ unsigned long flags;
3499 ++ int x2 = x + width;
3500 ++ int y2 = y + height;
3501 ++
3502 ++ if (x >= x2 || y >= y2)
3503 ++ return;
3504 ++
3505 ++ spin_lock_irqsave(&dlfb->damage_lock, flags);
3506 ++ dlfb->damage_x = min(x, dlfb->damage_x);
3507 ++ dlfb->damage_x2 = max(x2, dlfb->damage_x2);
3508 ++ dlfb->damage_y = min(y, dlfb->damage_y);
3509 ++ dlfb->damage_y2 = max(y2, dlfb->damage_y2);
3510 ++ spin_unlock_irqrestore(&dlfb->damage_lock, flags);
3511 ++
3512 ++ schedule_work(&dlfb->damage_work);
3513 + }
3514 +
3515 + /*
3516 +@@ -679,7 +734,7 @@ static ssize_t dlfb_ops_write(struct fb_info *info, const char __user *buf,
3517 + (u32)info->var.yres);
3518 +
3519 + dlfb_handle_damage(dlfb, 0, start, info->var.xres,
3520 +- lines, info->screen_base);
3521 ++ lines);
3522 + }
3523 +
3524 + return result;
3525 +@@ -694,8 +749,8 @@ static void dlfb_ops_copyarea(struct fb_info *info,
3526 +
3527 + sys_copyarea(info, area);
3528 +
3529 +- dlfb_handle_damage(dlfb, area->dx, area->dy,
3530 +- area->width, area->height, info->screen_base);
3531 ++ dlfb_offload_damage(dlfb, area->dx, area->dy,
3532 ++ area->width, area->height);
3533 + }
3534 +
3535 + static void dlfb_ops_imageblit(struct fb_info *info,
3536 +@@ -705,8 +760,8 @@ static void dlfb_ops_imageblit(struct fb_info *info,
3537 +
3538 + sys_imageblit(info, image);
3539 +
3540 +- dlfb_handle_damage(dlfb, image->dx, image->dy,
3541 +- image->width, image->height, info->screen_base);
3542 ++ dlfb_offload_damage(dlfb, image->dx, image->dy,
3543 ++ image->width, image->height);
3544 + }
3545 +
3546 + static void dlfb_ops_fillrect(struct fb_info *info,
3547 +@@ -716,8 +771,8 @@ static void dlfb_ops_fillrect(struct fb_info *info,
3548 +
3549 + sys_fillrect(info, rect);
3550 +
3551 +- dlfb_handle_damage(dlfb, rect->dx, rect->dy, rect->width,
3552 +- rect->height, info->screen_base);
3553 ++ dlfb_offload_damage(dlfb, rect->dx, rect->dy, rect->width,
3554 ++ rect->height);
3555 + }
3556 +
3557 + /*
3558 +@@ -739,17 +794,19 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
3559 + int bytes_identical = 0;
3560 + int bytes_rendered = 0;
3561 +
3562 ++ mutex_lock(&dlfb->render_mutex);
3563 ++
3564 + if (!fb_defio)
3565 +- return;
3566 ++ goto unlock_ret;
3567 +
3568 + if (!atomic_read(&dlfb->usb_active))
3569 +- return;
3570 ++ goto unlock_ret;
3571 +
3572 + start_cycles = get_cycles();
3573 +
3574 + urb = dlfb_get_urb(dlfb);
3575 + if (!urb)
3576 +- return;
3577 ++ goto unlock_ret;
3578 +
3579 + cmd = urb->transfer_buffer;
3580 +
3581 +@@ -782,6 +839,8 @@ error:
3582 + atomic_add(((unsigned int) ((end_cycles - start_cycles)
3583 + >> 10)), /* Kcycles */
3584 + &dlfb->cpu_kcycles_used);
3585 ++unlock_ret:
3586 ++ mutex_unlock(&dlfb->render_mutex);
3587 + }
3588 +
3589 + static int dlfb_get_edid(struct dlfb_data *dlfb, char *edid, int len)
3590 +@@ -859,8 +918,7 @@ static int dlfb_ops_ioctl(struct fb_info *info, unsigned int cmd,
3591 + if (area.y > info->var.yres)
3592 + area.y = info->var.yres;
3593 +
3594 +- dlfb_handle_damage(dlfb, area.x, area.y, area.w, area.h,
3595 +- info->screen_base);
3596 ++ dlfb_handle_damage(dlfb, area.x, area.y, area.w, area.h);
3597 + }
3598 +
3599 + return 0;
3600 +@@ -942,6 +1000,10 @@ static void dlfb_ops_destroy(struct fb_info *info)
3601 + {
3602 + struct dlfb_data *dlfb = info->par;
3603 +
3604 ++ cancel_work_sync(&dlfb->damage_work);
3605 ++
3606 ++ mutex_destroy(&dlfb->render_mutex);
3607 ++
3608 + if (info->cmap.len != 0)
3609 + fb_dealloc_cmap(&info->cmap);
3610 + if (info->monspecs.modedb)
3611 +@@ -1065,8 +1127,7 @@ static int dlfb_ops_set_par(struct fb_info *info)
3612 + pix_framebuffer[i] = 0x37e6;
3613 + }
3614 +
3615 +- dlfb_handle_damage(dlfb, 0, 0, info->var.xres, info->var.yres,
3616 +- info->screen_base);
3617 ++ dlfb_handle_damage(dlfb, 0, 0, info->var.xres, info->var.yres);
3618 +
3619 + return 0;
3620 + }
3621 +@@ -1639,6 +1700,11 @@ static int dlfb_usb_probe(struct usb_interface *intf,
3622 + dlfb->ops = dlfb_ops;
3623 + info->fbops = &dlfb->ops;
3624 +
3625 ++ mutex_init(&dlfb->render_mutex);
3626 ++ dlfb_init_damage(dlfb);
3627 ++ spin_lock_init(&dlfb->damage_lock);
3628 ++ INIT_WORK(&dlfb->damage_work, dlfb_damage_work);
3629 ++
3630 + INIT_LIST_HEAD(&info->modelist);
3631 +
3632 + if (!dlfb_alloc_urb_list(dlfb, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
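
The udlfb rework stops the drawing ops from rendering inline: dlfb_ops_copyarea(), _imageblit() and _fillrect() now only merge their rectangle into a pending bounding box under damage_lock and schedule damage_work, whose handler snapshots and resets the box before doing the USB rendering under the new render_mutex. Stripped of the locking, the bounding-box bookkeeping looks like this:

#include <limits.h>
#include <stdio.h>

struct damage { int x, y, x2, y2; };

static void damage_init(struct damage *d)
{
	d->x = INT_MAX;		/* empty box: min-coords start high... */
	d->y = INT_MAX;
	d->x2 = 0;		/* ...max-coords start low */
	d->y2 = 0;
}

/* grow the pending box; the driver does this under damage_lock */
static void damage_add(struct damage *d, int x, int y, int w, int h)
{
	int x2 = x + w, y2 = y + h;

	if (x >= x2 || y >= y2)
		return;		/* degenerate rectangle, ignore */
	if (x < d->x)   d->x = x;
	if (y < d->y)   d->y = y;
	if (x2 > d->x2) d->x2 = x2;
	if (y2 > d->y2) d->y2 = y2;
}

int main(void)
{
	struct damage d;

	damage_init(&d);
	damage_add(&d, 10, 10, 20, 20);
	damage_add(&d, 50, 5, 10, 10);
	if (d.x < d.x2 && d.y < d.y2)	/* the worker's flush condition */
		printf("flush (%d,%d)-(%d,%d)\n", d.x, d.y, d.x2, d.y2);
	damage_init(&d);		/* reset after taking the snapshot */
	return 0;
}

Coalescing bursts of small blits into one dlfb_handle_damage() call is the point: the expensive URB traffic happens once per work item, not once per console update.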
3633 +diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
3634 +index ddf028509931..351fa506dc9b 100644
3635 +--- a/fs/btrfs/relocation.c
3636 ++++ b/fs/btrfs/relocation.c
3637 +@@ -4667,14 +4667,12 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
3638 + void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
3639 + u64 *bytes_to_reserve)
3640 + {
3641 +- struct btrfs_root *root;
3642 +- struct reloc_control *rc;
3643 ++ struct btrfs_root *root = pending->root;
3644 ++ struct reloc_control *rc = root->fs_info->reloc_ctl;
3645 +
3646 +- root = pending->root;
3647 +- if (!root->reloc_root)
3648 ++ if (!root->reloc_root || !rc)
3649 + return;
3650 +
3651 +- rc = root->fs_info->reloc_ctl;
3652 + if (!rc->merge_reloc_tree)
3653 + return;
3654 +
3655 +@@ -4703,10 +4701,10 @@ int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
3656 + struct btrfs_root *root = pending->root;
3657 + struct btrfs_root *reloc_root;
3658 + struct btrfs_root *new_root;
3659 +- struct reloc_control *rc;
3660 ++ struct reloc_control *rc = root->fs_info->reloc_ctl;
3661 + int ret;
3662 +
3663 +- if (!root->reloc_root)
3664 ++ if (!root->reloc_root || !rc)
3665 + return 0;
3666 +
3667 + rc = root->fs_info->reloc_ctl;
3668 +diff --git a/fs/ceph/super.c b/fs/ceph/super.c
3669 +index 6d5bb2f74612..01113c86e469 100644
3670 +--- a/fs/ceph/super.c
3671 ++++ b/fs/ceph/super.c
3672 +@@ -845,6 +845,12 @@ static void ceph_umount_begin(struct super_block *sb)
3673 + return;
3674 + }
3675 +
3676 ++static int ceph_remount(struct super_block *sb, int *flags, char *data)
3677 ++{
3678 ++ sync_filesystem(sb);
3679 ++ return 0;
3680 ++}
3681 ++
3682 + static const struct super_operations ceph_super_ops = {
3683 + .alloc_inode = ceph_alloc_inode,
3684 + .destroy_inode = ceph_destroy_inode,
3685 +@@ -852,6 +858,7 @@ static const struct super_operations ceph_super_ops = {
3686 + .drop_inode = ceph_drop_inode,
3687 + .sync_fs = ceph_sync_fs,
3688 + .put_super = ceph_put_super,
3689 ++ .remount_fs = ceph_remount,
3690 + .show_options = ceph_show_options,
3691 + .statfs = ceph_statfs,
3692 + .umount_begin = ceph_umount_begin,
3693 +diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
3694 +index 585ad3207cb1..607468948f72 100644
3695 +--- a/fs/cifs/cifsglob.h
3696 ++++ b/fs/cifs/cifsglob.h
3697 +@@ -1687,6 +1687,7 @@ static inline bool is_retryable_error(int error)
3698 +
3699 + #define CIFS_HAS_CREDITS 0x0400 /* already has credits */
3700 + #define CIFS_TRANSFORM_REQ 0x0800 /* transform request before sending */
3701 ++#define CIFS_NO_SRV_RSP 0x1000 /* there is no server response */
3702 +
3703 + /* Security Flags: indicate type of session setup needed */
3704 + #define CIFSSEC_MAY_SIGN 0x00001
3705 +diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
3706 +index f43747c062a7..6050851edcb8 100644
3707 +--- a/fs/cifs/cifssmb.c
3708 ++++ b/fs/cifs/cifssmb.c
3709 +@@ -2540,7 +2540,7 @@ CIFSSMBLock(const unsigned int xid, struct cifs_tcon *tcon,
3710 +
3711 + if (lockType == LOCKING_ANDX_OPLOCK_RELEASE) {
3712 + /* no response expected */
3713 +- flags = CIFS_ASYNC_OP | CIFS_OBREAK_OP;
3714 ++ flags = CIFS_NO_SRV_RSP | CIFS_ASYNC_OP | CIFS_OBREAK_OP;
3715 + pSMB->Timeout = 0;
3716 + } else if (waitFlag) {
3717 + flags = CIFS_BLOCKING_OP; /* blocking operation, no timeout */
3718 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
3719 +index c36ff0d1fe2a..aa61dcf471b3 100644
3720 +--- a/fs/cifs/smb2ops.c
3721 ++++ b/fs/cifs/smb2ops.c
3722 +@@ -2917,26 +2917,28 @@ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3723 + unsigned int epoch, bool *purge_cache)
3724 + {
3725 + char message[5] = {0};
3726 ++ unsigned int new_oplock = 0;
3727 +
3728 + oplock &= 0xFF;
3729 + if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
3730 + return;
3731 +
3732 +- cinode->oplock = 0;
3733 + if (oplock & SMB2_LEASE_READ_CACHING_HE) {
3734 +- cinode->oplock |= CIFS_CACHE_READ_FLG;
3735 ++ new_oplock |= CIFS_CACHE_READ_FLG;
3736 + strcat(message, "R");
3737 + }
3738 + if (oplock & SMB2_LEASE_HANDLE_CACHING_HE) {
3739 +- cinode->oplock |= CIFS_CACHE_HANDLE_FLG;
3740 ++ new_oplock |= CIFS_CACHE_HANDLE_FLG;
3741 + strcat(message, "H");
3742 + }
3743 + if (oplock & SMB2_LEASE_WRITE_CACHING_HE) {
3744 +- cinode->oplock |= CIFS_CACHE_WRITE_FLG;
3745 ++ new_oplock |= CIFS_CACHE_WRITE_FLG;
3746 + strcat(message, "W");
3747 + }
3748 +- if (!cinode->oplock)
3749 +- strcat(message, "None");
3750 ++ if (!new_oplock)
3751 ++ strncpy(message, "None", sizeof(message));
3752 ++
3753 ++ cinode->oplock = new_oplock;
3754 + cifs_dbg(FYI, "%s Lease granted on inode %p\n", message,
3755 + &cinode->vfs_inode);
3756 + }
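
The smb21_set_oplock_level() fix is the compute-then-publish pattern: the old code zeroed cinode->oplock and rebuilt it flag by flag, leaving a window in which concurrent readers observed no caching rights at all; the new code accumulates into a local new_oplock and stores the final value once. In miniature (flag values invented for the sketch):

#include <stdio.h>
#include <string.h>

#define CACHE_READ	0x1u
#define CACHE_WRITE	0x2u

static unsigned int published;	/* stands in for cinode->oplock */

static void set_level(unsigned int lease, char *msg, size_t len)
{
	unsigned int new_oplock = 0;	/* build privately... */

	msg[0] = '\0';
	if (lease & CACHE_READ) {
		new_oplock |= CACHE_READ;
		strncat(msg, "R", len - strlen(msg) - 1);
	}
	if (lease & CACHE_WRITE) {
		new_oplock |= CACHE_WRITE;
		strncat(msg, "W", len - strlen(msg) - 1);
	}
	if (!new_oplock)
		strncpy(msg, "None", len);

	published = new_oplock;		/* ...publish with one store */
}

int main(void)
{
	char msg[8];

	set_level(CACHE_READ | CACHE_WRITE, msg, sizeof(msg));
	printf("%s -> %#x\n", msg, published);
	return 0;
}

The strncpy() of "None" also bounds the message write, replacing the unchecked strcat() of the old code.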
3757 +diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
3758 +index 1de8e996e566..72e242c49ca1 100644
3759 +--- a/fs/cifs/transport.c
3760 ++++ b/fs/cifs/transport.c
3761 +@@ -1054,8 +1054,11 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
3762 +
3763 + mutex_unlock(&ses->server->srv_mutex);
3764 +
3765 +- if (rc < 0) {
3766 +- /* Sending failed for some reason - return credits back */
3767 ++ /*
3768 ++ * If sending failed for some reason or it is an oplock break that we
3769 ++ * will not receive a response to - return credits back
3770 ++ */
3771 ++ if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
3772 + for (i = 0; i < num_rqst; i++)
3773 + add_credits(ses->server, &credits[i], optype);
3774 + goto out;
3775 +@@ -1076,9 +1079,6 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
3776 + smb311_update_preauth_hash(ses, rqst[0].rq_iov,
3777 + rqst[0].rq_nvec);
3778 +
3779 +- if ((flags & CIFS_TIMEOUT_MASK) == CIFS_ASYNC_OP)
3780 +- goto out;
3781 +-
3782 + for (i = 0; i < num_rqst; i++) {
3783 + rc = wait_for_response(ses->server, midQ[i]);
3784 + if (rc != 0)
3785 +diff --git a/fs/dcache.c b/fs/dcache.c
3786 +index aac41adf4743..c663c602f9ef 100644
3787 +--- a/fs/dcache.c
3788 ++++ b/fs/dcache.c
3789 +@@ -344,7 +344,7 @@ static void dentry_free(struct dentry *dentry)
3790 + }
3791 + }
3792 + /* if dentry was never visible to RCU, immediate free is OK */
3793 +- if (!(dentry->d_flags & DCACHE_RCUACCESS))
3794 ++ if (dentry->d_flags & DCACHE_NORCU)
3795 + __d_free(&dentry->d_u.d_rcu);
3796 + else
3797 + call_rcu(&dentry->d_u.d_rcu, __d_free);
3798 +@@ -1701,7 +1701,6 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
3799 + struct dentry *dentry = __d_alloc(parent->d_sb, name);
3800 + if (!dentry)
3801 + return NULL;
3802 +- dentry->d_flags |= DCACHE_RCUACCESS;
3803 + spin_lock(&parent->d_lock);
3804 + /*
3805 + * don't need child lock because it is not subject
3806 +@@ -1726,7 +1725,7 @@ struct dentry *d_alloc_cursor(struct dentry * parent)
3807 + {
3808 + struct dentry *dentry = d_alloc_anon(parent->d_sb);
3809 + if (dentry) {
3810 +- dentry->d_flags |= DCACHE_RCUACCESS | DCACHE_DENTRY_CURSOR;
3811 ++ dentry->d_flags |= DCACHE_DENTRY_CURSOR;
3812 + dentry->d_parent = dget(parent);
3813 + }
3814 + return dentry;
3815 +@@ -1739,10 +1738,17 @@ struct dentry *d_alloc_cursor(struct dentry * parent)
3816 + *
3817 + * For a filesystem that just pins its dentries in memory and never
3818 + * performs lookups at all, return an unhashed IS_ROOT dentry.
3819 ++ * This is used for pipes, sockets, et al. - the stuff that should
3820 ++ * never be anyone's children or parents. Unlike all other
3821 ++ * dentries, these will not have RCU delay between dropping the
3822 ++ * last reference and freeing them.
3823 + */
3824 + struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
3825 + {
3826 +- return __d_alloc(sb, name);
3827 ++ struct dentry *dentry = __d_alloc(sb, name);
3828 ++ if (likely(dentry))
3829 ++ dentry->d_flags |= DCACHE_NORCU;
3830 ++ return dentry;
3831 + }
3832 + EXPORT_SYMBOL(d_alloc_pseudo);
3833 +
3834 +@@ -1911,12 +1917,10 @@ struct dentry *d_make_root(struct inode *root_inode)
3835 +
3836 + if (root_inode) {
3837 + res = d_alloc_anon(root_inode->i_sb);
3838 +- if (res) {
3839 +- res->d_flags |= DCACHE_RCUACCESS;
3840 ++ if (res)
3841 + d_instantiate(res, root_inode);
3842 +- } else {
3843 ++ else
3844 + iput(root_inode);
3845 +- }
3846 + }
3847 + return res;
3848 + }
3849 +@@ -2781,9 +2785,7 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
3850 + copy_name(dentry, target);
3851 + target->d_hash.pprev = NULL;
3852 + dentry->d_parent->d_lockref.count++;
3853 +- if (dentry == old_parent)
3854 +- dentry->d_flags |= DCACHE_RCUACCESS;
3855 +- else
3856 ++ if (dentry != old_parent) /* wasn't IS_ROOT */
3857 + WARN_ON(!--old_parent->d_lockref.count);
3858 + } else {
3859 + target->d_parent = old_parent;
3860 +diff --git a/fs/fuse/file.c b/fs/fuse/file.c
3861 +index 06096b60f1df..92ee15dda4c7 100644
3862 +--- a/fs/fuse/file.c
3863 ++++ b/fs/fuse/file.c
3864 +@@ -178,7 +178,9 @@ void fuse_finish_open(struct inode *inode, struct file *file)
3865 +
3866 + if (!(ff->open_flags & FOPEN_KEEP_CACHE))
3867 + invalidate_inode_pages2(inode->i_mapping);
3868 +- if (ff->open_flags & FOPEN_NONSEEKABLE)
3869 ++ if (ff->open_flags & FOPEN_STREAM)
3870 ++ stream_open(inode, file);
3871 ++ else if (ff->open_flags & FOPEN_NONSEEKABLE)
3872 + nonseekable_open(inode, file);
3873 + if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
3874 + struct fuse_inode *fi = get_fuse_inode(inode);
3875 +@@ -1586,7 +1588,7 @@ __acquires(fi->lock)
3876 + {
3877 + struct fuse_conn *fc = get_fuse_conn(inode);
3878 + struct fuse_inode *fi = get_fuse_inode(inode);
3879 +- size_t crop = i_size_read(inode);
3880 ++ loff_t crop = i_size_read(inode);
3881 + struct fuse_req *req;
3882 +
3883 + while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
3884 +@@ -3044,6 +3046,13 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
3885 + }
3886 + }
3887 +
3888 ++ if (!(mode & FALLOC_FL_KEEP_SIZE) &&
3889 ++ offset + length > i_size_read(inode)) {
3890 ++ err = inode_newsize_ok(inode, offset + length);
3891 ++ if (err)
3892 ++ return err;
3893 ++ }
3894 ++
3895 + if (!(mode & FALLOC_FL_KEEP_SIZE))
3896 + set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
3897 +
3898 +diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
3899 +index 61f46facb39c..b3e8ba3bd654 100644
3900 +--- a/fs/nfs/filelayout/filelayout.c
3901 ++++ b/fs/nfs/filelayout/filelayout.c
3902 +@@ -904,7 +904,7 @@ fl_pnfs_update_layout(struct inode *ino,
3903 + status = filelayout_check_deviceid(lo, fl, gfp_flags);
3904 + if (status) {
3905 + pnfs_put_lseg(lseg);
3906 +- lseg = ERR_PTR(status);
3907 ++ lseg = NULL;
3908 + }
3909 + out:
3910 + return lseg;
3911 +diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
3912 +index 3de36479ed7a..f502f1c054cf 100644
3913 +--- a/fs/nfs/nfs4state.c
3914 ++++ b/fs/nfs/nfs4state.c
3915 +@@ -159,6 +159,10 @@ int nfs40_discover_server_trunking(struct nfs_client *clp,
3916 + /* Sustain the lease, even if it's empty. If the clientid4
3917 + * goes stale it's of no use for trunking discovery. */
3918 + nfs4_schedule_state_renewal(*result);
3919 ++
3920 ++ /* If the client state needs to recover, do it. */
3921 ++ if (clp->cl_state)
3922 ++ nfs4_schedule_state_manager(clp);
3923 + }
3924 + out:
3925 + return status;
3926 +diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
3927 +index df06f3da166c..e8d3f349b7f2 100644
3928 +--- a/fs/notify/fsnotify.c
3929 ++++ b/fs/notify/fsnotify.c
3930 +@@ -107,6 +107,47 @@ void fsnotify_sb_delete(struct super_block *sb)
3931 + fsnotify_clear_marks_by_sb(sb);
3932 + }
3933 +
3934 ++/*
3935 ++ * fsnotify_nameremove - a filename was removed from a directory
3936 ++ *
3937 ++ * This is mostly called under parent vfs inode lock so name and
3938 ++ * dentry->d_parent should be stable. However there are some corner cases where
3939 ++ * inode lock is not held. So to be on the safe side and be resilient to future
3940 ++ * callers and out-of-tree users of d_delete(), we do not assume that d_parent
3941 ++ * and d_name are stable and we use dget_parent() and
3942 ++ * take_dentry_name_snapshot() to grab stable references.
3943 ++ */
3944 ++void fsnotify_nameremove(struct dentry *dentry, int isdir)
3945 ++{
3946 ++ struct dentry *parent;
3947 ++ struct name_snapshot name;
3948 ++ __u32 mask = FS_DELETE;
3949 ++
3950 ++ /* d_delete() of pseudo inode? (e.g. __ns_get_path() playing tricks) */
3951 ++ if (IS_ROOT(dentry))
3952 ++ return;
3953 ++
3954 ++ if (isdir)
3955 ++ mask |= FS_ISDIR;
3956 ++
3957 ++ parent = dget_parent(dentry);
3958 ++ /* Avoid unneeded take_dentry_name_snapshot() */
3959 ++ if (!(d_inode(parent)->i_fsnotify_mask & FS_DELETE) &&
3960 ++ !(dentry->d_sb->s_fsnotify_mask & FS_DELETE))
3961 ++ goto out_dput;
3962 ++
3963 ++ take_dentry_name_snapshot(&name, dentry);
3964 ++
3965 ++ fsnotify(d_inode(parent), mask, d_inode(dentry), FSNOTIFY_EVENT_INODE,
3966 ++ name.name, 0);
3967 ++
3968 ++ release_dentry_name_snapshot(&name);
3969 ++
3970 ++out_dput:
3971 ++ dput(parent);
3972 ++}
3973 ++EXPORT_SYMBOL(fsnotify_nameremove);
3974 ++
3975 + /*
3976 + * Given an inode, first check if we care what happens to our children. Inotify
3977 + * and dnotify both tell their parents about events. If we care about any event
3978 +diff --git a/fs/nsfs.c b/fs/nsfs.c
3979 +index 60702d677bd4..30d150a4f0c6 100644
3980 +--- a/fs/nsfs.c
3981 ++++ b/fs/nsfs.c
3982 +@@ -85,13 +85,12 @@ slow:
3983 + inode->i_fop = &ns_file_operations;
3984 + inode->i_private = ns;
3985 +
3986 +- dentry = d_alloc_pseudo(mnt->mnt_sb, &empty_name);
3987 ++ dentry = d_alloc_anon(mnt->mnt_sb);
3988 + if (!dentry) {
3989 + iput(inode);
3990 + return ERR_PTR(-ENOMEM);
3991 + }
3992 + d_instantiate(dentry, inode);
3993 +- dentry->d_flags |= DCACHE_RCUACCESS;
3994 + dentry->d_fsdata = (void *)ns->ops;
3995 + d = atomic_long_cmpxchg(&ns->stashed, 0, (unsigned long)dentry);
3996 + if (d) {
3997 +diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
3998 +index 68b3303e4b46..56feaa739979 100644
3999 +--- a/fs/overlayfs/copy_up.c
4000 ++++ b/fs/overlayfs/copy_up.c
4001 +@@ -909,14 +909,14 @@ static bool ovl_open_need_copy_up(struct dentry *dentry, int flags)
4002 + return true;
4003 + }
4004 +
4005 +-int ovl_open_maybe_copy_up(struct dentry *dentry, unsigned int file_flags)
4006 ++int ovl_maybe_copy_up(struct dentry *dentry, int flags)
4007 + {
4008 + int err = 0;
4009 +
4010 +- if (ovl_open_need_copy_up(dentry, file_flags)) {
4011 ++ if (ovl_open_need_copy_up(dentry, flags)) {
4012 + err = ovl_want_write(dentry);
4013 + if (!err) {
4014 +- err = ovl_copy_up_flags(dentry, file_flags);
4015 ++ err = ovl_copy_up_flags(dentry, flags);
4016 + ovl_drop_write(dentry);
4017 + }
4018 + }
4019 +diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
4020 +index 84dd957efa24..50e4407398d8 100644
4021 +--- a/fs/overlayfs/file.c
4022 ++++ b/fs/overlayfs/file.c
4023 +@@ -116,11 +116,10 @@ static int ovl_real_fdget(const struct file *file, struct fd *real)
4024 +
4025 + static int ovl_open(struct inode *inode, struct file *file)
4026 + {
4027 +- struct dentry *dentry = file_dentry(file);
4028 + struct file *realfile;
4029 + int err;
4030 +
4031 +- err = ovl_open_maybe_copy_up(dentry, file->f_flags);
4032 ++ err = ovl_maybe_copy_up(file_dentry(file), file->f_flags);
4033 + if (err)
4034 + return err;
4035 +
4036 +@@ -390,7 +389,7 @@ static long ovl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4037 + if (ret)
4038 + return ret;
4039 +
4040 +- ret = ovl_copy_up_with_data(file_dentry(file));
4041 ++ ret = ovl_maybe_copy_up(file_dentry(file), O_WRONLY);
4042 + if (!ret) {
4043 + ret = ovl_real_ioctl(file, cmd, arg);
4044 +
4045 +diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
4046 +index 9c6018287d57..d26efed9f80a 100644
4047 +--- a/fs/overlayfs/overlayfs.h
4048 ++++ b/fs/overlayfs/overlayfs.h
4049 +@@ -421,7 +421,7 @@ extern const struct file_operations ovl_file_operations;
4050 + int ovl_copy_up(struct dentry *dentry);
4051 + int ovl_copy_up_with_data(struct dentry *dentry);
4052 + int ovl_copy_up_flags(struct dentry *dentry, int flags);
4053 +-int ovl_open_maybe_copy_up(struct dentry *dentry, unsigned int file_flags);
4054 ++int ovl_maybe_copy_up(struct dentry *dentry, int flags);
4055 + int ovl_copy_xattr(struct dentry *old, struct dentry *new);
4056 + int ovl_set_attr(struct dentry *upper, struct kstat *stat);
4057 + struct ovl_fh *ovl_encode_real_fh(struct dentry *real, bool is_upper);
4058 +diff --git a/fs/proc/base.c b/fs/proc/base.c
4059 +index 6a803a0b75df..0c9bef89ac43 100644
4060 +--- a/fs/proc/base.c
4061 ++++ b/fs/proc/base.c
4062 +@@ -2540,6 +2540,11 @@ static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
4063 + rcu_read_unlock();
4064 + return -EACCES;
4065 + }
4066 ++ /* Prevent changes to overridden credentials. */
4067 ++ if (current_cred() != current_real_cred()) {
4068 ++ rcu_read_unlock();
4069 ++ return -EBUSY;
4070 ++ }
4071 + rcu_read_unlock();
4072 +
4073 + if (count > PAGE_SIZE)
4074 +diff --git a/include/asm-generic/mm_hooks.h b/include/asm-generic/mm_hooks.h
4075 +index 8ac4e68a12f0..6736ed2f632b 100644
4076 +--- a/include/asm-generic/mm_hooks.h
4077 ++++ b/include/asm-generic/mm_hooks.h
4078 +@@ -18,7 +18,6 @@ static inline void arch_exit_mmap(struct mm_struct *mm)
4079 + }
4080 +
4081 + static inline void arch_unmap(struct mm_struct *mm,
4082 +- struct vm_area_struct *vma,
4083 + unsigned long start, unsigned long end)
4084 + {
4085 + }
4086 +diff --git a/include/linux/bpf.h b/include/linux/bpf.h
4087 +index 944ccc310201..ac721fc5f95e 100644
4088 +--- a/include/linux/bpf.h
4089 ++++ b/include/linux/bpf.h
4090 +@@ -36,6 +36,7 @@ struct bpf_map_ops {
4091 + void (*map_free)(struct bpf_map *map);
4092 + int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
4093 + void (*map_release_uref)(struct bpf_map *map);
4094 ++ void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
4095 +
4096 + /* funcs callable from userspace and from eBPF programs */
4097 + void *(*map_lookup_elem)(struct bpf_map *map, void *key);
4098 +diff --git a/include/linux/dcache.h b/include/linux/dcache.h
4099 +index 60996e64c579..6e1e8e6602c6 100644
4100 +--- a/include/linux/dcache.h
4101 ++++ b/include/linux/dcache.h
4102 +@@ -176,7 +176,6 @@ struct dentry_operations {
4103 + * typically using d_splice_alias. */
4104 +
4105 + #define DCACHE_REFERENCED 0x00000040 /* Recently used, don't discard. */
4106 +-#define DCACHE_RCUACCESS 0x00000080 /* Entry has ever been RCU-visible */
4107 +
4108 + #define DCACHE_CANT_MOUNT 0x00000100
4109 + #define DCACHE_GENOCIDE 0x00000200
4110 +@@ -217,6 +216,7 @@ struct dentry_operations {
4111 +
4112 + #define DCACHE_PAR_LOOKUP 0x10000000 /* being looked up (with parent locked shared) */
4113 + #define DCACHE_DENTRY_CURSOR 0x20000000
4114 ++#define DCACHE_NORCU 0x40000000 /* No RCU delay for freeing */
4115 +
4116 + extern seqlock_t rename_lock;
4117 +
4118 +diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
4119 +index 09587e2860b5..e30d6132c633 100644
4120 +--- a/include/linux/fsnotify.h
4121 ++++ b/include/linux/fsnotify.h
4122 +@@ -151,39 +151,6 @@ static inline void fsnotify_vfsmount_delete(struct vfsmount *mnt)
4123 + __fsnotify_vfsmount_delete(mnt);
4124 + }
4125 +
4126 +-/*
4127 +- * fsnotify_nameremove - a filename was removed from a directory
4128 +- *
4129 +- * This is mostly called under parent vfs inode lock so name and
4130 +- * dentry->d_parent should be stable. However there are some corner cases where
4131 +- * inode lock is not held. So to be on the safe side and be reselient to future
4132 +- * callers and out of tree users of d_delete(), we do not assume that d_parent
4133 +- * and d_name are stable and we use dget_parent() and
4134 +- * take_dentry_name_snapshot() to grab stable references.
4135 +- */
4136 +-static inline void fsnotify_nameremove(struct dentry *dentry, int isdir)
4137 +-{
4138 +- struct dentry *parent;
4139 +- struct name_snapshot name;
4140 +- __u32 mask = FS_DELETE;
4141 +-
4142 +- /* d_delete() of pseudo inode? (e.g. __ns_get_path() playing tricks) */
4143 +- if (IS_ROOT(dentry))
4144 +- return;
4145 +-
4146 +- if (isdir)
4147 +- mask |= FS_ISDIR;
4148 +-
4149 +- parent = dget_parent(dentry);
4150 +- take_dentry_name_snapshot(&name, dentry);
4151 +-
4152 +- fsnotify(d_inode(parent), mask, d_inode(dentry), FSNOTIFY_EVENT_INODE,
4153 +- name.name, 0);
4154 +-
4155 +- release_dentry_name_snapshot(&name);
4156 +- dput(parent);
4157 +-}
4158 +-
4159 + /*
4160 + * fsnotify_inoderemove - an inode is going away
4161 + */
4162 +diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
4163 +index dfc28fcb4de8..094b38f2d9a1 100644
4164 +--- a/include/linux/fsnotify_backend.h
4165 ++++ b/include/linux/fsnotify_backend.h
4166 +@@ -355,6 +355,7 @@ extern int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u
4167 + extern void __fsnotify_inode_delete(struct inode *inode);
4168 + extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt);
4169 + extern void fsnotify_sb_delete(struct super_block *sb);
4170 ++extern void fsnotify_nameremove(struct dentry *dentry, int isdir);
4171 + extern u32 fsnotify_get_cookie(void);
4172 +
4173 + static inline int fsnotify_inode_watches_children(struct inode *inode)
4174 +@@ -524,6 +525,9 @@ static inline void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
4175 + static inline void fsnotify_sb_delete(struct super_block *sb)
4176 + {}
4177 +
4178 ++static inline void fsnotify_nameremove(struct dentry *dentry, int isdir)
4179 ++{}
4180 ++
4181 + static inline void fsnotify_update_flags(struct dentry *dentry)
4182 + {}
4183 +
4184 +diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
4185 +index 0d0729648844..9ffc53acaec1 100644
4186 +--- a/include/linux/mlx5/driver.h
4187 ++++ b/include/linux/mlx5/driver.h
4188 +@@ -681,7 +681,6 @@ struct mlx5_core_dev {
4189 + #endif
4190 + struct mlx5_clock clock;
4191 + struct mlx5_ib_clock_info *clock_info;
4192 +- struct page *clock_info_page;
4193 + struct mlx5_fw_tracer *tracer;
4194 + };
4195 +
4196 +diff --git a/include/linux/of.h b/include/linux/of.h
4197 +index e240992e5cb6..074913002e39 100644
4198 +--- a/include/linux/of.h
4199 ++++ b/include/linux/of.h
4200 +@@ -234,8 +234,8 @@ extern struct device_node *of_find_all_nodes(struct device_node *prev);
4201 + static inline u64 of_read_number(const __be32 *cell, int size)
4202 + {
4203 + u64 r = 0;
4204 +- while (size--)
4205 +- r = (r << 32) | be32_to_cpu(*(cell++));
4206 ++ for (; size--; cell++)
4207 ++ r = (r << 32) | be32_to_cpu(*cell);
4208 + return r;
4209 + }
4210 +
4211 +diff --git a/include/linux/pci.h b/include/linux/pci.h
4212 +index 77448215ef5b..2c056a7a728a 100644
4213 +--- a/include/linux/pci.h
4214 ++++ b/include/linux/pci.h
4215 +@@ -348,6 +348,8 @@ struct pci_dev {
4216 + unsigned int hotplug_user_indicators:1; /* SlotCtl indicators
4217 + controlled exclusively by
4218 + user sysfs */
4219 ++ unsigned int clear_retrain_link:1; /* Need to clear Retrain Link
4220 ++ bit manually */
4221 + unsigned int d3_delay; /* D3->D0 transition time in ms */
4222 + unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
4223 +
4224 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
4225 +index 9027a8c4219f..20a4c2280308 100644
4226 +--- a/include/linux/skbuff.h
4227 ++++ b/include/linux/skbuff.h
4228 +@@ -1425,10 +1425,12 @@ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
4229 + struct ubuf_info *uarg = skb_zcopy(skb);
4230 +
4231 + if (uarg) {
4232 +- if (uarg->callback == sock_zerocopy_callback) {
4233 ++ if (skb_zcopy_is_nouarg(skb)) {
4234 ++ /* no notification callback */
4235 ++ } else if (uarg->callback == sock_zerocopy_callback) {
4236 + uarg->zerocopy = uarg->zerocopy && zerocopy;
4237 + sock_zerocopy_put(uarg);
4238 +- } else if (!skb_zcopy_is_nouarg(skb)) {
4239 ++ } else {
4240 + uarg->callback(uarg, zerocopy);
4241 + }
4242 +
4243 +@@ -2683,7 +2685,8 @@ static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
4244 + {
4245 + if (likely(!skb_zcopy(skb)))
4246 + return 0;
4247 +- if (skb_uarg(skb)->callback == sock_zerocopy_callback)
4248 ++ if (!skb_zcopy_is_nouarg(skb) &&
4249 ++ skb_uarg(skb)->callback == sock_zerocopy_callback)
4250 + return 0;
4251 + return skb_copy_ubufs(skb, gfp_mask);
4252 + }
4253 +diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
4254 +index d035183c8d03..cc32b9d9ecec 100644
4255 +--- a/include/net/flow_offload.h
4256 ++++ b/include/net/flow_offload.h
4257 +@@ -71,6 +71,8 @@ void flow_rule_match_eth_addrs(const struct flow_rule *rule,
4258 + struct flow_match_eth_addrs *out);
4259 + void flow_rule_match_vlan(const struct flow_rule *rule,
4260 + struct flow_match_vlan *out);
4261 ++void flow_rule_match_cvlan(const struct flow_rule *rule,
4262 ++ struct flow_match_vlan *out);
4263 + void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
4264 + struct flow_match_ipv4_addrs *out);
4265 + void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
4266 +diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
4267 +index 84097010237c..b5e3add90e99 100644
4268 +--- a/include/net/ip6_fib.h
4269 ++++ b/include/net/ip6_fib.h
4270 +@@ -171,7 +171,8 @@ struct fib6_info {
4271 + dst_nocount:1,
4272 + dst_nopolicy:1,
4273 + dst_host:1,
4274 +- unused:3;
4275 ++ fib6_destroying:1,
4276 ++ unused:2;
4277 +
4278 + struct fib6_nh fib6_nh;
4279 + struct rcu_head rcu;
4280 +diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
4281 +index 2ac598614a8f..56a8fb4e1222 100644
4282 +--- a/include/uapi/linux/fuse.h
4283 ++++ b/include/uapi/linux/fuse.h
4284 +@@ -229,11 +229,13 @@ struct fuse_file_lock {
4285 + * FOPEN_KEEP_CACHE: don't invalidate the data cache on open
4286 + * FOPEN_NONSEEKABLE: the file is not seekable
4287 + * FOPEN_CACHE_DIR: allow caching this directory
4288 ++ * FOPEN_STREAM: the file is stream-like (no file position at all)
4289 + */
4290 + #define FOPEN_DIRECT_IO (1 << 0)
4291 + #define FOPEN_KEEP_CACHE (1 << 1)
4292 + #define FOPEN_NONSEEKABLE (1 << 2)
4293 + #define FOPEN_CACHE_DIR (1 << 3)
4294 ++#define FOPEN_STREAM (1 << 4)
4295 +
4296 + /**
4297 + * INIT request/reply flags
4298 +diff --git a/include/video/udlfb.h b/include/video/udlfb.h
4299 +index 7d09e54ae54e..58fb5732831a 100644
4300 +--- a/include/video/udlfb.h
4301 ++++ b/include/video/udlfb.h
4302 +@@ -48,6 +48,13 @@ struct dlfb_data {
4303 + int base8;
4304 + u32 pseudo_palette[256];
4305 + int blank_mode; /*one of FB_BLANK_ */
4306 ++ struct mutex render_mutex;
4307 ++ int damage_x;
4308 ++ int damage_y;
4309 ++ int damage_x2;
4310 ++ int damage_y2;
4311 ++ spinlock_t damage_lock;
4312 ++ struct work_struct damage_work;
4313 + struct fb_ops ops;
4314 + /* blit-only rendering path metrics, exposed through sysfs */
4315 + atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
4316 +diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
4317 +index fed15cf94dca..f79b4aa0a4af 100644
4318 +--- a/kernel/bpf/hashtab.c
4319 ++++ b/kernel/bpf/hashtab.c
4320 +@@ -527,18 +527,30 @@ static u32 htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
4321 + return insn - insn_buf;
4322 + }
4323 +
4324 +-static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
4325 ++static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map,
4326 ++ void *key, const bool mark)
4327 + {
4328 + struct htab_elem *l = __htab_map_lookup_elem(map, key);
4329 +
4330 + if (l) {
4331 +- bpf_lru_node_set_ref(&l->lru_node);
4332 ++ if (mark)
4333 ++ bpf_lru_node_set_ref(&l->lru_node);
4334 + return l->key + round_up(map->key_size, 8);
4335 + }
4336 +
4337 + return NULL;
4338 + }
4339 +
4340 ++static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
4341 ++{
4342 ++ return __htab_lru_map_lookup_elem(map, key, true);
4343 ++}
4344 ++
4345 ++static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key)
4346 ++{
4347 ++ return __htab_lru_map_lookup_elem(map, key, false);
4348 ++}
4349 ++
4350 + static u32 htab_lru_map_gen_lookup(struct bpf_map *map,
4351 + struct bpf_insn *insn_buf)
4352 + {
4353 +@@ -1250,6 +1262,7 @@ const struct bpf_map_ops htab_lru_map_ops = {
4354 + .map_free = htab_map_free,
4355 + .map_get_next_key = htab_map_get_next_key,
4356 + .map_lookup_elem = htab_lru_map_lookup_elem,
4357 ++ .map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys,
4358 + .map_update_elem = htab_lru_map_update_elem,
4359 + .map_delete_elem = htab_lru_map_delete_elem,
4360 + .map_gen_lookup = htab_lru_map_gen_lookup,
4361 +@@ -1281,7 +1294,6 @@ static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
4362 +
4363 + int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
4364 + {
4365 +- struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
4366 + struct htab_elem *l;
4367 + void __percpu *pptr;
4368 + int ret = -ENOENT;
4369 +@@ -1297,8 +1309,9 @@ int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
4370 + l = __htab_map_lookup_elem(map, key);
4371 + if (!l)
4372 + goto out;
4373 +- if (htab_is_lru(htab))
4374 +- bpf_lru_node_set_ref(&l->lru_node);
4375 ++ /* We do not mark the LRU map element here in order to not mess up
4376 ++ * eviction heuristics when user space does a map walk.
4377 ++ */
4378 + pptr = htab_elem_get_ptr(l, map->key_size);
4379 + for_each_possible_cpu(cpu) {
4380 + bpf_long_memcpy(value + off,
4381 +diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
4382 +index 4a8f390a2b82..dc9d7ac8228d 100644
4383 +--- a/kernel/bpf/inode.c
4384 ++++ b/kernel/bpf/inode.c
4385 +@@ -518,7 +518,7 @@ out:
4386 + static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type type)
4387 + {
4388 + struct bpf_prog *prog;
4389 +- int ret = inode_permission(inode, MAY_READ | MAY_WRITE);
4390 ++ int ret = inode_permission(inode, MAY_READ);
4391 + if (ret)
4392 + return ERR_PTR(ret);
4393 +
4394 +diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
4395 +index afca36f53c49..db6e825e2958 100644
4396 +--- a/kernel/bpf/syscall.c
4397 ++++ b/kernel/bpf/syscall.c
4398 +@@ -773,7 +773,10 @@ static int map_lookup_elem(union bpf_attr *attr)
4399 + err = map->ops->map_peek_elem(map, value);
4400 + } else {
4401 + rcu_read_lock();
4402 +- ptr = map->ops->map_lookup_elem(map, key);
4403 ++ if (map->ops->map_lookup_elem_sys_only)
4404 ++ ptr = map->ops->map_lookup_elem_sys_only(map, key);
4405 ++ else
4406 ++ ptr = map->ops->map_lookup_elem(map, key);
4407 + if (IS_ERR(ptr)) {
4408 + err = PTR_ERR(ptr);
4409 + } else if (!ptr) {
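
The three bpf hunks cooperate: LRU hash maps grow a map_lookup_elem_sys_only() op that looks an element up without bpf_lru_node_set_ref(), the percpu copy path drops its explicit ref-marking, and the map_lookup_elem() syscall prefers the side-effect-free op when a map provides one — so a userspace walk over the map no longer promotes every element and distorts eviction. The dispatch is a plain optional override:

#include <stdio.h>
#include <stddef.h>

struct map_ops {
	int (*lookup)(int key);		 /* datapath: marks the LRU ref */
	int (*lookup_sys_only)(int key); /* optional: no LRU side effect */
};

static int lookup_mark(int key)
{
	printf("  marked ref on key %d\n", key);
	return key;
}

static int lookup_nomark(int key)
{
	return key;
}

/* mirrors the syscall hunk: prefer the side-effect-free variant */
static int syscall_lookup(const struct map_ops *ops, int key)
{
	if (ops->lookup_sys_only)
		return ops->lookup_sys_only(key);
	return ops->lookup(key);
}

int main(void)
{
	struct map_ops lru   = { lookup_mark, lookup_nomark };
	struct map_ops plain = { lookup_mark, NULL };

	syscall_lookup(&lru, 1);	/* silent: eviction state untouched */
	syscall_lookup(&plain, 2);	/* falls back to the marking lookup */
	return 0;
}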
4410 +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
4411 +index 5b3b0c3c8a47..d910e36c34b5 100644
4412 +--- a/kernel/trace/trace_events.c
4413 ++++ b/kernel/trace/trace_events.c
4414 +@@ -1318,9 +1318,6 @@ event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
4415 + char buf[32];
4416 + int len;
4417 +
4418 +- if (*ppos)
4419 +- return 0;
4420 +-
4421 + if (unlikely(!id))
4422 + return -ENODEV;
4423 +
4424 +diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
4425 +index 8f8411e7835f..e41d389b7f49 100644
4426 +--- a/kernel/trace/trace_probe.c
4427 ++++ b/kernel/trace/trace_probe.c
4428 +@@ -420,13 +420,14 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size,
4429 + return -E2BIG;
4430 + }
4431 + }
4432 +- /*
4433 +- * The default type of $comm should be "string", and it can't be
4434 +- * dereferenced.
4435 +- */
4436 +- if (!t && strcmp(arg, "$comm") == 0)
4437 ++
4438 ++ /* Since $comm cannot be dereferenced, we can find $comm by strcmp */
4439 ++ if (strcmp(arg, "$comm") == 0) {
4440 ++ /* The type of $comm must be "string", and not an array. */
4441 ++ if (parg->count || (t && strcmp(t, "string")))
4442 ++ return -EINVAL;
4443 + parg->type = find_fetch_type("string");
4444 +- else
4445 ++ } else
4446 + parg->type = find_fetch_type(t);
4447 + if (!parg->type) {
4448 + pr_info("Unsupported type: %s\n", t);
4449 +diff --git a/mm/mmap.c b/mm/mmap.c
4450 +index bd7b9f293b39..2d6a6662edb9 100644
4451 +--- a/mm/mmap.c
4452 ++++ b/mm/mmap.c
4453 +@@ -2735,9 +2735,17 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
4454 + return -EINVAL;
4455 +
4456 + len = PAGE_ALIGN(len);
4457 ++ end = start + len;
4458 + if (len == 0)
4459 + return -EINVAL;
4460 +
4461 ++ /*
4462 ++ * arch_unmap() might do unmaps itself, so it must be called
4463 ++ * first and must finish any rbtree manipulation of its own
4464 ++ * before this code runs and starts to manipulate the rbtree.
4465 ++ */
4466 ++ arch_unmap(mm, start, end);
4467 ++
4468 + /* Find the first overlapping VMA */
4469 + vma = find_vma(mm, start);
4470 + if (!vma)
4471 +@@ -2746,7 +2754,6 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
4472 + /* we have start < vma->vm_end */
4473 +
4474 + /* if it doesn't overlap, we have nothing.. */
4475 +- end = start + len;
4476 + if (vma->vm_start >= end)
4477 + return 0;
4478 +
4479 +@@ -2816,12 +2823,6 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
4480 + /* Detach vmas from rbtree */
4481 + detach_vmas_to_be_unmapped(mm, vma, prev, end);
4482 +
4483 +- /*
4484 +- * mpx unmap needs to be called with mmap_sem held for write.
4485 +- * It is safe to call it before unmap_region().
4486 +- */
4487 +- arch_unmap(mm, vma, start, end);
4488 +-
4489 + if (downgrade)
4490 + downgrade_write(&mm->mmap_sem);
4491 +
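
The reordering above is purely about who touches the VMA rbtree first: arch_unmap() may drop mappings of its own (the old MPX bounds tables did), so it has to run to completion before the generic code starts detaching VMAs. A compressed sketch of the constraint, with hypothetical names:

#include <stdio.h>

struct range { unsigned long start, end; };

/* May remove entries of its own, like the old MPX bounds tables. */
static void arch_hook(struct range *r)
{
	(void)r;
}

static int do_unmap(struct range *r)
{
	arch_hook(r);	/* must fully finish before we touch the tree */
	/* ... only now find, split and detach the overlapping VMAs ... */
	return 0;
}

int main(void)
{
	struct range r = { 0x1000, 0x2000 };

	printf("%d\n", do_unmap(&r));
	return 0;
}
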
4492 +diff --git a/net/core/dev.c b/net/core/dev.c
4493 +index f409406254dd..255f99cb7c48 100644
4494 +--- a/net/core/dev.c
4495 ++++ b/net/core/dev.c
4496 +@@ -8911,7 +8911,7 @@ static void netdev_wait_allrefs(struct net_device *dev)
4497 +
4498 + refcnt = netdev_refcnt_read(dev);
4499 +
4500 +- if (time_after(jiffies, warning_time + 10 * HZ)) {
4501 ++ if (refcnt && time_after(jiffies, warning_time + 10 * HZ)) {
4502 + pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
4503 + dev->name, refcnt);
4504 + warning_time = jiffies;
4505 +diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c
4506 +index c3a00eac4804..5ce7d47a960e 100644
4507 +--- a/net/core/flow_offload.c
4508 ++++ b/net/core/flow_offload.c
4509 +@@ -54,6 +54,13 @@ void flow_rule_match_vlan(const struct flow_rule *rule,
4510 + }
4511 + EXPORT_SYMBOL(flow_rule_match_vlan);
4512 +
4513 ++void flow_rule_match_cvlan(const struct flow_rule *rule,
4514 ++ struct flow_match_vlan *out)
4515 ++{
4516 ++ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CVLAN, out);
4517 ++}
4518 ++EXPORT_SYMBOL(flow_rule_match_cvlan);
4519 ++
4520 + void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
4521 + struct flow_match_ipv4_addrs *out)
4522 + {
4523 +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
4524 +index 220c56e93659..467d771ac6ba 100644
4525 +--- a/net/core/rtnetlink.c
4526 ++++ b/net/core/rtnetlink.c
4527 +@@ -1496,14 +1496,15 @@ static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
4528 + return ret;
4529 + }
4530 +
4531 +-static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev)
4532 ++static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
4533 ++ bool force)
4534 + {
4535 + int ifindex = dev_get_iflink(dev);
4536 +
4537 +- if (dev->ifindex == ifindex)
4538 +- return 0;
4539 ++ if (force || dev->ifindex != ifindex)
4540 ++ return nla_put_u32(skb, IFLA_LINK, ifindex);
4541 +
4542 +- return nla_put_u32(skb, IFLA_LINK, ifindex);
4543 ++ return 0;
4544 + }
4545 +
4546 + static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
4547 +@@ -1520,6 +1521,8 @@ static int rtnl_fill_link_netnsid(struct sk_buff *skb,
4548 + const struct net_device *dev,
4549 + struct net *src_net)
4550 + {
4551 ++ bool put_iflink = false;
4552 ++
4553 + if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
4554 + struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
4555 +
4556 +@@ -1528,10 +1531,12 @@ static int rtnl_fill_link_netnsid(struct sk_buff *skb,
4557 +
4558 + if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
4559 + return -EMSGSIZE;
4560 ++
4561 ++ put_iflink = true;
4562 + }
4563 + }
4564 +
4565 +- return 0;
4566 ++ return nla_put_iflink(skb, dev, put_iflink);
4567 + }
4568 +
4569 + static int rtnl_fill_link_af(struct sk_buff *skb,
4570 +@@ -1617,7 +1622,6 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
4571 + #ifdef CONFIG_RPS
4572 + nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
4573 + #endif
4574 +- nla_put_iflink(skb, dev) ||
4575 + put_master_ifindex(skb, dev) ||
4576 + nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
4577 + (dev->qdisc &&
4578 +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
4579 +index 91247a6fc67f..9915f64b38a0 100644
4580 +--- a/net/ipv6/ip6_fib.c
4581 ++++ b/net/ipv6/ip6_fib.c
4582 +@@ -909,6 +909,12 @@ static void fib6_drop_pcpu_from(struct fib6_info *f6i,
4583 + {
4584 + int cpu;
4585 +
4586 ++ /* Make sure rt6_make_pcpu_route() won't add other percpu routes
4587 ++ * while we are cleaning them here.
4588 ++ */
4589 ++ f6i->fib6_destroying = 1;
4590 ++ mb(); /* paired with the cmpxchg() in rt6_make_pcpu_route() */
4591 ++
4592 + /* release the reference to this fib entry from
4593 + * all of its cached pcpu routes
4594 + */
4595 +@@ -932,6 +938,9 @@ static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn,
4596 + {
4597 + struct fib6_table *table = rt->fib6_table;
4598 +
4599 ++ if (rt->rt6i_pcpu)
4600 ++ fib6_drop_pcpu_from(rt, table);
4601 ++
4602 + if (atomic_read(&rt->fib6_ref) != 1) {
4603 + /* This route is used as dummy address holder in some split
4604 + * nodes. It is not leaked, but it still holds other resources,
4605 +@@ -953,9 +962,6 @@ static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn,
4606 + fn = rcu_dereference_protected(fn->parent,
4607 + lockdep_is_held(&table->tb6_lock));
4608 + }
4609 +-
4610 +- if (rt->rt6i_pcpu)
4611 +- fib6_drop_pcpu_from(rt, table);
4612 + }
4613 + }
4614 +
4615 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
4616 +index 0520aca3354b..e470589fb93b 100644
4617 +--- a/net/ipv6/route.c
4618 ++++ b/net/ipv6/route.c
4619 +@@ -110,8 +110,8 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
4620 + int iif, int type, u32 portid, u32 seq,
4621 + unsigned int flags);
4622 + static struct rt6_info *rt6_find_cached_rt(struct fib6_info *rt,
4623 +- struct in6_addr *daddr,
4624 +- struct in6_addr *saddr);
4625 ++ const struct in6_addr *daddr,
4626 ++ const struct in6_addr *saddr);
4627 +
4628 + #ifdef CONFIG_IPV6_ROUTE_INFO
4629 + static struct fib6_info *rt6_add_route_info(struct net *net,
4630 +@@ -1260,6 +1260,13 @@ static struct rt6_info *rt6_make_pcpu_route(struct net *net,
4631 + prev = cmpxchg(p, NULL, pcpu_rt);
4632 + BUG_ON(prev);
4633 +
4634 ++ if (rt->fib6_destroying) {
4635 ++ struct fib6_info *from;
4636 ++
4637 ++ from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
4638 ++ fib6_info_release(from);
4639 ++ }
4640 ++
4641 + return pcpu_rt;
4642 + }
4643 +
4644 +@@ -1529,31 +1536,44 @@ out:
4645 + * Caller has to hold rcu_read_lock()
4646 + */
4647 + static struct rt6_info *rt6_find_cached_rt(struct fib6_info *rt,
4648 +- struct in6_addr *daddr,
4649 +- struct in6_addr *saddr)
4650 ++ const struct in6_addr *daddr,
4651 ++ const struct in6_addr *saddr)
4652 + {
4653 ++ const struct in6_addr *src_key = NULL;
4654 + struct rt6_exception_bucket *bucket;
4655 +- struct in6_addr *src_key = NULL;
4656 + struct rt6_exception *rt6_ex;
4657 + struct rt6_info *res = NULL;
4658 +
4659 +- bucket = rcu_dereference(rt->rt6i_exception_bucket);
4660 +-
4661 + #ifdef CONFIG_IPV6_SUBTREES
4662 + /* rt6i_src.plen != 0 indicates rt is in subtree
4663 + * and exception table is indexed by a hash of
4664 + * both rt6i_dst and rt6i_src.
4665 +- * Otherwise, the exception table is indexed by
4666 +- * a hash of only rt6i_dst.
4667 ++ * However, the src addr used to create the hash
4668 ++ * might not be exactly the passed in saddr which
4669 ++ * is a /128 addr from the flow.
4670 ++ * So we need to use f6i->fib6_src to redo lookup
4671 ++ * if the passed in saddr does not find anything.
4672 ++ * (See the logic in ip6_rt_cache_alloc() on how
4673 ++ * rt->rt6i_src is updated.)
4674 + */
4675 + if (rt->fib6_src.plen)
4676 + src_key = saddr;
4677 ++find_ex:
4678 + #endif
4679 ++ bucket = rcu_dereference(rt->rt6i_exception_bucket);
4680 + rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);
4681 +
4682 + if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
4683 + res = rt6_ex->rt6i;
4684 +
4685 ++#ifdef CONFIG_IPV6_SUBTREES
4686 ++ /* Use fib6_src as src_key and redo lookup */
4687 ++ if (!res && src_key && src_key != &rt->fib6_src.addr) {
4688 ++ src_key = &rt->fib6_src.addr;
4689 ++ goto find_ex;
4690 ++ }
4691 ++#endif
4692 ++
4693 + return res;
4694 + }
4695 +
4696 +@@ -2608,10 +2628,8 @@ out:
4697 + u32 ip6_mtu_from_fib6(struct fib6_info *f6i, struct in6_addr *daddr,
4698 + struct in6_addr *saddr)
4699 + {
4700 +- struct rt6_exception_bucket *bucket;
4701 +- struct rt6_exception *rt6_ex;
4702 +- struct in6_addr *src_key;
4703 + struct inet6_dev *idev;
4704 ++ struct rt6_info *rt;
4705 + u32 mtu = 0;
4706 +
4707 + if (unlikely(fib6_metric_locked(f6i, RTAX_MTU))) {
4708 +@@ -2620,18 +2638,10 @@ u32 ip6_mtu_from_fib6(struct fib6_info *f6i, struct in6_addr *daddr,
4709 + goto out;
4710 + }
4711 +
4712 +- src_key = NULL;
4713 +-#ifdef CONFIG_IPV6_SUBTREES
4714 +- if (f6i->fib6_src.plen)
4715 +- src_key = saddr;
4716 +-#endif
4717 +-
4718 +- bucket = rcu_dereference(f6i->rt6i_exception_bucket);
4719 +- rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);
4720 +- if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
4721 +- mtu = dst_metric_raw(&rt6_ex->rt6i->dst, RTAX_MTU);
4722 +-
4723 +- if (likely(!mtu)) {
4724 ++ rt = rt6_find_cached_rt(f6i, daddr, saddr);
4725 ++ if (unlikely(rt)) {
4726 ++ mtu = dst_metric_raw(&rt->dst, RTAX_MTU);
4727 ++ } else {
4728 + struct net_device *dev = fib6_info_nh_dev(f6i);
4729 +
4730 + mtu = IPV6_MIN_MTU;
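
Both route.c changes above lean on the same observation: an exception entry may have been keyed by the route's own fib6_src prefix address rather than the flow's /128 source, so a miss on the flow key deserves one retry with the stored key (and ip6_mtu_from_fib6() now simply reuses rt6_find_cached_rt() instead of duplicating that logic). A toy C sketch of the two-pass lookup, with a hypothetical one-entry table:

#include <stdio.h>
#include <string.h>

static const char *fib6_src = "2001:db8::";	/* route's own src prefix */
static const char *entry_key = "2001:db8::";	/* key the exception used */

static int find_exception(const char *key)
{
	return strcmp(key, entry_key) == 0;
}

static int cached_lookup(const char *flow_saddr)
{
	const char *key = flow_saddr;		/* pass 1: the flow's /128 */

	if (find_exception(key))
		return 1;
	if (strcmp(key, fib6_src) != 0)		/* pass 2: the stored key */
		return find_exception(fib6_src);
	return 0;
}

int main(void)
{
	/* misses on the exact address, hits on the fib6_src retry */
	printf("%d\n", cached_lookup("2001:db8::1"));
	return 0;
}
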
4731 +diff --git a/net/tipc/core.c b/net/tipc/core.c
4732 +index 5b38f5164281..d7b0688c98dd 100644
4733 +--- a/net/tipc/core.c
4734 ++++ b/net/tipc/core.c
4735 +@@ -66,6 +66,10 @@ static int __net_init tipc_init_net(struct net *net)
4736 + INIT_LIST_HEAD(&tn->node_list);
4737 + spin_lock_init(&tn->node_list_lock);
4738 +
4739 ++ err = tipc_socket_init();
4740 ++ if (err)
4741 ++ goto out_socket;
4742 ++
4743 + err = tipc_sk_rht_init(net);
4744 + if (err)
4745 + goto out_sk_rht;
4746 +@@ -92,6 +96,8 @@ out_subscr:
4747 + out_nametbl:
4748 + tipc_sk_rht_destroy(net);
4749 + out_sk_rht:
4750 ++ tipc_socket_stop();
4751 ++out_socket:
4752 + return err;
4753 + }
4754 +
4755 +@@ -102,6 +108,7 @@ static void __net_exit tipc_exit_net(struct net *net)
4756 + tipc_bcast_stop(net);
4757 + tipc_nametbl_stop(net);
4758 + tipc_sk_rht_destroy(net);
4759 ++ tipc_socket_stop();
4760 + }
4761 +
4762 + static struct pernet_operations tipc_net_ops = {
4763 +@@ -129,10 +136,6 @@ static int __init tipc_init(void)
4764 + if (err)
4765 + goto out_netlink_compat;
4766 +
4767 +- err = tipc_socket_init();
4768 +- if (err)
4769 +- goto out_socket;
4770 +-
4771 + err = tipc_register_sysctl();
4772 + if (err)
4773 + goto out_sysctl;
4774 +@@ -152,8 +155,6 @@ out_bearer:
4775 + out_pernet:
4776 + tipc_unregister_sysctl();
4777 + out_sysctl:
4778 +- tipc_socket_stop();
4779 +-out_socket:
4780 + tipc_netlink_compat_stop();
4781 + out_netlink_compat:
4782 + tipc_netlink_stop();
4783 +@@ -168,7 +169,6 @@ static void __exit tipc_exit(void)
4784 + unregister_pernet_subsys(&tipc_net_ops);
4785 + tipc_netlink_stop();
4786 + tipc_netlink_compat_stop();
4787 +- tipc_socket_stop();
4788 + tipc_unregister_sysctl();
4789 +
4790 + pr_info("Deactivated\n");
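
The TIPC hunks move socket setup from module init into tipc_init_net() and keep the error ladder symmetric: each label undoes exactly the steps that already succeeded, in reverse order. The general pattern, sketched with stub functions:

#include <stdio.h>

static int step_socket(void) { return 0; }
static int step_rht(void)    { return 0; }
static void undo_socket(void) { }

static int init_net(void)
{
	int err;

	err = step_socket();
	if (err)
		goto out_socket;
	err = step_rht();
	if (err)
		goto out_rht;
	return 0;

out_rht:
	undo_socket();	/* undo only what already succeeded */
out_socket:
	return err;
}

int main(void)
{
	printf("init=%d\n", init_net());
	return 0;
}
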
4791 +diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
4792 +index 15eb5d3d4750..96ab344f17bb 100644
4793 +--- a/net/vmw_vsock/virtio_transport.c
4794 ++++ b/net/vmw_vsock/virtio_transport.c
4795 +@@ -702,28 +702,27 @@ static int __init virtio_vsock_init(void)
4796 + if (!virtio_vsock_workqueue)
4797 + return -ENOMEM;
4798 +
4799 +- ret = register_virtio_driver(&virtio_vsock_driver);
4800 ++ ret = vsock_core_init(&virtio_transport.transport);
4801 + if (ret)
4802 + goto out_wq;
4803 +
4804 +- ret = vsock_core_init(&virtio_transport.transport);
4805 ++ ret = register_virtio_driver(&virtio_vsock_driver);
4806 + if (ret)
4807 +- goto out_vdr;
4808 ++ goto out_vci;
4809 +
4810 + return 0;
4811 +
4812 +-out_vdr:
4813 +- unregister_virtio_driver(&virtio_vsock_driver);
4814 ++out_vci:
4815 ++ vsock_core_exit();
4816 + out_wq:
4817 + destroy_workqueue(virtio_vsock_workqueue);
4818 + return ret;
4819 +-
4820 + }
4821 +
4822 + static void __exit virtio_vsock_exit(void)
4823 + {
4824 +- vsock_core_exit();
4825 + unregister_virtio_driver(&virtio_vsock_driver);
4826 ++ vsock_core_exit();
4827 + destroy_workqueue(virtio_vsock_workqueue);
4828 + }
4829 +
4830 +diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
4831 +index 602715fc9a75..f3f3d06cb6d8 100644
4832 +--- a/net/vmw_vsock/virtio_transport_common.c
4833 ++++ b/net/vmw_vsock/virtio_transport_common.c
4834 +@@ -786,12 +786,19 @@ static bool virtio_transport_close(struct vsock_sock *vsk)
4835 +
4836 + void virtio_transport_release(struct vsock_sock *vsk)
4837 + {
4838 ++ struct virtio_vsock_sock *vvs = vsk->trans;
4839 ++ struct virtio_vsock_pkt *pkt, *tmp;
4840 + struct sock *sk = &vsk->sk;
4841 + bool remove_sock = true;
4842 +
4843 + lock_sock(sk);
4844 + if (sk->sk_type == SOCK_STREAM)
4845 + remove_sock = virtio_transport_close(vsk);
4846 ++
4847 ++ list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {
4848 ++ list_del(&pkt->list);
4849 ++ virtio_transport_free_pkt(pkt);
4850 ++ }
4851 + release_sock(sk);
4852 +
4853 + if (remove_sock)
4854 +diff --git a/scripts/gcc-plugins/arm_ssp_per_task_plugin.c b/scripts/gcc-plugins/arm_ssp_per_task_plugin.c
4855 +index 89c47f57d1ce..8c1af9bdcb1b 100644
4856 +--- a/scripts/gcc-plugins/arm_ssp_per_task_plugin.c
4857 ++++ b/scripts/gcc-plugins/arm_ssp_per_task_plugin.c
4858 +@@ -36,7 +36,7 @@ static unsigned int arm_pertask_ssp_rtl_execute(void)
4859 + mask = GEN_INT(sext_hwi(sp_mask, GET_MODE_PRECISION(Pmode)));
4860 + masked_sp = gen_reg_rtx(Pmode);
4861 +
4862 +- emit_insn_before(gen_rtx_SET(masked_sp,
4863 ++ emit_insn_before(gen_rtx_set(masked_sp,
4864 + gen_rtx_AND(Pmode,
4865 + stack_pointer_rtx,
4866 + mask)),
4867 +diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
4868 +index 53f8be0f4a1f..88158239622b 100644
4869 +--- a/tools/objtool/Makefile
4870 ++++ b/tools/objtool/Makefile
4871 +@@ -7,11 +7,12 @@ ARCH := x86
4872 + endif
4873 +
4874 + # always use the host compiler
4875 ++HOSTAR ?= ar
4876 + HOSTCC ?= gcc
4877 + HOSTLD ?= ld
4878 ++AR = $(HOSTAR)
4879 + CC = $(HOSTCC)
4880 + LD = $(HOSTLD)
4881 +-AR = ar
4882 +
4883 + ifeq ($(srctree),)
4884 + srctree := $(patsubst %/,%,$(dir $(CURDIR)))
4885 +diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
4886 +index 872fab163585..f4c3c84b090f 100644
4887 +--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
4888 ++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
4889 +@@ -58,6 +58,7 @@ enum intel_pt_pkt_state {
4890 + INTEL_PT_STATE_NO_IP,
4891 + INTEL_PT_STATE_ERR_RESYNC,
4892 + INTEL_PT_STATE_IN_SYNC,
4893 ++ INTEL_PT_STATE_TNT_CONT,
4894 + INTEL_PT_STATE_TNT,
4895 + INTEL_PT_STATE_TIP,
4896 + INTEL_PT_STATE_TIP_PGD,
4897 +@@ -72,8 +73,9 @@ static inline bool intel_pt_sample_time(enum intel_pt_pkt_state pkt_state)
4898 + case INTEL_PT_STATE_NO_IP:
4899 + case INTEL_PT_STATE_ERR_RESYNC:
4900 + case INTEL_PT_STATE_IN_SYNC:
4901 +- case INTEL_PT_STATE_TNT:
4902 ++ case INTEL_PT_STATE_TNT_CONT:
4903 + return true;
4904 ++ case INTEL_PT_STATE_TNT:
4905 + case INTEL_PT_STATE_TIP:
4906 + case INTEL_PT_STATE_TIP_PGD:
4907 + case INTEL_PT_STATE_FUP:
4908 +@@ -888,16 +890,20 @@ static uint64_t intel_pt_next_period(struct intel_pt_decoder *decoder)
4909 + timestamp = decoder->timestamp + decoder->timestamp_insn_cnt;
4910 + masked_timestamp = timestamp & decoder->period_mask;
4911 + if (decoder->continuous_period) {
4912 +- if (masked_timestamp != decoder->last_masked_timestamp)
4913 ++ if (masked_timestamp > decoder->last_masked_timestamp)
4914 + return 1;
4915 + } else {
4916 + timestamp += 1;
4917 + masked_timestamp = timestamp & decoder->period_mask;
4918 +- if (masked_timestamp != decoder->last_masked_timestamp) {
4919 ++ if (masked_timestamp > decoder->last_masked_timestamp) {
4920 + decoder->last_masked_timestamp = masked_timestamp;
4921 + decoder->continuous_period = true;
4922 + }
4923 + }
4924 ++
4925 ++ if (masked_timestamp < decoder->last_masked_timestamp)
4926 ++ return decoder->period_ticks;
4927 ++
4928 + return decoder->period_ticks - (timestamp - masked_timestamp);
4929 + }
4930 +
4931 +@@ -926,7 +932,10 @@ static void intel_pt_sample_insn(struct intel_pt_decoder *decoder)
4932 + case INTEL_PT_PERIOD_TICKS:
4933 + timestamp = decoder->timestamp + decoder->timestamp_insn_cnt;
4934 + masked_timestamp = timestamp & decoder->period_mask;
4935 +- decoder->last_masked_timestamp = masked_timestamp;
4936 ++ if (masked_timestamp > decoder->last_masked_timestamp)
4937 ++ decoder->last_masked_timestamp = masked_timestamp;
4938 ++ else
4939 ++ decoder->last_masked_timestamp += decoder->period_ticks;
4940 + break;
4941 + case INTEL_PT_PERIOD_NONE:
4942 + case INTEL_PT_PERIOD_MTC:
4943 +@@ -1254,7 +1263,9 @@ static int intel_pt_walk_tnt(struct intel_pt_decoder *decoder)
4944 + return -ENOENT;
4945 + }
4946 + decoder->tnt.count -= 1;
4947 +- if (!decoder->tnt.count)
4948 ++ if (decoder->tnt.count)
4949 ++ decoder->pkt_state = INTEL_PT_STATE_TNT_CONT;
4950 ++ else
4951 + decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
4952 + decoder->tnt.payload <<= 1;
4953 + decoder->state.from_ip = decoder->ip;
4954 +@@ -1285,7 +1296,9 @@ static int intel_pt_walk_tnt(struct intel_pt_decoder *decoder)
4955 +
4956 + if (intel_pt_insn.branch == INTEL_PT_BR_CONDITIONAL) {
4957 + decoder->tnt.count -= 1;
4958 +- if (!decoder->tnt.count)
4959 ++ if (decoder->tnt.count)
4960 ++ decoder->pkt_state = INTEL_PT_STATE_TNT_CONT;
4961 ++ else
4962 + decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
4963 + if (decoder->tnt.payload & BIT63) {
4964 + decoder->tnt.payload <<= 1;
4965 +@@ -1305,8 +1318,11 @@ static int intel_pt_walk_tnt(struct intel_pt_decoder *decoder)
4966 + return 0;
4967 + }
4968 + decoder->ip += intel_pt_insn.length;
4969 +- if (!decoder->tnt.count)
4970 ++ if (!decoder->tnt.count) {
4971 ++ decoder->sample_timestamp = decoder->timestamp;
4972 ++ decoder->sample_insn_cnt = decoder->timestamp_insn_cnt;
4973 + return -EAGAIN;
4974 ++ }
4975 + decoder->tnt.payload <<= 1;
4976 + continue;
4977 + }
4978 +@@ -2365,6 +2381,7 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
4979 + err = intel_pt_walk_trace(decoder);
4980 + break;
4981 + case INTEL_PT_STATE_TNT:
4982 ++ case INTEL_PT_STATE_TNT_CONT:
4983 + err = intel_pt_walk_tnt(decoder);
4984 + if (err == -EAGAIN)
4985 + err = intel_pt_walk_trace(decoder);