
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.0 commit in: /
Date: Sun, 26 May 2019 17:08:24
Message-Id: 1558890482.15741aee1cf7932e3be5a4e02b6d5867bdcb4bd0.mpagano@gentoo
commit: 15741aee1cf7932e3be5a4e02b6d5867bdcb4bd0
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun May 26 17:08:02 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun May 26 17:08:02 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=15741aee

Linux patch 5.0.19

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README | 4 +
 1018_linux-5.0.19.patch | 5315 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5319 insertions(+)
diff --git a/0000_README b/0000_README
index 396a4db..599546c 100644
--- a/0000_README
+++ b/0000_README
@@ -115,6 +115,10 @@ Patch: 1017_linux-5.0.18.patch
 From: http://www.kernel.org
 Desc: Linux 5.0.18

+Patch: 1018_linux-5.0.19.patch
+From: http://www.kernel.org
+Desc: Linux 5.0.19
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1018_linux-5.0.19.patch b/1018_linux-5.0.19.patch
new file mode 100644
index 0000000..0c8c977
--- /dev/null
+++ b/1018_linux-5.0.19.patch
@@ -0,0 +1,5315 @@
+diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
+index cf43bc4dbf31..a60fa516d4cb 100644
+--- a/Documentation/filesystems/porting
++++ b/Documentation/filesystems/porting
+@@ -638,3 +638,8 @@ in your dentry operations instead.
+ inode to d_splice_alias() will also do the right thing (equivalent of
+ d_add(dentry, NULL); return NULL;), so that kind of special cases
+ also doesn't need a separate treatment.
++--
++[mandatory]
++ DCACHE_RCUACCESS is gone; having an RCU delay on dentry freeing is the
++ default. DCACHE_NORCU opts out, and only d_alloc_pseudo() has any
++ business doing so.
+diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
+index ba8927c0d45c..a1b8e6d92298 100644
+--- a/Documentation/virtual/kvm/api.txt
++++ b/Documentation/virtual/kvm/api.txt
+@@ -3790,8 +3790,9 @@ The ioctl clears the dirty status of pages in a memory slot, according to
+ the bitmap that is passed in struct kvm_clear_dirty_log's dirty_bitmap
+ field. Bit 0 of the bitmap corresponds to page "first_page" in the
+ memory slot, and num_pages is the size in bits of the input bitmap.
+-Both first_page and num_pages must be a multiple of 64. For each bit
+-that is set in the input bitmap, the corresponding page is marked "clean"
++first_page must be a multiple of 64; num_pages must also be a multiple of
++64 unless first_page + num_pages is the size of the memory slot. For each
++bit that is set in the input bitmap, the corresponding page is marked "clean"
+ in KVM's dirty bitmap, and dirty tracking is re-enabled for that page
+ (for example via write-protection, or by clearing the dirty bit in
+ a page table entry).
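
For illustration, the relaxed rule in the documentation hunk above can be read as the following check (a sketch only; the helper name and slot-size parameter are hypothetical, not part of the KVM API):

  /* first_page stays 64-aligned; num_pages may be ragged only at slot end */
  static bool clear_dirty_args_valid(u64 first_page, u64 num_pages,
                                     u64 slot_npages)
  {
          if (first_page % 64)
                  return false;
          return (num_pages % 64 == 0) ||
                 (first_page + num_pages == slot_npages);
  }

E.g. an 8200-page slot may be cleared with first_page = 8192 and num_pages = 8, because the range ends exactly at the slot boundary even though 8 is not a multiple of 64.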
+diff --git a/Makefile b/Makefile
+index bf21b5a86e4b..66efffc3fb41 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 0
+-SUBLEVEL = 18
++SUBLEVEL = 19
+ EXTRAVERSION =
+ NAME = Shy Crocodile
+
+diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
+index 4135abec3fb0..63e6e6504699 100644
+--- a/arch/arc/mm/cache.c
++++ b/arch/arc/mm/cache.c
+@@ -113,10 +113,24 @@ static void read_decode_cache_bcr_arcv2(int cpu)
+ }
+
+ READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
+- if (cbcr.c)
++ if (cbcr.c) {
+ ioc_exists = 1;
+- else
++
++ /*
++ * As for today we don't support both IOC and ZONE_HIGHMEM enabled
++ * simultaneously. This happens because as of today IOC aperture covers
++ * only ZONE_NORMAL (low mem) and any dma transactions outside this
++ * region won't be HW coherent.
++ * If we want to use both IOC and ZONE_HIGHMEM we can use
++ * bounce_buffer to handle dma transactions to HIGHMEM.
++ * Also it is possible to modify dma_direct cache ops or increase IOC
++ * aperture size if we are planning to use HIGHMEM without PAE.
++ */
++ if (IS_ENABLED(CONFIG_HIGHMEM) || is_pae40_enabled())
++ ioc_enable = 0;
++ } else {
+ ioc_enable = 0;
++ }
+
+ /* HS 2.0 didn't have AUX_VOL */
+ if (cpuinfo_arc700[cpu].core.family > 0x51) {
+@@ -1158,19 +1172,6 @@ noinline void __init arc_ioc_setup(void)
+ if (!ioc_enable)
+ return;
+
+- /*
+- * As for today we don't support both IOC and ZONE_HIGHMEM enabled
+- * simultaneously. This happens because as of today IOC aperture covers
+- * only ZONE_NORMAL (low mem) and any dma transactions outside this
+- * region won't be HW coherent.
+- * If we want to use both IOC and ZONE_HIGHMEM we can use
+- * bounce_buffer to handle dma transactions to HIGHMEM.
+- * Also it is possible to modify dma_direct cache ops or increase IOC
+- * aperture size if we are planning to use HIGHMEM without PAE.
+- */
+- if (IS_ENABLED(CONFIG_HIGHMEM))
+- panic("IOC and HIGHMEM can't be used simultaneously");
+-
+ /* Flush + invalidate + disable L1 dcache */
+ __dc_disable();
+
+diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
+index 413863508f6f..d67fb64e908c 100644
+--- a/arch/mips/kernel/perf_event_mipsxx.c
++++ b/arch/mips/kernel/perf_event_mipsxx.c
+@@ -64,17 +64,11 @@ struct mips_perf_event {
+ #define CNTR_EVEN 0x55555555
+ #define CNTR_ODD 0xaaaaaaaa
+ #define CNTR_ALL 0xffffffff
+-#ifdef CONFIG_MIPS_MT_SMP
+ enum {
+ T = 0,
+ V = 1,
+ P = 2,
+ } range;
+-#else
+- #define T
+- #define V
+- #define P
+-#endif
+ };
+
+ static struct mips_perf_event raw_event;
+@@ -325,9 +319,7 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
+ {
+ struct perf_event *event = container_of(evt, struct perf_event, hw);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+-#ifdef CONFIG_MIPS_MT_SMP
+ unsigned int range = evt->event_base >> 24;
+-#endif /* CONFIG_MIPS_MT_SMP */
+
+ WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
+
+@@ -336,21 +328,15 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
+ /* Make sure interrupt enabled. */
+ MIPS_PERFCTRL_IE;
+
+-#ifdef CONFIG_CPU_BMIPS5000
+- {
++ if (IS_ENABLED(CONFIG_CPU_BMIPS5000)) {
+ /* enable the counter for the calling thread */
+ cpuc->saved_ctrl[idx] |=
+ (1 << (12 + vpe_id())) | BRCM_PERFCTRL_TC;
+- }
+-#else
+-#ifdef CONFIG_MIPS_MT_SMP
+- if (range > V) {
++ } else if (IS_ENABLED(CONFIG_MIPS_MT_SMP) && range > V) {
+ /* The counter is processor wide. Set it up to count all TCs. */
+ pr_debug("Enabling perf counter for all TCs\n");
+ cpuc->saved_ctrl[idx] |= M_TC_EN_ALL;
+- } else
+-#endif /* CONFIG_MIPS_MT_SMP */
+- {
++ } else {
+ unsigned int cpu, ctrl;
+
+ /*
+@@ -365,7 +351,6 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
+ cpuc->saved_ctrl[idx] |= ctrl;
+ pr_debug("Enabling perf counter for CPU%d\n", cpu);
+ }
+-#endif /* CONFIG_CPU_BMIPS5000 */
+ /*
+ * We do not actually let the counter run. Leave it until start().
+ */
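
The MIPS hunk above swaps #ifdef blocks for IS_ENABLED() so that every branch is parsed and type-checked under all configurations, while dead branches still fold away at compile time. A minimal sketch of the idiom (function and CONFIG names illustrative):

  if (IS_ENABLED(CONFIG_SOME_FEATURE)) {
          feature_setup();        /* discarded when the option is off, */
  } else {                        /* but always seen by the compiler   */
          fallback_setup();
  }

This is also why the old empty `#define T` / `#define V` / `#define P` fallbacks could be dropped: the enum members now exist in every build.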
+diff --git a/arch/parisc/boot/compressed/head.S b/arch/parisc/boot/compressed/head.S
+index 5aba20fa48aa..e8b798fd0cf0 100644
+--- a/arch/parisc/boot/compressed/head.S
++++ b/arch/parisc/boot/compressed/head.S
+@@ -22,7 +22,7 @@
+ __HEAD
+
+ ENTRY(startup)
+- .level LEVEL
++ .level PA_ASM_LEVEL
+
+ #define PSW_W_SM 0x200
+ #define PSW_W_BIT 36
+@@ -63,7 +63,7 @@ $bss_loop:
+ load32 BOOTADDR(decompress_kernel),%r3
+
+ #ifdef CONFIG_64BIT
+- .level LEVEL
++ .level PA_ASM_LEVEL
+ ssm PSW_W_SM, %r0 /* set W-bit */
+ depdi 0, 31, 32, %r3
+ #endif
+@@ -72,7 +72,7 @@ $bss_loop:
+
+ startup_continue:
+ #ifdef CONFIG_64BIT
+- .level LEVEL
++ .level PA_ASM_LEVEL
+ rsm PSW_W_SM, %r0 /* clear W-bit */
+ #endif
+
+diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
+index c17ec0ee6e7c..d85738a7bbe6 100644
+--- a/arch/parisc/include/asm/assembly.h
++++ b/arch/parisc/include/asm/assembly.h
+@@ -61,14 +61,14 @@
+ #define LDCW ldcw,co
+ #define BL b,l
+ # ifdef CONFIG_64BIT
+-# define LEVEL 2.0w
++# define PA_ASM_LEVEL 2.0w
+ # else
+-# define LEVEL 2.0
++# define PA_ASM_LEVEL 2.0
+ # endif
+ #else
+ #define LDCW ldcw
+ #define BL bl
+-#define LEVEL 1.1
++#define PA_ASM_LEVEL 1.1
+ #endif
+
+ #ifdef __ASSEMBLY__
+diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
+index 006fb939cac8..4016fe1c65a9 100644
+--- a/arch/parisc/include/asm/cache.h
++++ b/arch/parisc/include/asm/cache.h
+@@ -44,22 +44,22 @@ void parisc_setup_cache_timing(void);
+
+ #define pdtlb(addr) asm volatile("pdtlb 0(%%sr1,%0)" \
+ ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \
+- : : "r" (addr))
++ : : "r" (addr) : "memory")
+ #define pitlb(addr) asm volatile("pitlb 0(%%sr1,%0)" \
+ ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \
+ ALTERNATIVE(ALT_COND_NO_SPLIT_TLB, INSN_NOP) \
+- : : "r" (addr))
++ : : "r" (addr) : "memory")
+ #define pdtlb_kernel(addr) asm volatile("pdtlb 0(%0)" \
+ ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \
+- : : "r" (addr))
++ : : "r" (addr) : "memory")
+
+ #define asm_io_fdc(addr) asm volatile("fdc %%r0(%0)" \
+ ALTERNATIVE(ALT_COND_NO_DCACHE, INSN_NOP) \
+ ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) \
+- : : "r" (addr))
++ : : "r" (addr) : "memory")
+ #define asm_io_sync() asm volatile("sync" \
+ ALTERNATIVE(ALT_COND_NO_DCACHE, INSN_NOP) \
+- ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) :: )
++ ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) :::"memory")
+
+ #endif /* ! __ASSEMBLY__ */
+
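
The "memory" clobbers added in the parisc cache.h hunk tell the compiler that these TLB/cache-control instructions touch memory in ways it cannot see, so it must discard cached register copies and may not reorder independent loads and stores across the asm. A generic sketch of the idiom, not parisc-specific (the nop stands in for a real flush instruction):

  static inline void hw_barrier(unsigned long addr)
  {
          /* without the ':"memory"' clobber, the compiler may hoist
           * accesses to *addr (or anything else) past this statement */
          asm volatile("nop" : : "r" (addr) : "memory");
  }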
+diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
+index fbb4e43fda05..f56cbab64ac1 100644
+--- a/arch/parisc/kernel/head.S
++++ b/arch/parisc/kernel/head.S
+@@ -22,7 +22,7 @@
+ #include <linux/linkage.h>
+ #include <linux/init.h>
+
+- .level LEVEL
++ .level PA_ASM_LEVEL
+
+ __INITDATA
+ ENTRY(boot_args)
+@@ -258,7 +258,7 @@ stext_pdc_ret:
+ ldo R%PA(fault_vector_11)(%r10),%r10
+
+ $is_pa20:
+- .level LEVEL /* restore 1.1 || 2.0w */
++ .level PA_ASM_LEVEL /* restore 1.1 || 2.0w */
+ #endif /*!CONFIG_64BIT*/
+ load32 PA(fault_vector_20),%r10
+
+diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
+index 841db71958cd..97c206734e24 100644
+--- a/arch/parisc/kernel/process.c
++++ b/arch/parisc/kernel/process.c
+@@ -193,6 +193,7 @@ int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
+ */
+
+ int running_on_qemu __read_mostly;
++EXPORT_SYMBOL(running_on_qemu);
+
+ void __cpuidle arch_cpu_idle_dead(void)
+ {
+diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
+index 4f77bd9be66b..93cc36d98875 100644
+--- a/arch/parisc/kernel/syscall.S
++++ b/arch/parisc/kernel/syscall.S
+@@ -48,7 +48,7 @@ registers).
+ */
+ #define KILL_INSN break 0,0
+
+- .level LEVEL
++ .level PA_ASM_LEVEL
+
+ .text
+
+diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
+index 059187a3ded7..3d1305aa64b6 100644
+--- a/arch/parisc/mm/init.c
++++ b/arch/parisc/mm/init.c
+@@ -512,7 +512,7 @@ static void __init map_pages(unsigned long start_vaddr,
+
+ void __init set_kernel_text_rw(int enable_read_write)
+ {
+- unsigned long start = (unsigned long) _text;
++ unsigned long start = (unsigned long) __init_begin;
+ unsigned long end = (unsigned long) &data_start;
+
+ map_pages(start, __pa(start), end-start,
+diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
+index 6ee8195a2ffb..4a6dd3ba0b0b 100644
+--- a/arch/powerpc/include/asm/mmu_context.h
++++ b/arch/powerpc/include/asm/mmu_context.h
+@@ -237,7 +237,6 @@ extern void arch_exit_mmap(struct mm_struct *mm);
+ #endif
+
+ static inline void arch_unmap(struct mm_struct *mm,
+- struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+ {
+ if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
+diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
+index 532ab79734c7..d43e8fe6d424 100644
+--- a/arch/powerpc/kvm/book3s_64_vio.c
++++ b/arch/powerpc/kvm/book3s_64_vio.c
+@@ -543,14 +543,14 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+ if (ret != H_SUCCESS)
+ return ret;
+
++ idx = srcu_read_lock(&vcpu->kvm->srcu);
++
+ ret = kvmppc_tce_validate(stt, tce);
+ if (ret != H_SUCCESS)
+- return ret;
++ goto unlock_exit;
+
+ dir = iommu_tce_direction(tce);
+
+- idx = srcu_read_lock(&vcpu->kvm->srcu);
+-
+ if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
+ ret = H_PARAMETER;
+ goto unlock_exit;
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index 5a066fc299e1..f17065f2c962 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -3407,7 +3407,9 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
+ vcpu->arch.shregs.sprg2 = mfspr(SPRN_SPRG2);
+ vcpu->arch.shregs.sprg3 = mfspr(SPRN_SPRG3);
+
+- mtspr(SPRN_PSSCR, host_psscr);
++ /* Preserve PSSCR[FAKE_SUSPEND] until we've called kvmppc_save_tm_hv */
++ mtspr(SPRN_PSSCR, host_psscr |
++ (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
+ mtspr(SPRN_HFSCR, host_hfscr);
+ mtspr(SPRN_CIABR, host_ciabr);
+ mtspr(SPRN_DAWR, host_dawr);
+diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
+index fca34b2177e2..9f4b4bb78120 100644
+--- a/arch/um/include/asm/mmu_context.h
++++ b/arch/um/include/asm/mmu_context.h
+@@ -22,7 +22,6 @@ static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
+ }
+ extern void arch_exit_mmap(struct mm_struct *mm);
+ static inline void arch_unmap(struct mm_struct *mm,
+- struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+ {
+ }
+diff --git a/arch/unicore32/include/asm/mmu_context.h b/arch/unicore32/include/asm/mmu_context.h
+index 5c205a9cb5a6..9f06ea5466dd 100644
+--- a/arch/unicore32/include/asm/mmu_context.h
++++ b/arch/unicore32/include/asm/mmu_context.h
+@@ -88,7 +88,6 @@ static inline int arch_dup_mmap(struct mm_struct *oldmm,
+ }
+
+ static inline void arch_unmap(struct mm_struct *mm,
+- struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+ {
+ }
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index 4fe27b67d7e2..b1d59a7c556e 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -881,7 +881,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
+ * @paranoid == 2 is special: the stub will never switch stacks. This is for
+ * #DF: if the thread stack is somehow unusable, we'll still get a useful OOPS.
+ */
+-.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
++.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 create_gap=0
+ ENTRY(\sym)
+ UNWIND_HINT_IRET_REGS offset=\has_error_code*8
+
+@@ -901,6 +901,20 @@ ENTRY(\sym)
+ jnz .Lfrom_usermode_switch_stack_\@
+ .endif
+
++ .if \create_gap == 1
++ /*
++ * If coming from kernel space, create a 6-word gap to allow the
++ * int3 handler to emulate a call instruction.
++ */
++ testb $3, CS-ORIG_RAX(%rsp)
++ jnz .Lfrom_usermode_no_gap_\@
++ .rept 6
++ pushq 5*8(%rsp)
++ .endr
++ UNWIND_HINT_IRET_REGS offset=8
++.Lfrom_usermode_no_gap_\@:
++ .endif
++
+ .if \paranoid
+ call paranoid_entry
+ .else
+@@ -1132,7 +1146,7 @@ apicinterrupt3 HYPERV_STIMER0_VECTOR \
+ #endif /* CONFIG_HYPERV */
+
+ idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
+-idtentry int3 do_int3 has_error_code=0
++idtentry int3 do_int3 has_error_code=0 create_gap=1
+ idtentry stack_segment do_stack_segment has_error_code=1
+
+ #ifdef CONFIG_XEN_PV
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 71fb8b7b2954..c87b06ad9f86 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -2090,15 +2090,19 @@ static void intel_pmu_disable_event(struct perf_event *event)
+ cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
+ cpuc->intel_cp_status &= ~(1ull << hwc->idx);
+
+- if (unlikely(event->attr.precise_ip))
+- intel_pmu_pebs_disable(event);
+-
+ if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
+ intel_pmu_disable_fixed(hwc);
+ return;
+ }
+
+ x86_pmu_disable_event(event);
++
++ /*
++ * Needs to be called after x86_pmu_disable_event,
++ * so we don't trigger the event without PEBS bit set.
++ */
++ if (unlikely(event->attr.precise_ip))
++ intel_pmu_pebs_disable(event);
+ }
+
+ static void intel_pmu_del_event(struct perf_event *event)
+diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
+index 19d18fae6ec6..41019af68adf 100644
+--- a/arch/x86/include/asm/mmu_context.h
++++ b/arch/x86/include/asm/mmu_context.h
+@@ -277,8 +277,8 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
+ mpx_mm_init(mm);
+ }
+
+-static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
+- unsigned long start, unsigned long end)
++static inline void arch_unmap(struct mm_struct *mm, unsigned long start,
++ unsigned long end)
+ {
+ /*
+ * mpx_notify_unmap() goes and reads a rarely-hot
+@@ -298,7 +298,7 @@ static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
+ * consistently wrong.
+ */
+ if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
+- mpx_notify_unmap(mm, vma, start, end);
++ mpx_notify_unmap(mm, start, end);
+ }
+
+ /*
+diff --git a/arch/x86/include/asm/mpx.h b/arch/x86/include/asm/mpx.h
+index d0b1434fb0b6..143a5c193ed3 100644
+--- a/arch/x86/include/asm/mpx.h
++++ b/arch/x86/include/asm/mpx.h
+@@ -64,12 +64,15 @@ struct mpx_fault_info {
+ };
+
+ #ifdef CONFIG_X86_INTEL_MPX
+-int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs);
+-int mpx_handle_bd_fault(void);
++
++extern int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs);
++extern int mpx_handle_bd_fault(void);
++
+ static inline int kernel_managing_mpx_tables(struct mm_struct *mm)
+ {
+ return (mm->context.bd_addr != MPX_INVALID_BOUNDS_DIR);
+ }
++
+ static inline void mpx_mm_init(struct mm_struct *mm)
+ {
+ /*
+@@ -78,11 +81,10 @@ static inline void mpx_mm_init(struct mm_struct *mm)
+ */
+ mm->context.bd_addr = MPX_INVALID_BOUNDS_DIR;
+ }
+-void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
+- unsigned long start, unsigned long end);
+
+-unsigned long mpx_unmapped_area_check(unsigned long addr, unsigned long len,
+- unsigned long flags);
++extern void mpx_notify_unmap(struct mm_struct *mm, unsigned long start, unsigned long end);
++extern unsigned long mpx_unmapped_area_check(unsigned long addr, unsigned long len, unsigned long flags);
++
+ #else
+ static inline int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs)
+ {
+@@ -100,7 +102,6 @@ static inline void mpx_mm_init(struct mm_struct *mm)
+ {
+ }
+ static inline void mpx_notify_unmap(struct mm_struct *mm,
+- struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+ {
+ }
+diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
+index 9c85b54bf03c..0bb566315621 100644
+--- a/arch/x86/include/asm/pgtable_64.h
++++ b/arch/x86/include/asm/pgtable_64.h
+@@ -259,8 +259,7 @@ extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
+ extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
+
+ #define gup_fast_permitted gup_fast_permitted
+-static inline bool gup_fast_permitted(unsigned long start, int nr_pages,
+- int write)
++static inline bool gup_fast_permitted(unsigned long start, int nr_pages)
+ {
+ unsigned long len, end;
+
+diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
+index e85ff65c43c3..05861cc08787 100644
+--- a/arch/x86/include/asm/text-patching.h
++++ b/arch/x86/include/asm/text-patching.h
+@@ -39,4 +39,32 @@ extern int poke_int3_handler(struct pt_regs *regs);
+ extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
+ extern int after_bootmem;
+
++static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
++{
++ regs->ip = ip;
++}
++
++#define INT3_INSN_SIZE 1
++#define CALL_INSN_SIZE 5
++
++#ifdef CONFIG_X86_64
++static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val)
++{
++ /*
++ * The int3 handler in entry_64.S adds a gap between the
++ * stack where the break point happened, and the saving of
++ * pt_regs. We can extend the original stack because of
++ * this gap. See the idtentry macro's create_gap option.
++ */
++ regs->sp -= sizeof(unsigned long);
++ *(unsigned long *)regs->sp = val;
++}
++
++static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func)
++{
++ int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
++ int3_emulate_jmp(regs, func);
++}
++#endif
++
+ #endif /* _ASM_X86_TEXT_PATCHING_H */
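
The address arithmetic in int3_emulate_call() above follows from the trap layout: #BP leaves regs->ip one byte past the int3, so ip - INT3_INSN_SIZE is the start of the patched instruction, and a real 5-byte call would have pushed the address of the instruction after it. A worked example with hypothetical addresses:

  /*
   * call site being patched starts at 0x1000, int3 byte at 0x1000:
   *   regs->ip at trap entry   = 0x1001 (one past the int3)
   *   instruction start        = 0x1001 - INT3_INSN_SIZE = 0x1000
   *   emulated return address  = 0x1000 + CALL_INSN_SIZE = 0x1005
   * int3_emulate_call(regs, func) pushes 0x1005, then sets ip to func.
   */

The push itself is only safe because of the 6-word gap created by the idtentry create_gap change earlier in this patch; that is why the helpers are x86-64 only here.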
+diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
+index 763d4264d16a..2ee4b12a70e8 100644
+--- a/arch/x86/kernel/ftrace.c
++++ b/arch/x86/kernel/ftrace.c
+@@ -29,6 +29,7 @@
+ #include <asm/kprobes.h>
+ #include <asm/ftrace.h>
+ #include <asm/nops.h>
++#include <asm/text-patching.h>
+
+ #ifdef CONFIG_DYNAMIC_FTRACE
+
+@@ -231,6 +232,7 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+ }
+
+ static unsigned long ftrace_update_func;
++static unsigned long ftrace_update_func_call;
+
+ static int update_ftrace_func(unsigned long ip, void *new)
+ {
+@@ -259,6 +261,8 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
+ unsigned char *new;
+ int ret;
+
++ ftrace_update_func_call = (unsigned long)func;
++
+ new = ftrace_call_replace(ip, (unsigned long)func);
+ ret = update_ftrace_func(ip, new);
+
+@@ -294,13 +298,28 @@ int ftrace_int3_handler(struct pt_regs *regs)
+ if (WARN_ON_ONCE(!regs))
+ return 0;
+
+- ip = regs->ip - 1;
+- if (!ftrace_location(ip) && !is_ftrace_caller(ip))
+- return 0;
++ ip = regs->ip - INT3_INSN_SIZE;
+
+- regs->ip += MCOUNT_INSN_SIZE - 1;
++#ifdef CONFIG_X86_64
++ if (ftrace_location(ip)) {
++ int3_emulate_call(regs, (unsigned long)ftrace_regs_caller);
++ return 1;
++ } else if (is_ftrace_caller(ip)) {
++ if (!ftrace_update_func_call) {
++ int3_emulate_jmp(regs, ip + CALL_INSN_SIZE);
++ return 1;
++ }
++ int3_emulate_call(regs, ftrace_update_func_call);
++ return 1;
++ }
++#else
++ if (ftrace_location(ip) || is_ftrace_caller(ip)) {
++ int3_emulate_jmp(regs, ip + CALL_INSN_SIZE);
++ return 1;
++ }
++#endif
+
+- return 1;
++ return 0;
+ }
+
+ static int ftrace_write(unsigned long ip, const char *val, int size)
+@@ -858,6 +877,8 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
+
+ func = ftrace_ops_get_func(ops);
+
++ ftrace_update_func_call = (unsigned long)func;
++
+ /* Do a safe modify in case the trampoline is executing */
+ new = ftrace_call_replace(ip, (unsigned long)func);
+ ret = update_ftrace_func(ip, new);
+@@ -959,6 +980,7 @@ static int ftrace_mod_jmp(unsigned long ip, void *func)
+ {
+ unsigned char *new;
+
++ ftrace_update_func_call = 0UL;
+ new = ftrace_jmp_replace(ip, (unsigned long)func);
+
+ return update_ftrace_func(ip, new);
+diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
+index 371c669696d7..610c0f1fbdd7 100644
+--- a/arch/x86/kvm/hyperv.c
++++ b/arch/x86/kvm/hyperv.c
+@@ -1371,7 +1371,16 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
+
+ valid_bank_mask = BIT_ULL(0);
+ sparse_banks[0] = flush.processor_mask;
+- all_cpus = flush.flags & HV_FLUSH_ALL_PROCESSORS;
++
++ /*
++ * Work around possible WS2012 bug: it sends hypercalls
++ * with processor_mask = 0x0 and HV_FLUSH_ALL_PROCESSORS clear,
++ * while also expecting us to flush something and crashing if
++ * we don't. Let's treat processor_mask == 0 same as
++ * HV_FLUSH_ALL_PROCESSORS.
++ */
++ all_cpus = (flush.flags & HV_FLUSH_ALL_PROCESSORS) ||
++ flush.processor_mask == 0;
+ } else {
+ if (unlikely(kvm_read_guest(kvm, ingpa, &flush_ex,
+ sizeof(flush_ex))))
+diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
+index 140e61843a07..3cb3af51ec89 100644
+--- a/arch/x86/lib/Makefile
++++ b/arch/x86/lib/Makefile
+@@ -6,6 +6,18 @@
+ # Produces uninteresting flaky coverage.
+ KCOV_INSTRUMENT_delay.o := n
+
++# Early boot use of cmdline; don't instrument it
++ifdef CONFIG_AMD_MEM_ENCRYPT
++KCOV_INSTRUMENT_cmdline.o := n
++KASAN_SANITIZE_cmdline.o := n
++
++ifdef CONFIG_FUNCTION_TRACER
++CFLAGS_REMOVE_cmdline.o = -pg
++endif
++
++CFLAGS_cmdline.o := $(call cc-option, -fno-stack-protector)
++endif
++
+ inat_tables_script = $(srctree)/arch/x86/tools/gen-insn-attr-x86.awk
+ inat_tables_maps = $(srctree)/arch/x86/lib/x86-opcode-map.txt
+ quiet_cmd_inat_tables = GEN $@
+diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
+index de1851d15699..ea17ff6c8588 100644
+--- a/arch/x86/mm/mpx.c
++++ b/arch/x86/mm/mpx.c
+@@ -881,9 +881,10 @@ static int mpx_unmap_tables(struct mm_struct *mm,
+ * the virtual address region start...end have already been split if
+ * necessary, and the 'vma' is the first vma in this range (start -> end).
+ */
+-void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
+- unsigned long start, unsigned long end)
++void mpx_notify_unmap(struct mm_struct *mm, unsigned long start,
++ unsigned long end)
+ {
++ struct vm_area_struct *vma;
+ int ret;
+
+ /*
+@@ -902,11 +903,12 @@ void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
+ * which should not occur normally. Being strict about it here
+ * helps ensure that we do not have an exploitable stack overflow.
+ */
+- do {
++ vma = find_vma(mm, start);
++ while (vma && vma->vm_start < end) {
+ if (vma->vm_flags & VM_MPX)
+ return;
+ vma = vma->vm_next;
+- } while (vma && vma->vm_start < end);
++ }
+
+ ret = mpx_unmap_tables(mm, start, end);
+ if (ret)
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 5bde73a49399..6ba6d8805697 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -375,7 +375,7 @@ void blk_cleanup_queue(struct request_queue *q)
+ blk_exit_queue(q);
+
+ if (queue_is_mq(q))
+- blk_mq_free_queue(q);
++ blk_mq_exit_queue(q);
+
+ percpu_ref_exit(&q->q_usage_counter);
+
+diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
+index 3f9c3f4ac44c..4040e62c3737 100644
+--- a/block/blk-mq-sysfs.c
++++ b/block/blk-mq-sysfs.c
+@@ -10,6 +10,7 @@
+ #include <linux/smp.h>
+
+ #include <linux/blk-mq.h>
++#include "blk.h"
+ #include "blk-mq.h"
+ #include "blk-mq-tag.h"
+
+@@ -33,6 +34,11 @@ static void blk_mq_hw_sysfs_release(struct kobject *kobj)
+ {
+ struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
+ kobj);
++
++ if (hctx->flags & BLK_MQ_F_BLOCKING)
++ cleanup_srcu_struct(hctx->srcu);
++ blk_free_flush_queue(hctx->fq);
++ sbitmap_free(&hctx->ctx_map);
+ free_cpumask_var(hctx->cpumask);
+ kfree(hctx->ctxs);
+ kfree(hctx);
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 5b920a82bfe6..9957e0fc17fc 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -2270,12 +2270,7 @@ static void blk_mq_exit_hctx(struct request_queue *q,
+ if (set->ops->exit_hctx)
+ set->ops->exit_hctx(hctx, hctx_idx);
+
+- if (hctx->flags & BLK_MQ_F_BLOCKING)
+- cleanup_srcu_struct(hctx->srcu);
+-
+ blk_mq_remove_cpuhp(hctx);
+- blk_free_flush_queue(hctx->fq);
+- sbitmap_free(&hctx->ctx_map);
+ }
+
+ static void blk_mq_exit_hw_queues(struct request_queue *q,
+@@ -2904,7 +2899,8 @@ err_exit:
+ }
+ EXPORT_SYMBOL(blk_mq_init_allocated_queue);
+
+-void blk_mq_free_queue(struct request_queue *q)
++/* tags can _not_ be used after returning from blk_mq_exit_queue */
++void blk_mq_exit_queue(struct request_queue *q)
+ {
+ struct blk_mq_tag_set *set = q->tag_set;
+
+diff --git a/block/blk-mq.h b/block/blk-mq.h
+index a3a684a8c633..39bc1d5d4637 100644
+--- a/block/blk-mq.h
++++ b/block/blk-mq.h
+@@ -36,7 +36,7 @@ struct blk_mq_ctx {
+ struct kobject kobj;
+ } ____cacheline_aligned_in_smp;
+
+-void blk_mq_free_queue(struct request_queue *q);
++void blk_mq_exit_queue(struct request_queue *q);
+ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
+ void blk_mq_wake_waiters(struct request_queue *q);
+ bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index d62487d02455..4add909e1a91 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -486,7 +486,7 @@ re_probe:
+ if (dev->bus->dma_configure) {
+ ret = dev->bus->dma_configure(dev);
+ if (ret)
+- goto dma_failed;
++ goto probe_failed;
+ }
+
+ if (driver_sysfs_add(dev)) {
+@@ -542,14 +542,13 @@ re_probe:
+ goto done;
+
+ probe_failed:
+- arch_teardown_dma_ops(dev);
+-dma_failed:
+ if (dev->bus)
+ blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+ BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
+ pinctrl_bind_failed:
+ device_links_no_driver(dev);
+ devres_release_all(dev);
++ arch_teardown_dma_ops(dev);
+ driver_sysfs_remove(dev);
+ dev->driver = NULL;
+ dev_set_drvdata(dev, NULL);
+diff --git a/drivers/block/brd.c b/drivers/block/brd.c
+index c18586fccb6f..17defbf4f332 100644
+--- a/drivers/block/brd.c
++++ b/drivers/block/brd.c
+@@ -96,13 +96,8 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
+ /*
+ * Must use NOIO because we don't want to recurse back into the
+ * block or filesystem layers from page reclaim.
+- *
+- * Cannot support DAX and highmem, because our ->direct_access
+- * routine for DAX must return memory that is always addressable.
+- * If DAX was reworked to use pfns and kmap throughout, this
+- * restriction might be able to be lifted.
+ */
+- gfp_flags = GFP_NOIO | __GFP_ZERO;
++ gfp_flags = GFP_NOIO | __GFP_ZERO | __GFP_HIGHMEM;
+ page = alloc_page(gfp_flags);
+ if (!page)
+ return NULL;
+diff --git a/drivers/clk/hisilicon/clk-hi3660.c b/drivers/clk/hisilicon/clk-hi3660.c
+index f40419959656..794eeff0d5d2 100644
+--- a/drivers/clk/hisilicon/clk-hi3660.c
++++ b/drivers/clk/hisilicon/clk-hi3660.c
+@@ -163,8 +163,12 @@ static const struct hisi_gate_clock hi3660_crgctrl_gate_sep_clks[] = {
+ "clk_isp_snclk_mux", CLK_SET_RATE_PARENT, 0x50, 17, 0, },
+ { HI3660_CLK_GATE_ISP_SNCLK2, "clk_gate_isp_snclk2",
+ "clk_isp_snclk_mux", CLK_SET_RATE_PARENT, 0x50, 18, 0, },
++ /*
++ * clk_gate_ufs_subsys is a system bus clock, mark it as critical
++ * clock and keep it on for system suspend and resume.
++ */
+ { HI3660_CLK_GATE_UFS_SUBSYS, "clk_gate_ufs_subsys", "clk_div_sysbus",
+- CLK_SET_RATE_PARENT, 0x50, 21, 0, },
++ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0x50, 21, 0, },
+ { HI3660_PCLK_GATE_DSI0, "pclk_gate_dsi0", "clk_div_cfgbus",
+ CLK_SET_RATE_PARENT, 0x50, 28, 0, },
+ { HI3660_PCLK_GATE_DSI1, "pclk_gate_dsi1", "clk_div_cfgbus",
+diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c
+index f54e4015b0b1..18842d660317 100644
+--- a/drivers/clk/mediatek/clk-pll.c
++++ b/drivers/clk/mediatek/clk-pll.c
+@@ -88,6 +88,32 @@ static unsigned long __mtk_pll_recalc_rate(struct mtk_clk_pll *pll, u32 fin,
+ return ((unsigned long)vco + postdiv - 1) / postdiv;
+ }
+
++static void __mtk_pll_tuner_enable(struct mtk_clk_pll *pll)
++{
++ u32 r;
++
++ if (pll->tuner_en_addr) {
++ r = readl(pll->tuner_en_addr) | BIT(pll->data->tuner_en_bit);
++ writel(r, pll->tuner_en_addr);
++ } else if (pll->tuner_addr) {
++ r = readl(pll->tuner_addr) | AUDPLL_TUNER_EN;
++ writel(r, pll->tuner_addr);
++ }
++}
++
++static void __mtk_pll_tuner_disable(struct mtk_clk_pll *pll)
++{
++ u32 r;
++
++ if (pll->tuner_en_addr) {
++ r = readl(pll->tuner_en_addr) & ~BIT(pll->data->tuner_en_bit);
++ writel(r, pll->tuner_en_addr);
++ } else if (pll->tuner_addr) {
++ r = readl(pll->tuner_addr) & ~AUDPLL_TUNER_EN;
++ writel(r, pll->tuner_addr);
++ }
++}
++
+ static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw,
+ int postdiv)
+ {
+@@ -96,6 +122,9 @@ static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw,
+
+ pll_en = readl(pll->base_addr + REG_CON0) & CON0_BASE_EN;
+
++ /* disable tuner */
++ __mtk_pll_tuner_disable(pll);
++
+ /* set postdiv */
+ val = readl(pll->pd_addr);
+ val &= ~(POSTDIV_MASK << pll->data->pd_shift);
+@@ -122,6 +151,9 @@ static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw,
+ if (pll->tuner_addr)
+ writel(con1 + 1, pll->tuner_addr);
+
++ /* restore tuner_en */
++ __mtk_pll_tuner_enable(pll);
++
+ if (pll_en)
+ udelay(20);
+ }
+@@ -228,13 +260,7 @@ static int mtk_pll_prepare(struct clk_hw *hw)
+ r |= pll->data->en_mask;
+ writel(r, pll->base_addr + REG_CON0);
+
+- if (pll->tuner_en_addr) {
+- r = readl(pll->tuner_en_addr) | BIT(pll->data->tuner_en_bit);
+- writel(r, pll->tuner_en_addr);
+- } else if (pll->tuner_addr) {
+- r = readl(pll->tuner_addr) | AUDPLL_TUNER_EN;
+- writel(r, pll->tuner_addr);
+- }
++ __mtk_pll_tuner_enable(pll);
+
+ udelay(20);
+
+@@ -258,13 +284,7 @@ static void mtk_pll_unprepare(struct clk_hw *hw)
+ writel(r, pll->base_addr + REG_CON0);
+ }
+
+- if (pll->tuner_en_addr) {
+- r = readl(pll->tuner_en_addr) & ~BIT(pll->data->tuner_en_bit);
+- writel(r, pll->tuner_en_addr);
+- } else if (pll->tuner_addr) {
+- r = readl(pll->tuner_addr) & ~AUDPLL_TUNER_EN;
+- writel(r, pll->tuner_addr);
+- }
++ __mtk_pll_tuner_disable(pll);
+
+ r = readl(pll->base_addr + REG_CON0);
+ r &= ~CON0_BASE_EN;
+diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c
+index 65ab5c2f48b0..f12142d9cea2 100644
+--- a/drivers/clk/rockchip/clk-rk3328.c
++++ b/drivers/clk/rockchip/clk-rk3328.c
+@@ -458,7 +458,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
+ RK3328_CLKSEL_CON(35), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ RK3328_CLKGATE_CON(2), 12, GFLAGS),
+ COMPOSITE(SCLK_CRYPTO, "clk_crypto", mux_2plls_p, 0,
+- RK3328_CLKSEL_CON(20), 7, 1, MFLAGS, 0, 7, DFLAGS,
++ RK3328_CLKSEL_CON(20), 7, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3328_CLKGATE_CON(2), 4, GFLAGS),
+ COMPOSITE_NOMUX(SCLK_TSADC, "clk_tsadc", "clk_24m", 0,
+ RK3328_CLKSEL_CON(22), 0, 10, DFLAGS,
+@@ -550,15 +550,15 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
+ GATE(0, "hclk_rkvenc_niu", "hclk_rkvenc", 0,
+ RK3328_CLKGATE_CON(25), 1, GFLAGS),
+ GATE(ACLK_H265, "aclk_h265", "aclk_rkvenc", 0,
+- RK3328_CLKGATE_CON(25), 0, GFLAGS),
++ RK3328_CLKGATE_CON(25), 2, GFLAGS),
+ GATE(PCLK_H265, "pclk_h265", "hclk_rkvenc", 0,
+- RK3328_CLKGATE_CON(25), 1, GFLAGS),
++ RK3328_CLKGATE_CON(25), 3, GFLAGS),
+ GATE(ACLK_H264, "aclk_h264", "aclk_rkvenc", 0,
+- RK3328_CLKGATE_CON(25), 0, GFLAGS),
++ RK3328_CLKGATE_CON(25), 4, GFLAGS),
+ GATE(HCLK_H264, "hclk_h264", "hclk_rkvenc", 0,
+- RK3328_CLKGATE_CON(25), 1, GFLAGS),
++ RK3328_CLKGATE_CON(25), 5, GFLAGS),
+ GATE(ACLK_AXISRAM, "aclk_axisram", "aclk_rkvenc", CLK_IGNORE_UNUSED,
+- RK3328_CLKGATE_CON(25), 0, GFLAGS),
++ RK3328_CLKGATE_CON(25), 6, GFLAGS),
+
+ COMPOSITE(SCLK_VENC_CORE, "sclk_venc_core", mux_4plls_p, 0,
+ RK3328_CLKSEL_CON(51), 14, 2, MFLAGS, 8, 5, DFLAGS,
+@@ -663,7 +663,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
+
+ /* PD_GMAC */
+ COMPOSITE(ACLK_GMAC, "aclk_gmac", mux_2plls_hdmiphy_p, 0,
+- RK3328_CLKSEL_CON(35), 6, 2, MFLAGS, 0, 5, DFLAGS,
++ RK3328_CLKSEL_CON(25), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3328_CLKGATE_CON(3), 2, GFLAGS),
+ COMPOSITE_NOMUX(PCLK_GMAC, "pclk_gmac", "aclk_gmac", 0,
+ RK3328_CLKSEL_CON(25), 8, 3, DFLAGS,
+@@ -733,7 +733,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
+
+ /* PD_PERI */
+ GATE(0, "aclk_peri_noc", "aclk_peri", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(19), 11, GFLAGS),
+- GATE(ACLK_USB3OTG, "aclk_usb3otg", "aclk_peri", 0, RK3328_CLKGATE_CON(19), 4, GFLAGS),
++ GATE(ACLK_USB3OTG, "aclk_usb3otg", "aclk_peri", 0, RK3328_CLKGATE_CON(19), 14, GFLAGS),
+
+ GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_peri", 0, RK3328_CLKGATE_CON(19), 0, GFLAGS),
+ GATE(HCLK_SDIO, "hclk_sdio", "hclk_peri", 0, RK3328_CLKGATE_CON(19), 1, GFLAGS),
+@@ -913,7 +913,7 @@ static void __init rk3328_clk_init(struct device_node *np)
+ &rk3328_cpuclk_data, rk3328_cpuclk_rates,
+ ARRAY_SIZE(rk3328_cpuclk_rates));
+
+- rockchip_register_softrst(np, 11, reg_base + RK3328_SOFTRST_CON(0),
++ rockchip_register_softrst(np, 12, reg_base + RK3328_SOFTRST_CON(0),
+ ROCKCHIP_SOFTRST_HIWORD_MASK);
+
+ rockchip_register_restart_notifier(ctx, RK3328_GLB_SRST_FST, NULL);
+diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c
+index 9b49adb20d07..69dfc6de1c4e 100644
+--- a/drivers/clk/sunxi-ng/ccu_nkmp.c
++++ b/drivers/clk/sunxi-ng/ccu_nkmp.c
+@@ -167,7 +167,7 @@ static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+ {
+ struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
+- u32 n_mask, k_mask, m_mask, p_mask;
++ u32 n_mask = 0, k_mask = 0, m_mask = 0, p_mask = 0;
+ struct _ccu_nkmp _nkmp;
+ unsigned long flags;
+ u32 reg;
+@@ -186,10 +186,18 @@ static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate,
+
+ ccu_nkmp_find_best(parent_rate, rate, &_nkmp);
+
+- n_mask = GENMASK(nkmp->n.width + nkmp->n.shift - 1, nkmp->n.shift);
+- k_mask = GENMASK(nkmp->k.width + nkmp->k.shift - 1, nkmp->k.shift);
+- m_mask = GENMASK(nkmp->m.width + nkmp->m.shift - 1, nkmp->m.shift);
+- p_mask = GENMASK(nkmp->p.width + nkmp->p.shift - 1, nkmp->p.shift);
++ if (nkmp->n.width)
++ n_mask = GENMASK(nkmp->n.width + nkmp->n.shift - 1,
++ nkmp->n.shift);
++ if (nkmp->k.width)
++ k_mask = GENMASK(nkmp->k.width + nkmp->k.shift - 1,
++ nkmp->k.shift);
++ if (nkmp->m.width)
++ m_mask = GENMASK(nkmp->m.width + nkmp->m.shift - 1,
++ nkmp->m.shift);
++ if (nkmp->p.width)
++ p_mask = GENMASK(nkmp->p.width + nkmp->p.shift - 1,
++ nkmp->p.shift);
+
+ spin_lock_irqsave(nkmp->common.lock, flags);
+
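
The new sunxi-ng guards matter because a factor the PLL lacks has width 0, so the old unconditional expression degenerates to GENMASK(shift - 1, shift); in the worst case that is GENMASK(-1, 0), whose expansion shifts ~0UL by the full word width, which is undefined behavior and on common compilers yields an all-ones mask instead of the empty mask intended. Sketch of the safe pattern (the factor struct with .width/.shift mirrors the nkmp fields and is illustrative):

  u32 mask = 0;

  if (factor.width)       /* only build a mask for factors that exist */
          mask = GENMASK(factor.width + factor.shift - 1, factor.shift);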
+diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
+index b50b7460014b..3e67cbcd80da 100644
+--- a/drivers/clk/tegra/clk-pll.c
++++ b/drivers/clk/tegra/clk-pll.c
+@@ -663,8 +663,8 @@ static void _update_pll_mnp(struct tegra_clk_pll *pll,
+ pll_override_writel(val, params->pmc_divp_reg, pll);
+
+ val = pll_override_readl(params->pmc_divnm_reg, pll);
+- val &= ~(divm_mask(pll) << div_nmp->override_divm_shift) |
+- ~(divn_mask(pll) << div_nmp->override_divn_shift);
++ val &= ~((divm_mask(pll) << div_nmp->override_divm_shift) |
++ (divn_mask(pll) << div_nmp->override_divn_shift));
+ val |= (cfg->m << div_nmp->override_divm_shift) |
+ (cfg->n << div_nmp->override_divn_shift);
+ pll_override_writel(val, params->pmc_divnm_reg, pll);
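
The Tegra hunk is a classic De Morgan bug: `~a | ~b` equals `~(a & b)`, which for two disjoint field masks is all ones, so the old expression cleared no bits before ORing in the new divider values. Two lines make the difference concrete (masks illustrative):

  u32 m = 0x00ff, n = 0x0f00;   /* two disjoint bit fields                */
  /* ~m | ~n  == 0xffffffff  -> val &= ... leaves every stale bit in place */
  /* ~(m | n) == 0xfffff000  -> clears both fields, as intended            */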
+diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
+index ba7aaf421f36..8ff326c0c406 100644
+--- a/drivers/hwtracing/intel_th/msu.c
++++ b/drivers/hwtracing/intel_th/msu.c
+@@ -84,6 +84,7 @@ struct msc_iter {
+ * @reg_base: register window base address
+ * @thdev: intel_th_device pointer
+ * @win_list: list of windows in multiblock mode
++ * @single_sgt: single mode buffer
+ * @nr_pages: total number of pages allocated for this buffer
+ * @single_sz: amount of data in single mode
+ * @single_wrap: single mode wrap occurred
+@@ -104,6 +105,7 @@ struct msc {
+ struct intel_th_device *thdev;
+
+ struct list_head win_list;
++ struct sg_table single_sgt;
+ unsigned long nr_pages;
+ unsigned long single_sz;
+ unsigned int single_wrap : 1;
+@@ -617,22 +619,45 @@ static void intel_th_msc_deactivate(struct intel_th_device *thdev)
+ */
+ static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
+ {
++ unsigned long nr_pages = size >> PAGE_SHIFT;
+ unsigned int order = get_order(size);
+ struct page *page;
++ int ret;
+
+ if (!size)
+ return 0;
+
++ ret = sg_alloc_table(&msc->single_sgt, 1, GFP_KERNEL);
++ if (ret)
++ goto err_out;
++
++ ret = -ENOMEM;
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!page)
+- return -ENOMEM;
++ goto err_free_sgt;
+
+ split_page(page, order);
+- msc->nr_pages = size >> PAGE_SHIFT;
++ sg_set_buf(msc->single_sgt.sgl, page_address(page), size);
++
++ ret = dma_map_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, 1,
++ DMA_FROM_DEVICE);
++ if (ret < 0)
++ goto err_free_pages;
++
++ msc->nr_pages = nr_pages;
+ msc->base = page_address(page);
+- msc->base_addr = page_to_phys(page);
++ msc->base_addr = sg_dma_address(msc->single_sgt.sgl);
+
+ return 0;
++
++err_free_pages:
++ __free_pages(page, order);
++
++err_free_sgt:
++ sg_free_table(&msc->single_sgt);
++
++err_out:
++ return ret;
+ }
+
+ /**
+@@ -643,6 +668,10 @@ static void msc_buffer_contig_free(struct msc *msc)
+ {
+ unsigned long off;
+
++ dma_unmap_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl,
++ 1, DMA_FROM_DEVICE);
++ sg_free_table(&msc->single_sgt);
++
+ for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
+ struct page *page = virt_to_page(msc->base + off);
+
+diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
+index c7ba8acfd4d5..e55b902560de 100644
+--- a/drivers/hwtracing/stm/core.c
++++ b/drivers/hwtracing/stm/core.c
+@@ -166,11 +166,10 @@ stm_master(struct stm_device *stm, unsigned int idx)
+ static int stp_master_alloc(struct stm_device *stm, unsigned int idx)
+ {
+ struct stp_master *master;
+- size_t size;
+
+- size = ALIGN(stm->data->sw_nchannels, 8) / 8;
+- size += sizeof(struct stp_master);
+- master = kzalloc(size, GFP_ATOMIC);
++ master = kzalloc(struct_size(master, chan_map,
++ BITS_TO_LONGS(stm->data->sw_nchannels)),
++ GFP_ATOMIC);
+ if (!master)
+ return -ENOMEM;
+
+@@ -218,8 +217,8 @@ stm_output_disclaim(struct stm_device *stm, struct stm_output *output)
+ bitmap_release_region(&master->chan_map[0], output->channel,
+ ilog2(output->nr_chans));
+
+- output->nr_chans = 0;
+ master->nr_free += output->nr_chans;
++ output->nr_chans = 0;
+
+ /*
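
struct_size(p, member, n), used in the stm hunk above, computes sizeof(*p) plus storage for n trailing flexible-array elements with overflow checking, replacing the open-coded ALIGN arithmetic. A minimal sketch of the idiom (struct layout illustrative):

  struct master {
          unsigned int nr_free;
          unsigned long chan_map[];       /* flexible array member */
  };

  m = kzalloc(struct_size(m, chan_map, BITS_TO_LONGS(nchannels)),
              GFP_ATOMIC);

The second stm hunk is a plain ordering fix: nr_free must be credited before output->nr_chans is zeroed, or the count of released channels is lost.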
+diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
+index bb8e3f149979..d464799e40a3 100644
+--- a/drivers/i2c/busses/i2c-designware-master.c
++++ b/drivers/i2c/busses/i2c-designware-master.c
+@@ -426,8 +426,7 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
+
+ pm_runtime_get_sync(dev->dev);
+
+- if (dev->suspended) {
+- dev_err(dev->dev, "Error %s call while suspended\n", __func__);
++ if (dev_WARN_ONCE(dev->dev, dev->suspended, "Transfer while suspended\n")) {
+ ret = -ESHUTDOWN;
+ goto done_nolock;
+ }
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index c6bdd0d16c4b..ca91f90b4ccc 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -1986,11 +1986,12 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
+ return -EPERM;
+ vma->vm_flags &= ~VM_MAYWRITE;
+
+- if (!dev->mdev->clock_info_page)
++ if (!dev->mdev->clock_info)
+ return -EOPNOTSUPP;
+
+ return rdma_user_mmap_page(&context->ibucontext, vma,
+- dev->mdev->clock_info_page, PAGE_SIZE);
++ virt_to_page(dev->mdev->clock_info),
++ PAGE_SIZE);
+ }
+
+ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+index d932f99201d1..1851bc5e05ae 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+@@ -2402,7 +2402,18 @@ static ssize_t dev_id_show(struct device *dev,
+ {
+ struct net_device *ndev = to_net_dev(dev);
+
+- if (ndev->dev_id == ndev->dev_port)
++ /*
++ * ndev->dev_port will be equal to 0 in old kernel prior to commit
++ * 9b8b2a323008 ("IB/ipoib: Use dev_port to expose network interface
++ * port numbers") Zero was chosen as special case for user space
++ * applications to fallback and query dev_id to check if it has
++ * different value or not.
++ *
++ * Don't print warning in such scenario.
++ *
++ * https://github.com/systemd/systemd/blob/master/src/udev/udev-builtin-net_id.c#L358
++ */
++ if (ndev->dev_port && ndev->dev_id == ndev->dev_port)
+ netdev_info_once(ndev,
+ "\"%s\" wants to know my dev_id. Should it look at dev_port instead? See Documentation/ABI/testing/sysfs-class-net for more info.\n",
+ current->comm);
+diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
+index 3a5c7dc6dc57..43fe59642930 100644
+--- a/drivers/iommu/tegra-smmu.c
++++ b/drivers/iommu/tegra-smmu.c
+@@ -102,7 +102,6 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
+ #define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0)
+ #define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
+ #define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0)
+-#define SMMU_TLB_FLUSH_ASID(x) (((x) & 0x7f) << 24)
+ #define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
+ SMMU_TLB_FLUSH_VA_MATCH_SECTION)
+ #define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \
+@@ -205,8 +204,12 @@ static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
+ {
+ u32 value;
+
+- value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
+- SMMU_TLB_FLUSH_VA_MATCH_ALL;
++ if (smmu->soc->num_asids == 4)
++ value = (asid & 0x3) << 29;
++ else
++ value = (asid & 0x7f) << 24;
++
++ value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL;
+ smmu_writel(smmu, value, SMMU_TLB_FLUSH);
+ }
+
+@@ -216,8 +219,12 @@ static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
+ {
+ u32 value;
+
+- value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
+- SMMU_TLB_FLUSH_VA_SECTION(iova);
++ if (smmu->soc->num_asids == 4)
++ value = (asid & 0x3) << 29;
++ else
++ value = (asid & 0x7f) << 24;
++
++ value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova);
+ smmu_writel(smmu, value, SMMU_TLB_FLUSH);
+ }
+
+@@ -227,8 +234,12 @@ static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
+ {
+ u32 value;
+
+- value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
+- SMMU_TLB_FLUSH_VA_GROUP(iova);
++ if (smmu->soc->num_asids == 4)
++ value = (asid & 0x3) << 29;
++ else
++ value = (asid & 0x7f) << 24;
++
++ value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova);
+ smmu_writel(smmu, value, SMMU_TLB_FLUSH);
+ }
+
+diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
+index 6fc93834da44..151aa95775be 100644
+--- a/drivers/md/dm-cache-metadata.c
++++ b/drivers/md/dm-cache-metadata.c
+@@ -1167,11 +1167,18 @@ static int __load_discards(struct dm_cache_metadata *cmd,
+ if (r)
+ return r;
+
+- for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) {
++ for (b = 0; ; b++) {
+ r = fn(context, cmd->discard_block_size, to_dblock(b),
+ dm_bitset_cursor_get_value(&c));
+ if (r)
+ break;
++
++ if (b >= (from_dblock(cmd->discard_nr_blocks) - 1))
++ break;
++
++ r = dm_bitset_cursor_next(&c);
++ if (r)
++ break;
+ }
+
+ dm_bitset_cursor_end(&c);
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index dd538e6b2748..df39b07de800 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -949,6 +949,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
+ {
+ #ifdef CONFIG_BLK_DEV_INTEGRITY
+ struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk);
++ struct mapped_device *md = dm_table_get_md(ti->table);
+
+ /* From now we require underlying device with our integrity profile */
+ if (!bi || strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) {
+@@ -968,7 +969,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
+
+ if (crypt_integrity_aead(cc)) {
+ cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size;
+- DMINFO("Integrity AEAD, tag size %u, IV size %u.",
++ DMDEBUG("%s: Integrity AEAD, tag size %u, IV size %u.", dm_device_name(md),
+ cc->integrity_tag_size, cc->integrity_iv_size);
+
+ if (crypto_aead_setauthsize(any_tfm_aead(cc), cc->integrity_tag_size)) {
+@@ -976,7 +977,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
+ return -EINVAL;
+ }
+ } else if (cc->integrity_iv_size)
+- DMINFO("Additional per-sector space %u bytes for IV.",
++ DMDEBUG("%s: Additional per-sector space %u bytes for IV.", dm_device_name(md),
+ cc->integrity_iv_size);
+
+ if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) {
+@@ -1890,7 +1891,7 @@ static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
+ * algorithm implementation is used. Help people debug performance
+ * problems by logging the ->cra_driver_name.
+ */
+- DMINFO("%s using implementation \"%s\"", ciphermode,
++ DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
+ crypto_skcipher_alg(any_tfm(cc))->base.cra_driver_name);
+ return 0;
+ }
+@@ -1910,7 +1911,7 @@ static int crypt_alloc_tfms_aead(struct crypt_config *cc, char *ciphermode)
+ return err;
+ }
+
+- DMINFO("%s using implementation \"%s\"", ciphermode,
++ DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
+ crypto_aead_alg(any_tfm_aead(cc))->base.cra_driver_name);
+ return 0;
+ }
+diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
+index fddffe251bf6..f496213f8b67 100644
+--- a/drivers/md/dm-delay.c
++++ b/drivers/md/dm-delay.c
+@@ -121,7 +121,8 @@ static void delay_dtr(struct dm_target *ti)
+ {
+ struct delay_c *dc = ti->private;
+
+- destroy_workqueue(dc->kdelayd_wq);
++ if (dc->kdelayd_wq)
++ destroy_workqueue(dc->kdelayd_wq);
+
+ if (dc->read.dev)
+ dm_put_device(ti, dc->read.dev);
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index f535fd8ac82d..a4fe187d50d0 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -2568,7 +2568,7 @@ static int calculate_device_limits(struct dm_integrity_c *ic)
+ if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
+ return -EINVAL;
+ } else {
+- __u64 meta_size = ic->provided_data_sectors * ic->tag_size;
++ __u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
+ meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
+ >> (ic->log2_buffer_sectors + SECTOR_SHIFT);
+ meta_size <<= ic->log2_buffer_sectors;
+@@ -3439,7 +3439,7 @@ try_smaller_buffer:
+ DEBUG_print(" journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
+ DEBUG_print(" journal_entries %u\n", ic->journal_entries);
+ DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
+- DEBUG_print(" device_sectors 0x%llx\n", (unsigned long long)ic->device_sectors);
++ DEBUG_print(" data_device_sectors 0x%llx\n", (unsigned long long)ic->data_device_sectors);
+ DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors);
+ DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run);
+ DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run);
1435 +index 2ee5e357a0a7..cc5173dfd466 100644
1436 +--- a/drivers/md/dm-mpath.c
1437 ++++ b/drivers/md/dm-mpath.c
1438 +@@ -882,6 +882,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
1439 + if (attached_handler_name || m->hw_handler_name) {
1440 + INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
1441 + r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
1442 ++ kfree(attached_handler_name);
1443 + if (r) {
1444 + dm_put_device(ti, p->path.dev);
1445 + goto bad;
1446 +@@ -896,7 +897,6 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
1447 +
1448 + return p;
1449 + bad:
1450 +- kfree(attached_handler_name);
1451 + free_pgpath(p);
1452 + return ERR_PTR(r);
1453 + }
1454 +diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
1455 +index fa68336560c3..d8334cd45d7c 100644
1456 +--- a/drivers/md/dm-zoned-metadata.c
1457 ++++ b/drivers/md/dm-zoned-metadata.c
1458 +@@ -1169,6 +1169,9 @@ static int dmz_init_zones(struct dmz_metadata *zmd)
1459 + goto out;
1460 + }
1461 +
1462 ++ if (!nr_blkz)
1463 ++ break;
1464 ++
1465 + /* Process report */
1466 + for (i = 0; i < nr_blkz; i++) {
1467 + ret = dmz_init_zone(zmd, zone, &blkz[i]);
1468 +@@ -1204,6 +1207,8 @@ static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
1469 + /* Get zone information from disk */
1470 + ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone),
1471 + &blkz, &nr_blkz, GFP_NOIO);
1472 ++ if (!nr_blkz)
1473 ++ ret = -EIO;
1474 + if (ret) {
1475 + dmz_dev_err(zmd->dev, "Get zone %u report failed",
1476 + dmz_id(zmd, zone));
1477 +diff --git a/drivers/md/md.c b/drivers/md/md.c
1478 +index 05ffffb8b769..295ff09cff4c 100644
1479 +--- a/drivers/md/md.c
1480 ++++ b/drivers/md/md.c
1481 +@@ -132,24 +132,6 @@ static inline int speed_max(struct mddev *mddev)
1482 + mddev->sync_speed_max : sysctl_speed_limit_max;
1483 + }
1484 +
1485 +-static void * flush_info_alloc(gfp_t gfp_flags, void *data)
1486 +-{
1487 +- return kzalloc(sizeof(struct flush_info), gfp_flags);
1488 +-}
1489 +-static void flush_info_free(void *flush_info, void *data)
1490 +-{
1491 +- kfree(flush_info);
1492 +-}
1493 +-
1494 +-static void * flush_bio_alloc(gfp_t gfp_flags, void *data)
1495 +-{
1496 +- return kzalloc(sizeof(struct flush_bio), gfp_flags);
1497 +-}
1498 +-static void flush_bio_free(void *flush_bio, void *data)
1499 +-{
1500 +- kfree(flush_bio);
1501 +-}
1502 +-
1503 + static struct ctl_table_header *raid_table_header;
1504 +
1505 + static struct ctl_table raid_table[] = {
1506 +@@ -423,54 +405,31 @@ static int md_congested(void *data, int bits)
1507 + /*
1508 + * Generic flush handling for md
1509 + */
1510 +-static void submit_flushes(struct work_struct *ws)
1511 +-{
1512 +- struct flush_info *fi = container_of(ws, struct flush_info, flush_work);
1513 +- struct mddev *mddev = fi->mddev;
1514 +- struct bio *bio = fi->bio;
1515 +-
1516 +- bio->bi_opf &= ~REQ_PREFLUSH;
1517 +- md_handle_request(mddev, bio);
1518 +-
1519 +- mempool_free(fi, mddev->flush_pool);
1520 +-}
1521 +
1522 +-static void md_end_flush(struct bio *fbio)
1523 ++static void md_end_flush(struct bio *bio)
1524 + {
1525 +- struct flush_bio *fb = fbio->bi_private;
1526 +- struct md_rdev *rdev = fb->rdev;
1527 +- struct flush_info *fi = fb->fi;
1528 +- struct bio *bio = fi->bio;
1529 +- struct mddev *mddev = fi->mddev;
1530 ++ struct md_rdev *rdev = bio->bi_private;
1531 ++ struct mddev *mddev = rdev->mddev;
1532 +
1533 + rdev_dec_pending(rdev, mddev);
1534 +
1535 +- if (atomic_dec_and_test(&fi->flush_pending)) {
1536 +- if (bio->bi_iter.bi_size == 0) {
1537 +- /* an empty barrier - all done */
1538 +- bio_endio(bio);
1539 +- mempool_free(fi, mddev->flush_pool);
1540 +- } else {
1541 +- INIT_WORK(&fi->flush_work, submit_flushes);
1542 +- queue_work(md_wq, &fi->flush_work);
1543 +- }
1544 ++ if (atomic_dec_and_test(&mddev->flush_pending)) {
1545 ++ /* The pre-request flush has finished */
1546 ++ queue_work(md_wq, &mddev->flush_work);
1547 + }
1548 +-
1549 +- mempool_free(fb, mddev->flush_bio_pool);
1550 +- bio_put(fbio);
1551 ++ bio_put(bio);
1552 + }
1553 +
1554 +-void md_flush_request(struct mddev *mddev, struct bio *bio)
1555 ++static void md_submit_flush_data(struct work_struct *ws);
1556 ++
1557 ++static void submit_flushes(struct work_struct *ws)
1558 + {
1559 ++ struct mddev *mddev = container_of(ws, struct mddev, flush_work);
1560 + struct md_rdev *rdev;
1561 +- struct flush_info *fi;
1562 +-
1563 +- fi = mempool_alloc(mddev->flush_pool, GFP_NOIO);
1564 +-
1565 +- fi->bio = bio;
1566 +- fi->mddev = mddev;
1567 +- atomic_set(&fi->flush_pending, 1);
1568 +
1569 ++ mddev->start_flush = ktime_get_boottime();
1570 ++ INIT_WORK(&mddev->flush_work, md_submit_flush_data);
1571 ++ atomic_set(&mddev->flush_pending, 1);
1572 + rcu_read_lock();
1573 + rdev_for_each_rcu(rdev, mddev)
1574 + if (rdev->raid_disk >= 0 &&
1575 +@@ -480,37 +439,74 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
1576 + * we reclaim rcu_read_lock
1577 + */
1578 + struct bio *bi;
1579 +- struct flush_bio *fb;
1580 + atomic_inc(&rdev->nr_pending);
1581 + atomic_inc(&rdev->nr_pending);
1582 + rcu_read_unlock();
1583 +-
1584 +- fb = mempool_alloc(mddev->flush_bio_pool, GFP_NOIO);
1585 +- fb->fi = fi;
1586 +- fb->rdev = rdev;
1587 +-
1588 + bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
1589 +- bio_set_dev(bi, rdev->bdev);
1590 + bi->bi_end_io = md_end_flush;
1591 +- bi->bi_private = fb;
1592 ++ bi->bi_private = rdev;
1593 ++ bio_set_dev(bi, rdev->bdev);
1594 + bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
1595 +-
1596 +- atomic_inc(&fi->flush_pending);
1597 ++ atomic_inc(&mddev->flush_pending);
1598 + submit_bio(bi);
1599 +-
1600 + rcu_read_lock();
1601 + rdev_dec_pending(rdev, mddev);
1602 + }
1603 + rcu_read_unlock();
1604 ++ if (atomic_dec_and_test(&mddev->flush_pending))
1605 ++ queue_work(md_wq, &mddev->flush_work);
1606 ++}
1607 ++
1608 ++static void md_submit_flush_data(struct work_struct *ws)
1609 ++{
1610 ++ struct mddev *mddev = container_of(ws, struct mddev, flush_work);
1611 ++ struct bio *bio = mddev->flush_bio;
1612 ++
1613 ++ /*
1614 ++ * must reset flush_bio before calling into md_handle_request to avoid a
1615 ++ * deadlock, because other bios passed md_handle_request suspend check
1616 ++ * could wait for this and below md_handle_request could wait for those
1617 ++ * bios because of suspend check
1618 ++ */
1619 ++ mddev->last_flush = mddev->start_flush;
1620 ++ mddev->flush_bio = NULL;
1621 ++ wake_up(&mddev->sb_wait);
1622 ++
1623 ++ if (bio->bi_iter.bi_size == 0) {
1624 ++ /* an empty barrier - all done */
1625 ++ bio_endio(bio);
1626 ++ } else {
1627 ++ bio->bi_opf &= ~REQ_PREFLUSH;
1628 ++ md_handle_request(mddev, bio);
1629 ++ }
1630 ++}
1631 +
1632 +- if (atomic_dec_and_test(&fi->flush_pending)) {
1633 +- if (bio->bi_iter.bi_size == 0) {
1634 ++void md_flush_request(struct mddev *mddev, struct bio *bio)
1635 ++{
1636 ++ ktime_t start = ktime_get_boottime();
1637 ++ spin_lock_irq(&mddev->lock);
1638 ++ wait_event_lock_irq(mddev->sb_wait,
1639 ++ !mddev->flush_bio ||
1640 ++ ktime_after(mddev->last_flush, start),
1641 ++ mddev->lock);
1642 ++ if (!ktime_after(mddev->last_flush, start)) {
1643 ++ WARN_ON(mddev->flush_bio);
1644 ++ mddev->flush_bio = bio;
1645 ++ bio = NULL;
1646 ++ }
1647 ++ spin_unlock_irq(&mddev->lock);
1648 ++
1649 ++ if (!bio) {
1650 ++ INIT_WORK(&mddev->flush_work, submit_flushes);
1651 ++ queue_work(md_wq, &mddev->flush_work);
1652 ++ } else {
1653 ++ /* flush was performed for some other bio while we waited. */
1654 ++ if (bio->bi_iter.bi_size == 0)
1655 + /* an empty barrier - all done */
1656 + bio_endio(bio);
1657 +- mempool_free(fi, mddev->flush_pool);
1658 +- } else {
1659 +- INIT_WORK(&fi->flush_work, submit_flushes);
1660 +- queue_work(md_wq, &fi->flush_work);
1661 ++ else {
1662 ++ bio->bi_opf &= ~REQ_PREFLUSH;
1663 ++ mddev->pers->make_request(mddev, bio);
1664 + }
1665 + }
1666 + }
1667 +@@ -560,6 +556,7 @@ void mddev_init(struct mddev *mddev)
1668 + atomic_set(&mddev->openers, 0);
1669 + atomic_set(&mddev->active_io, 0);
1670 + spin_lock_init(&mddev->lock);
1671 ++ atomic_set(&mddev->flush_pending, 0);
1672 + init_waitqueue_head(&mddev->sb_wait);
1673 + init_waitqueue_head(&mddev->recovery_wait);
1674 + mddev->reshape_position = MaxSector;
1675 +@@ -2855,8 +2852,10 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
1676 + err = 0;
1677 + }
1678 + } else if (cmd_match(buf, "re-add")) {
1679 +- if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
1680 +- rdev->saved_raid_disk >= 0) {
1681 ++ if (!rdev->mddev->pers)
1682 ++ err = -EINVAL;
1683 ++ else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
1684 ++ rdev->saved_raid_disk >= 0) {
1685 + /* clear_bit is performed _after_ all the devices
1686 + * have their local Faulty bit cleared. If any writes
1687 + * happen in the meantime in the local node, they
1688 +@@ -5511,22 +5510,6 @@ int md_run(struct mddev *mddev)
1689 + if (err)
1690 + return err;
1691 + }
1692 +- if (mddev->flush_pool == NULL) {
1693 +- mddev->flush_pool = mempool_create(NR_FLUSH_INFOS, flush_info_alloc,
1694 +- flush_info_free, mddev);
1695 +- if (!mddev->flush_pool) {
1696 +- err = -ENOMEM;
1697 +- goto abort;
1698 +- }
1699 +- }
1700 +- if (mddev->flush_bio_pool == NULL) {
1701 +- mddev->flush_bio_pool = mempool_create(NR_FLUSH_BIOS, flush_bio_alloc,
1702 +- flush_bio_free, mddev);
1703 +- if (!mddev->flush_bio_pool) {
1704 +- err = -ENOMEM;
1705 +- goto abort;
1706 +- }
1707 +- }
1708 +
1709 + spin_lock(&pers_lock);
1710 + pers = find_pers(mddev->level, mddev->clevel);
1711 +@@ -5686,11 +5669,8 @@ int md_run(struct mddev *mddev)
1712 + return 0;
1713 +
1714 + abort:
1715 +- mempool_destroy(mddev->flush_bio_pool);
1716 +- mddev->flush_bio_pool = NULL;
1717 +- mempool_destroy(mddev->flush_pool);
1718 +- mddev->flush_pool = NULL;
1719 +-
1720 ++ bioset_exit(&mddev->bio_set);
1721 ++ bioset_exit(&mddev->sync_set);
1722 + return err;
1723 + }
1724 + EXPORT_SYMBOL_GPL(md_run);
1725 +@@ -5894,14 +5874,6 @@ static void __md_stop(struct mddev *mddev)
1726 + mddev->to_remove = &md_redundancy_group;
1727 + module_put(pers->owner);
1728 + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
1729 +- if (mddev->flush_bio_pool) {
1730 +- mempool_destroy(mddev->flush_bio_pool);
1731 +- mddev->flush_bio_pool = NULL;
1732 +- }
1733 +- if (mddev->flush_pool) {
1734 +- mempool_destroy(mddev->flush_pool);
1735 +- mddev->flush_pool = NULL;
1736 +- }
1737 + }
1738 +
1739 + void md_stop(struct mddev *mddev)
1740 +@@ -9257,7 +9229,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
1741 + * reshape is happening in the remote node, we need to
1742 + * update reshape_position and call start_reshape.
1743 + */
1744 +- mddev->reshape_position = sb->reshape_position;
1745 ++ mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1746 + if (mddev->pers->update_reshape_pos)
1747 + mddev->pers->update_reshape_pos(mddev);
1748 + if (mddev->pers->start_reshape)
1749 +diff --git a/drivers/md/md.h b/drivers/md/md.h
1750 +index c52afb52c776..257cb4c9e22b 100644
1751 +--- a/drivers/md/md.h
1752 ++++ b/drivers/md/md.h
1753 +@@ -252,19 +252,6 @@ enum mddev_sb_flags {
1754 + MD_SB_NEED_REWRITE, /* metadata write needs to be repeated */
1755 + };
1756 +
1757 +-#define NR_FLUSH_INFOS 8
1758 +-#define NR_FLUSH_BIOS 64
1759 +-struct flush_info {
1760 +- struct bio *bio;
1761 +- struct mddev *mddev;
1762 +- struct work_struct flush_work;
1763 +- atomic_t flush_pending;
1764 +-};
1765 +-struct flush_bio {
1766 +- struct flush_info *fi;
1767 +- struct md_rdev *rdev;
1768 +-};
1769 +-
1770 + struct mddev {
1771 + void *private;
1772 + struct md_personality *pers;
1773 +@@ -470,8 +457,16 @@ struct mddev {
1774 + * metadata and bitmap writes
1775 + */
1776 +
1777 +- mempool_t *flush_pool;
1778 +- mempool_t *flush_bio_pool;
1779 ++ /* Generic flush handling.
1780 ++ * The last to finish preflush schedules a worker to submit
1781 ++ * the rest of the request (without the REQ_PREFLUSH flag).
1782 ++ */
1783 ++ struct bio *flush_bio;
1784 ++ atomic_t flush_pending;
1785 ++ ktime_t start_flush, last_flush; /* last_flush is when the last completed
1786 ++ * flush was started.
1787 ++ */
1788 ++ struct work_struct flush_work;
1789 + struct work_struct event_work; /* used by dm to report failure event */
1790 + void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
1791 + struct md_cluster_info *cluster_info;
1792 +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
1793 +index 3ae13c06b200..f9c90ab220b9 100644
1794 +--- a/drivers/md/raid5.c
1795 ++++ b/drivers/md/raid5.c
1796 +@@ -4197,7 +4197,7 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
1797 + /* now write out any block on a failed drive,
1798 + * or P or Q if they were recomputed
1799 + */
1800 +- BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
1801 ++ dev = NULL;
1802 + if (s->failed == 2) {
1803 + dev = &sh->dev[s->failed_num[1]];
1804 + s->locked++;
1805 +@@ -4222,6 +4222,14 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
1806 + set_bit(R5_LOCKED, &dev->flags);
1807 + set_bit(R5_Wantwrite, &dev->flags);
1808 + }
1809 ++ if (WARN_ONCE(dev && !test_bit(R5_UPTODATE, &dev->flags),
1810 ++ "%s: disk%td not up to date\n",
1811 ++ mdname(conf->mddev),
1812 ++ dev - (struct r5dev *) &sh->dev)) {
1813 ++ clear_bit(R5_LOCKED, &dev->flags);
1814 ++ clear_bit(R5_Wantwrite, &dev->flags);
1815 ++ s->locked--;
1816 ++ }
1817 + clear_bit(STRIPE_DEGRADED, &sh->state);
1818 +
1819 + set_bit(STRIPE_INSYNC, &sh->state);
1820 +@@ -4233,15 +4241,26 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
1821 + case check_state_check_result:
1822 + sh->check_state = check_state_idle;
1823 +
1824 +- if (s->failed > 1)
1825 +- break;
1826 + /* handle a successful check operation, if parity is correct
1827 + * we are done. Otherwise update the mismatch count and repair
1828 + * parity if !MD_RECOVERY_CHECK
1829 + */
1830 + if (sh->ops.zero_sum_result == 0) {
1831 +- /* Any parity checked was correct */
1832 +- set_bit(STRIPE_INSYNC, &sh->state);
1833 ++ /* both parities are correct */
1834 ++ if (!s->failed)
1835 ++ set_bit(STRIPE_INSYNC, &sh->state);
1836 ++ else {
1837 ++ /* in contrast to the raid5 case we can validate
1838 ++ * parity, but still have a failure to write
1839 ++ * back
1840 ++ */
1841 ++ sh->check_state = check_state_compute_result;
1842 ++ /* Returning at this point means that we may go
1843 ++ * off and bring p and/or q uptodate again so
1844 ++ * we make sure to check zero_sum_result again
1845 ++ * to verify if p or q need writeback
1846 ++ */
1847 ++ }
1848 + } else {
1849 + atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
1850 + if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) {
1851 +diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
1852 +index 5d1b218bb7f0..2d3f7e00b129 100644
1853 +--- a/drivers/media/i2c/ov6650.c
1854 ++++ b/drivers/media/i2c/ov6650.c
1855 +@@ -814,6 +814,8 @@ static int ov6650_video_probe(struct i2c_client *client)
1856 + if (ret < 0)
1857 + return ret;
1858 +
1859 ++ msleep(20);
1860 ++
1861 + /*
1862 + * check and show product ID and manufacturer ID
1863 + */
1864 +diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
1865 +index 24afc36833bf..1608a482f681 100644
1866 +--- a/drivers/memory/tegra/mc.c
1867 ++++ b/drivers/memory/tegra/mc.c
1868 +@@ -280,7 +280,7 @@ static int tegra_mc_setup_latency_allowance(struct tegra_mc *mc)
1869 + u32 value;
1870 +
1871 + /* compute the number of MC clock cycles per tick */
1872 +- tick = mc->tick * clk_get_rate(mc->clk);
1873 ++ tick = (unsigned long long)mc->tick * clk_get_rate(mc->clk);
1874 + do_div(tick, NSEC_PER_SEC);
1875 +
1876 + value = readl(mc->regs + MC_EMEM_ARB_CFG);
1877 +diff --git a/drivers/net/Makefile b/drivers/net/Makefile
1878 +index 21cde7e78621..0d3ba056cda3 100644
1879 +--- a/drivers/net/Makefile
1880 ++++ b/drivers/net/Makefile
1881 +@@ -40,7 +40,7 @@ obj-$(CONFIG_ARCNET) += arcnet/
1882 + obj-$(CONFIG_DEV_APPLETALK) += appletalk/
1883 + obj-$(CONFIG_CAIF) += caif/
1884 + obj-$(CONFIG_CAN) += can/
1885 +-obj-$(CONFIG_NET_DSA) += dsa/
1886 ++obj-y += dsa/
1887 + obj-$(CONFIG_ETHERNET) += ethernet/
1888 + obj-$(CONFIG_FDDI) += fddi/
1889 + obj-$(CONFIG_HIPPI) += hippi/
1890 +diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
1891 +index ffed2d4c9403..9c481823b3e8 100644
1892 +--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
1893 ++++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
1894 +@@ -1492,7 +1492,7 @@ int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port,
1895 + rule.port = port;
1896 + rule.qpn = qpn;
1897 + INIT_LIST_HEAD(&rule.list);
1898 +- mlx4_err(dev, "going promisc on %x\n", port);
1899 ++ mlx4_info(dev, "going promisc on %x\n", port);
1900 +
1901 + return mlx4_flow_attach(dev, &rule, regid_p);
1902 + }
1903 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
1904 +index 37a551436e4a..b7e3b8902e7e 100644
1905 +--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
1906 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
1907 +@@ -8,6 +8,7 @@ config MLX5_CORE
1908 + depends on PCI
1909 + imply PTP_1588_CLOCK
1910 + imply VXLAN
1911 ++ imply MLXFW
1912 + default n
1913 + ---help---
1914 + Core driver for low level functionality of the ConnectX-4 and
1915 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
1916 +index 253496c4a3db..a908e29ddb7b 100644
1917 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
1918 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
1919 +@@ -1802,6 +1802,22 @@ static int mlx5e_flash_device(struct net_device *dev,
1920 + return mlx5e_ethtool_flash_device(priv, flash);
1921 + }
1922 +
1923 ++#ifndef CONFIG_MLX5_EN_RXNFC
1924 ++/* When CONFIG_MLX5_EN_RXNFC=n we only support ETHTOOL_GRXRINGS
1925 ++ * otherwise this function will be defined from en_fs_ethtool.c
1926 ++ */
1927 ++static int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
1928 ++{
1929 ++ struct mlx5e_priv *priv = netdev_priv(dev);
1930 ++
1931 ++ if (info->cmd != ETHTOOL_GRXRINGS)
1932 ++ return -EOPNOTSUPP;
1933 ++ /* ring_count is needed by ethtool -x */
1934 ++ info->data = priv->channels.params.num_channels;
1935 ++ return 0;
1936 ++}
1937 ++#endif
1938 ++
1939 + const struct ethtool_ops mlx5e_ethtool_ops = {
1940 + .get_drvinfo = mlx5e_get_drvinfo,
1941 + .get_link = ethtool_op_get_link,
1942 +@@ -1820,8 +1836,8 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
1943 + .get_rxfh_indir_size = mlx5e_get_rxfh_indir_size,
1944 + .get_rxfh = mlx5e_get_rxfh,
1945 + .set_rxfh = mlx5e_set_rxfh,
1946 +-#ifdef CONFIG_MLX5_EN_RXNFC
1947 + .get_rxnfc = mlx5e_get_rxnfc,
1948 ++#ifdef CONFIG_MLX5_EN_RXNFC
1949 + .set_rxnfc = mlx5e_set_rxnfc,
1950 + #endif
1951 + .flash_device = mlx5e_flash_device,
1952 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
1953 +index ef9e472daffb..3977f763b6ed 100644
1954 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
1955 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
1956 +@@ -64,9 +64,26 @@ static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
1957 + static void mlx5e_rep_get_drvinfo(struct net_device *dev,
1958 + struct ethtool_drvinfo *drvinfo)
1959 + {
1960 ++ struct mlx5e_priv *priv = netdev_priv(dev);
1961 ++ struct mlx5_core_dev *mdev = priv->mdev;
1962 ++
1963 + strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
1964 + sizeof(drvinfo->driver));
1965 + strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
1966 ++ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
1967 ++ "%d.%d.%04d (%.16s)",
1968 ++ fw_rev_maj(mdev), fw_rev_min(mdev),
1969 ++ fw_rev_sub(mdev), mdev->board_id);
1970 ++}
1971 ++
1972 ++static void mlx5e_uplink_rep_get_drvinfo(struct net_device *dev,
1973 ++ struct ethtool_drvinfo *drvinfo)
1974 ++{
1975 ++ struct mlx5e_priv *priv = netdev_priv(dev);
1976 ++
1977 ++ mlx5e_rep_get_drvinfo(dev, drvinfo);
1978 ++ strlcpy(drvinfo->bus_info, pci_name(priv->mdev->pdev),
1979 ++ sizeof(drvinfo->bus_info));
1980 + }
1981 +
1982 + static const struct counter_desc sw_rep_stats_desc[] = {
1983 +@@ -374,7 +391,7 @@ static const struct ethtool_ops mlx5e_vf_rep_ethtool_ops = {
1984 + };
1985 +
1986 + static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
1987 +- .get_drvinfo = mlx5e_rep_get_drvinfo,
1988 ++ .get_drvinfo = mlx5e_uplink_rep_get_drvinfo,
1989 + .get_link = ethtool_op_get_link,
1990 + .get_strings = mlx5e_rep_get_strings,
1991 + .get_sset_count = mlx5e_rep_get_sset_count,
1992 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1993 +index 79f122b45def..abbdd4906984 100644
1994 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1995 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1996 +@@ -1375,6 +1375,8 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
1997 + if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
1998 + d1->vport.num == d2->vport.num &&
1999 + d1->vport.flags == d2->vport.flags &&
2000 ++ ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
2001 ++ (d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
2002 + ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
2003 + (d1->vport.reformat_id == d2->vport.reformat_id) : true)) ||
2004 + (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
2005 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
2006 +index ca0ee9916e9e..0059b290e095 100644
2007 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
2008 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
2009 +@@ -535,23 +535,16 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
2010 + do_div(ns, NSEC_PER_SEC / HZ);
2011 + clock->overflow_period = ns;
2012 +
2013 +- mdev->clock_info_page = alloc_page(GFP_KERNEL);
2014 +- if (mdev->clock_info_page) {
2015 +- mdev->clock_info = kmap(mdev->clock_info_page);
2016 +- if (!mdev->clock_info) {
2017 +- __free_page(mdev->clock_info_page);
2018 +- mlx5_core_warn(mdev, "failed to map clock page\n");
2019 +- } else {
2020 +- mdev->clock_info->sign = 0;
2021 +- mdev->clock_info->nsec = clock->tc.nsec;
2022 +- mdev->clock_info->cycles = clock->tc.cycle_last;
2023 +- mdev->clock_info->mask = clock->cycles.mask;
2024 +- mdev->clock_info->mult = clock->nominal_c_mult;
2025 +- mdev->clock_info->shift = clock->cycles.shift;
2026 +- mdev->clock_info->frac = clock->tc.frac;
2027 +- mdev->clock_info->overflow_period =
2028 +- clock->overflow_period;
2029 +- }
2030 ++ mdev->clock_info =
2031 ++ (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL);
2032 ++ if (mdev->clock_info) {
2033 ++ mdev->clock_info->nsec = clock->tc.nsec;
2034 ++ mdev->clock_info->cycles = clock->tc.cycle_last;
2035 ++ mdev->clock_info->mask = clock->cycles.mask;
2036 ++ mdev->clock_info->mult = clock->nominal_c_mult;
2037 ++ mdev->clock_info->shift = clock->cycles.shift;
2038 ++ mdev->clock_info->frac = clock->tc.frac;
2039 ++ mdev->clock_info->overflow_period = clock->overflow_period;
2040 + }
2041 +
2042 + INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
2043 +@@ -599,8 +592,7 @@ void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
2044 + cancel_delayed_work_sync(&clock->overflow_work);
2045 +
2046 + if (mdev->clock_info) {
2047 +- kunmap(mdev->clock_info_page);
2048 +- __free_page(mdev->clock_info_page);
2049 ++ free_page((unsigned long)mdev->clock_info);
2050 + mdev->clock_info = NULL;
2051 + }
2052 +
2053 +diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
2054 +index 2d9f26a725c2..37bd2dbcd206 100644
2055 +--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
2056 ++++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
2057 +@@ -164,6 +164,7 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
2058 + return;
2059 + }
2060 +
2061 ++ rcu_read_lock();
2062 + for (i = 0; i < count; i++) {
2063 + ipv4_addr = payload->tun_info[i].ipv4;
2064 + port = be32_to_cpu(payload->tun_info[i].egress_port);
2065 +@@ -179,6 +180,7 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
2066 + neigh_event_send(n, NULL);
2067 + neigh_release(n);
2068 + }
2069 ++ rcu_read_unlock();
2070 + }
2071 +
2072 + static int
2073 +@@ -362,9 +364,10 @@ void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
2074 +
2075 + payload = nfp_flower_cmsg_get_data(skb);
2076 +
2077 ++ rcu_read_lock();
2078 + netdev = nfp_app_repr_get(app, be32_to_cpu(payload->ingress_port));
2079 + if (!netdev)
2080 +- goto route_fail_warning;
2081 ++ goto fail_rcu_unlock;
2082 +
2083 + flow.daddr = payload->ipv4_addr;
2084 + flow.flowi4_proto = IPPROTO_UDP;
2085 +@@ -374,21 +377,23 @@ void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
2086 + rt = ip_route_output_key(dev_net(netdev), &flow);
2087 + err = PTR_ERR_OR_ZERO(rt);
2088 + if (err)
2089 +- goto route_fail_warning;
2090 ++ goto fail_rcu_unlock;
2091 + #else
2092 +- goto route_fail_warning;
2093 ++ goto fail_rcu_unlock;
2094 + #endif
2095 +
2096 + /* Get the neighbour entry for the lookup */
2097 + n = dst_neigh_lookup(&rt->dst, &flow.daddr);
2098 + ip_rt_put(rt);
2099 + if (!n)
2100 +- goto route_fail_warning;
2101 +- nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_KERNEL);
2102 ++ goto fail_rcu_unlock;
2103 ++ nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);
2104 + neigh_release(n);
2105 ++ rcu_read_unlock();
2106 + return;
2107 +
2108 +-route_fail_warning:
2109 ++fail_rcu_unlock:
2110 ++ rcu_read_unlock();
2111 + nfp_flower_cmsg_warn(app, "Requested route not found.\n");
2112 + }
2113 +
2114 +diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
2115 +index c589f5ae75bb..8bb53ec8d9cf 100644
2116 +--- a/drivers/net/ieee802154/mcr20a.c
2117 ++++ b/drivers/net/ieee802154/mcr20a.c
2118 +@@ -533,6 +533,8 @@ mcr20a_start(struct ieee802154_hw *hw)
2119 + dev_dbg(printdev(lp), "no slotted operation\n");
2120 + ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
2121 + DAR_PHY_CTRL1_SLOTTED, 0x0);
2122 ++ if (ret < 0)
2123 ++ return ret;
2124 +
2125 + /* enable irq */
2126 + enable_irq(lp->spi->irq);
2127 +@@ -540,11 +542,15 @@ mcr20a_start(struct ieee802154_hw *hw)
2128 + /* Unmask SEQ interrupt */
2129 + ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL2,
2130 + DAR_PHY_CTRL2_SEQMSK, 0x0);
2131 ++ if (ret < 0)
2132 ++ return ret;
2133 +
2134 + /* Start the RX sequence */
2135 + dev_dbg(printdev(lp), "start the RX sequence\n");
2136 + ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
2137 + DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_RX);
2138 ++ if (ret < 0)
2139 ++ return ret;
2140 +
2141 + return 0;
2142 + }
2143 +diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c
2144 +index b5edc7f96a39..685e875f5164 100644
2145 +--- a/drivers/net/ppp/ppp_deflate.c
2146 ++++ b/drivers/net/ppp/ppp_deflate.c
2147 +@@ -610,12 +610,20 @@ static struct compressor ppp_deflate_draft = {
2148 +
2149 + static int __init deflate_init(void)
2150 + {
2151 +- int answer = ppp_register_compressor(&ppp_deflate);
2152 +- if (answer == 0)
2153 +- printk(KERN_INFO
2154 +- "PPP Deflate Compression module registered\n");
2155 +- ppp_register_compressor(&ppp_deflate_draft);
2156 +- return answer;
2157 ++ int rc;
2158 ++
2159 ++ rc = ppp_register_compressor(&ppp_deflate);
2160 ++ if (rc)
2161 ++ return rc;
2162 ++
2163 ++ rc = ppp_register_compressor(&ppp_deflate_draft);
2164 ++ if (rc) {
2165 ++ ppp_unregister_compressor(&ppp_deflate);
2166 ++ return rc;
2167 ++ }
2168 ++
2169 ++ pr_info("PPP Deflate Compression module registered\n");
2170 ++ return 0;
2171 + }
2172 +
2173 + static void __exit deflate_cleanup(void)
2174 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
2175 +index 9195f3476b1d..366217263d70 100644
2176 +--- a/drivers/net/usb/qmi_wwan.c
2177 ++++ b/drivers/net/usb/qmi_wwan.c
2178 +@@ -1122,9 +1122,16 @@ static const struct usb_device_id products[] = {
2179 + {QMI_FIXED_INTF(0x0846, 0x68d3, 8)}, /* Netgear Aircard 779S */
2180 + {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
2181 + {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
2182 ++ {QMI_FIXED_INTF(0x1435, 0x0918, 3)}, /* Wistron NeWeb D16Q1 */
2183 ++ {QMI_FIXED_INTF(0x1435, 0x0918, 4)}, /* Wistron NeWeb D16Q1 */
2184 ++ {QMI_FIXED_INTF(0x1435, 0x0918, 5)}, /* Wistron NeWeb D16Q1 */
2185 ++ {QMI_FIXED_INTF(0x1435, 0x3185, 4)}, /* Wistron NeWeb M18Q5 */
2186 ++ {QMI_FIXED_INTF(0x1435, 0xd111, 4)}, /* M9615A DM11-1 D51QC */
2187 + {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */
2188 + {QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */
2189 + {QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */
2190 ++ {QMI_FIXED_INTF(0x1435, 0xd182, 4)}, /* Wistron NeWeb D18 */
2191 ++ {QMI_FIXED_INTF(0x1435, 0xd182, 5)}, /* Wistron NeWeb D18 */
2192 + {QMI_FIXED_INTF(0x1435, 0xd191, 4)}, /* Wistron NeWeb D19Q1 */
2193 + {QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)}, /* Fibocom NL668 series */
2194 + {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
2195 +@@ -1180,6 +1187,7 @@ static const struct usb_device_id products[] = {
2196 + {QMI_FIXED_INTF(0x19d2, 0x0265, 4)}, /* ONDA MT8205 4G LTE */
2197 + {QMI_FIXED_INTF(0x19d2, 0x0284, 4)}, /* ZTE MF880 */
2198 + {QMI_FIXED_INTF(0x19d2, 0x0326, 4)}, /* ZTE MF821D */
2199 ++ {QMI_FIXED_INTF(0x19d2, 0x0396, 3)}, /* ZTE ZM8620 */
2200 + {QMI_FIXED_INTF(0x19d2, 0x0412, 4)}, /* Telewell TW-LTE 4G */
2201 + {QMI_FIXED_INTF(0x19d2, 0x1008, 4)}, /* ZTE (Vodafone) K3570-Z */
2202 + {QMI_FIXED_INTF(0x19d2, 0x1010, 4)}, /* ZTE (Vodafone) K3571-Z */
2203 +@@ -1200,7 +1208,9 @@ static const struct usb_device_id products[] = {
2204 + {QMI_FIXED_INTF(0x19d2, 0x1425, 2)},
2205 + {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
2206 + {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
2207 ++ {QMI_FIXED_INTF(0x19d2, 0x1432, 3)}, /* ZTE ME3620 */
2208 + {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
2209 ++ {QMI_FIXED_INTF(0x2001, 0x7e16, 3)}, /* D-Link DWM-221 */
2210 + {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
2211 + {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
2212 + {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
2213 +@@ -1240,6 +1250,8 @@ static const struct usb_device_id products[] = {
2214 + {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
2215 + {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
2216 + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
2217 ++ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1260, 2)}, /* Telit LE910Cx */
2218 ++ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1261, 2)}, /* Telit LE910Cx */
2219 + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */
2220 + {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */
2221 + {QMI_FIXED_INTF(0x1c9e, 0x9803, 4)}, /* Telewell TW-3G HSPA+ */
2222 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
2223 +index 51d76ac45075..188d7961584e 100644
2224 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
2225 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
2226 +@@ -31,6 +31,10 @@ struct brcmf_dmi_data {
2227 +
2228 + /* NOTE: Please keep all entries sorted alphabetically */
2229 +
2230 ++static const struct brcmf_dmi_data acepc_t8_data = {
2231 ++ BRCM_CC_4345_CHIP_ID, 6, "acepc-t8"
2232 ++};
2233 ++
2234 + static const struct brcmf_dmi_data gpd_win_pocket_data = {
2235 + BRCM_CC_4356_CHIP_ID, 2, "gpd-win-pocket"
2236 + };
2237 +@@ -44,6 +48,28 @@ static const struct brcmf_dmi_data meegopad_t08_data = {
2238 + };
2239 +
2240 + static const struct dmi_system_id dmi_platform_data[] = {
2241 ++ {
2242 ++ /* ACEPC T8 Cherry Trail Z8350 mini PC */
2243 ++ .matches = {
2244 ++ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."),
2245 ++ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
2246 ++ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "T8"),
2247 ++ /* also match on somewhat unique bios-version */
2248 ++ DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"),
2249 ++ },
2250 ++ .driver_data = (void *)&acepc_t8_data,
2251 ++ },
2252 ++ {
2253 ++ /* ACEPC T11 Cherry Trail Z8350 mini PC, same wifi as the T8 */
2254 ++ .matches = {
2255 ++ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."),
2256 ++ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
2257 ++ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "T11"),
2258 ++ /* also match on somewhat unique bios-version */
2259 ++ DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"),
2260 ++ },
2261 ++ .driver_data = (void *)&acepc_t8_data,
2262 ++ },
2263 + {
2264 + /* Match for the GPDwin which unfortunately uses somewhat
2265 + * generic dmi strings, which is why we test for 4 strings.
2266 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
2267 +index 7bd8676508f5..519c7dd47f69 100644
2268 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
2269 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
2270 +@@ -143,9 +143,9 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
2271 + }
2272 +
2273 + /* iwl_mvm_create_skb Adds the rxb to a new skb */
2274 +-static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
2275 +- u16 len, u8 crypt_len,
2276 +- struct iwl_rx_cmd_buffer *rxb)
2277 ++static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
2278 ++ struct ieee80211_hdr *hdr, u16 len, u8 crypt_len,
2279 ++ struct iwl_rx_cmd_buffer *rxb)
2280 + {
2281 + struct iwl_rx_packet *pkt = rxb_addr(rxb);
2282 + struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
2283 +@@ -178,6 +178,20 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
2284 + * present before copying packet data.
2285 + */
2286 + hdrlen += crypt_len;
2287 ++
2288 ++ if (WARN_ONCE(headlen < hdrlen,
2289 ++ "invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n",
2290 ++ hdrlen, len, crypt_len)) {
2291 ++ /*
2292 ++ * We warn and trace because we want to be able to see
2293 ++ * it in trace-cmd as well.
2294 ++ */
2295 ++ IWL_DEBUG_RX(mvm,
2296 ++ "invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n",
2297 ++ hdrlen, len, crypt_len);
2298 ++ return -EINVAL;
2299 ++ }
2300 ++
2301 + skb_put_data(skb, hdr, hdrlen);
2302 + skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen);
2303 +
2304 +@@ -190,6 +204,8 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
2305 + skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
2306 + fraglen, rxb->truesize);
2307 + }
2308 ++
2309 ++ return 0;
2310 + }
2311 +
2312 + /* iwl_mvm_pass_packet_to_mac80211 - passes the packet for mac80211 */
2313 +@@ -1600,7 +1616,11 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
2314 + rx_status->boottime_ns = ktime_get_boot_ns();
2315 + }
2316 +
2317 +- iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb);
2318 ++ if (iwl_mvm_create_skb(mvm, skb, hdr, len, crypt_len, rxb)) {
2319 ++ kfree_skb(skb);
2320 ++ goto out;
2321 ++ }
2322 ++
2323 + if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
2324 + iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
2325 + out:
2326 +diff --git a/drivers/net/wireless/intersil/p54/p54pci.c b/drivers/net/wireless/intersil/p54/p54pci.c
2327 +index 27a49068d32d..57ad56435dda 100644
2328 +--- a/drivers/net/wireless/intersil/p54/p54pci.c
2329 ++++ b/drivers/net/wireless/intersil/p54/p54pci.c
2330 +@@ -554,7 +554,7 @@ static int p54p_probe(struct pci_dev *pdev,
2331 + err = pci_enable_device(pdev);
2332 + if (err) {
2333 + dev_err(&pdev->dev, "Cannot enable new PCI device\n");
2334 +- return err;
2335 ++ goto err_put;
2336 + }
2337 +
2338 + mem_addr = pci_resource_start(pdev, 0);
2339 +@@ -639,6 +639,7 @@ static int p54p_probe(struct pci_dev *pdev,
2340 + pci_release_regions(pdev);
2341 + err_disable_dev:
2342 + pci_disable_device(pdev);
2343 ++err_put:
2344 + pci_dev_put(pdev);
2345 + return err;
2346 + }
2347 +diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
2348 +index 0c6e8b44b4ed..c60b465f6fe4 100644
2349 +--- a/drivers/parisc/led.c
2350 ++++ b/drivers/parisc/led.c
2351 +@@ -568,6 +568,9 @@ int __init register_led_driver(int model, unsigned long cmd_reg, unsigned long d
2352 + break;
2353 +
2354 + case DISPLAY_MODEL_LASI:
2355 ++ /* Skip to register LED in QEMU */
2356 ++ if (running_on_qemu)
2357 ++ return 1;
2358 + LED_DATA_REG = data_reg;
2359 + led_func_ptr = led_LASI_driver;
2360 + printk(KERN_INFO "LED display at %lx registered\n", LED_DATA_REG);
2361 +diff --git a/drivers/pci/controller/pcie-rcar.c b/drivers/pci/controller/pcie-rcar.c
2362 +index c8febb009454..6a4e435bd35f 100644
2363 +--- a/drivers/pci/controller/pcie-rcar.c
2364 ++++ b/drivers/pci/controller/pcie-rcar.c
2365 +@@ -46,6 +46,7 @@
2366 +
2367 + /* Transfer control */
2368 + #define PCIETCTLR 0x02000
2369 ++#define DL_DOWN BIT(3)
2370 + #define CFINIT 1
2371 + #define PCIETSTR 0x02004
2372 + #define DATA_LINK_ACTIVE 1
2373 +@@ -94,6 +95,7 @@
2374 + #define MACCTLR 0x011058
2375 + #define SPEED_CHANGE BIT(24)
2376 + #define SCRAMBLE_DISABLE BIT(27)
2377 ++#define PMSR 0x01105c
2378 + #define MACS2R 0x011078
2379 + #define MACCGSPSETR 0x011084
2380 + #define SPCNGRSN BIT(31)
2381 +@@ -1130,6 +1132,7 @@ static int rcar_pcie_probe(struct platform_device *pdev)
2382 + pcie = pci_host_bridge_priv(bridge);
2383 +
2384 + pcie->dev = dev;
2385 ++ platform_set_drvdata(pdev, pcie);
2386 +
2387 + err = pci_parse_request_of_pci_ranges(dev, &pcie->resources, NULL);
2388 + if (err)
2389 +@@ -1221,10 +1224,28 @@ err_free_bridge:
2390 + return err;
2391 + }
2392 +
2393 ++static int rcar_pcie_resume_noirq(struct device *dev)
2394 ++{
2395 ++ struct rcar_pcie *pcie = dev_get_drvdata(dev);
2396 ++
2397 ++ if (rcar_pci_read_reg(pcie, PMSR) &&
2398 ++ !(rcar_pci_read_reg(pcie, PCIETCTLR) & DL_DOWN))
2399 ++ return 0;
2400 ++
2401 ++ /* Re-establish the PCIe link */
2402 ++ rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
2403 ++ return rcar_pcie_wait_for_dl(pcie);
2404 ++}
2405 ++
2406 ++static const struct dev_pm_ops rcar_pcie_pm_ops = {
2407 ++ .resume_noirq = rcar_pcie_resume_noirq,
2408 ++};
2409 ++
2410 + static struct platform_driver rcar_pcie_driver = {
2411 + .driver = {
2412 + .name = "rcar-pcie",
2413 + .of_match_table = rcar_pcie_of_match,
2414 ++ .pm = &rcar_pcie_pm_ops,
2415 + .suppress_bind_attrs = true,
2416 + },
2417 + .probe = rcar_pcie_probe,
2418 +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
2419 +index e91005d0f20c..3f77bab698ce 100644
2420 +--- a/drivers/pci/pci.c
2421 ++++ b/drivers/pci/pci.c
2422 +@@ -6266,8 +6266,7 @@ static int __init pci_setup(char *str)
2423 + } else if (!strncmp(str, "pcie_scan_all", 13)) {
2424 + pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
2425 + } else if (!strncmp(str, "disable_acs_redir=", 18)) {
2426 +- disable_acs_redir_param =
2427 +- kstrdup(str + 18, GFP_KERNEL);
2428 ++ disable_acs_redir_param = str + 18;
2429 + } else {
2430 + printk(KERN_ERR "PCI: Unknown option `%s'\n",
2431 + str);
2432 +@@ -6278,3 +6277,19 @@ static int __init pci_setup(char *str)
2433 + return 0;
2434 + }
2435 + early_param("pci", pci_setup);
2436 ++
2437 ++/*
2438 ++ * 'disable_acs_redir_param' is initialized in pci_setup(), above, to point
2439 ++ * to data in the __initdata section which will be freed after the init
2440 ++ * sequence is complete. We can't allocate memory in pci_setup() because some
2441 ++ * architectures do not have any memory allocation service available during
2442 ++ * an early_param() call. So we allocate memory and copy the variable here
2443 ++ * before the init section is freed.
2444 ++ */
2445 ++static int __init pci_realloc_setup_params(void)
2446 ++{
2447 ++ disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
2448 ++
2449 ++ return 0;
2450 ++}
2451 ++pure_initcall(pci_realloc_setup_params);
2452 +diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
2453 +index 224d88634115..17c4ed2021de 100644
2454 +--- a/drivers/pci/pci.h
2455 ++++ b/drivers/pci/pci.h
2456 +@@ -596,7 +596,7 @@ void pci_aer_clear_fatal_status(struct pci_dev *dev);
2457 + void pci_aer_clear_device_status(struct pci_dev *dev);
2458 + #else
2459 + static inline void pci_no_aer(void) { }
2460 +-static inline int pci_aer_init(struct pci_dev *d) { return -ENODEV; }
2461 ++static inline void pci_aer_init(struct pci_dev *d) { }
2462 + static inline void pci_aer_exit(struct pci_dev *d) { }
2463 + static inline void pci_aer_clear_fatal_status(struct pci_dev *dev) { }
2464 + static inline void pci_aer_clear_device_status(struct pci_dev *dev) { }
2465 +diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
2466 +index 727e3c1ef9a4..38e7017478b5 100644
2467 +--- a/drivers/pci/pcie/aspm.c
2468 ++++ b/drivers/pci/pcie/aspm.c
2469 +@@ -196,6 +196,38 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
2470 + link->clkpm_capable = (blacklist) ? 0 : capable;
2471 + }
2472 +
2473 ++static bool pcie_retrain_link(struct pcie_link_state *link)
2474 ++{
2475 ++ struct pci_dev *parent = link->pdev;
2476 ++ unsigned long start_jiffies;
2477 ++ u16 reg16;
2478 ++
2479 ++ pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
2480 ++ reg16 |= PCI_EXP_LNKCTL_RL;
2481 ++ pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
2482 ++ if (parent->clear_retrain_link) {
2483 ++ /*
2484 ++ * Due to an erratum in some devices the Retrain Link bit
2485 ++ * needs to be cleared again manually to allow the link
2486 ++ * training to succeed.
2487 ++ */
2488 ++ reg16 &= ~PCI_EXP_LNKCTL_RL;
2489 ++ pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
2490 ++ }
2491 ++
2492 ++ /* Wait for link training end. Break out after waiting for timeout */
2493 ++ start_jiffies = jiffies;
2494 ++ for (;;) {
2495 ++ pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
2496 ++ if (!(reg16 & PCI_EXP_LNKSTA_LT))
2497 ++ break;
2498 ++ if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
2499 ++ break;
2500 ++ msleep(1);
2501 ++ }
2502 ++ return !(reg16 & PCI_EXP_LNKSTA_LT);
2503 ++}
2504 ++
2505 + /*
2506 + * pcie_aspm_configure_common_clock: check if the 2 ends of a link
2507 + * could use common clock. If they are, configure them to use the
2508 +@@ -205,7 +237,6 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
2509 + {
2510 + int same_clock = 1;
2511 + u16 reg16, parent_reg, child_reg[8];
2512 +- unsigned long start_jiffies;
2513 + struct pci_dev *child, *parent = link->pdev;
2514 + struct pci_bus *linkbus = parent->subordinate;
2515 + /*
2516 +@@ -263,21 +294,7 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
2517 + reg16 &= ~PCI_EXP_LNKCTL_CCC;
2518 + pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
2519 +
2520 +- /* Retrain link */
2521 +- reg16 |= PCI_EXP_LNKCTL_RL;
2522 +- pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
2523 +-
2524 +- /* Wait for link training end. Break out after waiting for timeout */
2525 +- start_jiffies = jiffies;
2526 +- for (;;) {
2527 +- pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
2528 +- if (!(reg16 & PCI_EXP_LNKSTA_LT))
2529 +- break;
2530 +- if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
2531 +- break;
2532 +- msleep(1);
2533 +- }
2534 +- if (!(reg16 & PCI_EXP_LNKSTA_LT))
2535 ++ if (pcie_retrain_link(link))
2536 + return;
2537 +
2538 + /* Training failed. Restore common clock configurations */
2539 +diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
2540 +index c46a3fcb341e..3bb9bdb884e5 100644
2541 +--- a/drivers/pci/probe.c
2542 ++++ b/drivers/pci/probe.c
2543 +@@ -535,16 +535,9 @@ static void pci_release_host_bridge_dev(struct device *dev)
2544 + kfree(to_pci_host_bridge(dev));
2545 + }
2546 +
2547 +-struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
2548 ++static void pci_init_host_bridge(struct pci_host_bridge *bridge)
2549 + {
2550 +- struct pci_host_bridge *bridge;
2551 +-
2552 +- bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL);
2553 +- if (!bridge)
2554 +- return NULL;
2555 +-
2556 + INIT_LIST_HEAD(&bridge->windows);
2557 +- bridge->dev.release = pci_release_host_bridge_dev;
2558 +
2559 + /*
2560 + * We assume we can manage these PCIe features. Some systems may
2561 +@@ -557,6 +550,18 @@ struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
2562 + bridge->native_shpc_hotplug = 1;
2563 + bridge->native_pme = 1;
2564 + bridge->native_ltr = 1;
2565 ++}
2566 ++
2567 ++struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
2568 ++{
2569 ++ struct pci_host_bridge *bridge;
2570 ++
2571 ++ bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL);
2572 ++ if (!bridge)
2573 ++ return NULL;
2574 ++
2575 ++ pci_init_host_bridge(bridge);
2576 ++ bridge->dev.release = pci_release_host_bridge_dev;
2577 +
2578 + return bridge;
2579 + }
2580 +@@ -571,7 +576,7 @@ struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
2581 + if (!bridge)
2582 + return NULL;
2583 +
2584 +- INIT_LIST_HEAD(&bridge->windows);
2585 ++ pci_init_host_bridge(bridge);
2586 + bridge->dev.release = devm_pci_release_host_bridge_dev;
2587 +
2588 + return bridge;
2589 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
2590 +index fba03a7d5c7f..c2c54dc4433e 100644
2591 +--- a/drivers/pci/quirks.c
2592 ++++ b/drivers/pci/quirks.c
2593 +@@ -2245,6 +2245,23 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
2594 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
2595 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
2596 +
2597 ++/*
2598 ++ * Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain
2599 ++ * Link bit cleared after starting the link retrain process to allow this
2600 ++ * process to finish.
2601 ++ *
2602 ++ * Affected devices: PI7C9X110, PI7C9X111SL, PI7C9X130. See also the
2603 ++ * Pericom Errata Sheet PI7C9X111SLB_errata_rev1.2_102711.pdf.
2604 ++ */
2605 ++static void quirk_enable_clear_retrain_link(struct pci_dev *dev)
2606 ++{
2607 ++ dev->clear_retrain_link = 1;
2608 ++ pci_info(dev, "Enable PCIe Retrain Link quirk\n");
2609 ++}
2610 ++DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe110, quirk_enable_clear_retrain_link);
2611 ++DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe111, quirk_enable_clear_retrain_link);
2612 ++DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe130, quirk_enable_clear_retrain_link);
2613 ++
2614 + static void fixup_rev1_53c810(struct pci_dev *dev)
2615 + {
2616 + u32 class = dev->class;
2617 +@@ -3408,6 +3425,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
2618 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
2619 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
2620 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
2621 ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset);
2622 +
2623 + /*
2624 + * Root port on some Cavium CN8xxx chips do not successfully complete a bus
2625 +@@ -4903,6 +4921,7 @@ static void quirk_no_ats(struct pci_dev *pdev)
2626 +
2627 + /* AMD Stoney platform GPU */
2628 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats);
2629 ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_no_ats);
2630 + #endif /* CONFIG_PCI_ATS */
2631 +
2632 + /* Freescale PCIe doesn't support MSI in RC mode */
2633 +@@ -5120,3 +5139,61 @@ SWITCHTEC_QUIRK(0x8573); /* PFXI 48XG3 */
2634 + SWITCHTEC_QUIRK(0x8574); /* PFXI 64XG3 */
2635 + SWITCHTEC_QUIRK(0x8575); /* PFXI 80XG3 */
2636 + SWITCHTEC_QUIRK(0x8576); /* PFXI 96XG3 */
2637 ++
2638 ++/*
2639 ++ * On Lenovo Thinkpad P50 SKUs with a Nvidia Quadro M1000M, the BIOS does
2640 ++ * not always reset the secondary Nvidia GPU between reboots if the system
2641 ++ * is configured to use Hybrid Graphics mode. This results in the GPU
2642 ++ * being left in whatever state it was in during the *previous* boot, which
2643 ++ * causes spurious interrupts from the GPU, which in turn causes us to
2644 ++ * disable the wrong IRQ and end up breaking the touchpad. Unsurprisingly,
2645 ++ * this also completely breaks nouveau.
2646 ++ *
2647 ++ * Luckily, it seems a simple reset of the Nvidia GPU brings it back to a
2648 ++ * clean state and fixes all these issues.
2649 ++ *
2650 ++ * When the machine is configured in Dedicated display mode, the issue
2651 ++ * doesn't occur. Fortunately the GPU advertises NoReset+ when in this
2652 ++ * mode, so we can detect that and avoid resetting it.
2653 ++ */
2654 ++static void quirk_reset_lenovo_thinkpad_p50_nvgpu(struct pci_dev *pdev)
2655 ++{
2656 ++ void __iomem *map;
2657 ++ int ret;
2658 ++
2659 ++ if (pdev->subsystem_vendor != PCI_VENDOR_ID_LENOVO ||
2660 ++ pdev->subsystem_device != 0x222e ||
2661 ++ !pdev->reset_fn)
2662 ++ return;
2663 ++
2664 ++ if (pci_enable_device_mem(pdev))
2665 ++ return;
2666 ++
2667 ++ /*
2668 ++ * Based on nvkm_device_ctor() in
2669 ++ * drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
2670 ++ */
2671 ++ map = pci_iomap(pdev, 0, 0x23000);
2672 ++ if (!map) {
2673 ++ pci_err(pdev, "Can't map MMIO space\n");
2674 ++ goto out_disable;
2675 ++ }
2676 ++
2677 ++ /*
2678 ++ * Make sure the GPU looks like it's been POSTed before resetting
2679 ++ * it.
2680 ++ */
2681 ++ if (ioread32(map + 0x2240c) & 0x2) {
2682 ++ pci_info(pdev, FW_BUG "GPU left initialized by EFI, resetting\n");
2683 ++ ret = pci_reset_function(pdev);
2684 ++ if (ret < 0)
2685 ++ pci_err(pdev, "Failed to reset GPU: %d\n", ret);
2686 ++ }
2687 ++
2688 ++ iounmap(map);
2689 ++out_disable:
2690 ++ pci_disable_device(pdev);
2691 ++}
2692 ++DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, 0x13b1,
2693 ++ PCI_CLASS_DISPLAY_VGA, 8,
2694 ++ quirk_reset_lenovo_thinkpad_p50_nvgpu);
2695 +diff --git a/drivers/phy/ti/phy-ti-pipe3.c b/drivers/phy/ti/phy-ti-pipe3.c
2696 +index 68ce4a082b9b..693acc167351 100644
2697 +--- a/drivers/phy/ti/phy-ti-pipe3.c
2698 ++++ b/drivers/phy/ti/phy-ti-pipe3.c
2699 +@@ -303,7 +303,7 @@ static void ti_pipe3_calibrate(struct ti_pipe3 *phy)
2700 +
2701 + val = ti_pipe3_readl(phy->phy_rx, PCIEPHYRX_ANA_PROGRAMMABILITY);
2702 + val &= ~(INTERFACE_MASK | LOSD_MASK | MEM_PLLDIV);
2703 +- val = (0x1 << INTERFACE_SHIFT | 0xA << LOSD_SHIFT);
2704 ++ val |= (0x1 << INTERFACE_SHIFT | 0xA << LOSD_SHIFT);
2705 + ti_pipe3_writel(phy->phy_rx, PCIEPHYRX_ANA_PROGRAMMABILITY, val);
2706 +
2707 + val = ti_pipe3_readl(phy->phy_rx, PCIEPHYRX_DIGITAL_MODES);
2708 +diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c
2709 +index 08d5037fd052..6887870ba32c 100644
2710 +--- a/drivers/power/supply/cpcap-battery.c
2711 ++++ b/drivers/power/supply/cpcap-battery.c
2712 +@@ -221,6 +221,9 @@ static int cpcap_battery_cc_raw_div(struct cpcap_battery_ddata *ddata,
2713 + int avg_current;
2714 + u32 cc_lsb;
2715 +
2716 ++ if (!divider)
2717 ++ return 0;
2718 ++
2719 + sample &= 0xffffff; /* 24-bits, unsigned */
2720 + offset &= 0x7ff; /* 10-bits, signed */
2721 +
2722 +diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
2723 +index dce24f596160..5358a80d854f 100644
2724 +--- a/drivers/power/supply/power_supply_sysfs.c
2725 ++++ b/drivers/power/supply/power_supply_sysfs.c
2726 +@@ -383,15 +383,11 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env)
2727 + char *prop_buf;
2728 + char *attrname;
2729 +
2730 +- dev_dbg(dev, "uevent\n");
2731 +-
2732 + if (!psy || !psy->desc) {
2733 + dev_dbg(dev, "No power supply yet\n");
2734 + return ret;
2735 + }
2736 +
2737 +- dev_dbg(dev, "POWER_SUPPLY_NAME=%s\n", psy->desc->name);
2738 +-
2739 + ret = add_uevent_var(env, "POWER_SUPPLY_NAME=%s", psy->desc->name);
2740 + if (ret)
2741 + return ret;
2742 +@@ -427,8 +423,6 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env)
2743 + goto out;
2744 + }
2745 +
2746 +- dev_dbg(dev, "prop %s=%s\n", attrname, prop_buf);
2747 +-
2748 + ret = add_uevent_var(env, "POWER_SUPPLY_%s=%s", attrname, prop_buf);
2749 + kfree(attrname);
2750 + if (ret)
2751 +diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
2752 +index e2caf11598c7..fb9fe26fd0fa 100644
2753 +--- a/drivers/regulator/core.c
2754 ++++ b/drivers/regulator/core.c
2755 +@@ -3360,15 +3360,12 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
2756 +
2757 + /* for not coupled regulators this will just set the voltage */
2758 + ret = regulator_balance_voltage(rdev, state);
2759 +- if (ret < 0)
2760 +- goto out2;
2761 ++ if (ret < 0) {
2762 ++ voltage->min_uV = old_min_uV;
2763 ++ voltage->max_uV = old_max_uV;
2764 ++ }
2765 +
2766 + out:
2767 +- return 0;
2768 +-out2:
2769 +- voltage->min_uV = old_min_uV;
2770 +- voltage->max_uV = old_max_uV;
2771 +-
2772 + return ret;
2773 + }
2774 +
2775 +diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
2776 +index be1e9e52b2a0..cbccb9b38503 100644
2777 +--- a/drivers/staging/media/imx/imx-media-csi.c
2778 ++++ b/drivers/staging/media/imx/imx-media-csi.c
2779 +@@ -153,9 +153,10 @@ static inline bool requires_passthrough(struct v4l2_fwnode_endpoint *ep,
2780 + /*
2781 + * Parses the fwnode endpoint from the source pad of the entity
2782 + * connected to this CSI. This will either be the entity directly
2783 +- * upstream from the CSI-2 receiver, or directly upstream from the
2784 +- * video mux. The endpoint is needed to determine the bus type and
2785 +- * bus config coming into the CSI.
2786 ++ * upstream from the CSI-2 receiver, directly upstream from the
2787 ++ * video mux, or directly upstream from the CSI itself. The endpoint
2788 ++ * is needed to determine the bus type and bus config coming into
2789 ++ * the CSI.
2790 + */
2791 + static int csi_get_upstream_endpoint(struct csi_priv *priv,
2792 + struct v4l2_fwnode_endpoint *ep)
2793 +@@ -171,7 +172,8 @@ static int csi_get_upstream_endpoint(struct csi_priv *priv,
2794 + if (!priv->src_sd)
2795 + return -EPIPE;
2796 +
2797 +- src = &priv->src_sd->entity;
2798 ++ sd = priv->src_sd;
2799 ++ src = &sd->entity;
2800 +
2801 + if (src->function == MEDIA_ENT_F_VID_MUX) {
2802 + /*
2803 +@@ -185,6 +187,14 @@ static int csi_get_upstream_endpoint(struct csi_priv *priv,
2804 + src = &sd->entity;
2805 + }
2806 +
2807 ++ /*
2808 ++ * If the source is neither the video mux nor the CSI-2 receiver,
2809 ++ * get the source pad directly upstream from CSI itself.
2810 ++ */
2811 ++ if (src->function != MEDIA_ENT_F_VID_MUX &&
2812 ++ sd->grp_id != IMX_MEDIA_GRP_ID_CSI2)
2813 ++ src = &priv->sd.entity;
2814 ++
2815 + /* get source pad of entity directly upstream from src */
2816 + pad = imx_media_find_upstream_pad(priv->md, src, 0);
2817 + if (IS_ERR(pad))
2818 +diff --git a/drivers/staging/media/imx/imx-media-of.c b/drivers/staging/media/imx/imx-media-of.c
2819 +index a01327f6e045..2da81a5af274 100644
2820 +--- a/drivers/staging/media/imx/imx-media-of.c
2821 ++++ b/drivers/staging/media/imx/imx-media-of.c
2822 +@@ -143,15 +143,18 @@ int imx_media_create_csi_of_links(struct imx_media_dev *imxmd,
2823 + struct v4l2_subdev *csi)
2824 + {
2825 + struct device_node *csi_np = csi->dev->of_node;
2826 +- struct fwnode_handle *fwnode, *csi_ep;
2827 +- struct v4l2_fwnode_link link;
2828 + struct device_node *ep;
2829 +- int ret;
2830 +-
2831 +- link.local_node = of_fwnode_handle(csi_np);
2832 +- link.local_port = CSI_SINK_PAD;
2833 +
2834 + for_each_child_of_node(csi_np, ep) {
2835 ++ struct fwnode_handle *fwnode, *csi_ep;
2836 ++ struct v4l2_fwnode_link link;
2837 ++ int ret;
2838 ++
2839 ++ memset(&link, 0, sizeof(link));
2840 ++
2841 ++ link.local_node = of_fwnode_handle(csi_np);
2842 ++ link.local_port = CSI_SINK_PAD;
2843 ++
2844 + csi_ep = of_fwnode_handle(ep);
2845 +
2846 + fwnode = fwnode_graph_get_remote_endpoint(csi_ep);
2847 +diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
2848 +index ba906876cc45..fd02e8a4841d 100644
2849 +--- a/drivers/video/fbdev/efifb.c
2850 ++++ b/drivers/video/fbdev/efifb.c
2851 +@@ -476,8 +476,12 @@ static int efifb_probe(struct platform_device *dev)
2852 + * If the UEFI memory map covers the efifb region, we may only
2853 + * remap it using the attributes the memory map prescribes.
2854 + */
2855 +- mem_flags |= EFI_MEMORY_WT | EFI_MEMORY_WB;
2856 +- mem_flags &= md.attribute;
2857 ++ md.attribute &= EFI_MEMORY_UC | EFI_MEMORY_WC |
2858 ++ EFI_MEMORY_WT | EFI_MEMORY_WB;
2859 ++ if (md.attribute) {
2860 ++ mem_flags |= EFI_MEMORY_WT | EFI_MEMORY_WB;
2861 ++ mem_flags &= md.attribute;
2862 ++ }
2863 + }
2864 + if (mem_flags & EFI_MEMORY_WC)
2865 + info->screen_base = ioremap_wc(efifb_fix.smem_start,
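
The efifb hunk first masks md.attribute down to the four cacheability attributes (EFI_MEMORY_UC/WC/WT/WB) and only constrains the remap flags when at least one survives; a memory-map entry carrying none of them previously zeroed mem_flags and forced an uncached mapping. A sketch of the selection logic - the EFI_MEMORY_* values below match the UEFI-defined bits, the rest is simplified:

#include <stdio.h>

#define EFI_MEMORY_UC 0x1ULL
#define EFI_MEMORY_WC 0x2ULL
#define EFI_MEMORY_WT 0x4ULL
#define EFI_MEMORY_WB 0x8ULL

/* honour the map entry's cacheability bits only if it actually has any */
static unsigned long long pick_flags(unsigned long long mem_flags,
                                     unsigned long long map_attr)
{
    map_attr &= EFI_MEMORY_UC | EFI_MEMORY_WC |
                EFI_MEMORY_WT | EFI_MEMORY_WB;
    if (map_attr) {
        mem_flags |= EFI_MEMORY_WT | EFI_MEMORY_WB;
        mem_flags &= map_attr;
    }
    return mem_flags;
}

int main(void)
{
    /* entry with no memory-type bits: keep the driver's own choice (WC) */
    printf("%llx\n", pick_flags(EFI_MEMORY_WC, 0xF000ULL));
    /* entry allowing only WB: WB wins over the driver's WC */
    printf("%llx\n", pick_flags(EFI_MEMORY_WC, EFI_MEMORY_WB));
    return 0;
}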
2866 +diff --git a/drivers/video/fbdev/sm712.h b/drivers/video/fbdev/sm712.h
2867 +index aad1cc4be34a..c7ebf03b8d53 100644
2868 +--- a/drivers/video/fbdev/sm712.h
2869 ++++ b/drivers/video/fbdev/sm712.h
2870 +@@ -15,14 +15,10 @@
2871 +
2872 + #define FB_ACCEL_SMI_LYNX 88
2873 +
2874 +-#define SCREEN_X_RES 1024
2875 +-#define SCREEN_Y_RES 600
2876 +-#define SCREEN_BPP 16
2877 +-
2878 +-/*Assume SM712 graphics chip has 4MB VRAM */
2879 +-#define SM712_VIDEOMEMORYSIZE 0x00400000
2880 +-/*Assume SM722 graphics chip has 8MB VRAM */
2881 +-#define SM722_VIDEOMEMORYSIZE 0x00800000
2882 ++#define SCREEN_X_RES 1024
2883 ++#define SCREEN_Y_RES_PC 768
2884 ++#define SCREEN_Y_RES_NETBOOK 600
2885 ++#define SCREEN_BPP 16
2886 +
2887 + #define dac_reg (0x3c8)
2888 + #define dac_val (0x3c9)
2889 +diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c
2890 +index 502d0de2feec..f1dcc6766d1e 100644
2891 +--- a/drivers/video/fbdev/sm712fb.c
2892 ++++ b/drivers/video/fbdev/sm712fb.c
2893 +@@ -530,6 +530,65 @@ static const struct modeinit vgamode[] = {
2894 + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
2895 + },
2896 + },
2897 ++ { /* 1024 x 768 16Bpp 60Hz */
2898 ++ 1024, 768, 16, 60,
2899 ++ /* Init_MISC */
2900 ++ 0xEB,
2901 ++ { /* Init_SR0_SR4 */
2902 ++ 0x03, 0x01, 0x0F, 0x03, 0x0E,
2903 ++ },
2904 ++ { /* Init_SR10_SR24 */
2905 ++ 0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
2906 ++ 0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
2907 ++ 0xC4, 0x30, 0x02, 0x01, 0x01,
2908 ++ },
2909 ++ { /* Init_SR30_SR75 */
2910 ++ 0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
2911 ++ 0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
2912 ++ 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
2913 ++ 0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
2914 ++ 0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
2915 ++ 0x0F, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
2916 ++ 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
2917 ++ 0x50, 0x03, 0x74, 0x14, 0x3B, 0x0D, 0x09, 0x02,
2918 ++ 0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
2919 ++ },
2920 ++ { /* Init_SR80_SR93 */
2921 ++ 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
2922 ++ 0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
2923 ++ 0x00, 0x00, 0x00, 0x00,
2924 ++ },
2925 ++ { /* Init_SRA0_SRAF */
2926 ++ 0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
2927 ++ 0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
2928 ++ },
2929 ++ { /* Init_GR00_GR08 */
2930 ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
2931 ++ 0xFF,
2932 ++ },
2933 ++ { /* Init_AR00_AR14 */
2934 ++ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
2935 ++ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
2936 ++ 0x41, 0x00, 0x0F, 0x00, 0x00,
2937 ++ },
2938 ++ { /* Init_CR00_CR18 */
2939 ++ 0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
2940 ++ 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2941 ++ 0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
2942 ++ 0xFF,
2943 ++ },
2944 ++ { /* Init_CR30_CR4D */
2945 ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
2946 ++ 0x00, 0x00, 0x00, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
2947 ++ 0xA3, 0x7F, 0x00, 0x86, 0x15, 0x24, 0xFF, 0x00,
2948 ++ 0x01, 0x07, 0xE5, 0x20, 0x7F, 0xFF,
2949 ++ },
2950 ++ { /* Init_CR90_CRA7 */
2951 ++ 0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
2952 ++ 0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
2953 ++ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
2954 ++ },
2955 ++ },
2956 + { /* mode#5: 1024 x 768 24Bpp 60Hz */
2957 + 1024, 768, 24, 60,
2958 + /* Init_MISC */
2959 +@@ -827,67 +886,80 @@ static inline unsigned int chan_to_field(unsigned int chan,
2960 +
2961 + static int smtc_blank(int blank_mode, struct fb_info *info)
2962 + {
2963 ++ struct smtcfb_info *sfb = info->par;
2964 ++
2965 + /* clear DPMS setting */
2966 + switch (blank_mode) {
2967 + case FB_BLANK_UNBLANK:
2968 + /* Screen On: HSync: On, VSync : On */
2969 ++
2970 ++ switch (sfb->chip_id) {
2971 ++ case 0x710:
2972 ++ case 0x712:
2973 ++ smtc_seqw(0x6a, 0x16);
2974 ++ smtc_seqw(0x6b, 0x02);
2975 ++ break;
2976 ++ case 0x720:
2977 ++ smtc_seqw(0x6a, 0x0d);
2978 ++ smtc_seqw(0x6b, 0x02);
2979 ++ break;
2980 ++ }
2981 ++
2982 ++ smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
2983 + smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20)));
2984 +- smtc_seqw(0x6a, 0x16);
2985 +- smtc_seqw(0x6b, 0x02);
2986 + smtc_seqw(0x21, (smtc_seqr(0x21) & 0x77));
2987 + smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
2988 +- smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
2989 +- smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
2990 + smtc_seqw(0x31, (smtc_seqr(0x31) | 0x03));
2991 ++ smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
2992 + break;
2993 + case FB_BLANK_NORMAL:
2994 + /* Screen Off: HSync: On, VSync : On Soft blank */
2995 ++ smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
2996 ++ smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
2997 ++ smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
2998 + smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20)));
2999 ++ smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
3000 + smtc_seqw(0x6a, 0x16);
3001 + smtc_seqw(0x6b, 0x02);
3002 +- smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
3003 +- smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
3004 +- smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
3005 +- smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
3006 + break;
3007 + case FB_BLANK_VSYNC_SUSPEND:
3008 + /* Screen On: HSync: On, VSync : Off */
3009 ++ smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
3010 ++ smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
3011 ++ smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0x20));
3012 + smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
3013 +- smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
3014 +- smtc_seqw(0x6a, 0x0c);
3015 +- smtc_seqw(0x6b, 0x02);
3016 + smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
3017 ++ smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
3018 + smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x20));
3019 +- smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0x20));
3020 +- smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
3021 +- smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
3022 + smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
3023 ++ smtc_seqw(0x6a, 0x0c);
3024 ++ smtc_seqw(0x6b, 0x02);
3025 + break;
3026 + case FB_BLANK_HSYNC_SUSPEND:
3027 + /* Screen On: HSync: Off, VSync : On */
3028 ++ smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
3029 ++ smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
3030 ++ smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
3031 + smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
3032 +- smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
3033 +- smtc_seqw(0x6a, 0x0c);
3034 +- smtc_seqw(0x6b, 0x02);
3035 + smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
3036 ++ smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
3037 + smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x10));
3038 +- smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
3039 +- smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
3040 +- smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
3041 + smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
3042 ++ smtc_seqw(0x6a, 0x0c);
3043 ++ smtc_seqw(0x6b, 0x02);
3044 + break;
3045 + case FB_BLANK_POWERDOWN:
3046 + /* Screen On: HSync: Off, VSync : Off */
3047 ++ smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
3048 ++ smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
3049 ++ smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
3050 + smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
3051 +- smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
3052 +- smtc_seqw(0x6a, 0x0c);
3053 +- smtc_seqw(0x6b, 0x02);
3054 + smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
3055 ++ smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
3056 + smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x30));
3057 +- smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
3058 +- smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
3059 +- smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
3060 + smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
3061 ++ smtc_seqw(0x6a, 0x0c);
3062 ++ smtc_seqw(0x6b, 0x02);
3063 + break;
3064 + default:
3065 + return -EINVAL;
3066 +@@ -1145,8 +1217,10 @@ static void sm7xx_set_timing(struct smtcfb_info *sfb)
3067 +
3068 + /* init SEQ register SR30 - SR75 */
3069 + for (i = 0; i < SIZE_SR30_SR75; i++)
3070 +- if ((i + 0x30) != 0x62 && (i + 0x30) != 0x6a &&
3071 +- (i + 0x30) != 0x6b)
3072 ++ if ((i + 0x30) != 0x30 && (i + 0x30) != 0x62 &&
3073 ++ (i + 0x30) != 0x6a && (i + 0x30) != 0x6b &&
3074 ++ (i + 0x30) != 0x70 && (i + 0x30) != 0x71 &&
3075 ++ (i + 0x30) != 0x74 && (i + 0x30) != 0x75)
3076 + smtc_seqw(i + 0x30,
3077 + vgamode[j].init_sr30_sr75[i]);
3078 +
3079 +@@ -1171,8 +1245,12 @@ static void sm7xx_set_timing(struct smtcfb_info *sfb)
3080 + smtc_crtcw(i, vgamode[j].init_cr00_cr18[i]);
3081 +
3082 + /* init CRTC register CR30 - CR4D */
3083 +- for (i = 0; i < SIZE_CR30_CR4D; i++)
3084 ++ for (i = 0; i < SIZE_CR30_CR4D; i++) {
3085 ++ if ((i + 0x30) >= 0x3B && (i + 0x30) <= 0x3F)
3086 ++ /* side-effect, don't write to CR3B-CR3F */
3087 ++ continue;
3088 + smtc_crtcw(i + 0x30, vgamode[j].init_cr30_cr4d[i]);
3089 ++ }
3090 +
3091 + /* init CRTC register CR90 - CRA7 */
3092 + for (i = 0; i < SIZE_CR90_CRA7; i++)
3093 +@@ -1323,6 +1401,11 @@ static int smtc_map_smem(struct smtcfb_info *sfb,
3094 + {
3095 + sfb->fb->fix.smem_start = pci_resource_start(pdev, 0);
3096 +
3097 ++ if (sfb->chip_id == 0x720)
3098 ++ /* on SM720, the framebuffer starts at the 2 MB offset (0x00200000) */
3099 ++ sfb->fb->fix.smem_start += 0x00200000;
3100 ++
3101 ++ /* XXX: is it safe for SM720 on Big-Endian? */
3102 + if (sfb->fb->var.bits_per_pixel == 32)
3103 + sfb->fb->fix.smem_start += big_addr;
3104 +
3105 +@@ -1360,12 +1443,82 @@ static inline void sm7xx_init_hw(void)
3106 + outb_p(0x11, 0x3c5);
3107 + }
3108 +
3109 ++static u_long sm7xx_vram_probe(struct smtcfb_info *sfb)
3110 ++{
3111 ++ u8 vram;
3112 ++
3113 ++ switch (sfb->chip_id) {
3114 ++ case 0x710:
3115 ++ case 0x712:
3116 ++ /*
3117 ++ * Assume SM712 graphics chip has 4MB VRAM.
3118 ++ *
3119 ++ * FIXME: SM712 can have 2MB VRAM, which is used on earlier
3120 ++ * laptops, such as IBM Thinkpad 240X. This driver would
3121 ++ * probably crash on those machines. If anyone gets one of
3122 ++ * those and is willing to help, run "git blame" and send me
3123 ++ * an E-mail.
3124 ++ */
3125 ++ return 0x00400000;
3126 ++ case 0x720:
3127 ++ outb_p(0x76, 0x3c4);
3128 ++ vram = inb_p(0x3c5) >> 6;
3129 ++
3130 ++ if (vram == 0x00)
3131 ++ return 0x00800000; /* 8 MB */
3132 ++ else if (vram == 0x01)
3133 ++ return 0x01000000; /* 16 MB */
3134 ++ else if (vram == 0x02)
3135 ++ return 0x00400000; /* illegal, fallback to 4 MB */
3136 ++ else if (vram == 0x03)
3137 ++ return 0x00400000; /* 4 MB */
3138 ++ }
3139 ++ return 0; /* unknown hardware */
3140 ++}
3141 ++
3142 ++static void sm7xx_resolution_probe(struct smtcfb_info *sfb)
3143 ++{
3144 ++ /* get mode parameter from smtc_scr_info */
3145 ++ if (smtc_scr_info.lfb_width != 0) {
3146 ++ sfb->fb->var.xres = smtc_scr_info.lfb_width;
3147 ++ sfb->fb->var.yres = smtc_scr_info.lfb_height;
3148 ++ sfb->fb->var.bits_per_pixel = smtc_scr_info.lfb_depth;
3149 ++ goto final;
3150 ++ }
3151 ++
3152 ++ /*
3153 ++ * No parameter, default resolution is 1024x768-16.
3154 ++ *
3155 ++ * FIXME: earlier laptops, such as IBM Thinkpad 240X, have an 800x600
3156 ++ * panel, also see the comments about Thinkpad 240X above.
3157 ++ */
3158 ++ sfb->fb->var.xres = SCREEN_X_RES;
3159 ++ sfb->fb->var.yres = SCREEN_Y_RES_PC;
3160 ++ sfb->fb->var.bits_per_pixel = SCREEN_BPP;
3161 ++
3162 ++#ifdef CONFIG_MIPS
3163 ++ /*
3164 ++ * Loongson MIPS netbooks use 1024x600 LCD panels and are the original
3165 ++ * target platform of this driver, but nearly all old x86 laptops have
3166 ++ * 1024x768. Lighting 768 panels using 600's timings would partially
3167 ++ * garble the display, so we don't want that. But it's not possible to
3168 ++ * distinguish them reliably.
3169 ++ *
3170 ++ * So we change the default to 768, but keep 600 as-is on MIPS.
3171 ++ */
3172 ++ sfb->fb->var.yres = SCREEN_Y_RES_NETBOOK;
3173 ++#endif
3174 ++
3175 ++final:
3176 ++ big_pixel_depth(sfb->fb->var.bits_per_pixel, smtc_scr_info.lfb_depth);
3177 ++}
3178 ++
3179 + static int smtcfb_pci_probe(struct pci_dev *pdev,
3180 + const struct pci_device_id *ent)
3181 + {
3182 + struct smtcfb_info *sfb;
3183 + struct fb_info *info;
3184 +- u_long smem_size = 0x00800000; /* default 8MB */
3185 ++ u_long smem_size;
3186 + int err;
3187 + unsigned long mmio_base;
3188 +
3189 +@@ -1405,29 +1558,19 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
3190 +
3191 + sm7xx_init_hw();
3192 +
3193 +- /* get mode parameter from smtc_scr_info */
3194 +- if (smtc_scr_info.lfb_width != 0) {
3195 +- sfb->fb->var.xres = smtc_scr_info.lfb_width;
3196 +- sfb->fb->var.yres = smtc_scr_info.lfb_height;
3197 +- sfb->fb->var.bits_per_pixel = smtc_scr_info.lfb_depth;
3198 +- } else {
3199 +- /* default resolution 1024x600 16bit mode */
3200 +- sfb->fb->var.xres = SCREEN_X_RES;
3201 +- sfb->fb->var.yres = SCREEN_Y_RES;
3202 +- sfb->fb->var.bits_per_pixel = SCREEN_BPP;
3203 +- }
3204 +-
3205 +- big_pixel_depth(sfb->fb->var.bits_per_pixel, smtc_scr_info.lfb_depth);
3206 + /* Map address and memory detection */
3207 + mmio_base = pci_resource_start(pdev, 0);
3208 + pci_read_config_byte(pdev, PCI_REVISION_ID, &sfb->chip_rev_id);
3209 +
3210 ++ smem_size = sm7xx_vram_probe(sfb);
3211 ++ dev_info(&pdev->dev, "%lu MiB of VRAM detected.\n",
3212 ++ smem_size / 1048576);
3213 ++
3214 + switch (sfb->chip_id) {
3215 + case 0x710:
3216 + case 0x712:
3217 + sfb->fb->fix.mmio_start = mmio_base + 0x00400000;
3218 + sfb->fb->fix.mmio_len = 0x00400000;
3219 +- smem_size = SM712_VIDEOMEMORYSIZE;
3220 + sfb->lfb = ioremap(mmio_base, mmio_addr);
3221 + if (!sfb->lfb) {
3222 + dev_err(&pdev->dev,
3223 +@@ -1459,8 +1602,7 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
3224 + case 0x720:
3225 + sfb->fb->fix.mmio_start = mmio_base;
3226 + sfb->fb->fix.mmio_len = 0x00200000;
3227 +- smem_size = SM722_VIDEOMEMORYSIZE;
3228 +- sfb->dp_regs = ioremap(mmio_base, 0x00a00000);
3229 ++ sfb->dp_regs = ioremap(mmio_base, 0x00200000 + smem_size);
3230 + sfb->lfb = sfb->dp_regs + 0x00200000;
3231 + sfb->mmio = (smtc_regbaseaddress =
3232 + sfb->dp_regs + 0x000c0000);
3233 +@@ -1477,6 +1619,9 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
3234 + goto failed_fb;
3235 + }
3236 +
3237 ++ /* probe and decide resolution */
3238 ++ sm7xx_resolution_probe(sfb);
3239 ++
3240 + /* can support 32 bpp */
3241 + if (sfb->fb->var.bits_per_pixel == 15)
3242 + sfb->fb->var.bits_per_pixel = 16;
3243 +@@ -1487,7 +1632,11 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
3244 + if (err)
3245 + goto failed;
3246 +
3247 +- smtcfb_setmode(sfb);
3248 ++ /*
3249 ++ * The screen would be temporarily garbled when sm712fb takes over
3250 ++ * vesafb or VGA text mode. Zero the framebuffer.
3251 ++ */
3252 ++ memset_io(sfb->lfb, 0, sfb->fb->fix.smem_len);
3253 +
3254 + err = register_framebuffer(info);
3255 + if (err < 0)
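
Buried in the sm712fb changes above, sm7xx_vram_probe() replaces the old hard-coded SM712_VIDEOMEMORYSIZE/SM722_VIDEOMEMORYSIZE constants: for SM720 it reads SMI sequencer register SR76 and decodes bits 7:6 into the installed VRAM size, and the ioremap() is then sized as 0x00200000 + smem_size instead of a fixed 0x00a00000. The decode in isolation, as a standalone sketch:

#include <stdio.h>

/* decode the SM720 VRAM size from bits 7:6 of SR76 (per the hunk above) */
static unsigned long vram_size(unsigned char sr76)
{
    switch (sr76 >> 6) {
    case 0x00: return 0x00800000; /*  8 MB */
    case 0x01: return 0x01000000; /* 16 MB */
    case 0x02: return 0x00400000; /* documented as illegal, fall back to 4 MB */
    default:   return 0x00400000; /*  4 MB */
    }
}

int main(void)
{
    for (unsigned int v = 0; v < 4; v++)
        printf("SR76[7:6]=%u -> %lu MiB\n", v,
               vram_size((unsigned char)(v << 6)) >> 20);
    return 0;
}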
3256 +diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
3257 +index 1d034dddc556..5a0d6fb02bbc 100644
3258 +--- a/drivers/video/fbdev/udlfb.c
3259 ++++ b/drivers/video/fbdev/udlfb.c
3260 +@@ -594,8 +594,7 @@ static int dlfb_render_hline(struct dlfb_data *dlfb, struct urb **urb_ptr,
3261 + return 0;
3262 + }
3263 +
3264 +-static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y,
3265 +- int width, int height, char *data)
3266 ++static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y, int width, int height)
3267 + {
3268 + int i, ret;
3269 + char *cmd;
3270 +@@ -607,21 +606,29 @@ static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y,
3271 +
3272 + start_cycles = get_cycles();
3273 +
3274 ++ mutex_lock(&dlfb->render_mutex);
3275 ++
3276 + aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
3277 + width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
3278 + x = aligned_x;
3279 +
3280 + if ((width <= 0) ||
3281 + (x + width > dlfb->info->var.xres) ||
3282 +- (y + height > dlfb->info->var.yres))
3283 +- return -EINVAL;
3284 ++ (y + height > dlfb->info->var.yres)) {
3285 ++ ret = -EINVAL;
3286 ++ goto unlock_ret;
3287 ++ }
3288 +
3289 +- if (!atomic_read(&dlfb->usb_active))
3290 +- return 0;
3291 ++ if (!atomic_read(&dlfb->usb_active)) {
3292 ++ ret = 0;
3293 ++ goto unlock_ret;
3294 ++ }
3295 +
3296 + urb = dlfb_get_urb(dlfb);
3297 +- if (!urb)
3298 +- return 0;
3299 ++ if (!urb) {
3300 ++ ret = 0;
3301 ++ goto unlock_ret;
3302 ++ }
3303 + cmd = urb->transfer_buffer;
3304 +
3305 + for (i = y; i < y + height ; i++) {
3306 +@@ -641,7 +648,7 @@ static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y,
3307 + *cmd++ = 0xAF;
3308 + /* Send partial buffer remaining before exiting */
3309 + len = cmd - (char *) urb->transfer_buffer;
3310 +- ret = dlfb_submit_urb(dlfb, urb, len);
3311 ++ dlfb_submit_urb(dlfb, urb, len);
3312 + bytes_sent += len;
3313 + } else
3314 + dlfb_urb_completion(urb);
3315 +@@ -655,7 +662,55 @@ error:
3316 + >> 10)), /* Kcycles */
3317 + &dlfb->cpu_kcycles_used);
3318 +
3319 +- return 0;
3320 ++ ret = 0;
3321 ++
3322 ++unlock_ret:
3323 ++ mutex_unlock(&dlfb->render_mutex);
3324 ++ return ret;
3325 ++}
3326 ++
3327 ++static void dlfb_init_damage(struct dlfb_data *dlfb)
3328 ++{
3329 ++ dlfb->damage_x = INT_MAX;
3330 ++ dlfb->damage_x2 = 0;
3331 ++ dlfb->damage_y = INT_MAX;
3332 ++ dlfb->damage_y2 = 0;
3333 ++}
3334 ++
3335 ++static void dlfb_damage_work(struct work_struct *w)
3336 ++{
3337 ++ struct dlfb_data *dlfb = container_of(w, struct dlfb_data, damage_work);
3338 ++ int x, x2, y, y2;
3339 ++
3340 ++ spin_lock_irq(&dlfb->damage_lock);
3341 ++ x = dlfb->damage_x;
3342 ++ x2 = dlfb->damage_x2;
3343 ++ y = dlfb->damage_y;
3344 ++ y2 = dlfb->damage_y2;
3345 ++ dlfb_init_damage(dlfb);
3346 ++ spin_unlock_irq(&dlfb->damage_lock);
3347 ++
3348 ++ if (x < x2 && y < y2)
3349 ++ dlfb_handle_damage(dlfb, x, y, x2 - x, y2 - y);
3350 ++}
3351 ++
3352 ++static void dlfb_offload_damage(struct dlfb_data *dlfb, int x, int y, int width, int height)
3353 ++{
3354 ++ unsigned long flags;
3355 ++ int x2 = x + width;
3356 ++ int y2 = y + height;
3357 ++
3358 ++ if (x >= x2 || y >= y2)
3359 ++ return;
3360 ++
3361 ++ spin_lock_irqsave(&dlfb->damage_lock, flags);
3362 ++ dlfb->damage_x = min(x, dlfb->damage_x);
3363 ++ dlfb->damage_x2 = max(x2, dlfb->damage_x2);
3364 ++ dlfb->damage_y = min(y, dlfb->damage_y);
3365 ++ dlfb->damage_y2 = max(y2, dlfb->damage_y2);
3366 ++ spin_unlock_irqrestore(&dlfb->damage_lock, flags);
3367 ++
3368 ++ schedule_work(&dlfb->damage_work);
3369 + }
3370 +
3371 + /*
3372 +@@ -679,7 +734,7 @@ static ssize_t dlfb_ops_write(struct fb_info *info, const char __user *buf,
3373 + (u32)info->var.yres);
3374 +
3375 + dlfb_handle_damage(dlfb, 0, start, info->var.xres,
3376 +- lines, info->screen_base);
3377 ++ lines);
3378 + }
3379 +
3380 + return result;
3381 +@@ -694,8 +749,8 @@ static void dlfb_ops_copyarea(struct fb_info *info,
3382 +
3383 + sys_copyarea(info, area);
3384 +
3385 +- dlfb_handle_damage(dlfb, area->dx, area->dy,
3386 +- area->width, area->height, info->screen_base);
3387 ++ dlfb_offload_damage(dlfb, area->dx, area->dy,
3388 ++ area->width, area->height);
3389 + }
3390 +
3391 + static void dlfb_ops_imageblit(struct fb_info *info,
3392 +@@ -705,8 +760,8 @@ static void dlfb_ops_imageblit(struct fb_info *info,
3393 +
3394 + sys_imageblit(info, image);
3395 +
3396 +- dlfb_handle_damage(dlfb, image->dx, image->dy,
3397 +- image->width, image->height, info->screen_base);
3398 ++ dlfb_offload_damage(dlfb, image->dx, image->dy,
3399 ++ image->width, image->height);
3400 + }
3401 +
3402 + static void dlfb_ops_fillrect(struct fb_info *info,
3403 +@@ -716,8 +771,8 @@ static void dlfb_ops_fillrect(struct fb_info *info,
3404 +
3405 + sys_fillrect(info, rect);
3406 +
3407 +- dlfb_handle_damage(dlfb, rect->dx, rect->dy, rect->width,
3408 +- rect->height, info->screen_base);
3409 ++ dlfb_offload_damage(dlfb, rect->dx, rect->dy, rect->width,
3410 ++ rect->height);
3411 + }
3412 +
3413 + /*
3414 +@@ -739,17 +794,19 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
3415 + int bytes_identical = 0;
3416 + int bytes_rendered = 0;
3417 +
3418 ++ mutex_lock(&dlfb->render_mutex);
3419 ++
3420 + if (!fb_defio)
3421 +- return;
3422 ++ goto unlock_ret;
3423 +
3424 + if (!atomic_read(&dlfb->usb_active))
3425 +- return;
3426 ++ goto unlock_ret;
3427 +
3428 + start_cycles = get_cycles();
3429 +
3430 + urb = dlfb_get_urb(dlfb);
3431 + if (!urb)
3432 +- return;
3433 ++ goto unlock_ret;
3434 +
3435 + cmd = urb->transfer_buffer;
3436 +
3437 +@@ -782,6 +839,8 @@ error:
3438 + atomic_add(((unsigned int) ((end_cycles - start_cycles)
3439 + >> 10)), /* Kcycles */
3440 + &dlfb->cpu_kcycles_used);
3441 ++unlock_ret:
3442 ++ mutex_unlock(&dlfb->render_mutex);
3443 + }
3444 +
3445 + static int dlfb_get_edid(struct dlfb_data *dlfb, char *edid, int len)
3446 +@@ -859,8 +918,7 @@ static int dlfb_ops_ioctl(struct fb_info *info, unsigned int cmd,
3447 + if (area.y > info->var.yres)
3448 + area.y = info->var.yres;
3449 +
3450 +- dlfb_handle_damage(dlfb, area.x, area.y, area.w, area.h,
3451 +- info->screen_base);
3452 ++ dlfb_handle_damage(dlfb, area.x, area.y, area.w, area.h);
3453 + }
3454 +
3455 + return 0;
3456 +@@ -942,6 +1000,10 @@ static void dlfb_ops_destroy(struct fb_info *info)
3457 + {
3458 + struct dlfb_data *dlfb = info->par;
3459 +
3460 ++ cancel_work_sync(&dlfb->damage_work);
3461 ++
3462 ++ mutex_destroy(&dlfb->render_mutex);
3463 ++
3464 + if (info->cmap.len != 0)
3465 + fb_dealloc_cmap(&info->cmap);
3466 + if (info->monspecs.modedb)
3467 +@@ -1065,8 +1127,7 @@ static int dlfb_ops_set_par(struct fb_info *info)
3468 + pix_framebuffer[i] = 0x37e6;
3469 + }
3470 +
3471 +- dlfb_handle_damage(dlfb, 0, 0, info->var.xres, info->var.yres,
3472 +- info->screen_base);
3473 ++ dlfb_handle_damage(dlfb, 0, 0, info->var.xres, info->var.yres);
3474 +
3475 + return 0;
3476 + }
3477 +@@ -1639,6 +1700,11 @@ static int dlfb_usb_probe(struct usb_interface *intf,
3478 + dlfb->ops = dlfb_ops;
3479 + info->fbops = &dlfb->ops;
3480 +
3481 ++ mutex_init(&dlfb->render_mutex);
3482 ++ dlfb_init_damage(dlfb);
3483 ++ spin_lock_init(&dlfb->damage_lock);
3484 ++ INIT_WORK(&dlfb->damage_work, dlfb_damage_work);
3485 ++
3486 + INIT_LIST_HEAD(&info->modelist);
3487 +
3488 + if (!dlfb_alloc_urb_list(dlfb, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
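
The udlfb changes serialize all rendering behind the new render_mutex and stop rendering synchronously from the copyarea/imageblit/fillrect hooks: dlfb_offload_damage() merges each request into a single bounding rectangle under damage_lock and schedules damage_work, whose handler snapshots and resets the rectangle before calling dlfb_handle_damage() once. The accumulate-then-drain idea, reduced to a single-threaded user-space sketch with the locking omitted:

#include <limits.h>
#include <stdio.h>

struct damage { int x, y, x2, y2; };

static void damage_init(struct damage *d)
{
    d->x = INT_MAX; d->y = INT_MAX; d->x2 = 0; d->y2 = 0;
}

/* producer side: grow the bounding box (the driver does this under a
 * spinlock, then schedule_work()s the consumer) */
static void damage_add(struct damage *d, int x, int y, int w, int h)
{
    if (w <= 0 || h <= 0)
        return;
    if (x < d->x) d->x = x;
    if (y < d->y) d->y = y;
    if (x + w > d->x2) d->x2 = x + w;
    if (y + h > d->y2) d->y2 = y + h;
}

int main(void)
{
    struct damage d;

    damage_init(&d);
    damage_add(&d, 10, 10, 5, 5);
    damage_add(&d, 100, 40, 8, 2);

    /* consumer side: snapshot, reset, render one rectangle */
    if (d.x < d.x2 && d.y < d.y2)
        printf("render %d,%d %dx%d\n", d.x, d.y, d.x2 - d.x, d.y2 - d.y);
    damage_init(&d);
    return 0;
}

Starting x/y at INT_MAX and x2/y2 at 0 makes the empty rectangle self-identifying: x < x2 only holds once at least one damage_add() has run, which is exactly the check dlfb_damage_work() performs.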
3489 +diff --git a/fs/ceph/super.c b/fs/ceph/super.c
3490 +index da2cd8e89062..950919411460 100644
3491 +--- a/fs/ceph/super.c
3492 ++++ b/fs/ceph/super.c
3493 +@@ -832,6 +832,12 @@ static void ceph_umount_begin(struct super_block *sb)
3494 + return;
3495 + }
3496 +
3497 ++static int ceph_remount(struct super_block *sb, int *flags, char *data)
3498 ++{
3499 ++ sync_filesystem(sb);
3500 ++ return 0;
3501 ++}
3502 ++
3503 + static const struct super_operations ceph_super_ops = {
3504 + .alloc_inode = ceph_alloc_inode,
3505 + .destroy_inode = ceph_destroy_inode,
3506 +@@ -839,6 +845,7 @@ static const struct super_operations ceph_super_ops = {
3507 + .drop_inode = ceph_drop_inode,
3508 + .sync_fs = ceph_sync_fs,
3509 + .put_super = ceph_put_super,
3510 ++ .remount_fs = ceph_remount,
3511 + .show_options = ceph_show_options,
3512 + .statfs = ceph_statfs,
3513 + .umount_begin = ceph_umount_begin,
3514 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
3515 +index d5434ac0571b..105ddbad00e5 100644
3516 +--- a/fs/cifs/smb2ops.c
3517 ++++ b/fs/cifs/smb2ops.c
3518 +@@ -2652,26 +2652,28 @@ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3519 + unsigned int epoch, bool *purge_cache)
3520 + {
3521 + char message[5] = {0};
3522 ++ unsigned int new_oplock = 0;
3523 +
3524 + oplock &= 0xFF;
3525 + if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
3526 + return;
3527 +
3528 +- cinode->oplock = 0;
3529 + if (oplock & SMB2_LEASE_READ_CACHING_HE) {
3530 +- cinode->oplock |= CIFS_CACHE_READ_FLG;
3531 ++ new_oplock |= CIFS_CACHE_READ_FLG;
3532 + strcat(message, "R");
3533 + }
3534 + if (oplock & SMB2_LEASE_HANDLE_CACHING_HE) {
3535 +- cinode->oplock |= CIFS_CACHE_HANDLE_FLG;
3536 ++ new_oplock |= CIFS_CACHE_HANDLE_FLG;
3537 + strcat(message, "H");
3538 + }
3539 + if (oplock & SMB2_LEASE_WRITE_CACHING_HE) {
3540 +- cinode->oplock |= CIFS_CACHE_WRITE_FLG;
3541 ++ new_oplock |= CIFS_CACHE_WRITE_FLG;
3542 + strcat(message, "W");
3543 + }
3544 +- if (!cinode->oplock)
3545 +- strcat(message, "None");
3546 ++ if (!new_oplock)
3547 ++ strncpy(message, "None", sizeof(message));
3548 ++
3549 ++ cinode->oplock = new_oplock;
3550 + cifs_dbg(FYI, "%s Lease granted on inode %p\n", message,
3551 + &cinode->vfs_inode);
3552 + }
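
The smb2ops hunk builds the granted lease bits into a local new_oplock and publishes the result with one assignment; the old code zeroed cinode->oplock and OR-ed flags back in, so a concurrent reader could briefly observe no caching rights at all (it also switches the "None" string to a bounded strncpy()). A user-space sketch of the compute-then-publish pattern - C11 atomics stand in for the kernel's plain stores, and the bit values are arbitrary:

#include <stdatomic.h>
#include <stdio.h>

#define CACHE_READ  0x1
#define CACHE_WRITE 0x2

static _Atomic unsigned int oplock = CACHE_READ | CACHE_WRITE;

/* racy: readers can see the transient 0 between the store and the ORs */
static void update_racy(unsigned int granted)
{
    atomic_store(&oplock, 0);
    if (granted & 0x10) atomic_fetch_or(&oplock, CACHE_READ);
    if (granted & 0x20) atomic_fetch_or(&oplock, CACHE_WRITE);
}

/* fixed: build the value locally, publish it with a single store */
static void update_fixed(unsigned int granted)
{
    unsigned int new_oplock = 0;

    if (granted & 0x10) new_oplock |= CACHE_READ;
    if (granted & 0x20) new_oplock |= CACHE_WRITE;
    atomic_store(&oplock, new_oplock);
}

int main(void)
{
    update_racy(0x30);
    update_fixed(0x30);
    printf("oplock=%x\n", atomic_load(&oplock));
    return 0;
}

update_fixed() never exposes an intermediate state, which is the whole point of the hunk.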
3553 +diff --git a/fs/dcache.c b/fs/dcache.c
3554 +index aac41adf4743..c663c602f9ef 100644
3555 +--- a/fs/dcache.c
3556 ++++ b/fs/dcache.c
3557 +@@ -344,7 +344,7 @@ static void dentry_free(struct dentry *dentry)
3558 + }
3559 + }
3560 + /* if dentry was never visible to RCU, immediate free is OK */
3561 +- if (!(dentry->d_flags & DCACHE_RCUACCESS))
3562 ++ if (dentry->d_flags & DCACHE_NORCU)
3563 + __d_free(&dentry->d_u.d_rcu);
3564 + else
3565 + call_rcu(&dentry->d_u.d_rcu, __d_free);
3566 +@@ -1701,7 +1701,6 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
3567 + struct dentry *dentry = __d_alloc(parent->d_sb, name);
3568 + if (!dentry)
3569 + return NULL;
3570 +- dentry->d_flags |= DCACHE_RCUACCESS;
3571 + spin_lock(&parent->d_lock);
3572 + /*
3573 + * don't need child lock because it is not subject
3574 +@@ -1726,7 +1725,7 @@ struct dentry *d_alloc_cursor(struct dentry * parent)
3575 + {
3576 + struct dentry *dentry = d_alloc_anon(parent->d_sb);
3577 + if (dentry) {
3578 +- dentry->d_flags |= DCACHE_RCUACCESS | DCACHE_DENTRY_CURSOR;
3579 ++ dentry->d_flags |= DCACHE_DENTRY_CURSOR;
3580 + dentry->d_parent = dget(parent);
3581 + }
3582 + return dentry;
3583 +@@ -1739,10 +1738,17 @@ struct dentry *d_alloc_cursor(struct dentry * parent)
3584 + *
3585 + * For a filesystem that just pins its dentries in memory and never
3586 + * performs lookups at all, return an unhashed IS_ROOT dentry.
3587 ++ * This is used for pipes, sockets et al. - the stuff that should
3588 ++ * never be anyone's children or parents. Unlike all other
3589 ++ * dentries, these will not have RCU delay between dropping the
3590 ++ * last reference and freeing them.
3591 + */
3592 + struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
3593 + {
3594 +- return __d_alloc(sb, name);
3595 ++ struct dentry *dentry = __d_alloc(sb, name);
3596 ++ if (likely(dentry))
3597 ++ dentry->d_flags |= DCACHE_NORCU;
3598 ++ return dentry;
3599 + }
3600 + EXPORT_SYMBOL(d_alloc_pseudo);
3601 +
3602 +@@ -1911,12 +1917,10 @@ struct dentry *d_make_root(struct inode *root_inode)
3603 +
3604 + if (root_inode) {
3605 + res = d_alloc_anon(root_inode->i_sb);
3606 +- if (res) {
3607 +- res->d_flags |= DCACHE_RCUACCESS;
3608 ++ if (res)
3609 + d_instantiate(res, root_inode);
3610 +- } else {
3611 ++ else
3612 + iput(root_inode);
3613 +- }
3614 + }
3615 + return res;
3616 + }
3617 +@@ -2781,9 +2785,7 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
3618 + copy_name(dentry, target);
3619 + target->d_hash.pprev = NULL;
3620 + dentry->d_parent->d_lockref.count++;
3621 +- if (dentry == old_parent)
3622 +- dentry->d_flags |= DCACHE_RCUACCESS;
3623 +- else
3624 ++ if (dentry != old_parent) /* wasn't IS_ROOT */
3625 + WARN_ON(!--old_parent->d_lockref.count);
3626 + } else {
3627 + target->d_parent = old_parent;
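
These dcache hunks invert the flag's polarity, matching the porting-doc hunk at the top of this patch: an RCU grace period before freeing is now the default for every dentry, and DCACHE_NORCU is the explicit opt-out that only d_alloc_pseudo() sets. A toy sketch of the resulting free-path decision (the flag value matches the dcache.h hunk; everything else is simplified):

#include <stdio.h>

#define DCACHE_NORCU 0x40000000u

/* the safe path (RCU-delayed free) is the default; immediate free is the
 * explicit opt-in, so a forgotten flag can no longer become a
 * use-after-free under RCU-walk */
static void dentry_free_sketch(unsigned int d_flags)
{
    if (d_flags & DCACHE_NORCU)
        puts("free immediately (never RCU-visible)");
    else
        puts("call_rcu(): free after a grace period");
}

int main(void)
{
    dentry_free_sketch(0);            /* ordinary dentry */
    dentry_free_sketch(DCACHE_NORCU); /* pipe/socket pseudo dentry */
    return 0;
}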
3628 +diff --git a/fs/fuse/file.c b/fs/fuse/file.c
3629 +index a59c16bd90ac..d2926ac44f83 100644
3630 +--- a/fs/fuse/file.c
3631 ++++ b/fs/fuse/file.c
3632 +@@ -181,7 +181,9 @@ void fuse_finish_open(struct inode *inode, struct file *file)
3633 + file->f_op = &fuse_direct_io_file_operations;
3634 + if (!(ff->open_flags & FOPEN_KEEP_CACHE))
3635 + invalidate_inode_pages2(inode->i_mapping);
3636 +- if (ff->open_flags & FOPEN_NONSEEKABLE)
3637 ++ if (ff->open_flags & FOPEN_STREAM)
3638 ++ stream_open(inode, file);
3639 ++ else if (ff->open_flags & FOPEN_NONSEEKABLE)
3640 + nonseekable_open(inode, file);
3641 + if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
3642 + struct fuse_inode *fi = get_fuse_inode(inode);
3643 +@@ -1530,7 +1532,7 @@ __acquires(fc->lock)
3644 + {
3645 + struct fuse_conn *fc = get_fuse_conn(inode);
3646 + struct fuse_inode *fi = get_fuse_inode(inode);
3647 +- size_t crop = i_size_read(inode);
3648 ++ loff_t crop = i_size_read(inode);
3649 + struct fuse_req *req;
3650 +
3651 + while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
3652 +@@ -2987,6 +2989,13 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
3653 + }
3654 + }
3655 +
3656 ++ if (!(mode & FALLOC_FL_KEEP_SIZE) &&
3657 ++ offset + length > i_size_read(inode)) {
3658 ++ err = inode_newsize_ok(inode, offset + length);
3659 ++ if (err)
3660 ++ return err;
3661 ++ }
3662 ++
3663 + if (!(mode & FALLOC_FL_KEEP_SIZE))
3664 + set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
3665 +
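
Two of the fuse changes are size-type fixes: crop becomes loff_t because size_t is only 32 bits on 32-bit kernels, so cropping queued writeback against i_size would truncate for files past 4 GiB, and fallocate() now runs the new size through inode_newsize_ok() before extending. The truncation half demonstrated in plain user-space C:

#include <stdio.h>

int main(void)
{
    long long i_size = 6LL << 30;       /* a 6 GiB file size (loff_t-like) */

    unsigned int crop32 = (unsigned int)i_size; /* size_t on a 32-bit kernel */
    long long crop64 = i_size;                  /* loff_t is always 64-bit */

    /* the 32-bit crop wraps to 2 GiB and would cut off valid writeback */
    printf("crop32=%u crop64=%lld\n", crop32, crop64);
    return 0;
}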
3666 +diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
3667 +index 61f46facb39c..b3e8ba3bd654 100644
3668 +--- a/fs/nfs/filelayout/filelayout.c
3669 ++++ b/fs/nfs/filelayout/filelayout.c
3670 +@@ -904,7 +904,7 @@ fl_pnfs_update_layout(struct inode *ino,
3671 + status = filelayout_check_deviceid(lo, fl, gfp_flags);
3672 + if (status) {
3673 + pnfs_put_lseg(lseg);
3674 +- lseg = ERR_PTR(status);
3675 ++ lseg = NULL;
3676 + }
3677 + out:
3678 + return lseg;
3679 +diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
3680 +index 02488b50534a..6999e870baa9 100644
3681 +--- a/fs/nfs/nfs4state.c
3682 ++++ b/fs/nfs/nfs4state.c
3683 +@@ -159,6 +159,10 @@ int nfs40_discover_server_trunking(struct nfs_client *clp,
3684 + /* Sustain the lease, even if it's empty. If the clientid4
3685 + * goes stale it's of no use for trunking discovery. */
3686 + nfs4_schedule_state_renewal(*result);
3687 ++
3688 ++ /* If the client state needs to recover, do it. */
3689 ++ if (clp->cl_state)
3690 ++ nfs4_schedule_state_manager(clp);
3691 + }
3692 + out:
3693 + return status;
3694 +diff --git a/fs/nsfs.c b/fs/nsfs.c
3695 +index 60702d677bd4..30d150a4f0c6 100644
3696 +--- a/fs/nsfs.c
3697 ++++ b/fs/nsfs.c
3698 +@@ -85,13 +85,12 @@ slow:
3699 + inode->i_fop = &ns_file_operations;
3700 + inode->i_private = ns;
3701 +
3702 +- dentry = d_alloc_pseudo(mnt->mnt_sb, &empty_name);
3703 ++ dentry = d_alloc_anon(mnt->mnt_sb);
3704 + if (!dentry) {
3705 + iput(inode);
3706 + return ERR_PTR(-ENOMEM);
3707 + }
3708 + d_instantiate(dentry, inode);
3709 +- dentry->d_flags |= DCACHE_RCUACCESS;
3710 + dentry->d_fsdata = (void *)ns->ops;
3711 + d = atomic_long_cmpxchg(&ns->stashed, 0, (unsigned long)dentry);
3712 + if (d) {
3713 +diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
3714 +index 68b3303e4b46..56feaa739979 100644
3715 +--- a/fs/overlayfs/copy_up.c
3716 ++++ b/fs/overlayfs/copy_up.c
3717 +@@ -909,14 +909,14 @@ static bool ovl_open_need_copy_up(struct dentry *dentry, int flags)
3718 + return true;
3719 + }
3720 +
3721 +-int ovl_open_maybe_copy_up(struct dentry *dentry, unsigned int file_flags)
3722 ++int ovl_maybe_copy_up(struct dentry *dentry, int flags)
3723 + {
3724 + int err = 0;
3725 +
3726 +- if (ovl_open_need_copy_up(dentry, file_flags)) {
3727 ++ if (ovl_open_need_copy_up(dentry, flags)) {
3728 + err = ovl_want_write(dentry);
3729 + if (!err) {
3730 +- err = ovl_copy_up_flags(dentry, file_flags);
3731 ++ err = ovl_copy_up_flags(dentry, flags);
3732 + ovl_drop_write(dentry);
3733 + }
3734 + }
3735 +diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
3736 +index 84dd957efa24..50e4407398d8 100644
3737 +--- a/fs/overlayfs/file.c
3738 ++++ b/fs/overlayfs/file.c
3739 +@@ -116,11 +116,10 @@ static int ovl_real_fdget(const struct file *file, struct fd *real)
3740 +
3741 + static int ovl_open(struct inode *inode, struct file *file)
3742 + {
3743 +- struct dentry *dentry = file_dentry(file);
3744 + struct file *realfile;
3745 + int err;
3746 +
3747 +- err = ovl_open_maybe_copy_up(dentry, file->f_flags);
3748 ++ err = ovl_maybe_copy_up(file_dentry(file), file->f_flags);
3749 + if (err)
3750 + return err;
3751 +
3752 +@@ -390,7 +389,7 @@ static long ovl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3753 + if (ret)
3754 + return ret;
3755 +
3756 +- ret = ovl_copy_up_with_data(file_dentry(file));
3757 ++ ret = ovl_maybe_copy_up(file_dentry(file), O_WRONLY);
3758 + if (!ret) {
3759 + ret = ovl_real_ioctl(file, cmd, arg);
3760 +
3761 +diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
3762 +index 9c6018287d57..d26efed9f80a 100644
3763 +--- a/fs/overlayfs/overlayfs.h
3764 ++++ b/fs/overlayfs/overlayfs.h
3765 +@@ -421,7 +421,7 @@ extern const struct file_operations ovl_file_operations;
3766 + int ovl_copy_up(struct dentry *dentry);
3767 + int ovl_copy_up_with_data(struct dentry *dentry);
3768 + int ovl_copy_up_flags(struct dentry *dentry, int flags);
3769 +-int ovl_open_maybe_copy_up(struct dentry *dentry, unsigned int file_flags);
3770 ++int ovl_maybe_copy_up(struct dentry *dentry, int flags);
3771 + int ovl_copy_xattr(struct dentry *old, struct dentry *new);
3772 + int ovl_set_attr(struct dentry *upper, struct kstat *stat);
3773 + struct ovl_fh *ovl_encode_real_fh(struct dentry *real, bool is_upper);
3774 +diff --git a/fs/proc/base.c b/fs/proc/base.c
3775 +index f5ed9512d193..ef11c54ad712 100644
3776 +--- a/fs/proc/base.c
3777 ++++ b/fs/proc/base.c
3778 +@@ -2550,6 +2550,11 @@ static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
3779 + rcu_read_unlock();
3780 + return -EACCES;
3781 + }
3782 ++ /* Prevent changes to overridden credentials. */
3783 ++ if (current_cred() != current_real_cred()) {
3784 ++ rcu_read_unlock();
3785 ++ return -EBUSY;
3786 ++ }
3787 + rcu_read_unlock();
3788 +
3789 + if (count > PAGE_SIZE)
3790 +diff --git a/fs/ufs/util.h b/fs/ufs/util.h
3791 +index 1fd3011ea623..7fd4802222b8 100644
3792 +--- a/fs/ufs/util.h
3793 ++++ b/fs/ufs/util.h
3794 +@@ -229,7 +229,7 @@ ufs_get_inode_gid(struct super_block *sb, struct ufs_inode *inode)
3795 + case UFS_UID_44BSD:
3796 + return fs32_to_cpu(sb, inode->ui_u3.ui_44.ui_gid);
3797 + case UFS_UID_EFT:
3798 +- if (inode->ui_u1.oldids.ui_suid == 0xFFFF)
3799 ++ if (inode->ui_u1.oldids.ui_sgid == 0xFFFF)
3800 + return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_gid);
3801 + /* Fall through */
3802 + default:
3803 +diff --git a/include/asm-generic/mm_hooks.h b/include/asm-generic/mm_hooks.h
3804 +index 8ac4e68a12f0..6736ed2f632b 100644
3805 +--- a/include/asm-generic/mm_hooks.h
3806 ++++ b/include/asm-generic/mm_hooks.h
3807 +@@ -18,7 +18,6 @@ static inline void arch_exit_mmap(struct mm_struct *mm)
3808 + }
3809 +
3810 + static inline void arch_unmap(struct mm_struct *mm,
3811 +- struct vm_area_struct *vma,
3812 + unsigned long start, unsigned long end)
3813 + {
3814 + }
3815 +diff --git a/include/linux/bpf.h b/include/linux/bpf.h
3816 +index e734f163bd0b..bd8c322fd92a 100644
3817 +--- a/include/linux/bpf.h
3818 ++++ b/include/linux/bpf.h
3819 +@@ -35,6 +35,7 @@ struct bpf_map_ops {
3820 + void (*map_free)(struct bpf_map *map);
3821 + int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
3822 + void (*map_release_uref)(struct bpf_map *map);
3823 ++ void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
3824 +
3825 + /* funcs callable from userspace and from eBPF programs */
3826 + void *(*map_lookup_elem)(struct bpf_map *map, void *key);
3827 +@@ -455,7 +456,7 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
3828 + } \
3829 + _out: \
3830 + rcu_read_unlock(); \
3831 +- preempt_enable_no_resched(); \
3832 ++ preempt_enable(); \
3833 + _ret; \
3834 + })
3835 +
3836 +diff --git a/include/linux/dcache.h b/include/linux/dcache.h
3837 +index 60996e64c579..6e1e8e6602c6 100644
3838 +--- a/include/linux/dcache.h
3839 ++++ b/include/linux/dcache.h
3840 +@@ -176,7 +176,6 @@ struct dentry_operations {
3841 + * typically using d_splice_alias. */
3842 +
3843 + #define DCACHE_REFERENCED 0x00000040 /* Recently used, don't discard. */
3844 +-#define DCACHE_RCUACCESS 0x00000080 /* Entry has ever been RCU-visible */
3845 +
3846 + #define DCACHE_CANT_MOUNT 0x00000100
3847 + #define DCACHE_GENOCIDE 0x00000200
3848 +@@ -217,6 +216,7 @@ struct dentry_operations {
3849 +
3850 + #define DCACHE_PAR_LOOKUP 0x10000000 /* being looked up (with parent locked shared) */
3851 + #define DCACHE_DENTRY_CURSOR 0x20000000
3852 ++#define DCACHE_NORCU 0x40000000 /* No RCU delay for freeing */
3853 +
3854 + extern seqlock_t rename_lock;
3855 +
3856 +diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
3857 +index 4f001619f854..a6d4436c76b5 100644
3858 +--- a/include/linux/mlx5/driver.h
3859 ++++ b/include/linux/mlx5/driver.h
3860 +@@ -677,7 +677,6 @@ struct mlx5_core_dev {
3861 + #endif
3862 + struct mlx5_clock clock;
3863 + struct mlx5_ib_clock_info *clock_info;
3864 +- struct page *clock_info_page;
3865 + struct mlx5_fw_tracer *tracer;
3866 + };
3867 +
3868 +diff --git a/include/linux/of.h b/include/linux/of.h
3869 +index e240992e5cb6..074913002e39 100644
3870 +--- a/include/linux/of.h
3871 ++++ b/include/linux/of.h
3872 +@@ -234,8 +234,8 @@ extern struct device_node *of_find_all_nodes(struct device_node *prev);
3873 + static inline u64 of_read_number(const __be32 *cell, int size)
3874 + {
3875 + u64 r = 0;
3876 +- while (size--)
3877 +- r = (r << 32) | be32_to_cpu(*(cell++));
3878 ++ for (; size--; cell++)
3879 ++ r = (r << 32) | be32_to_cpu(*cell);
3880 + return r;
3881 + }
3882 +
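
The of_read_number() rewrite moves cell++ out of the argument of be32_to_cpu(), which in some configurations is a function-like macro that expands its argument more than once; clang reports the old form as an unsequenced modification. A self-contained illustration of the hazard, using a hypothetical byte-swap macro rather than the kernel's:

#include <stdio.h>

/* evaluates its argument twice, like some be32_to_cpu() implementations */
#define SWAP16(x) ((((x) & 0xff) << 8) | (((x) >> 8) & 0xff))

int main(void)
{
    unsigned short buf[] = { 0x1234, 0x5678 };
    const unsigned short *p = buf;

    /* BROKEN - expands to two unsequenced p++ and mixes two elements:
     *     unsigned int v = SWAP16(*(p++));
     */
    unsigned int v = SWAP16(*p); /* safe: the side effect stays outside */
    p++;

    printf("v=%04x next=%04x\n", v, *p);
    return 0;
}

That is exactly why the kernel loop now increments cell in the for header instead.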
3883 +diff --git a/include/linux/pci.h b/include/linux/pci.h
3884 +index 65f1d8c2f082..0e5e1ceae27d 100644
3885 +--- a/include/linux/pci.h
3886 ++++ b/include/linux/pci.h
3887 +@@ -348,6 +348,8 @@ struct pci_dev {
3888 + unsigned int hotplug_user_indicators:1; /* SlotCtl indicators
3889 + controlled exclusively by
3890 + user sysfs */
3891 ++ unsigned int clear_retrain_link:1; /* Need to clear Retrain Link
3892 ++ bit manually */
3893 + unsigned int d3_delay; /* D3->D0 transition time in ms */
3894 + unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
3895 +
3896 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
3897 +index b8679dcba96f..3b1a8f38a1ef 100644
3898 +--- a/include/linux/skbuff.h
3899 ++++ b/include/linux/skbuff.h
3900 +@@ -1366,10 +1366,12 @@ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
3901 + struct ubuf_info *uarg = skb_zcopy(skb);
3902 +
3903 + if (uarg) {
3904 +- if (uarg->callback == sock_zerocopy_callback) {
3905 ++ if (skb_zcopy_is_nouarg(skb)) {
3906 ++ /* no notification callback */
3907 ++ } else if (uarg->callback == sock_zerocopy_callback) {
3908 + uarg->zerocopy = uarg->zerocopy && zerocopy;
3909 + sock_zerocopy_put(uarg);
3910 +- } else if (!skb_zcopy_is_nouarg(skb)) {
3911 ++ } else {
3912 + uarg->callback(uarg, zerocopy);
3913 + }
3914 +
3915 +@@ -2627,7 +2629,8 @@ static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
3916 + {
3917 + if (likely(!skb_zcopy(skb)))
3918 + return 0;
3919 +- if (skb_uarg(skb)->callback == sock_zerocopy_callback)
3920 ++ if (!skb_zcopy_is_nouarg(skb) &&
3921 ++ skb_uarg(skb)->callback == sock_zerocopy_callback)
3922 + return 0;
3923 + return skb_copy_ubufs(skb, gfp_mask);
3924 + }
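
Both skbuff.h hunks reorder the tests so skb_zcopy_is_nouarg() is checked before uarg->callback is dereferenced: for "nouarg" skbs the stored value is a tagged sentinel rather than a real ubuf_info pointer, so reading a callback out of it was never valid. A sketch of the tagged-pointer idiom (the low-bit encoding below is an assumption for illustration, not the kernel's exact scheme):

#include <stdint.h>
#include <stdio.h>

struct ubuf { void (*callback)(void); };

#define NOUARG_TAG 0x1UL /* low pointer bit: "no real ubuf here" */

static int is_nouarg(const struct ubuf *u)
{
    return (uintptr_t)u & NOUARG_TAG;
}

static void release(struct ubuf *u)
{
    if (!u)
        return;
    if (is_nouarg(u)) {  /* test the tag first ... */
        puts("no notification callback");
        return;
    }
    u->callback();       /* ... and only then dereference */
}

static void cb(void) { puts("callback ran"); }

int main(void)
{
    struct ubuf real = { .callback = cb };

    release(&real);
    release((struct ubuf *)NOUARG_TAG); /* sentinel, never dereferenced */
    return 0;
}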
3925 +diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
3926 +index 84097010237c..b5e3add90e99 100644
3927 +--- a/include/net/ip6_fib.h
3928 ++++ b/include/net/ip6_fib.h
3929 +@@ -171,7 +171,8 @@ struct fib6_info {
3930 + dst_nocount:1,
3931 + dst_nopolicy:1,
3932 + dst_host:1,
3933 +- unused:3;
3934 ++ fib6_destroying:1,
3935 ++ unused:2;
3936 +
3937 + struct fib6_nh fib6_nh;
3938 + struct rcu_head rcu;
3939 +diff --git a/include/net/xfrm.h b/include/net/xfrm.h
3940 +index 85386becbaea..c9b0b2b5d672 100644
3941 +--- a/include/net/xfrm.h
3942 ++++ b/include/net/xfrm.h
3943 +@@ -295,7 +295,8 @@ struct xfrm_replay {
3944 + };
3945 +
3946 + struct xfrm_if_cb {
3947 +- struct xfrm_if *(*decode_session)(struct sk_buff *skb);
3948 ++ struct xfrm_if *(*decode_session)(struct sk_buff *skb,
3949 ++ unsigned short family);
3950 + };
3951 +
3952 + void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb);
3953 +@@ -1404,6 +1405,23 @@ static inline int xfrm_state_kern(const struct xfrm_state *x)
3954 + return atomic_read(&x->tunnel_users);
3955 + }
3956 +
3957 ++static inline bool xfrm_id_proto_valid(u8 proto)
3958 ++{
3959 ++ switch (proto) {
3960 ++ case IPPROTO_AH:
3961 ++ case IPPROTO_ESP:
3962 ++ case IPPROTO_COMP:
3963 ++#if IS_ENABLED(CONFIG_IPV6)
3964 ++ case IPPROTO_ROUTING:
3965 ++ case IPPROTO_DSTOPTS:
3966 ++#endif
3967 ++ return true;
3968 ++ default:
3969 ++ return false;
3970 ++ }
3971 ++}
3972 ++
3973 ++/* IPSEC_PROTO_ANY only matches 3 IPsec protocols, 0 could match all. */
3974 + static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
3975 + {
3976 + return (!userproto || proto == userproto ||
3977 +diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
3978 +index b4967d48bfda..5f7c3a221894 100644
3979 +--- a/include/uapi/linux/fuse.h
3980 ++++ b/include/uapi/linux/fuse.h
3981 +@@ -226,11 +226,13 @@ struct fuse_file_lock {
3982 + * FOPEN_KEEP_CACHE: don't invalidate the data cache on open
3983 + * FOPEN_NONSEEKABLE: the file is not seekable
3984 + * FOPEN_CACHE_DIR: allow caching this directory
3985 ++ * FOPEN_STREAM: the file is stream-like (no file position at all)
3986 + */
3987 + #define FOPEN_DIRECT_IO (1 << 0)
3988 + #define FOPEN_KEEP_CACHE (1 << 1)
3989 + #define FOPEN_NONSEEKABLE (1 << 2)
3990 + #define FOPEN_CACHE_DIR (1 << 3)
3991 ++#define FOPEN_STREAM (1 << 4)
3992 +
3993 + /**
3994 + * INIT request/reply flags
3995 +diff --git a/include/video/udlfb.h b/include/video/udlfb.h
3996 +index 7d09e54ae54e..58fb5732831a 100644
3997 +--- a/include/video/udlfb.h
3998 ++++ b/include/video/udlfb.h
3999 +@@ -48,6 +48,13 @@ struct dlfb_data {
4000 + int base8;
4001 + u32 pseudo_palette[256];
4002 + int blank_mode; /*one of FB_BLANK_ */
4003 ++ struct mutex render_mutex;
4004 ++ int damage_x;
4005 ++ int damage_y;
4006 ++ int damage_x2;
4007 ++ int damage_y2;
4008 ++ spinlock_t damage_lock;
4009 ++ struct work_struct damage_work;
4010 + struct fb_ops ops;
4011 + /* blit-only rendering path metrics, exposed through sysfs */
4012 + atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
4013 +diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
4014 +index f9274114c88d..be5747a5337a 100644
4015 +--- a/kernel/bpf/hashtab.c
4016 ++++ b/kernel/bpf/hashtab.c
4017 +@@ -527,18 +527,30 @@ static u32 htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
4018 + return insn - insn_buf;
4019 + }
4020 +
4021 +-static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
4022 ++static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map,
4023 ++ void *key, const bool mark)
4024 + {
4025 + struct htab_elem *l = __htab_map_lookup_elem(map, key);
4026 +
4027 + if (l) {
4028 +- bpf_lru_node_set_ref(&l->lru_node);
4029 ++ if (mark)
4030 ++ bpf_lru_node_set_ref(&l->lru_node);
4031 + return l->key + round_up(map->key_size, 8);
4032 + }
4033 +
4034 + return NULL;
4035 + }
4036 +
4037 ++static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
4038 ++{
4039 ++ return __htab_lru_map_lookup_elem(map, key, true);
4040 ++}
4041 ++
4042 ++static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key)
4043 ++{
4044 ++ return __htab_lru_map_lookup_elem(map, key, false);
4045 ++}
4046 ++
4047 + static u32 htab_lru_map_gen_lookup(struct bpf_map *map,
4048 + struct bpf_insn *insn_buf)
4049 + {
4050 +@@ -1215,6 +1227,7 @@ const struct bpf_map_ops htab_lru_map_ops = {
4051 + .map_free = htab_map_free,
4052 + .map_get_next_key = htab_map_get_next_key,
4053 + .map_lookup_elem = htab_lru_map_lookup_elem,
4054 ++ .map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys,
4055 + .map_update_elem = htab_lru_map_update_elem,
4056 + .map_delete_elem = htab_lru_map_delete_elem,
4057 + .map_gen_lookup = htab_lru_map_gen_lookup,
4058 +@@ -1246,7 +1259,6 @@ static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
4059 +
4060 + int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
4061 + {
4062 +- struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
4063 + struct htab_elem *l;
4064 + void __percpu *pptr;
4065 + int ret = -ENOENT;
4066 +@@ -1262,8 +1274,9 @@ int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
4067 + l = __htab_map_lookup_elem(map, key);
4068 + if (!l)
4069 + goto out;
4070 +- if (htab_is_lru(htab))
4071 +- bpf_lru_node_set_ref(&l->lru_node);
4072 ++ /* We do not mark LRU map element here in order to not mess up
4073 ++ * eviction heuristics when user space does a map walk.
4074 ++ */
4075 + pptr = htab_elem_get_ptr(l, map->key_size);
4076 + for_each_possible_cpu(cpu) {
4077 + bpf_long_memcpy(value + off,
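
The hashtab change splits the LRU lookup into one inlined body with two entry points: the program-facing lookup still calls bpf_lru_node_set_ref(), while the new map_lookup_elem_sys_only skips it, and the bpf/syscall.c hunk further down prefers the latter. Without this, a user-space walk over the map would mark every element recently used and defeat LRU eviction. The shape of the split as a tiny sketch:

#include <stdio.h>

struct node { int key; int referenced; };

/* one lookup body, two entry points: programs mark the node referenced,
 * the syscall/map-walk path deliberately does not */
static struct node *lookup(struct node *n, int key, int mark)
{
    if (n->key != key)
        return NULL;
    if (mark)
        n->referenced = 1;
    return n;
}

static struct node *lookup_prog(struct node *n, int k) { return lookup(n, k, 1); }
static struct node *lookup_sys(struct node *n, int k)  { return lookup(n, k, 0); }

int main(void)
{
    struct node n = { .key = 7, .referenced = 0 };

    lookup_sys(&n, 7);  /* userspace dump: eviction state untouched */
    printf("after sys walk: referenced=%d\n", n.referenced);
    lookup_prog(&n, 7); /* real program access: counts as a reference */
    printf("after prog use: referenced=%d\n", n.referenced);
    return 0;
}

The bpf_percpu_hash_copy() hunk applies the same policy to the percpu variant by simply dropping its set_ref call.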
4078 +diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
4079 +index 4a8f390a2b82..dc9d7ac8228d 100644
4080 +--- a/kernel/bpf/inode.c
4081 ++++ b/kernel/bpf/inode.c
4082 +@@ -518,7 +518,7 @@ out:
4083 + static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type type)
4084 + {
4085 + struct bpf_prog *prog;
4086 +- int ret = inode_permission(inode, MAY_READ | MAY_WRITE);
4087 ++ int ret = inode_permission(inode, MAY_READ);
4088 + if (ret)
4089 + return ERR_PTR(ret);
4090 +
4091 +diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
4092 +index 84470d1480aa..07d9b76e90ce 100644
4093 +--- a/kernel/bpf/syscall.c
4094 ++++ b/kernel/bpf/syscall.c
4095 +@@ -738,7 +738,10 @@ static int map_lookup_elem(union bpf_attr *attr)
4096 + err = map->ops->map_peek_elem(map, value);
4097 + } else {
4098 + rcu_read_lock();
4099 +- ptr = map->ops->map_lookup_elem(map, key);
4100 ++ if (map->ops->map_lookup_elem_sys_only)
4101 ++ ptr = map->ops->map_lookup_elem_sys_only(map, key);
4102 ++ else
4103 ++ ptr = map->ops->map_lookup_elem(map, key);
4104 + if (IS_ERR(ptr)) {
4105 + err = PTR_ERR(ptr);
4106 + } else if (!ptr) {
4107 +diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
4108 +index 1ccf77f6d346..d4ab9245e016 100644
4109 +--- a/kernel/sched/cpufreq_schedutil.c
4110 ++++ b/kernel/sched/cpufreq_schedutil.c
4111 +@@ -771,6 +771,7 @@ out:
4112 + return 0;
4113 +
4114 + fail:
4115 ++ kobject_put(&tunables->attr_set.kobj);
4116 + policy->governor_data = NULL;
4117 + sugov_tunables_free(tunables);
4118 +
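
The schedutil fix adds kobject_put() to the failure path: once kobject_init_and_add() has run for the tunables, the kobject owns state (a reference, a sysfs name) that only its release path frees, so bailing out with a bare sugov_tunables_free() leaked it. The general rule - teardown of a refcounted object must go through the put path so release-side cleanup runs - sketched with a toy object:

#include <stdio.h>
#include <stdlib.h>

struct obj { int refs; };

static void obj_put(struct obj *o)
{
    if (--o->refs == 0) {
        puts("release() runs: name/sysfs state freed here");
        free(o);
    }
}

int main(void)
{
    struct obj *o = malloc(sizeof(*o));

    if (!o)
        return 1;
    o->refs = 1; /* the init-and-add step took this reference */

    /* ... a later registration step fails ... */

    /* WRONG: free(o) here would bypass release() and leak its state.
     * RIGHT (what the hunk adds): drop the reference instead. */
    obj_put(o);
    return 0;
}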
4119 +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
4120 +index 5b3b0c3c8a47..d910e36c34b5 100644
4121 +--- a/kernel/trace/trace_events.c
4122 ++++ b/kernel/trace/trace_events.c
4123 +@@ -1318,9 +1318,6 @@ event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
4124 + char buf[32];
4125 + int len;
4126 +
4127 +- if (*ppos)
4128 +- return 0;
4129 +-
4130 + if (unlikely(!id))
4131 + return -ENODEV;
4132 +
4133 +diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
4134 +index 9962cb5da8ac..44f078cda0ac 100644
4135 +--- a/kernel/trace/trace_probe.c
4136 ++++ b/kernel/trace/trace_probe.c
4137 +@@ -405,13 +405,14 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size,
4138 + return -E2BIG;
4139 + }
4140 + }
4141 +- /*
4142 +- * The default type of $comm should be "string", and it can't be
4143 +- * dereferenced.
4144 +- */
4145 +- if (!t && strcmp(arg, "$comm") == 0)
4146 ++
4147 ++ /* Since $comm cannot be dereferenced, we can find $comm by strcmp */
4148 ++ if (strcmp(arg, "$comm") == 0) {
4149 ++ /* The type of $comm must be "string", and not an array. */
4150 ++ if (parg->count || (t && strcmp(t, "string")))
4151 ++ return -EINVAL;
4152 + parg->type = find_fetch_type("string");
4153 +- else
4154 ++ } else
4155 + parg->type = find_fetch_type(t);
4156 + if (!parg->type) {
4157 + pr_info("Unsupported type: %s\n", t);
4158 +diff --git a/lib/Makefile b/lib/Makefile
4159 +index e1b59da71418..d1f312096bec 100644
4160 +--- a/lib/Makefile
4161 ++++ b/lib/Makefile
4162 +@@ -17,6 +17,17 @@ KCOV_INSTRUMENT_list_debug.o := n
4163 + KCOV_INSTRUMENT_debugobjects.o := n
4164 + KCOV_INSTRUMENT_dynamic_debug.o := n
4165 +
4166 ++# Early boot use of cmdline, don't instrument it
4167 ++ifdef CONFIG_AMD_MEM_ENCRYPT
4168 ++KASAN_SANITIZE_string.o := n
4169 ++
4170 ++ifdef CONFIG_FUNCTION_TRACER
4171 ++CFLAGS_REMOVE_string.o = -pg
4172 ++endif
4173 ++
4174 ++CFLAGS_string.o := $(call cc-option, -fno-stack-protector)
4175 ++endif
4176 ++
4177 + lib-y := ctype.o string.o vsprintf.o cmdline.o \
4178 + rbtree.o radix-tree.o timerqueue.o xarray.o \
4179 + idr.o int_sqrt.o extable.o \
4180 +diff --git a/mm/gup.c b/mm/gup.c
4181 +index 81e0bdefa2cc..1a42b4367c3a 100644
4182 +--- a/mm/gup.c
4183 ++++ b/mm/gup.c
4184 +@@ -1811,7 +1811,7 @@ static void gup_pgd_range(unsigned long addr, unsigned long end,
4185 + * Check if it's allowed to use __get_user_pages_fast() for the range, or
4186 + * we need to fall back to the slow version:
4187 + */
4188 +-bool gup_fast_permitted(unsigned long start, int nr_pages, int write)
4189 ++bool gup_fast_permitted(unsigned long start, int nr_pages)
4190 + {
4191 + unsigned long len, end;
4192 +
4193 +@@ -1853,7 +1853,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
4194 + * block IPIs that come from THPs splitting.
4195 + */
4196 +
4197 +- if (gup_fast_permitted(start, nr_pages, write)) {
4198 ++ if (gup_fast_permitted(start, nr_pages)) {
4199 + local_irq_save(flags);
4200 + gup_pgd_range(start, end, write, pages, &nr);
4201 + local_irq_restore(flags);
4202 +@@ -1895,7 +1895,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
4203 + if (unlikely(!access_ok((void __user *)start, len)))
4204 + return -EFAULT;
4205 +
4206 +- if (gup_fast_permitted(start, nr_pages, write)) {
4207 ++ if (gup_fast_permitted(start, nr_pages)) {
4208 + local_irq_disable();
4209 + gup_pgd_range(addr, end, write, pages, &nr);
4210 + local_irq_enable();
4211 +diff --git a/mm/mmap.c b/mm/mmap.c
4212 +index da9236a5022e..446698476e4c 100644
4213 +--- a/mm/mmap.c
4214 ++++ b/mm/mmap.c
4215 +@@ -2736,9 +2736,17 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
4216 + return -EINVAL;
4217 +
4218 + len = PAGE_ALIGN(len);
4219 ++ end = start + len;
4220 + if (len == 0)
4221 + return -EINVAL;
4222 +
4223 ++ /*
4224 ++ * arch_unmap() might do unmaps itself. It must be called
4225 ++ * and finish any rbtree manipulation before this code
4226 ++ * runs and also starts to manipulate the rbtree.
4227 ++ */
4228 ++ arch_unmap(mm, start, end);
4229 ++
4230 + /* Find the first overlapping VMA */
4231 + vma = find_vma(mm, start);
4232 + if (!vma)
4233 +@@ -2747,7 +2755,6 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
4234 + /* we have start < vma->vm_end */
4235 +
4236 + /* if it doesn't overlap, we have nothing.. */
4237 +- end = start + len;
4238 + if (vma->vm_start >= end)
4239 + return 0;
4240 +
4241 +@@ -2817,12 +2824,6 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
4242 + /* Detach vmas from rbtree */
4243 + detach_vmas_to_be_unmapped(mm, vma, prev, end);
4244 +
4245 +- /*
4246 +- * mpx unmap needs to be called with mmap_sem held for write.
4247 +- * It is safe to call it before unmap_region().
4248 +- */
4249 +- arch_unmap(mm, vma, start, end);
4250 +-
4251 + if (downgrade)
4252 + downgrade_write(&mm->mmap_sem);
4253 +
4254 +diff --git a/net/core/dev.c b/net/core/dev.c
4255 +index 7277dd393c00..c8e672ac32cb 100644
4256 +--- a/net/core/dev.c
4257 ++++ b/net/core/dev.c
4258 +@@ -8829,7 +8829,7 @@ static void netdev_wait_allrefs(struct net_device *dev)
4259 +
4260 + refcnt = netdev_refcnt_read(dev);
4261 +
4262 +- if (time_after(jiffies, warning_time + 10 * HZ)) {
4263 ++ if (refcnt && time_after(jiffies, warning_time + 10 * HZ)) {
4264 + pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
4265 + dev->name, refcnt);
4266 + warning_time = jiffies;
4267 +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
4268 +index 5ea1bed08ede..fd449017c55e 100644
4269 +--- a/net/core/rtnetlink.c
4270 ++++ b/net/core/rtnetlink.c
4271 +@@ -1502,14 +1502,15 @@ static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
4272 + return ret;
4273 + }
4274 +
4275 +-static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev)
4276 ++static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
4277 ++ bool force)
4278 + {
4279 + int ifindex = dev_get_iflink(dev);
4280 +
4281 +- if (dev->ifindex == ifindex)
4282 +- return 0;
4283 ++ if (force || dev->ifindex != ifindex)
4284 ++ return nla_put_u32(skb, IFLA_LINK, ifindex);
4285 +
4286 +- return nla_put_u32(skb, IFLA_LINK, ifindex);
4287 ++ return 0;
4288 + }
4289 +
4290 + static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
4291 +@@ -1526,6 +1527,8 @@ static int rtnl_fill_link_netnsid(struct sk_buff *skb,
4292 + const struct net_device *dev,
4293 + struct net *src_net)
4294 + {
4295 ++ bool put_iflink = false;
4296 ++
4297 + if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
4298 + struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
4299 +
4300 +@@ -1534,10 +1537,12 @@ static int rtnl_fill_link_netnsid(struct sk_buff *skb,
4301 +
4302 + if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
4303 + return -EMSGSIZE;
4304 ++
4305 ++ put_iflink = true;
4306 + }
4307 + }
4308 +
4309 +- return 0;
4310 ++ return nla_put_iflink(skb, dev, put_iflink);
4311 + }
4312 +
4313 + static int rtnl_fill_link_af(struct sk_buff *skb,
4314 +@@ -1623,7 +1628,6 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
4315 + #ifdef CONFIG_RPS
4316 + nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
4317 + #endif
4318 +- nla_put_iflink(skb, dev) ||
4319 + put_master_ifindex(skb, dev) ||
4320 + nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
4321 + (dev->qdisc &&
4322 +diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
4323 +index 10e809b296ec..fb065a8937ea 100644
4324 +--- a/net/ipv4/esp4.c
4325 ++++ b/net/ipv4/esp4.c
4326 +@@ -226,7 +226,7 @@ static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
4327 + tail[plen - 1] = proto;
4328 + }
4329 +
4330 +-static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
4331 ++static int esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
4332 + {
4333 + int encap_type;
4334 + struct udphdr *uh;
4335 +@@ -234,6 +234,7 @@ static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, stru
4336 + __be16 sport, dport;
4337 + struct xfrm_encap_tmpl *encap = x->encap;
4338 + struct ip_esp_hdr *esph = esp->esph;
4339 ++ unsigned int len;
4340 +
4341 + spin_lock_bh(&x->lock);
4342 + sport = encap->encap_sport;
4343 +@@ -241,11 +242,14 @@ static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, stru
4344 + encap_type = encap->encap_type;
4345 + spin_unlock_bh(&x->lock);
4346 +
4347 ++ len = skb->len + esp->tailen - skb_transport_offset(skb);
4348 ++ if (len + sizeof(struct iphdr) >= IP_MAX_MTU)
4349 ++ return -EMSGSIZE;
4350 ++
4351 + uh = (struct udphdr *)esph;
4352 + uh->source = sport;
4353 + uh->dest = dport;
4354 +- uh->len = htons(skb->len + esp->tailen
4355 +- - skb_transport_offset(skb));
4356 ++ uh->len = htons(len);
4357 + uh->check = 0;
4358 +
4359 + switch (encap_type) {
4360 +@@ -262,6 +266,8 @@ static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, stru
4361 +
4362 + *skb_mac_header(skb) = IPPROTO_UDP;
4363 + esp->esph = esph;
4364 ++
4365 ++ return 0;
4366 + }
4367 +
4368 + int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
4369 +@@ -275,8 +281,12 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
4370 + int tailen = esp->tailen;
4371 +
4372 + /* this is non-NULL only with UDP Encapsulation */
4373 +- if (x->encap)
4374 +- esp_output_udp_encap(x, skb, esp);
4375 ++ if (x->encap) {
4376 ++ int err = esp_output_udp_encap(x, skb, esp);
4377 ++
4378 ++ if (err < 0)
4379 ++ return err;
4380 ++ }
4381 +
4382 + if (!skb_cloned(skb)) {
4383 + if (tailen <= skb_tailroom(skb)) {
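
esp_output_udp_encap() now returns an error: uh->len is a 16-bit wire field, so once the ESP trailer (esp->tailen) pushes a UDP-encapsulated packet past 64 KiB the stored length would silently wrap. The hunk computes the length once, rejects anything reaching IP_MAX_MTU with -EMSGSIZE, and only then writes htons(len). The guard in isolation, with the IPv4 header size hard-coded to 20 for the sketch:

#include <stdint.h>
#include <stdio.h>

#define IP_MAX_MTU 0xFFFFU

/* returns the on-wire UDP length, or -1 mirroring the -EMSGSIZE check */
static int udp_encap_len(unsigned int payload, unsigned int tailen)
{
    unsigned int len = payload + tailen;

    if (len + 20 /* sizeof(struct iphdr) */ >= IP_MAX_MTU)
        return -1;
    return (int)(uint16_t)len; /* now guaranteed to fit in a __be16 */
}

int main(void)
{
    printf("%d\n", udp_encap_len(1400, 32));   /* normal packet: 1432 */
    printf("%d\n", udp_encap_len(65500, 100)); /* would wrap: rejected */
    return 0;
}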
4384 +diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
4385 +index 8756e0e790d2..d3170a8001b2 100644
4386 +--- a/net/ipv4/esp4_offload.c
4387 ++++ b/net/ipv4/esp4_offload.c
4388 +@@ -52,13 +52,13 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
4389 + goto out;
4390 +
4391 + if (sp->len == XFRM_MAX_DEPTH)
4392 +- goto out;
4393 ++ goto out_reset;
4394 +
4395 + x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
4396 + (xfrm_address_t *)&ip_hdr(skb)->daddr,
4397 + spi, IPPROTO_ESP, AF_INET);
4398 + if (!x)
4399 +- goto out;
4400 ++ goto out_reset;
4401 +
4402 + sp->xvec[sp->len++] = x;
4403 + sp->olen++;
4404 +@@ -66,7 +66,7 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
4405 + xo = xfrm_offload(skb);
4406 + if (!xo) {
4407 + xfrm_state_put(x);
4408 +- goto out;
4409 ++ goto out_reset;
4410 + }
4411 + }
4412 +
4413 +@@ -82,6 +82,8 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
4414 + xfrm_input(skb, IPPROTO_ESP, spi, -2);
4415 +
4416 + return ERR_PTR(-EINPROGRESS);
4417 ++out_reset:
4418 ++ secpath_reset(skb);
4419 + out:
4420 + skb_push(skb, offset);
4421 + NAPI_GRO_CB(skb)->same_flow = 0;
4422 +diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
4423 +index 68a21bf75dd0..b6235ca09fa5 100644
4424 +--- a/net/ipv4/ip_vti.c
4425 ++++ b/net/ipv4/ip_vti.c
4426 +@@ -659,9 +659,9 @@ static int __init vti_init(void)
4427 + return err;
4428 +
4429 + rtnl_link_failed:
4430 +- xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
4431 +-xfrm_tunnel_failed:
4432 + xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
4433 ++xfrm_tunnel_failed:
4434 ++ xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
4435 + xfrm_proto_comp_failed:
4436 + xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
4437 + xfrm_proto_ah_failed:
4438 +@@ -676,6 +676,7 @@ pernet_dev_failed:
4439 + static void __exit vti_fini(void)
4440 + {
4441 + rtnl_link_unregister(&vti_link_ops);
4442 ++ xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
4443 + xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
4444 + xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
4445 + xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
4446 +diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
4447 +index d73a6d6652f6..2b144b92ae46 100644
4448 +--- a/net/ipv4/xfrm4_policy.c
4449 ++++ b/net/ipv4/xfrm4_policy.c
4450 +@@ -111,7 +111,8 @@ static void
4451 + _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
4452 + {
4453 + const struct iphdr *iph = ip_hdr(skb);
4454 +- u8 *xprth = skb_network_header(skb) + iph->ihl * 4;
4455 ++ int ihl = iph->ihl;
4456 ++ u8 *xprth = skb_network_header(skb) + ihl * 4;
4457 + struct flowi4 *fl4 = &fl->u.ip4;
4458 + int oif = 0;
4459 +
4460 +@@ -122,6 +123,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
4461 + fl4->flowi4_mark = skb->mark;
4462 + fl4->flowi4_oif = reverse ? skb->skb_iif : oif;
4463 +
4464 ++ fl4->flowi4_proto = iph->protocol;
4465 ++ fl4->daddr = reverse ? iph->saddr : iph->daddr;
4466 ++ fl4->saddr = reverse ? iph->daddr : iph->saddr;
4467 ++ fl4->flowi4_tos = iph->tos;
4468 ++
4469 + if (!ip_is_fragment(iph)) {
4470 + switch (iph->protocol) {
4471 + case IPPROTO_UDP:
4472 +@@ -133,7 +139,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
4473 + pskb_may_pull(skb, xprth + 4 - skb->data)) {
4474 + __be16 *ports;
4475 +
4476 +- xprth = skb_network_header(skb) + iph->ihl * 4;
4477 ++ xprth = skb_network_header(skb) + ihl * 4;
4478 + ports = (__be16 *)xprth;
4479 +
4480 + fl4->fl4_sport = ports[!!reverse];
4481 +@@ -146,7 +152,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
4482 + pskb_may_pull(skb, xprth + 2 - skb->data)) {
4483 + u8 *icmp;
4484 +
4485 +- xprth = skb_network_header(skb) + iph->ihl * 4;
4486 ++ xprth = skb_network_header(skb) + ihl * 4;
4487 + icmp = xprth;
4488 +
4489 + fl4->fl4_icmp_type = icmp[0];
4490 +@@ -159,7 +165,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
4491 + pskb_may_pull(skb, xprth + 4 - skb->data)) {
4492 + __be32 *ehdr;
4493 +
4494 +- xprth = skb_network_header(skb) + iph->ihl * 4;
4495 ++ xprth = skb_network_header(skb) + ihl * 4;
4496 + ehdr = (__be32 *)xprth;
4497 +
4498 + fl4->fl4_ipsec_spi = ehdr[0];
4499 +@@ -171,7 +177,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
4500 + pskb_may_pull(skb, xprth + 8 - skb->data)) {
4501 + __be32 *ah_hdr;
4502 +
4503 +- xprth = skb_network_header(skb) + iph->ihl * 4;
4504 ++ xprth = skb_network_header(skb) + ihl * 4;
4505 + ah_hdr = (__be32 *)xprth;
4506 +
4507 + fl4->fl4_ipsec_spi = ah_hdr[1];
4508 +@@ -183,7 +189,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
4509 + pskb_may_pull(skb, xprth + 4 - skb->data)) {
4510 + __be16 *ipcomp_hdr;
4511 +
4512 +- xprth = skb_network_header(skb) + iph->ihl * 4;
4513 ++ xprth = skb_network_header(skb) + ihl * 4;
4514 + ipcomp_hdr = (__be16 *)xprth;
4515 +
4516 + fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
4517 +@@ -196,7 +202,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
4518 + __be16 *greflags;
4519 + __be32 *gre_hdr;
4520 +
4521 +- xprth = skb_network_header(skb) + iph->ihl * 4;
4522 ++ xprth = skb_network_header(skb) + ihl * 4;
4523 + greflags = (__be16 *)xprth;
4524 + gre_hdr = (__be32 *)xprth;
4525 +
4526 +@@ -213,10 +219,6 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
4527 + break;
4528 + }
4529 + }
4530 +- fl4->flowi4_proto = iph->protocol;
4531 +- fl4->daddr = reverse ? iph->saddr : iph->daddr;
4532 +- fl4->saddr = reverse ? iph->daddr : iph->saddr;
4533 +- fl4->flowi4_tos = iph->tos;
4534 + }
4535 +
4536 + static void xfrm4_update_pmtu(struct dst_entry *dst, struct sock *sk,
4537 +diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
4538 +index d46b4eb645c2..cb99f6fb79b7 100644
4539 +--- a/net/ipv6/esp6_offload.c
4540 ++++ b/net/ipv6/esp6_offload.c
4541 +@@ -74,13 +74,13 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head,
4542 + goto out;
4543 +
4544 + if (sp->len == XFRM_MAX_DEPTH)
4545 +- goto out;
4546 ++ goto out_reset;
4547 +
4548 + x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
4549 + (xfrm_address_t *)&ipv6_hdr(skb)->daddr,
4550 + spi, IPPROTO_ESP, AF_INET6);
4551 + if (!x)
4552 +- goto out;
4553 ++ goto out_reset;
4554 +
4555 + sp->xvec[sp->len++] = x;
4556 + sp->olen++;
4557 +@@ -88,7 +88,7 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head,
4558 + xo = xfrm_offload(skb);
4559 + if (!xo) {
4560 + xfrm_state_put(x);
4561 +- goto out;
4562 ++ goto out_reset;
4563 + }
4564 + }
4565 +
4566 +@@ -109,6 +109,8 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head,
4567 + xfrm_input(skb, IPPROTO_ESP, spi, -2);
4568 +
4569 + return ERR_PTR(-EINPROGRESS);
4570 ++out_reset:
4571 ++ secpath_reset(skb);
4572 + out:
4573 + skb_push(skb, offset);
4574 + NAPI_GRO_CB(skb)->same_flow = 0;
4575 +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
4576 +index 91247a6fc67f..9915f64b38a0 100644
4577 +--- a/net/ipv6/ip6_fib.c
4578 ++++ b/net/ipv6/ip6_fib.c
4579 +@@ -909,6 +909,12 @@ static void fib6_drop_pcpu_from(struct fib6_info *f6i,
4580 + {
4581 + int cpu;
4582 +
4583 ++ /* Make sure rt6_make_pcpu_route() wont add other percpu routes
4584 ++ * while we are cleaning them here.
4585 ++ */
4586 ++ f6i->fib6_destroying = 1;
4587 ++ mb(); /* paired with the cmpxchg() in rt6_make_pcpu_route() */
4588 ++
4589 + /* release the reference to this fib entry from
4590 + * all of its cached pcpu routes
4591 + */
4592 +@@ -932,6 +938,9 @@ static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn,
4593 + {
4594 + struct fib6_table *table = rt->fib6_table;
4595 +
4596 ++ if (rt->rt6i_pcpu)
4597 ++ fib6_drop_pcpu_from(rt, table);
4598 ++
4599 + if (atomic_read(&rt->fib6_ref) != 1) {
4600 + /* This route is used as dummy address holder in some split
4601 + * nodes. It is not leaked, but it still holds other resources,
4602 +@@ -953,9 +962,6 @@ static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn,
4603 + fn = rcu_dereference_protected(fn->parent,
4604 + lockdep_is_held(&table->tb6_lock));
4605 + }
4606 +-
4607 +- if (rt->rt6i_pcpu)
4608 +- fib6_drop_pcpu_from(rt, table);
4609 + }
4610 + }
4611 +
4612 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
4613 +index 59c90bba048c..b471afce1330 100644
4614 +--- a/net/ipv6/route.c
4615 ++++ b/net/ipv6/route.c
4616 +@@ -110,8 +110,8 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
4617 + int iif, int type, u32 portid, u32 seq,
4618 + unsigned int flags);
4619 + static struct rt6_info *rt6_find_cached_rt(struct fib6_info *rt,
4620 +- struct in6_addr *daddr,
4621 +- struct in6_addr *saddr);
4622 ++ const struct in6_addr *daddr,
4623 ++ const struct in6_addr *saddr);
4624 +
4625 + #ifdef CONFIG_IPV6_ROUTE_INFO
4626 + static struct fib6_info *rt6_add_route_info(struct net *net,
4627 +@@ -1260,6 +1260,13 @@ static struct rt6_info *rt6_make_pcpu_route(struct net *net,
4628 + prev = cmpxchg(p, NULL, pcpu_rt);
4629 + BUG_ON(prev);
4630 +
4631 ++ if (rt->fib6_destroying) {
4632 ++ struct fib6_info *from;
4633 ++
4634 ++ from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
4635 ++ fib6_info_release(from);
4636 ++ }
4637 ++
4638 + return pcpu_rt;
4639 + }
4640 +
4641 +@@ -1529,31 +1536,44 @@ out:
4642 + * Caller has to hold rcu_read_lock()
4643 + */
4644 + static struct rt6_info *rt6_find_cached_rt(struct fib6_info *rt,
4645 +- struct in6_addr *daddr,
4646 +- struct in6_addr *saddr)
4647 ++ const struct in6_addr *daddr,
4648 ++ const struct in6_addr *saddr)
4649 + {
4650 ++ const struct in6_addr *src_key = NULL;
4651 + struct rt6_exception_bucket *bucket;
4652 +- struct in6_addr *src_key = NULL;
4653 + struct rt6_exception *rt6_ex;
4654 + struct rt6_info *res = NULL;
4655 +
4656 +- bucket = rcu_dereference(rt->rt6i_exception_bucket);
4657 +-
4658 + #ifdef CONFIG_IPV6_SUBTREES
4659 + /* rt6i_src.plen != 0 indicates rt is in subtree
4660 + * and exception table is indexed by a hash of
4661 + * both rt6i_dst and rt6i_src.
4662 +- * Otherwise, the exception table is indexed by
4663 +- * a hash of only rt6i_dst.
4664 ++ * However, the src addr used to create the hash
4665 ++ * might not be exactly the passed in saddr which
4666 ++ * is a /128 addr from the flow.
4667 ++ * So we need to use f6i->fib6_src to redo lookup
4668 ++ * if the passed in saddr does not find anything.
4669 ++ * (See the logic in ip6_rt_cache_alloc() on how
4670 ++ * rt->rt6i_src is updated.)
4671 + */
4672 + if (rt->fib6_src.plen)
4673 + src_key = saddr;
4674 ++find_ex:
4675 + #endif
4676 ++ bucket = rcu_dereference(rt->rt6i_exception_bucket);
4677 + rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);
4678 +
4679 + if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
4680 + res = rt6_ex->rt6i;
4681 +
4682 ++#ifdef CONFIG_IPV6_SUBTREES
4683 ++ /* Use fib6_src as src_key and redo lookup */
4684 ++ if (!res && src_key && src_key != &rt->fib6_src.addr) {
4685 ++ src_key = &rt->fib6_src.addr;
4686 ++ goto find_ex;
4687 ++ }
4688 ++#endif
4689 ++
4690 + return res;
4691 + }
4692 +
4693 +@@ -2614,10 +2634,8 @@ out:
4694 + u32 ip6_mtu_from_fib6(struct fib6_info *f6i, struct in6_addr *daddr,
4695 + struct in6_addr *saddr)
4696 + {
4697 +- struct rt6_exception_bucket *bucket;
4698 +- struct rt6_exception *rt6_ex;
4699 +- struct in6_addr *src_key;
4700 + struct inet6_dev *idev;
4701 ++ struct rt6_info *rt;
4702 + u32 mtu = 0;
4703 +
4704 + if (unlikely(fib6_metric_locked(f6i, RTAX_MTU))) {
4705 +@@ -2626,18 +2644,10 @@ u32 ip6_mtu_from_fib6(struct fib6_info *f6i, struct in6_addr *daddr,
4706 + goto out;
4707 + }
4708 +
4709 +- src_key = NULL;
4710 +-#ifdef CONFIG_IPV6_SUBTREES
4711 +- if (f6i->fib6_src.plen)
4712 +- src_key = saddr;
4713 +-#endif
4714 +-
4715 +- bucket = rcu_dereference(f6i->rt6i_exception_bucket);
4716 +- rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);
4717 +- if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
4718 +- mtu = dst_metric_raw(&rt6_ex->rt6i->dst, RTAX_MTU);
4719 +-
4720 +- if (likely(!mtu)) {
4721 ++ rt = rt6_find_cached_rt(f6i, daddr, saddr);
4722 ++ if (unlikely(rt)) {
4723 ++ mtu = dst_metric_raw(&rt->dst, RTAX_MTU);
4724 ++ } else {
4725 + struct net_device *dev = fib6_info_nh_dev(f6i);
4726 +
4727 + mtu = IPV6_MIN_MTU;
4728 +diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
4729 +index bc65db782bfb..d9e5f6808811 100644
4730 +--- a/net/ipv6/xfrm6_tunnel.c
4731 ++++ b/net/ipv6/xfrm6_tunnel.c
4732 +@@ -345,7 +345,7 @@ static void __net_exit xfrm6_tunnel_net_exit(struct net *net)
4733 + unsigned int i;
4734 +
4735 + xfrm_flush_gc();
4736 +- xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true);
4737 ++ xfrm_state_flush(net, 0, false, true);
4738 +
4739 + for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
4740 + WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i]));
4741 +@@ -402,6 +402,10 @@ static void __exit xfrm6_tunnel_fini(void)
4742 + xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
4743 + xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
4744 + unregister_pernet_subsys(&xfrm6_tunnel_net_ops);
4745 ++ /* Someone maybe has gotten the xfrm6_tunnel_spi.
4746 ++ * So need to wait it.
4747 ++ */
4748 ++ rcu_barrier();
4749 + kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
4750 + }
4751 +
4752 +diff --git a/net/key/af_key.c b/net/key/af_key.c
4753 +index 5651c29cb5bd..4af1e1d60b9f 100644
4754 +--- a/net/key/af_key.c
4755 ++++ b/net/key/af_key.c
4756 +@@ -1951,8 +1951,10 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
4757 +
4758 + if (rq->sadb_x_ipsecrequest_mode == 0)
4759 + return -EINVAL;
4760 ++ if (!xfrm_id_proto_valid(rq->sadb_x_ipsecrequest_proto))
4761 ++ return -EINVAL;
4762 +
4763 +- t->id.proto = rq->sadb_x_ipsecrequest_proto; /* XXX check proto */
4764 ++ t->id.proto = rq->sadb_x_ipsecrequest_proto;
4765 + if ((mode = pfkey_mode_to_xfrm(rq->sadb_x_ipsecrequest_mode)) < 0)
4766 + return -EINVAL;
4767 + t->mode = mode;
4768 +diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
4769 +index 4a6ff1482a9f..02d2e6f11e93 100644
4770 +--- a/net/mac80211/iface.c
4771 ++++ b/net/mac80211/iface.c
4772 +@@ -1908,6 +1908,9 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
4773 + list_del_rcu(&sdata->list);
4774 + mutex_unlock(&sdata->local->iflist_mtx);
4775 +
4776 ++ if (sdata->vif.txq)
4777 ++ ieee80211_txq_purge(sdata->local, to_txq_info(sdata->vif.txq));
4778 ++
4779 + synchronize_rcu();
4780 +
4781 + if (sdata->dev) {
4782 +diff --git a/net/tipc/core.c b/net/tipc/core.c
4783 +index 5b38f5164281..d7b0688c98dd 100644
4784 +--- a/net/tipc/core.c
4785 ++++ b/net/tipc/core.c
4786 +@@ -66,6 +66,10 @@ static int __net_init tipc_init_net(struct net *net)
4787 + INIT_LIST_HEAD(&tn->node_list);
4788 + spin_lock_init(&tn->node_list_lock);
4789 +
4790 ++ err = tipc_socket_init();
4791 ++ if (err)
4792 ++ goto out_socket;
4793 ++
4794 + err = tipc_sk_rht_init(net);
4795 + if (err)
4796 + goto out_sk_rht;
4797 +@@ -92,6 +96,8 @@ out_subscr:
4798 + out_nametbl:
4799 + tipc_sk_rht_destroy(net);
4800 + out_sk_rht:
4801 ++ tipc_socket_stop();
4802 ++out_socket:
4803 + return err;
4804 + }
4805 +
4806 +@@ -102,6 +108,7 @@ static void __net_exit tipc_exit_net(struct net *net)
4807 + tipc_bcast_stop(net);
4808 + tipc_nametbl_stop(net);
4809 + tipc_sk_rht_destroy(net);
4810 ++ tipc_socket_stop();
4811 + }
4812 +
4813 + static struct pernet_operations tipc_net_ops = {
4814 +@@ -129,10 +136,6 @@ static int __init tipc_init(void)
4815 + if (err)
4816 + goto out_netlink_compat;
4817 +
4818 +- err = tipc_socket_init();
4819 +- if (err)
4820 +- goto out_socket;
4821 +-
4822 + err = tipc_register_sysctl();
4823 + if (err)
4824 + goto out_sysctl;
4825 +@@ -152,8 +155,6 @@ out_bearer:
4826 + out_pernet:
4827 + tipc_unregister_sysctl();
4828 + out_sysctl:
4829 +- tipc_socket_stop();
4830 +-out_socket:
4831 + tipc_netlink_compat_stop();
4832 + out_netlink_compat:
4833 + tipc_netlink_stop();
4834 +@@ -168,7 +169,6 @@ static void __exit tipc_exit(void)
4835 + unregister_pernet_subsys(&tipc_net_ops);
4836 + tipc_netlink_stop();
4837 + tipc_netlink_compat_stop();
4838 +- tipc_socket_stop();
4839 + tipc_unregister_sysctl();
4840 +
4841 + pr_info("Deactivated\n");
4842 +diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
4843 +index 15eb5d3d4750..96ab344f17bb 100644
4844 +--- a/net/vmw_vsock/virtio_transport.c
4845 ++++ b/net/vmw_vsock/virtio_transport.c
4846 +@@ -702,28 +702,27 @@ static int __init virtio_vsock_init(void)
4847 + if (!virtio_vsock_workqueue)
4848 + return -ENOMEM;
4849 +
4850 +- ret = register_virtio_driver(&virtio_vsock_driver);
4851 ++ ret = vsock_core_init(&virtio_transport.transport);
4852 + if (ret)
4853 + goto out_wq;
4854 +
4855 +- ret = vsock_core_init(&virtio_transport.transport);
4856 ++ ret = register_virtio_driver(&virtio_vsock_driver);
4857 + if (ret)
4858 +- goto out_vdr;
4859 ++ goto out_vci;
4860 +
4861 + return 0;
4862 +
4863 +-out_vdr:
4864 +- unregister_virtio_driver(&virtio_vsock_driver);
4865 ++out_vci:
4866 ++ vsock_core_exit();
4867 + out_wq:
4868 + destroy_workqueue(virtio_vsock_workqueue);
4869 + return ret;
4870 +-
4871 + }
4872 +
4873 + static void __exit virtio_vsock_exit(void)
4874 + {
4875 +- vsock_core_exit();
4876 + unregister_virtio_driver(&virtio_vsock_driver);
4877 ++ vsock_core_exit();
4878 + destroy_workqueue(virtio_vsock_workqueue);
4879 + }
4880 +
4881 +diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
4882 +index 602715fc9a75..f3f3d06cb6d8 100644
4883 +--- a/net/vmw_vsock/virtio_transport_common.c
4884 ++++ b/net/vmw_vsock/virtio_transport_common.c
4885 +@@ -786,12 +786,19 @@ static bool virtio_transport_close(struct vsock_sock *vsk)
4886 +
4887 + void virtio_transport_release(struct vsock_sock *vsk)
4888 + {
4889 ++ struct virtio_vsock_sock *vvs = vsk->trans;
4890 ++ struct virtio_vsock_pkt *pkt, *tmp;
4891 + struct sock *sk = &vsk->sk;
4892 + bool remove_sock = true;
4893 +
4894 + lock_sock(sk);
4895 + if (sk->sk_type == SOCK_STREAM)
4896 + remove_sock = virtio_transport_close(vsk);
4897 ++
4898 ++ list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {
4899 ++ list_del(&pkt->list);
4900 ++ virtio_transport_free_pkt(pkt);
4901 ++ }
4902 + release_sock(sk);
4903 +
4904 + if (remove_sock)
4905 +diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
4906 +index dbb3c1945b5c..85fec98676d3 100644
4907 +--- a/net/xfrm/xfrm_interface.c
4908 ++++ b/net/xfrm/xfrm_interface.c
4909 +@@ -70,17 +70,28 @@ static struct xfrm_if *xfrmi_lookup(struct net *net, struct xfrm_state *x)
4910 + return NULL;
4911 + }
4912 +
4913 +-static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb)
4914 ++static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb,
4915 ++ unsigned short family)
4916 + {
4917 + struct xfrmi_net *xfrmn;
4918 +- int ifindex;
4919 + struct xfrm_if *xi;
4920 ++ int ifindex = 0;
4921 +
4922 + if (!secpath_exists(skb) || !skb->dev)
4923 + return NULL;
4924 +
4925 ++ switch (family) {
4926 ++ case AF_INET6:
4927 ++ ifindex = inet6_sdif(skb);
4928 ++ break;
4929 ++ case AF_INET:
4930 ++ ifindex = inet_sdif(skb);
4931 ++ break;
4932 ++ }
4933 ++ if (!ifindex)
4934 ++ ifindex = skb->dev->ifindex;
4935 ++
4936 + xfrmn = net_generic(xs_net(xfrm_input_state(skb)), xfrmi_net_id);
4937 +- ifindex = skb->dev->ifindex;
4938 +
4939 + for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) {
4940 + if (ifindex == xi->dev->ifindex &&
4941 +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
4942 +index 8d1a898d0ba5..a6b58df7a70f 100644
4943 +--- a/net/xfrm/xfrm_policy.c
4944 ++++ b/net/xfrm/xfrm_policy.c
4945 +@@ -3313,7 +3313,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
4946 + ifcb = xfrm_if_get_cb();
4947 +
4948 + if (ifcb) {
4949 +- xi = ifcb->decode_session(skb);
4950 ++ xi = ifcb->decode_session(skb, family);
4951 + if (xi) {
4952 + if_id = xi->p.if_id;
4953 + net = xi->net;
4954 +diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
4955 +index 1bb971f46fc6..178baaa037e5 100644
4956 +--- a/net/xfrm/xfrm_state.c
4957 ++++ b/net/xfrm/xfrm_state.c
4958 +@@ -2384,7 +2384,7 @@ void xfrm_state_fini(struct net *net)
4959 +
4960 + flush_work(&net->xfrm.state_hash_work);
4961 + flush_work(&xfrm_state_gc_work);
4962 +- xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true);
4963 ++ xfrm_state_flush(net, 0, false, true);
4964 +
4965 + WARN_ON(!list_empty(&net->xfrm.state_all));
4966 +
4967 +diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
4968 +index a131f9ff979e..6916931b1de1 100644
4969 +--- a/net/xfrm/xfrm_user.c
4970 ++++ b/net/xfrm/xfrm_user.c
4971 +@@ -1424,7 +1424,7 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
4972 + ret = verify_policy_dir(p->dir);
4973 + if (ret)
4974 + return ret;
4975 +- if (p->index && ((p->index & XFRM_POLICY_MAX) != p->dir))
4976 ++ if (p->index && (xfrm_policy_id2dir(p->index) != p->dir))
4977 + return -EINVAL;
4978 +
4979 + return 0;
4980 +@@ -1513,20 +1513,8 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
4981 + return -EINVAL;
4982 + }
4983 +
4984 +- switch (ut[i].id.proto) {
4985 +- case IPPROTO_AH:
4986 +- case IPPROTO_ESP:
4987 +- case IPPROTO_COMP:
4988 +-#if IS_ENABLED(CONFIG_IPV6)
4989 +- case IPPROTO_ROUTING:
4990 +- case IPPROTO_DSTOPTS:
4991 +-#endif
4992 +- case IPSEC_PROTO_ANY:
4993 +- break;
4994 +- default:
4995 ++ if (!xfrm_id_proto_valid(ut[i].id.proto))
4996 + return -EINVAL;
4997 +- }
4998 +-
4999 + }
5000 +
5001 + return 0;
5002 +diff --git a/scripts/gcc-plugins/arm_ssp_per_task_plugin.c b/scripts/gcc-plugins/arm_ssp_per_task_plugin.c
5003 +index 89c47f57d1ce..8c1af9bdcb1b 100644
5004 +--- a/scripts/gcc-plugins/arm_ssp_per_task_plugin.c
5005 ++++ b/scripts/gcc-plugins/arm_ssp_per_task_plugin.c
5006 +@@ -36,7 +36,7 @@ static unsigned int arm_pertask_ssp_rtl_execute(void)
5007 + mask = GEN_INT(sext_hwi(sp_mask, GET_MODE_PRECISION(Pmode)));
5008 + masked_sp = gen_reg_rtx(Pmode);
5009 +
5010 +- emit_insn_before(gen_rtx_SET(masked_sp,
5011 ++ emit_insn_before(gen_rtx_set(masked_sp,
5012 + gen_rtx_AND(Pmode,
5013 + stack_pointer_rtx,
5014 + mask)),
5015 +diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
5016 +index 3f80a684c232..665853dd517c 100644
5017 +--- a/security/apparmor/apparmorfs.c
5018 ++++ b/security/apparmor/apparmorfs.c
5019 +@@ -123,17 +123,22 @@ static int aafs_show_path(struct seq_file *seq, struct dentry *dentry)
5020 + return 0;
5021 + }
5022 +
5023 +-static void aafs_evict_inode(struct inode *inode)
5024 ++static void aafs_i_callback(struct rcu_head *head)
5025 + {
5026 +- truncate_inode_pages_final(&inode->i_data);
5027 +- clear_inode(inode);
5028 ++ struct inode *inode = container_of(head, struct inode, i_rcu);
5029 + if (S_ISLNK(inode->i_mode))
5030 + kfree(inode->i_link);
5031 ++ free_inode_nonrcu(inode);
5032 ++}
5033 ++
5034 ++static void aafs_destroy_inode(struct inode *inode)
5035 ++{
5036 ++ call_rcu(&inode->i_rcu, aafs_i_callback);
5037 + }
5038 +
5039 + static const struct super_operations aafs_super_ops = {
5040 + .statfs = simple_statfs,
5041 +- .evict_inode = aafs_evict_inode,
5042 ++ .destroy_inode = aafs_destroy_inode,
5043 + .show_path = aafs_show_path,
5044 + };
5045 +
5046 +diff --git a/security/inode.c b/security/inode.c
5047 +index b7772a9b315e..421dd72b5876 100644
5048 +--- a/security/inode.c
5049 ++++ b/security/inode.c
5050 +@@ -27,17 +27,22 @@
5051 + static struct vfsmount *mount;
5052 + static int mount_count;
5053 +
5054 +-static void securityfs_evict_inode(struct inode *inode)
5055 ++static void securityfs_i_callback(struct rcu_head *head)
5056 + {
5057 +- truncate_inode_pages_final(&inode->i_data);
5058 +- clear_inode(inode);
5059 ++ struct inode *inode = container_of(head, struct inode, i_rcu);
5060 + if (S_ISLNK(inode->i_mode))
5061 + kfree(inode->i_link);
5062 ++ free_inode_nonrcu(inode);
5063 ++}
5064 ++
5065 ++static void securityfs_destroy_inode(struct inode *inode)
5066 ++{
5067 ++ call_rcu(&inode->i_rcu, securityfs_i_callback);
5068 + }
5069 +
5070 + static const struct super_operations securityfs_super_operations = {
5071 + .statfs = simple_statfs,
5072 +- .evict_inode = securityfs_evict_inode,
5073 ++ .destroy_inode = securityfs_destroy_inode,
5074 + };
5075 +
5076 + static int fill_super(struct super_block *sb, void *data, int silent)
5077 +diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
5078 +index 1ef1ee2280a2..227766d9f43b 100644
5079 +--- a/tools/bpf/bpftool/map.c
5080 ++++ b/tools/bpf/bpftool/map.c
5081 +@@ -1111,6 +1111,9 @@ static int do_create(int argc, char **argv)
5082 + return -1;
5083 + }
5084 + NEXT_ARG();
5085 ++ } else {
5086 ++ p_err("unknown arg %s", *argv);
5087 ++ return -1;
5088 + }
5089 + }
5090 +
5091 +diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
5092 +index 53f8be0f4a1f..88158239622b 100644
5093 +--- a/tools/objtool/Makefile
5094 ++++ b/tools/objtool/Makefile
5095 +@@ -7,11 +7,12 @@ ARCH := x86
5096 + endif
5097 +
5098 + # always use the host compiler
5099 ++HOSTAR ?= ar
5100 + HOSTCC ?= gcc
5101 + HOSTLD ?= ld
5102 ++AR = $(HOSTAR)
5103 + CC = $(HOSTCC)
5104 + LD = $(HOSTLD)
5105 +-AR = ar
5106 +
5107 + ifeq ($(srctree),)
5108 + srctree := $(patsubst %/,%,$(dir $(CURDIR)))
5109 +diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
5110 +index 44195514b19e..fa56fde6e8d8 100644
5111 +--- a/tools/perf/bench/numa.c
5112 ++++ b/tools/perf/bench/numa.c
5113 +@@ -38,6 +38,10 @@
5114 + #include <numa.h>
5115 + #include <numaif.h>
5116 +
5117 ++#ifndef RUSAGE_THREAD
5118 ++# define RUSAGE_THREAD 1
5119 ++#endif
5120 ++
5121 + /*
5122 + * Regular printout to the terminal, supressed if -q is specified:
5123 + */
5124 +diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
5125 +index 27a374ddf661..947f1bb2fbdf 100644
5126 +--- a/tools/perf/util/cs-etm.c
5127 ++++ b/tools/perf/util/cs-etm.c
5128 +@@ -345,11 +345,9 @@ static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
5129 + if (!etmq->packet)
5130 + goto out_free;
5131 +
5132 +- if (etm->synth_opts.last_branch || etm->sample_branches) {
5133 +- etmq->prev_packet = zalloc(szp);
5134 +- if (!etmq->prev_packet)
5135 +- goto out_free;
5136 +- }
5137 ++ etmq->prev_packet = zalloc(szp);
5138 ++ if (!etmq->prev_packet)
5139 ++ goto out_free;
5140 +
5141 + if (etm->synth_opts.last_branch) {
5142 + size_t sz = sizeof(struct branch_stack);
5143 +diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
5144 +index 7c0b975dd2f0..73fc4abee302 100644
5145 +--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
5146 ++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
5147 +@@ -58,6 +58,7 @@ enum intel_pt_pkt_state {
5148 + INTEL_PT_STATE_NO_IP,
5149 + INTEL_PT_STATE_ERR_RESYNC,
5150 + INTEL_PT_STATE_IN_SYNC,
5151 ++ INTEL_PT_STATE_TNT_CONT,
5152 + INTEL_PT_STATE_TNT,
5153 + INTEL_PT_STATE_TIP,
5154 + INTEL_PT_STATE_TIP_PGD,
5155 +@@ -72,8 +73,9 @@ static inline bool intel_pt_sample_time(enum intel_pt_pkt_state pkt_state)
5156 + case INTEL_PT_STATE_NO_IP:
5157 + case INTEL_PT_STATE_ERR_RESYNC:
5158 + case INTEL_PT_STATE_IN_SYNC:
5159 +- case INTEL_PT_STATE_TNT:
5160 ++ case INTEL_PT_STATE_TNT_CONT:
5161 + return true;
5162 ++ case INTEL_PT_STATE_TNT:
5163 + case INTEL_PT_STATE_TIP:
5164 + case INTEL_PT_STATE_TIP_PGD:
5165 + case INTEL_PT_STATE_FUP:
5166 +@@ -888,16 +890,20 @@ static uint64_t intel_pt_next_period(struct intel_pt_decoder *decoder)
5167 + timestamp = decoder->timestamp + decoder->timestamp_insn_cnt;
5168 + masked_timestamp = timestamp & decoder->period_mask;
5169 + if (decoder->continuous_period) {
5170 +- if (masked_timestamp != decoder->last_masked_timestamp)
5171 ++ if (masked_timestamp > decoder->last_masked_timestamp)
5172 + return 1;
5173 + } else {
5174 + timestamp += 1;
5175 + masked_timestamp = timestamp & decoder->period_mask;
5176 +- if (masked_timestamp != decoder->last_masked_timestamp) {
5177 ++ if (masked_timestamp > decoder->last_masked_timestamp) {
5178 + decoder->last_masked_timestamp = masked_timestamp;
5179 + decoder->continuous_period = true;
5180 + }
5181 + }
5182 ++
5183 ++ if (masked_timestamp < decoder->last_masked_timestamp)
5184 ++ return decoder->period_ticks;
5185 ++
5186 + return decoder->period_ticks - (timestamp - masked_timestamp);
5187 + }
5188 +
5189 +@@ -926,7 +932,10 @@ static void intel_pt_sample_insn(struct intel_pt_decoder *decoder)
5190 + case INTEL_PT_PERIOD_TICKS:
5191 + timestamp = decoder->timestamp + decoder->timestamp_insn_cnt;
5192 + masked_timestamp = timestamp & decoder->period_mask;
5193 +- decoder->last_masked_timestamp = masked_timestamp;
5194 ++ if (masked_timestamp > decoder->last_masked_timestamp)
5195 ++ decoder->last_masked_timestamp = masked_timestamp;
5196 ++ else
5197 ++ decoder->last_masked_timestamp += decoder->period_ticks;
5198 + break;
5199 + case INTEL_PT_PERIOD_NONE:
5200 + case INTEL_PT_PERIOD_MTC:
5201 +@@ -1254,7 +1263,9 @@ static int intel_pt_walk_tnt(struct intel_pt_decoder *decoder)
5202 + return -ENOENT;
5203 + }
5204 + decoder->tnt.count -= 1;
5205 +- if (!decoder->tnt.count)
5206 ++ if (decoder->tnt.count)
5207 ++ decoder->pkt_state = INTEL_PT_STATE_TNT_CONT;
5208 ++ else
5209 + decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
5210 + decoder->tnt.payload <<= 1;
5211 + decoder->state.from_ip = decoder->ip;
5212 +@@ -1285,7 +1296,9 @@ static int intel_pt_walk_tnt(struct intel_pt_decoder *decoder)
5213 +
5214 + if (intel_pt_insn.branch == INTEL_PT_BR_CONDITIONAL) {
5215 + decoder->tnt.count -= 1;
5216 +- if (!decoder->tnt.count)
5217 ++ if (decoder->tnt.count)
5218 ++ decoder->pkt_state = INTEL_PT_STATE_TNT_CONT;
5219 ++ else
5220 + decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
5221 + if (decoder->tnt.payload & BIT63) {
5222 + decoder->tnt.payload <<= 1;
5223 +@@ -1305,8 +1318,11 @@ static int intel_pt_walk_tnt(struct intel_pt_decoder *decoder)
5224 + return 0;
5225 + }
5226 + decoder->ip += intel_pt_insn.length;
5227 +- if (!decoder->tnt.count)
5228 ++ if (!decoder->tnt.count) {
5229 ++ decoder->sample_timestamp = decoder->timestamp;
5230 ++ decoder->sample_insn_cnt = decoder->timestamp_insn_cnt;
5231 + return -EAGAIN;
5232 ++ }
5233 + decoder->tnt.payload <<= 1;
5234 + continue;
5235 + }
5236 +@@ -2365,6 +2381,7 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
5237 + err = intel_pt_walk_trace(decoder);
5238 + break;
5239 + case INTEL_PT_STATE_TNT:
5240 ++ case INTEL_PT_STATE_TNT_CONT:
5241 + err = intel_pt_walk_tnt(decoder);
5242 + if (err == -EAGAIN)
5243 + err = intel_pt_walk_trace(decoder);
5244 +diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
5245 +index 4715cfba20dc..93f99c6b7d79 100644
5246 +--- a/tools/testing/selftests/kvm/dirty_log_test.c
5247 ++++ b/tools/testing/selftests/kvm/dirty_log_test.c
5248 +@@ -288,8 +288,11 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
5249 + #endif
5250 + max_gfn = (1ul << (guest_pa_bits - guest_page_shift)) - 1;
5251 + guest_page_size = (1ul << guest_page_shift);
5252 +- /* 1G of guest page sized pages */
5253 +- guest_num_pages = (1ul << (30 - guest_page_shift));
5254 ++ /*
5255 ++ * A little more than 1G of guest page sized pages. Cover the
5256 ++ * case where the size is not aligned to 64 pages.
5257 ++ */
5258 ++ guest_num_pages = (1ul << (30 - guest_page_shift)) + 3;
5259 + host_page_size = getpagesize();
5260 + host_num_pages = (guest_num_pages * guest_page_size) / host_page_size +
5261 + !!((guest_num_pages * guest_page_size) % host_page_size);
5262 +@@ -359,7 +362,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
5263 + kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
5264 + #ifdef USE_CLEAR_DIRTY_LOG
5265 + kvm_vm_clear_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap, 0,
5266 +- DIV_ROUND_UP(host_num_pages, 64) * 64);
5267 ++ host_num_pages);
5268 + #endif
5269 + vm_dirty_log_verify(bmap);
5270 + iteration++;
5271 +diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
5272 +index 264425f75806..9a21e912097c 100644
5273 +--- a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
5274 ++++ b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
5275 +@@ -141,7 +141,13 @@ int main(int argc, char *argv[])
5276 +
5277 + free(hv_cpuid_entries);
5278 +
5279 +- vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);
5280 ++ rv = _vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);
5281 ++
5282 ++ if (rv) {
5283 ++ fprintf(stderr,
5284 ++ "Enlightened VMCS is unsupported, skip related test\n");
5285 ++ goto vm_free;
5286 ++ }
5287 +
5288 + hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm);
5289 + if (!hv_cpuid_entries)
5290 +@@ -151,6 +157,7 @@ int main(int argc, char *argv[])
5291 +
5292 + free(hv_cpuid_entries);
5293 +
5294 ++vm_free:
5295 + kvm_vm_free(vm);
5296 +
5297 + return 0;
5298 +diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
5299 +index 9c486fad3f9f..6202b4f718ce 100644
5300 +--- a/virt/kvm/arm/arm.c
5301 ++++ b/virt/kvm/arm/arm.c
5302 +@@ -949,7 +949,7 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
5303 + static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
5304 + const struct kvm_vcpu_init *init)
5305 + {
5306 +- unsigned int i;
5307 ++ unsigned int i, ret;
5308 + int phys_target = kvm_target_cpu();
5309 +
5310 + if (init->target != phys_target)
5311 +@@ -984,9 +984,14 @@ static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
5312 + vcpu->arch.target = phys_target;
5313 +
5314 + /* Now we know what it is, we can reset it. */
5315 +- return kvm_reset_vcpu(vcpu);
5316 +-}
5317 ++ ret = kvm_reset_vcpu(vcpu);
5318 ++ if (ret) {
5319 ++ vcpu->arch.target = -1;
5320 ++ bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
5321 ++ }
5322 +
5323 ++ return ret;
5324 ++}
5325 +
5326 + static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
5327 + struct kvm_vcpu_init *init)
5328 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
5329 +index b5238bcba72c..4cc0d8a46891 100644
5330 +--- a/virt/kvm/kvm_main.c
5331 ++++ b/virt/kvm/kvm_main.c
5332 +@@ -1241,7 +1241,7 @@ int kvm_clear_dirty_log_protect(struct kvm *kvm,
5333 + if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
5334 + return -EINVAL;
5335 +
5336 +- if ((log->first_page & 63) || (log->num_pages & 63))
5337 ++ if (log->first_page & 63)
5338 + return -EINVAL;
5339 +
5340 + slots = __kvm_memslots(kvm, as_id);
5341 +@@ -1254,8 +1254,9 @@ int kvm_clear_dirty_log_protect(struct kvm *kvm,
5342 + n = ALIGN(log->num_pages, BITS_PER_LONG) / 8;
5343 +
5344 + if (log->first_page > memslot->npages ||
5345 +- log->num_pages > memslot->npages - log->first_page)
5346 +- return -EINVAL;
5347 ++ log->num_pages > memslot->npages - log->first_page ||
5348 ++ (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))
5349 ++ return -EINVAL;
5350 +
5351 + *flush = false;
5352 + dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);