Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.12 commit in: /
Date: Wed, 30 Aug 2017 10:05:25
Message-Id: 1504087512.333bb572330727d30ce9e4e2b5563e63819eda44.mpagano@gentoo
commit: 333bb572330727d30ce9e4e2b5563e63819eda44
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Aug 30 10:05:12 2017 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Aug 30 10:05:12 2017 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=333bb572

Linux patch 4.12.10

0000_README | 4 +
1009_linux-4.12.10.patch | 3576 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 3580 insertions(+)

diff --git a/0000_README b/0000_README
index 90242d0..a64a189 100644
--- a/0000_README
+++ b/0000_README
@@ -79,6 +79,10 @@ Patch: 1008_linux-4.12.9.patch
From: http://www.kernel.org
Desc: Linux 4.12.9

+Patch: 1009_linux-4.12.10.patch
+From: http://www.kernel.org
+Desc: Linux 4.12.10
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1009_linux-4.12.10.patch b/1009_linux-4.12.10.patch
new file mode 100644
index 0000000..a2ab6c1
--- /dev/null
+++ b/1009_linux-4.12.10.patch
@@ -0,0 +1,3576 @@
+diff --git a/Makefile b/Makefile
+index a6c2a5e7a48d..6889ec6a091d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 12
+-SUBLEVEL = 9
++SUBLEVEL = 10
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+
+diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
+index 19ebddffb279..02fd1cece6ef 100644
+--- a/arch/arc/include/asm/cache.h
++++ b/arch/arc/include/asm/cache.h
+@@ -96,7 +96,9 @@ extern unsigned long perip_base, perip_end;
+ #define ARC_REG_SLC_FLUSH 0x904
+ #define ARC_REG_SLC_INVALIDATE 0x905
+ #define ARC_REG_SLC_RGN_START 0x914
++#define ARC_REG_SLC_RGN_START1 0x915
+ #define ARC_REG_SLC_RGN_END 0x916
++#define ARC_REG_SLC_RGN_END1 0x917
+
+ /* Bit val in SLC_CONTROL */
+ #define SLC_CTRL_DIS 0x001
+diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h
+index db7319e9b506..efb79fafff1d 100644
+--- a/arch/arc/include/asm/mmu.h
++++ b/arch/arc/include/asm/mmu.h
+@@ -94,6 +94,8 @@ static inline int is_pae40_enabled(void)
+ return IS_ENABLED(CONFIG_ARC_HAS_PAE40);
+ }
+
++extern int pae40_exist_but_not_enab(void);
++
+ #endif /* !__ASSEMBLY__ */
+
+ #endif
+diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
+index a867575a758b..7db283b46ebd 100644
+--- a/arch/arc/mm/cache.c
++++ b/arch/arc/mm/cache.c
+@@ -665,6 +665,7 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
+ static DEFINE_SPINLOCK(lock);
+ unsigned long flags;
+ unsigned int ctrl;
++ phys_addr_t end;
+
+ spin_lock_irqsave(&lock, flags);
+
+@@ -694,8 +695,19 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
+ * END needs to be setup before START (latter triggers the operation)
+ * END can't be same as START, so add (l2_line_sz - 1) to sz
+ */
+- write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1));
+- write_aux_reg(ARC_REG_SLC_RGN_START, paddr);
++ end = paddr + sz + l2_line_sz - 1;
++ if (is_pae40_enabled())
++ write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end));
++
++ write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end));
++
++ if (is_pae40_enabled())
++ write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr));
++
++ write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr));
++
++ /* Make sure "busy" bit reports correct status, see STAR 9001165532 */
++ read_aux_reg(ARC_REG_SLC_CTRL);
+
+ while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
+
+@@ -1111,6 +1123,13 @@ noinline void __init arc_ioc_setup(void)
+ __dc_enable();
+ }
+
++/*
++ * Cache related boot time checks/setups only needed on master CPU:
++ * - Geometry checks (kernel build and hardware agree: e.g. L1_CACHE_BYTES)
++ * Assume SMP only, so all cores will have same cache config. A check on
++ * one core suffices for all
++ * - IOC setup / dma callbacks only need to be done once
++ */
+ void __init arc_cache_init_master(void)
+ {
+ unsigned int __maybe_unused cpu = smp_processor_id();
+@@ -1190,12 +1209,27 @@ void __ref arc_cache_init(void)
+
+ printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+
+- /*
+- * Only master CPU needs to execute rest of function:
+- * - Assume SMP so all cores will have same cache config so
+- * any geomtry checks will be same for all
+- * - IOC setup / dma callbacks only need to be setup once
+- */
+ if (!cpu)
+ arc_cache_init_master();
++
++ /*
++ * In PAE regime, TLB and cache maintenance ops take wider addresses
++ * And even if PAE is not enabled in kernel, the upper 32-bits still need
++ * to be zeroed to keep the ops sane.
++ * As an optimization for more common !PAE enabled case, zero them out
++ * once at init, rather than checking/setting to 0 for every runtime op
++ */
++ if (is_isa_arcv2() && pae40_exist_but_not_enab()) {
++
++ if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE))
++ write_aux_reg(ARC_REG_IC_PTAG_HI, 0);
++
++ if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE))
++ write_aux_reg(ARC_REG_DC_PTAG_HI, 0);
++
++ if (l2_line_sz) {
++ write_aux_reg(ARC_REG_SLC_RGN_END1, 0);
++ write_aux_reg(ARC_REG_SLC_RGN_START1, 0);
++ }
++ }
+ }
+diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
+index d0126fdfe2d8..b181f3ee38aa 100644
+--- a/arch/arc/mm/tlb.c
++++ b/arch/arc/mm/tlb.c
+@@ -104,6 +104,8 @@
+ /* A copy of the ASID from the PID reg is kept in asid_cache */
+ DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;
+
++static int __read_mostly pae_exists;
++
+ /*
+ * Utility Routine to erase a J-TLB entry
+ * Caller needs to setup Index Reg (manually or via getIndex)
+@@ -784,7 +786,7 @@ void read_decode_mmu_bcr(void)
+ mmu->u_dtlb = mmu4->u_dtlb * 4;
+ mmu->u_itlb = mmu4->u_itlb * 4;
+ mmu->sasid = mmu4->sasid;
+- mmu->pae = mmu4->pae;
++ pae_exists = mmu->pae = mmu4->pae;
+ }
+ }
+
+@@ -809,6 +811,11 @@ char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
+ return buf;
+ }
+
++int pae40_exist_but_not_enab(void)
++{
++ return pae_exists && !is_pae40_enabled();
++}
++
+ void arc_mmu_init(void)
+ {
+ char str[256];
+@@ -859,6 +866,9 @@ void arc_mmu_init(void)
+ /* swapper_pg_dir is the pgd for the kernel, used by vmalloc */
+ write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir);
+ #endif
++
++ if (pae40_exist_but_not_enab())
++ write_aux_reg(ARC_REG_TLBPD1HI, 0);
+ }
+
+ /*
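
The ARC hunks above split a 40-bit PAE40 physical address across two 32-bit
auxiliary registers, programming END before START (the START write triggers
the operation). A stand-alone C sketch of that split; the helpers mirror the
kernel's upper_32_bits()/lower_32_bits(), and all values are illustrative:

#include <stdint.h>
#include <stdio.h>

/* Same semantics as the kernel's upper_32_bits()/lower_32_bits(). */
static uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }
static uint32_t lower_32(uint64_t v) { return (uint32_t)v; }

int main(void)
{
    uint64_t paddr = 0x1234567890ULL;       /* 40-bit physical address */
    uint64_t end = paddr + 0x2000 + 64 - 1; /* paddr + sz + l2_line_sz - 1 */

    /* END first (high half, then low), then START; START kicks off the op. */
    printf("RGN_END1=0x%08x RGN_END=0x%08x\n", upper_32(end), lower_32(end));
    printf("RGN_START1=0x%08x RGN_START=0x%08x\n",
           upper_32(paddr), lower_32(paddr));
    return 0;
}
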
+diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
+index 06da8ea16bbe..c7b4995868e1 100644
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -161,9 +161,11 @@ void fpsimd_flush_thread(void)
+ {
+ if (!system_supports_fpsimd())
+ return;
++ preempt_disable();
+ memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
+ fpsimd_flush_task_state(current);
+ set_thread_flag(TIF_FOREIGN_FPSTATE);
++ preempt_enable();
+ }
+
+ /*
+diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
+index da7e9432fa8f..db80b301c080 100644
+--- a/arch/powerpc/include/asm/mmu_context.h
++++ b/arch/powerpc/include/asm/mmu_context.h
+@@ -80,9 +80,27 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev,
+ struct task_struct *tsk)
+ {
+ /* Mark this context has been used on the new CPU */
+- if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next)))
++ if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) {
+ cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
+
++ /*
++ * This full barrier orders the store to the cpumask above vs
++ * a subsequent operation which allows this CPU to begin loading
++ * translations for next.
++ *
++ * When using the radix MMU that operation is the load of the
++ * MMU context id, which is then moved to SPRN_PID.
++ *
++ * For the hash MMU it is either the first load from slb_cache
++ * in switch_slb(), and/or the store of paca->mm_ctx_id in
++ * copy_mm_to_paca().
++ *
++ * On the read side the barrier is in pte_xchg(), which orders
++ * the store to the PTE vs the load of mm_cpumask.
++ */
++ smp_mb();
++ }
++
+ /* 32-bit keeps track of the current PGDIR in the thread struct */
+ #ifdef CONFIG_PPC32
+ tsk->thread.pgdir = next->pgd;
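
The barrier comment above describes a two-sided store/mb/load pairing. A
hedged user-space sketch of the same pattern with C11 atomics (the names
below are illustrative stand-ins, not kernel APIs):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool cpu_in_mask; /* stands in for this CPU's mm_cpumask bit */
static atomic_long pte_val_sim; /* stands in for the PTE */

/* switch_mm side: publish the cpumask bit, full fence (the new smp_mb()
 * in the hunk above), then load a translation. */
static long switch_mm_side(void)
{
    atomic_store_explicit(&cpu_in_mask, true, memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);
    return atomic_load_explicit(&pte_val_sim, memory_order_relaxed);
}

/* invalidation side: publish the PTE, full fence (the barrier inside
 * pte_xchg()'s cmpxchg), then read the cpumask. With both fences in
 * place, at least one side must observe the other's store. */
static bool invalidate_side(long new_pte)
{
    atomic_store_explicit(&pte_val_sim, new_pte, memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);
    return atomic_load_explicit(&cpu_in_mask, memory_order_relaxed);
}

int main(void)
{
    (void)switch_mm_side();
    (void)invalidate_side(42);
    return 0;
}
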
+diff --git a/arch/powerpc/include/asm/pgtable-be-types.h b/arch/powerpc/include/asm/pgtable-be-types.h
+index 9c0f5db5cf46..67e7e3d990f4 100644
+--- a/arch/powerpc/include/asm/pgtable-be-types.h
++++ b/arch/powerpc/include/asm/pgtable-be-types.h
+@@ -87,6 +87,7 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
+ unsigned long *p = (unsigned long *)ptep;
+ __be64 prev;
+
++ /* See comment in switch_mm_irqs_off() */
+ prev = (__force __be64)__cmpxchg_u64(p, (__force unsigned long)pte_raw(old),
+ (__force unsigned long)pte_raw(new));
+
+diff --git a/arch/powerpc/include/asm/pgtable-types.h b/arch/powerpc/include/asm/pgtable-types.h
+index 8bd3b13fe2fb..369a164b545c 100644
+--- a/arch/powerpc/include/asm/pgtable-types.h
++++ b/arch/powerpc/include/asm/pgtable-types.h
+@@ -62,6 +62,7 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
+ {
+ unsigned long *p = (unsigned long *)ptep;
+
++ /* See comment in switch_mm_irqs_off() */
+ return pte_val(old) == __cmpxchg_u64(p, pte_val(old), pte_val(new));
+ }
+ #endif
+diff --git a/arch/s390/kvm/sthyi.c b/arch/s390/kvm/sthyi.c
+index 926b5244263e..a2e5c24f47a7 100644
+--- a/arch/s390/kvm/sthyi.c
++++ b/arch/s390/kvm/sthyi.c
+@@ -394,7 +394,7 @@ static int sthyi(u64 vaddr)
+ "srl %[cc],28\n"
+ : [cc] "=d" (cc)
+ : [code] "d" (code), [addr] "a" (addr)
+- : "memory", "cc");
++ : "3", "memory", "cc");
+ return cc;
+ }
+
+@@ -425,7 +425,7 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
+ VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
+ trace_kvm_s390_handle_sthyi(vcpu, code, addr);
+
+- if (reg1 == reg2 || reg1 & 1 || reg2 & 1 || addr & ~PAGE_MASK)
++ if (reg1 == reg2 || reg1 & 1 || reg2 & 1)
+ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+ if (code & 0xffff) {
+@@ -433,6 +433,9 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
+ goto out;
+ }
+
++ if (addr & ~PAGE_MASK)
++ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
++
+ /*
+ * If the page has not yet been faulted in, we want to do that
+ * now and not after all the expensive calculations.
+diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
+index 68bec7c97cb8..af6ac9c5d32e 100644
+--- a/arch/sparc/kernel/pci_sun4v.c
++++ b/arch/sparc/kernel/pci_sun4v.c
+@@ -1241,8 +1241,6 @@ static int pci_sun4v_probe(struct platform_device *op)
+ * ATU group, but ATU hcalls won't be available.
+ */
+ hv_atu = false;
+- pr_err(PFX "Could not register hvapi ATU err=%d\n",
+- err);
+ } else {
+ pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n",
+ vatu_major, vatu_minor);
+diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
+index 255645f60ca2..554cdb205d17 100644
+--- a/arch/x86/include/asm/fpu/internal.h
++++ b/arch/x86/include/asm/fpu/internal.h
+@@ -450,10 +450,10 @@ static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
+ return 0;
+ }
+
+-static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate)
++static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate, u64 mask)
+ {
+ if (use_xsave()) {
+- copy_kernel_to_xregs(&fpstate->xsave, -1);
++ copy_kernel_to_xregs(&fpstate->xsave, mask);
+ } else {
+ if (use_fxsr())
+ copy_kernel_to_fxregs(&fpstate->fxsave);
+@@ -477,7 +477,7 @@ static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
+ : : [addr] "m" (fpstate));
+ }
+
+- __copy_kernel_to_fpregs(fpstate);
++ __copy_kernel_to_fpregs(fpstate, -1);
+ }
+
+ extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 695605eb1dfb..ed8fdf86acfb 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -486,6 +486,7 @@ struct kvm_vcpu_arch {
+ unsigned long cr4;
+ unsigned long cr4_guest_owned_bits;
+ unsigned long cr8;
++ u32 pkru;
+ u32 hflags;
+ u64 efer;
+ u64 apic_base;
+diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
+index 68b329d77b3a..8463a136f711 100644
+--- a/arch/x86/include/asm/mmu_context.h
++++ b/arch/x86/include/asm/mmu_context.h
+@@ -116,9 +116,7 @@ static inline int init_new_context(struct task_struct *tsk,
+ mm->context.execute_only_pkey = -1;
+ }
+ #endif
+- init_new_context_ldt(tsk, mm);
+-
+- return 0;
++ return init_new_context_ldt(tsk, mm);
+ }
+ static inline void destroy_context(struct mm_struct *mm)
+ {
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 59ca2eea522c..19adbb418443 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -469,7 +469,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+ entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
+ cpuid_mask(&entry->ecx, CPUID_7_ECX);
+ /* PKU is not yet implemented for shadow paging. */
+- if (!tdp_enabled)
++ if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
+ entry->ecx &= ~F(PKU);
+ entry->edx &= kvm_cpuid_7_0_edx_x86_features;
+ entry->edx &= get_scattered_cpuid_leaf(7, 0, CPUID_EDX);
+diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
+index 762cdf2595f9..e1e89ee4af75 100644
+--- a/arch/x86/kvm/kvm_cache_regs.h
++++ b/arch/x86/kvm/kvm_cache_regs.h
+@@ -84,11 +84,6 @@ static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
+ | ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
+ }
+
+-static inline u32 kvm_read_pkru(struct kvm_vcpu *vcpu)
+-{
+- return kvm_x86_ops->get_pkru(vcpu);
+-}
+-
+ static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
+ {
+ vcpu->arch.hflags |= HF_GUEST_MASK;
+diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
+index 330bf3a811fb..b0d36a229d2e 100644
+--- a/arch/x86/kvm/mmu.h
++++ b/arch/x86/kvm/mmu.h
+@@ -182,7 +182,7 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+ * index of the protection domain, so pte_pkey * 2 is
+ * is the index of the first bit for the domain.
+ */
+- pkru_bits = (kvm_read_pkru(vcpu) >> (pte_pkey * 2)) & 3;
++ pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;
+
+ /* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
+ offset = (pfec & ~1) +
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index ba9891ac5c56..58dbca7f2106 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -1725,11 +1725,6 @@ static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
+ to_svm(vcpu)->vmcb->save.rflags = rflags;
+ }
+
+-static u32 svm_get_pkru(struct kvm_vcpu *vcpu)
+-{
+- return 0;
+-}
+-
+ static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
+ {
+ switch (reg) {
+@@ -5313,8 +5308,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
+ .get_rflags = svm_get_rflags,
+ .set_rflags = svm_set_rflags,
+
+- .get_pkru = svm_get_pkru,
+-
+ .tlb_flush = svm_flush_tlb,
+
+ .run = svm_vcpu_run,
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 270d83da090c..2461e1a53f8c 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -636,8 +636,6 @@ struct vcpu_vmx {
+
+ u64 current_tsc_ratio;
+
+- bool guest_pkru_valid;
+- u32 guest_pkru;
+ u32 host_pkru;
+
+ /*
+@@ -2368,11 +2366,6 @@ static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
+ vmcs_writel(GUEST_RFLAGS, rflags);
+ }
+
+-static u32 vmx_get_pkru(struct kvm_vcpu *vcpu)
+-{
+- return to_vmx(vcpu)->guest_pkru;
+-}
+-
+ static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
+ {
+ u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
+@@ -8860,8 +8853,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+ vmx_set_interrupt_shadow(vcpu, 0);
+
+- if (vmx->guest_pkru_valid)
+- __write_pkru(vmx->guest_pkru);
++ if (static_cpu_has(X86_FEATURE_PKU) &&
++ kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
++ vcpu->arch.pkru != vmx->host_pkru)
++ __write_pkru(vcpu->arch.pkru);
+
+ atomic_switch_perf_msrs(vmx);
+ debugctlmsr = get_debugctlmsr();
+@@ -9009,13 +9004,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ * back on host, so it is safe to read guest PKRU from current
+ * XSAVE.
+ */
+- if (boot_cpu_has(X86_FEATURE_OSPKE)) {
+- vmx->guest_pkru = __read_pkru();
+- if (vmx->guest_pkru != vmx->host_pkru) {
+- vmx->guest_pkru_valid = true;
++ if (static_cpu_has(X86_FEATURE_PKU) &&
++ kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) {
++ vcpu->arch.pkru = __read_pkru();
++ if (vcpu->arch.pkru != vmx->host_pkru)
+ __write_pkru(vmx->host_pkru);
+- } else
+- vmx->guest_pkru_valid = false;
+ }
+
+ /*
+@@ -11507,8 +11500,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
+ .get_rflags = vmx_get_rflags,
+ .set_rflags = vmx_set_rflags,
+
+- .get_pkru = vmx_get_pkru,
+-
+ .tlb_flush = vmx_flush_tlb,
+
+ .run = vmx_vcpu_run,
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 0e846f0cb83b..786e47fc6092 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3236,7 +3236,12 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
+ u32 size, offset, ecx, edx;
+ cpuid_count(XSTATE_CPUID, index,
+ &size, &offset, &ecx, &edx);
+- memcpy(dest + offset, src, size);
++ if (feature == XFEATURE_MASK_PKRU)
++ memcpy(dest + offset, &vcpu->arch.pkru,
++ sizeof(vcpu->arch.pkru));
++ else
++ memcpy(dest + offset, src, size);
++
+ }
+
+ valid -= feature;
+@@ -3274,7 +3279,11 @@ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
+ u32 size, offset, ecx, edx;
+ cpuid_count(XSTATE_CPUID, index,
+ &size, &offset, &ecx, &edx);
+- memcpy(dest, src + offset, size);
++ if (feature == XFEATURE_MASK_PKRU)
++ memcpy(&vcpu->arch.pkru, src + offset,
++ sizeof(vcpu->arch.pkru));
++ else
++ memcpy(dest, src + offset, size);
+ }
+
+ valid -= feature;
+@@ -7616,7 +7625,9 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
+ */
+ vcpu->guest_fpu_loaded = 1;
+ __kernel_fpu_begin();
+- __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
++ /* PKRU is separately restored in kvm_x86_ops->run. */
++ __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
++ ~XFEATURE_MASK_PKRU);
+ trace_kvm_fpu(1);
+ }
+
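
Taken together, the KVM hunks above keep the guest's PKRU in vcpu->arch.pkru
and swap it against the host value around guest entry/exit, skipping the
write when the values already match. A stand-alone sketch of that
read/compare/write pattern; rdpkru()/wrpkru() below simulate the
RDPKRU/WRPKRU instructions that the kernel reaches via
__read_pkru()/__write_pkru():

#include <stdint.h>
#include <stdio.h>

static uint32_t hw_pkru;                     /* simulated PKRU register */
static uint32_t rdpkru(void) { return hw_pkru; }
static void wrpkru(uint32_t v) { hw_pkru = v; }

struct vcpu { uint32_t pkru; };

static void enter_guest(const struct vcpu *v, uint32_t host_pkru)
{
    if (v->pkru != host_pkru)   /* WRPKRU is not free; skip when equal */
        wrpkru(v->pkru);
}

static void exit_guest(struct vcpu *v, uint32_t host_pkru)
{
    v->pkru = rdpkru();         /* guest value is still live here */
    if (v->pkru != host_pkru)
        wrpkru(host_pkru);
}

int main(void)
{
    struct vcpu v = { .pkru = 0x4 };
    uint32_t host = 0x55555554;

    wrpkru(host);
    enter_guest(&v, host);
    exit_guest(&v, host);
    printf("host PKRU restored: 0x%x\n", hw_pkru);
    return 0;
}
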
+diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
+index fc6c416f8724..d5999eb41c00 100644
+--- a/drivers/acpi/acpi_apd.c
++++ b/drivers/acpi/acpi_apd.c
+@@ -180,8 +180,8 @@ static const struct acpi_device_id acpi_apd_device_ids[] = {
+ { "APMC0D0F", APD_ADDR(xgene_i2c_desc) },
+ { "BRCM900D", APD_ADDR(vulcan_spi_desc) },
+ { "CAV900D", APD_ADDR(vulcan_spi_desc) },
+- { "HISI0A21", APD_ADDR(hip07_i2c_desc) },
+- { "HISI0A22", APD_ADDR(hip08_i2c_desc) },
++ { "HISI02A1", APD_ADDR(hip07_i2c_desc) },
++ { "HISI02A2", APD_ADDR(hip08_i2c_desc) },
+ #endif
+ { }
+ };
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index cfad5d9a22f3..d8b2779b0140 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -1703,7 +1703,7 @@ int __init acpi_ec_dsdt_probe(void)
+ * functioning ECDT EC first in order to handle the events.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=115021
+ */
+-int __init acpi_ec_ecdt_start(void)
++static int __init acpi_ec_ecdt_start(void)
+ {
+ acpi_handle handle;
+
+@@ -1906,20 +1906,17 @@ static inline void acpi_ec_query_exit(void)
+ int __init acpi_ec_init(void)
+ {
+ int result;
++ int ecdt_fail, dsdt_fail;
+
+ /* register workqueue for _Qxx evaluations */
+ result = acpi_ec_query_init();
+ if (result)
+- goto err_exit;
+- /* Now register the driver for the EC */
+- result = acpi_bus_register_driver(&acpi_ec_driver);
+- if (result)
+- goto err_exit;
++ return result;
+
+-err_exit:
+- if (result)
+- acpi_ec_query_exit();
+- return result;
++ /* Drivers must be started after acpi_ec_query_init() */
++ ecdt_fail = acpi_ec_ecdt_start();
++ dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
++ return ecdt_fail && dsdt_fail ? -ENODEV : 0;
+ }
+
+ /* EC driver currently not unloadable */
+diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
+index 66229ffa909b..7e66f3c72b81 100644
+--- a/drivers/acpi/internal.h
++++ b/drivers/acpi/internal.h
+@@ -185,7 +185,6 @@ typedef int (*acpi_ec_query_func) (void *data);
+ int acpi_ec_init(void);
+ int acpi_ec_ecdt_probe(void);
+ int acpi_ec_dsdt_probe(void);
+-int acpi_ec_ecdt_start(void);
+ void acpi_ec_block_transactions(void);
+ void acpi_ec_unblock_transactions(void);
+ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
+diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
+index 9364398204e9..6822ac9f106b 100644
+--- a/drivers/acpi/property.c
++++ b/drivers/acpi/property.c
+@@ -1046,7 +1046,7 @@ static struct fwnode_handle *acpi_graph_get_child_prop_value(
+ fwnode_for_each_child_node(fwnode, child) {
+ u32 nr;
+
+- if (!fwnode_property_read_u32(fwnode, prop_name, &nr))
++ if (fwnode_property_read_u32(child, prop_name, &nr))
+ continue;
+
+ if (val == nr)
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index d53162997f32..359d16c30002 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -2085,7 +2085,6 @@ int __init acpi_scan_init(void)
+
+ acpi_gpe_apply_masked_gpes();
+ acpi_update_all_gpes();
+- acpi_ec_ecdt_start();
+
+ acpi_scan_initialized = true;
+
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index aae4d8d4be36..831cdd7d197d 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -2200,8 +2200,12 @@ static void binder_transaction(struct binder_proc *proc,
+ list_add_tail(&t->work.entry, target_list);
+ tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
+ list_add_tail(&tcomplete->entry, &thread->todo);
+- if (target_wait)
+- wake_up_interruptible(target_wait);
++ if (target_wait) {
++ if (reply || !(t->flags & TF_ONE_WAY))
++ wake_up_interruptible_sync(target_wait);
++ else
++ wake_up_interruptible(target_wait);
++ }
+ return;
+
+ err_translate_failed:
+@@ -3247,10 +3251,6 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ /*pr_info("binder_ioctl: %d:%d %x %lx\n",
+ proc->pid, current->pid, cmd, arg);*/
+
+- if (unlikely(current->mm != proc->vma_vm_mm)) {
+- pr_err("current mm mismatch proc mm\n");
+- return -EINVAL;
+- }
+ trace_binder_ioctl(cmd, arg);
+
+ ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
+@@ -3362,7 +3362,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
+ const char *failure_string;
+ struct binder_buffer *buffer;
+
+- if (proc->tsk != current)
++ if (proc->tsk != current->group_leader)
+ return -EINVAL;
+
+ if ((vma->vm_end - vma->vm_start) > SZ_4M)
+@@ -3464,9 +3464,8 @@ static int binder_open(struct inode *nodp, struct file *filp)
+ proc = kzalloc(sizeof(*proc), GFP_KERNEL);
+ if (proc == NULL)
+ return -ENOMEM;
+- get_task_struct(current);
+- proc->tsk = current;
+- proc->vma_vm_mm = current->mm;
++ get_task_struct(current->group_leader);
++ proc->tsk = current->group_leader;
+ INIT_LIST_HEAD(&proc->todo);
+ init_waitqueue_head(&proc->wait);
+ proc->default_priority = task_nice(current);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+index c0a806280257..f4a4efec8737 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+@@ -839,8 +839,6 @@ static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
+
+ mode_info = info->mode_info;
+ if (mode_info) {
+- /* if the displays are off, vblank time is max */
+- mode_info->vblank_time_us = 0xffffffff;
+ /* always set the reference clock */
+ mode_info->ref_clock = adev->clock.spll.reference_freq;
+ }
+diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
+index f32506a7c1d6..422404dbfabb 100644
+--- a/drivers/gpu/drm/drm_atomic.c
++++ b/drivers/gpu/drm/drm_atomic.c
+@@ -1581,6 +1581,9 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
+ if (config->funcs->atomic_check)
+ ret = config->funcs->atomic_check(state->dev, state);
+
++ if (ret)
++ return ret;
++
+ if (!state->allow_modeset) {
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+@@ -1591,7 +1594,7 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
+ }
+ }
+
+- return ret;
++ return 0;
+ }
+ EXPORT_SYMBOL(drm_atomic_check_only);
+
+@@ -2093,10 +2096,10 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
+ struct drm_atomic_state *state;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_plane *plane;
+- struct drm_out_fence_state *fence_state = NULL;
++ struct drm_out_fence_state *fence_state;
+ unsigned plane_mask;
+ int ret = 0;
+- unsigned int i, j, num_fences = 0;
++ unsigned int i, j, num_fences;
+
+ /* disallow for drivers not supporting atomic: */
+ if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
+@@ -2137,6 +2140,8 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
+ plane_mask = 0;
+ copied_objs = 0;
+ copied_props = 0;
++ fence_state = NULL;
++ num_fences = 0;
+
+ for (i = 0; i < arg->count_objs; i++) {
+ uint32_t obj_id, count_props;
+diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
+index b1e28c944637..08e6e981104f 100644
+--- a/drivers/gpu/drm/drm_gem.c
++++ b/drivers/gpu/drm/drm_gem.c
+@@ -255,13 +255,13 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
+ struct drm_gem_object *obj = ptr;
+ struct drm_device *dev = obj->dev;
+
++ if (dev->driver->gem_close_object)
++ dev->driver->gem_close_object(obj, file_priv);
++
+ if (drm_core_check_feature(dev, DRIVER_PRIME))
+ drm_gem_remove_prime_handles(obj, file_priv);
+ drm_vma_node_revoke(&obj->vma_node, file_priv);
+
+- if (dev->driver->gem_close_object)
+- dev->driver->gem_close_object(obj, file_priv);
+-
+ drm_gem_object_handle_put_unlocked(obj);
+
+ return 0;
+diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
+index 5dc8c4350602..e40c12fabbde 100644
+--- a/drivers/gpu/drm/drm_plane.c
++++ b/drivers/gpu/drm/drm_plane.c
+@@ -601,6 +601,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
+
+ crtc = drm_crtc_find(dev, plane_req->crtc_id);
+ if (!crtc) {
++ drm_framebuffer_put(fb);
+ DRM_DEBUG_KMS("Unknown crtc ID %d\n",
+ plane_req->crtc_id);
+ return -ENOENT;
+diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
+index 41b2c3aaa04a..37258b7d1bce 100644
+--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
++++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
+@@ -2754,7 +2754,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+ unmap_src:
+ i915_gem_object_unpin_map(obj);
+ put_obj:
+- i915_gem_object_put(wa_ctx->indirect_ctx.obj);
++ i915_gem_object_put(obj);
+ return ret;
+ }
+
+diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
+index 639d45c1dd2e..7ea7fd1e8856 100644
+--- a/drivers/gpu/drm/i915/intel_bios.c
++++ b/drivers/gpu/drm/i915/intel_bios.c
+@@ -1120,8 +1120,8 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
+ bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
+ uint8_t aux_channel, ddc_pin;
+ /* Each DDI port can have more than one value on the "DVO Port" field,
+- * so look for all the possible values for each port and abort if more
+- * than one is found. */
++ * so look for all the possible values for each port.
++ */
+ int dvo_ports[][3] = {
+ {DVO_PORT_HDMIA, DVO_PORT_DPA, -1},
+ {DVO_PORT_HDMIB, DVO_PORT_DPB, -1},
+@@ -1130,7 +1130,10 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
+ {DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
+ };
+
+- /* Find the child device to use, abort if more than one found. */
++ /*
++ * Find the first child device to reference the port, report if more
++ * than one found.
++ */
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ it = dev_priv->vbt.child_dev + i;
+
+@@ -1140,11 +1143,11 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
+
+ if (it->common.dvo_port == dvo_ports[port][j]) {
+ if (child) {
+- DRM_DEBUG_KMS("More than one child device for port %c in VBT.\n",
++ DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n",
+ port_name(port));
+- return;
++ } else {
++ child = it;
+ }
+- child = it;
+ }
+ }
+ }
+diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
+index 8ddd72cd5873..05601ab27d7c 100644
+--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
++++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
+@@ -25,12 +25,20 @@
+ #include "sun4i_framebuffer.h"
+ #include "sun4i_tcon.h"
+
++static void sun4i_drv_lastclose(struct drm_device *dev)
++{
++ struct sun4i_drv *drv = dev->dev_private;
++
++ drm_fbdev_cma_restore_mode(drv->fbdev);
++}
++
+ DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops);
+
+ static struct drm_driver sun4i_drv_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
+
+ /* Generic Operations */
++ .lastclose = sun4i_drv_lastclose,
+ .fops = &sun4i_drv_fops,
+ .name = "sun4i-drm",
+ .desc = "Allwinner sun4i Display Engine",
+diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
+index 0703da1d946a..eea71c4e969d 100644
+--- a/drivers/i2c/busses/i2c-designware-platdrv.c
++++ b/drivers/i2c/busses/i2c-designware-platdrv.c
+@@ -392,7 +392,7 @@ static void dw_i2c_plat_complete(struct device *dev)
+ #endif
+
+ #ifdef CONFIG_PM
+-static int dw_i2c_plat_suspend(struct device *dev)
++static int dw_i2c_plat_runtime_suspend(struct device *dev)
+ {
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
+@@ -414,11 +414,21 @@ static int dw_i2c_plat_resume(struct device *dev)
+ return 0;
+ }
+
++#ifdef CONFIG_PM_SLEEP
++static int dw_i2c_plat_suspend(struct device *dev)
++{
++ pm_runtime_resume(dev);
++ return dw_i2c_plat_runtime_suspend(dev);
++}
++#endif
++
+ static const struct dev_pm_ops dw_i2c_dev_pm_ops = {
+ .prepare = dw_i2c_plat_prepare,
+ .complete = dw_i2c_plat_complete,
+ SET_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume)
+- SET_RUNTIME_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume, NULL)
++ SET_RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend,
++ dw_i2c_plat_resume,
++ NULL)
+ };
+
+ #define DW_I2C_DEV_PMOPS (&dw_i2c_dev_pm_ops)
+diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+index 0b5dea050239..6dda332f252a 100644
+--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
++++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+@@ -36,8 +36,6 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
+ s32 poll_value = 0;
+
+ if (state) {
+- if (!atomic_read(&st->user_requested_state))
+- return 0;
+ if (sensor_hub_device_open(st->hsdev))
+ return -EIO;
+
+@@ -86,6 +84,9 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
+ &report_val);
+ }
+
++ pr_debug("HID_SENSOR %s set power_state %d report_state %d\n",
++ st->pdev->name, state_val, report_val);
++
+ sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
+ st->power_state.index,
+ sizeof(state_val), &state_val);
+@@ -107,6 +108,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
+ ret = pm_runtime_get_sync(&st->pdev->dev);
+ else {
+ pm_runtime_mark_last_busy(&st->pdev->dev);
++ pm_runtime_use_autosuspend(&st->pdev->dev);
+ ret = pm_runtime_put_autosuspend(&st->pdev->dev);
+ }
+ if (ret < 0) {
+@@ -205,8 +207,6 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
+ /* Default to 3 seconds, but can be changed from sysfs */
+ pm_runtime_set_autosuspend_delay(&attrb->pdev->dev,
+ 3000);
+- pm_runtime_use_autosuspend(&attrb->pdev->dev);
+-
+ return ret;
+ error_unreg_trigger:
+ iio_trigger_unregister(trig);
+diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
+index 8cf84d3488b2..12898424d838 100644
+--- a/drivers/iio/imu/adis16480.c
++++ b/drivers/iio/imu/adis16480.c
+@@ -696,7 +696,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
+ .gyro_max_val = IIO_RAD_TO_DEGREE(22500),
+ .gyro_max_scale = 450,
+ .accel_max_val = IIO_M_S_2_TO_G(12500),
+- .accel_max_scale = 5,
++ .accel_max_scale = 10,
+ },
+ [ADIS16485] = {
+ .channels = adis16485_channels,
+diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
+index 8e1b0861fbe4..c38563699984 100644
+--- a/drivers/iio/magnetometer/st_magn_core.c
++++ b/drivers/iio/magnetometer/st_magn_core.c
+@@ -356,9 +356,7 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
+ .drdy_irq = {
+ .addr = 0x62,
+ .mask_int1 = 0x01,
+- .addr_ihl = 0x63,
+- .mask_ihl = 0x04,
+- .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
++ .addr_stat_drdy = 0x67,
+ },
+ .multi_read_bit = false,
+ .bootime = 2,
+diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
+index f3f9d0b5dce0..5ea2d80800f9 100644
+--- a/drivers/infiniband/core/uverbs_cmd.c
++++ b/drivers/infiniband/core/uverbs_cmd.c
+@@ -1015,7 +1015,7 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
+ cq->uobject = &obj->uobject;
+ cq->comp_handler = ib_uverbs_comp_handler;
+ cq->event_handler = ib_uverbs_cq_event_handler;
+- cq->cq_context = &ev_file->ev_queue;
++ cq->cq_context = ev_file ? &ev_file->ev_queue : NULL;
+ atomic_set(&cq->usecnt, 0);
+
+ obj->uobject.object = cq;
+diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
+index 262d1057c1da..850b00e3ad8e 100644
+--- a/drivers/input/mouse/alps.c
++++ b/drivers/input/mouse/alps.c
+@@ -1215,14 +1215,24 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
+
+ case SS4_PACKET_ID_TWO:
+ if (priv->flags & ALPS_BUTTONPAD) {
+- f->mt[0].x = SS4_BTL_MF_X_V2(p, 0);
++ if (IS_SS4PLUS_DEV(priv->dev_id)) {
++ f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
++ f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
++ } else {
++ f->mt[0].x = SS4_BTL_MF_X_V2(p, 0);
++ f->mt[1].x = SS4_BTL_MF_X_V2(p, 1);
++ }
+ f->mt[0].y = SS4_BTL_MF_Y_V2(p, 0);
+- f->mt[1].x = SS4_BTL_MF_X_V2(p, 1);
+ f->mt[1].y = SS4_BTL_MF_Y_V2(p, 1);
+ } else {
+- f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
++ if (IS_SS4PLUS_DEV(priv->dev_id)) {
++ f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0);
++ f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1);
++ } else {
++ f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
++ f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
++ }
+ f->mt[0].y = SS4_STD_MF_Y_V2(p, 0);
+- f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
+ f->mt[1].y = SS4_STD_MF_Y_V2(p, 1);
+ }
+ f->pressure = SS4_MF_Z_V2(p, 0) ? 0x30 : 0;
+@@ -1239,16 +1249,27 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
+
+ case SS4_PACKET_ID_MULTI:
+ if (priv->flags & ALPS_BUTTONPAD) {
+- f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
++ if (IS_SS4PLUS_DEV(priv->dev_id)) {
++ f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
++ f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
++ } else {
++ f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
++ f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
++ }
++
+ f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0);
+- f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
+ f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1);
+ no_data_x = SS4_MFPACKET_NO_AX_BL;
+ no_data_y = SS4_MFPACKET_NO_AY_BL;
+ } else {
+- f->mt[2].x = SS4_STD_MF_X_V2(p, 0);
++ if (IS_SS4PLUS_DEV(priv->dev_id)) {
++ f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0);
++ f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1);
++ } else {
++ f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
++ f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
++ }
+ f->mt[2].y = SS4_STD_MF_Y_V2(p, 0);
+- f->mt[3].x = SS4_STD_MF_X_V2(p, 1);
+ f->mt[3].y = SS4_STD_MF_Y_V2(p, 1);
+ no_data_x = SS4_MFPACKET_NO_AX;
+ no_data_y = SS4_MFPACKET_NO_AY;
+@@ -2541,8 +2562,8 @@ static int alps_set_defaults_ss4_v2(struct psmouse *psmouse,
+
+ memset(otp, 0, sizeof(otp));
+
+- if (alps_get_otp_values_ss4_v2(psmouse, 0, &otp[0][0]) ||
+- alps_get_otp_values_ss4_v2(psmouse, 1, &otp[1][0]))
++ if (alps_get_otp_values_ss4_v2(psmouse, 1, &otp[1][0]) ||
++ alps_get_otp_values_ss4_v2(psmouse, 0, &otp[0][0]))
+ return -1;
+
+ alps_update_device_area_ss4_v2(otp, priv);
+diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
+index ed2d6879fa52..c80a7c76cb76 100644
+--- a/drivers/input/mouse/alps.h
++++ b/drivers/input/mouse/alps.h
+@@ -100,6 +100,10 @@ enum SS4_PACKET_ID {
+ ((_b[1 + _i * 3] << 5) & 0x1F00) \
+ )
+
++#define SS4_PLUS_STD_MF_X_V2(_b, _i) (((_b[0 + (_i) * 3] << 4) & 0x0070) | \
++ ((_b[1 + (_i) * 3] << 4) & 0x0F80) \
++ )
++
+ #define SS4_STD_MF_Y_V2(_b, _i) (((_b[1 + (_i) * 3] << 3) & 0x0010) | \
+ ((_b[2 + (_i) * 3] << 5) & 0x01E0) | \
+ ((_b[2 + (_i) * 3] << 4) & 0x0E00) \
+@@ -109,6 +113,10 @@ enum SS4_PACKET_ID {
+ ((_b[0 + (_i) * 3] >> 3) & 0x0010) \
+ )
+
++#define SS4_PLUS_BTL_MF_X_V2(_b, _i) (SS4_PLUS_STD_MF_X_V2(_b, _i) | \
++ ((_b[0 + (_i) * 3] >> 4) & 0x0008) \
++ )
++
+ #define SS4_BTL_MF_Y_V2(_b, _i) (SS4_STD_MF_Y_V2(_b, _i) | \
+ ((_b[0 + (_i) * 3] >> 3) & 0x0008) \
+ )
+diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
+index 3b0e9fb33afe..4f3d3543b2fb 100644
+--- a/drivers/input/mouse/elan_i2c_core.c
++++ b/drivers/input/mouse/elan_i2c_core.c
+@@ -1223,6 +1223,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
+ { "ELAN0000", 0 },
+ { "ELAN0100", 0 },
+ { "ELAN0600", 0 },
++ { "ELAN0602", 0 },
+ { "ELAN0605", 0 },
+ { "ELAN0608", 0 },
+ { "ELAN0605", 0 },
+diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
+index 922ea02edcc3..fb3810d35c44 100644
+--- a/drivers/input/mouse/trackpoint.c
++++ b/drivers/input/mouse/trackpoint.c
+@@ -265,7 +265,8 @@ static int trackpoint_start_protocol(struct psmouse *psmouse, unsigned char *fir
+ if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID)))
+ return -1;
+
+- if (param[0] != TP_MAGIC_IDENT)
++ /* add new TP ID. */
++ if (!(param[0] & TP_MAGIC_IDENT))
+ return -1;
+
+ if (firmware_id)
+diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
+index 5617ed3a7d7a..88055755f82e 100644
+--- a/drivers/input/mouse/trackpoint.h
++++ b/drivers/input/mouse/trackpoint.h
+@@ -21,8 +21,9 @@
+ #define TP_COMMAND 0xE2 /* Commands start with this */
+
+ #define TP_READ_ID 0xE1 /* Sent for device identification */
+-#define TP_MAGIC_IDENT 0x01 /* Sent after a TP_READ_ID followed */
++#define TP_MAGIC_IDENT 0x03 /* Sent after a TP_READ_ID followed */
+ /* by the firmware ID */
++ /* Firmware ID includes 0x1, 0x2, 0x3 */
+
+
+ /*
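
The trackpoint change above turns an equality test into a bitmask test so
that firmware IDs 0x1, 0x2 and 0x3 are all accepted. A quick stand-alone
check of which IDs pass the new test:

#include <stdio.h>

#define TP_MAGIC_IDENT 0x03 /* mask covering firmware IDs 0x1, 0x2, 0x3 */

int main(void)
{
    /* IDs 0x1-0x3 pass; 0x0 and 0x4 are still rejected. */
    for (unsigned int id = 0; id <= 4; id++)
        printf("id 0x%x accepted: %d\n", id, !!(id & TP_MAGIC_IDENT));
    return 0;
}
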
+diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
+index 4de8f4160bb8..09f9dd166827 100644
+--- a/drivers/iommu/amd_iommu_types.h
++++ b/drivers/iommu/amd_iommu_types.h
+@@ -571,7 +571,9 @@ struct amd_iommu {
+
+ static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev)
+ {
+- return container_of(dev, struct amd_iommu, iommu.dev);
++ struct iommu_device *iommu = dev_to_iommu_device(dev);
++
++ return container_of(iommu, struct amd_iommu, iommu);
+ }
+
+ #define ACPIHID_UID_LEN 256
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index fc2765ccdb57..76791fded8a4 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -4749,7 +4749,9 @@ static void intel_disable_iommus(void)
+
+ static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
+ {
+- return container_of(dev, struct intel_iommu, iommu.dev);
++ struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
++
++ return container_of(iommu_dev, struct intel_iommu, iommu);
+ }
+
+ static ssize_t intel_iommu_show_version(struct device *dev,
+diff --git a/drivers/iommu/iommu-sysfs.c b/drivers/iommu/iommu-sysfs.c
+index c58351ed61c1..36d1a7ce7fc4 100644
+--- a/drivers/iommu/iommu-sysfs.c
++++ b/drivers/iommu/iommu-sysfs.c
+@@ -62,32 +62,40 @@ int iommu_device_sysfs_add(struct iommu_device *iommu,
+ va_list vargs;
+ int ret;
+
+- device_initialize(&iommu->dev);
++ iommu->dev = kzalloc(sizeof(*iommu->dev), GFP_KERNEL);
++ if (!iommu->dev)
++ return -ENOMEM;
+
+- iommu->dev.class = &iommu_class;
+- iommu->dev.parent = parent;
+- iommu->dev.groups = groups;
++ device_initialize(iommu->dev);
++
++ iommu->dev->class = &iommu_class;
++ iommu->dev->parent = parent;
++ iommu->dev->groups = groups;
+
+ va_start(vargs, fmt);
+- ret = kobject_set_name_vargs(&iommu->dev.kobj, fmt, vargs);
++ ret = kobject_set_name_vargs(&iommu->dev->kobj, fmt, vargs);
+ va_end(vargs);
+ if (ret)
+ goto error;
+
+- ret = device_add(&iommu->dev);
++ ret = device_add(iommu->dev);
+ if (ret)
+ goto error;
+
++ dev_set_drvdata(iommu->dev, iommu);
++
+ return 0;
+
+ error:
+- put_device(&iommu->dev);
++ put_device(iommu->dev);
+ return ret;
+ }
+
+ void iommu_device_sysfs_remove(struct iommu_device *iommu)
+ {
+- device_unregister(&iommu->dev);
++ dev_set_drvdata(iommu->dev, NULL);
++ device_unregister(iommu->dev);
++ iommu->dev = NULL;
+ }
+ /*
+ * IOMMU drivers can indicate a device is managed by a given IOMMU using
+@@ -102,14 +110,14 @@ int iommu_device_link(struct iommu_device *iommu, struct device *link)
+ if (!iommu || IS_ERR(iommu))
+ return -ENODEV;
+
+- ret = sysfs_add_link_to_group(&iommu->dev.kobj, "devices",
++ ret = sysfs_add_link_to_group(&iommu->dev->kobj, "devices",
+ &link->kobj, dev_name(link));
+ if (ret)
+ return ret;
+
+- ret = sysfs_create_link_nowarn(&link->kobj, &iommu->dev.kobj, "iommu");
++ ret = sysfs_create_link_nowarn(&link->kobj, &iommu->dev->kobj, "iommu");
+ if (ret)
+- sysfs_remove_link_from_group(&iommu->dev.kobj, "devices",
++ sysfs_remove_link_from_group(&iommu->dev->kobj, "devices",
+ dev_name(link));
+
+ return ret;
+@@ -121,5 +129,5 @@ void iommu_device_unlink(struct iommu_device *iommu, struct device *link)
+ return;
+
+ sysfs_remove_link(&link->kobj, "iommu");
+- sysfs_remove_link_from_group(&iommu->dev.kobj, "devices", dev_name(link));
++ sysfs_remove_link_from_group(&iommu->dev->kobj, "devices", dev_name(link));
+ }
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 224e93aa6d23..510a580e0348 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1569,7 +1569,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
+ new_slave->delay = 0;
+ new_slave->link_failure_count = 0;
+
+- if (bond_update_speed_duplex(new_slave))
++ if (bond_update_speed_duplex(new_slave) &&
++ bond_needs_speed_duplex(bond))
+ new_slave->link = BOND_LINK_DOWN;
+
+ new_slave->last_rx = jiffies -
+@@ -2137,11 +2138,13 @@ static void bond_miimon_commit(struct bonding *bond)
+ continue;
+
+ case BOND_LINK_UP:
+- if (bond_update_speed_duplex(slave)) {
++ if (bond_update_speed_duplex(slave) &&
++ bond_needs_speed_duplex(bond)) {
+ slave->link = BOND_LINK_DOWN;
+- netdev_warn(bond->dev,
+- "failed to get link speed/duplex for %s\n",
+- slave->dev->name);
++ if (net_ratelimit())
++ netdev_warn(bond->dev,
++ "failed to get link speed/duplex for %s\n",
++ slave->dev->name);
+ continue;
+ }
+ bond_set_slave_link_state(slave, BOND_LINK_UP,
+diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
+index 83aab1e4c8c8..9f214f9fb48c 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/main.c
++++ b/drivers/net/ethernet/mellanox/mlx4/main.c
+@@ -430,7 +430,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
+ /* Virtual PCI function needs to determine UAR page size from
+ * firmware. Only master PCI function can set the uar page size
+ */
+- if (enable_4k_uar)
++ if (enable_4k_uar || !dev->persist->num_vfs)
+ dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT;
+ else
+ dev->uar_page_shift = PAGE_SHIFT;
+@@ -2275,7 +2275,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
+
+ dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
+
+- if (enable_4k_uar) {
++ if (enable_4k_uar || !dev->persist->num_vfs) {
+ init_hca.log_uar_sz = ilog2(dev->caps.num_uars) +
+ PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT;
+ init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12;
+diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+index 82bd6b0935f1..fd4a785431ac 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+@@ -881,8 +881,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
+ return NETDEV_TX_OK;
+
+ err_unmap:
+- --f;
+- while (f >= 0) {
++ while (--f >= 0) {
+ frag = &skb_shinfo(skb)->frags[f];
+ dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 9ee7d4275640..5bd954d12541 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1876,6 +1876,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
+
+ err_detach:
+ tun_detach_all(dev);
++ /* register_netdevice() already called tun_free_netdev() */
++ goto err_free_dev;
++
+ err_free_flow:
+ tun_flow_uninit(tun);
+ security_tun_dev_free_security(tun->security);
+diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
+index 10e5bf460139..f27d1344d198 100644
+--- a/drivers/ntb/ntb_transport.c
++++ b/drivers/ntb/ntb_transport.c
+@@ -920,10 +920,8 @@ static void ntb_transport_link_work(struct work_struct *work)
+ ntb_free_mw(nt, i);
+
+ /* if there's an actual failure, we should just bail */
+- if (rc < 0) {
+- ntb_link_disable(ndev);
++ if (rc < 0)
+ return;
+- }
+
+ out:
+ if (ntb_link_is_up(ndev, NULL, NULL) == 1)
+diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+index d283341cfe43..56cd4e5e51b2 100644
+--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
++++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+@@ -45,6 +45,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
+ {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
+ {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
+ {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
++ {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
+ {} /* Terminating entry */
+ };
+
+diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
+index 007a4f366086..1c4797e53f68 100644
+--- a/drivers/virtio/virtio_pci_common.c
++++ b/drivers/virtio/virtio_pci_common.c
+@@ -107,6 +107,7 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
+ {
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ const char *name = dev_name(&vp_dev->vdev.dev);
++ unsigned flags = PCI_IRQ_MSIX;
+ unsigned i, v;
+ int err = -ENOMEM;
+
+@@ -126,10 +127,13 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
+ GFP_KERNEL))
+ goto error;
+
++ if (desc) {
++ flags |= PCI_IRQ_AFFINITY;
++ desc->pre_vectors++; /* virtio config vector */
++ }
++
+ err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
+- nvectors, PCI_IRQ_MSIX |
+- (desc ? PCI_IRQ_AFFINITY : 0),
+- desc);
++ nvectors, flags, desc);
+ if (err < 0)
+ goto error;
+ vp_dev->msix_enabled = 1;
+diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
+index 56366e984076..569d3fb736be 100644
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -194,15 +194,20 @@ build_path_from_dentry_optional_prefix(struct dentry *direntry, bool prefix)
+ }
+
+ /*
++ * Don't allow path components longer than the server max.
+ * Don't allow the separator character in a path component.
+ * The VFS will not allow "/", but "\" is allowed by posix.
+ */
+ static int
+-check_name(struct dentry *direntry)
++check_name(struct dentry *direntry, struct cifs_tcon *tcon)
+ {
+ struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
+ int i;
+
++ if (unlikely(direntry->d_name.len >
++ tcon->fsAttrInfo.MaxPathNameComponentLength))
++ return -ENAMETOOLONG;
++
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) {
+ for (i = 0; i < direntry->d_name.len; i++) {
+ if (direntry->d_name.name[i] == '\\') {
+@@ -500,10 +505,6 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
+ return finish_no_open(file, res);
+ }
+
+- rc = check_name(direntry);
+- if (rc)
+- return rc;
+-
+ xid = get_xid();
+
+ cifs_dbg(FYI, "parent inode = 0x%p name is: %pd and dentry = 0x%p\n",
+@@ -516,6 +517,11 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
+ }
+
+ tcon = tlink_tcon(tlink);
++
++ rc = check_name(direntry, tcon);
++ if (rc)
++ goto out_free_xid;
++
+ server = tcon->ses->server;
+
+ if (server->ops->new_lease_key)
+@@ -776,7 +782,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
+ }
+ pTcon = tlink_tcon(tlink);
+
+- rc = check_name(direntry);
++ rc = check_name(direntry, pTcon);
+ if (rc)
+ goto lookup_out;
+
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index e4afdaae743f..c398f393f2b3 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -3195,8 +3195,8 @@ copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
+ kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
+ le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
+ kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
+- kst->f_bfree = le64_to_cpu(pfs_inf->ActualAvailableAllocationUnits);
+- kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
++ kst->f_bfree = kst->f_bavail =
++ le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
+ return;
+ }
+
+diff --git a/fs/dax.c b/fs/dax.c
+index 9187f3b07f3e..f3ac7674b5cb 100644
+--- a/fs/dax.c
++++ b/fs/dax.c
+@@ -1380,6 +1380,16 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf,
+
+ trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
+
++ /*
++ * Make sure that the faulting address's PMD offset (color) matches
++ * the PMD offset from the start of the file. This is necessary so
++ * that a PMD range in the page table overlaps exactly with a PMD
++ * range in the radix tree.
++ */
++ if ((vmf->pgoff & PG_PMD_COLOUR) !=
++ ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
++ goto fallback;
++
+ /* Fall back to PTEs if we're going to COW */
+ if (write && !(vma->vm_flags & VM_SHARED))
+ goto fallback;
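
The new DAX check compares the "colour" (the page offset within a PMD) of
the faulting virtual address with that of the file offset; only when the
two match can a page-table PMD overlap a radix-tree PMD exactly. A quick
arithmetic sketch, assuming x86-64 constants (4 KiB pages, 2 MiB PMDs, so
PG_PMD_COLOUR is 511):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PMD_SHIFT 21
#define PG_PMD_COLOUR ((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1) /* 511 */

/* Nonzero when a huge-page mapping can line up with the file layout. */
static int pmd_colour_ok(uint64_t pgoff, uint64_t vaddr)
{
    return (pgoff & PG_PMD_COLOUR) == ((vaddr >> PAGE_SHIFT) & PG_PMD_COLOUR);
}

int main(void)
{
    /* file offset 0 mapped at a 2 MiB-aligned address: PMD fault is fine */
    printf("%d\n", pmd_colour_ok(0, 0x200000));
    /* shifted by one page: colours differ, must fall back to PTEs */
    printf("%d\n", pmd_colour_ok(1, 0x200000));
    return 0;
}
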
1461 +diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
1462 +index 26780d53a6f9..ed8d6b73d12a 100644
1463 +--- a/fs/nfsd/nfs4xdr.c
1464 ++++ b/fs/nfsd/nfs4xdr.c
1465 +@@ -144,7 +144,7 @@ static void next_decode_page(struct nfsd4_compoundargs *argp)
1466 + argp->p = page_address(argp->pagelist[0]);
1467 + argp->pagelist++;
1468 + if (argp->pagelen < PAGE_SIZE) {
1469 +- argp->end = argp->p + (argp->pagelen>>2);
1470 ++ argp->end = argp->p + XDR_QUADLEN(argp->pagelen);
1471 + argp->pagelen = 0;
1472 + } else {
1473 + argp->end = argp->p + (PAGE_SIZE>>2);
1474 +@@ -1279,9 +1279,7 @@ nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write)
1475 + argp->pagelen -= pages * PAGE_SIZE;
1476 + len -= pages * PAGE_SIZE;
1477 +
1478 +- argp->p = (__be32 *)page_address(argp->pagelist[0]);
1479 +- argp->pagelist++;
1480 +- argp->end = argp->p + XDR_QUADLEN(PAGE_SIZE);
1481 ++ next_decode_page(argp);
1482 + }
1483 + argp->p += XDR_QUADLEN(len);
1484 +
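Editor's note: both nfsd hunks hinge on XDR_QUADLEN() rounding a byte count up to whole 4-byte XDR words, where the open-coded pagelen>>2 truncated and could leave the end pointer short of a trailing partial word. A quick demonstration using the macro's actual definition:

#include <stdio.h>

#define XDR_QUADLEN(l) (((l) + 3) >> 2) /* as in include/linux/sunrpc/xdr.h */

int main(void)
{
	unsigned int pagelen = 10;            /* 10 bytes of XDR data */

	printf("%u\n", pagelen >> 2);         /* 2 words: last 2 bytes lost */
	printf("%u\n", XDR_QUADLEN(pagelen)); /* 3 words: covers all 10 */
	return 0;
}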
1485 +diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
1486 +index 314a0b9219c6..a06342f11259 100644
1487 +--- a/include/asm-generic/vmlinux.lds.h
1488 ++++ b/include/asm-generic/vmlinux.lds.h
1489 +@@ -59,6 +59,22 @@
1490 + /* Align . to a 8 byte boundary equals to maximum function alignment. */
1491 + #define ALIGN_FUNCTION() . = ALIGN(8)
1492 +
1493 ++/*
1494 ++ * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which
1495 ++ * generates .data.identifier sections, which need to be pulled in with
1496 ++ * .data. We don't want to pull in .data..other sections, which Linux
1497 ++ * has defined. Same for text and bss.
1498 ++ */
1499 ++#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
1500 ++#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
1501 ++#define DATA_MAIN .data .data.[0-9a-zA-Z_]*
1502 ++#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
1503 ++#else
1504 ++#define TEXT_MAIN .text
1505 ++#define DATA_MAIN .data
1506 ++#define BSS_MAIN .bss
1507 ++#endif
1508 ++
1509 + /*
1510 + * Align to a 32 byte boundary equal to the
1511 + * alignment gcc 4.5 uses for a struct
1512 +@@ -199,12 +215,9 @@
1513 +
1514 + /*
1515 + * .data section
1516 +- * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections generates
1517 +- * .data.identifier which needs to be pulled in with .data, but don't want to
1518 +- * pull in .data..stuff which has its own requirements. Same for bss.
1519 + */
1520 + #define DATA_DATA \
1521 +- *(.data .data.[0-9a-zA-Z_]*) \
1522 ++ *(DATA_MAIN) \
1523 + *(.ref.data) \
1524 + *(.data..shared_aligned) /* percpu related */ \
1525 + MEM_KEEP(init.data) \
1526 +@@ -435,16 +448,17 @@
1527 + VMLINUX_SYMBOL(__security_initcall_end) = .; \
1528 + }
1529 +
1530 +-/* .text section. Map to function alignment to avoid address changes
1531 ++/*
1532 ++ * .text section. Map to function alignment to avoid address changes
1533 + * during second ld run in second ld pass when generating System.map
1534 +- * LD_DEAD_CODE_DATA_ELIMINATION option enables -ffunction-sections generates
1535 +- * .text.identifier which needs to be pulled in with .text , but some
1536 +- * architectures define .text.foo which is not intended to be pulled in here.
1537 +- * Those enabling LD_DEAD_CODE_DATA_ELIMINATION must ensure they don't have
1538 +- * conflicting section names, and must pull in .text.[0-9a-zA-Z_]* */
1539 ++ *
1540 ++ * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead
1541 ++ * code elimination is enabled, so these sections should be converted
1542 ++ * to use ".." first.
1543 ++ */
1544 + #define TEXT_TEXT \
1545 + ALIGN_FUNCTION(); \
1546 +- *(.text.hot .text .text.fixup .text.unlikely) \
1547 ++ *(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \
1548 + *(.ref.text) \
1549 + MEM_KEEP(init.text) \
1550 + MEM_KEEP(exit.text) \
1551 +@@ -613,7 +627,7 @@
1552 + BSS_FIRST_SECTIONS \
1553 + *(.bss..page_aligned) \
1554 + *(.dynbss) \
1555 +- *(.bss .bss.[0-9a-zA-Z_]*) \
1556 ++ *(BSS_MAIN) \
1557 + *(COMMON) \
1558 + }
1559 +
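Editor's note on the TEXT_MAIN/DATA_MAIN/BSS_MAIN patterns: -ffunction-sections and -fdata-sections make gcc emit one section per symbol, named with a single dot, while the kernel's special-purpose sections use a double dot (e.g. .data..shared_aligned) precisely so the [0-9a-zA-Z_]* globs above cannot match them. A small file to observe this, as a sketch (compile with gcc -c -ffunction-sections -fdata-sections and inspect with objdump -h):

int boot_count = 1; /* lands in .data.boot_count */
int scratch;        /* lands in .bss.scratch */

int add_one(int x)  /* lands in .text.add_one */
{
	return x + 1;
}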
1560 +diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
1561 +index d5093b52b485..88f4289e7eee 100644
1562 +--- a/include/linux/bpf_verifier.h
1563 ++++ b/include/linux/bpf_verifier.h
1564 +@@ -43,6 +43,7 @@ struct bpf_reg_state {
1565 + u32 min_align;
1566 + u32 aux_off;
1567 + u32 aux_off_align;
1568 ++ bool value_from_signed;
1569 + };
1570 +
1571 + enum bpf_stack_slot_type {
1572 +diff --git a/include/linux/fs.h b/include/linux/fs.h
1573 +index 803e5a9b2654..d6d525039496 100644
1574 +--- a/include/linux/fs.h
1575 ++++ b/include/linux/fs.h
1576 +@@ -891,9 +891,9 @@ static inline struct file *get_file(struct file *f)
1577 + /* Page cache limit. The filesystems should put that into their s_maxbytes
1578 + limits, otherwise bad things can happen in VM. */
1579 + #if BITS_PER_LONG==32
1580 +-#define MAX_LFS_FILESIZE (((loff_t)PAGE_SIZE << (BITS_PER_LONG-1))-1)
1581 ++#define MAX_LFS_FILESIZE ((loff_t)ULONG_MAX << PAGE_SHIFT)
1582 + #elif BITS_PER_LONG==64
1583 +-#define MAX_LFS_FILESIZE ((loff_t)0x7fffffffffffffffLL)
1584 ++#define MAX_LFS_FILESIZE ((loff_t)LLONG_MAX)
1585 + #endif
1586 +
1587 + #define FL_POSIX 1
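Editor's note on the 32-bit arithmetic behind the MAX_LFS_FILESIZE change: with 4 KiB pages the old formula capped files at ((loff_t)4096 << 31) - 1 = 2^43 - 1, just under 8 TiB, yet the page cache can index ULONG_MAX pages, so the true ceiling is ULONG_MAX << PAGE_SHIFT, close to 16 TiB. A sketch of the two values, hard-coding what a 32-bit build would see:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 32-bit build: PAGE_SIZE 4096, PAGE_SHIFT 12,
	 * BITS_PER_LONG 32, ULONG_MAX 0xffffffff. */
	int64_t old_max = ((int64_t)4096 << 31) - 1;   /* 2^43 - 1, ~8 TiB */
	int64_t new_max = (int64_t)0xffffffffLL << 12; /* ~2^44,  ~16 TiB */

	printf("old: %lld\n", (long long)old_max);
	printf("new: %lld\n", (long long)new_max);
	return 0;
}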
1588 +diff --git a/include/linux/iommu.h b/include/linux/iommu.h
1589 +index 2cb54adc4a33..176f7569d874 100644
1590 +--- a/include/linux/iommu.h
1591 ++++ b/include/linux/iommu.h
1592 +@@ -240,7 +240,7 @@ struct iommu_device {
1593 + struct list_head list;
1594 + const struct iommu_ops *ops;
1595 + struct fwnode_handle *fwnode;
1596 +- struct device dev;
1597 ++ struct device *dev;
1598 + };
1599 +
1600 + int iommu_device_register(struct iommu_device *iommu);
1601 +@@ -265,6 +265,11 @@ static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
1602 + iommu->fwnode = fwnode;
1603 + }
1604 +
1605 ++static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
1606 ++{
1607 ++ return (struct iommu_device *)dev_get_drvdata(dev);
1608 ++}
1609 ++
1610 + #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
1611 + #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
1612 + #define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */
1613 +@@ -589,6 +594,11 @@ static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
1614 + {
1615 + }
1616 +
1617 ++static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
1618 ++{
1619 ++ return NULL;
1620 ++}
1621 ++
1622 + static inline void iommu_device_unregister(struct iommu_device *iommu)
1623 + {
1624 + }
1625 +diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
1626 +index 6b2e0dd88569..feff771e8ea0 100644
1627 +--- a/include/linux/ptr_ring.h
1628 ++++ b/include/linux/ptr_ring.h
1629 +@@ -371,9 +371,9 @@ static inline void *ptr_ring_consume_bh(struct ptr_ring *r)
1630 + __PTR_RING_PEEK_CALL_v; \
1631 + })
1632 +
1633 +-static inline void **__ptr_ring_init_queue_alloc(int size, gfp_t gfp)
1634 ++static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
1635 + {
1636 +- return kzalloc(ALIGN(size * sizeof(void *), SMP_CACHE_BYTES), gfp);
1637 ++ return kcalloc(size, sizeof(void *), gfp);
1638 + }
1639 +
1640 + static inline void __ptr_ring_set_size(struct ptr_ring *r, int size)
1641 +@@ -462,7 +462,8 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
1642 + * In particular if you consume ring in interrupt or BH context, you must
1643 + * disable interrupts/BH when doing so.
1644 + */
1645 +-static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings,
1646 ++static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
1647 ++ unsigned int nrings,
1648 + int size,
1649 + gfp_t gfp, void (*destroy)(void *))
1650 + {
1651 +@@ -470,7 +471,7 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings,
1652 + void ***queues;
1653 + int i;
1654 +
1655 +- queues = kmalloc(nrings * sizeof *queues, gfp);
1656 ++ queues = kmalloc_array(nrings, sizeof(*queues), gfp);
1657 + if (!queues)
1658 + goto noqueues;
1659 +
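Editor's note: the kcalloc()/kmalloc_array() conversions matter because the two-argument allocators check the count-times-size multiplication for overflow, whereas the open-coded size * sizeof(void *) wraps silently for a huge size. A userspace sketch of the guard (xcalloc_like is an invented name):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void *xcalloc_like(size_t n, size_t size)
{
	if (size && n > SIZE_MAX / size)
		return NULL;        /* multiplication would wrap: refuse */
	return calloc(n, size);
}

int main(void)
{
	/* A ring size large enough that n * sizeof(void *) wraps. */
	void *p = xcalloc_like(SIZE_MAX / 4, sizeof(void *));

	printf("%s\n", p ? "allocated" : "rejected overflow");
	free(p);
	return 0;
}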
1660 +diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h
1661 +index f4dfade428f0..be8b902b5845 100644
1662 +--- a/include/linux/skb_array.h
1663 ++++ b/include/linux/skb_array.h
1664 +@@ -162,7 +162,8 @@ static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
1665 + }
1666 +
1667 + static inline int skb_array_resize_multiple(struct skb_array **rings,
1668 +- int nrings, int size, gfp_t gfp)
1669 ++ int nrings, unsigned int size,
1670 ++ gfp_t gfp)
1671 + {
1672 + BUILD_BUG_ON(offsetof(struct skb_array, ring));
1673 + return ptr_ring_resize_multiple((struct ptr_ring **)rings,
1674 +diff --git a/include/net/bonding.h b/include/net/bonding.h
1675 +index b00508d22e0a..b2e68657a216 100644
1676 +--- a/include/net/bonding.h
1677 ++++ b/include/net/bonding.h
1678 +@@ -277,6 +277,11 @@ static inline bool bond_is_lb(const struct bonding *bond)
1679 + BOND_MODE(bond) == BOND_MODE_ALB;
1680 + }
1681 +
1682 ++static inline bool bond_needs_speed_duplex(const struct bonding *bond)
1683 ++{
1684 ++ return BOND_MODE(bond) == BOND_MODE_8023AD || bond_is_lb(bond);
1685 ++}
1686 ++
1687 + static inline bool bond_is_nondyn_tlb(const struct bonding *bond)
1688 + {
1689 + return (BOND_MODE(bond) == BOND_MODE_TLB) &&
1690 +diff --git a/include/net/ip.h b/include/net/ip.h
1691 +index 821cedcc8e73..0cf7f5a65fe6 100644
1692 +--- a/include/net/ip.h
1693 ++++ b/include/net/ip.h
1694 +@@ -352,7 +352,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
1695 + !forwarding)
1696 + return dst_mtu(dst);
1697 +
1698 +- return min(dst->dev->mtu, IP_MAX_MTU);
1699 ++ return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
1700 + }
1701 +
1702 + static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
1703 +@@ -364,7 +364,7 @@ static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
1704 + return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
1705 + }
1706 +
1707 +- return min(skb_dst(skb)->dev->mtu, IP_MAX_MTU);
1708 ++ return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
1709 + }
1710 +
1711 + u32 ip_idents_reserve(u32 hash, int segs);
1712 +diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
1713 +index 22e52093bfda..db5b6b6346b3 100644
1714 +--- a/include/net/sch_generic.h
1715 ++++ b/include/net/sch_generic.h
1716 +@@ -785,8 +785,11 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
1717 + old = *pold;
1718 + *pold = new;
1719 + if (old != NULL) {
1720 +- qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
1721 ++ unsigned int qlen = old->q.qlen;
1722 ++ unsigned int backlog = old->qstats.backlog;
1723 ++
1724 + qdisc_reset(old);
1725 ++ qdisc_tree_reduce_backlog(old, qlen, backlog);
1726 + }
1727 + sch_tree_unlock(sch);
1728 +
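Editor's note: the reordering in qdisc_replace() is needed because qdisc_reset() zeroes the queue counters; sampled afterwards, qlen and backlog would always read 0 and the parent qdisc's accounting would never be reduced. The save-then-reset shape in isolation, with a stand-in struct:

#include <stdio.h>

struct fake_qdisc {
	unsigned int qlen;
	unsigned int backlog;
};

static void fake_reset(struct fake_qdisc *q)
{
	q->qlen = 0;        /* qdisc_reset() clears the counters */
	q->backlog = 0;
}

int main(void)
{
	struct fake_qdisc old = { .qlen = 7, .backlog = 9000 };

	/* Save the counters first, as the patched qdisc_replace() does... */
	unsigned int qlen = old.qlen;
	unsigned int backlog = old.backlog;

	fake_reset(&old);

	/* ...so the parent still learns the pre-reset amounts. */
	printf("reduce backlog by qlen=%u bytes=%u\n", qlen, backlog);
	return 0;
}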
1729 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
1730 +index a8a725697bed..1e64ee3dd650 100644
1731 +--- a/kernel/bpf/verifier.c
1732 ++++ b/kernel/bpf/verifier.c
1733 +@@ -504,6 +504,7 @@ static void reset_reg_range_values(struct bpf_reg_state *regs, u32 regno)
1734 + {
1735 + regs[regno].min_value = BPF_REGISTER_MIN_RANGE;
1736 + regs[regno].max_value = BPF_REGISTER_MAX_RANGE;
1737 ++ regs[regno].value_from_signed = false;
1738 + regs[regno].min_align = 0;
1739 + }
1740 +
1741 +@@ -777,12 +778,13 @@ static int check_ctx_access(struct bpf_verifier_env *env, int off, int size,
1742 + return -EACCES;
1743 + }
1744 +
1745 +-static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
1746 ++static bool __is_pointer_value(bool allow_ptr_leaks,
1747 ++ const struct bpf_reg_state *reg)
1748 + {
1749 +- if (env->allow_ptr_leaks)
1750 ++ if (allow_ptr_leaks)
1751 + return false;
1752 +
1753 +- switch (env->cur_state.regs[regno].type) {
1754 ++ switch (reg->type) {
1755 + case UNKNOWN_VALUE:
1756 + case CONST_IMM:
1757 + return false;
1758 +@@ -791,6 +793,11 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
1759 + }
1760 + }
1761 +
1762 ++static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
1763 ++{
1764 ++ return __is_pointer_value(env->allow_ptr_leaks, &env->cur_state.regs[regno]);
1765 ++}
1766 ++
1767 + static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg,
1768 + int off, int size, bool strict)
1769 + {
1770 +@@ -1650,6 +1657,65 @@ static int evaluate_reg_alu(struct bpf_verifier_env *env, struct bpf_insn *insn)
1771 + return 0;
1772 + }
1773 +
1774 ++static int evaluate_reg_imm_alu_unknown(struct bpf_verifier_env *env,
1775 ++ struct bpf_insn *insn)
1776 ++{
1777 ++ struct bpf_reg_state *regs = env->cur_state.regs;
1778 ++ struct bpf_reg_state *dst_reg = &regs[insn->dst_reg];
1779 ++ struct bpf_reg_state *src_reg = &regs[insn->src_reg];
1780 ++ u8 opcode = BPF_OP(insn->code);
1781 ++ s64 imm_log2 = __ilog2_u64((long long)dst_reg->imm);
1782 ++
1783 ++ /* BPF_X code with src_reg->type UNKNOWN_VALUE here. */
1784 ++ if (src_reg->imm > 0 && dst_reg->imm) {
1785 ++ switch (opcode) {
1786 ++ case BPF_ADD:
1787 ++ /* dreg += sreg
1788 ++ * where both have zero upper bits. Adding them
1789 ++ * can only result making one more bit non-zero
1790 ++ * in the larger value.
1791 ++ * can only result in making one more bit non-zero
1792 ++ * 0xffff (imm=48) + 0xffff = 0x1fffe (imm=47)
1793 ++ */
1794 ++ dst_reg->imm = min(src_reg->imm, 63 - imm_log2);
1795 ++ dst_reg->imm--;
1796 ++ break;
1797 ++ case BPF_AND:
1798 ++ /* dreg &= sreg
1799 ++ * AND cannot extend zero bits, only shrink them
1800 ++ * Ex. 0x00..00ffffff
1801 ++ * & 0x0f..ffffffff
1802 ++ * ----------------
1803 ++ * 0x00..00ffffff
1804 ++ */
1805 ++ dst_reg->imm = max(src_reg->imm, 63 - imm_log2);
1806 ++ break;
1807 ++ case BPF_OR:
1808 ++ /* dreg |= sreg
1809 ++ * OR can only extend zero bits
1810 ++ * Ex. 0x00..00ffffff
1811 ++ * | 0x0f..ffffffff
1812 ++ * ----------------
1813 ++ * 0x0f..00ffffff
1814 ++ */
1815 ++ dst_reg->imm = min(src_reg->imm, 63 - imm_log2);
1816 ++ break;
1817 ++ case BPF_SUB:
1818 ++ case BPF_MUL:
1819 ++ case BPF_RSH:
1820 ++ case BPF_LSH:
1821 ++ /* These may be flushed out later */
1822 ++ default:
1823 ++ mark_reg_unknown_value(regs, insn->dst_reg);
1824 ++ }
1825 ++ } else {
1826 ++ mark_reg_unknown_value(regs, insn->dst_reg);
1827 ++ }
1828 ++
1829 ++ dst_reg->type = UNKNOWN_VALUE;
1830 ++ return 0;
1831 ++}
1832 ++
1833 + static int evaluate_reg_imm_alu(struct bpf_verifier_env *env,
1834 + struct bpf_insn *insn)
1835 + {
1836 +@@ -1659,6 +1725,9 @@ static int evaluate_reg_imm_alu(struct bpf_verifier_env *env,
1837 + u8 opcode = BPF_OP(insn->code);
1838 + u64 dst_imm = dst_reg->imm;
1839 +
1840 ++ if (BPF_SRC(insn->code) == BPF_X && src_reg->type == UNKNOWN_VALUE)
1841 ++ return evaluate_reg_imm_alu_unknown(env, insn);
1842 ++
1843 + /* dst_reg->type == CONST_IMM here. Simulate execution of insns
1844 + * containing ALU ops. Don't care about overflow or negative
1845 + * values, just add/sub/... them; registers are in u64.
1846 +@@ -1763,10 +1832,24 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
1847 + dst_align = dst_reg->min_align;
1848 +
1849 + /* We don't know anything about what was done to this register, mark it
1850 +- * as unknown.
1851 ++ * as unknown. Also, if both derived bounds came from signed/unsigned
1852 ++ * mixed compares and one side is unbounded, we cannot really do anything
1853 ++ * with them as boundaries cannot be trusted. Thus, arithmetic of two
1854 ++ * regs of such kind will get invalidated bounds on the dst side.
1855 + */
1856 +- if (min_val == BPF_REGISTER_MIN_RANGE &&
1857 +- max_val == BPF_REGISTER_MAX_RANGE) {
1858 ++ if ((min_val == BPF_REGISTER_MIN_RANGE &&
1859 ++ max_val == BPF_REGISTER_MAX_RANGE) ||
1860 ++ (BPF_SRC(insn->code) == BPF_X &&
1861 ++ ((min_val != BPF_REGISTER_MIN_RANGE &&
1862 ++ max_val == BPF_REGISTER_MAX_RANGE) ||
1863 ++ (min_val == BPF_REGISTER_MIN_RANGE &&
1864 ++ max_val != BPF_REGISTER_MAX_RANGE) ||
1865 ++ (dst_reg->min_value != BPF_REGISTER_MIN_RANGE &&
1866 ++ dst_reg->max_value == BPF_REGISTER_MAX_RANGE) ||
1867 ++ (dst_reg->min_value == BPF_REGISTER_MIN_RANGE &&
1868 ++ dst_reg->max_value != BPF_REGISTER_MAX_RANGE)) &&
1869 ++ regs[insn->dst_reg].value_from_signed !=
1870 ++ regs[insn->src_reg].value_from_signed)) {
1871 + reset_reg_range_values(regs, insn->dst_reg);
1872 + return;
1873 + }
1874 +@@ -1775,10 +1858,12 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
1875 + * do our normal operations to the register, we need to set the values
1876 + * to the min/max since they are undefined.
1877 + */
1878 +- if (min_val == BPF_REGISTER_MIN_RANGE)
1879 +- dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
1880 +- if (max_val == BPF_REGISTER_MAX_RANGE)
1881 +- dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
1882 ++ if (opcode != BPF_SUB) {
1883 ++ if (min_val == BPF_REGISTER_MIN_RANGE)
1884 ++ dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
1885 ++ if (max_val == BPF_REGISTER_MAX_RANGE)
1886 ++ dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
1887 ++ }
1888 +
1889 + switch (opcode) {
1890 + case BPF_ADD:
1891 +@@ -1789,10 +1874,17 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
1892 + dst_reg->min_align = min(src_align, dst_align);
1893 + break;
1894 + case BPF_SUB:
1895 ++ /* If one of our values was at the end of our ranges, then the
1896 ++ * _opposite_ value in the dst_reg goes to the end of our range.
1897 ++ */
1898 ++ if (min_val == BPF_REGISTER_MIN_RANGE)
1899 ++ dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
1900 ++ if (max_val == BPF_REGISTER_MAX_RANGE)
1901 ++ dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
1902 + if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
1903 +- dst_reg->min_value -= min_val;
1904 ++ dst_reg->min_value -= max_val;
1905 + if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
1906 +- dst_reg->max_value -= max_val;
1907 ++ dst_reg->max_value -= min_val;
1908 + dst_reg->min_align = min(src_align, dst_align);
1909 + break;
1910 + case BPF_MUL:
1911 +@@ -1953,6 +2045,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
1912 + regs[insn->dst_reg].max_value = insn->imm;
1913 + regs[insn->dst_reg].min_value = insn->imm;
1914 + regs[insn->dst_reg].min_align = calc_align(insn->imm);
1915 ++ regs[insn->dst_reg].value_from_signed = false;
1916 + }
1917 +
1918 + } else if (opcode > BPF_END) {
1919 +@@ -2128,40 +2221,63 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
1920 + struct bpf_reg_state *false_reg, u64 val,
1921 + u8 opcode)
1922 + {
1923 ++ bool value_from_signed = true;
1924 ++ bool is_range = true;
1925 ++
1926 + switch (opcode) {
1927 + case BPF_JEQ:
1928 + /* If this is false then we know nothing Jon Snow, but if it is
1929 + * true then we know for sure.
1930 + */
1931 + true_reg->max_value = true_reg->min_value = val;
1932 ++ is_range = false;
1933 + break;
1934 + case BPF_JNE:
1935 + /* If this is true we know nothing Jon Snow, but if it is false
1936 + * we know the value for sure;
1937 + */
1938 + false_reg->max_value = false_reg->min_value = val;
1939 ++ is_range = false;
1940 + break;
1941 + case BPF_JGT:
1942 +- /* Unsigned comparison, the minimum value is 0. */
1943 +- false_reg->min_value = 0;
1944 ++ value_from_signed = false;
1945 + /* fallthrough */
1946 + case BPF_JSGT:
1947 ++ if (true_reg->value_from_signed != value_from_signed)
1948 ++ reset_reg_range_values(true_reg, 0);
1949 ++ if (false_reg->value_from_signed != value_from_signed)
1950 ++ reset_reg_range_values(false_reg, 0);
1951 ++ if (opcode == BPF_JGT) {
1952 ++ /* Unsigned comparison, the minimum value is 0. */
1953 ++ false_reg->min_value = 0;
1954 ++ }
1955 + /* If this is false then we know the maximum val is val,
1956 + * otherwise we know the min val is val+1.
1957 + */
1958 + false_reg->max_value = val;
1959 ++ false_reg->value_from_signed = value_from_signed;
1960 + true_reg->min_value = val + 1;
1961 ++ true_reg->value_from_signed = value_from_signed;
1962 + break;
1963 + case BPF_JGE:
1964 +- /* Unsigned comparison, the minimum value is 0. */
1965 +- false_reg->min_value = 0;
1966 ++ value_from_signed = false;
1967 + /* fallthrough */
1968 + case BPF_JSGE:
1969 ++ if (true_reg->value_from_signed != value_from_signed)
1970 ++ reset_reg_range_values(true_reg, 0);
1971 ++ if (false_reg->value_from_signed != value_from_signed)
1972 ++ reset_reg_range_values(false_reg, 0);
1973 ++ if (opcode == BPF_JGE) {
1974 ++ /* Unsigned comparison, the minimum value is 0. */
1975 ++ false_reg->min_value = 0;
1976 ++ }
1977 + /* If this is false then we know the maximum value is val - 1,
1978 + * otherwise we know the mimimum value is val.
1979 + * otherwise we know the minimum value is val.
1980 + false_reg->max_value = val - 1;
1981 ++ false_reg->value_from_signed = value_from_signed;
1982 + true_reg->min_value = val;
1983 ++ true_reg->value_from_signed = value_from_signed;
1984 + break;
1985 + default:
1986 + break;
1987 +@@ -2169,6 +2285,12 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
1988 +
1989 + check_reg_overflow(false_reg);
1990 + check_reg_overflow(true_reg);
1991 ++ if (is_range) {
1992 ++ if (__is_pointer_value(false, false_reg))
1993 ++ reset_reg_range_values(false_reg, 0);
1994 ++ if (__is_pointer_value(false, true_reg))
1995 ++ reset_reg_range_values(true_reg, 0);
1996 ++ }
1997 + }
1998 +
1999 + /* Same as above, but for the case that dst_reg is a CONST_IMM reg and src_reg
2000 +@@ -2178,41 +2300,64 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
2001 + struct bpf_reg_state *false_reg, u64 val,
2002 + u8 opcode)
2003 + {
2004 ++ bool value_from_signed = true;
2005 ++ bool is_range = true;
2006 ++
2007 + switch (opcode) {
2008 + case BPF_JEQ:
2009 + /* If this is false then we know nothing Jon Snow, but if it is
2010 + * true then we know for sure.
2011 + */
2012 + true_reg->max_value = true_reg->min_value = val;
2013 ++ is_range = false;
2014 + break;
2015 + case BPF_JNE:
2016 + /* If this is true we know nothing Jon Snow, but if it is false
2017 + * we know the value for sure;
2018 + */
2019 + false_reg->max_value = false_reg->min_value = val;
2020 ++ is_range = false;
2021 + break;
2022 + case BPF_JGT:
2023 +- /* Unsigned comparison, the minimum value is 0. */
2024 +- true_reg->min_value = 0;
2025 ++ value_from_signed = false;
2026 + /* fallthrough */
2027 + case BPF_JSGT:
2028 ++ if (true_reg->value_from_signed != value_from_signed)
2029 ++ reset_reg_range_values(true_reg, 0);
2030 ++ if (false_reg->value_from_signed != value_from_signed)
2031 ++ reset_reg_range_values(false_reg, 0);
2032 ++ if (opcode == BPF_JGT) {
2033 ++ /* Unsigned comparison, the minimum value is 0. */
2034 ++ true_reg->min_value = 0;
2035 ++ }
2036 + /*
2037 + * If this is false, then the val is <= the register, if it is
2038 + * true the register <= to the val.
2039 + */
2040 + false_reg->min_value = val;
2041 ++ false_reg->value_from_signed = value_from_signed;
2042 + true_reg->max_value = val - 1;
2043 ++ true_reg->value_from_signed = value_from_signed;
2044 + break;
2045 + case BPF_JGE:
2046 +- /* Unsigned comparison, the minimum value is 0. */
2047 +- true_reg->min_value = 0;
2048 ++ value_from_signed = false;
2049 + /* fallthrough */
2050 + case BPF_JSGE:
2051 ++ if (true_reg->value_from_signed != value_from_signed)
2052 ++ reset_reg_range_values(true_reg, 0);
2053 ++ if (false_reg->value_from_signed != value_from_signed)
2054 ++ reset_reg_range_values(false_reg, 0);
2055 ++ if (opcode == BPF_JGE) {
2056 ++ /* Unsigned comparison, the minimum value is 0. */
2057 ++ true_reg->min_value = 0;
2058 ++ }
2059 + /* If this is false then constant < register, if it is true then
2060 + * the register < constant.
2061 + */
2062 + false_reg->min_value = val + 1;
2063 ++ false_reg->value_from_signed = value_from_signed;
2064 + true_reg->max_value = val;
2065 ++ true_reg->value_from_signed = value_from_signed;
2066 + break;
2067 + default:
2068 + break;
2069 +@@ -2220,6 +2365,12 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
2070 +
2071 + check_reg_overflow(false_reg);
2072 + check_reg_overflow(true_reg);
2073 ++ if (is_range) {
2074 ++ if (__is_pointer_value(false, false_reg))
2075 ++ reset_reg_range_values(false_reg, 0);
2076 ++ if (__is_pointer_value(false, true_reg))
2077 ++ reset_reg_range_values(true_reg, 0);
2078 ++ }
2079 + }
2080 +
2081 + static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
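Editor's note: the value_from_signed machinery above exists because a bound derived from a signed comparison says nothing about the value's unsigned range, and vice versa; mixing the two lets a negative value masquerade as a small index. A minimal illustration of the trap:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t v = -1;

	/* A signed check appears to bound the value... */
	if (v < 16)
		printf("signed bound holds: v < 16\n");

	/* ...but interpreted unsigned, the same bits are enormous, so an
	 * unsigned max_value derived from the signed test would be wrong. */
	printf("unsigned view: %llu\n", (unsigned long long)(uint64_t)v);
	return 0;
}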
2082 +diff --git a/kernel/events/core.c b/kernel/events/core.c
2083 +index dbb3d273d497..51ecc01b78ff 100644
2084 +--- a/kernel/events/core.c
2085 ++++ b/kernel/events/core.c
2086 +@@ -9996,28 +9996,27 @@ SYSCALL_DEFINE5(perf_event_open,
2087 + goto err_context;
2088 +
2089 + /*
2090 +- * Do not allow to attach to a group in a different
2091 +- * task or CPU context:
2092 ++ * Make sure we're both events for the same CPU;
2093 ++ * grouping events for different CPUs is broken; since
2094 ++ * you can never concurrently schedule them anyhow.
2095 + */
2096 +- if (move_group) {
2097 +- /*
2098 +- * Make sure we're both on the same task, or both
2099 +- * per-cpu events.
2100 +- */
2101 +- if (group_leader->ctx->task != ctx->task)
2102 +- goto err_context;
2103 ++ if (group_leader->cpu != event->cpu)
2104 ++ goto err_context;
2105 +
2106 +- /*
2107 +- * Make sure we're both events for the same CPU;
2108 +- * grouping events for different CPUs is broken; since
2109 +- * you can never concurrently schedule them anyhow.
2110 +- */
2111 +- if (group_leader->cpu != event->cpu)
2112 +- goto err_context;
2113 +- } else {
2114 +- if (group_leader->ctx != ctx)
2115 +- goto err_context;
2116 +- }
2117 ++ /*
2118 ++ * Make sure we're both on the same task, or both
2119 ++ * per-CPU events.
2120 ++ */
2121 ++ if (group_leader->ctx->task != ctx->task)
2122 ++ goto err_context;
2123 ++
2124 ++ /*
2125 ++ * Do not allow to attach to a group in a different task
2126 ++ * or CPU context. If we're moving SW events, we'll fix
2127 ++ * this up later, so allow that.
2128 ++ */
2129 ++ if (!move_group && group_leader->ctx != ctx)
2130 ++ goto err_context;
2131 +
2132 + /*
2133 + * Only a group leader can be exclusive or pinned
2134 +diff --git a/kernel/fork.c b/kernel/fork.c
2135 +index 6440e0b70cad..9a2b4b4f13b4 100644
2136 +--- a/kernel/fork.c
2137 ++++ b/kernel/fork.c
2138 +@@ -802,6 +802,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
2139 + mm_init_cpumask(mm);
2140 + mm_init_aio(mm);
2141 + mm_init_owner(mm, p);
2142 ++ RCU_INIT_POINTER(mm->exe_file, NULL);
2143 + mmu_notifier_mm_init(mm);
2144 + clear_tlb_flush_pending(mm);
2145 + #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
2146 +diff --git a/kernel/time/timer.c b/kernel/time/timer.c
2147 +index d3f33020a06b..36cec054b8ae 100644
2148 +--- a/kernel/time/timer.c
2149 ++++ b/kernel/time/timer.c
2150 +@@ -203,6 +203,7 @@ struct timer_base {
2151 + bool migration_enabled;
2152 + bool nohz_active;
2153 + bool is_idle;
2154 ++ bool must_forward_clk;
2155 + DECLARE_BITMAP(pending_map, WHEEL_SIZE);
2156 + struct hlist_head vectors[WHEEL_SIZE];
2157 + } ____cacheline_aligned;
2158 +@@ -856,13 +857,19 @@ get_target_base(struct timer_base *base, unsigned tflags)
2159 +
2160 + static inline void forward_timer_base(struct timer_base *base)
2161 + {
2162 +- unsigned long jnow = READ_ONCE(jiffies);
2163 ++ unsigned long jnow;
2164 +
2165 + /*
2166 +- * We only forward the base when it's idle and we have a delta between
2167 +- * base clock and jiffies.
2168 ++ * We only forward the base when we are idle or have just come out of
2169 ++ * idle (must_forward_clk logic), and have a delta between base clock
2170 ++ * and jiffies. In the common case, run_timers will take care of it.
2171 + */
2172 +- if (!base->is_idle || (long) (jnow - base->clk) < 2)
2173 ++ if (likely(!base->must_forward_clk))
2174 ++ return;
2175 ++
2176 ++ jnow = READ_ONCE(jiffies);
2177 ++ base->must_forward_clk = base->is_idle;
2178 ++ if ((long)(jnow - base->clk) < 2)
2179 + return;
2180 +
2181 + /*
2182 +@@ -938,6 +945,11 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
2183 + * same array bucket then just return:
2184 + */
2185 + if (timer_pending(timer)) {
2186 ++ /*
2187 ++ * The downside of this optimization is that it can result in
2188 ++ * larger granularity than you would get from adding a new
2189 ++ * timer with this expiry.
2190 ++ */
2191 + if (timer->expires == expires)
2192 + return 1;
2193 +
2194 +@@ -948,6 +960,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
2195 + * dequeue/enqueue dance.
2196 + */
2197 + base = lock_timer_base(timer, &flags);
2198 ++ forward_timer_base(base);
2199 +
2200 + clk = base->clk;
2201 + idx = calc_wheel_index(expires, clk);
2202 +@@ -964,6 +977,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
2203 + }
2204 + } else {
2205 + base = lock_timer_base(timer, &flags);
2206 ++ forward_timer_base(base);
2207 + }
2208 +
2209 + ret = detach_if_pending(timer, base, false);
2210 +@@ -991,12 +1005,10 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
2211 + spin_lock(&base->lock);
2212 + WRITE_ONCE(timer->flags,
2213 + (timer->flags & ~TIMER_BASEMASK) | base->cpu);
2214 ++ forward_timer_base(base);
2215 + }
2216 + }
2217 +
2218 +- /* Try to forward a stale timer base clock */
2219 +- forward_timer_base(base);
2220 +-
2221 + timer->expires = expires;
2222 + /*
2223 + * If 'idx' was calculated above and the base time did not advance
2224 +@@ -1112,6 +1124,7 @@ void add_timer_on(struct timer_list *timer, int cpu)
2225 + WRITE_ONCE(timer->flags,
2226 + (timer->flags & ~TIMER_BASEMASK) | cpu);
2227 + }
2228 ++ forward_timer_base(base);
2229 +
2230 + debug_activate(timer, timer->expires);
2231 + internal_add_timer(base, timer);
2232 +@@ -1497,10 +1510,16 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
2233 + if (!is_max_delta)
2234 + expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
2235 + /*
2236 +- * If we expect to sleep more than a tick, mark the base idle:
2237 ++ * If we expect to sleep more than a tick, mark the base idle.
2238 ++ * Also the tick is stopped so any added timer must forward
2239 ++ * the base clk itself to keep granularity small. This idle
2240 ++ * logic is only maintained for the BASE_STD base, deferrable
2241 ++ * timers may still see large granularity skew (by design).
2242 + */
2243 +- if ((expires - basem) > TICK_NSEC)
2244 ++ if ((expires - basem) > TICK_NSEC) {
2245 ++ base->must_forward_clk = true;
2246 + base->is_idle = true;
2247 ++ }
2248 + }
2249 + spin_unlock(&base->lock);
2250 +
2251 +@@ -1611,6 +1630,19 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
2252 + {
2253 + struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
2254 +
2255 ++ /*
2256 ++ * must_forward_clk must be cleared before running timers so that any
2257 ++ * timer functions that call mod_timer will not try to forward the
2258 ++ * base. Idle tracking / clock forwarding logic is only used with
2259 ++ * BASE_STD timers.
2260 ++ *
2261 ++ * The deferrable base does not do idle tracking at all, so we do
2262 ++ * not forward it. This can result in very large variations in
2263 ++ * granularity for deferrable timers, but they can be deferred for
2264 ++ * long periods due to idle.
2265 ++ */
2266 ++ base->must_forward_clk = false;
2267 ++
2268 + __run_timers(base);
2269 + if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
2270 + __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
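Editor's note: the must_forward_clk logic addresses a granularity problem: after a long NO_HZ idle, base->clk lags jiffies, and a bucket index computed against the stale clock parks a short timeout in a far coarser wheel level. A toy model, with plain integers, of why forwarding first matters:

#include <stdio.h>

int main(void)
{
	unsigned long jiffies = 100000; /* "now" */
	unsigned long clk = 90000;      /* base clock stalled during idle */
	unsigned long timeout = 10;     /* timer due 10 ticks from now */

	/* Unforwarded: the wheel sees a 10010-tick delta and files the
	 * timer in a coarse high level, firing far too late. */
	printf("stale delta: %lu\n", jiffies + timeout - clk);

	/* Forwarding clk to jiffies first (what must_forward_clk arranges)
	 * restores the real 10-tick delta. */
	clk = jiffies;
	printf("forwarded delta: %lu\n", jiffies + timeout - clk);
	return 0;
}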
2271 +diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
2272 +index 460a031c77e5..d521b301dee9 100644
2273 +--- a/kernel/trace/bpf_trace.c
2274 ++++ b/kernel/trace/bpf_trace.c
2275 +@@ -203,10 +203,36 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
2276 + fmt_cnt++;
2277 + }
2278 +
2279 +- return __trace_printk(1/* fake ip will not be printed */, fmt,
2280 +- mod[0] == 2 ? arg1 : mod[0] == 1 ? (long) arg1 : (u32) arg1,
2281 +- mod[1] == 2 ? arg2 : mod[1] == 1 ? (long) arg2 : (u32) arg2,
2282 +- mod[2] == 2 ? arg3 : mod[2] == 1 ? (long) arg3 : (u32) arg3);
2283 ++/* Horrid workaround for getting va_list handling working with different
2284 ++ * argument type combinations generically for 32 and 64 bit archs.
2285 ++ */
2286 ++#define __BPF_TP_EMIT() __BPF_ARG3_TP()
2287 ++#define __BPF_TP(...) \
2288 ++ __trace_printk(1 /* Fake ip will not be printed. */, \
2289 ++ fmt, ##__VA_ARGS__)
2290 ++
2291 ++#define __BPF_ARG1_TP(...) \
2292 ++ ((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64)) \
2293 ++ ? __BPF_TP(arg1, ##__VA_ARGS__) \
2294 ++ : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32)) \
2295 ++ ? __BPF_TP((long)arg1, ##__VA_ARGS__) \
2296 ++ : __BPF_TP((u32)arg1, ##__VA_ARGS__)))
2297 ++
2298 ++#define __BPF_ARG2_TP(...) \
2299 ++ ((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64)) \
2300 ++ ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__) \
2301 ++ : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32)) \
2302 ++ ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__) \
2303 ++ : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))
2304 ++
2305 ++#define __BPF_ARG3_TP(...) \
2306 ++ ((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64)) \
2307 ++ ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__) \
2308 ++ : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32)) \
2309 ++ ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__) \
2310 ++ : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))
2311 ++
2312 ++ return __BPF_TP_EMIT();
2313 + }
2314 +
2315 + static const struct bpf_func_proto bpf_trace_printk_proto = {
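Editor's note: the nested __BPF_ARG*_TP macros expand to a ternary cascade so each of the three arguments is cast according to its format modifier (mod[i]: 0 = int-sized, 1 = long, 2 = long long) before the single __trace_printk() call. A one-argument sketch of the same trick in GNU C (EMIT and ARG1 are invented names):

#include <stdio.h>

#define EMIT(fmt, ...) printf(fmt, ##__VA_ARGS__)

/* mod: 0 = int-sized, 1 = long, 2 = long long (mirrors the patch). */
#define ARG1(fmt, a1, mod)                                \
	((mod) == 2 ? EMIT(fmt, (long long)(a1))          \
		    : ((mod) == 1 ? EMIT(fmt, (long)(a1)) \
				  : EMIT(fmt, (int)(a1))))

int main(void)
{
	unsigned long long a1 = 0x100000000ULL; /* needs 64 bits */

	ARG1("%lld\n", a1, 2); /* 4294967296: full value survives */
	ARG1("%d\n",   a1, 0); /* 0: truncated as a 32-bit argument */
	return 0;
}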
2316 +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
2317 +index 28e980d2851b..a2bbce575e88 100644
2318 +--- a/kernel/trace/ftrace.c
2319 ++++ b/kernel/trace/ftrace.c
2320 +@@ -878,6 +878,10 @@ static int profile_graph_entry(struct ftrace_graph_ent *trace)
2321 +
2322 + function_profile_call(trace->func, 0, NULL, NULL);
2323 +
2324 ++ /* If function graph is shutting down, ret_stack can be NULL */
2325 ++ if (!current->ret_stack)
2326 ++ return 0;
2327 ++
2328 + if (index >= 0 && index < FTRACE_RETFUNC_DEPTH)
2329 + current->ret_stack[index].subtime = 0;
2330 +
2331 +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
2332 +index 4ae268e687fe..912f62df0279 100644
2333 +--- a/kernel/trace/ring_buffer.c
2334 ++++ b/kernel/trace/ring_buffer.c
2335 +@@ -4386,15 +4386,19 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
2336 + * the page that was allocated, with the read page of the buffer.
2337 + *
2338 + * Returns:
2339 +- * The page allocated, or NULL on error.
2340 ++ * The page allocated, or ERR_PTR
2341 + */
2342 + void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
2343 + {
2344 +- struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2345 ++ struct ring_buffer_per_cpu *cpu_buffer;
2346 + struct buffer_data_page *bpage = NULL;
2347 + unsigned long flags;
2348 + struct page *page;
2349 +
2350 ++ if (!cpumask_test_cpu(cpu, buffer->cpumask))
2351 ++ return ERR_PTR(-ENODEV);
2352 ++
2353 ++ cpu_buffer = buffer->buffers[cpu];
2354 + local_irq_save(flags);
2355 + arch_spin_lock(&cpu_buffer->lock);
2356 +
2357 +@@ -4412,7 +4416,7 @@ void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
2358 + page = alloc_pages_node(cpu_to_node(cpu),
2359 + GFP_KERNEL | __GFP_NORETRY, 0);
2360 + if (!page)
2361 +- return NULL;
2362 ++ return ERR_PTR(-ENOMEM);
2363 +
2364 + bpage = page_address(page);
2365 +
2366 +@@ -4467,8 +4471,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
2367 + *
2368 + * for example:
2369 + * rpage = ring_buffer_alloc_read_page(buffer, cpu);
2370 +- * if (!rpage)
2371 +- * return error;
2372 ++ * if (IS_ERR(rpage))
2373 ++ * return PTR_ERR(rpage);
2374 + * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
2375 + * if (ret >= 0)
2376 + * process_page(rpage, ret);
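Editor's note: the API change adopts the kernel's ERR_PTR convention, encoding a small negative errno in a pointer from the top, unmappable page of the address space, so callers can distinguish -ENODEV from -ENOMEM instead of seeing a bare NULL. A minimal userspace re-implementation of the helpers as a sketch:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static char fake_page[4096];

static void *alloc_page_like(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM); /* the reason survives, unlike NULL */
	return fake_page;
}

int main(void)
{
	void *p = alloc_page_like(1);

	if (IS_ERR(p))
		printf("error: %ld\n", PTR_ERR(p)); /* -12 (ENOMEM) */
	return 0;
}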
2377 +diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
2378 +index 9fbcaf567886..68ee79afe31c 100644
2379 +--- a/kernel/trace/ring_buffer_benchmark.c
2380 ++++ b/kernel/trace/ring_buffer_benchmark.c
2381 +@@ -113,7 +113,7 @@ static enum event_status read_page(int cpu)
2382 + int i;
2383 +
2384 + bpage = ring_buffer_alloc_read_page(buffer, cpu);
2385 +- if (!bpage)
2386 ++ if (IS_ERR(bpage))
2387 + return EVENT_DROPPED;
2388 +
2389 + ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1);
2390 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
2391 +index 5764318357de..749a82c6a832 100644
2392 +--- a/kernel/trace/trace.c
2393 ++++ b/kernel/trace/trace.c
2394 +@@ -6403,7 +6403,7 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
2395 + {
2396 + struct ftrace_buffer_info *info = filp->private_data;
2397 + struct trace_iterator *iter = &info->iter;
2398 +- ssize_t ret;
2399 ++ ssize_t ret = 0;
2400 + ssize_t size;
2401 +
2402 + if (!count)
2403 +@@ -6417,10 +6417,15 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
2404 + if (!info->spare) {
2405 + info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
2406 + iter->cpu_file);
2407 +- info->spare_cpu = iter->cpu_file;
2408 ++ if (IS_ERR(info->spare)) {
2409 ++ ret = PTR_ERR(info->spare);
2410 ++ info->spare = NULL;
2411 ++ } else {
2412 ++ info->spare_cpu = iter->cpu_file;
2413 ++ }
2414 + }
2415 + if (!info->spare)
2416 +- return -ENOMEM;
2417 ++ return ret;
2418 +
2419 + /* Do we have previous read data to read? */
2420 + if (info->read < PAGE_SIZE)
2421 +@@ -6595,8 +6600,9 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
2422 + ref->ref = 1;
2423 + ref->buffer = iter->trace_buffer->buffer;
2424 + ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
2425 +- if (!ref->page) {
2426 +- ret = -ENOMEM;
2427 ++ if (IS_ERR(ref->page)) {
2428 ++ ret = PTR_ERR(ref->page);
2429 ++ ref->page = NULL;
2430 + kfree(ref);
2431 + break;
2432 + }
2433 +@@ -8110,6 +8116,7 @@ __init static int tracer_alloc_buffers(void)
2434 + if (ret < 0)
2435 + goto out_free_cpumask;
2436 + /* Used for event triggers */
2437 ++ ret = -ENOMEM;
2438 + temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
2439 + if (!temp_buffer)
2440 + goto out_rm_hp_state;
2441 +@@ -8224,4 +8231,4 @@ __init static int clear_boot_tracer(void)
2442 + }
2443 +
2444 + fs_initcall(tracer_init_tracefs);
2445 +-late_initcall(clear_boot_tracer);
2446 ++late_initcall_sync(clear_boot_tracer);
2447 +diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
2448 +index 59a411ff60c7..181e139a8057 100644
2449 +--- a/kernel/trace/trace_events_filter.c
2450 ++++ b/kernel/trace/trace_events_filter.c
2451 +@@ -1959,6 +1959,10 @@ static int create_filter(struct trace_event_call *call,
2452 + if (err && set_str)
2453 + append_filter_err(ps, filter);
2454 + }
2455 ++ if (err && !set_str) {
2456 ++ free_event_filter(filter);
2457 ++ filter = NULL;
2458 ++ }
2459 + create_filter_finish(ps);
2460 +
2461 + *filterp = filter;
2462 +diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c
2463 +index 0a689bbb78ef..305039b122fa 100644
2464 +--- a/kernel/trace/tracing_map.c
2465 ++++ b/kernel/trace/tracing_map.c
2466 +@@ -221,16 +221,19 @@ void tracing_map_array_free(struct tracing_map_array *a)
2467 + if (!a)
2468 + return;
2469 +
2470 +- if (!a->pages) {
2471 +- kfree(a);
2472 +- return;
2473 +- }
2474 ++ if (!a->pages)
2475 ++ goto free;
2476 +
2477 + for (i = 0; i < a->n_pages; i++) {
2478 + if (!a->pages[i])
2479 + break;
2480 + free_page((unsigned long)a->pages[i]);
2481 + }
2482 ++
2483 ++ kfree(a->pages);
2484 ++
2485 ++ free:
2486 ++ kfree(a);
2487 + }
2488 +
2489 + struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts,
2490 +diff --git a/mm/madvise.c b/mm/madvise.c
2491 +index 75d2cffbe61d..fc6bfbe19a16 100644
2492 +--- a/mm/madvise.c
2493 ++++ b/mm/madvise.c
2494 +@@ -368,8 +368,8 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
2495 + pte_offset_map_lock(mm, pmd, addr, &ptl);
2496 + goto out;
2497 + }
2498 +- put_page(page);
2499 + unlock_page(page);
2500 ++ put_page(page);
2501 + pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
2502 + pte--;
2503 + addr -= PAGE_SIZE;
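Editor's note: the swap of put_page()/unlock_page() is a use-after-free guard: put_page() may drop the final reference and free the page, after which unlock_page() would write to freed memory. The rule, modelled with a toy refcounted object:

#include <stdio.h>
#include <stdlib.h>

struct page_like {
	int refcount;
	int locked;
};

static void unlock_page_like(struct page_like *p)
{
	p->locked = 0;
}

static void put_page_like(struct page_like *p)
{
	if (--p->refcount == 0)
		free(p);        /* last reference: the object is gone */
}

int main(void)
{
	struct page_like *p = malloc(sizeof(*p));

	if (!p)
		return 1;
	p->refcount = 1;
	p->locked = 1;

	/* Fixed order: unlock while a reference is still held; the
	 * reverse could unlock an already-freed page. */
	unlock_page_like(p);
	put_page_like(p);
	printf("ok\n");
	return 0;
}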
2504 +diff --git a/mm/memblock.c b/mm/memblock.c
2505 +index 7087d5578866..43d0919e29f3 100644
2506 +--- a/mm/memblock.c
2507 ++++ b/mm/memblock.c
2508 +@@ -302,7 +302,7 @@ void __init memblock_discard(void)
2509 + __memblock_free_late(addr, size);
2510 + }
2511 +
2512 +- if (memblock.memory.regions == memblock_memory_init_regions) {
2513 ++ if (memblock.memory.regions != memblock_memory_init_regions) {
2514 + addr = __pa(memblock.memory.regions);
2515 + size = PAGE_ALIGN(sizeof(struct memblock_region) *
2516 + memblock.memory.max);
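Editor's note: the flipped comparison fixes an inverted guard; the intent is to free the regions array only when it has been reallocated away from the static bootstrap storage, and the == test did the exact opposite. The intended shape, in miniature:

#include <stdio.h>
#include <stdlib.h>

static int init_regions[4];     /* static bootstrap array */

int main(void)
{
	int *regions = init_regions;

	/* Later the table may be grown into heap storage... */
	regions = malloc(16 * sizeof(*regions));
	if (!regions)
		return 1;

	/* ...and only then may it be freed: the != test (as in the fix)
	 * protects the static array; == would have freed it instead. */
	if (regions != init_regions)
		free(regions);
	return 0;
}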
2517 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
2518 +index 4d16ef9d42a9..f553b3a6eca8 100644
2519 +--- a/mm/page_alloc.c
2520 ++++ b/mm/page_alloc.c
2521 +@@ -66,6 +66,7 @@
2522 + #include <linux/kthread.h>
2523 + #include <linux/memcontrol.h>
2524 + #include <linux/ftrace.h>
2525 ++#include <linux/nmi.h>
2526 +
2527 + #include <asm/sections.h>
2528 + #include <asm/tlbflush.h>
2529 +@@ -2495,9 +2496,14 @@ void drain_all_pages(struct zone *zone)
2530 +
2531 + #ifdef CONFIG_HIBERNATION
2532 +
2533 ++/*
2534 ++ * Touch the watchdog for every WD_PAGE_COUNT pages.
2535 ++ */
2536 ++#define WD_PAGE_COUNT (128*1024)
2537 ++
2538 + void mark_free_pages(struct zone *zone)
2539 + {
2540 +- unsigned long pfn, max_zone_pfn;
2541 ++ unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
2542 + unsigned long flags;
2543 + unsigned int order, t;
2544 + struct page *page;
2545 +@@ -2512,6 +2518,11 @@ void mark_free_pages(struct zone *zone)
2546 + if (pfn_valid(pfn)) {
2547 + page = pfn_to_page(pfn);
2548 +
2549 ++ if (!--page_count) {
2550 ++ touch_nmi_watchdog();
2551 ++ page_count = WD_PAGE_COUNT;
2552 ++ }
2553 ++
2554 + if (page_zone(page) != zone)
2555 + continue;
2556 +
2557 +@@ -2525,8 +2536,13 @@ void mark_free_pages(struct zone *zone)
2558 + unsigned long i;
2559 +
2560 + pfn = page_to_pfn(page);
2561 +- for (i = 0; i < (1UL << order); i++)
2562 ++ for (i = 0; i < (1UL << order); i++) {
2563 ++ if (!--page_count) {
2564 ++ touch_nmi_watchdog();
2565 ++ page_count = WD_PAGE_COUNT;
2566 ++ }
2567 + swsusp_set_page_free(pfn_to_page(pfn + i));
2568 ++ }
2569 + }
2570 + }
2571 + spin_unlock_irqrestore(&zone->lock, flags);
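Editor's note: mark_free_pages() can walk millions of page frames with the zone lock held and interrupts off, long enough to trip the NMI watchdog, hence the amortized touch every 128k pages. The countdown idiom in isolation (touch_watchdog() here is a stand-in for touch_nmi_watchdog()):

#include <stdio.h>

#define WD_COUNT (128 * 1024)

static void touch_watchdog(void)
{
	/* stand-in for the kernel's touch_nmi_watchdog() */
}

int main(void)
{
	unsigned long counter = WD_COUNT;
	unsigned long pfn;

	for (pfn = 0; pfn < 1000000UL; pfn++) {
		if (!--counter) {       /* amortized: once per 128k pages */
			touch_watchdog();
			counter = WD_COUNT;
		}
		/* ... per-page work ... */
	}
	printf("walked %lu pages\n", pfn);
	return 0;
}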
2572 +diff --git a/mm/shmem.c b/mm/shmem.c
2573 +index 1183e898743b..0474c7a73cfa 100644
2574 +--- a/mm/shmem.c
2575 ++++ b/mm/shmem.c
2576 +@@ -3964,7 +3964,7 @@ int __init shmem_init(void)
2577 + }
2578 +
2579 + #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
2580 +- if (has_transparent_hugepage() && shmem_huge < SHMEM_HUGE_DENY)
2581 ++ if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
2582 + SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
2583 + else
2584 + shmem_huge = 0; /* just in case it was patched */
2585 +@@ -4025,7 +4025,7 @@ static ssize_t shmem_enabled_store(struct kobject *kobj,
2586 + return -EINVAL;
2587 +
2588 + shmem_huge = huge;
2589 +- if (shmem_huge < SHMEM_HUGE_DENY)
2590 ++ if (shmem_huge > SHMEM_HUGE_DENY)
2591 + SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
2592 + return count;
2593 + }
2594 +diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
2595 +index fbf251fef70f..4d6b94d7ce5f 100644
2596 +--- a/net/bluetooth/bnep/core.c
2597 ++++ b/net/bluetooth/bnep/core.c
2598 +@@ -484,16 +484,16 @@ static int bnep_session(void *arg)
2599 + struct net_device *dev = s->dev;
2600 + struct sock *sk = s->sock->sk;
2601 + struct sk_buff *skb;
2602 +- wait_queue_t wait;
2603 ++ DEFINE_WAIT_FUNC(wait, woken_wake_function);
2604 +
2605 + BT_DBG("");
2606 +
2607 + set_user_nice(current, -15);
2608 +
2609 +- init_waitqueue_entry(&wait, current);
2610 + add_wait_queue(sk_sleep(sk), &wait);
2611 + while (1) {
2612 +- set_current_state(TASK_INTERRUPTIBLE);
2613 ++ /* Ensure session->terminate is updated */
2614 ++ smp_mb__before_atomic();
2615 +
2616 + if (atomic_read(&s->terminate))
2617 + break;
2618 +@@ -515,9 +515,8 @@ static int bnep_session(void *arg)
2619 + break;
2620 + netif_wake_queue(dev);
2621 +
2622 +- schedule();
2623 ++ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
2624 + }
2625 +- __set_current_state(TASK_RUNNING);
2626 + remove_wait_queue(sk_sleep(sk), &wait);
2627 +
2628 + /* Cleanup session */
2629 +@@ -666,7 +665,7 @@ int bnep_del_connection(struct bnep_conndel_req *req)
2630 + s = __bnep_get_session(req->dst);
2631 + if (s) {
2632 + atomic_inc(&s->terminate);
2633 +- wake_up_process(s->task);
2634 ++ wake_up_interruptible(sk_sleep(s->sock->sk));
2635 + } else
2636 + err = -ENOENT;
2637 +
2638 +diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
2639 +index 9e59b6654126..1152ce34dad4 100644
2640 +--- a/net/bluetooth/cmtp/core.c
2641 ++++ b/net/bluetooth/cmtp/core.c
2642 +@@ -280,16 +280,16 @@ static int cmtp_session(void *arg)
2643 + struct cmtp_session *session = arg;
2644 + struct sock *sk = session->sock->sk;
2645 + struct sk_buff *skb;
2646 +- wait_queue_t wait;
2647 ++ DEFINE_WAIT_FUNC(wait, woken_wake_function);
2648 +
2649 + BT_DBG("session %p", session);
2650 +
2651 + set_user_nice(current, -15);
2652 +
2653 +- init_waitqueue_entry(&wait, current);
2654 + add_wait_queue(sk_sleep(sk), &wait);
2655 + while (1) {
2656 +- set_current_state(TASK_INTERRUPTIBLE);
2657 ++ /* Ensure session->terminate is updated */
2658 ++ smp_mb__before_atomic();
2659 +
2660 + if (atomic_read(&session->terminate))
2661 + break;
2662 +@@ -306,9 +306,8 @@ static int cmtp_session(void *arg)
2663 +
2664 + cmtp_process_transmit(session);
2665 +
2666 +- schedule();
2667 ++ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
2668 + }
2669 +- __set_current_state(TASK_RUNNING);
2670 + remove_wait_queue(sk_sleep(sk), &wait);
2671 +
2672 + down_write(&cmtp_session_sem);
2673 +@@ -393,7 +392,7 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
2674 + err = cmtp_attach_device(session);
2675 + if (err < 0) {
2676 + atomic_inc(&session->terminate);
2677 +- wake_up_process(session->task);
2678 ++ wake_up_interruptible(sk_sleep(session->sock->sk));
2679 + up_write(&cmtp_session_sem);
2680 + return err;
2681 + }
2682 +@@ -431,7 +430,11 @@ int cmtp_del_connection(struct cmtp_conndel_req *req)
2683 +
2684 + /* Stop session thread */
2685 + atomic_inc(&session->terminate);
2686 +- wake_up_process(session->task);
2687 ++
2688 ++ /* Ensure session->terminate is updated */
2689 ++ smp_mb__after_atomic();
2690 ++
2691 ++ wake_up_interruptible(sk_sleep(session->sock->sk));
2692 + } else
2693 + err = -ENOENT;
2694 +
2695 +diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
2696 +index 0bec4588c3c8..1fc076420d1e 100644
2697 +--- a/net/bluetooth/hidp/core.c
2698 ++++ b/net/bluetooth/hidp/core.c
2699 +@@ -36,6 +36,7 @@
2700 + #define VERSION "1.2"
2701 +
2702 + static DECLARE_RWSEM(hidp_session_sem);
2703 ++static DECLARE_WAIT_QUEUE_HEAD(hidp_session_wq);
2704 + static LIST_HEAD(hidp_session_list);
2705 +
2706 + static unsigned char hidp_keycode[256] = {
2707 +@@ -1068,12 +1069,12 @@ static int hidp_session_start_sync(struct hidp_session *session)
2708 + * Wake up session thread and notify it to stop. This is asynchronous and
2709 + * returns immediately. Call this whenever a runtime error occurs and you want
2710 + * the session to stop.
2711 +- * Note: wake_up_process() performs any necessary memory-barriers for us.
2712 ++ * Note: wake_up_interruptible() performs any necessary memory-barriers for us.
2713 + */
2714 + static void hidp_session_terminate(struct hidp_session *session)
2715 + {
2716 + atomic_inc(&session->terminate);
2717 +- wake_up_process(session->task);
2718 ++ wake_up_interruptible(&hidp_session_wq);
2719 + }
2720 +
2721 + /*
2722 +@@ -1180,7 +1181,9 @@ static void hidp_session_run(struct hidp_session *session)
2723 + struct sock *ctrl_sk = session->ctrl_sock->sk;
2724 + struct sock *intr_sk = session->intr_sock->sk;
2725 + struct sk_buff *skb;
2726 ++ DEFINE_WAIT_FUNC(wait, woken_wake_function);
2727 +
2728 ++ add_wait_queue(&hidp_session_wq, &wait);
2729 + for (;;) {
2730 + /*
2731 + * This thread can be woken up two ways:
2732 +@@ -1188,12 +1191,10 @@ static void hidp_session_run(struct hidp_session *session)
2733 + * session->terminate flag and wakes this thread up.
2734 + * - Via modifying the socket state of ctrl/intr_sock. This
2735 + * thread is woken up by ->sk_state_changed().
2736 +- *
2737 +- * Note: set_current_state() performs any necessary
2738 +- * memory-barriers for us.
2739 + */
2740 +- set_current_state(TASK_INTERRUPTIBLE);
2741 +
2742 ++ /* Ensure session->terminate is updated */
2743 ++ smp_mb__before_atomic();
2744 + if (atomic_read(&session->terminate))
2745 + break;
2746 +
2747 +@@ -1227,11 +1228,22 @@ static void hidp_session_run(struct hidp_session *session)
2748 + hidp_process_transmit(session, &session->ctrl_transmit,
2749 + session->ctrl_sock);
2750 +
2751 +- schedule();
2752 ++ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
2753 + }
2754 ++ remove_wait_queue(&hidp_session_wq, &wait);
2755 +
2756 + atomic_inc(&session->terminate);
2757 +- set_current_state(TASK_RUNNING);
2758 ++
2759 ++ /* Ensure session->terminate is updated */
2760 ++ smp_mb__after_atomic();
2761 ++}
2762 ++
2763 ++static int hidp_session_wake_function(wait_queue_t *wait,
2764 ++ unsigned int mode,
2765 ++ int sync, void *key)
2766 ++{
2767 ++ wake_up_interruptible(&hidp_session_wq);
2768 ++ return false;
2769 + }
2770 +
2771 + /*
2772 +@@ -1244,7 +1256,8 @@ static void hidp_session_run(struct hidp_session *session)
2773 + static int hidp_session_thread(void *arg)
2774 + {
2775 + struct hidp_session *session = arg;
2776 +- wait_queue_t ctrl_wait, intr_wait;
2777 ++ DEFINE_WAIT_FUNC(ctrl_wait, hidp_session_wake_function);
2778 ++ DEFINE_WAIT_FUNC(intr_wait, hidp_session_wake_function);
2779 +
2780 + BT_DBG("session %p", session);
2781 +
2782 +@@ -1254,8 +1267,6 @@ static int hidp_session_thread(void *arg)
2783 + set_user_nice(current, -15);
2784 + hidp_set_timer(session);
2785 +
2786 +- init_waitqueue_entry(&ctrl_wait, current);
2787 +- init_waitqueue_entry(&intr_wait, current);
2788 + add_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait);
2789 + add_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait);
2790 + /* This memory barrier is paired with wq_has_sleeper(). See
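Editor's note: all three Bluetooth session threads switch from an open-coded set_current_state()/schedule() loop to wait_woken(), closing the window where a wakeup landing between the terminate check and the sleep is lost. A userspace analogue of the race-free check-then-wait, using a condition variable in place of wait_woken()'s internal state (build with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int terminate;

static void *session(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!terminate)                       /* check and sleep are atomic */
		pthread_cond_wait(&cond, &lock); /* with respect to the waker */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, session, NULL);

	pthread_mutex_lock(&lock);
	terminate = 1;              /* cf. atomic_inc(&s->terminate) */
	pthread_cond_signal(&cond); /* cf. wake_up_interruptible()   */
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	printf("session stopped\n");
	return 0;
}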
2791 +diff --git a/net/dccp/proto.c b/net/dccp/proto.c
2792 +index 9fe25bf63296..b68168fcc06a 100644
2793 +--- a/net/dccp/proto.c
2794 ++++ b/net/dccp/proto.c
2795 +@@ -24,6 +24,7 @@
2796 + #include <net/checksum.h>
2797 +
2798 + #include <net/inet_sock.h>
2799 ++#include <net/inet_common.h>
2800 + #include <net/sock.h>
2801 + #include <net/xfrm.h>
2802 +
2803 +@@ -170,6 +171,15 @@ const char *dccp_packet_name(const int type)
2804 +
2805 + EXPORT_SYMBOL_GPL(dccp_packet_name);
2806 +
2807 ++static void dccp_sk_destruct(struct sock *sk)
2808 ++{
2809 ++ struct dccp_sock *dp = dccp_sk(sk);
2810 ++
2811 ++ ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
2812 ++ dp->dccps_hc_tx_ccid = NULL;
2813 ++ inet_sock_destruct(sk);
2814 ++}
2815 ++
2816 + int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
2817 + {
2818 + struct dccp_sock *dp = dccp_sk(sk);
2819 +@@ -179,6 +189,7 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
2820 + icsk->icsk_syn_retries = sysctl_dccp_request_retries;
2821 + sk->sk_state = DCCP_CLOSED;
2822 + sk->sk_write_space = dccp_write_space;
2823 ++ sk->sk_destruct = dccp_sk_destruct;
2824 + icsk->icsk_sync_mss = dccp_sync_mss;
2825 + dp->dccps_mss_cache = 536;
2826 + dp->dccps_rate_last = jiffies;
2827 +@@ -201,10 +212,7 @@ void dccp_destroy_sock(struct sock *sk)
2828 + {
2829 + struct dccp_sock *dp = dccp_sk(sk);
2830 +
2831 +- /*
2832 +- * DCCP doesn't use sk_write_queue, just sk_send_head
2833 +- * for retransmissions
2834 +- */
2835 ++ __skb_queue_purge(&sk->sk_write_queue);
2836 + if (sk->sk_send_head != NULL) {
2837 + kfree_skb(sk->sk_send_head);
2838 + sk->sk_send_head = NULL;
2839 +@@ -222,8 +230,7 @@ void dccp_destroy_sock(struct sock *sk)
2840 + dp->dccps_hc_rx_ackvec = NULL;
2841 + }
2842 + ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
2843 +- ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
2844 +- dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;
2845 ++ dp->dccps_hc_rx_ccid = NULL;
2846 +
2847 + /* clean up feature negotiation state */
2848 + dccp_feat_list_purge(&dp->dccps_featneg);
2849 +diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
2850 +index ce7bc2e5175a..ac9a8fbbacfd 100644
2851 +--- a/net/ipv4/fib_semantics.c
2852 ++++ b/net/ipv4/fib_semantics.c
2853 +@@ -1033,15 +1033,17 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
2854 + fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
2855 + if (!fi)
2856 + goto failure;
2857 +- fib_info_cnt++;
2858 + if (cfg->fc_mx) {
2859 + fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL);
2860 +- if (!fi->fib_metrics)
2861 +- goto failure;
2862 ++ if (unlikely(!fi->fib_metrics)) {
2863 ++ kfree(fi);
2864 ++ return ERR_PTR(err);
2865 ++ }
2866 + atomic_set(&fi->fib_metrics->refcnt, 1);
2867 +- } else
2868 ++ } else {
2869 + fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics;
2870 +-
2871 ++ }
2872 ++ fib_info_cnt++;
2873 + fi->fib_net = net;
2874 + fi->fib_protocol = cfg->fc_protocol;
2875 + fi->fib_scope = cfg->fc_scope;
2876 +diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
2877 +index 3db1adb6b7a0..abdbe79ee175 100644
2878 +--- a/net/ipv4/igmp.c
2879 ++++ b/net/ipv4/igmp.c
2880 +@@ -1007,10 +1007,18 @@ int igmp_rcv(struct sk_buff *skb)
2881 + {
2882 + /* This basically follows the spec line by line -- see RFC1112 */
2883 + struct igmphdr *ih;
2884 +- struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
2885 ++ struct net_device *dev = skb->dev;
2886 ++ struct in_device *in_dev;
2887 + int len = skb->len;
2888 + bool dropped = true;
2889 +
2890 ++ if (netif_is_l3_master(dev)) {
2891 ++ dev = dev_get_by_index_rcu(dev_net(dev), IPCB(skb)->iif);
2892 ++ if (!dev)
2893 ++ goto drop;
2894 ++ }
2895 ++
2896 ++ in_dev = __in_dev_get_rcu(dev);
2897 + if (!in_dev)
2898 + goto drop;
2899 +
2900 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
2901 +index 6883b3d4ba8f..22ba873546c3 100644
2902 +--- a/net/ipv4/route.c
2903 ++++ b/net/ipv4/route.c
2904 +@@ -1268,7 +1268,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
2905 + if (mtu)
2906 + return mtu;
2907 +
2908 +- mtu = dst->dev->mtu;
2909 ++ mtu = READ_ONCE(dst->dev->mtu);
2910 +
2911 + if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
2912 + if (rt->rt_uses_gateway && mtu > 576)
2913 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
2914 +index 57bcae81fe42..fbaac4423a99 100644
2915 +--- a/net/ipv4/tcp_input.c
2916 ++++ b/net/ipv4/tcp_input.c
2917 +@@ -3007,8 +3007,7 @@ void tcp_rearm_rto(struct sock *sk)
2918 + /* delta may not be positive if the socket is locked
2919 + * when the retrans timer fires and is rescheduled.
2920 + */
2921 +- if (delta > 0)
2922 +- rto = delta;
2923 ++ rto = max(delta, 1);
2924 + }
2925 + inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
2926 + TCP_RTO_MAX);
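Editor's note: the max(delta, 1) form also covers the case the deleted branch missed; when the recomputed remaining time is zero or negative (the RTO already expired while the socket was locked), the timer is rearmed to fire on the next tick rather than keeping the stale, longer value. In miniature:

#include <stdio.h>

#define max(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	long rto = 200;  /* full retransmission timeout */
	long delta = -5; /* deadline already passed */

	/* The old logic left rto at 200; the fix fires immediately. */
	rto = max(delta, 1L);
	printf("rto=%ld\n", rto); /* 1 */
	return 0;
}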
2927 +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
2928 +index e4e9f752ebbf..cd8dd8c4e819 100644
2929 +--- a/net/ipv6/ip6_fib.c
2930 ++++ b/net/ipv6/ip6_fib.c
2931 +@@ -912,6 +912,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
2932 + }
2933 + nsiblings = iter->rt6i_nsiblings;
2934 + fib6_purge_rt(iter, fn, info->nl_net);
2935 ++ if (fn->rr_ptr == iter)
2936 ++ fn->rr_ptr = NULL;
2937 + rt6_release(iter);
2938 +
2939 + if (nsiblings) {
2940 +@@ -924,6 +926,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
2941 + if (rt6_qualify_for_ecmp(iter)) {
2942 + *ins = iter->dst.rt6_next;
2943 + fib6_purge_rt(iter, fn, info->nl_net);
2944 ++ if (fn->rr_ptr == iter)
2945 ++ fn->rr_ptr = NULL;
2946 + rt6_release(iter);
2947 + nsiblings--;
2948 + } else {
2949 +@@ -1012,7 +1016,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
2950 + /* Create subtree root node */
2951 + sfn = node_alloc();
2952 + if (!sfn)
2953 +- goto st_failure;
2954 ++ goto failure;
2955 +
2956 + sfn->leaf = info->nl_net->ipv6.ip6_null_entry;
2957 + atomic_inc(&info->nl_net->ipv6.ip6_null_entry->rt6i_ref);
2958 +@@ -1028,12 +1032,12 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
2959 +
2960 + if (IS_ERR(sn)) {
2961 + /* If it failed, discard the just-allocated
2962 +- root, and then (in st_failure) stale node
2963 ++ root, and then (in failure) stale node
2964 + in main tree.
2965 + */
2966 + node_free(sfn);
2967 + err = PTR_ERR(sn);
2968 +- goto st_failure;
2969 ++ goto failure;
2970 + }
2971 +
2972 + /* Now link new subtree to main tree */
2973 +@@ -1047,7 +1051,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
2974 +
2975 + if (IS_ERR(sn)) {
2976 + err = PTR_ERR(sn);
2977 +- goto st_failure;
2978 ++ goto failure;
2979 + }
2980 + }
2981 +
2982 +@@ -1089,22 +1093,22 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
2983 + atomic_inc(&pn->leaf->rt6i_ref);
2984 + }
2985 + #endif
2986 +- if (!(rt->dst.flags & DST_NOCACHE))
2987 +- dst_free(&rt->dst);
2988 ++ goto failure;
2989 + }
2990 + return err;
2991 +
2992 +-#ifdef CONFIG_IPV6_SUBTREES
2993 +- /* Subtree creation failed, probably main tree node
2994 +- is orphan. If it is, shoot it.
2995 ++failure:
2996 ++ /* fn->leaf could be NULL if fn is an intermediate node and we
2997 ++ * failed to add the new route to it in both subtree creation
2998 ++ * failure and fib6_add_rt2node() failure case.
2999 ++ * In both cases, fib6_repair_tree() should be called to fix
3000 ++ * fn->leaf.
3001 + */
3002 +-st_failure:
3003 + if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)))
3004 + fib6_repair_tree(info->nl_net, fn);
3005 + if (!(rt->dst.flags & DST_NOCACHE))
3006 + dst_free(&rt->dst);
3007 + return err;
3008 +-#endif
3009 + }
3010 +
3011 + /*
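Two patterns run through the ip6_fib.c hunks: the duplicated st_failure label is folded into a single failure path, and fn->rr_ptr (the node's round-robin cursor) is cleared whenever the route it points at is purged, so later lookups cannot chase freed memory. A small userspace sketch of the clear-before-free pattern; the structures and names here are illustrative, not the kernel's:

    #include <stdio.h>
    #include <stdlib.h>

    struct route { int id; };

    struct fib_node {
        struct route *rr_ptr;  /* cached cursor that may alias a route */
    };

    /* Before freeing a route, drop any cached pointer to it so the
     * cache can never dangle. */
    static void purge_route(struct fib_node *fn, struct route *rt)
    {
        if (fn->rr_ptr == rt)
            fn->rr_ptr = NULL;
        free(rt);
    }

    int main(void)
    {
        struct fib_node fn = { 0 };
        struct route *rt = malloc(sizeof(*rt));

        fn.rr_ptr = rt;
        purge_route(&fn, rt);
        printf("rr_ptr %s\n", fn.rr_ptr ? "dangling" : "cleared");
        return 0;
    }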
3012 +diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
3013 +index 8d77ad5cadaf..4cadc29f547c 100644
3014 +--- a/net/irda/af_irda.c
3015 ++++ b/net/irda/af_irda.c
3016 +@@ -2225,7 +2225,7 @@ static int irda_getsockopt(struct socket *sock, int level, int optname,
3017 + {
3018 + struct sock *sk = sock->sk;
3019 + struct irda_sock *self = irda_sk(sk);
3020 +- struct irda_device_list list;
3021 ++ struct irda_device_list list = { 0 };
3022 + struct irda_device_info *discoveries;
3023 + struct irda_ias_set * ias_opt; /* IAS get/query params */
3024 + struct ias_object * ias_obj; /* Object in IAS */
3025 +diff --git a/net/key/af_key.c b/net/key/af_key.c
3026 +index b1432b668033..166e32c93038 100644
3027 +--- a/net/key/af_key.c
3028 ++++ b/net/key/af_key.c
3029 +@@ -228,7 +228,7 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
3030 + #define BROADCAST_ONE 1
3031 + #define BROADCAST_REGISTERED 2
3032 + #define BROADCAST_PROMISC_ONLY 4
3033 +-static int pfkey_broadcast(struct sk_buff *skb,
3034 ++static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
3035 + int broadcast_flags, struct sock *one_sk,
3036 + struct net *net)
3037 + {
3038 +@@ -278,7 +278,7 @@ static int pfkey_broadcast(struct sk_buff *skb,
3039 + rcu_read_unlock();
3040 +
3041 + if (one_sk != NULL)
3042 +- err = pfkey_broadcast_one(skb, &skb2, GFP_KERNEL, one_sk);
3043 ++ err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk);
3044 +
3045 + kfree_skb(skb2);
3046 + kfree_skb(skb);
3047 +@@ -311,7 +311,7 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
3048 + hdr = (struct sadb_msg *) pfk->dump.skb->data;
3049 + hdr->sadb_msg_seq = 0;
3050 + hdr->sadb_msg_errno = rc;
3051 +- pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
3052 ++ pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
3053 + &pfk->sk, sock_net(&pfk->sk));
3054 + pfk->dump.skb = NULL;
3055 + }
3056 +@@ -355,7 +355,7 @@ static int pfkey_error(const struct sadb_msg *orig, int err, struct sock *sk)
3057 + hdr->sadb_msg_len = (sizeof(struct sadb_msg) /
3058 + sizeof(uint64_t));
3059 +
3060 +- pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk));
3061 ++ pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ONE, sk, sock_net(sk));
3062 +
3063 + return 0;
3064 + }
3065 +@@ -1396,7 +1396,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_
3066 +
3067 + xfrm_state_put(x);
3068 +
3069 +- pfkey_broadcast(resp_skb, BROADCAST_ONE, sk, net);
3070 ++ pfkey_broadcast(resp_skb, GFP_KERNEL, BROADCAST_ONE, sk, net);
3071 +
3072 + return 0;
3073 + }
3074 +@@ -1483,7 +1483,7 @@ static int key_notify_sa(struct xfrm_state *x, const struct km_event *c)
3075 + hdr->sadb_msg_seq = c->seq;
3076 + hdr->sadb_msg_pid = c->portid;
3077 +
3078 +- pfkey_broadcast(skb, BROADCAST_ALL, NULL, xs_net(x));
3079 ++ pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xs_net(x));
3080 +
3081 + return 0;
3082 + }
3083 +@@ -1596,7 +1596,7 @@ static int pfkey_get(struct sock *sk, struct sk_buff *skb, const struct sadb_msg
3084 + out_hdr->sadb_msg_reserved = 0;
3085 + out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
3086 + out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
3087 +- pfkey_broadcast(out_skb, BROADCAST_ONE, sk, sock_net(sk));
3088 ++ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk));
3089 +
3090 + return 0;
3091 + }
3092 +@@ -1701,8 +1701,8 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sad
3093 + return -ENOBUFS;
3094 + }
3095 +
3096 +- pfkey_broadcast(supp_skb, BROADCAST_REGISTERED, sk, sock_net(sk));
3097 +-
3098 ++ pfkey_broadcast(supp_skb, GFP_KERNEL, BROADCAST_REGISTERED, sk,
3099 ++ sock_net(sk));
3100 + return 0;
3101 + }
3102 +
3103 +@@ -1720,7 +1720,8 @@ static int unicast_flush_resp(struct sock *sk, const struct sadb_msg *ihdr)
3104 + hdr->sadb_msg_errno = (uint8_t) 0;
3105 + hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
3106 +
3107 +- return pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk));
3108 ++ return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk,
3109 ++ sock_net(sk));
3110 + }
3111 +
3112 + static int key_notify_sa_flush(const struct km_event *c)
3113 +@@ -1741,7 +1742,7 @@ static int key_notify_sa_flush(const struct km_event *c)
3114 + hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
3115 + hdr->sadb_msg_reserved = 0;
3116 +
3117 +- pfkey_broadcast(skb, BROADCAST_ALL, NULL, c->net);
3118 ++ pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
3119 +
3120 + return 0;
3121 + }
3122 +@@ -1798,7 +1799,7 @@ static int dump_sa(struct xfrm_state *x, int count, void *ptr)
3123 + out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
3124 +
3125 + if (pfk->dump.skb)
3126 +- pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
3127 ++ pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
3128 + &pfk->sk, sock_net(&pfk->sk));
3129 + pfk->dump.skb = out_skb;
3130 +
3131 +@@ -1886,7 +1887,7 @@ static int pfkey_promisc(struct sock *sk, struct sk_buff *skb, const struct sadb
3132 + new_hdr->sadb_msg_errno = 0;
3133 + }
3134 +
3135 +- pfkey_broadcast(skb, BROADCAST_ALL, NULL, sock_net(sk));
3136 ++ pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ALL, NULL, sock_net(sk));
3137 + return 0;
3138 + }
3139 +
3140 +@@ -2219,7 +2220,7 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, const struct km_ev
3141 + out_hdr->sadb_msg_errno = 0;
3142 + out_hdr->sadb_msg_seq = c->seq;
3143 + out_hdr->sadb_msg_pid = c->portid;
3144 +- pfkey_broadcast(out_skb, BROADCAST_ALL, NULL, xp_net(xp));
3145 ++ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp));
3146 + return 0;
3147 +
3148 + }
3149 +@@ -2439,7 +2440,7 @@ static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, const struc
3150 + out_hdr->sadb_msg_errno = 0;
3151 + out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
3152 + out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
3153 +- pfkey_broadcast(out_skb, BROADCAST_ONE, sk, xp_net(xp));
3154 ++ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, xp_net(xp));
3155 + err = 0;
3156 +
3157 + out:
3158 +@@ -2695,7 +2696,7 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr)
3159 + out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
3160 +
3161 + if (pfk->dump.skb)
3162 +- pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
3163 ++ pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
3164 + &pfk->sk, sock_net(&pfk->sk));
3165 + pfk->dump.skb = out_skb;
3166 +
3167 +@@ -2752,7 +2753,7 @@ static int key_notify_policy_flush(const struct km_event *c)
3168 + hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
3169 + hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
3170 + hdr->sadb_msg_reserved = 0;
3171 +- pfkey_broadcast(skb_out, BROADCAST_ALL, NULL, c->net);
3172 ++ pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
3173 + return 0;
3174 +
3175 + }
3176 +@@ -2816,7 +2817,7 @@ static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb
3177 + void *ext_hdrs[SADB_EXT_MAX];
3178 + int err;
3179 +
3180 +- pfkey_broadcast(skb_clone(skb, GFP_KERNEL),
3181 ++ pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
3182 + BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
3183 +
3184 + memset(ext_hdrs, 0, sizeof(ext_hdrs));
3185 +@@ -3038,7 +3039,8 @@ static int key_notify_sa_expire(struct xfrm_state *x, const struct km_event *c)
3186 + out_hdr->sadb_msg_seq = 0;
3187 + out_hdr->sadb_msg_pid = 0;
3188 +
3189 +- pfkey_broadcast(out_skb, BROADCAST_REGISTERED, NULL, xs_net(x));
3190 ++ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
3191 ++ xs_net(x));
3192 + return 0;
3193 + }
3194 +
3195 +@@ -3228,7 +3230,8 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
3196 + xfrm_ctx->ctx_len);
3197 + }
3198 +
3199 +- return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x));
3200 ++ return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
3201 ++ xs_net(x));
3202 + }
3203 +
3204 + static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
3205 +@@ -3426,7 +3429,8 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
3206 + n_port->sadb_x_nat_t_port_port = sport;
3207 + n_port->sadb_x_nat_t_port_reserved = 0;
3208 +
3209 +- return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x));
3210 ++ return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
3211 ++ xs_net(x));
3212 + }
3213 +
3214 + #ifdef CONFIG_NET_KEY_MIGRATE
3215 +@@ -3618,7 +3622,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
3216 + }
3217 +
3218 + /* broadcast migrate message to sockets */
3219 +- pfkey_broadcast(skb, BROADCAST_ALL, NULL, &init_net);
3220 ++ pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, &init_net);
3221 +
3222 + return 0;
3223 +
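Every af_key.c hunk above has the same shape: pfkey_broadcast() gains a gfp_t parameter so each call site declares whether it may sleep (GFP_KERNEL, process context) or must not (GFP_ATOMIC, e.g. under locks or in state notifications), instead of the helper hard-coding GFP_KERNEL for everyone. A hedged userspace analogue of threading an allocation-context flag through a helper; the enum and function are invented for illustration:

    #include <stdlib.h>

    /* Stand-ins for the kernel's gfp_t contexts. */
    enum alloc_ctx { MAY_SLEEP, NO_SLEEP };

    /* The helper no longer assumes a sleepable caller: each call site
     * passes its own context down, the way the patch threads
     * 'allocation' into pfkey_broadcast_one(). */
    static void *broadcast_buf(size_t len, enum alloc_ctx ctx)
    {
        (void)ctx;  /* in the kernel this selects GFP_KERNEL/GFP_ATOMIC */
        return malloc(len);
    }

    int main(void)
    {
        void *a = broadcast_buf(64, MAY_SLEEP);  /* syscall path */
        void *b = broadcast_buf(64, NO_SLEEP);   /* notification path */
        free(a);
        free(b);
        return 0;
    }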
3224 +diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
3225 +index e03d16ed550d..899c2c36da13 100644
3226 +--- a/net/netfilter/nf_conntrack_expect.c
3227 ++++ b/net/netfilter/nf_conntrack_expect.c
3228 +@@ -422,7 +422,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
3229 + h = nf_ct_expect_dst_hash(net, &expect->tuple);
3230 + hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) {
3231 + if (expect_matches(i, expect)) {
3232 +- if (nf_ct_remove_expect(expect))
3233 ++ if (nf_ct_remove_expect(i))
3234 + break;
3235 + } else if (expect_clash(i, expect)) {
3236 + ret = -EBUSY;
3237 +diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
3238 +index 6c72922d20ca..b93a46ef812d 100644
3239 +--- a/net/netfilter/nf_nat_core.c
3240 ++++ b/net/netfilter/nf_nat_core.c
3241 +@@ -222,20 +222,21 @@ find_appropriate_src(struct net *net,
3242 + .tuple = tuple,
3243 + .zone = zone
3244 + };
3245 +- struct rhlist_head *hl;
3246 ++ struct rhlist_head *hl, *h;
3247 +
3248 + hl = rhltable_lookup(&nf_nat_bysource_table, &key,
3249 + nf_nat_bysource_params);
3250 +- if (!hl)
3251 +- return 0;
3252 +
3253 +- ct = container_of(hl, typeof(*ct), nat_bysource);
3254 ++ rhl_for_each_entry_rcu(ct, h, hl, nat_bysource) {
3255 ++ nf_ct_invert_tuplepr(result,
3256 ++ &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
3257 ++ result->dst = tuple->dst;
3258 +
3259 +- nf_ct_invert_tuplepr(result,
3260 +- &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
3261 +- result->dst = tuple->dst;
3262 ++ if (in_range(l3proto, l4proto, result, range))
3263 ++ return 1;
3264 ++ }
3265 +
3266 +- return in_range(l3proto, l4proto, result, range);
3267 ++ return 0;
3268 + }
3269 +
3270 + /* For [FUTURE] fragmentation handling, we want the least-used
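The nf_nat_core.c fix stops treating the head of the rhltable bucket as the only candidate: rhl_for_each_entry_rcu() walks every conntrack entry that hashed to the same source, and the first one whose inverted reply tuple falls in the requested range wins. The bug shape in plain C, with a toy list in place of the resizable hash list:

    #include <stdio.h>

    struct node { int val; struct node *next; };

    /* Scan the whole bucket, not just its head: several entries can
     * share a hash, and only some of them satisfy the range. */
    static int find_in_range(struct node *head, int lo, int hi, int *out)
    {
        for (struct node *n = head; n; n = n->next) {
            if (n->val >= lo && n->val <= hi) {
                *out = n->val;
                return 1;
            }
        }
        return 0;
    }

    int main(void)
    {
        struct node c = { 30, NULL }, b = { 99, &c }, a = { 5, &b };
        int v = -1;

        find_in_range(&a, 20, 40, &v);  /* head (5) fails, third entry hits */
        printf("match: %d\n", v);
        return 0;
    }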
3271 +diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
3272 +index 80f5ecf2c3d7..ff1f4ce6fba4 100644
3273 +--- a/net/netfilter/nfnetlink.c
3274 ++++ b/net/netfilter/nfnetlink.c
3275 +@@ -463,8 +463,7 @@ static void nfnetlink_rcv_skb_batch(struct sk_buff *skb, struct nlmsghdr *nlh)
3276 + if (msglen > skb->len)
3277 + msglen = skb->len;
3278 +
3279 +- if (nlh->nlmsg_len < NLMSG_HDRLEN ||
3280 +- skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg))
3281 ++ if (skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg))
3282 + return;
3283 +
3284 + err = nla_parse(cda, NFNL_BATCH_MAX, attr, attrlen, nfnl_batch_policy,
3285 +@@ -491,7 +490,8 @@ static void nfnetlink_rcv(struct sk_buff *skb)
3286 + {
3287 + struct nlmsghdr *nlh = nlmsg_hdr(skb);
3288 +
3289 +- if (nlh->nlmsg_len < NLMSG_HDRLEN ||
3290 ++ if (skb->len < NLMSG_HDRLEN ||
3291 ++ nlh->nlmsg_len < NLMSG_HDRLEN ||
3292 + skb->len < nlh->nlmsg_len)
3293 + return;
3294 +
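Both nfnetlink.c hunks are about validation order: the buffer must be known to hold a complete netlink header before any field of that header is trusted, and a header's claimed length must fit inside the buffer before it is used. A userspace sketch of the same ordering, with an invented message header rather than struct nlmsghdr:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct msg_hdr { uint32_t len; uint16_t type; uint16_t flags; };

    static int msg_ok(const unsigned char *buf, size_t buflen)
    {
        struct msg_hdr h;

        if (buflen < sizeof(h))   /* analogue of skb->len < NLMSG_HDRLEN */
            return 0;             /* too short to even read a header */
        memcpy(&h, buf, sizeof(h));
        if (h.len < sizeof(h))    /* claimed length smaller than a header */
            return 0;
        if (buflen < h.len)       /* claimed length overruns the buffer */
            return 0;
        return 1;
    }

    int main(void)
    {
        unsigned char buf[4] = { 0 };
        printf("valid=%d\n", msg_ok(buf, sizeof(buf)));
        return 0;
    }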
3295 +diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
3296 +index e4610676299b..a54a556fcdb5 100644
3297 +--- a/net/openvswitch/actions.c
3298 ++++ b/net/openvswitch/actions.c
3299 +@@ -1337,6 +1337,7 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
3300 + goto out;
3301 + }
3302 +
3303 ++ OVS_CB(skb)->acts_origlen = acts->orig_len;
3304 + err = do_execute_actions(dp, skb, key,
3305 + acts->actions, acts->actions_len);
3306 +
3307 +diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
3308 +index 7b17da9a94a0..57ce10b6cf6b 100644
3309 +--- a/net/openvswitch/datapath.c
3310 ++++ b/net/openvswitch/datapath.c
3311 +@@ -381,7 +381,7 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
3312 + }
3313 +
3314 + static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
3315 +- unsigned int hdrlen)
3316 ++ unsigned int hdrlen, int actions_attrlen)
3317 + {
3318 + size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
3319 + + nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
3320 +@@ -398,7 +398,7 @@ static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
3321 +
3322 + /* OVS_PACKET_ATTR_ACTIONS */
3323 + if (upcall_info->actions_len)
3324 +- size += nla_total_size(upcall_info->actions_len);
3325 ++ size += nla_total_size(actions_attrlen);
3326 +
3327 + /* OVS_PACKET_ATTR_MRU */
3328 + if (upcall_info->mru)
3329 +@@ -465,7 +465,8 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
3330 + else
3331 + hlen = skb->len;
3332 +
3333 +- len = upcall_msg_size(upcall_info, hlen - cutlen);
3334 ++ len = upcall_msg_size(upcall_info, hlen - cutlen,
3335 ++ OVS_CB(skb)->acts_origlen);
3336 + user_skb = genlmsg_new(len, GFP_ATOMIC);
3337 + if (!user_skb) {
3338 + err = -ENOMEM;
3339 +diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
3340 +index da931bdef8a7..98a28f78aff2 100644
3341 +--- a/net/openvswitch/datapath.h
3342 ++++ b/net/openvswitch/datapath.h
3343 +@@ -98,12 +98,14 @@ struct datapath {
3344 + * @input_vport: The original vport packet came in on. This value is cached
3345 + * when a packet is received by OVS.
3346 + * @mru: The maximum received fragement size; 0 if the packet is not
3347 ++ * @acts_origlen: The netlink size of the flow actions applied to this skb.
3348 + * @cutlen: The number of bytes from the packet end to be removed.
3349 + * fragmented.
3350 + */
3351 + struct ovs_skb_cb {
3352 + struct vport *input_vport;
3353 + u16 mru;
3354 ++ u16 acts_origlen;
3355 + u32 cutlen;
3356 + };
3357 + #define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb)
3358 +diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
3359 +index d516ba8178b8..541707802a23 100644
3360 +--- a/net/sched/act_ipt.c
3361 ++++ b/net/sched/act_ipt.c
3362 +@@ -41,6 +41,7 @@ static int ipt_init_target(struct net *net, struct xt_entry_target *t,
3363 + {
3364 + struct xt_tgchk_param par;
3365 + struct xt_target *target;
3366 ++ struct ipt_entry e = {};
3367 + int ret = 0;
3368 +
3369 + target = xt_request_find_target(AF_INET, t->u.user.name,
3370 +@@ -52,6 +53,7 @@ static int ipt_init_target(struct net *net, struct xt_entry_target *t,
3371 + memset(&par, 0, sizeof(par));
3372 + par.net = net;
3373 + par.table = table;
3374 ++ par.entryinfo = &e;
3375 + par.target = target;
3376 + par.targinfo = t->data;
3377 + par.hook_mask = hook;
3378 +diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
3379 +index cfdbfa18a95e..fdbbdfd8e9a8 100644
3380 +--- a/net/sched/sch_api.c
3381 ++++ b/net/sched/sch_api.c
3382 +@@ -286,9 +286,6 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
3383 + void qdisc_hash_add(struct Qdisc *q, bool invisible)
3384 + {
3385 + if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
3386 +- struct Qdisc *root = qdisc_dev(q)->qdisc;
3387 +-
3388 +- WARN_ON_ONCE(root == &noop_qdisc);
3389 + ASSERT_RTNL();
3390 + hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
3391 + if (invisible)
3392 +diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
3393 +index 332d94be6e1c..22451a9eb89d 100644
3394 +--- a/net/sched/sch_sfq.c
3395 ++++ b/net/sched/sch_sfq.c
3396 +@@ -435,6 +435,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
3397 + qdisc_drop(head, sch, to_free);
3398 +
3399 + slot_queue_add(slot, skb);
3400 ++ qdisc_tree_reduce_backlog(sch, 0, delta);
3401 + return NET_XMIT_CN;
3402 + }
3403 +
3404 +@@ -466,8 +467,10 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
3405 + /* Return Congestion Notification only if we dropped a packet
3406 + * from this flow.
3407 + */
3408 +- if (qlen != slot->qlen)
3409 ++ if (qlen != slot->qlen) {
3410 ++ qdisc_tree_reduce_backlog(sch, 0, dropped - qdisc_pkt_len(skb));
3411 + return NET_XMIT_CN;
3412 ++ }
3413 +
3414 + /* As we dropped a packet, better let upper stack know this */
3415 + qdisc_tree_reduce_backlog(sch, 1, dropped);
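The sch_sfq.c hunks make the qdisc tell its ancestors about bytes it sheds during enqueue, via qdisc_tree_reduce_backlog(), so the backlog counters of parent qdiscs do not drift from reality. A toy model of that bookkeeping; the real helper walks the actual qdisc hierarchy, this only models the counters:

    #include <stdio.h>

    struct tq {
        unsigned int backlog;  /* bytes queued here and below */
        struct tq *parent;
    };

    /* Toy qdisc_tree_reduce_backlog(): propagate a drop's byte count
     * to every ancestor so the whole tree stays consistent. */
    static void tree_reduce_backlog(struct tq *q, unsigned int bytes)
    {
        for (struct tq *p = q->parent; p; p = p->parent)
            p->backlog -= bytes;
    }

    int main(void)
    {
        struct tq root = { .backlog = 3000, .parent = NULL };
        struct tq sfq  = { .backlog = 3000, .parent = &root };

        sfq.backlog -= 1500;              /* sfq head-drops a 1500-byte skb */
        tree_reduce_backlog(&sfq, 1500);  /* the call the patch adds, in spirit */

        printf("root=%u sfq=%u\n", root.backlog, sfq.backlog);
        return 0;
    }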
3416 +diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
3417 +index f5b45b8b8b16..0de5f5f8ddbc 100644
3418 +--- a/net/sctp/ipv6.c
3419 ++++ b/net/sctp/ipv6.c
3420 +@@ -510,7 +510,9 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
3421 + {
3422 + addr->sa.sa_family = AF_INET6;
3423 + addr->v6.sin6_port = port;
3424 ++ addr->v6.sin6_flowinfo = 0;
3425 + addr->v6.sin6_addr = *saddr;
3426 ++ addr->v6.sin6_scope_id = 0;
3427 + }
3428 +
3429 + /* Compare addresses exactly.
3430 +diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
3431 +index 2b720fa35c4f..e18500151236 100644
3432 +--- a/net/sunrpc/svcsock.c
3433 ++++ b/net/sunrpc/svcsock.c
3434 +@@ -421,6 +421,9 @@ static void svc_data_ready(struct sock *sk)
3435 + dprintk("svc: socket %p(inet %p), busy=%d\n",
3436 + svsk, sk,
3437 + test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
3438 ++
3439 ++ /* Refer to svc_setup_socket() for details. */
3440 ++ rmb();
3441 + svsk->sk_odata(sk);
3442 + if (!test_and_set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags))
3443 + svc_xprt_enqueue(&svsk->sk_xprt);
3444 +@@ -437,6 +440,9 @@ static void svc_write_space(struct sock *sk)
3445 + if (svsk) {
3446 + dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
3447 + svsk, sk, test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
3448 ++
3449 ++ /* Refer to svc_setup_socket() for details. */
3450 ++ rmb();
3451 + svsk->sk_owspace(sk);
3452 + svc_xprt_enqueue(&svsk->sk_xprt);
3453 + }
3454 +@@ -760,8 +766,12 @@ static void svc_tcp_listen_data_ready(struct sock *sk)
3455 + dprintk("svc: socket %p TCP (listen) state change %d\n",
3456 + sk, sk->sk_state);
3457 +
3458 +- if (svsk)
3459 ++ if (svsk) {
3460 ++ /* Refer to svc_setup_socket() for details. */
3461 ++ rmb();
3462 + svsk->sk_odata(sk);
3463 ++ }
3464 ++
3465 + /*
3466 + * This callback may called twice when a new connection
3467 + * is established as a child socket inherits everything
3468 +@@ -794,6 +804,8 @@ static void svc_tcp_state_change(struct sock *sk)
3469 + if (!svsk)
3470 + printk("svc: socket %p: no user data\n", sk);
3471 + else {
3472 ++ /* Refer to svc_setup_socket() for details. */
3473 ++ rmb();
3474 + svsk->sk_ostate(sk);
3475 + if (sk->sk_state != TCP_ESTABLISHED) {
3476 + set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
3477 +@@ -1381,12 +1393,18 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
3478 + return ERR_PTR(err);
3479 + }
3480 +
3481 +- inet->sk_user_data = svsk;
3482 + svsk->sk_sock = sock;
3483 + svsk->sk_sk = inet;
3484 + svsk->sk_ostate = inet->sk_state_change;
3485 + svsk->sk_odata = inet->sk_data_ready;
3486 + svsk->sk_owspace = inet->sk_write_space;
3487 ++ /*
3488 ++ * This barrier is necessary in order to prevent race condition
3489 ++ * with svc_data_ready(), svc_listen_data_ready() and others
3490 ++ * when calling callbacks above.
3491 ++ */
3492 ++ wmb();
3493 ++ inet->sk_user_data = svsk;
3494 +
3495 + /* Initialize the socket */
3496 + if (sock->type == SOCK_DGRAM)
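The svcsock.c change is a classic publish/subscribe ordering fix: svc_setup_socket() now saves the original socket callbacks first, issues wmb(), and only then publishes svsk through sk_user_data; every callback that finds a non-NULL svsk issues the pairing rmb() before touching the saved pointers. In userspace C11 the same guarantee is usually spelled release/acquire; a sketch, with invented names:

    #include <stdatomic.h>
    #include <stdio.h>

    struct svc_like {
        void (*saved_cb)(void);
    };

    static void orig_cb(void) { puts("original callback"); }

    static struct svc_like svsk;
    static _Atomic(struct svc_like *) user_data;  /* plays sk_user_data */

    /* Publisher: initialize fully, then publish with release order
     * (the analogue of wmb() before storing sk_user_data). */
    static void setup(void)
    {
        svsk.saved_cb = orig_cb;
        atomic_store_explicit(&user_data, &svsk, memory_order_release);
    }

    /* Consumer: acquire pairs with the release (the analogue of the
     * rmb() the patch adds before using the saved callbacks). */
    static void data_ready(void)
    {
        struct svc_like *p =
            atomic_load_explicit(&user_data, memory_order_acquire);
        if (p)
            p->saved_cb();  /* guaranteed to see the initialized field */
    }

    int main(void)
    {
        setup();
        data_ready();
        return 0;
    }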
3497 +diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
3498 +index 9bfe886ab330..750949dfc1d7 100644
3499 +--- a/net/tipc/netlink_compat.c
3500 ++++ b/net/tipc/netlink_compat.c
3501 +@@ -258,13 +258,15 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
3502 + arg = nlmsg_new(0, GFP_KERNEL);
3503 + if (!arg) {
3504 + kfree_skb(msg->rep);
3505 ++ msg->rep = NULL;
3506 + return -ENOMEM;
3507 + }
3508 +
3509 + err = __tipc_nl_compat_dumpit(cmd, msg, arg);
3510 +- if (err)
3511 ++ if (err) {
3512 + kfree_skb(msg->rep);
3513 +-
3514 ++ msg->rep = NULL;
3515 ++ }
3516 + kfree_skb(arg);
3517 +
3518 + return err;
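The netlink_compat.c fix is the free-and-NULL idiom: msg->rep is freed on two error paths, and the caller may free it again later, so the pointer is nulled immediately after each kfree_skb() to make the second free a no-op. The same pattern in plain C:

    #include <stdio.h>
    #include <stdlib.h>

    struct msg { char *rep; };

    /* After releasing an error-path buffer, null the owning pointer
     * so any later cleanup cannot free it twice. */
    static void drop_reply(struct msg *m)
    {
        free(m->rep);
        m->rep = NULL;  /* the line the patch adds, in spirit */
    }

    int main(void)
    {
        struct msg m = { malloc(16) };

        drop_reply(&m);
        drop_reply(&m);  /* harmless now: free(NULL) is a no-op */
        printf("rep=%p\n", (void *)m.rep);
        return 0;
    }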
3519 +diff --git a/sound/core/control.c b/sound/core/control.c
3520 +index c109b82eef4b..7b43b0f74b84 100644
3521 +--- a/sound/core/control.c
3522 ++++ b/sound/core/control.c
3523 +@@ -1157,7 +1157,7 @@ static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol,
3524 + mutex_lock(&ue->card->user_ctl_lock);
3525 + change = ue->tlv_data_size != size;
3526 + if (!change)
3527 +- change = memcmp(ue->tlv_data, new_data, size);
3528 ++ change = memcmp(ue->tlv_data, new_data, size) != 0;
3529 + kfree(ue->tlv_data);
3530 + ue->tlv_data = new_data;
3531 + ue->tlv_data_size = size;
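The control.c one-liner matters because of the return convention these callbacks appear to follow (negative for error, 0 for no change, positive for changed): memcmp() may return any negative or positive value, so assigning it directly could report a legitimate difference as an error. Normalizing with != 0 yields a clean 0/1. A tiny demonstration:

    #include <stdio.h>
    #include <string.h>

    /* Mimics the callback contract: <0 error, 0 unchanged, 1 changed. */
    static int put_value(const char *old, const char *new, size_t len)
    {
        /* Buggy:  int change = memcmp(old, new, len);
         * memcmp("abd", "abc", 3) is positive, but ("abc", "abd", 3)
         * is negative and would be misread as an error code. */
        return memcmp(old, new, len) != 0;
    }

    int main(void)
    {
        printf("%d\n", put_value("abc", "abd", 3));  /* 1: changed */
        printf("%d\n", put_value("abc", "abc", 3));  /* 0: unchanged */
        return 0;
    }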
3532 +diff --git a/sound/firewire/iso-resources.c b/sound/firewire/iso-resources.c
3533 +index f0e4d502d604..066b5df666f4 100644
3534 +--- a/sound/firewire/iso-resources.c
3535 ++++ b/sound/firewire/iso-resources.c
3536 +@@ -210,9 +210,14 @@ EXPORT_SYMBOL(fw_iso_resources_update);
3537 + */
3538 + void fw_iso_resources_free(struct fw_iso_resources *r)
3539 + {
3540 +- struct fw_card *card = fw_parent_device(r->unit)->card;
3541 ++ struct fw_card *card;
3542 + int bandwidth, channel;
3543 +
3544 ++ /* Not initialized. */
3545 ++ if (r->unit == NULL)
3546 ++ return;
3547 ++ card = fw_parent_device(r->unit)->card;
3548 ++
3549 + mutex_lock(&r->mutex);
3550 +
3551 + if (r->allocated) {
3552 +diff --git a/sound/firewire/motu/motu.c b/sound/firewire/motu/motu.c
3553 +index bf779cfeef0d..59a270406353 100644
3554 +--- a/sound/firewire/motu/motu.c
3555 ++++ b/sound/firewire/motu/motu.c
3556 +@@ -128,6 +128,7 @@ static void do_registration(struct work_struct *work)
3557 + return;
3558 + error:
3559 + snd_motu_transaction_unregister(motu);
3560 ++ snd_motu_stream_destroy_duplex(motu);
3561 + snd_card_free(motu->card);
3562 + dev_info(&motu->unit->device,
3563 + "Sound card registration failed: %d\n", err);
3564 +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
3565 +index 8c1289963c80..a81aacf684b2 100644
3566 +--- a/sound/pci/hda/patch_conexant.c
3567 ++++ b/sound/pci/hda/patch_conexant.c
3568 +@@ -947,6 +947,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
3569 + SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC),
3570 + SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
3571 + SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
3572 ++ SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo G50-70", CXT_FIXUP_STEREO_DMIC),
3573 + SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
3574 + SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI),
3575 + SND_PCI_QUIRK(0x1c06, 0x2011, "Lemote A1004", CXT_PINCFG_LEMOTE_A1004),
3576 +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
3577 +index 6a03f9697039..5d2a63248b1d 100644
3578 +--- a/sound/usb/quirks.c
3579 ++++ b/sound/usb/quirks.c
3580 +@@ -1309,10 +1309,13 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
3581 + && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
3582 + mdelay(20);
3583 +
3584 +- /* Zoom R16/24 needs a tiny delay here, otherwise requests like
3585 +- * get/set frequency return as failed despite actually succeeding.
3586 ++ /* Zoom R16/24, Logitech H650e, Jabra 550a needs a tiny delay here,
3587 ++ * otherwise requests like get/set frequency return as failed despite
3588 ++ * actually succeeding.
3589 + */
3590 +- if (chip->usb_id == USB_ID(0x1686, 0x00dd) &&
3591 ++ if ((chip->usb_id == USB_ID(0x1686, 0x00dd) ||
3592 ++ chip->usb_id == USB_ID(0x046d, 0x0a46) ||
3593 ++ chip->usb_id == USB_ID(0x0b0e, 0x0349)) &&
3594 + (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
3595 + mdelay(1);
3596 + }
3597 +diff --git a/tools/testing/selftests/ntb/ntb_test.sh b/tools/testing/selftests/ntb/ntb_test.sh
3598 +index 13f5198ba0ee..b3c48fc6ea4b 100755
3599 +--- a/tools/testing/selftests/ntb/ntb_test.sh
3600 ++++ b/tools/testing/selftests/ntb/ntb_test.sh
3601 +@@ -326,6 +326,10 @@ function ntb_tool_tests()
3602 + link_test $LOCAL_TOOL $REMOTE_TOOL
3603 + link_test $REMOTE_TOOL $LOCAL_TOOL
3604 +
3605 ++ #Ensure the link is up on both sides before continuing
3606 ++ write_file Y $LOCAL_TOOL/link_event
3607 ++ write_file Y $REMOTE_TOOL/link_event
3608 ++
3609 + for PEER_TRANS in $(ls $LOCAL_TOOL/peer_trans*); do
3610 + PT=$(basename $PEER_TRANS)
3611 + write_file $MW_SIZE $LOCAL_TOOL/$PT