Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Tue, 29 Oct 2019 14:00:04
Message-Id: 1572357542.6ba39296510cc9d89f543c0ed736ff502c7963ec.mpagano@gentoo
1 commit: 6ba39296510cc9d89f543c0ed736ff502c7963ec
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Sun Aug 25 17:35:46 2019 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Tue Oct 29 13:59:02 2019 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=6ba39296
7
8 Linux patch 4.14.140
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1139_linux-4.14.140.patch | 2636 +++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 2640 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 9510e96..46d7bd2 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -599,6 +599,10 @@ Patch: 1138_linux-4.14.139.patch
21 From: https://www.kernel.org
22 Desc: Linux 4.14.139
23
24 +Patch: 1139_linux-4.14.140.patch
25 +From: https://www.kernel.org
26 +Desc: Linux 4.14.140
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1139_linux-4.14.140.patch b/1139_linux-4.14.140.patch
33 new file mode 100644
34 index 0000000..cc77c13
35 --- /dev/null
36 +++ b/1139_linux-4.14.140.patch
37 @@ -0,0 +1,2636 @@
38 +diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt
39 +index b67044a2575f..e12b39f40a6b 100644
40 +--- a/Documentation/sysctl/net.txt
41 ++++ b/Documentation/sysctl/net.txt
42 +@@ -91,6 +91,14 @@ Values :
43 + 0 - disable JIT kallsyms export (default value)
44 + 1 - enable JIT kallsyms export for privileged users only
45 +
46 ++bpf_jit_limit
47 ++-------------
48 ++
49 ++This enforces a global limit for memory allocations to the BPF JIT
50 ++compiler in order to reject unprivileged JIT requests once it has
51 ++been surpassed. bpf_jit_limit contains the value of the global limit
52 ++in bytes.
53 ++
54 + dev_weight
55 + --------------
56 +
57 +diff --git a/Makefile b/Makefile
58 +index 3ccf48b2714a..be7290af771e 100644
59 +--- a/Makefile
60 ++++ b/Makefile
61 +@@ -1,7 +1,7 @@
62 + # SPDX-License-Identifier: GPL-2.0
63 + VERSION = 4
64 + PATCHLEVEL = 14
65 +-SUBLEVEL = 139
66 ++SUBLEVEL = 140
67 + EXTRAVERSION =
68 + NAME = Petit Gorille
69 +
70 +diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
71 +index dafeb5f81353..b18fb70c5dcf 100644
72 +--- a/arch/arm/net/bpf_jit_32.c
73 ++++ b/arch/arm/net/bpf_jit_32.c
74 +@@ -25,8 +25,6 @@
75 +
76 + #include "bpf_jit_32.h"
77 +
78 +-int bpf_jit_enable __read_mostly;
79 +-
80 + /*
81 + * eBPF prog stack layout:
82 + *
83 +diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
84 +index 8389050328bb..558542086069 100644
85 +--- a/arch/arm64/include/asm/efi.h
86 ++++ b/arch/arm64/include/asm/efi.h
87 +@@ -89,7 +89,11 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
88 + ((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
89 +
90 + #define alloc_screen_info(x...) &screen_info
91 +-#define free_screen_info(x...)
92 ++
93 ++static inline void free_screen_info(efi_system_table_t *sys_table_arg,
94 ++ struct screen_info *si)
95 ++{
96 ++}
97 +
98 + /* redeclare as 'hidden' so the compiler will generate relative references */
99 + extern struct screen_info screen_info __attribute__((__visibility__("hidden")));
100 +diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
101 +index ee77556b0124..4cf248185e6f 100644
102 +--- a/arch/arm64/include/asm/pgtable.h
103 ++++ b/arch/arm64/include/asm/pgtable.h
104 +@@ -394,8 +394,8 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
105 + PMD_TYPE_SECT)
106 +
107 + #if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
108 +-#define pud_sect(pud) (0)
109 +-#define pud_table(pud) (1)
110 ++static inline bool pud_sect(pud_t pud) { return false; }
111 ++static inline bool pud_table(pud_t pud) { return true; }
112 + #else
113 + #define pud_sect(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \
114 + PUD_TYPE_SECT)
115 +diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
116 +index 50986e388d2b..fac79d75d1d9 100644
117 +--- a/arch/arm64/kernel/ftrace.c
118 ++++ b/arch/arm64/kernel/ftrace.c
119 +@@ -76,7 +76,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
120 +
121 + if (offset < -SZ_128M || offset >= SZ_128M) {
122 + #ifdef CONFIG_ARM64_MODULE_PLTS
123 +- struct plt_entry trampoline;
124 ++ struct plt_entry trampoline, *dst;
125 + struct module *mod;
126 +
127 + /*
128 +@@ -104,24 +104,27 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
129 + * is added in the future, but for now, the pr_err() below
130 + * deals with a theoretical issue only.
131 + */
132 ++ dst = mod->arch.ftrace_trampoline;
133 + trampoline = get_plt_entry(addr);
134 +- if (!plt_entries_equal(mod->arch.ftrace_trampoline,
135 +- &trampoline)) {
136 +- if (!plt_entries_equal(mod->arch.ftrace_trampoline,
137 +- &(struct plt_entry){})) {
138 ++ if (!plt_entries_equal(dst, &trampoline)) {
139 ++ if (!plt_entries_equal(dst, &(struct plt_entry){})) {
140 + pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
141 + return -EINVAL;
142 + }
143 +
144 + /* point the trampoline to our ftrace entry point */
145 + module_disable_ro(mod);
146 +- *mod->arch.ftrace_trampoline = trampoline;
147 ++ *dst = trampoline;
148 + module_enable_ro(mod, true);
149 +
150 +- /* update trampoline before patching in the branch */
151 +- smp_wmb();
152 ++ /*
153 ++ * Ensure updated trampoline is visible to instruction
154 ++ * fetch before we patch in the branch.
155 ++ */
156 ++ flush_icache_range((unsigned long)&dst[0],
157 ++ (unsigned long)&dst[1]);
158 + }
159 +- addr = (unsigned long)(void *)mod->arch.ftrace_trampoline;
160 ++ addr = (unsigned long)dst;
161 + #else /* CONFIG_ARM64_MODULE_PLTS */
162 + return -EINVAL;
163 + #endif /* CONFIG_ARM64_MODULE_PLTS */
164 +diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
165 +index 749f81779420..95697a9c1245 100644
166 +--- a/arch/arm64/kernel/hw_breakpoint.c
167 ++++ b/arch/arm64/kernel/hw_breakpoint.c
168 +@@ -548,13 +548,14 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
169 + /* Aligned */
170 + break;
171 + case 1:
172 +- /* Allow single byte watchpoint. */
173 +- if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
174 +- break;
175 + case 2:
176 + /* Allow halfword watchpoints and breakpoints. */
177 + if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
178 + break;
179 ++ case 3:
180 ++ /* Allow single byte watchpoint. */
181 ++ if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
182 ++ break;
183 + default:
184 + return -EINVAL;
185 + }
186 +diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
187 +index 933adbc0f654..0311fe52c8ff 100644
188 +--- a/arch/arm64/kernel/return_address.c
189 ++++ b/arch/arm64/kernel/return_address.c
190 +@@ -11,6 +11,7 @@
191 +
192 + #include <linux/export.h>
193 + #include <linux/ftrace.h>
194 ++#include <linux/kprobes.h>
195 +
196 + #include <asm/stack_pointer.h>
197 + #include <asm/stacktrace.h>
198 +@@ -32,6 +33,7 @@ static int save_return_addr(struct stackframe *frame, void *d)
199 + return 0;
200 + }
201 + }
202 ++NOKPROBE_SYMBOL(save_return_addr);
203 +
204 + void *return_address(unsigned int level)
205 + {
206 +@@ -55,3 +57,4 @@ void *return_address(unsigned int level)
207 + return NULL;
208 + }
209 + EXPORT_SYMBOL_GPL(return_address);
210 ++NOKPROBE_SYMBOL(return_address);
211 +diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
212 +index d5718a060672..2ae7630d685b 100644
213 +--- a/arch/arm64/kernel/stacktrace.c
214 ++++ b/arch/arm64/kernel/stacktrace.c
215 +@@ -18,6 +18,7 @@
216 + #include <linux/kernel.h>
217 + #include <linux/export.h>
218 + #include <linux/ftrace.h>
219 ++#include <linux/kprobes.h>
220 + #include <linux/sched.h>
221 + #include <linux/sched/debug.h>
222 + #include <linux/sched/task_stack.h>
223 +@@ -85,6 +86,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
224 +
225 + return 0;
226 + }
227 ++NOKPROBE_SYMBOL(unwind_frame);
228 +
229 + void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
230 + int (*fn)(struct stackframe *, void *), void *data)
231 +@@ -99,6 +101,7 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
232 + break;
233 + }
234 + }
235 ++NOKPROBE_SYMBOL(walk_stackframe);
236 +
237 + #ifdef CONFIG_STACKTRACE
238 + struct stack_trace_data {
239 +diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
240 +index b742171bfef7..1bbb457c293f 100644
241 +--- a/arch/arm64/net/bpf_jit_comp.c
242 ++++ b/arch/arm64/net/bpf_jit_comp.c
243 +@@ -31,8 +31,6 @@
244 +
245 + #include "bpf_jit.h"
246 +
247 +-int bpf_jit_enable __read_mostly;
248 +-
249 + #define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
250 + #define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
251 + #define TCALL_CNT (MAX_BPF_JIT_REG + 2)
252 +diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
253 +index 44b925005dd3..4d8cb9bb8365 100644
254 +--- a/arch/mips/net/bpf_jit.c
255 ++++ b/arch/mips/net/bpf_jit.c
256 +@@ -1207,8 +1207,6 @@ jmp_cmp:
257 + return 0;
258 + }
259 +
260 +-int bpf_jit_enable __read_mostly;
261 +-
262 + void bpf_jit_compile(struct bpf_prog *fp)
263 + {
264 + struct jit_ctx ctx;
265 +diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
266 +index 8004bfcfb033..42faa95ce664 100644
267 +--- a/arch/mips/net/ebpf_jit.c
268 ++++ b/arch/mips/net/ebpf_jit.c
269 +@@ -177,8 +177,6 @@ static u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
270 + (ctx->idx * 4) - 4;
271 + }
272 +
273 +-int bpf_jit_enable __read_mostly;
274 +-
275 + enum which_ebpf_reg {
276 + src_reg,
277 + src_reg_no_fp,
278 +diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
279 +index f760494ecd66..a9636d8cba15 100644
280 +--- a/arch/powerpc/net/bpf_jit_comp.c
281 ++++ b/arch/powerpc/net/bpf_jit_comp.c
282 +@@ -18,8 +18,6 @@
283 +
284 + #include "bpf_jit32.h"
285 +
286 +-int bpf_jit_enable __read_mostly;
287 +-
288 + static inline void bpf_flush_icache(void *start, void *end)
289 + {
290 + smp_wmb();
291 +diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
292 +index 70e8216a39f0..28434040cfb6 100644
293 +--- a/arch/powerpc/net/bpf_jit_comp64.c
294 ++++ b/arch/powerpc/net/bpf_jit_comp64.c
295 +@@ -21,8 +21,6 @@
296 +
297 + #include "bpf_jit64.h"
298 +
299 +-int bpf_jit_enable __read_mostly;
300 +-
301 + static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
302 + {
303 + memset32(area, BREAKPOINT_INSTRUCTION, size/4);
304 +diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
305 +index 6b1474fa99ab..bc9431aace05 100644
306 +--- a/arch/s390/net/bpf_jit_comp.c
307 ++++ b/arch/s390/net/bpf_jit_comp.c
308 +@@ -30,8 +30,6 @@
309 + #include <asm/set_memory.h>
310 + #include "bpf_jit.h"
311 +
312 +-int bpf_jit_enable __read_mostly;
313 +-
314 + struct bpf_jit {
315 + u32 seen; /* Flags to remember seen eBPF instructions */
316 + u32 seen_reg[16]; /* Array to remember which registers are used */
317 +diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c
318 +index afe965712a69..dea2e23520e0 100644
319 +--- a/arch/sh/kernel/hw_breakpoint.c
320 ++++ b/arch/sh/kernel/hw_breakpoint.c
321 +@@ -161,6 +161,7 @@ int arch_bp_generic_fields(int sh_len, int sh_type,
322 + switch (sh_type) {
323 + case SH_BREAKPOINT_READ:
324 + *gen_type = HW_BREAKPOINT_R;
325 ++ break;
326 + case SH_BREAKPOINT_WRITE:
327 + *gen_type = HW_BREAKPOINT_W;
328 + break;
329 +diff --git a/arch/sparc/net/bpf_jit_comp_32.c b/arch/sparc/net/bpf_jit_comp_32.c
330 +index 09e318eb34ee..3bd8ca95e521 100644
331 +--- a/arch/sparc/net/bpf_jit_comp_32.c
332 ++++ b/arch/sparc/net/bpf_jit_comp_32.c
333 +@@ -11,8 +11,6 @@
334 +
335 + #include "bpf_jit_32.h"
336 +
337 +-int bpf_jit_enable __read_mostly;
338 +-
339 + static inline bool is_simm13(unsigned int value)
340 + {
341 + return value + 0x1000 < 0x2000;
342 +diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c
343 +index ff5f9cb3039a..adfb4581bd80 100644
344 +--- a/arch/sparc/net/bpf_jit_comp_64.c
345 ++++ b/arch/sparc/net/bpf_jit_comp_64.c
346 +@@ -12,8 +12,6 @@
347 +
348 + #include "bpf_jit_64.h"
349 +
350 +-int bpf_jit_enable __read_mostly;
351 +-
352 + static inline bool is_simm13(unsigned int value)
353 + {
354 + return value + 0x1000 < 0x2000;
355 +diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
356 +index ef938583147e..3a33de4133d1 100644
357 +--- a/arch/x86/include/asm/pgtable_64.h
358 ++++ b/arch/x86/include/asm/pgtable_64.h
359 +@@ -56,15 +56,15 @@ struct mm_struct;
360 + void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte);
361 + void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
362 +
363 +-static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
364 +- pte_t *ptep)
365 ++static inline void native_set_pte(pte_t *ptep, pte_t pte)
366 + {
367 +- *ptep = native_make_pte(0);
368 ++ WRITE_ONCE(*ptep, pte);
369 + }
370 +
371 +-static inline void native_set_pte(pte_t *ptep, pte_t pte)
372 ++static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
373 ++ pte_t *ptep)
374 + {
375 +- *ptep = pte;
376 ++ native_set_pte(ptep, native_make_pte(0));
377 + }
378 +
379 + static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
380 +@@ -74,7 +74,7 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
381 +
382 + static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
383 + {
384 +- *pmdp = pmd;
385 ++ WRITE_ONCE(*pmdp, pmd);
386 + }
387 +
388 + static inline void native_pmd_clear(pmd_t *pmd)
389 +@@ -110,7 +110,7 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
390 +
391 + static inline void native_set_pud(pud_t *pudp, pud_t pud)
392 + {
393 +- *pudp = pud;
394 ++ WRITE_ONCE(*pudp, pud);
395 + }
396 +
397 + static inline void native_pud_clear(pud_t *pud)
398 +@@ -220,9 +220,9 @@ static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
399 + static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
400 + {
401 + #if defined(CONFIG_PAGE_TABLE_ISOLATION) && !defined(CONFIG_X86_5LEVEL)
402 +- p4dp->pgd = pti_set_user_pgd(&p4dp->pgd, p4d.pgd);
403 ++ WRITE_ONCE(p4dp->pgd, pti_set_user_pgd(&p4dp->pgd, p4d.pgd));
404 + #else
405 +- *p4dp = p4d;
406 ++ WRITE_ONCE(*p4dp, p4d);
407 + #endif
408 + }
409 +
410 +@@ -238,9 +238,9 @@ static inline void native_p4d_clear(p4d_t *p4d)
411 + static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
412 + {
413 + #ifdef CONFIG_PAGE_TABLE_ISOLATION
414 +- *pgdp = pti_set_user_pgd(pgdp, pgd);
415 ++ WRITE_ONCE(*pgdp, pti_set_user_pgd(pgdp, pgd));
416 + #else
417 +- *pgdp = pgd;
418 ++ WRITE_ONCE(*pgdp, pgd);
419 + #endif
420 + }
421 +
422 +diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
423 +index aafd4edfa2ac..b4fd36271f90 100644
424 +--- a/arch/x86/mm/pgtable.c
425 ++++ b/arch/x86/mm/pgtable.c
426 +@@ -260,7 +260,7 @@ static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
427 + if (pgd_val(pgd) != 0) {
428 + pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
429 +
430 +- pgdp[i] = native_make_pgd(0);
431 ++ pgd_clear(&pgdp[i]);
432 +
433 + paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
434 + pmd_free(mm, pmd);
435 +@@ -430,7 +430,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
436 + int changed = !pte_same(*ptep, entry);
437 +
438 + if (changed && dirty)
439 +- *ptep = entry;
440 ++ set_pte(ptep, entry);
441 +
442 + return changed;
443 + }
444 +@@ -445,7 +445,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma,
445 + VM_BUG_ON(address & ~HPAGE_PMD_MASK);
446 +
447 + if (changed && dirty) {
448 +- *pmdp = entry;
449 ++ set_pmd(pmdp, entry);
450 + /*
451 + * We had a write-protection fault here and changed the pmd
452 + * to to more permissive. No need to flush the TLB for that,
453 +@@ -465,7 +465,7 @@ int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
454 + VM_BUG_ON(address & ~HPAGE_PUD_MASK);
455 +
456 + if (changed && dirty) {
457 +- *pudp = entry;
458 ++ set_pud(pudp, entry);
459 + /*
460 + * We had a write-protection fault here and changed the pud
461 + * to to more permissive. No need to flush the TLB for that,
462 +diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
463 +index a9deb2b0397d..cdb386fa7101 100644
464 +--- a/arch/x86/net/bpf_jit_comp.c
465 ++++ b/arch/x86/net/bpf_jit_comp.c
466 +@@ -16,8 +16,6 @@
467 + #include <asm/nospec-branch.h>
468 + #include <linux/bpf.h>
469 +
470 +-int bpf_jit_enable __read_mostly;
471 +-
472 + /*
473 + * assembly code in arch/x86/net/bpf_jit.S
474 + */
475 +diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
476 +index 66eefe1919a4..92fb20777bb0 100644
477 +--- a/arch/xtensa/kernel/setup.c
478 ++++ b/arch/xtensa/kernel/setup.c
479 +@@ -508,6 +508,7 @@ void cpu_reset(void)
480 + "add %2, %2, %7\n\t"
481 + "addi %0, %0, -1\n\t"
482 + "bnez %0, 1b\n\t"
483 ++ "isync\n\t"
484 + /* Jump to identity mapping */
485 + "jx %3\n"
486 + "2:\n\t"
487 +diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
488 +index a270a1173c8c..70cdbf1b0f9a 100644
489 +--- a/drivers/ata/libahci_platform.c
490 ++++ b/drivers/ata/libahci_platform.c
491 +@@ -300,6 +300,9 @@ static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port,
492 + hpriv->phys[port] = NULL;
493 + rc = 0;
494 + break;
495 ++ case -EPROBE_DEFER:
496 ++ /* Do not complain yet */
497 ++ break;
498 +
499 + default:
500 + dev_err(dev,
501 +diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
502 +index 173e6f2dd9af..eefda51f97d3 100644
503 +--- a/drivers/ata/libata-zpodd.c
504 ++++ b/drivers/ata/libata-zpodd.c
505 +@@ -56,7 +56,7 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
506 + unsigned int ret;
507 + struct rm_feature_desc *desc;
508 + struct ata_taskfile tf;
509 +- static const char cdb[] = { GPCMD_GET_CONFIGURATION,
510 ++ static const char cdb[ATAPI_CDB_LEN] = { GPCMD_GET_CONFIGURATION,
511 + 2, /* only 1 feature descriptor requested */
512 + 0, 3, /* 3, removable medium feature */
513 + 0, 0, 0,/* reserved */
514 +diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
515 +index 33481368740e..113152425a95 100644
516 +--- a/drivers/clk/at91/clk-generated.c
517 ++++ b/drivers/clk/at91/clk-generated.c
518 +@@ -153,6 +153,8 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
519 + continue;
520 +
521 + div = DIV_ROUND_CLOSEST(parent_rate, req->rate);
522 ++ if (div > GENERATED_MAX_DIV + 1)
523 ++ div = GENERATED_MAX_DIV + 1;
524 +
525 + clk_generated_best_diff(req, parent, parent_rate, div,
526 + &best_diff, &best_rate);
527 +diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
528 +index 30c23b882675..fe25d37ce9d3 100644
529 +--- a/drivers/clk/renesas/renesas-cpg-mssr.c
530 ++++ b/drivers/clk/renesas/renesas-cpg-mssr.c
531 +@@ -522,17 +522,11 @@ static int cpg_mssr_reset(struct reset_controller_dev *rcdev,
532 + unsigned int reg = id / 32;
533 + unsigned int bit = id % 32;
534 + u32 bitmask = BIT(bit);
535 +- unsigned long flags;
536 +- u32 value;
537 +
538 + dev_dbg(priv->dev, "reset %u%02u\n", reg, bit);
539 +
540 + /* Reset module */
541 +- spin_lock_irqsave(&priv->rmw_lock, flags);
542 +- value = readl(priv->base + SRCR(reg));
543 +- value |= bitmask;
544 +- writel(value, priv->base + SRCR(reg));
545 +- spin_unlock_irqrestore(&priv->rmw_lock, flags);
546 ++ writel(bitmask, priv->base + SRCR(reg));
547 +
548 + /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
549 + udelay(35);
550 +@@ -549,16 +543,10 @@ static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id)
551 + unsigned int reg = id / 32;
552 + unsigned int bit = id % 32;
553 + u32 bitmask = BIT(bit);
554 +- unsigned long flags;
555 +- u32 value;
556 +
557 + dev_dbg(priv->dev, "assert %u%02u\n", reg, bit);
558 +
559 +- spin_lock_irqsave(&priv->rmw_lock, flags);
560 +- value = readl(priv->base + SRCR(reg));
561 +- value |= bitmask;
562 +- writel(value, priv->base + SRCR(reg));
563 +- spin_unlock_irqrestore(&priv->rmw_lock, flags);
564 ++ writel(bitmask, priv->base + SRCR(reg));
565 + return 0;
566 + }
567 +
568 +diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
569 +index adf9ae0e0b7c..85aa824317f0 100644
570 +--- a/drivers/gpu/drm/bridge/Kconfig
571 ++++ b/drivers/gpu/drm/bridge/Kconfig
572 +@@ -35,6 +35,7 @@ config DRM_DUMB_VGA_DAC
573 + config DRM_LVDS_ENCODER
574 + tristate "Transparent parallel to LVDS encoder support"
575 + depends on OF
576 ++ select DRM_KMS_HELPER
577 + select DRM_PANEL_BRIDGE
578 + help
579 + Support for transparent parallel to LVDS encoders that don't require
580 +diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
581 +index b970427e53a7..77c45a2ebd83 100644
582 +--- a/drivers/gpu/drm/msm/msm_drv.c
583 ++++ b/drivers/gpu/drm/msm/msm_drv.c
584 +@@ -1060,7 +1060,8 @@ static int add_gpu_components(struct device *dev,
585 + if (!np)
586 + return 0;
587 +
588 +- drm_of_component_match_add(dev, matchptr, compare_of, np);
589 ++ if (of_device_is_available(np))
590 ++ drm_of_component_match_add(dev, matchptr, compare_of, np);
591 +
592 + of_node_put(np);
593 +
594 +diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c
595 +index 6e1a4a4fc0c1..ab9da597106f 100644
596 +--- a/drivers/hid/hid-holtek-kbd.c
597 ++++ b/drivers/hid/hid-holtek-kbd.c
598 +@@ -126,9 +126,14 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type,
599 +
600 + /* Locate the boot interface, to receive the LED change events */
601 + struct usb_interface *boot_interface = usb_ifnum_to_if(usb_dev, 0);
602 ++ struct hid_device *boot_hid;
603 ++ struct hid_input *boot_hid_input;
604 +
605 +- struct hid_device *boot_hid = usb_get_intfdata(boot_interface);
606 +- struct hid_input *boot_hid_input = list_first_entry(&boot_hid->inputs,
607 ++ if (unlikely(boot_interface == NULL))
608 ++ return -ENODEV;
609 ++
610 ++ boot_hid = usb_get_intfdata(boot_interface);
611 ++ boot_hid_input = list_first_entry(&boot_hid->inputs,
612 + struct hid_input, list);
613 +
614 + return boot_hid_input->input->event(boot_hid_input->input, type, code,
615 +diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
616 +index 89761551c15d..ce342fd0457e 100644
617 +--- a/drivers/hid/usbhid/hiddev.c
618 ++++ b/drivers/hid/usbhid/hiddev.c
619 +@@ -297,6 +297,14 @@ static int hiddev_open(struct inode *inode, struct file *file)
620 + spin_unlock_irq(&list->hiddev->list_lock);
621 +
622 + mutex_lock(&hiddev->existancelock);
623 ++ /*
624 ++ * recheck exist with existance lock held to
625 ++ * avoid opening a disconnected device
626 ++ */
627 ++ if (!list->hiddev->exist) {
628 ++ res = -ENODEV;
629 ++ goto bail_unlock;
630 ++ }
631 + if (!list->hiddev->open++)
632 + if (list->hiddev->exist) {
633 + struct hid_device *hid = hiddev->hid;
634 +@@ -313,6 +321,10 @@ bail_normal_power:
635 + hid_hw_power(hid, PM_HINT_NORMAL);
636 + bail_unlock:
637 + mutex_unlock(&hiddev->existancelock);
638 ++
639 ++ spin_lock_irq(&list->hiddev->list_lock);
640 ++ list_del(&list->node);
641 ++ spin_unlock_irq(&list->hiddev->list_lock);
642 + bail:
643 + file->private_data = NULL;
644 + vfree(list);
645 +diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
646 +index f8f298c33b28..c61fbf560271 100644
647 +--- a/drivers/iio/adc/max9611.c
648 ++++ b/drivers/iio/adc/max9611.c
649 +@@ -484,7 +484,7 @@ static int max9611_init(struct max9611_dev *max9611)
650 + if (ret)
651 + return ret;
652 +
653 +- regval = ret & MAX9611_TEMP_MASK;
654 ++ regval &= MAX9611_TEMP_MASK;
655 +
656 + if ((regval > MAX9611_TEMP_MAX_POS &&
657 + regval < MAX9611_TEMP_MIN_NEG) ||
658 +diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
659 +index 55252079faf6..49b6da1d990f 100644
660 +--- a/drivers/infiniband/core/mad.c
661 ++++ b/drivers/infiniband/core/mad.c
662 +@@ -3170,18 +3170,18 @@ static int ib_mad_port_open(struct ib_device *device,
663 + if (has_smi)
664 + cq_size *= 2;
665 +
666 ++ port_priv->pd = ib_alloc_pd(device, 0);
667 ++ if (IS_ERR(port_priv->pd)) {
668 ++ dev_err(&device->dev, "Couldn't create ib_mad PD\n");
669 ++ ret = PTR_ERR(port_priv->pd);
670 ++ goto error3;
671 ++ }
672 ++
673 + port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
674 + IB_POLL_WORKQUEUE);
675 + if (IS_ERR(port_priv->cq)) {
676 + dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
677 + ret = PTR_ERR(port_priv->cq);
678 +- goto error3;
679 +- }
680 +-
681 +- port_priv->pd = ib_alloc_pd(device, 0);
682 +- if (IS_ERR(port_priv->pd)) {
683 +- dev_err(&device->dev, "Couldn't create ib_mad PD\n");
684 +- ret = PTR_ERR(port_priv->pd);
685 + goto error4;
686 + }
687 +
688 +@@ -3224,11 +3224,11 @@ error8:
689 + error7:
690 + destroy_mad_qp(&port_priv->qp_info[0]);
691 + error6:
692 +- ib_dealloc_pd(port_priv->pd);
693 +-error4:
694 + ib_free_cq(port_priv->cq);
695 + cleanup_recv_queue(&port_priv->qp_info[1]);
696 + cleanup_recv_queue(&port_priv->qp_info[0]);
697 ++error4:
698 ++ ib_dealloc_pd(port_priv->pd);
699 + error3:
700 + kfree(port_priv);
701 +
702 +@@ -3258,8 +3258,8 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
703 + destroy_workqueue(port_priv->wq);
704 + destroy_mad_qp(&port_priv->qp_info[1]);
705 + destroy_mad_qp(&port_priv->qp_info[0]);
706 +- ib_dealloc_pd(port_priv->pd);
707 + ib_free_cq(port_priv->cq);
708 ++ ib_dealloc_pd(port_priv->pd);
709 + cleanup_recv_queue(&port_priv->qp_info[1]);
710 + cleanup_recv_queue(&port_priv->qp_info[0]);
711 + /* XXX: Handle deallocation of MAD registration tables */
712 +diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
713 +index 6511cb21f6e2..4a137bf584b0 100644
714 +--- a/drivers/infiniband/core/user_mad.c
715 ++++ b/drivers/infiniband/core/user_mad.c
716 +@@ -49,6 +49,7 @@
717 + #include <linux/sched.h>
718 + #include <linux/semaphore.h>
719 + #include <linux/slab.h>
720 ++#include <linux/nospec.h>
721 +
722 + #include <linux/uaccess.h>
723 +
724 +@@ -856,11 +857,14 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
725 +
726 + if (get_user(id, arg))
727 + return -EFAULT;
728 ++ if (id >= IB_UMAD_MAX_AGENTS)
729 ++ return -EINVAL;
730 +
731 + mutex_lock(&file->port->file_mutex);
732 + mutex_lock(&file->mutex);
733 +
734 +- if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
735 ++ id = array_index_nospec(id, IB_UMAD_MAX_AGENTS);
736 ++ if (!__get_agent(file, id)) {
737 + ret = -EINVAL;
738 + goto out;
739 + }
740 +diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
741 +index e8724f1a4a25..f1d4d543d945 100644
742 +--- a/drivers/input/joystick/iforce/iforce-usb.c
743 ++++ b/drivers/input/joystick/iforce/iforce-usb.c
744 +@@ -145,7 +145,12 @@ static int iforce_usb_probe(struct usb_interface *intf,
745 + return -ENODEV;
746 +
747 + epirq = &interface->endpoint[0].desc;
748 ++ if (!usb_endpoint_is_int_in(epirq))
749 ++ return -ENODEV;
750 ++
751 + epout = &interface->endpoint[1].desc;
752 ++ if (!usb_endpoint_is_int_out(epout))
753 ++ return -ENODEV;
754 +
755 + if (!(iforce = kzalloc(sizeof(struct iforce) + 32, GFP_KERNEL)))
756 + goto fail;
757 +diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
758 +index 10a039148234..538986e5ac5b 100644
759 +--- a/drivers/input/mouse/trackpoint.h
760 ++++ b/drivers/input/mouse/trackpoint.h
761 +@@ -161,7 +161,8 @@ struct trackpoint_data {
762 + #ifdef CONFIG_MOUSE_PS2_TRACKPOINT
763 + int trackpoint_detect(struct psmouse *psmouse, bool set_properties);
764 + #else
765 +-inline int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
766 ++static inline int trackpoint_detect(struct psmouse *psmouse,
767 ++ bool set_properties)
768 + {
769 + return -ENOSYS;
770 + }
771 +diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
772 +index a41c3ff7c9af..705f38c12acb 100644
773 +--- a/drivers/input/tablet/kbtab.c
774 ++++ b/drivers/input/tablet/kbtab.c
775 +@@ -125,6 +125,10 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
776 + if (intf->cur_altsetting->desc.bNumEndpoints < 1)
777 + return -ENODEV;
778 +
779 ++ endpoint = &intf->cur_altsetting->endpoint[0].desc;
780 ++ if (!usb_endpoint_is_int_in(endpoint))
781 ++ return -ENODEV;
782 ++
783 + kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL);
784 + input_dev = input_allocate_device();
785 + if (!kbtab || !input_dev)
786 +@@ -163,8 +167,6 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
787 + input_set_abs_params(input_dev, ABS_Y, 0, 0x1750, 4, 0);
788 + input_set_abs_params(input_dev, ABS_PRESSURE, 0, 0xff, 0, 0);
789 +
790 +- endpoint = &intf->cur_altsetting->endpoint[0].desc;
791 +-
792 + usb_fill_int_urb(kbtab->irq, dev,
793 + usb_rcvintpipe(dev, endpoint->bEndpointAddress),
794 + kbtab->data, 8,
795 +diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
796 +index 3884e82d24e9..6a3cf4d0bd5e 100644
797 +--- a/drivers/iommu/amd_iommu_init.c
798 ++++ b/drivers/iommu/amd_iommu_init.c
799 +@@ -1692,7 +1692,7 @@ static const struct attribute_group *amd_iommu_groups[] = {
800 + NULL,
801 + };
802 +
803 +-static int iommu_init_pci(struct amd_iommu *iommu)
804 ++static int __init iommu_init_pci(struct amd_iommu *iommu)
805 + {
806 + int cap_ptr = iommu->cap_ptr;
807 + u32 range, misc, low, high;
808 +diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
809 +index 121fb552f873..f80666acb9ef 100644
810 +--- a/drivers/irqchip/irq-gic-v3-its.c
811 ++++ b/drivers/irqchip/irq-gic-v3-its.c
812 +@@ -2631,7 +2631,7 @@ static int its_vpe_init(struct its_vpe *vpe)
813 +
814 + if (!its_alloc_vpe_table(vpe_id)) {
815 + its_vpe_id_free(vpe_id);
816 +- its_free_pending_table(vpe->vpt_page);
817 ++ its_free_pending_table(vpt_page);
818 + return -ENOMEM;
819 + }
820 +
821 +diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
822 +index 675eda5ff2b8..e4831491a3c4 100644
823 +--- a/drivers/irqchip/irq-imx-gpcv2.c
824 ++++ b/drivers/irqchip/irq-imx-gpcv2.c
825 +@@ -145,6 +145,7 @@ static struct irq_chip gpcv2_irqchip_data_chip = {
826 + .irq_unmask = imx_gpcv2_irq_unmask,
827 + .irq_set_wake = imx_gpcv2_irq_set_wake,
828 + .irq_retrigger = irq_chip_retrigger_hierarchy,
829 ++ .irq_set_type = irq_chip_set_type_parent,
830 + #ifdef CONFIG_SMP
831 + .irq_set_affinity = irq_chip_set_affinity_parent,
832 + #endif
833 +diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
834 +index 0720ea717011..e033ad477715 100644
835 +--- a/drivers/mmc/host/sdhci-of-arasan.c
836 ++++ b/drivers/mmc/host/sdhci-of-arasan.c
837 +@@ -638,7 +638,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
838 +
839 + ret = mmc_of_parse(host->mmc);
840 + if (ret) {
841 +- dev_err(&pdev->dev, "parsing dt failed (%d)\n", ret);
842 ++ if (ret != -EPROBE_DEFER)
843 ++ dev_err(&pdev->dev, "parsing dt failed (%d)\n", ret);
844 + goto unreg_clk;
845 + }
846 +
847 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
848 +index 11a0e84d3d7c..60d0c270af85 100644
849 +--- a/drivers/net/bonding/bond_main.c
850 ++++ b/drivers/net/bonding/bond_main.c
851 +@@ -1108,7 +1108,9 @@ static void bond_compute_features(struct bonding *bond)
852 +
853 + done:
854 + bond_dev->vlan_features = vlan_features;
855 +- bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL;
856 ++ bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
857 ++ NETIF_F_HW_VLAN_CTAG_TX |
858 ++ NETIF_F_HW_VLAN_STAG_TX;
859 + bond_dev->gso_max_segs = gso_max_segs;
860 + netif_set_gso_max_size(bond_dev, gso_max_size);
861 +
862 +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
863 +index 17b825f73c52..faa45491ae4d 100644
864 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
865 ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
866 +@@ -3057,12 +3057,13 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
867 + /* if VF indicate to PF this function is going down (PF will delete sp
868 + * elements and clear initializations
869 + */
870 +- if (IS_VF(bp))
871 ++ if (IS_VF(bp)) {
872 ++ bnx2x_clear_vlan_info(bp);
873 + bnx2x_vfpf_close_vf(bp);
874 +- else if (unload_mode != UNLOAD_RECOVERY)
875 ++ } else if (unload_mode != UNLOAD_RECOVERY) {
876 + /* if this is a normal/close unload need to clean up chip*/
877 + bnx2x_chip_cleanup(bp, unload_mode, keep_link);
878 +- else {
879 ++ } else {
880 + /* Send the UNLOAD_REQUEST to the MCP */
881 + bnx2x_send_unload_req(bp, unload_mode);
882 +
883 +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
884 +index a5265e1344f1..4e091a11daaf 100644
885 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
886 ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
887 +@@ -425,6 +425,8 @@ void bnx2x_set_reset_global(struct bnx2x *bp);
888 + void bnx2x_disable_close_the_gate(struct bnx2x *bp);
889 + int bnx2x_init_hw_func_cnic(struct bnx2x *bp);
890 +
891 ++void bnx2x_clear_vlan_info(struct bnx2x *bp);
892 ++
893 + /**
894 + * bnx2x_sp_event - handle ramrods completion.
895 + *
896 +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
897 +index 53fa4f88ed4d..8f0c9f6de893 100644
898 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
899 ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
900 +@@ -8488,11 +8488,21 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
901 + return rc;
902 + }
903 +
904 ++void bnx2x_clear_vlan_info(struct bnx2x *bp)
905 ++{
906 ++ struct bnx2x_vlan_entry *vlan;
907 ++
908 ++ /* Mark that hw forgot all entries */
909 ++ list_for_each_entry(vlan, &bp->vlan_reg, link)
910 ++ vlan->hw = false;
911 ++
912 ++ bp->vlan_cnt = 0;
913 ++}
914 ++
915 + static int bnx2x_del_all_vlans(struct bnx2x *bp)
916 + {
917 + struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
918 + unsigned long ramrod_flags = 0, vlan_flags = 0;
919 +- struct bnx2x_vlan_entry *vlan;
920 + int rc;
921 +
922 + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
923 +@@ -8501,10 +8511,7 @@ static int bnx2x_del_all_vlans(struct bnx2x *bp)
924 + if (rc)
925 + return rc;
926 +
927 +- /* Mark that hw forgot all entries */
928 +- list_for_each_entry(vlan, &bp->vlan_reg, link)
929 +- vlan->hw = false;
930 +- bp->vlan_cnt = 0;
931 ++ bnx2x_clear_vlan_info(bp);
932 +
933 + return 0;
934 + }
935 +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
936 +index 8fcf9dd42740..c6d101351537 100644
937 +--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
938 ++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
939 +@@ -1193,7 +1193,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
940 + err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp);
941 + if (err) {
942 + en_err(priv, "Failed to allocate RSS indirection QP\n");
943 +- goto rss_err;
944 ++ goto qp_alloc_err;
945 + }
946 +
947 + rss_map->indir_qp->event = mlx4_en_sqp_event;
948 +@@ -1247,6 +1247,7 @@ indir_err:
949 + MLX4_QP_STATE_RST, NULL, 0, 0, rss_map->indir_qp);
950 + mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
951 + mlx4_qp_free(mdev->dev, rss_map->indir_qp);
952 ++qp_alloc_err:
953 + kfree(rss_map->indir_qp);
954 + rss_map->indir_qp = NULL;
955 + rss_err:
956 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
957 +index e87923e046c9..c567cff499d1 100644
958 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
959 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
960 +@@ -439,12 +439,6 @@ arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port,
961 + return &arfs_t->rules_hash[bucket_idx];
962 + }
963 +
964 +-static u8 arfs_get_ip_proto(const struct sk_buff *skb)
965 +-{
966 +- return (skb->protocol == htons(ETH_P_IP)) ?
967 +- ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
968 +-}
969 +-
970 + static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
971 + u8 ip_proto, __be16 etype)
972 + {
973 +@@ -601,31 +595,9 @@ out:
974 + arfs_may_expire_flow(priv);
975 + }
976 +
977 +-/* return L4 destination port from ip4/6 packets */
978 +-static __be16 arfs_get_dst_port(const struct sk_buff *skb)
979 +-{
980 +- char *transport_header;
981 +-
982 +- transport_header = skb_transport_header(skb);
983 +- if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
984 +- return ((struct tcphdr *)transport_header)->dest;
985 +- return ((struct udphdr *)transport_header)->dest;
986 +-}
987 +-
988 +-/* return L4 source port from ip4/6 packets */
989 +-static __be16 arfs_get_src_port(const struct sk_buff *skb)
990 +-{
991 +- char *transport_header;
992 +-
993 +- transport_header = skb_transport_header(skb);
994 +- if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
995 +- return ((struct tcphdr *)transport_header)->source;
996 +- return ((struct udphdr *)transport_header)->source;
997 +-}
998 +-
999 + static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
1000 + struct arfs_table *arfs_t,
1001 +- const struct sk_buff *skb,
1002 ++ const struct flow_keys *fk,
1003 + u16 rxq, u32 flow_id)
1004 + {
1005 + struct arfs_rule *rule;
1006 +@@ -640,19 +612,19 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
1007 + INIT_WORK(&rule->arfs_work, arfs_handle_work);
1008 +
1009 + tuple = &rule->tuple;
1010 +- tuple->etype = skb->protocol;
1011 ++ tuple->etype = fk->basic.n_proto;
1012 ++ tuple->ip_proto = fk->basic.ip_proto;
1013 + if (tuple->etype == htons(ETH_P_IP)) {
1014 +- tuple->src_ipv4 = ip_hdr(skb)->saddr;
1015 +- tuple->dst_ipv4 = ip_hdr(skb)->daddr;
1016 ++ tuple->src_ipv4 = fk->addrs.v4addrs.src;
1017 ++ tuple->dst_ipv4 = fk->addrs.v4addrs.dst;
1018 + } else {
1019 +- memcpy(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
1020 ++ memcpy(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
1021 + sizeof(struct in6_addr));
1022 +- memcpy(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
1023 ++ memcpy(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
1024 + sizeof(struct in6_addr));
1025 + }
1026 +- tuple->ip_proto = arfs_get_ip_proto(skb);
1027 +- tuple->src_port = arfs_get_src_port(skb);
1028 +- tuple->dst_port = arfs_get_dst_port(skb);
1029 ++ tuple->src_port = fk->ports.src;
1030 ++ tuple->dst_port = fk->ports.dst;
1031 +
1032 + rule->flow_id = flow_id;
1033 + rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;
1034 +@@ -663,37 +635,33 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
1035 + return rule;
1036 + }
1037 +
1038 +-static bool arfs_cmp_ips(struct arfs_tuple *tuple,
1039 +- const struct sk_buff *skb)
1040 ++static bool arfs_cmp(const struct arfs_tuple *tuple, const struct flow_keys *fk)
1041 + {
1042 +- if (tuple->etype == htons(ETH_P_IP) &&
1043 +- tuple->src_ipv4 == ip_hdr(skb)->saddr &&
1044 +- tuple->dst_ipv4 == ip_hdr(skb)->daddr)
1045 +- return true;
1046 +- if (tuple->etype == htons(ETH_P_IPV6) &&
1047 +- (!memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
1048 +- sizeof(struct in6_addr))) &&
1049 +- (!memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
1050 +- sizeof(struct in6_addr))))
1051 +- return true;
1052 ++ if (tuple->src_port != fk->ports.src || tuple->dst_port != fk->ports.dst)
1053 ++ return false;
1054 ++ if (tuple->etype != fk->basic.n_proto)
1055 ++ return false;
1056 ++ if (tuple->etype == htons(ETH_P_IP))
1057 ++ return tuple->src_ipv4 == fk->addrs.v4addrs.src &&
1058 ++ tuple->dst_ipv4 == fk->addrs.v4addrs.dst;
1059 ++ if (tuple->etype == htons(ETH_P_IPV6))
1060 ++ return !memcmp(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
1061 ++ sizeof(struct in6_addr)) &&
1062 ++ !memcmp(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
1063 ++ sizeof(struct in6_addr));
1064 + return false;
1065 + }
1066 +
1067 + static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
1068 +- const struct sk_buff *skb)
1069 ++ const struct flow_keys *fk)
1070 + {
1071 + struct arfs_rule *arfs_rule;
1072 + struct hlist_head *head;
1073 +- __be16 src_port = arfs_get_src_port(skb);
1074 +- __be16 dst_port = arfs_get_dst_port(skb);
1075 +
1076 +- head = arfs_hash_bucket(arfs_t, src_port, dst_port);
1077 ++ head = arfs_hash_bucket(arfs_t, fk->ports.src, fk->ports.dst);
1078 + hlist_for_each_entry(arfs_rule, head, hlist) {
1079 +- if (arfs_rule->tuple.src_port == src_port &&
1080 +- arfs_rule->tuple.dst_port == dst_port &&
1081 +- arfs_cmp_ips(&arfs_rule->tuple, skb)) {
1082 ++ if (arfs_cmp(&arfs_rule->tuple, fk))
1083 + return arfs_rule;
1084 +- }
1085 + }
1086 +
1087 + return NULL;
1088 +@@ -706,20 +674,24 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
1089 + struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
1090 + struct arfs_table *arfs_t;
1091 + struct arfs_rule *arfs_rule;
1092 ++ struct flow_keys fk;
1093 ++
1094 ++ if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
1095 ++ return -EPROTONOSUPPORT;
1096 +
1097 +- if (skb->protocol != htons(ETH_P_IP) &&
1098 +- skb->protocol != htons(ETH_P_IPV6))
1099 ++ if (fk.basic.n_proto != htons(ETH_P_IP) &&
1100 ++ fk.basic.n_proto != htons(ETH_P_IPV6))
1101 + return -EPROTONOSUPPORT;
1102 +
1103 + if (skb->encapsulation)
1104 + return -EPROTONOSUPPORT;
1105 +
1106 +- arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
1107 ++ arfs_t = arfs_get_table(arfs, fk.basic.ip_proto, fk.basic.n_proto);
1108 + if (!arfs_t)
1109 + return -EPROTONOSUPPORT;
1110 +
1111 + spin_lock_bh(&arfs->arfs_lock);
1112 +- arfs_rule = arfs_find_rule(arfs_t, skb);
1113 ++ arfs_rule = arfs_find_rule(arfs_t, &fk);
1114 + if (arfs_rule) {
1115 + if (arfs_rule->rxq == rxq_index) {
1116 + spin_unlock_bh(&arfs->arfs_lock);
1117 +@@ -727,8 +699,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
1118 + }
1119 + arfs_rule->rxq = rxq_index;
1120 + } else {
1121 +- arfs_rule = arfs_alloc_rule(priv, arfs_t, skb,
1122 +- rxq_index, flow_id);
1123 ++ arfs_rule = arfs_alloc_rule(priv, arfs_t, &fk, rxq_index, flow_id);
1124 + if (!arfs_rule) {
1125 + spin_unlock_bh(&arfs->arfs_lock);
1126 + return -ENOMEM;
1127 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
1128 +index 26ad27b3f687..f6beb5ef5971 100644
1129 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
1130 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
1131 +@@ -1400,6 +1400,9 @@ static int mlx5e_set_pauseparam(struct net_device *netdev,
1132 + struct mlx5_core_dev *mdev = priv->mdev;
1133 + int err;
1134 +
1135 ++ if (!MLX5_CAP_GEN(mdev, vport_group_manager))
1136 ++ return -EOPNOTSUPP;
1137 ++
1138 + if (pauseparam->autoneg)
1139 + return -EINVAL;
1140 +
1141 +diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
1142 +index f3e3e568311a..f1aabf8a16c2 100644
1143 +--- a/drivers/net/team/team.c
1144 ++++ b/drivers/net/team/team.c
1145 +@@ -1014,7 +1014,9 @@ static void __team_compute_features(struct team *team)
1146 + }
1147 +
1148 + team->dev->vlan_features = vlan_features;
1149 +- team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL;
1150 ++ team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
1151 ++ NETIF_F_HW_VLAN_CTAG_TX |
1152 ++ NETIF_F_HW_VLAN_STAG_TX;
1153 + team->dev->hard_header_len = max_hard_header_len;
1154 +
1155 + team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
1156 +diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
1157 +index 6514c86f043e..5435c34dfcc7 100644
1158 +--- a/drivers/net/usb/pegasus.c
1159 ++++ b/drivers/net/usb/pegasus.c
1160 +@@ -285,7 +285,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
1161 + static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
1162 + {
1163 + int i;
1164 +- __u8 tmp;
1165 ++ __u8 tmp = 0;
1166 + __le16 retdatai;
1167 + int ret;
1168 +
1169 +diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
1170 +index d09dea77c287..a871aa6418d0 100644
1171 +--- a/drivers/net/xen-netback/netback.c
1172 ++++ b/drivers/net/xen-netback/netback.c
1173 +@@ -927,6 +927,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
1174 + skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
1175 + nskb = xenvif_alloc_skb(0);
1176 + if (unlikely(nskb == NULL)) {
1177 ++ skb_shinfo(skb)->nr_frags = 0;
1178 + kfree_skb(skb);
1179 + xenvif_tx_err(queue, &txreq, extra_count, idx);
1180 + if (net_ratelimit())
1181 +@@ -942,6 +943,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
1182 +
1183 + if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
1184 + /* Failure in xenvif_set_skb_gso is fatal. */
1185 ++ skb_shinfo(skb)->nr_frags = 0;
1186 + kfree_skb(skb);
1187 + kfree_skb(nskb);
1188 + break;
1189 +diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
1190 +index 6d520e8945f7..3b892918d821 100644
1191 +--- a/drivers/scsi/hpsa.c
1192 ++++ b/drivers/scsi/hpsa.c
1193 +@@ -2266,6 +2266,8 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
1194 + case IOACCEL2_SERV_RESPONSE_COMPLETE:
1195 + switch (c2->error_data.status) {
1196 + case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
1197 ++ if (cmd)
1198 ++ cmd->result = 0;
1199 + break;
1200 + case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
1201 + cmd->result |= SAM_STAT_CHECK_CONDITION;
1202 +@@ -2425,8 +2427,10 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
1203 +
1204 + /* check for good status */
1205 + if (likely(c2->error_data.serv_response == 0 &&
1206 +- c2->error_data.status == 0))
1207 ++ c2->error_data.status == 0)) {
1208 ++ cmd->result = 0;
1209 + return hpsa_cmd_free_and_done(h, c, cmd);
1210 ++ }
1211 +
1212 + /*
1213 + * Any RAID offload error results in retry which will use
1214 +@@ -5494,6 +5498,12 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
1215 + }
1216 + c = cmd_tagged_alloc(h, cmd);
1217 +
1218 ++ /*
1219 ++ * This is necessary because the SML doesn't zero out this field during
1220 ++ * error recovery.
1221 ++ */
1222 ++ cmd->result = 0;
1223 ++
1224 + /*
1225 + * Call alternate submit routine for I/O accelerated commands.
1226 + * Retries always go down the normal I/O path.
1227 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
1228 +index 9b716c8c558a..7bfe53f48d1d 100644
1229 +--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
1230 ++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
1231 +@@ -1724,9 +1724,11 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
1232 + {
1233 + struct sysinfo s;
1234 + u64 consistent_dma_mask;
1235 ++ /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
1236 ++ int dma_mask = (ioc->hba_mpi_version_belonged > MPI2_VERSION) ? 63 : 64;
1237 +
1238 + if (ioc->dma_mask)
1239 +- consistent_dma_mask = DMA_BIT_MASK(64);
1240 ++ consistent_dma_mask = DMA_BIT_MASK(dma_mask);
1241 + else
1242 + consistent_dma_mask = DMA_BIT_MASK(32);
1243 +
1244 +@@ -1734,11 +1736,11 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
1245 + const uint64_t required_mask =
1246 + dma_get_required_mask(&pdev->dev);
1247 + if ((required_mask > DMA_BIT_MASK(32)) &&
1248 +- !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1249 ++ !pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_mask)) &&
1250 + !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) {
1251 + ioc->base_add_sg_single = &_base_add_sg_single_64;
1252 + ioc->sge_size = sizeof(Mpi2SGESimple64_t);
1253 +- ioc->dma_mask = 64;
1254 ++ ioc->dma_mask = dma_mask;
1255 + goto out;
1256 + }
1257 + }
1258 +@@ -1764,7 +1766,7 @@ static int
1259 + _base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
1260 + struct pci_dev *pdev)
1261 + {
1262 +- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
1263 ++ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(ioc->dma_mask))) {
1264 + if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
1265 + return -ENODEV;
1266 + }
1267 +@@ -3477,7 +3479,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
1268 + total_sz += sz;
1269 + } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
1270 +
1271 +- if (ioc->dma_mask == 64) {
1272 ++ if (ioc->dma_mask > 32) {
1273 + if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
1274 + pr_warn(MPT3SAS_FMT
1275 + "no suitable consistent DMA mask for %s\n",
1276 +diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
1277 +index aef1e1a55535..0e154fea693e 100644
1278 +--- a/drivers/scsi/qla2xxx/qla_init.c
1279 ++++ b/drivers/scsi/qla2xxx/qla_init.c
1280 +@@ -4252,7 +4252,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
1281 + ql_log(ql_log_warn, vha, 0xd049,
1282 + "Failed to allocate ct_sns request.\n");
1283 + kfree(fcport);
1284 +- fcport = NULL;
1285 ++ return NULL;
1286 + }
1287 + INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
1288 + INIT_LIST_HEAD(&fcport->gnl_entry);
1289 +diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
1290 +index 19e0b7be8495..917d13abef88 100644
1291 +--- a/drivers/staging/comedi/drivers/dt3000.c
1292 ++++ b/drivers/staging/comedi/drivers/dt3000.c
1293 +@@ -351,9 +351,9 @@ static irqreturn_t dt3k_interrupt(int irq, void *d)
1294 + static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
1295 + unsigned int flags)
1296 + {
1297 +- int divider, base, prescale;
1298 ++ unsigned int divider, base, prescale;
1299 +
1300 +- /* This function needs improvment */
1301 ++ /* This function needs improvement */
1302 + /* Don't know if divider==0 works. */
1303 +
1304 + for (prescale = 0; prescale < 16; prescale++) {
1305 +@@ -367,7 +367,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
1306 + divider = (*nanosec) / base;
1307 + break;
1308 + case CMDF_ROUND_UP:
1309 +- divider = (*nanosec) / base;
1310 ++ divider = DIV_ROUND_UP(*nanosec, base);
1311 + break;
1312 + }
1313 + if (divider < 65536) {
1314 +@@ -377,7 +377,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
1315 + }
1316 +
1317 + prescale = 15;
1318 +- base = timer_base * (1 << prescale);
1319 ++ base = timer_base * (prescale + 1);
1320 + divider = 65535;
1321 + *nanosec = divider * base;
1322 + return (prescale << 16) | (divider);
1323 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
1324 +index efa8b620fee8..0453f0eb1178 100644
1325 +--- a/drivers/usb/class/cdc-acm.c
1326 ++++ b/drivers/usb/class/cdc-acm.c
1327 +@@ -1342,10 +1342,6 @@ made_compressed_probe:
1328 + if (acm == NULL)
1329 + goto alloc_fail;
1330 +
1331 +- minor = acm_alloc_minor(acm);
1332 +- if (minor < 0)
1333 +- goto alloc_fail1;
1334 +-
1335 + ctrlsize = usb_endpoint_maxp(epctrl);
1336 + readsize = usb_endpoint_maxp(epread) *
1337 + (quirks == SINGLE_RX_URB ? 1 : 2);
1338 +@@ -1353,6 +1349,13 @@ made_compressed_probe:
1339 + acm->writesize = usb_endpoint_maxp(epwrite) * 20;
1340 + acm->control = control_interface;
1341 + acm->data = data_interface;
1342 ++
1343 ++ usb_get_intf(acm->control); /* undone in destruct() */
1344 ++
1345 ++ minor = acm_alloc_minor(acm);
1346 ++ if (minor < 0)
1347 ++ goto alloc_fail1;
1348 ++
1349 + acm->minor = minor;
1350 + acm->dev = usb_dev;
1351 + if (h.usb_cdc_acm_descriptor)
1352 +@@ -1501,7 +1504,6 @@ skip_countries:
1353 + usb_driver_claim_interface(&acm_driver, data_interface, acm);
1354 + usb_set_intfdata(data_interface, acm);
1355 +
1356 +- usb_get_intf(control_interface);
1357 + tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor,
1358 + &control_interface->dev);
1359 + if (IS_ERR(tty_dev)) {
1360 +diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
1361 +index 87ad6b6bfee8..1e578e2ef20c 100644
1362 +--- a/drivers/usb/core/file.c
1363 ++++ b/drivers/usb/core/file.c
1364 +@@ -193,9 +193,10 @@ int usb_register_dev(struct usb_interface *intf,
1365 + intf->minor = minor;
1366 + break;
1367 + }
1368 +- up_write(&minor_rwsem);
1369 +- if (intf->minor < 0)
1370 ++ if (intf->minor < 0) {
1371 ++ up_write(&minor_rwsem);
1372 + return -EXFULL;
1373 ++ }
1374 +
1375 + /* create a usb class device for this usb interface */
1376 + snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
1377 +@@ -203,12 +204,11 @@ int usb_register_dev(struct usb_interface *intf,
1378 + MKDEV(USB_MAJOR, minor), class_driver,
1379 + "%s", kbasename(name));
1380 + if (IS_ERR(intf->usb_dev)) {
1381 +- down_write(&minor_rwsem);
1382 + usb_minors[minor] = NULL;
1383 + intf->minor = -1;
1384 +- up_write(&minor_rwsem);
1385 + retval = PTR_ERR(intf->usb_dev);
1386 + }
1387 ++ up_write(&minor_rwsem);
1388 + return retval;
1389 + }
1390 + EXPORT_SYMBOL_GPL(usb_register_dev);
1391 +@@ -234,12 +234,12 @@ void usb_deregister_dev(struct usb_interface *intf,
1392 + return;
1393 +
1394 + dev_dbg(&intf->dev, "removing %d minor\n", intf->minor);
1395 ++ device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
1396 +
1397 + down_write(&minor_rwsem);
1398 + usb_minors[intf->minor] = NULL;
1399 + up_write(&minor_rwsem);
1400 +
1401 +- device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
1402 + intf->usb_dev = NULL;
1403 + intf->minor = -1;
1404 + destroy_usb_class();
1405 +diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
1406 +index c3f3f6370f64..e70578e11156 100644
1407 +--- a/drivers/usb/core/message.c
1408 ++++ b/drivers/usb/core/message.c
1409 +@@ -2143,14 +2143,14 @@ int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
1410 + (struct usb_cdc_dmm_desc *)buffer;
1411 + break;
1412 + case USB_CDC_MDLM_TYPE:
1413 +- if (elength < sizeof(struct usb_cdc_mdlm_desc *))
1414 ++ if (elength < sizeof(struct usb_cdc_mdlm_desc))
1415 + goto next_desc;
1416 + if (desc)
1417 + return -EINVAL;
1418 + desc = (struct usb_cdc_mdlm_desc *)buffer;
1419 + break;
1420 + case USB_CDC_MDLM_DETAIL_TYPE:
1421 +- if (elength < sizeof(struct usb_cdc_mdlm_detail_desc *))
1422 ++ if (elength < sizeof(struct usb_cdc_mdlm_detail_desc))
1423 + goto next_desc;
1424 + if (detail)
1425 + return -EINVAL;
1426 +diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
1427 +index e5355ede2c46..189d4e01010b 100644
1428 +--- a/drivers/usb/gadget/udc/renesas_usb3.c
1429 ++++ b/drivers/usb/gadget/udc/renesas_usb3.c
1430 +@@ -21,6 +21,7 @@
1431 + #include <linux/pm_runtime.h>
1432 + #include <linux/sizes.h>
1433 + #include <linux/slab.h>
1434 ++#include <linux/string.h>
1435 + #include <linux/sys_soc.h>
1436 + #include <linux/uaccess.h>
1437 + #include <linux/usb/ch9.h>
1438 +@@ -2315,9 +2316,9 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr,
1439 + if (usb3->forced_b_device)
1440 + return -EBUSY;
1441 +
1442 +- if (!strncmp(buf, "host", strlen("host")))
1443 ++ if (sysfs_streq(buf, "host"))
1444 + new_mode_is_host = true;
1445 +- else if (!strncmp(buf, "peripheral", strlen("peripheral")))
1446 ++ else if (sysfs_streq(buf, "peripheral"))
1447 + new_mode_is_host = false;
1448 + else
1449 + return -EINVAL;
1450 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1451 +index 8b9e12ab1fe6..959f462c6f72 100644
1452 +--- a/drivers/usb/serial/option.c
1453 ++++ b/drivers/usb/serial/option.c
1454 +@@ -971,6 +971,11 @@ static const struct usb_device_id option_ids[] = {
1455 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) },
1456 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) },
1457 +
1458 ++ /* Motorola devices */
1459 ++ { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2a70, 0xff, 0xff, 0xff) }, /* mdm6600 */
1460 ++ { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2e0a, 0xff, 0xff, 0xff) }, /* mdm9600 */
1461 ++ { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x4281, 0x0a, 0x00, 0xfc) }, /* mdm ram dl */
1462 ++ { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x900e, 0xff, 0xff, 0xff) }, /* mdm qc dl */
1463 +
1464 + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
1465 + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
1466 +@@ -1552,6 +1557,7 @@ static const struct usb_device_id option_ids[] = {
1467 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */
1468 + .driver_info = RSVD(2) },
1469 + { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) }, /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
1470 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */
1471 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
1472 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
1473 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
1474 +@@ -1954,11 +1960,15 @@ static const struct usb_device_id option_ids[] = {
1475 + .driver_info = RSVD(4) },
1476 + { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
1477 + .driver_info = RSVD(4) },
1478 ++ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e3d, 0xff), /* D-Link DWM-222 A2 */
1479 ++ .driver_info = RSVD(4) },
1480 + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
1481 + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
1482 + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
1483 + { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 */
1484 + .driver_info = RSVD(4) },
1485 ++ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2060, 0xff), /* BroadMobi BM818 */
1486 ++ .driver_info = RSVD(4) },
1487 + { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
1488 + { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
1489 + { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
1490 +diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c
1491 +index 73427d8e0116..e5694133ebe5 100644
1492 +--- a/drivers/xen/xen-pciback/conf_space_capability.c
1493 ++++ b/drivers/xen/xen-pciback/conf_space_capability.c
1494 +@@ -116,13 +116,12 @@ static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
1495 + {
1496 + int err;
1497 + u16 old_value;
1498 +- pci_power_t new_state, old_state;
1499 ++ pci_power_t new_state;
1500 +
1501 + err = pci_read_config_word(dev, offset, &old_value);
1502 + if (err)
1503 + goto out;
1504 +
1505 +- old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK);
1506 + new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
1507 +
1508 + new_value &= PM_OK_BITS;
1509 +diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
1510 +index fb0a4eec310c..77740ef5a8e8 100644
1511 +--- a/fs/ocfs2/xattr.c
1512 ++++ b/fs/ocfs2/xattr.c
1513 +@@ -3832,7 +3832,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
1514 + u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
1515 + int low_bucket = 0, bucket, high_bucket;
1516 + struct ocfs2_xattr_bucket *search;
1517 +- u32 last_hash;
1518 + u64 blkno, lower_blkno = 0;
1519 +
1520 + search = ocfs2_xattr_bucket_new(inode);
1521 +@@ -3876,8 +3875,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
1522 + if (xh->xh_count)
1523 + xe = &xh->xh_entries[le16_to_cpu(xh->xh_count) - 1];
1524 +
1525 +- last_hash = le32_to_cpu(xe->xe_name_hash);
1526 +-
1527 + /* record lower_blkno which may be the insert place. */
1528 + lower_blkno = blkno;
1529 +
1530 +diff --git a/include/asm-generic/getorder.h b/include/asm-generic/getorder.h
1531 +index c64bea7a52be..e9f20b813a69 100644
1532 +--- a/include/asm-generic/getorder.h
1533 ++++ b/include/asm-generic/getorder.h
1534 +@@ -7,24 +7,6 @@
1535 + #include <linux/compiler.h>
1536 + #include <linux/log2.h>
1537 +
1538 +-/*
1539 +- * Runtime evaluation of get_order()
1540 +- */
1541 +-static inline __attribute_const__
1542 +-int __get_order(unsigned long size)
1543 +-{
1544 +- int order;
1545 +-
1546 +- size--;
1547 +- size >>= PAGE_SHIFT;
1548 +-#if BITS_PER_LONG == 32
1549 +- order = fls(size);
1550 +-#else
1551 +- order = fls64(size);
1552 +-#endif
1553 +- return order;
1554 +-}
1555 +-
1556 + /**
1557 + * get_order - Determine the allocation order of a memory size
1558 + * @size: The size for which to get the order
1559 +@@ -43,19 +25,27 @@ int __get_order(unsigned long size)
1560 + * to hold an object of the specified size.
1561 + *
1562 + * The result is undefined if the size is 0.
1563 +- *
1564 +- * This function may be used to initialise variables with compile time
1565 +- * evaluations of constants.
1566 + */
1567 +-#define get_order(n) \
1568 +-( \
1569 +- __builtin_constant_p(n) ? ( \
1570 +- ((n) == 0UL) ? BITS_PER_LONG - PAGE_SHIFT : \
1571 +- (((n) < (1UL << PAGE_SHIFT)) ? 0 : \
1572 +- ilog2((n) - 1) - PAGE_SHIFT + 1) \
1573 +- ) : \
1574 +- __get_order(n) \
1575 +-)
1576 ++static inline __attribute_const__ int get_order(unsigned long size)
1577 ++{
1578 ++ if (__builtin_constant_p(size)) {
1579 ++ if (!size)
1580 ++ return BITS_PER_LONG - PAGE_SHIFT;
1581 ++
1582 ++ if (size < (1UL << PAGE_SHIFT))
1583 ++ return 0;
1584 ++
1585 ++ return ilog2((size) - 1) - PAGE_SHIFT + 1;
1586 ++ }
1587 ++
1588 ++ size--;
1589 ++ size >>= PAGE_SHIFT;
1590 ++#if BITS_PER_LONG == 32
1591 ++ return fls(size);
1592 ++#else
1593 ++ return fls64(size);
1594 ++#endif
1595 ++}
1596 +
1597 + #endif /* __ASSEMBLY__ */
1598 +
1599 +diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
1600 +index 34dba516ef24..d5c6637ed638 100644
1601 +--- a/include/kvm/arm_vgic.h
1602 ++++ b/include/kvm/arm_vgic.h
1603 +@@ -315,6 +315,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
1604 +
1605 + void kvm_vgic_load(struct kvm_vcpu *vcpu);
1606 + void kvm_vgic_put(struct kvm_vcpu *vcpu);
1607 ++void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu);
1608 +
1609 + #define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
1610 + #define vgic_initialized(k) ((k)->arch.vgic.initialized)
1611 +diff --git a/include/linux/filter.h b/include/linux/filter.h
1612 +index ac2272778f2e..5ca676d64652 100644
1613 +--- a/include/linux/filter.h
1614 ++++ b/include/linux/filter.h
1615 +@@ -729,6 +729,7 @@ struct sock *do_sk_redirect_map(struct sk_buff *skb);
1616 + extern int bpf_jit_enable;
1617 + extern int bpf_jit_harden;
1618 + extern int bpf_jit_kallsyms;
1619 ++extern long bpf_jit_limit;
1620 +
1621 + typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
1622 +
1623 +diff --git a/include/net/tcp.h b/include/net/tcp.h
1624 +index 9de2c8cdcc51..7994e569644e 100644
1625 +--- a/include/net/tcp.h
1626 ++++ b/include/net/tcp.h
1627 +@@ -1613,8 +1613,6 @@ static inline void tcp_init_send_head(struct sock *sk)
1628 + sk->sk_send_head = NULL;
1629 + }
1630 +
1631 +-static inline void tcp_init_send_head(struct sock *sk);
1632 +-
1633 + /* write queue abstraction */
1634 + static inline void tcp_write_queue_purge(struct sock *sk)
1635 + {
1636 +@@ -1623,7 +1621,6 @@ static inline void tcp_write_queue_purge(struct sock *sk)
1637 + tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
1638 + while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
1639 + sk_wmem_free_skb(sk, skb);
1640 +- tcp_init_send_head(sk);
1641 + sk_mem_reclaim(sk);
1642 + tcp_clear_all_retrans_hints(tcp_sk(sk));
1643 + tcp_init_send_head(sk);
1644 +diff --git a/include/net/xfrm.h b/include/net/xfrm.h
1645 +index db99efb2d1d0..bdf185ae93db 100644
1646 +--- a/include/net/xfrm.h
1647 ++++ b/include/net/xfrm.h
1648 +@@ -323,7 +323,6 @@ int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int fam
1649 + void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo);
1650 + void km_policy_notify(struct xfrm_policy *xp, int dir,
1651 + const struct km_event *c);
1652 +-void xfrm_policy_cache_flush(void);
1653 + void km_state_notify(struct xfrm_state *x, const struct km_event *c);
1654 +
1655 + struct xfrm_tmpl;
1656 +diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
1657 +index e46106c6ac39..e7211b0fa27c 100644
1658 +--- a/kernel/bpf/core.c
1659 ++++ b/kernel/bpf/core.c
1660 +@@ -290,6 +290,12 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
1661 + }
1662 +
1663 + #ifdef CONFIG_BPF_JIT
1664 ++/* All BPF JIT sysctl knobs here. */
1665 ++int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
1666 ++int bpf_jit_harden __read_mostly;
1667 ++int bpf_jit_kallsyms __read_mostly;
1668 ++long bpf_jit_limit __read_mostly;
1669 ++
1670 + static __always_inline void
1671 + bpf_get_prog_addr_region(const struct bpf_prog *prog,
1672 + unsigned long *symbol_start,
1673 +@@ -358,8 +364,6 @@ static DEFINE_SPINLOCK(bpf_lock);
1674 + static LIST_HEAD(bpf_kallsyms);
1675 + static struct latch_tree_root bpf_tree __cacheline_aligned;
1676 +
1677 +-int bpf_jit_kallsyms __read_mostly;
1678 +-
1679 + static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
1680 + {
1681 + WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
1682 +@@ -486,27 +490,75 @@ int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
1683 + return ret;
1684 + }
1685 +
1686 ++static atomic_long_t bpf_jit_current;
1687 ++
1688 ++/* Can be overridden by an arch's JIT compiler if it has a custom,
1689 ++ * dedicated BPF backend memory area, or if neither of the two
1690 ++ * below apply.
1691 ++ */
1692 ++u64 __weak bpf_jit_alloc_exec_limit(void)
1693 ++{
1694 ++#if defined(MODULES_VADDR)
1695 ++ return MODULES_END - MODULES_VADDR;
1696 ++#else
1697 ++ return VMALLOC_END - VMALLOC_START;
1698 ++#endif
1699 ++}
1700 ++
1701 ++static int __init bpf_jit_charge_init(void)
1702 ++{
1703 ++ /* Only used as heuristic here to derive limit. */
1704 ++ bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
1705 ++ PAGE_SIZE), LONG_MAX);
1706 ++ return 0;
1707 ++}
1708 ++pure_initcall(bpf_jit_charge_init);
1709 ++
1710 ++static int bpf_jit_charge_modmem(u32 pages)
1711 ++{
1712 ++ if (atomic_long_add_return(pages, &bpf_jit_current) >
1713 ++ (bpf_jit_limit >> PAGE_SHIFT)) {
1714 ++ if (!capable(CAP_SYS_ADMIN)) {
1715 ++ atomic_long_sub(pages, &bpf_jit_current);
1716 ++ return -EPERM;
1717 ++ }
1718 ++ }
1719 ++
1720 ++ return 0;
1721 ++}
1722 ++
1723 ++static void bpf_jit_uncharge_modmem(u32 pages)
1724 ++{
1725 ++ atomic_long_sub(pages, &bpf_jit_current);
1726 ++}
1727 ++
1728 + struct bpf_binary_header *
1729 + bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
1730 + unsigned int alignment,
1731 + bpf_jit_fill_hole_t bpf_fill_ill_insns)
1732 + {
1733 + struct bpf_binary_header *hdr;
1734 +- unsigned int size, hole, start;
1735 ++ u32 size, hole, start, pages;
1736 +
1737 + /* Most of BPF filters are really small, but if some of them
1738 + * fill a page, allow at least 128 extra bytes to insert a
1739 + * random section of illegal instructions.
1740 + */
1741 + size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
1742 ++ pages = size / PAGE_SIZE;
1743 ++
1744 ++ if (bpf_jit_charge_modmem(pages))
1745 ++ return NULL;
1746 + hdr = module_alloc(size);
1747 +- if (hdr == NULL)
1748 ++ if (!hdr) {
1749 ++ bpf_jit_uncharge_modmem(pages);
1750 + return NULL;
1751 ++ }
1752 +
1753 + /* Fill space with illegal/arch-dep instructions. */
1754 + bpf_fill_ill_insns(hdr, size);
1755 +
1756 +- hdr->pages = size / PAGE_SIZE;
1757 ++ hdr->pages = pages;
1758 + hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
1759 + PAGE_SIZE - sizeof(*hdr));
1760 + start = (get_random_int() % hole) & ~(alignment - 1);
1761 +@@ -519,7 +571,10 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
1762 +
1763 + void bpf_jit_binary_free(struct bpf_binary_header *hdr)
1764 + {
1765 ++ u32 pages = hdr->pages;
1766 ++
1767 + module_memfree(hdr);
1768 ++ bpf_jit_uncharge_modmem(pages);
1769 + }
1770 +
1771 + /* This symbol is only overridden by archs that have different
1772 +@@ -540,8 +595,6 @@ void __weak bpf_jit_free(struct bpf_prog *fp)
1773 + bpf_prog_unlock_free(fp);
1774 + }
1775 +
1776 +-int bpf_jit_harden __read_mostly;
1777 +-
1778 + static int bpf_jit_blind_insn(const struct bpf_insn *from,
1779 + const struct bpf_insn *aux,
1780 + struct bpf_insn *to_buff)
1781 +@@ -1327,9 +1380,13 @@ EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1782 + };
1783 +
1784 + #else
1785 +-static unsigned int __bpf_prog_ret0(const void *ctx,
1786 +- const struct bpf_insn *insn)
1787 ++static unsigned int __bpf_prog_ret0_warn(const void *ctx,
1788 ++ const struct bpf_insn *insn)
1789 + {
1790 ++ /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
1791 ++ * is not working properly, so warn about it!
1792 ++ */
1793 ++ WARN_ON_ONCE(1);
1794 + return 0;
1795 + }
1796 + #endif
1797 +@@ -1386,7 +1443,7 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1798 +
1799 + fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
1800 + #else
1801 +- fp->bpf_func = __bpf_prog_ret0;
1802 ++ fp->bpf_func = __bpf_prog_ret0_warn;
1803 + #endif
1804 +
1805 + /* eBPF JITs can rewrite the program in case constant
1806 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
1807 +index 6a9a7e1066ef..84e4c23ed606 100644
1808 +--- a/mm/memcontrol.c
1809 ++++ b/mm/memcontrol.c
1810 +@@ -871,26 +871,45 @@ void mem_cgroup_iter_break(struct mem_cgroup *root,
1811 + css_put(&prev->css);
1812 + }
1813 +
1814 +-static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1815 ++static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1816 ++ struct mem_cgroup *dead_memcg)
1817 + {
1818 +- struct mem_cgroup *memcg = dead_memcg;
1819 + struct mem_cgroup_reclaim_iter *iter;
1820 + struct mem_cgroup_per_node *mz;
1821 + int nid;
1822 + int i;
1823 +
1824 +- for (; memcg; memcg = parent_mem_cgroup(memcg)) {
1825 +- for_each_node(nid) {
1826 +- mz = mem_cgroup_nodeinfo(memcg, nid);
1827 +- for (i = 0; i <= DEF_PRIORITY; i++) {
1828 +- iter = &mz->iter[i];
1829 +- cmpxchg(&iter->position,
1830 +- dead_memcg, NULL);
1831 +- }
1832 ++ for_each_node(nid) {
1833 ++ mz = mem_cgroup_nodeinfo(from, nid);
1834 ++ for (i = 0; i <= DEF_PRIORITY; i++) {
1835 ++ iter = &mz->iter[i];
1836 ++ cmpxchg(&iter->position,
1837 ++ dead_memcg, NULL);
1838 + }
1839 + }
1840 + }
1841 +
1842 ++static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1843 ++{
1844 ++ struct mem_cgroup *memcg = dead_memcg;
1845 ++ struct mem_cgroup *last;
1846 ++
1847 ++ do {
1848 ++ __invalidate_reclaim_iterators(memcg, dead_memcg);
1849 ++ last = memcg;
1850 ++ } while ((memcg = parent_mem_cgroup(memcg)));
1851 ++
1852 ++ /*
1853 ++ * When cgruop1 non-hierarchy mode is used,
1854 ++ * parent_mem_cgroup() does not walk all the way up to the
1855 ++ * cgroup root (root_mem_cgroup). So we have to handle
1856 ++ * dead_memcg from cgroup root separately.
1857 ++ */
1858 ++ if (last != root_mem_cgroup)
1859 ++ __invalidate_reclaim_iterators(root_mem_cgroup,
1860 ++ dead_memcg);
1861 ++}
1862 ++
1863 + /*
1864 + * Iteration constructs for visiting all cgroups (under a tree). If
1865 + * loops are exited prematurely (break), mem_cgroup_iter_break() must
1866 +diff --git a/mm/usercopy.c b/mm/usercopy.c
1867 +index a9852b24715d..975f7dff8059 100644
1868 +--- a/mm/usercopy.c
1869 ++++ b/mm/usercopy.c
1870 +@@ -121,7 +121,7 @@ static inline const char *check_kernel_text_object(const void *ptr,
1871 + static inline const char *check_bogus_address(const void *ptr, unsigned long n)
1872 + {
1873 + /* Reject if object wraps past end of memory. */
1874 +- if ((unsigned long)ptr + n < (unsigned long)ptr)
1875 ++ if ((unsigned long)ptr + (n - 1) < (unsigned long)ptr)
1876 + return "<wrapped address>";
1877 +
1878 + /* Reject if NULL or ZERO-allocation. */
1879 +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
1880 +index b967bd51bf1f..f9c6e8ca1fcb 100644
1881 +--- a/net/bridge/netfilter/ebtables.c
1882 ++++ b/net/bridge/netfilter/ebtables.c
1883 +@@ -1779,20 +1779,28 @@ static int compat_calc_entry(const struct ebt_entry *e,
1884 + return 0;
1885 + }
1886 +
1887 ++static int ebt_compat_init_offsets(unsigned int number)
1888 ++{
1889 ++ if (number > INT_MAX)
1890 ++ return -EINVAL;
1891 ++
1892 ++ /* also count the base chain policies */
1893 ++ number += NF_BR_NUMHOOKS;
1894 ++
1895 ++ return xt_compat_init_offsets(NFPROTO_BRIDGE, number);
1896 ++}
1897 +
1898 + static int compat_table_info(const struct ebt_table_info *info,
1899 + struct compat_ebt_replace *newinfo)
1900 + {
1901 + unsigned int size = info->entries_size;
1902 + const void *entries = info->entries;
1903 ++ int ret;
1904 +
1905 + newinfo->entries_size = size;
1906 +- if (info->nentries) {
1907 +- int ret = xt_compat_init_offsets(NFPROTO_BRIDGE,
1908 +- info->nentries);
1909 +- if (ret)
1910 +- return ret;
1911 +- }
1912 ++ ret = ebt_compat_init_offsets(info->nentries);
1913 ++ if (ret)
1914 ++ return ret;
1915 +
1916 + return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
1917 + entries, newinfo);
1918 +@@ -2240,11 +2248,9 @@ static int compat_do_replace(struct net *net, void __user *user,
1919 +
1920 + xt_compat_lock(NFPROTO_BRIDGE);
1921 +
1922 +- if (tmp.nentries) {
1923 +- ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
1924 +- if (ret < 0)
1925 +- goto out_unlock;
1926 +- }
1927 ++ ret = ebt_compat_init_offsets(tmp.nentries);
1928 ++ if (ret < 0)
1929 ++ goto out_unlock;
1930 +
1931 + ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
1932 + if (ret < 0)
1933 +diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
1934 +index a47ad6cd41c0..144cd1acd7e3 100644
1935 +--- a/net/core/sysctl_net_core.c
1936 ++++ b/net/core/sysctl_net_core.c
1937 +@@ -25,9 +25,12 @@
1938 +
1939 + static int zero = 0;
1940 + static int one = 1;
1941 ++static int two __maybe_unused = 2;
1942 + static int min_sndbuf = SOCK_MIN_SNDBUF;
1943 + static int min_rcvbuf = SOCK_MIN_RCVBUF;
1944 + static int max_skb_frags = MAX_SKB_FRAGS;
1945 ++static long long_one __maybe_unused = 1;
1946 ++static long long_max __maybe_unused = LONG_MAX;
1947 +
1948 + static int net_msg_warn; /* Unused, but still a sysctl */
1949 +
1950 +@@ -250,6 +253,50 @@ static int proc_do_rss_key(struct ctl_table *table, int write,
1951 + return proc_dostring(&fake_table, write, buffer, lenp, ppos);
1952 + }
1953 +
1954 ++#ifdef CONFIG_BPF_JIT
1955 ++static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write,
1956 ++ void __user *buffer, size_t *lenp,
1957 ++ loff_t *ppos)
1958 ++{
1959 ++ int ret, jit_enable = *(int *)table->data;
1960 ++ struct ctl_table tmp = *table;
1961 ++
1962 ++ if (write && !capable(CAP_SYS_ADMIN))
1963 ++ return -EPERM;
1964 ++
1965 ++ tmp.data = &jit_enable;
1966 ++ ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
1967 ++ if (write && !ret) {
1968 ++ *(int *)table->data = jit_enable;
1969 ++ if (jit_enable == 2)
1970 ++ pr_warn("bpf_jit_enable = 2 was set! NEVER use this in production, only for JIT debugging!\n");
1971 ++ }
1972 ++ return ret;
1973 ++}
1974 ++
1975 ++static int
1976 ++proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
1977 ++ void __user *buffer, size_t *lenp,
1978 ++ loff_t *ppos)
1979 ++{
1980 ++ if (!capable(CAP_SYS_ADMIN))
1981 ++ return -EPERM;
1982 ++
1983 ++ return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1984 ++}
1985 ++
1986 ++static int
1987 ++proc_dolongvec_minmax_bpf_restricted(struct ctl_table *table, int write,
1988 ++ void __user *buffer, size_t *lenp,
1989 ++ loff_t *ppos)
1990 ++{
1991 ++ if (!capable(CAP_SYS_ADMIN))
1992 ++ return -EPERM;
1993 ++
1994 ++ return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
1995 ++}
1996 ++#endif
1997 ++
1998 + static struct ctl_table net_core_table[] = {
1999 + #ifdef CONFIG_NET
2000 + {
2001 +@@ -325,13 +372,14 @@ static struct ctl_table net_core_table[] = {
2002 + .data = &bpf_jit_enable,
2003 + .maxlen = sizeof(int),
2004 + .mode = 0644,
2005 +-#ifndef CONFIG_BPF_JIT_ALWAYS_ON
2006 +- .proc_handler = proc_dointvec
2007 +-#else
2008 +- .proc_handler = proc_dointvec_minmax,
2009 ++ .proc_handler = proc_dointvec_minmax_bpf_enable,
2010 ++# ifdef CONFIG_BPF_JIT_ALWAYS_ON
2011 + .extra1 = &one,
2012 + .extra2 = &one,
2013 +-#endif
2014 ++# else
2015 ++ .extra1 = &zero,
2016 ++ .extra2 = &two,
2017 ++# endif
2018 + },
2019 + # ifdef CONFIG_HAVE_EBPF_JIT
2020 + {
2021 +@@ -339,16 +387,29 @@ static struct ctl_table net_core_table[] = {
2022 + .data = &bpf_jit_harden,
2023 + .maxlen = sizeof(int),
2024 + .mode = 0600,
2025 +- .proc_handler = proc_dointvec,
2026 ++ .proc_handler = proc_dointvec_minmax_bpf_restricted,
2027 ++ .extra1 = &zero,
2028 ++ .extra2 = &two,
2029 + },
2030 + {
2031 + .procname = "bpf_jit_kallsyms",
2032 + .data = &bpf_jit_kallsyms,
2033 + .maxlen = sizeof(int),
2034 + .mode = 0600,
2035 +- .proc_handler = proc_dointvec,
2036 ++ .proc_handler = proc_dointvec_minmax_bpf_restricted,
2037 ++ .extra1 = &zero,
2038 ++ .extra2 = &one,
2039 + },
2040 + # endif
2041 ++ {
2042 ++ .procname = "bpf_jit_limit",
2043 ++ .data = &bpf_jit_limit,
2044 ++ .maxlen = sizeof(long),
2045 ++ .mode = 0600,
2046 ++ .proc_handler = proc_dolongvec_minmax_bpf_restricted,
2047 ++ .extra1 = &long_one,
2048 ++ .extra2 = &long_max,
2049 ++ },
2050 + #endif
2051 + {
2052 + .procname = "netdev_tstamp_prequeue",
2053 +diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
2054 +index fa49a627b681..2e65271bed01 100644
2055 +--- a/net/netfilter/nf_conntrack_core.c
2056 ++++ b/net/netfilter/nf_conntrack_core.c
2057 +@@ -307,13 +307,12 @@ EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
2058 + * table location, we assume id gets exposed to userspace.
2059 + *
2060 + * Following nf_conn items do not change throughout lifetime
2061 +- * of the nf_conn after it has been committed to main hash table:
2062 ++ * of the nf_conn:
2063 + *
2064 + * 1. nf_conn address
2065 +- * 2. nf_conn->ext address
2066 +- * 3. nf_conn->master address (normally NULL)
2067 +- * 4. tuple
2068 +- * 5. the associated net namespace
2069 ++ * 2. nf_conn->master address (normally NULL)
2070 ++ * 3. the associated net namespace
2071 ++ * 4. the original direction tuple
2072 + */
2073 + u32 nf_ct_get_id(const struct nf_conn *ct)
2074 + {
2075 +@@ -323,9 +322,10 @@ u32 nf_ct_get_id(const struct nf_conn *ct)
2076 + net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
2077 +
2078 + a = (unsigned long)ct;
2079 +- b = (unsigned long)ct->master ^ net_hash_mix(nf_ct_net(ct));
2080 +- c = (unsigned long)ct->ext;
2081 +- d = (unsigned long)siphash(&ct->tuplehash, sizeof(ct->tuplehash),
2082 ++ b = (unsigned long)ct->master;
2083 ++ c = (unsigned long)nf_ct_net(ct);
2084 ++ d = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
2085 ++ sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple),
2086 + &ct_id_seed);
2087 + #ifdef CONFIG_64BIT
2088 + return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
2089 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
2090 +index 047ee7ff7038..1f86bf0d1649 100644
2091 +--- a/net/packet/af_packet.c
2092 ++++ b/net/packet/af_packet.c
2093 +@@ -2654,6 +2654,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2094 +
2095 + mutex_lock(&po->pg_vec_lock);
2096 +
2097 ++ /* packet_sendmsg() check on tx_ring.pg_vec was lockless,
2098 ++ * we need to confirm it under protection of pg_vec_lock.
2099 ++ */
2100 ++ if (unlikely(!po->tx_ring.pg_vec)) {
2101 ++ err = -EBUSY;
2102 ++ goto out;
2103 ++ }
2104 + if (likely(saddr == NULL)) {
2105 + dev = packet_cached_dev_get(po);
2106 + proto = po->num;
2107 +diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
2108 +index d13c1f1a77fb..c3ae3e80a5a4 100644
2109 +--- a/net/sctp/sm_sideeffect.c
2110 ++++ b/net/sctp/sm_sideeffect.c
2111 +@@ -541,7 +541,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
2112 + */
2113 + if (net->sctp.pf_enable &&
2114 + (transport->state == SCTP_ACTIVE) &&
2115 +- (asoc->pf_retrans < transport->pathmaxrxt) &&
2116 ++ (transport->error_count < transport->pathmaxrxt) &&
2117 + (transport->error_count > asoc->pf_retrans)) {
2118 +
2119 + sctp_assoc_control_transport(asoc, transport,
2120 +diff --git a/net/socket.c b/net/socket.c
2121 +index 6d8f0c248c7e..aab65277314d 100644
2122 +--- a/net/socket.c
2123 ++++ b/net/socket.c
2124 +@@ -2656,15 +2656,6 @@ out_fs:
2125 +
2126 + core_initcall(sock_init); /* early initcall */
2127 +
2128 +-static int __init jit_init(void)
2129 +-{
2130 +-#ifdef CONFIG_BPF_JIT_ALWAYS_ON
2131 +- bpf_jit_enable = 1;
2132 +-#endif
2133 +- return 0;
2134 +-}
2135 +-pure_initcall(jit_init);
2136 +-
2137 + #ifdef CONFIG_PROC_FS
2138 + void socket_seq_show(struct seq_file *seq)
2139 + {
2140 +diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
2141 +index 30e5746085b8..4e458fd9236a 100644
2142 +--- a/net/xfrm/xfrm_device.c
2143 ++++ b/net/xfrm/xfrm_device.c
2144 +@@ -153,12 +153,6 @@ static int xfrm_dev_register(struct net_device *dev)
2145 + return NOTIFY_DONE;
2146 + }
2147 +
2148 +-static int xfrm_dev_unregister(struct net_device *dev)
2149 +-{
2150 +- xfrm_policy_cache_flush();
2151 +- return NOTIFY_DONE;
2152 +-}
2153 +-
2154 + static int xfrm_dev_feat_change(struct net_device *dev)
2155 + {
2156 + if ((dev->features & NETIF_F_HW_ESP) && !dev->xfrmdev_ops)
2157 +@@ -178,7 +172,6 @@ static int xfrm_dev_down(struct net_device *dev)
2158 + if (dev->features & NETIF_F_HW_ESP)
2159 + xfrm_dev_state_flush(dev_net(dev), dev, true);
2160 +
2161 +- xfrm_policy_cache_flush();
2162 + return NOTIFY_DONE;
2163 + }
2164 +
2165 +@@ -190,9 +183,6 @@ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void
2166 + case NETDEV_REGISTER:
2167 + return xfrm_dev_register(dev);
2168 +
2169 +- case NETDEV_UNREGISTER:
2170 +- return xfrm_dev_unregister(dev);
2171 +-
2172 + case NETDEV_FEAT_CHANGE:
2173 + return xfrm_dev_feat_change(dev);
2174 +
2175 +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
2176 +index 70ec57b887f6..b5006a091fd6 100644
2177 +--- a/net/xfrm/xfrm_policy.c
2178 ++++ b/net/xfrm/xfrm_policy.c
2179 +@@ -45,8 +45,6 @@ struct xfrm_flo {
2180 + u8 flags;
2181 + };
2182 +
2183 +-static DEFINE_PER_CPU(struct xfrm_dst *, xfrm_last_dst);
2184 +-static struct work_struct *xfrm_pcpu_work __read_mostly;
2185 + static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
2186 + static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
2187 + __read_mostly;
2188 +@@ -1715,108 +1713,6 @@ static int xfrm_expand_policies(const struct flowi *fl, u16 family,
2189 +
2190 + }
2191 +
2192 +-static void xfrm_last_dst_update(struct xfrm_dst *xdst, struct xfrm_dst *old)
2193 +-{
2194 +- this_cpu_write(xfrm_last_dst, xdst);
2195 +- if (old)
2196 +- dst_release(&old->u.dst);
2197 +-}
2198 +-
2199 +-static void __xfrm_pcpu_work_fn(void)
2200 +-{
2201 +- struct xfrm_dst *old;
2202 +-
2203 +- old = this_cpu_read(xfrm_last_dst);
2204 +- if (old && !xfrm_bundle_ok(old))
2205 +- xfrm_last_dst_update(NULL, old);
2206 +-}
2207 +-
2208 +-static void xfrm_pcpu_work_fn(struct work_struct *work)
2209 +-{
2210 +- local_bh_disable();
2211 +- rcu_read_lock();
2212 +- __xfrm_pcpu_work_fn();
2213 +- rcu_read_unlock();
2214 +- local_bh_enable();
2215 +-}
2216 +-
2217 +-void xfrm_policy_cache_flush(void)
2218 +-{
2219 +- struct xfrm_dst *old;
2220 +- bool found = 0;
2221 +- int cpu;
2222 +-
2223 +- might_sleep();
2224 +-
2225 +- local_bh_disable();
2226 +- rcu_read_lock();
2227 +- for_each_possible_cpu(cpu) {
2228 +- old = per_cpu(xfrm_last_dst, cpu);
2229 +- if (old && !xfrm_bundle_ok(old)) {
2230 +- if (smp_processor_id() == cpu) {
2231 +- __xfrm_pcpu_work_fn();
2232 +- continue;
2233 +- }
2234 +- found = true;
2235 +- break;
2236 +- }
2237 +- }
2238 +-
2239 +- rcu_read_unlock();
2240 +- local_bh_enable();
2241 +-
2242 +- if (!found)
2243 +- return;
2244 +-
2245 +- get_online_cpus();
2246 +-
2247 +- for_each_possible_cpu(cpu) {
2248 +- bool bundle_release;
2249 +-
2250 +- rcu_read_lock();
2251 +- old = per_cpu(xfrm_last_dst, cpu);
2252 +- bundle_release = old && !xfrm_bundle_ok(old);
2253 +- rcu_read_unlock();
2254 +-
2255 +- if (!bundle_release)
2256 +- continue;
2257 +-
2258 +- if (cpu_online(cpu)) {
2259 +- schedule_work_on(cpu, &xfrm_pcpu_work[cpu]);
2260 +- continue;
2261 +- }
2262 +-
2263 +- rcu_read_lock();
2264 +- old = per_cpu(xfrm_last_dst, cpu);
2265 +- if (old && !xfrm_bundle_ok(old)) {
2266 +- per_cpu(xfrm_last_dst, cpu) = NULL;
2267 +- dst_release(&old->u.dst);
2268 +- }
2269 +- rcu_read_unlock();
2270 +- }
2271 +-
2272 +- put_online_cpus();
2273 +-}
2274 +-
2275 +-static bool xfrm_xdst_can_reuse(struct xfrm_dst *xdst,
2276 +- struct xfrm_state * const xfrm[],
2277 +- int num)
2278 +-{
2279 +- const struct dst_entry *dst = &xdst->u.dst;
2280 +- int i;
2281 +-
2282 +- if (xdst->num_xfrms != num)
2283 +- return false;
2284 +-
2285 +- for (i = 0; i < num; i++) {
2286 +- if (!dst || dst->xfrm != xfrm[i])
2287 +- return false;
2288 +- dst = dst->child;
2289 +- }
2290 +-
2291 +- return xfrm_bundle_ok(xdst);
2292 +-}
2293 +-
2294 + static struct xfrm_dst *
2295 + xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
2296 + const struct flowi *fl, u16 family,
2297 +@@ -1824,7 +1720,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
2298 + {
2299 + struct net *net = xp_net(pols[0]);
2300 + struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
2301 +- struct xfrm_dst *xdst, *old;
2302 ++ struct xfrm_dst *xdst;
2303 + struct dst_entry *dst;
2304 + int err;
2305 +
2306 +@@ -1839,21 +1735,6 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
2307 + return ERR_PTR(err);
2308 + }
2309 +
2310 +- xdst = this_cpu_read(xfrm_last_dst);
2311 +- if (xdst &&
2312 +- xdst->u.dst.dev == dst_orig->dev &&
2313 +- xdst->num_pols == num_pols &&
2314 +- memcmp(xdst->pols, pols,
2315 +- sizeof(struct xfrm_policy *) * num_pols) == 0 &&
2316 +- xfrm_xdst_can_reuse(xdst, xfrm, err)) {
2317 +- dst_hold(&xdst->u.dst);
2318 +- while (err > 0)
2319 +- xfrm_state_put(xfrm[--err]);
2320 +- return xdst;
2321 +- }
2322 +-
2323 +- old = xdst;
2324 +-
2325 + dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
2326 + if (IS_ERR(dst)) {
2327 + XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
2328 +@@ -1866,9 +1747,6 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
2329 + memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2330 + xdst->policy_genid = atomic_read(&pols[0]->genid);
2331 +
2332 +- atomic_set(&xdst->u.dst.__refcnt, 2);
2333 +- xfrm_last_dst_update(xdst, old);
2334 +-
2335 + return xdst;
2336 + }
2337 +
2338 +@@ -2069,11 +1947,8 @@ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
2339 + if (num_xfrms <= 0)
2340 + goto make_dummy_bundle;
2341 +
2342 +- local_bh_disable();
2343 + xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
2344 + xflo->dst_orig);
2345 +- local_bh_enable();
2346 +-
2347 + if (IS_ERR(xdst)) {
2348 + err = PTR_ERR(xdst);
2349 + if (err != -EAGAIN)
2350 +@@ -2160,11 +2035,9 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
2351 + goto no_transform;
2352 + }
2353 +
2354 +- local_bh_disable();
2355 + xdst = xfrm_resolve_and_create_bundle(
2356 + pols, num_pols, fl,
2357 + family, dst_orig);
2358 +- local_bh_enable();
2359 +
2360 + if (IS_ERR(xdst)) {
2361 + xfrm_pols_put(pols, num_pols);
2362 +@@ -2992,15 +2865,6 @@ static struct pernet_operations __net_initdata xfrm_net_ops = {
2363 +
2364 + void __init xfrm_init(void)
2365 + {
2366 +- int i;
2367 +-
2368 +- xfrm_pcpu_work = kmalloc_array(NR_CPUS, sizeof(*xfrm_pcpu_work),
2369 +- GFP_KERNEL);
2370 +- BUG_ON(!xfrm_pcpu_work);
2371 +-
2372 +- for (i = 0; i < NR_CPUS; i++)
2373 +- INIT_WORK(&xfrm_pcpu_work[i], xfrm_pcpu_work_fn);
2374 +-
2375 + register_pernet_subsys(&xfrm_net_ops);
2376 + seqcount_init(&xfrm_policy_hash_generation);
2377 + xfrm_input_init();
2378 +diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
2379 +index 0cd2bdf3b217..7c093de68780 100644
2380 +--- a/net/xfrm/xfrm_state.c
2381 ++++ b/net/xfrm/xfrm_state.c
2382 +@@ -735,10 +735,9 @@ restart:
2383 + }
2384 + out:
2385 + spin_unlock_bh(&net->xfrm.xfrm_state_lock);
2386 +- if (cnt) {
2387 ++ if (cnt)
2388 + err = 0;
2389 +- xfrm_policy_cache_flush();
2390 +- }
2391 ++
2392 + return err;
2393 + }
2394 + EXPORT_SYMBOL(xfrm_state_flush);
2395 +diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
2396 +index 991db7d6e4df..cf6f33b2633d 100644
2397 +--- a/scripts/Makefile.modpost
2398 ++++ b/scripts/Makefile.modpost
2399 +@@ -75,7 +75,7 @@ modpost = scripts/mod/modpost \
2400 + $(if $(CONFIG_MODULE_SRCVERSION_ALL),-a,) \
2401 + $(if $(KBUILD_EXTMOD),-i,-o) $(kernelsymfile) \
2402 + $(if $(KBUILD_EXTMOD),-I $(modulesymfile)) \
2403 +- $(if $(KBUILD_EXTRA_SYMBOLS), $(patsubst %, -e %,$(KBUILD_EXTRA_SYMBOLS))) \
2404 ++ $(if $(KBUILD_EXTMOD),$(addprefix -e ,$(KBUILD_EXTRA_SYMBOLS))) \
2405 + $(if $(KBUILD_EXTMOD),-o $(modulesymfile)) \
2406 + $(if $(CONFIG_DEBUG_SECTION_MISMATCH),,-S) \
2407 + $(if $(CONFIG_SECTION_MISMATCH_WARN_ONLY),,-E) \
2408 +diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
2409 +index 28e265a88383..ec9dda536d89 100644
2410 +--- a/sound/pci/hda/hda_generic.c
2411 ++++ b/sound/pci/hda/hda_generic.c
2412 +@@ -5896,6 +5896,24 @@ void snd_hda_gen_free(struct hda_codec *codec)
2413 + }
2414 + EXPORT_SYMBOL_GPL(snd_hda_gen_free);
2415 +
2416 ++/**
2417 ++ * snd_hda_gen_reboot_notify - Make codec enter D3 before rebooting
2418 ++ * @codec: the HDA codec
2419 ++ *
2420 ++ * This can be put as patch_ops reboot_notify function.
2421 ++ */
2422 ++void snd_hda_gen_reboot_notify(struct hda_codec *codec)
2423 ++{
2424 ++ /* Make the codec enter D3 to avoid spurious noises from the internal
2425 ++ * speaker during (and after) reboot
2426 ++ */
2427 ++ snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
2428 ++ snd_hda_codec_write(codec, codec->core.afg, 0,
2429 ++ AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
2430 ++ msleep(10);
2431 ++}
2432 ++EXPORT_SYMBOL_GPL(snd_hda_gen_reboot_notify);
2433 ++
2434 + #ifdef CONFIG_PM
2435 + /**
2436 + * snd_hda_gen_check_power_status - check the loopback power save state
2437 +@@ -5923,6 +5941,7 @@ static const struct hda_codec_ops generic_patch_ops = {
2438 + .init = snd_hda_gen_init,
2439 + .free = snd_hda_gen_free,
2440 + .unsol_event = snd_hda_jack_unsol_event,
2441 ++ .reboot_notify = snd_hda_gen_reboot_notify,
2442 + #ifdef CONFIG_PM
2443 + .check_power_status = snd_hda_gen_check_power_status,
2444 + #endif
2445 +@@ -5945,7 +5964,7 @@ static int snd_hda_parse_generic_codec(struct hda_codec *codec)
2446 +
2447 + err = snd_hda_parse_pin_defcfg(codec, &spec->autocfg, NULL, 0);
2448 + if (err < 0)
2449 +- return err;
2450 ++ goto error;
2451 +
2452 + err = snd_hda_gen_parse_auto_config(codec, &spec->autocfg);
2453 + if (err < 0)
2454 +diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
2455 +index 61772317de46..d82c09db0276 100644
2456 +--- a/sound/pci/hda/hda_generic.h
2457 ++++ b/sound/pci/hda/hda_generic.h
2458 +@@ -323,6 +323,7 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
2459 + struct auto_pin_cfg *cfg);
2460 + int snd_hda_gen_build_controls(struct hda_codec *codec);
2461 + int snd_hda_gen_build_pcms(struct hda_codec *codec);
2462 ++void snd_hda_gen_reboot_notify(struct hda_codec *codec);
2463 +
2464 + /* standard jack event callbacks */
2465 + void snd_hda_gen_hp_automute(struct hda_codec *codec,
2466 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
2467 +index 4631579e1e18..682f9657c16c 100644
2468 +--- a/sound/pci/hda/hda_intel.c
2469 ++++ b/sound/pci/hda/hda_intel.c
2470 +@@ -2577,6 +2577,9 @@ static const struct pci_device_id azx_ids[] = {
2471 + /* AMD, X370 & co */
2472 + { PCI_DEVICE(0x1022, 0x1457),
2473 + .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
2474 ++ /* AMD, X570 & co */
2475 ++ { PCI_DEVICE(0x1022, 0x1487),
2476 ++ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
2477 + /* AMD Stoney */
2478 + { PCI_DEVICE(0x1022, 0x157a),
2479 + .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
2480 +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
2481 +index 7bdf10e754f5..49be42d27761 100644
2482 +--- a/sound/pci/hda/patch_conexant.c
2483 ++++ b/sound/pci/hda/patch_conexant.c
2484 +@@ -210,23 +210,10 @@ static void cx_auto_reboot_notify(struct hda_codec *codec)
2485 + {
2486 + struct conexant_spec *spec = codec->spec;
2487 +
2488 +- switch (codec->core.vendor_id) {
2489 +- case 0x14f12008: /* CX8200 */
2490 +- case 0x14f150f2: /* CX20722 */
2491 +- case 0x14f150f4: /* CX20724 */
2492 +- break;
2493 +- default:
2494 +- return;
2495 +- }
2496 +-
2497 + /* Turn the problematic codec into D3 to avoid spurious noises
2498 + from the internal speaker during (and after) reboot */
2499 + cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false);
2500 +-
2501 +- snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
2502 +- snd_hda_codec_write(codec, codec->core.afg, 0,
2503 +- AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
2504 +- msleep(10);
2505 ++ snd_hda_gen_reboot_notify(codec);
2506 + }
2507 +
2508 + static void cx_auto_free(struct hda_codec *codec)
2509 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2510 +index 6d32c44cd0c8..32115e0b26c9 100644
2511 +--- a/sound/pci/hda/patch_realtek.c
2512 ++++ b/sound/pci/hda/patch_realtek.c
2513 +@@ -810,15 +810,6 @@ static void alc_reboot_notify(struct hda_codec *codec)
2514 + alc_shutup(codec);
2515 + }
2516 +
2517 +-/* power down codec to D3 at reboot/shutdown; set as reboot_notify ops */
2518 +-static void alc_d3_at_reboot(struct hda_codec *codec)
2519 +-{
2520 +- snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
2521 +- snd_hda_codec_write(codec, codec->core.afg, 0,
2522 +- AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
2523 +- msleep(10);
2524 +-}
2525 +-
2526 + #define alc_free snd_hda_gen_free
2527 +
2528 + #ifdef CONFIG_PM
2529 +@@ -4937,7 +4928,7 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
2530 + struct alc_spec *spec = codec->spec;
2531 +
2532 + if (action == HDA_FIXUP_ACT_PRE_PROBE) {
2533 +- spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */
2534 ++ spec->reboot_notify = snd_hda_gen_reboot_notify; /* reduce noise */
2535 + spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
2536 + codec->power_save_node = 0; /* avoid click noises */
2537 + snd_hda_apply_pincfgs(codec, pincfgs);
2538 +diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
2539 +index c892a28e7b04..6da7afa7d328 100644
2540 +--- a/tools/perf/util/header.c
2541 ++++ b/tools/perf/util/header.c
2542 +@@ -2901,6 +2901,13 @@ int perf_session__read_header(struct perf_session *session)
2543 + file->path);
2544 + }
2545 +
2546 ++ if (f_header.attr_size == 0) {
2547 ++ pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n"
2548 ++ "Was the 'perf record' command properly terminated?\n",
2549 ++ file->path);
2550 ++ return -EINVAL;
2551 ++ }
2552 ++
2553 + nr_attrs = f_header.attrs.size / f_header.attr_size;
2554 + lseek(fd, f_header.attrs.offset, SEEK_SET);
2555 +
2556 +@@ -2983,7 +2990,7 @@ int perf_event__synthesize_attr(struct perf_tool *tool,
2557 + size += sizeof(struct perf_event_header);
2558 + size += ids * sizeof(u64);
2559 +
2560 +- ev = malloc(size);
2561 ++ ev = zalloc(size);
2562 +
2563 + if (ev == NULL)
2564 + return -ENOMEM;
2565 +diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
2566 +index f574d02ac860..09ef6260477e 100644
2567 +--- a/virt/kvm/arm/arm.c
2568 ++++ b/virt/kvm/arm/arm.c
2569 +@@ -317,6 +317,16 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
2570 + void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
2571 + {
2572 + kvm_timer_schedule(vcpu);
2573 ++ /*
2574 ++ * If we're about to block (most likely because we've just hit a
2575 ++ * WFI), we need to sync back the state of the GIC CPU interface
2576 ++ * so that we have the latest PMR and group enables. This ensures
2577 ++ * that kvm_arch_vcpu_runnable has up-to-date data to decide
2578 ++ * whether we have pending interrupts.
2579 ++ */
2580 ++ preempt_disable();
2581 ++ kvm_vgic_vmcr_sync(vcpu);
2582 ++ preempt_enable();
2583 + }
2584 +
2585 + void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
2586 +diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
2587 +index 841d4b27555a..a2273a5aaece 100644
2588 +--- a/virt/kvm/arm/vgic/vgic-v2.c
2589 ++++ b/virt/kvm/arm/vgic/vgic-v2.c
2590 +@@ -407,10 +407,19 @@ void vgic_v2_load(struct kvm_vcpu *vcpu)
2591 + writel_relaxed(cpu_if->vgic_vmcr, vgic->vctrl_base + GICH_VMCR);
2592 + }
2593 +
2594 +-void vgic_v2_put(struct kvm_vcpu *vcpu)
2595 ++void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu)
2596 + {
2597 + struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
2598 + struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
2599 +
2600 + cpu_if->vgic_vmcr = readl_relaxed(vgic->vctrl_base + GICH_VMCR);
2601 + }
2602 ++
2603 ++void vgic_v2_put(struct kvm_vcpu *vcpu)
2604 ++{
2605 ++ struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
2606 ++ struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
2607 ++
2608 ++ vgic_v2_vmcr_sync(vcpu);
2609 ++ cpu_if->vgic_apr = readl_relaxed(vgic->vctrl_base + GICH_APR);
2610 ++}
2611 +diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
2612 +index a37b03c25457..094f8ff8f7ba 100644
2613 +--- a/virt/kvm/arm/vgic/vgic-v3.c
2614 ++++ b/virt/kvm/arm/vgic/vgic-v3.c
2615 +@@ -547,10 +547,15 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
2616 + kvm_call_hyp(__vgic_v3_write_vmcr, cpu_if->vgic_vmcr);
2617 + }
2618 +
2619 +-void vgic_v3_put(struct kvm_vcpu *vcpu)
2620 ++void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
2621 + {
2622 + struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
2623 +
2624 + if (likely(cpu_if->vgic_sre))
2625 + cpu_if->vgic_vmcr = kvm_call_hyp(__vgic_v3_read_vmcr);
2626 + }
2627 ++
2628 ++void vgic_v3_put(struct kvm_vcpu *vcpu)
2629 ++{
2630 ++ vgic_v3_vmcr_sync(vcpu);
2631 ++}
2632 +diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
2633 +index c9a8e7b7c300..9d4e01f10949 100644
2634 +--- a/virt/kvm/arm/vgic/vgic.c
2635 ++++ b/virt/kvm/arm/vgic/vgic.c
2636 +@@ -764,6 +764,17 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu)
2637 + vgic_v3_put(vcpu);
2638 + }
2639 +
2640 ++void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu)
2641 ++{
2642 ++ if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
2643 ++ return;
2644 ++
2645 ++ if (kvm_vgic_global_state.type == VGIC_V2)
2646 ++ vgic_v2_vmcr_sync(vcpu);
2647 ++ else
2648 ++ vgic_v3_vmcr_sync(vcpu);
2649 ++}
2650 ++
2651 + int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
2652 + {
2653 + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
2654 +diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
2655 +index 21a2240164f3..ade076da828b 100644
2656 +--- a/virt/kvm/arm/vgic/vgic.h
2657 ++++ b/virt/kvm/arm/vgic/vgic.h
2658 +@@ -168,6 +168,7 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
2659 + void vgic_v2_init_lrs(void);
2660 + void vgic_v2_load(struct kvm_vcpu *vcpu);
2661 + void vgic_v2_put(struct kvm_vcpu *vcpu);
2662 ++void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu);
2663 +
2664 + static inline void vgic_get_irq_kref(struct vgic_irq *irq)
2665 + {
2666 +@@ -195,6 +196,7 @@ bool vgic_v3_check_base(struct kvm *kvm);
2667 +
2668 + void vgic_v3_load(struct kvm_vcpu *vcpu);
2669 + void vgic_v3_put(struct kvm_vcpu *vcpu);
2670 ++void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu);
2671 +
2672 + bool vgic_has_its(struct kvm *kvm);
2673 + int kvm_vgic_register_its_device(void);