Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Fri, 24 Jun 2016 20:40:22
Message-Id: 1466800804.0a21ca48b5ca15cd0f5699ea94da60754a272c0c.mpagano@gentoo
commit: 0a21ca48b5ca15cd0f5699ea94da60754a272c0c
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Jun 24 20:40:04 2016 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Jun 24 20:40:04 2016 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0a21ca48

Linux patch 4.4.14

0000_README | 4 +
1013_linux-4.4.14.patch | 5210 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 5214 insertions(+)

diff --git a/0000_README b/0000_README
index 7a07f5e..9f33955 100644
--- a/0000_README
+++ b/0000_README
@@ -95,6 +95,10 @@ Patch: 1012_linux-4.4.13.patch
From: http://www.kernel.org
Desc: Linux 4.4.13

+Patch: 1013_linux-4.4.14.patch
+From: http://www.kernel.org
+Desc: Linux 4.4.14
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1013_linux-4.4.14.patch b/1013_linux-4.4.14.patch
new file mode 100644
index 0000000..075c39a
--- /dev/null
+++ b/1013_linux-4.4.14.patch
@@ -0,0 +1,5210 @@
+diff --git a/Makefile b/Makefile
+index f4b33cdf991a..fadbb9d73c6d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 13
++SUBLEVEL = 14
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+
+diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
+index ef9119f7462e..4d9375814b53 100644
+--- a/arch/arm/kernel/ptrace.c
++++ b/arch/arm/kernel/ptrace.c
+@@ -733,8 +733,8 @@ static int vfp_set(struct task_struct *target,
+ if (ret)
+ return ret;
+
+- vfp_flush_hwstate(thread);
+ thread->vfpstate.hard = new_vfp;
++ vfp_flush_hwstate(thread);
+
+ return 0;
+ }
+diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
+index faad6df49e5b..bc6492b9a924 100644
+--- a/arch/arm64/include/asm/elf.h
++++ b/arch/arm64/include/asm/elf.h
+@@ -156,14 +156,14 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+ #define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12))
+ #endif
+
+-#ifdef CONFIG_COMPAT
+-
+ #ifdef __AARCH64EB__
+ #define COMPAT_ELF_PLATFORM ("v8b")
+ #else
+ #define COMPAT_ELF_PLATFORM ("v8l")
+ #endif
+
++#ifdef CONFIG_COMPAT
++
+ #define COMPAT_ELF_ET_DYN_BASE (2 * TASK_SIZE_32 / 3)
+
+ /* AArch32 registers. */
+diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
+index a5f234039616..0166cfbc866c 100644
+--- a/arch/arm64/kernel/cpuinfo.c
++++ b/arch/arm64/kernel/cpuinfo.c
+@@ -22,6 +22,8 @@
+
+ #include <linux/bitops.h>
+ #include <linux/bug.h>
++#include <linux/compat.h>
++#include <linux/elf.h>
+ #include <linux/init.h>
+ #include <linux/kernel.h>
+ #include <linux/personality.h>
+@@ -102,6 +104,7 @@ static const char *const compat_hwcap2_str[] = {
+ static int c_show(struct seq_file *m, void *v)
+ {
+ int i, j;
++ bool compat = personality(current->personality) == PER_LINUX32;
+
+ for_each_online_cpu(i) {
+ struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
+@@ -113,6 +116,9 @@ static int c_show(struct seq_file *m, void *v)
+ * "processor". Give glibc what it expects.
+ */
+ seq_printf(m, "processor\t: %d\n", i);
++ if (compat)
++ seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n",
++ MIDR_REVISION(midr), COMPAT_ELF_PLATFORM);
+
+ seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+ loops_per_jiffy / (500000UL/HZ),
+@@ -125,7 +131,7 @@ static int c_show(struct seq_file *m, void *v)
+ * software which does already (at least for 32-bit).
+ */
+ seq_puts(m, "Features\t:");
+- if (personality(current->personality) == PER_LINUX32) {
++ if (compat) {
+ #ifdef CONFIG_COMPAT
+ for (j = 0; compat_hwcap_str[j]; j++)
+ if (compat_elf_hwcap & (1 << j))
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index 40f5522245a2..4c1a118c1d09 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -109,7 +109,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
+ * PTE_RDONLY is cleared by default in the asm below, so set it in
+ * back if necessary (read-only or clean PTE).
+ */
+- if (!pte_write(entry) || !dirty)
++ if (!pte_write(entry) || !pte_sw_dirty(entry))
+ pte_val(entry) |= PTE_RDONLY;
+
+ /*
+diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
+index 3f832c3dd8f5..041153f5cf93 100644
+--- a/arch/mips/include/asm/processor.h
++++ b/arch/mips/include/asm/processor.h
+@@ -45,7 +45,7 @@ extern unsigned int vced_count, vcei_count;
+ * User space process size: 2GB. This is hardcoded into a few places,
+ * so don't change it unless you know what you are doing.
+ */
+-#define TASK_SIZE 0x7fff8000UL
++#define TASK_SIZE 0x80000000UL
+ #endif
+
+ #define STACK_TOP_MAX TASK_SIZE
+diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
+index d7c0acb35ec2..8d49614d600d 100644
+--- a/arch/parisc/kernel/unaligned.c
++++ b/arch/parisc/kernel/unaligned.c
+@@ -666,7 +666,7 @@ void handle_unaligned(struct pt_regs *regs)
+ break;
+ }
+
+- if (modify && R1(regs->iir))
++ if (ret == 0 && modify && R1(regs->iir))
+ regs->gr[R1(regs->iir)] = newbase;
+
+
+@@ -677,6 +677,14 @@ void handle_unaligned(struct pt_regs *regs)
+
+ if (ret)
+ {
++ /*
++ * The unaligned handler failed.
++ * If we were called by __get_user() or __put_user() jump
++ * to it's exception fixup handler instead of crashing.
++ */
++ if (!user_mode(regs) && fixup_exception(regs))
++ return;
++
+ printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret);
+ die_if_kernel("Unaligned data reference", regs, 28);
+
+diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
+index 2220f7a60def..070fa8552051 100644
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -707,7 +707,7 @@
+ #define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */
+ #define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */
+ #define SPRN_MMCR1 798
+-#define SPRN_MMCR2 769
++#define SPRN_MMCR2 785
+ #define SPRN_MMCRA 0x312
+ #define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */
+ #define MMCRA_SDAR_DCACHE_MISS 0x40000000UL
+@@ -744,13 +744,13 @@
+ #define SPRN_PMC6 792
+ #define SPRN_PMC7 793
+ #define SPRN_PMC8 794
+-#define SPRN_SIAR 780
+-#define SPRN_SDAR 781
+ #define SPRN_SIER 784
+ #define SIER_SIPR 0x2000000 /* Sampled MSR_PR */
+ #define SIER_SIHV 0x1000000 /* Sampled MSR_HV */
+ #define SIER_SIAR_VALID 0x0400000 /* SIAR contents valid */
+ #define SIER_SDAR_VALID 0x0200000 /* SDAR contents valid */
++#define SPRN_SIAR 796
++#define SPRN_SDAR 797
+ #define SPRN_TACR 888
+ #define SPRN_TCSCR 889
+ #define SPRN_CSIGR 890
+diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
+index 92dea8df6b26..e52b82b71d79 100644
+--- a/arch/powerpc/kernel/prom_init.c
++++ b/arch/powerpc/kernel/prom_init.c
+@@ -655,6 +655,7 @@ unsigned char ibm_architecture_vec[] = {
+ W(0xffff0000), W(0x003e0000), /* POWER6 */
+ W(0xffff0000), W(0x003f0000), /* POWER7 */
+ W(0xffff0000), W(0x004b0000), /* POWER8E */
++ W(0xffff0000), W(0x004c0000), /* POWER8NVL */
+ W(0xffff0000), W(0x004d0000), /* POWER8 */
+ W(0xffffffff), W(0x0f000004), /* all 2.07-compliant */
+ W(0xffffffff), W(0x0f000003), /* all 2.06-compliant */
+diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
+index ac3ffd97e059..405baaf96864 100644
+--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
++++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
+@@ -615,29 +615,50 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
+ {
+ int config_addr;
+ int ret;
++ /* Waiting 0.2s maximum before skipping configuration */
++ int max_wait = 200;
+
+ /* Figure out the PE address */
+ config_addr = pe->config_addr;
+ if (pe->addr)
+ config_addr = pe->addr;
+
+- /* Use new configure-pe function, if supported */
+- if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
+- ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
+- config_addr, BUID_HI(pe->phb->buid),
+- BUID_LO(pe->phb->buid));
+- } else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
+- ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
+- config_addr, BUID_HI(pe->phb->buid),
+- BUID_LO(pe->phb->buid));
+- } else {
+- return -EFAULT;
+- }
++ while (max_wait > 0) {
++ /* Use new configure-pe function, if supported */
++ if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
++ ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
++ config_addr, BUID_HI(pe->phb->buid),
++ BUID_LO(pe->phb->buid));
++ } else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
++ ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
++ config_addr, BUID_HI(pe->phb->buid),
++ BUID_LO(pe->phb->buid));
++ } else {
++ return -EFAULT;
++ }
+
+- if (ret)
+- pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
+- __func__, pe->phb->global_number, pe->addr, ret);
++ if (!ret)
++ return ret;
++
++ /*
++ * If RTAS returns a delay value that's above 100ms, cut it
++ * down to 100ms in case firmware made a mistake. For more
++ * on how these delay values work see rtas_busy_delay_time
++ */
++ if (ret > RTAS_EXTENDED_DELAY_MIN+2 &&
++ ret <= RTAS_EXTENDED_DELAY_MAX)
++ ret = RTAS_EXTENDED_DELAY_MIN+2;
++
++ max_wait -= rtas_busy_delay_time(ret);
++
++ if (max_wait < 0)
++ break;
++
++ rtas_busy_delay(ret);
++ }
+
++ pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
++ __func__, pe->phb->global_number, pe->addr, ret);
+ return ret;
+ }
+
+diff --git a/arch/s390/net/bpf_jit.h b/arch/s390/net/bpf_jit.h
+index f010c93a88b1..fda605dbc1b4 100644
+--- a/arch/s390/net/bpf_jit.h
++++ b/arch/s390/net/bpf_jit.h
+@@ -37,7 +37,7 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
+ * | | |
+ * +---------------+ |
+ * | 8 byte skbp | |
+- * R15+170 -> +---------------+ |
++ * R15+176 -> +---------------+ |
+ * | 8 byte hlen | |
+ * R15+168 -> +---------------+ |
+ * | 4 byte align | |
+@@ -58,7 +58,7 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
+ #define STK_OFF (STK_SPACE - STK_160_UNUSED)
+ #define STK_OFF_TMP 160 /* Offset of tmp buffer on stack */
+ #define STK_OFF_HLEN 168 /* Offset of SKB header length on stack */
+-#define STK_OFF_SKBP 170 /* Offset of SKB pointer on stack */
++#define STK_OFF_SKBP 176 /* Offset of SKB pointer on stack */
+
+ #define STK_OFF_R6 (160 - 11 * 8) /* Offset of r6 on stack */
+ #define STK_OFF_TCCNT (160 - 12 * 8) /* Offset of tail_call_cnt on stack */
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index 9a0c4c22e536..0e2919dd8df3 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -45,7 +45,7 @@ struct bpf_jit {
+ int labels[1]; /* Labels for local jumps */
+ };
+
+-#define BPF_SIZE_MAX 0x7ffff /* Max size for program (20 bit signed displ) */
++#define BPF_SIZE_MAX 0xffff /* Max size for program (16 bit branches) */
+
+ #define SEEN_SKB 1 /* skb access */
+ #define SEEN_MEM 2 /* use mem[] for temporary storage */
+@@ -446,7 +446,7 @@ static void bpf_jit_prologue(struct bpf_jit *jit, bool is_classic)
+ emit_load_skb_data_hlen(jit);
+ if (jit->seen & SEEN_SKB_CHANGE)
+ /* stg %b1,ST_OFF_SKBP(%r0,%r15) */
+- EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15,
++ EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_1, REG_0, REG_15,
+ STK_OFF_SKBP);
+ /* Clear A (%b0) and X (%b7) registers for converted BPF programs */
+ if (is_classic) {
+diff --git a/arch/sparc/include/asm/head_64.h b/arch/sparc/include/asm/head_64.h
+index 10e9dabc4c41..f0700cfeedd7 100644
+--- a/arch/sparc/include/asm/head_64.h
++++ b/arch/sparc/include/asm/head_64.h
+@@ -15,6 +15,10 @@
+
+ #define PTREGS_OFF (STACK_BIAS + STACKFRAME_SZ)
+
++#define RTRAP_PSTATE (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
++#define RTRAP_PSTATE_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
++#define RTRAP_PSTATE_AG_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
++
+ #define __CHEETAH_ID 0x003e0014
+ #define __JALAPENO_ID 0x003e0016
+ #define __SERRANO_ID 0x003e0022
+diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
+index 131d36fcd07a..408b715c95a5 100644
+--- a/arch/sparc/include/asm/pgtable_64.h
++++ b/arch/sparc/include/asm/pgtable_64.h
+@@ -375,7 +375,7 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot)
+ #define pgprot_noncached pgprot_noncached
+
+ #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+-static inline pte_t pte_mkhuge(pte_t pte)
++static inline unsigned long __pte_huge_mask(void)
+ {
+ unsigned long mask;
+
+@@ -390,8 +390,19 @@ static inline pte_t pte_mkhuge(pte_t pte)
+ : "=r" (mask)
+ : "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V));
+
+- return __pte(pte_val(pte) | mask);
++ return mask;
++}
++
++static inline pte_t pte_mkhuge(pte_t pte)
++{
++ return __pte(pte_val(pte) | __pte_huge_mask());
++}
++
++static inline bool is_hugetlb_pte(pte_t pte)
++{
++ return !!(pte_val(pte) & __pte_huge_mask());
+ }
++
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ static inline pmd_t pmd_mkhuge(pmd_t pmd)
+ {
+@@ -403,6 +414,11 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
+ return __pmd(pte_val(pte));
+ }
+ #endif
++#else
++static inline bool is_hugetlb_pte(pte_t pte)
++{
++ return false;
++}
+ #endif
+
+ static inline pte_t pte_mkdirty(pte_t pte)
+@@ -865,6 +881,19 @@ static inline unsigned long pud_pfn(pud_t pud)
+ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
+ pte_t *ptep, pte_t orig, int fullmm);
+
++static void maybe_tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
++ pte_t *ptep, pte_t orig, int fullmm)
++{
++ /* It is more efficient to let flush_tlb_kernel_range()
++ * handle init_mm tlb flushes.
++ *
++ * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
++ * and SUN4V pte layout, so this inline test is fine.
++ */
++ if (likely(mm != &init_mm) && pte_accessible(mm, orig))
++ tlb_batch_add(mm, vaddr, ptep, orig, fullmm);
++}
++
+ #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+ unsigned long addr,
+@@ -881,15 +910,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t orig = *ptep;
+
+ *ptep = pte;
+-
+- /* It is more efficient to let flush_tlb_kernel_range()
+- * handle init_mm tlb flushes.
+- *
+- * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
+- * and SUN4V pte layout, so this inline test is fine.
+- */
+- if (likely(mm != &init_mm) && pte_accessible(mm, orig))
+- tlb_batch_add(mm, addr, ptep, orig, fullmm);
++ maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm);
+ }
+
+ #define set_pte_at(mm,addr,ptep,pte) \
+diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h
+index dea1cfa2122b..a8e192e90700 100644
+--- a/arch/sparc/include/asm/tlbflush_64.h
++++ b/arch/sparc/include/asm/tlbflush_64.h
+@@ -8,6 +8,7 @@
+ #define TLB_BATCH_NR 192
+
+ struct tlb_batch {
++ bool huge;
+ struct mm_struct *mm;
+ unsigned long tlb_nr;
+ unsigned long active;
+@@ -16,7 +17,7 @@ struct tlb_batch {
+
+ void flush_tsb_kernel_range(unsigned long start, unsigned long end);
+ void flush_tsb_user(struct tlb_batch *tb);
+-void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);
++void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, bool huge);
+
+ /* TLB flush operations. */
+
+diff --git a/arch/sparc/include/asm/ttable.h b/arch/sparc/include/asm/ttable.h
+index 71b5a67522ab..781b9f1dbdc2 100644
+--- a/arch/sparc/include/asm/ttable.h
++++ b/arch/sparc/include/asm/ttable.h
+@@ -589,8 +589,8 @@ user_rtt_fill_64bit: \
+ restored; \
+ nop; nop; nop; nop; nop; nop; \
+ nop; nop; nop; nop; nop; \
+- ba,a,pt %xcc, user_rtt_fill_fixup; \
+- ba,a,pt %xcc, user_rtt_fill_fixup; \
++ ba,a,pt %xcc, user_rtt_fill_fixup_dax; \
++ ba,a,pt %xcc, user_rtt_fill_fixup_mna; \
+ ba,a,pt %xcc, user_rtt_fill_fixup;
+
+
+@@ -652,8 +652,8 @@ user_rtt_fill_32bit: \
+ restored; \
+ nop; nop; nop; nop; nop; \
+ nop; nop; nop; \
+- ba,a,pt %xcc, user_rtt_fill_fixup; \
+- ba,a,pt %xcc, user_rtt_fill_fixup; \
++ ba,a,pt %xcc, user_rtt_fill_fixup_dax; \
++ ba,a,pt %xcc, user_rtt_fill_fixup_mna; \
+ ba,a,pt %xcc, user_rtt_fill_fixup;
+
+
+diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
+index 7cf9c6ea3f1f..fdb13327fded 100644
+--- a/arch/sparc/kernel/Makefile
++++ b/arch/sparc/kernel/Makefile
+@@ -21,6 +21,7 @@ CFLAGS_REMOVE_perf_event.o := -pg
+ CFLAGS_REMOVE_pcr.o := -pg
+ endif
+
++obj-$(CONFIG_SPARC64) += urtt_fill.o
+ obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o
+ obj-$(CONFIG_SPARC32) += etrap_32.o
+ obj-$(CONFIG_SPARC32) += rtrap_32.o
+diff --git a/arch/sparc/kernel/cherrs.S b/arch/sparc/kernel/cherrs.S
+index 4ee1ad420862..655628def68e 100644
+--- a/arch/sparc/kernel/cherrs.S
++++ b/arch/sparc/kernel/cherrs.S
+@@ -214,8 +214,7 @@ do_dcpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */
+ subcc %g1, %g2, %g1 ! Next cacheline
+ bge,pt %icc, 1b
+ nop
+- ba,pt %xcc, dcpe_icpe_tl1_common
+- nop
++ ba,a,pt %xcc, dcpe_icpe_tl1_common
+
+ do_dcpe_tl1_fatal:
+ sethi %hi(1f), %g7
+@@ -224,8 +223,7 @@ do_dcpe_tl1_fatal:
+ mov 0x2, %o0
+ call cheetah_plus_parity_error
+ add %sp, PTREGS_OFF, %o1
+- ba,pt %xcc, rtrap
+- nop
++ ba,a,pt %xcc, rtrap
+ .size do_dcpe_tl1,.-do_dcpe_tl1
+
+ .globl do_icpe_tl1
+@@ -259,8 +257,7 @@ do_icpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */
+ subcc %g1, %g2, %g1
+ bge,pt %icc, 1b
+ nop
+- ba,pt %xcc, dcpe_icpe_tl1_common
+- nop
++ ba,a,pt %xcc, dcpe_icpe_tl1_common
+
+ do_icpe_tl1_fatal:
+ sethi %hi(1f), %g7
+@@ -269,8 +266,7 @@ do_icpe_tl1_fatal:
+ mov 0x3, %o0
+ call cheetah_plus_parity_error
+ add %sp, PTREGS_OFF, %o1
+- ba,pt %xcc, rtrap
+- nop
++ ba,a,pt %xcc, rtrap
+ .size do_icpe_tl1,.-do_icpe_tl1
+
+ .type dcpe_icpe_tl1_common,#function
+@@ -456,7 +452,7 @@ __cheetah_log_error:
+ cmp %g2, 0x63
+ be c_cee
+ nop
+- ba,pt %xcc, c_deferred
++ ba,a,pt %xcc, c_deferred
+ .size __cheetah_log_error,.-__cheetah_log_error
+
+ /* Cheetah FECC trap handling, we get here from tl{0,1}_fecc
+diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
+index 33c02b15f478..a83707c83be8 100644
+--- a/arch/sparc/kernel/entry.S
++++ b/arch/sparc/kernel/entry.S
+@@ -948,7 +948,24 @@ linux_syscall_trace:
+ cmp %o0, 0
+ bne 3f
+ mov -ENOSYS, %o0
+
++ /* Syscall tracing can modify the registers. */
++ ld [%sp + STACKFRAME_SZ + PT_G1], %g1
++ sethi %hi(sys_call_table), %l7
++ ld [%sp + STACKFRAME_SZ + PT_I0], %i0
++ or %l7, %lo(sys_call_table), %l7
++ ld [%sp + STACKFRAME_SZ + PT_I1], %i1
++ ld [%sp + STACKFRAME_SZ + PT_I2], %i2
++ ld [%sp + STACKFRAME_SZ + PT_I3], %i3
++ ld [%sp + STACKFRAME_SZ + PT_I4], %i4
++ ld [%sp + STACKFRAME_SZ + PT_I5], %i5
++ cmp %g1, NR_syscalls
++ bgeu 3f
++ mov -ENOSYS, %o0
++
++ sll %g1, 2, %l4
+ mov %i0, %o0
++ ld [%l7 + %l4], %l7
+ mov %i1, %o1
+ mov %i2, %o2
+ mov %i3, %o3
+diff --git a/arch/sparc/kernel/fpu_traps.S b/arch/sparc/kernel/fpu_traps.S
+index a6864826a4bd..336d2750fe78 100644
+--- a/arch/sparc/kernel/fpu_traps.S
++++ b/arch/sparc/kernel/fpu_traps.S
+@@ -100,8 +100,8 @@ do_fpdis:
+ fmuld %f0, %f2, %f26
+ faddd %f0, %f2, %f28
+ fmuld %f0, %f2, %f30
+- b,pt %xcc, fpdis_exit
+- nop
++ ba,a,pt %xcc, fpdis_exit
+
+ 2: andcc %g5, FPRS_DU, %g0
+ bne,pt %icc, 3f
+ fzero %f32
+@@ -144,8 +144,8 @@ do_fpdis:
+ fmuld %f32, %f34, %f58
+ faddd %f32, %f34, %f60
+ fmuld %f32, %f34, %f62
+- ba,pt %xcc, fpdis_exit
+- nop
++ ba,a,pt %xcc, fpdis_exit
+
+ 3: mov SECONDARY_CONTEXT, %g3
+ add %g6, TI_FPREGS, %g1
+
+@@ -197,8 +197,7 @@ fpdis_exit2:
+ fp_other_bounce:
+ call do_fpother
+ add %sp, PTREGS_OFF, %o0
+- ba,pt %xcc, rtrap
+- nop
++ ba,a,pt %xcc, rtrap
+ .size fp_other_bounce,.-fp_other_bounce
+
+ .align 32
+diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
+index f2d30cab5b3f..51faf92ace00 100644
+--- a/arch/sparc/kernel/head_64.S
++++ b/arch/sparc/kernel/head_64.S
+@@ -461,9 +461,8 @@ sun4v_chip_type:
+ subcc %g3, 1, %g3
+ bne,pt %xcc, 41b
+ add %g1, 1, %g1
+- mov SUN4V_CHIP_SPARC64X, %g4
+ ba,pt %xcc, 5f
+- nop
++ mov SUN4V_CHIP_SPARC64X, %g4
+
+ 49:
+ mov SUN4V_CHIP_UNKNOWN, %g4
+@@ -548,8 +547,7 @@ sun4u_init:
+ stxa %g0, [%g7] ASI_DMMU
+ membar #Sync
+
+- ba,pt %xcc, sun4u_continue
+- nop
++ ba,a,pt %xcc, sun4u_continue
+
+ sun4v_init:
+ /* Set ctx 0 */
+@@ -560,14 +558,12 @@ sun4v_init:
+ mov SECONDARY_CONTEXT, %g7
+ stxa %g0, [%g7] ASI_MMU
+ membar #Sync
+- ba,pt %xcc, niagara_tlb_fixup
+- nop
++ ba,a,pt %xcc, niagara_tlb_fixup
+
+ sun4u_continue:
+ BRANCH_IF_ANY_CHEETAH(g1, g7, cheetah_tlb_fixup)
+
+- ba,pt %xcc, spitfire_tlb_fixup
+- nop
++ ba,a,pt %xcc, spitfire_tlb_fixup
+
+ niagara_tlb_fixup:
+ mov 3, %g2 /* Set TLB type to hypervisor. */
+@@ -639,8 +635,7 @@ niagara_patch:
+ call hypervisor_patch_cachetlbops
+ nop
+
+- ba,pt %xcc, tlb_fixup_done
+- nop
++ ba,a,pt %xcc, tlb_fixup_done
+
+ cheetah_tlb_fixup:
+ mov 2, %g2 /* Set TLB type to cheetah+. */
+@@ -659,8 +654,7 @@ cheetah_tlb_fixup:
+ call cheetah_patch_cachetlbops
+ nop
+
+- ba,pt %xcc, tlb_fixup_done
+- nop
++ ba,a,pt %xcc, tlb_fixup_done
+
+ spitfire_tlb_fixup:
+ /* Set TLB type to spitfire. */
+@@ -782,8 +776,7 @@ setup_trap_table:
+ call %o1
+ add %sp, (2047 + 128), %o0
+
+- ba,pt %xcc, 2f
+- nop
++ ba,a,pt %xcc, 2f
+
+ 1: sethi %hi(sparc64_ttable_tl0), %o0
+ set prom_set_trap_table_name, %g2
+@@ -822,8 +815,7 @@ setup_trap_table:
+
+ BRANCH_IF_ANY_CHEETAH(o2, o3, 1f)
+
+- ba,pt %xcc, 2f
+- nop
++ ba,a,pt %xcc, 2f
+
+ /* Disable STICK_INT interrupts. */
+ 1:
+diff --git a/arch/sparc/kernel/misctrap.S b/arch/sparc/kernel/misctrap.S
+index 753b4f031bfb..34b4933900bf 100644
+--- a/arch/sparc/kernel/misctrap.S
++++ b/arch/sparc/kernel/misctrap.S
+@@ -18,8 +18,7 @@ __do_privact:
+ 109: or %g7, %lo(109b), %g7
+ call do_privact
+ add %sp, PTREGS_OFF, %o0
+- ba,pt %xcc, rtrap
+- nop
++ ba,a,pt %xcc, rtrap
+ .size __do_privact,.-__do_privact
+
+ .type do_mna,#function
+@@ -46,8 +45,7 @@ do_mna:
+ mov %l5, %o2
+ call mem_address_unaligned
+ add %sp, PTREGS_OFF, %o0
+- ba,pt %xcc, rtrap
+- nop
++ ba,a,pt %xcc, rtrap
+ .size do_mna,.-do_mna
+
+ .type do_lddfmna,#function
+@@ -65,8 +63,7 @@ do_lddfmna:
+ mov %l5, %o2
+ call handle_lddfmna
+ add %sp, PTREGS_OFF, %o0
+- ba,pt %xcc, rtrap
+- nop
++ ba,a,pt %xcc, rtrap
+ .size do_lddfmna,.-do_lddfmna
+
+ .type do_stdfmna,#function
+@@ -84,8 +81,7 @@ do_stdfmna:
+ mov %l5, %o2
+ call handle_stdfmna
+ add %sp, PTREGS_OFF, %o0
+- ba,pt %xcc, rtrap
+- nop
++ ba,a,pt %xcc, rtrap
+ .size do_stdfmna,.-do_stdfmna
+
+ .type breakpoint_trap,#function
+diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
+index badf0951d73c..9f9614df9e1e 100644
+--- a/arch/sparc/kernel/pci.c
++++ b/arch/sparc/kernel/pci.c
+@@ -994,6 +994,23 @@ void pcibios_set_master(struct pci_dev *dev)
+ /* No special bus mastering setup handling */
+ }
+
++#ifdef CONFIG_PCI_IOV
++int pcibios_add_device(struct pci_dev *dev)
++{
++ struct pci_dev *pdev;
++
++ /* Add sriov arch specific initialization here.
++ * Copy dev_archdata from PF to VF
++ */
++ if (dev->is_virtfn) {
++ pdev = dev->physfn;
++ memcpy(&dev->dev.archdata, &pdev->dev.archdata,
++ sizeof(struct dev_archdata));
++ }
++ return 0;
++}
++#endif /* CONFIG_PCI_IOV */
++
+ static int __init pcibios_init(void)
+ {
+ pci_dfl_cache_line_size = 64 >> 2;
+diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
+index d08bdaffdbfc..216948ca4382 100644
+--- a/arch/sparc/kernel/rtrap_64.S
++++ b/arch/sparc/kernel/rtrap_64.S
+@@ -14,10 +14,6 @@
+ #include <asm/visasm.h>
+ #include <asm/processor.h>
+
+-#define RTRAP_PSTATE (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
+-#define RTRAP_PSTATE_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
+-#define RTRAP_PSTATE_AG_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
+-
+ #ifdef CONFIG_CONTEXT_TRACKING
+ # define SCHEDULE_USER schedule_user
+ #else
+@@ -242,52 +238,17 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
+ wrpr %g1, %cwp
+ ba,a,pt %xcc, user_rtt_fill_64bit
+
+-user_rtt_fill_fixup:
+- rdpr %cwp, %g1
+- add %g1, 1, %g1
+- wrpr %g1, 0x0, %cwp
+-
+- rdpr %wstate, %g2
+- sll %g2, 3, %g2
+- wrpr %g2, 0x0, %wstate
+-
+- /* We know %canrestore and %otherwin are both zero. */
+-
+- sethi %hi(sparc64_kern_pri_context), %g2
+- ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2
+- mov PRIMARY_CONTEXT, %g1
+-
+-661: stxa %g2, [%g1] ASI_DMMU
+- .section .sun4v_1insn_patch, "ax"
+- .word 661b
+- stxa %g2, [%g1] ASI_MMU
+- .previous
+-
+- sethi %hi(KERNBASE), %g1
+- flush %g1
++user_rtt_fill_fixup_dax:
++ ba,pt %xcc, user_rtt_fill_fixup_common
++ mov 1, %g3
+
+- or %g4, FAULT_CODE_WINFIXUP, %g4
+- stb %g4, [%g6 + TI_FAULT_CODE]
+- stx %g5, [%g6 + TI_FAULT_ADDR]
++user_rtt_fill_fixup_mna:
++ ba,pt %xcc, user_rtt_fill_fixup_common
++ mov 2, %g3
+
+- mov %g6, %l1
+- wrpr %g0, 0x0, %tl
+-
+-661: nop
+- .section .sun4v_1insn_patch, "ax"
+- .word 661b
+- SET_GL(0)
+- .previous
+-
+- wrpr %g0, RTRAP_PSTATE, %pstate
+-
+- mov %l1, %g6
+- ldx [%g6 + TI_TASK], %g4
+- LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
+- call do_sparc64_fault
+- add %sp, PTREGS_OFF, %o0
+- ba,pt %xcc, rtrap
+- nop
++user_rtt_fill_fixup:
++ ba,pt %xcc, user_rtt_fill_fixup_common
++ clr %g3
+
+ user_rtt_pre_restore:
+ add %g1, 1, %g1
+diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
+index 4eed773a7735..77655f0f0fc7 100644
+--- a/arch/sparc/kernel/signal32.c
++++ b/arch/sparc/kernel/signal32.c
+@@ -138,12 +138,24 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
+ return 0;
+ }
+
++/* Checks if the fp is valid. We always build signal frames which are
++ * 16-byte aligned, therefore we can always enforce that the restore
++ * frame has that property as well.
++ */
++static bool invalid_frame_pointer(void __user *fp, int fplen)
++{
++ if ((((unsigned long) fp) & 15) ||
++ ((unsigned long)fp) > 0x100000000ULL - fplen)
++ return true;
++ return false;
++}
++
+ void do_sigreturn32(struct pt_regs *regs)
+ {
+ struct signal_frame32 __user *sf;
+ compat_uptr_t fpu_save;
+ compat_uptr_t rwin_save;
+- unsigned int psr;
++ unsigned int psr, ufp;
+ unsigned pc, npc;
+ sigset_t set;
+ compat_sigset_t seta;
+@@ -158,11 +170,16 @@ void do_sigreturn32(struct pt_regs *regs)
+ sf = (struct signal_frame32 __user *) regs->u_regs[UREG_FP];
+
+ /* 1. Make sure we are not getting garbage from the user */
+- if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
+- (((unsigned long) sf) & 3))
++ if (invalid_frame_pointer(sf, sizeof(*sf)))
++ goto segv;
++
++ if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
++ goto segv;
++
++ if (ufp & 0x7)
+ goto segv;
+
+- if (get_user(pc, &sf->info.si_regs.pc) ||
++ if (__get_user(pc, &sf->info.si_regs.pc) ||
+ __get_user(npc, &sf->info.si_regs.npc))
+ goto segv;
+
+@@ -227,7 +244,7 @@ segv:
+ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
+ {
+ struct rt_signal_frame32 __user *sf;
+- unsigned int psr, pc, npc;
++ unsigned int psr, pc, npc, ufp;
+ compat_uptr_t fpu_save;
+ compat_uptr_t rwin_save;
+ sigset_t set;
+@@ -242,11 +259,16 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
+ sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP];
+
+ /* 1. Make sure we are not getting garbage from the user */
+- if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
+- (((unsigned long) sf) & 3))
++ if (invalid_frame_pointer(sf, sizeof(*sf)))
+ goto segv;
+
+- if (get_user(pc, &sf->regs.pc) ||
++ if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
++ goto segv;
++
++ if (ufp & 0x7)
++ goto segv;
++
++ if (__get_user(pc, &sf->regs.pc) ||
+ __get_user(npc, &sf->regs.npc))
+ goto segv;
+
+@@ -307,14 +329,6 @@ segv:
+ force_sig(SIGSEGV, current);
+ }
+
+-/* Checks if the fp is valid */
+-static int invalid_frame_pointer(void __user *fp, int fplen)
+-{
+- if ((((unsigned long) fp) & 7) || ((unsigned long)fp) > 0x100000000ULL - fplen)
+- return 1;
+- return 0;
+-}
+-
+ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
+ {
+ unsigned long sp;
+diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
+index 52aa5e4ce5e7..c3c12efe0bc0 100644
+--- a/arch/sparc/kernel/signal_32.c
++++ b/arch/sparc/kernel/signal_32.c
+@@ -60,10 +60,22 @@ struct rt_signal_frame {
+ #define SF_ALIGNEDSZ (((sizeof(struct signal_frame) + 7) & (~7)))
+ #define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame) + 7) & (~7)))
+
++/* Checks if the fp is valid. We always build signal frames which are
++ * 16-byte aligned, therefore we can always enforce that the restore
++ * frame has that property as well.
++ */
++static inline bool invalid_frame_pointer(void __user *fp, int fplen)
++{
++ if ((((unsigned long) fp) & 15) || !__access_ok((unsigned long)fp, fplen))
++ return true;
++
++ return false;
++}
++
+ asmlinkage void do_sigreturn(struct pt_regs *regs)
+ {
++ unsigned long up_psr, pc, npc, ufp;
+ struct signal_frame __user *sf;
+- unsigned long up_psr, pc, npc;
+ sigset_t set;
+ __siginfo_fpu_t __user *fpu_save;
+ __siginfo_rwin_t __user *rwin_save;
+@@ -77,10 +89,13 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
+ sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];
+
+ /* 1. Make sure we are not getting garbage from the user */
+- if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
++ if (!invalid_frame_pointer(sf, sizeof(*sf)))
++ goto segv_and_exit;
++
++ if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
+ goto segv_and_exit;
+
+- if (((unsigned long) sf) & 3)
++ if (ufp & 0x7)
+ goto segv_and_exit;
+
+ err = __get_user(pc, &sf->info.si_regs.pc);
+@@ -127,7 +142,7 @@ segv_and_exit:
+ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
+ {
+ struct rt_signal_frame __user *sf;
+- unsigned int psr, pc, npc;
++ unsigned int psr, pc, npc, ufp;
+ __siginfo_fpu_t __user *fpu_save;
+ __siginfo_rwin_t __user *rwin_save;
+ sigset_t set;
+@@ -135,8 +150,13 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
+
+ synchronize_user_stack();
+ sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
+- if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
+- (((unsigned long) sf) & 0x03))
++ if (!invalid_frame_pointer(sf, sizeof(*sf)))
++ goto segv;
++
++ if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
++ goto segv;
++
++ if (ufp & 0x7)
+ goto segv;
+
+ err = __get_user(pc, &sf->regs.pc);
+@@ -178,15 +198,6 @@ segv:
+ force_sig(SIGSEGV, current);
+ }
+
+-/* Checks if the fp is valid */
+-static inline int invalid_frame_pointer(void __user *fp, int fplen)
+-{
+- if ((((unsigned long) fp) & 7) || !__access_ok((unsigned long)fp, fplen))
+- return 1;
+-
+- return 0;
+-}
+-
+ static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
+ {
+ unsigned long sp = regs->u_regs[UREG_FP];
+diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
+index d88beff47bab..5ee930c48f4c 100644
+--- a/arch/sparc/kernel/signal_64.c
++++ b/arch/sparc/kernel/signal_64.c
+@@ -52,7 +52,7 @@ asmlinkage void sparc64_set_context(struct pt_regs *regs)
+ unsigned char fenab;
+ int err;
+
+- flush_user_windows();
++ synchronize_user_stack();
+ if (get_thread_wsaved() ||
+ (((unsigned long)ucp) & (sizeof(unsigned long)-1)) ||
+ (!__access_ok(ucp, sizeof(*ucp))))
+@@ -234,6 +234,17 @@ do_sigsegv:
+ goto out;
+ }
+
++/* Checks if the fp is valid. We always build rt signal frames which
++ * are 16-byte aligned, therefore we can always enforce that the
++ * restore frame has that property as well.
++ */
++static bool invalid_frame_pointer(void __user *fp)
++{
++ if (((unsigned long) fp) & 15)
++ return true;
++ return false;
++}
++
+ struct rt_signal_frame {
+ struct sparc_stackf ss;
+ siginfo_t info;
+@@ -246,8 +257,8 @@ struct rt_signal_frame {
+
+ void do_rt_sigreturn(struct pt_regs *regs)
+ {
++ unsigned long tpc, tnpc, tstate, ufp;
+ struct rt_signal_frame __user *sf;
+- unsigned long tpc, tnpc, tstate;
+ __siginfo_fpu_t __user *fpu_save;
+ __siginfo_rwin_t __user *rwin_save;
+ sigset_t set;
+@@ -261,10 +272,16 @@ void do_rt_sigreturn(struct pt_regs *regs)
+ (regs->u_regs [UREG_FP] + STACK_BIAS);
+
+ /* 1. Make sure we are not getting garbage from the user */
+- if (((unsigned long) sf) & 3)
++ if (invalid_frame_pointer(sf))
++ goto segv;
++
++ if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
+ goto segv;
+
+- err = get_user(tpc, &sf->regs.tpc);
++ if ((ufp + STACK_BIAS) & 0x7)
++ goto segv;
++
++ err = __get_user(tpc, &sf->regs.tpc);
+ err |= __get_user(tnpc, &sf->regs.tnpc);
+ if (test_thread_flag(TIF_32BIT)) {
+ tpc &= 0xffffffff;
+@@ -308,14 +325,6 @@ segv:
+ force_sig(SIGSEGV, current);
+ }
+
+-/* Checks if the fp is valid */
+-static int invalid_frame_pointer(void __user *fp)
+-{
+- if (((unsigned long) fp) & 15)
+- return 1;
+- return 0;
+-}
+-
+ static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
+ {
+ unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS;
+diff --git a/arch/sparc/kernel/sigutil_32.c b/arch/sparc/kernel/sigutil_32.c
+index 0f6eebe71e6c..e5fe8cef9a69 100644
+--- a/arch/sparc/kernel/sigutil_32.c
++++ b/arch/sparc/kernel/sigutil_32.c
+@@ -48,6 +48,10 @@ int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
+ int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
+ {
+ int err;
++
++ if (((unsigned long) fpu) & 3)
++ return -EFAULT;
++
+ #ifdef CONFIG_SMP
+ if (test_tsk_thread_flag(current, TIF_USEDFPU))
+ regs->psr &= ~PSR_EF;
+@@ -97,7 +101,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp)
+ struct thread_info *t = current_thread_info();
+ int i, wsaved, err;
+
+- __get_user(wsaved, &rp->wsaved);
++ if (((unsigned long) rp) & 3)
++ return -EFAULT;
++
++ get_user(wsaved, &rp->wsaved);
+ if (wsaved > NSWINS)
+ return -EFAULT;
+
+diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
+index 387834a9c56a..36aadcbeac69 100644
+--- a/arch/sparc/kernel/sigutil_64.c
++++ b/arch/sparc/kernel/sigutil_64.c
+@@ -37,7 +37,10 @@ int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
+ unsigned long fprs;
+ int err;
+
+- err = __get_user(fprs, &fpu->si_fprs);
++ if (((unsigned long) fpu) & 7)
++ return -EFAULT;
++
++ err = get_user(fprs, &fpu->si_fprs);
+ fprs_write(0);
+ regs->tstate &= ~TSTATE_PEF;
+ if (fprs & FPRS_DL)
+@@ -72,7 +75,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp)
+ struct thread_info *t = current_thread_info();
+ int i, wsaved, err;
+
+- __get_user(wsaved, &rp->wsaved);
++ if (((unsigned long) rp) & 7)
++ return -EFAULT;
++
++ get_user(wsaved, &rp->wsaved);
+ if (wsaved > NSWINS)
+ return -EFAULT;
+
+diff --git a/arch/sparc/kernel/spiterrs.S b/arch/sparc/kernel/spiterrs.S
+index c357e40ffd01..4a73009f66a5 100644
+--- a/arch/sparc/kernel/spiterrs.S
++++ b/arch/sparc/kernel/spiterrs.S
+@@ -85,8 +85,7 @@ __spitfire_cee_trap_continue:
+ ba,pt %xcc, etraptl1
+ rd %pc, %g7
+
+- ba,pt %xcc, 2f
+- nop
++ ba,a,pt %xcc, 2f
+
+ 1: ba,pt %xcc, etrap_irq
+ rd %pc, %g7
+@@ -100,8 +99,7 @@ __spitfire_cee_trap_continue:
+ mov %l5, %o2
+ call spitfire_access_error
+ add %sp, PTREGS_OFF, %o0
+- ba,pt %xcc, rtrap
+- nop
++ ba,a,pt %xcc, rtrap
+ .size __spitfire_access_error,.-__spitfire_access_error
+
+ /* This is the trap handler entry point for ECC correctable
+@@ -179,8 +177,7 @@ __spitfire_data_access_exception_tl1:
+ mov %l5, %o2
+ call spitfire_data_access_exception_tl1
+ add %sp, PTREGS_OFF, %o0
+- ba,pt %xcc, rtrap
+- nop
++ ba,a,pt %xcc, rtrap
+ .size __spitfire_data_access_exception_tl1,.-__spitfire_data_access_exception_tl1
+
+ .type __spitfire_data_access_exception,#function
+@@ -200,8 +197,7 @@ __spitfire_data_access_exception:
+ mov %l5, %o2
+ call spitfire_data_access_exception
+ add %sp, PTREGS_OFF, %o0
+- ba,pt %xcc, rtrap
+- nop
++ ba,a,pt %xcc, rtrap
+ .size __spitfire_data_access_exception,.-__spitfire_data_access_exception
+
+ .type __spitfire_insn_access_exception_tl1,#function
+@@ -220,8 +216,7 @@ __spitfire_insn_access_exception_tl1:
+ mov %l5, %o2
+ call spitfire_insn_access_exception_tl1
+ add %sp, PTREGS_OFF, %o0
+- ba,pt %xcc, rtrap
+- nop
++ ba,a,pt %xcc, rtrap
+ .size __spitfire_insn_access_exception_tl1,.-__spitfire_insn_access_exception_tl1
+
+ .type __spitfire_insn_access_exception,#function
+@@ -240,6 +235,5 @@ __spitfire_insn_access_exception:
+ mov %l5, %o2
+ call spitfire_insn_access_exception
+ add %sp, PTREGS_OFF, %o0
+- ba,pt %xcc, rtrap
+- nop
++ ba,a,pt %xcc, rtrap
+ .size __spitfire_insn_access_exception,.-__spitfire_insn_access_exception
+diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
+index bb0008927598..c4a1b5c40e4e 100644
+--- a/arch/sparc/kernel/syscalls.S
++++ b/arch/sparc/kernel/syscalls.S
+@@ -158,7 +158,25 @@ linux_syscall_trace32:
+ add %sp, PTREGS_OFF, %o0
+ brnz,pn %o0, 3f
+ mov -ENOSYS, %o0
+
++ /* Syscall tracing can modify the registers. */
++ ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
++ sethi %hi(sys_call_table32), %l7
++ ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0
++ or %l7, %lo(sys_call_table32), %l7
++ ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1
++ ldx [%sp + PTREGS_OFF + PT_V9_I2], %i2
++ ldx [%sp + PTREGS_OFF + PT_V9_I3], %i3
++ ldx [%sp + PTREGS_OFF + PT_V9_I4], %i4
++ ldx [%sp + PTREGS_OFF + PT_V9_I5], %i5
++
++ cmp %g1, NR_syscalls
++ bgeu,pn %xcc, 3f
++ mov -ENOSYS, %o0
++
++ sll %g1, 2, %l4
+ srl %i0, 0, %o0
++ lduw [%l7 + %l4], %l7
+ srl %i4, 0, %o4
+ srl %i1, 0, %o1
+ srl %i2, 0, %o2
+@@ -170,7 +188,25 @@ linux_syscall_trace:
+ add %sp, PTREGS_OFF, %o0
+ brnz,pn %o0, 3f
+ mov -ENOSYS, %o0
+
++ /* Syscall tracing can modify the registers. */
++ ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
++ sethi %hi(sys_call_table64), %l7
++ ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0
++ or %l7, %lo(sys_call_table64), %l7
++ ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1
++ ldx [%sp + PTREGS_OFF + PT_V9_I2], %i2
++ ldx [%sp + PTREGS_OFF + PT_V9_I3], %i3
++ ldx [%sp + PTREGS_OFF + PT_V9_I4], %i4
++ ldx [%sp + PTREGS_OFF + PT_V9_I5], %i5
++
++ cmp %g1, NR_syscalls
++ bgeu,pn %xcc, 3f
++ mov -ENOSYS, %o0
++
++ sll %g1, 2, %l4
+ mov %i0, %o0
++ lduw [%l7 + %l4], %l7
+ mov %i1, %o1
+ mov %i2, %o2
+ mov %i3, %o3
+diff --git a/arch/sparc/kernel/urtt_fill.S b/arch/sparc/kernel/urtt_fill.S
+new file mode 100644
+index 000000000000..5604a2b051d4
+--- /dev/null
++++ b/arch/sparc/kernel/urtt_fill.S
+@@ -0,0 +1,98 @@
++#include <asm/thread_info.h>
++#include <asm/trap_block.h>
++#include <asm/spitfire.h>
++#include <asm/ptrace.h>
++#include <asm/head.h>
++
++ .text
++ .align 8
++ .globl user_rtt_fill_fixup_common
++user_rtt_fill_fixup_common:
++ rdpr %cwp, %g1
++ add %g1, 1, %g1
++ wrpr %g1, 0x0, %cwp
++
++ rdpr %wstate, %g2
++ sll %g2, 3, %g2
++ wrpr %g2, 0x0, %wstate
++
++ /* We know %canrestore and %otherwin are both zero. */
++
++ sethi %hi(sparc64_kern_pri_context), %g2
++ ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2
++ mov PRIMARY_CONTEXT, %g1
++
++661: stxa %g2, [%g1] ASI_DMMU
++ .section .sun4v_1insn_patch, "ax"
++ .word 661b
++ stxa %g2, [%g1] ASI_MMU
++ .previous
++
++ sethi %hi(KERNBASE), %g1
++ flush %g1
++
++ mov %g4, %l4
++ mov %g5, %l5
++ brnz,pn %g3, 1f
++ mov %g3, %l3
++
++ or %g4, FAULT_CODE_WINFIXUP, %g4
++ stb %g4, [%g6 + TI_FAULT_CODE]
++ stx %g5, [%g6 + TI_FAULT_ADDR]
++1:
++ mov %g6, %l1
++ wrpr %g0, 0x0, %tl
++
++661: nop
++ .section .sun4v_1insn_patch, "ax"
++ .word 661b
++ SET_GL(0)
++ .previous
++
++ wrpr %g0, RTRAP_PSTATE, %pstate
++
++ mov %l1, %g6
++ ldx [%g6 + TI_TASK], %g4
++ LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
++
++ brnz,pn %l3, 1f
++ nop
++
++ call do_sparc64_fault
++ add %sp, PTREGS_OFF, %o0
++ ba,pt %xcc, rtrap
++ nop
++
++1: cmp %g3, 2
++ bne,pn %xcc, 2f
++ nop
++
++ sethi %hi(tlb_type), %g1
++ lduw [%g1 + %lo(tlb_type)], %g1
++ cmp %g1, 3
++ bne,pt %icc, 1f
++ add %sp, PTREGS_OFF, %o0
++ mov %l4, %o2
++ call sun4v_do_mna
++ mov %l5, %o1
++ ba,a,pt %xcc, rtrap
++1: mov %l4, %o1
++ mov %l5, %o2
++ call mem_address_unaligned
++ nop
++ ba,a,pt %xcc, rtrap
++
++2: sethi %hi(tlb_type), %g1
++ mov %l4, %o1
++ lduw [%g1 + %lo(tlb_type)], %g1
++ mov %l5, %o2
++ cmp %g1, 3
++ bne,pt %icc, 1f
++ add %sp, PTREGS_OFF, %o0
++ call sun4v_data_access_exception
++ nop
++ ba,a,pt %xcc, rtrap
++
++1: call spitfire_data_access_exception
++ nop
++ ba,a,pt %xcc, rtrap
+diff --git a/arch/sparc/kernel/utrap.S b/arch/sparc/kernel/utrap.S
+index b7f0f3f3a909..c731e8023d3e 100644
+--- a/arch/sparc/kernel/utrap.S
++++ b/arch/sparc/kernel/utrap.S
+@@ -11,8 +11,7 @@ utrap_trap: /* %g3=handler,%g4=level */
+ mov %l4, %o1
+ call bad_trap
+ add %sp, PTREGS_OFF, %o0
+- ba,pt %xcc, rtrap
+- nop
++ ba,a,pt %xcc, rtrap
+
+ invoke_utrap:
+ sllx %g3, 3, %g3
+diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
+index f1a2f688b28a..4a41d412dd3d 100644
+--- a/arch/sparc/kernel/vmlinux.lds.S
++++ b/arch/sparc/kernel/vmlinux.lds.S
+@@ -33,6 +33,10 @@ ENTRY(_start)
+ jiffies = jiffies_64;
+ #endif
+
++#ifdef CONFIG_SPARC64
++ASSERT((swapper_tsb == 0x0000000000408000), "Error: sparc64 early assembler too large")
++#endif
++
+ SECTIONS
+ {
+ #ifdef CONFIG_SPARC64
+diff --git a/arch/sparc/kernel/winfixup.S b/arch/sparc/kernel/winfixup.S
+index 1e67ce958369..855019a8590e 100644
+--- a/arch/sparc/kernel/winfixup.S
++++ b/arch/sparc/kernel/winfixup.S
+@@ -32,8 +32,7 @@ fill_fixup:
+ rd %pc, %g7
+ call do_sparc64_fault
+ add %sp, PTREGS_OFF, %o0
+- ba,pt %xcc, rtrap
+- nop
++ ba,a,pt %xcc, rtrap
+
+ /* Be very careful about usage of the trap globals here.
+ * You cannot touch %g5 as that has the fault information.
+diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
+index 131eaf4ad7f5..364d093f46c6 100644
+--- a/arch/sparc/mm/hugetlbpage.c
++++ b/arch/sparc/mm/hugetlbpage.c
+@@ -176,17 +176,31 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t entry)
+ {
+ int i;
++ pte_t orig[2];
++ unsigned long nptes;
+
+ if (!pte_present(*ptep) && pte_present(entry))
+ mm->context.huge_pte_count++;
+
+ addr &= HPAGE_MASK;
+- for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
+- set_pte_at(mm, addr, ptep, entry);
++
++ nptes = 1 << HUGETLB_PAGE_ORDER;
++ orig[0] = *ptep;
++ orig[1] = *(ptep + nptes / 2);
++ for (i = 0; i < nptes; i++) {
++ *ptep = entry;
+ ptep++;
+ addr += PAGE_SIZE;
+ pte_val(entry) += PAGE_SIZE;
+ }
++
++ /* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
++ addr -= REAL_HPAGE_SIZE;
++ ptep -= nptes / 2;
++ maybe_tlb_batch_add(mm, addr, ptep, orig[1], 0);
++ addr -= REAL_HPAGE_SIZE;
++ ptep -= nptes / 2;
++ maybe_tlb_batch_add(mm, addr, ptep, orig[0], 0);
+ }
+
+ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+@@ -194,19 +208,28 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+ {
+ pte_t entry;
+ int i;
++ unsigned long nptes;
+
+ entry = *ptep;
+ if (pte_present(entry))
+ mm->context.huge_pte_count--;
+
+ addr &= HPAGE_MASK;
+-
+- for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
+- pte_clear(mm, addr, ptep);
++ nptes = 1 << HUGETLB_PAGE_ORDER;
++ for (i = 0; i < nptes; i++) {
++ *ptep = __pte(0UL);
+ addr += PAGE_SIZE;
+ ptep++;
+ }
+
++ /* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
++ addr -= REAL_HPAGE_SIZE;
++ ptep -= nptes / 2;
++ maybe_tlb_batch_add(mm, addr, ptep, entry, 0);
++ addr -= REAL_HPAGE_SIZE;
++ ptep -= nptes / 2;
++ maybe_tlb_batch_add(mm, addr, ptep, entry, 0);
++
+ return entry;
+ }
+
+diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
+index 3025bd57f7ab..3c4b8975fa76 100644
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -324,18 +324,6 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde
+ tsb_insert(tsb, tag, tte);
+ }
+
+-#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+-static inline bool is_hugetlb_pte(pte_t pte)
+-{
+- if ((tlb_type == hypervisor &&
+- (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
+- (tlb_type != hypervisor &&
+- (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U))
+- return true;
+- return false;
+-}
+-#endif
+-
+ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+ {
+ struct mm_struct *mm;
+@@ -1267,13 +1255,6 @@ static int __init numa_parse_mdesc(void)
+ int i, j, err, count;
+ u64 node;
+
+- /* Some sane defaults for numa latency values */
+- for (i = 0; i < MAX_NUMNODES; i++) {
+- for (j = 0; j < MAX_NUMNODES; j++)
+- numa_latency[i][j] = (i == j) ?
+- LOCAL_DISTANCE : REMOTE_DISTANCE;
+- }
+-
+ node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
+ if (node == MDESC_NODE_NULL) {
+ mdesc_release(md);
+@@ -1369,10 +1350,18 @@ static int __init numa_parse_sun4u(void)
+
+ static int __init bootmem_init_numa(void)
+ {
++ int i, j;
+ int err = -1;
+
+ numadbg("bootmem_init_numa()\n");
+
++ /* Some sane defaults for numa latency values */
++ for (i = 0; i < MAX_NUMNODES; i++) {
++ for (j = 0; j < MAX_NUMNODES; j++)
++ numa_latency[i][j] = (i == j) ?
++ LOCAL_DISTANCE : REMOTE_DISTANCE;
++ }
++
+ if (numa_enabled) {
+ if (tlb_type == hypervisor)
+ err = numa_parse_mdesc();
+@@ -2832,9 +2821,10 @@ void hugetlb_setup(struct pt_regs *regs)
+ * the Data-TLB for huge pages.
+ */
+ if (tlb_type == cheetah_plus) {
++ bool need_context_reload = false;
+ unsigned long ctx;
+
+- spin_lock(&ctx_alloc_lock);
++ spin_lock_irq(&ctx_alloc_lock);
+ ctx = mm->context.sparc64_ctx_val;
+ ctx &= ~CTX_PGSZ_MASK;
+ ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
+@@ -2853,9 +2843,12 @@ void hugetlb_setup(struct pt_regs *regs)
+ * also executing in this address space.
+ */
+ mm->context.sparc64_ctx_val = ctx;
+- on_each_cpu(context_reload, mm, 0);
++ need_context_reload = true;
+ }
+- spin_unlock(&ctx_alloc_lock);
++ spin_unlock_irq(&ctx_alloc_lock);
++
++ if (need_context_reload)
++ on_each_cpu(context_reload, mm, 0);
+ }
+ }
+ #endif
+diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
+index 9df2190c097e..f81cd9736700 100644
+--- a/arch/sparc/mm/tlb.c
++++ b/arch/sparc/mm/tlb.c
+@@ -67,7 +67,7 @@ void arch_leave_lazy_mmu_mode(void)
+ }
+
+ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
+- bool exec)
++ bool exec, bool huge)
+ {
+ struct tlb_batch *tb = &get_cpu_var(tlb_batch);
+ unsigned long nr;
+@@ -84,13 +84,21 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
+ }
+
+ if (!tb->active) {
+- flush_tsb_user_page(mm, vaddr);
++ flush_tsb_user_page(mm, vaddr, huge);
+ global_flush_tlb_page(mm, vaddr);
+ goto out;
+ }
+
+- if (nr == 0)
++ if (nr == 0) {
+ tb->mm = mm;
++ tb->huge = huge;
++ }
++
++ if (tb->huge != huge) {
++ flush_tlb_pending();
++ tb->huge = huge;
++ nr = 0;
++ }
+
+ tb->vaddrs[nr] = vaddr;
+ tb->tlb_nr = ++nr;
+@@ -104,6 +112,8 @@ out:
+ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
+ pte_t *ptep, pte_t orig, int fullmm)
+ {
++ bool huge = is_hugetlb_pte(orig);
++
+ if (tlb_type != hypervisor &&
+ pte_dirty(orig)) {
+ unsigned long paddr, pfn = pte_pfn(orig);
+@@ -129,7 +139,7 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
+
+ no_cache_flush:
+ if (!fullmm)
+- tlb_batch_add_one(mm, vaddr, pte_exec(orig));
++ tlb_batch_add_one(mm, vaddr, pte_exec(orig), huge);
+ }
+
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+@@ -145,7 +155,7 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
+ if (pte_val(*pte) & _PAGE_VALID) {
+ bool exec = pte_exec(*pte);
+
+- tlb_batch_add_one(mm, vaddr, exec);
++ tlb_batch_add_one(mm, vaddr, exec, false);
+ }
+ pte++;
+ vaddr += PAGE_SIZE;
+@@ -185,8 +195,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ pte_t orig_pte = __pte(pmd_val(orig));
+ bool exec = pte_exec(orig_pte);
+
+- tlb_batch_add_one(mm, addr, exec);
+- tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec);
++ tlb_batch_add_one(mm, addr, exec, true);
++ tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
++ true);
+ } else {
+ tlb_batch_pmd_scan(mm, addr, orig);
+ }
1641 +diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
1642 +index a06576683c38..a0604a493a36 100644
1643 +--- a/arch/sparc/mm/tsb.c
1644 ++++ b/arch/sparc/mm/tsb.c
1645 +@@ -76,14 +76,15 @@ void flush_tsb_user(struct tlb_batch *tb)
1646 +
1647 + spin_lock_irqsave(&mm->context.lock, flags);
1648 +
1649 +- base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
1650 +- nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
1651 +- if (tlb_type == cheetah_plus || tlb_type == hypervisor)
1652 +- base = __pa(base);
1653 +- __flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
1654 +-
1655 ++ if (!tb->huge) {
1656 ++ base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
1657 ++ nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
1658 ++ if (tlb_type == cheetah_plus || tlb_type == hypervisor)
1659 ++ base = __pa(base);
1660 ++ __flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
1661 ++ }
1662 + #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
1663 +- if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
1664 ++ if (tb->huge && mm->context.tsb_block[MM_TSB_HUGE].tsb) {
1665 + base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
1666 + nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
1667 + if (tlb_type == cheetah_plus || tlb_type == hypervisor)
1668 +@@ -94,20 +95,21 @@ void flush_tsb_user(struct tlb_batch *tb)
1669 + spin_unlock_irqrestore(&mm->context.lock, flags);
1670 + }
1671 +
1672 +-void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
1673 ++void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, bool huge)
1674 + {
1675 + unsigned long nentries, base, flags;
1676 +
1677 + spin_lock_irqsave(&mm->context.lock, flags);
1678 +
1679 +- base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
1680 +- nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
1681 +- if (tlb_type == cheetah_plus || tlb_type == hypervisor)
1682 +- base = __pa(base);
1683 +- __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
1684 +-
1685 ++ if (!huge) {
1686 ++ base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
1687 ++ nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
1688 ++ if (tlb_type == cheetah_plus || tlb_type == hypervisor)
1689 ++ base = __pa(base);
1690 ++ __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
1691 ++ }
1692 + #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
1693 +- if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
1694 ++ if (huge && mm->context.tsb_block[MM_TSB_HUGE].tsb) {
1695 + base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
1696 + nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
1697 + if (tlb_type == cheetah_plus || tlb_type == hypervisor)
1698 +diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
1699 +index ade185a46b1d..679302c312f8 100644
1700 +--- a/arch/x86/kernel/traps.c
1701 ++++ b/arch/x86/kernel/traps.c
1702 +@@ -109,6 +109,12 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
1703 + preempt_count_dec();
1704 + }
1705 +
1706 ++/*
1707 ++ * In IST context, we explicitly disable preemption. This serves two
1708 ++ * purposes: it makes it much less likely that we would accidentally
1709 ++ * schedule in IST context and it will force a warning if we somehow
1710 ++ * manage to schedule by accident.
1711 ++ */
1712 + void ist_enter(struct pt_regs *regs)
1713 + {
1714 + if (user_mode(regs)) {
1715 +@@ -123,13 +129,7 @@ void ist_enter(struct pt_regs *regs)
1716 + rcu_nmi_enter();
1717 + }
1718 +
1719 +- /*
1720 +- * We are atomic because we're on the IST stack; or we're on
1721 +- * x86_32, in which case we still shouldn't schedule; or we're
1722 +- * on x86_64 and entered from user mode, in which case we're
1723 +- * still atomic unless ist_begin_non_atomic is called.
1724 +- */
1725 +- preempt_count_add(HARDIRQ_OFFSET);
1726 ++ preempt_disable();
1727 +
1728 + /* This code is a bit fragile. Test it. */
1729 + RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");
1730 +@@ -137,7 +137,7 @@ void ist_enter(struct pt_regs *regs)
1731 +
1732 + void ist_exit(struct pt_regs *regs)
1733 + {
1734 +- preempt_count_sub(HARDIRQ_OFFSET);
1735 ++ preempt_enable_no_resched();
1736 +
1737 + if (!user_mode(regs))
1738 + rcu_nmi_exit();
1739 +@@ -168,7 +168,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
1740 + BUG_ON((unsigned long)(current_top_of_stack() -
1741 + current_stack_pointer()) >= THREAD_SIZE);
1742 +
1743 +- preempt_count_sub(HARDIRQ_OFFSET);
1744 ++ preempt_enable_no_resched();
1745 + }
1746 +
1747 + /**
1748 +@@ -178,7 +178,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
1749 + */
1750 + void ist_end_non_atomic(void)
1751 + {
1752 +- preempt_count_add(HARDIRQ_OFFSET);
1753 ++ preempt_disable();
1754 + }
1755 +
1756 + static nokprobe_inline int
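In the traps.c hunks, IST entry and exit stop open-coding HARDIRQ_OFFSET arithmetic and use preempt_disable()/preempt_enable_no_resched() instead, so the context is accounted like any other preemption-disabled region and an accidental schedule trips the usual "scheduling while atomic" warning rather than silently corrupting state. A toy model of the paired enter/exit discipline (ist_enter, ist_exit and maybe_schedule here are stand-ins, not the kernel functions):

    #include <assert.h>
    #include <stdio.h>

    static int preempt_count;            /* models the per-CPU preempt counter */

    static void ist_enter(void) { preempt_count++; }
    static void ist_exit(void)  { assert(preempt_count > 0); preempt_count--; }

    static void maybe_schedule(void)
    {
        if (preempt_count)
            fprintf(stderr, "BUG: scheduling while atomic (count=%d)\n",
                    preempt_count);
    }

    int main(void)
    {
        ist_enter();
        maybe_schedule();   /* would warn: we are in IST context */
        ist_exit();
        maybe_schedule();   /* fine: balanced exit re-enabled preemption */
        return 0;
    }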
1757 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1758 +index 605cea75eb0d..be222666b1c2 100644
1759 +--- a/arch/x86/kvm/x86.c
1760 ++++ b/arch/x86/kvm/x86.c
1761 +@@ -3014,6 +3014,11 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
1762 + if (dbgregs->flags)
1763 + return -EINVAL;
1764 +
1765 ++ if (dbgregs->dr6 & ~0xffffffffull)
1766 ++ return -EINVAL;
1767 ++ if (dbgregs->dr7 & ~0xffffffffull)
1768 ++ return -EINVAL;
1769 ++
1770 + memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
1771 + kvm_update_dr0123(vcpu);
1772 + vcpu->arch.dr6 = dbgregs->dr6;
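The kvm hunk rejects KVM_SET_DEBUGREGS input with bits set above the low 32, since DR6/DR7 only define 32 architectural bits and loading reserved high bits can fault on the next debug-register write. The guard itself is just a mask test; a standalone version (check_debugreg is a hypothetical helper name):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* reject values with any of the architecturally reserved high bits set */
    static int check_debugreg(uint64_t val)
    {
        if (val & ~0xffffffffULL)
            return -EINVAL;
        return 0;
    }

    int main(void)
    {
        printf("%d\n", check_debugreg(0x00000000deadbeefULL)); /* 0: accepted */
        printf("%d\n", check_debugreg(0x1ULL << 32));          /* -22: rejected */
        return 0;
    }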
1773 +diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig
1774 +index 4870f28403f5..05bfe568cd30 100644
1775 +--- a/crypto/asymmetric_keys/Kconfig
1776 ++++ b/crypto/asymmetric_keys/Kconfig
1777 +@@ -14,6 +14,7 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE
1778 + select MPILIB
1779 + select PUBLIC_KEY_ALGO_RSA
1780 + select CRYPTO_HASH_INFO
1781 ++ select CRYPTO_AKCIPHER
1782 + help
1783 + This option provides support for asymmetric public key type handling.
1784 + If signature generation and/or verification are to be used,
1785 +diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
1786 +index 52c7395cb8d8..0d0d4529ee36 100644
1787 +--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
1788 ++++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
1789 +@@ -122,6 +122,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
1790 + struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1791 + struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
1792 + unsigned int unit;
1793 ++ u32 unit_size;
1794 + int ret;
1795 +
1796 + if (!ctx->u.aes.key_len)
1797 +@@ -133,11 +134,17 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
1798 + if (!req->info)
1799 + return -EINVAL;
1800 +
1801 +- for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++)
1802 +- if (!(req->nbytes & (unit_size_map[unit].size - 1)))
1803 +- break;
1804 ++ unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;
1805 ++ if (req->nbytes <= unit_size_map[0].size) {
1806 ++ for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) {
1807 ++ if (!(req->nbytes & (unit_size_map[unit].size - 1))) {
1808 ++ unit_size = unit_size_map[unit].value;
1809 ++ break;
1810 ++ }
1811 ++ }
1812 ++ }
1813 +
1814 +- if ((unit_size_map[unit].value == CCP_XTS_AES_UNIT_SIZE__LAST) ||
1815 ++ if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) ||
1816 + (ctx->u.aes.key_len != AES_KEYSIZE_128)) {
1817 + /* Use the fallback to process the request for any
1818 + * unsupported unit sizes or key sizes
1819 +@@ -158,7 +165,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
1820 + rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
1821 + rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
1822 + : CCP_AES_ACTION_DECRYPT;
1823 +- rctx->cmd.u.xts.unit_size = unit_size_map[unit].value;
1824 ++ rctx->cmd.u.xts.unit_size = unit_size;
1825 + rctx->cmd.u.xts.key = &ctx->u.aes.key_sg;
1826 + rctx->cmd.u.xts.key_len = ctx->u.aes.key_len;
1827 + rctx->cmd.u.xts.iv = &rctx->iv_sg;
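The ccp fix replaces a lookup whose index could fall off the end of unit_size_map[] with a local resolved to the __LAST sentinel up front; the table is only scanned when the request size can possibly match, and the sentinel routes everything else to the fallback cipher. The same shape in plain C, with made-up table values:

    #include <stdio.h>

    #define UNIT_SIZE_LAST (-1)   /* sentinel: no supported unit size */

    static const struct { unsigned int size; int value; } unit_size_map[] = {
        { 4096, 3 }, { 2048, 2 }, { 512, 1 }, { 16, 0 },
    };

    static int lookup_unit_size(unsigned int nbytes)
    {
        int unit_size = UNIT_SIZE_LAST;
        unsigned int i;

        if (nbytes <= unit_size_map[0].size)
            for (i = 0; i < sizeof(unit_size_map)/sizeof(unit_size_map[0]); i++)
                if (!(nbytes & (unit_size_map[i].size - 1))) {
                    unit_size = unit_size_map[i].value;
                    break;
                }
        return unit_size;   /* caller falls back when this is UNIT_SIZE_LAST */
    }

    int main(void)
    {
        printf("%d %d\n", lookup_unit_size(512), lookup_unit_size(8191));
        return 0;
    }

Initializing to the sentinel means the no-match case needs no extra bookkeeping after the loop.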
1828 +diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
1829 +index b9178d0a3093..aa1dbeaa9b49 100644
1830 +--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
1831 ++++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
1832 +@@ -145,8 +145,6 @@ int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf);
1833 + void adf_disable_aer(struct adf_accel_dev *accel_dev);
1834 + int adf_init_aer(void);
1835 + void adf_exit_aer(void);
1836 +-int adf_init_pf_wq(void);
1837 +-void adf_exit_pf_wq(void);
1838 + int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
1839 + void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
1840 + int adf_send_admin_init(struct adf_accel_dev *accel_dev);
1841 +@@ -229,6 +227,8 @@ void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
1842 + uint32_t vf_mask);
1843 + void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
1844 + uint32_t vf_mask);
1845 ++int adf_init_pf_wq(void);
1846 ++void adf_exit_pf_wq(void);
1847 + #else
1848 + static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
1849 + {
1850 +@@ -238,5 +238,14 @@ static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
1851 + static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
1852 + {
1853 + }
1854 ++
1855 ++static inline int adf_init_pf_wq(void)
1856 ++{
1857 ++ return 0;
1858 ++}
1859 ++
1860 ++static inline void adf_exit_pf_wq(void)
1861 ++{
1862 ++}
1863 + #endif
1864 + #endif
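The qat header change is a common kernel pattern: the adf_init_pf_wq()/adf_exit_pf_wq() declarations move inside the #ifdef CONFIG_PCI_IOV block and the #else branch gains static inline no-op stubs, so callers invoke them unconditionally and the compiler discards the calls when SR-IOV is configured out. A reduced example under a hypothetical CONFIG_FEATURE_X:

    #include <stdio.h>

    /* #define CONFIG_FEATURE_X 1 */     /* toggle to select the real version */

    #ifdef CONFIG_FEATURE_X
    int feature_init(void);              /* real implementation elsewhere */
    #else
    static inline int feature_init(void) /* stub: succeeds, costs nothing */
    {
        return 0;
    }
    #endif

    int main(void)
    {
        /* callers need no #ifdef of their own */
        return feature_init() ? 1 : 0;
    }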
1865 +diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
1866 +index 33a1f9779b86..4ea71d505bce 100644
1867 +--- a/drivers/gpio/gpio-bcm-kona.c
1868 ++++ b/drivers/gpio/gpio-bcm-kona.c
1869 +@@ -551,11 +551,11 @@ static void bcm_kona_gpio_reset(struct bcm_kona_gpio *kona_gpio)
1870 + /* disable interrupts and clear status */
1871 + for (i = 0; i < kona_gpio->num_bank; i++) {
1872 + /* Unlock the entire bank first */
1873 +- bcm_kona_gpio_write_lock_regs(kona_gpio, i, UNLOCK_CODE);
1874 ++ bcm_kona_gpio_write_lock_regs(reg_base, i, UNLOCK_CODE);
1875 + writel(0xffffffff, reg_base + GPIO_INT_MASK(i));
1876 + writel(0xffffffff, reg_base + GPIO_INT_STATUS(i));
1877 + /* Now re-lock the bank */
1878 +- bcm_kona_gpio_write_lock_regs(kona_gpio, i, LOCK_CODE);
1879 ++ bcm_kona_gpio_write_lock_regs(reg_base, i, LOCK_CODE);
1880 + }
1881 + }
1882 +
1883 +diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
1884 +index 24c5434abd1c..a02238c85e18 100644
1885 +--- a/drivers/gpu/drm/drm_crtc.c
1886 ++++ b/drivers/gpu/drm/drm_crtc.c
1887 +@@ -3316,6 +3316,24 @@ int drm_mode_addfb2(struct drm_device *dev,
1888 + return 0;
1889 + }
1890 +
1891 ++struct drm_mode_rmfb_work {
1892 ++ struct work_struct work;
1893 ++ struct list_head fbs;
1894 ++};
1895 ++
1896 ++static void drm_mode_rmfb_work_fn(struct work_struct *w)
1897 ++{
1898 ++ struct drm_mode_rmfb_work *arg = container_of(w, typeof(*arg), work);
1899 ++
1900 ++ while (!list_empty(&arg->fbs)) {
1901 ++ struct drm_framebuffer *fb =
1902 ++ list_first_entry(&arg->fbs, typeof(*fb), filp_head);
1903 ++
1904 ++ list_del_init(&fb->filp_head);
1905 ++ drm_framebuffer_remove(fb);
1906 ++ }
1907 ++}
1908 ++
1909 + /**
1910 + * drm_mode_rmfb - remove an FB from the configuration
1911 + * @dev: drm device for the ioctl
1912 +@@ -3356,7 +3374,25 @@ int drm_mode_rmfb(struct drm_device *dev,
1913 + mutex_unlock(&dev->mode_config.fb_lock);
1914 + mutex_unlock(&file_priv->fbs_lock);
1915 +
1916 +- drm_framebuffer_unreference(fb);
1917 ++ /*
1918 ++ * we now own the reference that was stored in the fbs list
1919 ++ *
1920 ++ * drm_framebuffer_remove may fail with -EINTR on pending signals,
1921 ++ * so run this in a separate stack as there's no way to correctly
1922 ++ * handle this after the fb is already removed from the lookup table.
1923 ++ */
1924 ++ if (atomic_read(&fb->refcount.refcount) > 1) {
1925 ++ struct drm_mode_rmfb_work arg;
1926 ++
1927 ++ INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
1928 ++ INIT_LIST_HEAD(&arg.fbs);
1929 ++ list_add_tail(&fb->filp_head, &arg.fbs);
1930 ++
1931 ++ schedule_work(&arg.work);
1932 ++ flush_work(&arg.work);
1933 ++ destroy_work_on_stack(&arg.work);
1934 ++ } else
1935 ++ drm_framebuffer_unreference(fb);
1936 +
1937 + return 0;
1938 +
1939 +@@ -3509,7 +3545,6 @@ out_err1:
1940 + return ret;
1941 + }
1942 +
1943 +-
1944 + /**
1945 + * drm_fb_release - remove and free the FBs on this file
1946 + * @priv: drm file for the ioctl
1947 +@@ -3524,6 +3559,9 @@ out_err1:
1948 + void drm_fb_release(struct drm_file *priv)
1949 + {
1950 + struct drm_framebuffer *fb, *tfb;
1951 ++ struct drm_mode_rmfb_work arg;
1952 ++
1953 ++ INIT_LIST_HEAD(&arg.fbs);
1954 +
1955 + /*
1956 + * When the file gets released that means no one else can access the fb
1957 +@@ -3536,10 +3574,22 @@ void drm_fb_release(struct drm_file *priv)
1958 + * at it any more.
1959 + */
1960 + list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
1961 +- list_del_init(&fb->filp_head);
1962 ++ if (atomic_read(&fb->refcount.refcount) > 1) {
1963 ++ list_move_tail(&fb->filp_head, &arg.fbs);
1964 ++ } else {
1965 ++ list_del_init(&fb->filp_head);
1966 +
1967 +- /* This drops the fpriv->fbs reference. */
1968 +- drm_framebuffer_unreference(fb);
1969 ++ /* This drops the fpriv->fbs reference. */
1970 ++ drm_framebuffer_unreference(fb);
1971 ++ }
1972 ++ }
1973 ++
1974 ++ if (!list_empty(&arg.fbs)) {
1975 ++ INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
1976 ++
1977 ++ schedule_work(&arg.work);
1978 ++ flush_work(&arg.work);
1979 ++ destroy_work_on_stack(&arg.work);
1980 + }
1981 + }
1982 +
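The drm_crtc.c change works around drm_framebuffer_remove() failing with -EINTR on pending signals, which is unrecoverable once the fb has already left the lookup table: when other references remain, final removal is pushed to an on-stack work item (a kernel worker has no pending signals) and waited for with flush_work(). The decision both call sites now make, loosely modeled in userspace with a plain function standing in for the worker:

    #include <stdio.h>

    struct fb { int refcount; const char *name; };

    /* stand-in for the signal-free worker context */
    static void worker_remove(struct fb *fb)
    {
        printf("worker removing %s\n", fb->name);
    }

    static void release_fb(struct fb *fb)
    {
        if (fb->refcount > 1)
            worker_remove(fb);   /* deferred: full teardown, no signals */
        else
            printf("dropping last ref on %s inline\n", fb->name);
    }

    int main(void)
    {
        struct fb idle = { 1, "idle-fb" }, active = { 3, "scanout-fb" };
        release_fb(&idle);
        release_fb(&active);
        return 0;
    }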
1983 +diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
1984 +index 2b34622a4bfe..3920c3eb6006 100644
1985 +--- a/drivers/net/ethernet/rocker/rocker.c
1986 ++++ b/drivers/net/ethernet/rocker/rocker.c
1987 +@@ -4475,7 +4475,7 @@ static int rocker_port_obj_add(struct net_device *dev,
1988 + fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
1989 + err = rocker_port_fib_ipv4(rocker_port, trans,
1990 + htonl(fib4->dst), fib4->dst_len,
1991 +- &fib4->fi, fib4->tb_id, 0);
1992 ++ fib4->fi, fib4->tb_id, 0);
1993 + break;
1994 + case SWITCHDEV_OBJ_ID_PORT_FDB:
1995 + err = rocker_port_fdb_add(rocker_port, trans,
1996 +@@ -4547,7 +4547,7 @@ static int rocker_port_obj_del(struct net_device *dev,
1997 + fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
1998 + err = rocker_port_fib_ipv4(rocker_port, NULL,
1999 + htonl(fib4->dst), fib4->dst_len,
2000 +- &fib4->fi, fib4->tb_id,
2001 ++ fib4->fi, fib4->tb_id,
2002 + ROCKER_OP_FLAG_REMOVE);
2003 + break;
2004 + case SWITCHDEV_OBJ_ID_PORT_FDB:
2005 +diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
2006 +index e6a084a6be12..cbe9a330117a 100644
2007 +--- a/drivers/net/ethernet/sfc/ef10.c
2008 ++++ b/drivers/net/ethernet/sfc/ef10.c
2009 +@@ -619,6 +619,17 @@ fail:
2010 + return rc;
2011 + }
2012 +
2013 ++static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
2014 ++{
2015 ++ struct efx_channel *channel;
2016 ++ struct efx_tx_queue *tx_queue;
2017 ++
2018 ++ /* All our existing PIO buffers went away */
2019 ++ efx_for_each_channel(channel, efx)
2020 ++ efx_for_each_channel_tx_queue(tx_queue, channel)
2021 ++ tx_queue->piobuf = NULL;
2022 ++}
2023 ++
2024 + #else /* !EFX_USE_PIO */
2025 +
2026 + static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
2027 +@@ -635,6 +646,10 @@ static void efx_ef10_free_piobufs(struct efx_nic *efx)
2028 + {
2029 + }
2030 +
2031 ++static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
2032 ++{
2033 ++}
2034 ++
2035 + #endif /* EFX_USE_PIO */
2036 +
2037 + static void efx_ef10_remove(struct efx_nic *efx)
2038 +@@ -1018,6 +1033,7 @@ static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
2039 + nic_data->must_realloc_vis = true;
2040 + nic_data->must_restore_filters = true;
2041 + nic_data->must_restore_piobufs = true;
2042 ++ efx_ef10_forget_old_piobufs(efx);
2043 + nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
2044 +
2045 + /* Driver-created vswitches and vports must be re-created */
2046 +diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
2047 +index 58efdec12f30..69e31e2a68fc 100644
2048 +--- a/drivers/net/geneve.c
2049 ++++ b/drivers/net/geneve.c
2050 +@@ -310,15 +310,15 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
2051 +
2052 + /* Need Geneve and inner Ethernet header to be present */
2053 + if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN)))
2054 +- goto error;
2055 ++ goto drop;
2056 +
2057 + /* Return packets with reserved bits set */
2058 + geneveh = geneve_hdr(skb);
2059 + if (unlikely(geneveh->ver != GENEVE_VER))
2060 +- goto error;
2061 ++ goto drop;
2062 +
2063 + if (unlikely(geneveh->proto_type != htons(ETH_P_TEB)))
2064 +- goto error;
2065 ++ goto drop;
2066 +
2067 + opts_len = geneveh->opt_len * 4;
2068 + if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len,
2069 +@@ -336,10 +336,6 @@ drop:
2070 + /* Consume bad packet */
2071 + kfree_skb(skb);
2072 + return 0;
2073 +-
2074 +-error:
2075 +- /* Let the UDP layer deal with the skb */
2076 +- return 1;
2077 + }
2078 +
2079 + static struct socket *geneve_create_sock(struct net *net, bool ipv6,
2080 +@@ -998,6 +994,17 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
2081 + return geneve_xmit_skb(skb, dev, info);
2082 + }
2083 +
2084 ++static int geneve_change_mtu(struct net_device *dev, int new_mtu)
2085 ++{
2086 ++ /* GENEVE overhead is not fixed, so we can't enforce a more
2087 ++ * precise max MTU.
2088 ++ */
2089 ++ if (new_mtu < 68 || new_mtu > IP_MAX_MTU)
2090 ++ return -EINVAL;
2091 ++ dev->mtu = new_mtu;
2092 ++ return 0;
2093 ++}
2094 ++
2095 + static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
2096 + {
2097 + struct ip_tunnel_info *info = skb_tunnel_info(skb);
2098 +@@ -1042,7 +1049,7 @@ static const struct net_device_ops geneve_netdev_ops = {
2099 + .ndo_stop = geneve_stop,
2100 + .ndo_start_xmit = geneve_xmit,
2101 + .ndo_get_stats64 = ip_tunnel_get_stats64,
2102 +- .ndo_change_mtu = eth_change_mtu,
2103 ++ .ndo_change_mtu = geneve_change_mtu,
2104 + .ndo_validate_addr = eth_validate_addr,
2105 + .ndo_set_mac_address = eth_mac_addr,
2106 + .ndo_fill_metadata_dst = geneve_fill_metadata_dst,
2107 +@@ -1349,11 +1356,21 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
2108 +
2109 + err = geneve_configure(net, dev, &geneve_remote_unspec,
2110 + 0, 0, 0, htons(dst_port), true);
2111 +- if (err) {
2112 +- free_netdev(dev);
2113 +- return ERR_PTR(err);
2114 +- }
2115 ++ if (err)
2116 ++ goto err;
2117 ++
2118 ++ /* openvswitch users expect packet sizes to be unrestricted,
2119 ++ * so set the largest MTU we can.
2120 ++ */
2121 ++ err = geneve_change_mtu(dev, IP_MAX_MTU);
2122 ++ if (err)
2123 ++ goto err;
2124 ++
2125 + return dev;
2126 ++
2127 ++ err:
2128 ++ free_netdev(dev);
2129 ++ return ERR_PTR(err);
2130 + }
2131 + EXPORT_SYMBOL_GPL(geneve_dev_create_fb);
2132 +
2133 +diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
2134 +index 59fefca74263..a5f392ae30d5 100644
2135 +--- a/drivers/net/team/team.c
2136 ++++ b/drivers/net/team/team.c
2137 +@@ -969,7 +969,7 @@ static void team_port_disable(struct team *team,
2138 + NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
2139 + NETIF_F_HIGHDMA | NETIF_F_LRO)
2140 +
2141 +-static void __team_compute_features(struct team *team)
2142 ++static void ___team_compute_features(struct team *team)
2143 + {
2144 + struct team_port *port;
2145 + u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
2146 +@@ -993,15 +993,20 @@ static void __team_compute_features(struct team *team)
2147 + team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
2148 + if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
2149 + team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
2150 ++}
2151 +
2152 ++static void __team_compute_features(struct team *team)
2153 ++{
2154 ++ ___team_compute_features(team);
2155 + netdev_change_features(team->dev);
2156 + }
2157 +
2158 + static void team_compute_features(struct team *team)
2159 + {
2160 + mutex_lock(&team->lock);
2161 +- __team_compute_features(team);
2162 ++ ___team_compute_features(team);
2163 + mutex_unlock(&team->lock);
2164 ++ netdev_change_features(team->dev);
2165 + }
2166 +
2167 + static int team_port_enter(struct team *team, struct team_port *port)
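The team fix splits feature recomputation from notification: ___team_compute_features() runs under team->lock, while netdev_change_features(), which can re-enter the driver and take other locks, is called only after the mutex is dropped. The compute-under-lock, notify-after-unlock shape with a pthread mutex (all names invented):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int features;

    static void notify_features(unsigned int f)   /* may take other locks */
    {
        printf("features now %#x\n", f);
    }

    static void recompute_features(void)
    {
        unsigned int f;

        pthread_mutex_lock(&lock);
        f = features | 0x4;        /* compute under the lock ... */
        features = f;
        pthread_mutex_unlock(&lock);

        notify_features(f);        /* ... notify only after dropping it */
    }

    int main(void)
    {
        recompute_features();
        return 0;
    }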
2168 +diff --git a/drivers/net/tun.c b/drivers/net/tun.c
2169 +index 2d186bd66d43..111b972e3053 100644
2170 +--- a/drivers/net/tun.c
2171 ++++ b/drivers/net/tun.c
2172 +@@ -567,11 +567,13 @@ static void tun_detach_all(struct net_device *dev)
2173 + for (i = 0; i < n; i++) {
2174 + tfile = rtnl_dereference(tun->tfiles[i]);
2175 + BUG_ON(!tfile);
2176 ++ tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
2177 + tfile->socket.sk->sk_data_ready(tfile->socket.sk);
2178 + RCU_INIT_POINTER(tfile->tun, NULL);
2179 + --tun->numqueues;
2180 + }
2181 + list_for_each_entry(tfile, &tun->disabled, next) {
2182 ++ tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
2183 + tfile->socket.sk->sk_data_ready(tfile->socket.sk);
2184 + RCU_INIT_POINTER(tfile->tun, NULL);
2185 + }
2186 +@@ -627,6 +629,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
2187 + goto out;
2188 + }
2189 + tfile->queue_index = tun->numqueues;
2190 ++ tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
2191 + rcu_assign_pointer(tfile->tun, tun);
2192 + rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
2193 + tun->numqueues++;
2194 +@@ -1408,9 +1411,6 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
2195 + if (!iov_iter_count(to))
2196 + return 0;
2197 +
2198 +- if (tun->dev->reg_state != NETREG_REGISTERED)
2199 +- return -EIO;
2200 +-
2201 + /* Read frames from queue */
2202 + skb = __skb_recv_datagram(tfile->socket.sk, noblock ? MSG_DONTWAIT : 0,
2203 + &peeked, &off, &err);
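Instead of failing reads with -EIO once the tun device is unregistered, which could race with packets still queued, the driver now raises RCV_SHUTDOWN on each queue's socket at detach (and clears it at attach), so blocked readers wake, drain what is left, and then see end-of-file. A toy queue with the same flag semantics:

    #include <stdbool.h>
    #include <stdio.h>

    struct queue { int pending; bool shutdown; };

    /* returns >0 for data, 0 for EOF after shutdown, -1 for "would block" */
    static int queue_recv(struct queue *q)
    {
        if (q->pending)
            return q->pending--;
        return q->shutdown ? 0 : -1;
    }

    int main(void)
    {
        struct queue q = { .pending = 2 };
        q.shutdown = true;                 /* detach: no new data will arrive */
        printf("%d\n", queue_recv(&q));    /* 2: queued data still drains */
        printf("%d\n", queue_recv(&q));    /* 1 */
        printf("%d\n", queue_recv(&q));    /* 0: EOF, not an error */
        return 0;
    }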
2204 +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
2205 +index 3c0df70e2f53..003780901628 100644
2206 +--- a/drivers/net/vxlan.c
2207 ++++ b/drivers/net/vxlan.c
2208 +@@ -1254,7 +1254,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
2209 +
2210 + /* Need Vxlan and inner Ethernet header to be present */
2211 + if (!pskb_may_pull(skb, VXLAN_HLEN))
2212 +- goto error;
2213 ++ goto drop;
2214 +
2215 + vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
2216 + flags = ntohl(vxh->vx_flags);
2217 +@@ -1344,13 +1344,7 @@ drop:
2218 + bad_flags:
2219 + netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
2220 + ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
2221 +-
2222 +-error:
2223 +- if (tun_dst)
2224 +- dst_release((struct dst_entry *)tun_dst);
2225 +-
2226 +- /* Return non vxlan pkt */
2227 +- return 1;
2228 ++ goto drop;
2229 + }
2230 +
2231 + static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
2232 +@@ -2370,29 +2364,43 @@ static void vxlan_set_multicast_list(struct net_device *dev)
2233 + {
2234 + }
2235 +
2236 +-static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
2237 ++static int __vxlan_change_mtu(struct net_device *dev,
2238 ++ struct net_device *lowerdev,
2239 ++ struct vxlan_rdst *dst, int new_mtu, bool strict)
2240 + {
2241 +- struct vxlan_dev *vxlan = netdev_priv(dev);
2242 +- struct vxlan_rdst *dst = &vxlan->default_dst;
2243 +- struct net_device *lowerdev;
2244 +- int max_mtu;
2245 ++ int max_mtu = IP_MAX_MTU;
2246 +
2247 +- lowerdev = __dev_get_by_index(vxlan->net, dst->remote_ifindex);
2248 +- if (lowerdev == NULL)
2249 +- return eth_change_mtu(dev, new_mtu);
2250 ++ if (lowerdev)
2251 ++ max_mtu = lowerdev->mtu;
2252 +
2253 + if (dst->remote_ip.sa.sa_family == AF_INET6)
2254 +- max_mtu = lowerdev->mtu - VXLAN6_HEADROOM;
2255 ++ max_mtu -= VXLAN6_HEADROOM;
2256 + else
2257 +- max_mtu = lowerdev->mtu - VXLAN_HEADROOM;
2258 ++ max_mtu -= VXLAN_HEADROOM;
2259 +
2260 +- if (new_mtu < 68 || new_mtu > max_mtu)
2261 ++ if (new_mtu < 68)
2262 + return -EINVAL;
2263 +
2264 ++ if (new_mtu > max_mtu) {
2265 ++ if (strict)
2266 ++ return -EINVAL;
2267 ++
2268 ++ new_mtu = max_mtu;
2269 ++ }
2270 ++
2271 + dev->mtu = new_mtu;
2272 + return 0;
2273 + }
2274 +
2275 ++static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
2276 ++{
2277 ++ struct vxlan_dev *vxlan = netdev_priv(dev);
2278 ++ struct vxlan_rdst *dst = &vxlan->default_dst;
2279 ++ struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
2280 ++ dst->remote_ifindex);
2281 ++ return __vxlan_change_mtu(dev, lowerdev, dst, new_mtu, true);
2282 ++}
2283 ++
2284 + static int egress_ipv4_tun_info(struct net_device *dev, struct sk_buff *skb,
2285 + struct ip_tunnel_info *info,
2286 + __be16 sport, __be16 dport)
2287 +@@ -2768,6 +2776,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
2288 + int err;
2289 + bool use_ipv6 = false;
2290 + __be16 default_port = vxlan->cfg.dst_port;
2291 ++ struct net_device *lowerdev = NULL;
2292 +
2293 + vxlan->net = src_net;
2294 +
2295 +@@ -2788,9 +2797,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
2296 + }
2297 +
2298 + if (conf->remote_ifindex) {
2299 +- struct net_device *lowerdev
2300 +- = __dev_get_by_index(src_net, conf->remote_ifindex);
2301 +-
2302 ++ lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex);
2303 + dst->remote_ifindex = conf->remote_ifindex;
2304 +
2305 + if (!lowerdev) {
2306 +@@ -2814,6 +2821,12 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
2307 + needed_headroom = lowerdev->hard_header_len;
2308 + }
2309 +
2310 ++ if (conf->mtu) {
2311 ++ err = __vxlan_change_mtu(dev, lowerdev, dst, conf->mtu, false);
2312 ++ if (err)
2313 ++ return err;
2314 ++ }
2315 ++
2316 + if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
2317 + needed_headroom += VXLAN6_HEADROOM;
2318 + else
2319 +@@ -2991,6 +3004,9 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
2320 + if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
2321 + conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL;
2322 +
2323 ++ if (tb[IFLA_MTU])
2324 ++ conf.mtu = nla_get_u32(tb[IFLA_MTU]);
2325 ++
2326 + err = vxlan_dev_configure(src_net, dev, &conf);
2327 + switch (err) {
2328 + case -ENODEV:
2329 +diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
2330 +index 3d8019eb3d84..181b35879ebd 100644
2331 +--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
2332 ++++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
2333 +@@ -1191,9 +1191,10 @@ static void mtk_eint_irq_handler(struct irq_desc *desc)
2334 + const struct mtk_desc_pin *pin;
2335 +
2336 + chained_irq_enter(chip, desc);
2337 +- for (eint_num = 0; eint_num < pctl->devdata->ap_num; eint_num += 32) {
2338 ++ for (eint_num = 0;
2339 ++ eint_num < pctl->devdata->ap_num;
2340 ++ eint_num += 32, reg += 4) {
2341 + status = readl(reg);
2342 +- reg += 4;
2343 + while (status) {
2344 + offset = __ffs(status);
2345 + index = eint_num + offset;
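The Mediatek EINT fix moves the status-register advance into the for header, so reg stays pointing at the bank currently being decoded for the entire loop body and only steps to the next 32-pin bank together with eint_num. A sketch of the same walk over 32-bit status banks (a uint32_t pointer makes reg++ the byte-level reg += 4; __builtin_ctz is the GCC/Clang find-first-set):

    #include <stdint.h>
    #include <stdio.h>

    #define NR_PINS 96   /* three 32-bit status banks */

    int main(void)
    {
        uint32_t regs[NR_PINS / 32] = { 0x1, 0x0, 0x80000000 };
        const uint32_t *reg = regs;
        unsigned int eint_num;

        /* advance the bank pointer in the loop header, so everything the
         * body does still refers to the bank it belongs to */
        for (eint_num = 0; eint_num < NR_PINS; eint_num += 32, reg++) {
            uint32_t status = *reg;
            while (status) {
                unsigned int offset = __builtin_ctz(status);
                printf("pin %u pending\n", eint_num + offset);
                status &= status - 1;   /* clear the bit just handled */
            }
        }
        return 0;
    }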
2346 +diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
2347 +index da2e068ee47d..93cbefa75b26 100644
2348 +--- a/drivers/scsi/scsi_devinfo.c
2349 ++++ b/drivers/scsi/scsi_devinfo.c
2350 +@@ -227,6 +227,7 @@ static struct {
2351 + {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
2352 + {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC},
2353 + {"Promise", "", NULL, BLIST_SPARSELUN},
2354 ++ {"QEMU", "QEMU CD-ROM", NULL, BLIST_SKIP_VPD_PAGES},
2355 + {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024},
2356 + {"SYNOLOGY", "iSCSI Storage", NULL, BLIST_MAX_1024},
2357 + {"QUANTUM", "XP34301", "1071", BLIST_NOTQ},
2358 +diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
2359 +index dd8ad2a44510..cf5b99e1f12b 100644
2360 +--- a/drivers/scsi/scsi_lib.c
2361 ++++ b/drivers/scsi/scsi_lib.c
2362 +@@ -910,9 +910,12 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
2363 + }
2364 +
2365 + /*
2366 +- * If we finished all bytes in the request we are done now.
2367 ++ * special case: failed zero length commands always need to
2368 ++ * drop down into the retry code. Otherwise, if we finished
2369 ++ * all bytes in the request we are done now.
2370 + */
2371 +- if (!scsi_end_request(req, error, good_bytes, 0))
2372 ++ if (!(blk_rq_bytes(req) == 0 && error) &&
2373 ++ !scsi_end_request(req, error, good_bytes, 0))
2374 + return;
2375 +
2376 + /*
2377 +diff --git a/fs/dcache.c b/fs/dcache.c
2378 +index 18effa378f97..108d7d810be3 100644
2379 +--- a/fs/dcache.c
2380 ++++ b/fs/dcache.c
2381 +@@ -1618,7 +1618,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
2382 + struct dentry *dentry = __d_alloc(parent->d_sb, name);
2383 + if (!dentry)
2384 + return NULL;
2385 +-
2386 ++ dentry->d_flags |= DCACHE_RCUACCESS;
2387 + spin_lock(&parent->d_lock);
2388 + /*
2389 + * don't need child lock because it is not subject
2390 +@@ -2413,7 +2413,6 @@ static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
2391 + {
2392 + BUG_ON(!d_unhashed(entry));
2393 + hlist_bl_lock(b);
2394 +- entry->d_flags |= DCACHE_RCUACCESS;
2395 + hlist_bl_add_head_rcu(&entry->d_hash, b);
2396 + hlist_bl_unlock(b);
2397 + }
2398 +@@ -2632,6 +2631,7 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
2399 + /* ... and switch them in the tree */
2400 + if (IS_ROOT(dentry)) {
2401 + /* splicing a tree */
2402 ++ dentry->d_flags |= DCACHE_RCUACCESS;
2403 + dentry->d_parent = target->d_parent;
2404 + target->d_parent = target;
2405 + list_del_init(&target->d_child);
2406 +diff --git a/fs/ecryptfs/kthread.c b/fs/ecryptfs/kthread.c
2407 +index 866bb18efefe..e818f5ac7a26 100644
2408 +--- a/fs/ecryptfs/kthread.c
2409 ++++ b/fs/ecryptfs/kthread.c
2410 +@@ -25,6 +25,7 @@
2411 + #include <linux/slab.h>
2412 + #include <linux/wait.h>
2413 + #include <linux/mount.h>
2414 ++#include <linux/file.h>
2415 + #include "ecryptfs_kernel.h"
2416 +
2417 + struct ecryptfs_open_req {
2418 +@@ -147,7 +148,7 @@ int ecryptfs_privileged_open(struct file **lower_file,
2419 + flags |= IS_RDONLY(d_inode(lower_dentry)) ? O_RDONLY : O_RDWR;
2420 + (*lower_file) = dentry_open(&req.path, flags, cred);
2421 + if (!IS_ERR(*lower_file))
2422 +- goto out;
2423 ++ goto have_file;
2424 + if ((flags & O_ACCMODE) == O_RDONLY) {
2425 + rc = PTR_ERR((*lower_file));
2426 + goto out;
2427 +@@ -165,8 +166,16 @@ int ecryptfs_privileged_open(struct file **lower_file,
2428 + mutex_unlock(&ecryptfs_kthread_ctl.mux);
2429 + wake_up(&ecryptfs_kthread_ctl.wait);
2430 + wait_for_completion(&req.done);
2431 +- if (IS_ERR(*lower_file))
2432 ++ if (IS_ERR(*lower_file)) {
2433 + rc = PTR_ERR(*lower_file);
2434 ++ goto out;
2435 ++ }
2436 ++have_file:
2437 ++ if ((*lower_file)->f_op->mmap == NULL) {
2438 ++ fput(*lower_file);
2439 ++ *lower_file = NULL;
2440 ++ rc = -EMEDIUMTYPE;
2441 ++ }
2442 + out:
2443 + return rc;
2444 + }
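ecryptfs maps its lower file, so a lower filesystem without an .mmap file operation (procfs, for instance) would crash later; the fix probes the function pointer at open time and returns -EMEDIUMTYPE instead. Checking an ops table before depending on an optional hook, in miniature (struct and function names are invented; EMEDIUMTYPE is the Linux-specific errno the patch uses):

    #include <errno.h>
    #include <stdio.h>

    struct file_ops { int (*mmap)(void); };

    static int do_mmap(void) { return 0; }

    static const struct file_ops with_mmap = { .mmap = do_mmap };
    static const struct file_ops without_mmap = { 0 };

    static int open_lower(const struct file_ops *f_op)
    {
        if (f_op->mmap == NULL)
            return -EMEDIUMTYPE;   /* refuse early, not oops later */
        return 0;
    }

    int main(void)
    {
        printf("%d %d\n", open_lower(&with_mmap), open_lower(&without_mmap));
        return 0;
    }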
2445 +diff --git a/fs/proc/root.c b/fs/proc/root.c
2446 +index 361ab4ee42fc..ec649c92d270 100644
2447 +--- a/fs/proc/root.c
2448 ++++ b/fs/proc/root.c
2449 +@@ -121,6 +121,13 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
2450 + if (IS_ERR(sb))
2451 + return ERR_CAST(sb);
2452 +
2453 ++ /*
2454 ++ * procfs isn't actually a stacking filesystem; however, there is
2455 ++ * too much magic going on inside it to permit stacking things on
2456 ++ * top of it
2457 ++ */
2458 ++ sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
2459 ++
2460 + if (!proc_parse_options(options, ns)) {
2461 + deactivate_locked_super(sb);
2462 + return ERR_PTR(-EINVAL);
2463 +diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
2464 +index d5d798b35c1f..e98425058f20 100644
2465 +--- a/include/linux/irqchip/arm-gic-v3.h
2466 ++++ b/include/linux/irqchip/arm-gic-v3.h
2467 +@@ -301,7 +301,7 @@
2468 + #define ICC_SGI1R_AFFINITY_1_SHIFT 16
2469 + #define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT)
2470 + #define ICC_SGI1R_SGI_ID_SHIFT 24
2471 +-#define ICC_SGI1R_SGI_ID_MASK (0xff << ICC_SGI1R_SGI_ID_SHIFT)
2472 ++#define ICC_SGI1R_SGI_ID_MASK (0xfULL << ICC_SGI1R_SGI_ID_SHIFT)
2473 + #define ICC_SGI1R_AFFINITY_2_SHIFT 32
2474 + #define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT)
2475 + #define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40
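The gic-v3 one-liner fixes two defects at once: the SGI ID field is only 4 bits wide, and 0xff << 24 is evaluated in 32-bit int, where the result has the sign bit set and sign-extends when the mask is widened to the 64-bit ICC_SGI1R register value, corrupting the upper fields. The effect on a typical two's-complement compiler:

    #include <stdint.h>
    #include <stdio.h>

    #define SHIFT 24

    int main(void)
    {
        /* old: what 0xff << 24 yields in 32-bit int; negative, so the
         * widening conversion to 64 bits sign-extends */
        int32_t old_mask = (int32_t)0xff000000;
        uint64_t bad  = (uint64_t)old_mask;
        /* new: 64-bit arithmetic, and only the 4 bits the field really has */
        uint64_t good = 0xfULL << SHIFT;

        printf("bad  = %#018llx\n", (unsigned long long)bad);
        printf("good = %#018llx\n", (unsigned long long)good);
        return 0;
    }

The bad mask comes out as 0xffffffffff000000, spilling into the affinity-2 and routing-mode bits; the good one is 0x000000000f000000.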
2476 +diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
2477 +index c5577410c25d..04078e8a4803 100644
2478 +--- a/include/linux/netfilter/x_tables.h
2479 ++++ b/include/linux/netfilter/x_tables.h
2480 +@@ -239,11 +239,18 @@ void xt_unregister_match(struct xt_match *target);
2481 + int xt_register_matches(struct xt_match *match, unsigned int n);
2482 + void xt_unregister_matches(struct xt_match *match, unsigned int n);
2483 +
2484 ++int xt_check_entry_offsets(const void *base, const char *elems,
2485 ++ unsigned int target_offset,
2486 ++ unsigned int next_offset);
2487 ++
2488 + int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
2489 + bool inv_proto);
2490 + int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
2491 + bool inv_proto);
2492 +
2493 ++void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
2494 ++ struct xt_counters_info *info, bool compat);
2495 ++
2496 + struct xt_table *xt_register_table(struct net *net,
2497 + const struct xt_table *table,
2498 + struct xt_table_info *bootstrap,
2499 +@@ -478,7 +485,7 @@ void xt_compat_init_offsets(u_int8_t af, unsigned int number);
2500 + int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
2501 +
2502 + int xt_compat_match_offset(const struct xt_match *match);
2503 +-int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
2504 ++void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
2505 + unsigned int *size);
2506 + int xt_compat_match_to_user(const struct xt_entry_match *m,
2507 + void __user **dstptr, unsigned int *size);
2508 +@@ -488,6 +495,9 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
2509 + unsigned int *size);
2510 + int xt_compat_target_to_user(const struct xt_entry_target *t,
2511 + void __user **dstptr, unsigned int *size);
2512 ++int xt_compat_check_entry_offsets(const void *base, const char *elems,
2513 ++ unsigned int target_offset,
2514 ++ unsigned int next_offset);
2515 +
2516 + #endif /* CONFIG_COMPAT */
2517 + #endif /* _X_TABLES_H */
2518 +diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
2519 +index 62a750a6a8f8..af40bc586a1b 100644
2520 +--- a/include/net/ip_tunnels.h
2521 ++++ b/include/net/ip_tunnels.h
2522 +@@ -230,6 +230,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
2523 + int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
2524 + int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
2525 + u8 *protocol, struct flowi4 *fl4);
2526 ++int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
2527 + int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
2528 +
2529 + struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
2530 +diff --git a/include/net/switchdev.h b/include/net/switchdev.h
2531 +index 1d22ce9f352e..31d0e5143848 100644
2532 +--- a/include/net/switchdev.h
2533 ++++ b/include/net/switchdev.h
2534 +@@ -88,7 +88,7 @@ struct switchdev_obj_ipv4_fib {
2535 + struct switchdev_obj obj;
2536 + u32 dst;
2537 + int dst_len;
2538 +- struct fib_info fi;
2539 ++ struct fib_info *fi;
2540 + u8 tos;
2541 + u8 type;
2542 + u32 nlflags;
2543 +diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
2544 +index d5e38c73377c..e4f048ee7043 100644
2545 +--- a/include/uapi/linux/libc-compat.h
2546 ++++ b/include/uapi/linux/libc-compat.h
2547 +@@ -52,7 +52,7 @@
2548 + #if defined(__GLIBC__)
2549 +
2550 + /* Coordinate with glibc net/if.h header. */
2551 +-#if defined(_NET_IF_H)
2552 ++#if defined(_NET_IF_H) && defined(__USE_MISC)
2553 +
2554 + /* GLIBC headers included first so don't define anything
2555 + * that would already be defined. */
2556 +diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
2557 +index d1a7646f79c5..cb85d228b1ac 100644
2558 +--- a/kernel/bpf/inode.c
2559 ++++ b/kernel/bpf/inode.c
2560 +@@ -358,7 +358,7 @@ static int bpf_fill_super(struct super_block *sb, void *data, int silent)
2561 + static struct dentry *bpf_mount(struct file_system_type *type, int flags,
2562 + const char *dev_name, void *data)
2563 + {
2564 +- return mount_ns(type, flags, current->nsproxy->mnt_ns, bpf_fill_super);
2565 ++ return mount_nodev(type, flags, data, bpf_fill_super);
2566 + }
2567 +
2568 + static struct file_system_type bpf_fs_type = {
2569 +@@ -366,7 +366,6 @@ static struct file_system_type bpf_fs_type = {
2570 + .name = "bpf",
2571 + .mount = bpf_mount,
2572 + .kill_sb = kill_litter_super,
2573 +- .fs_flags = FS_USERNS_MOUNT,
2574 + };
2575 +
2576 + MODULE_ALIAS_FS("bpf");
2577 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
2578 +index 55bebf924946..6c0cdb5a73f8 100644
2579 +--- a/kernel/sched/core.c
2580 ++++ b/kernel/sched/core.c
2581 +@@ -3008,7 +3008,8 @@ static noinline void __schedule_bug(struct task_struct *prev)
2582 + static inline void schedule_debug(struct task_struct *prev)
2583 + {
2584 + #ifdef CONFIG_SCHED_STACK_END_CHECK
2585 +- BUG_ON(task_stack_end_corrupted(prev));
2586 ++ if (task_stack_end_corrupted(prev))
2587 ++ panic("corrupted stack end detected inside scheduler\n");
2588 + #endif
2589 +
2590 + if (unlikely(in_atomic_preempt_off())) {
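The scheduler change swaps BUG_ON for an explicit panic: by the time task_stack_end_corrupted() fires, the stack has already been overrun, and a BUG_ON may fault again before logging anything useful, whereas panic() prints its message first. The underlying check is a canary word at the stack end (0x57AC6E9D is the kernel's actual STACK_END_MAGIC); a userspace rendition:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define STACK_END_MAGIC 0x57AC6E9DU

    struct task { uint32_t stack_end_magic; char stack[256]; };

    static void check_stack(const struct task *t)
    {
        if (t->stack_end_magic != STACK_END_MAGIC) {
            fprintf(stderr, "corrupted stack end detected\n");
            exit(1);   /* stand-in for panic() */
        }
    }

    int main(void)
    {
        struct task t = { .stack_end_magic = STACK_END_MAGIC };
        check_stack(&t);                       /* fine */
        memset(&t, 0, sizeof(t));              /* simulate an overrun */
        check_stack(&t);                       /* "panics" with a message */
        return 0;
    }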
2591 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
2592 +index 6ba4dd988e2e..67648e6b2ac8 100644
2593 +--- a/mm/memcontrol.c
2594 ++++ b/mm/memcontrol.c
2595 +@@ -3661,6 +3661,7 @@ static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
2596 + * ordering is imposed by list_lru_node->lock taken by
2597 + * memcg_drain_all_list_lrus().
2598 + */
2599 ++ rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
2600 + css_for_each_descendant_pre(css, &memcg->css) {
2601 + child = mem_cgroup_from_css(css);
2602 + BUG_ON(child->kmemcg_id != kmemcg_id);
2603 +@@ -3668,6 +3669,8 @@ static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
2604 + if (!memcg->use_hierarchy)
2605 + break;
2606 + }
2607 ++ rcu_read_unlock();
2608 ++
2609 + memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
2610 +
2611 + memcg_free_cache_id(kmemcg_id);
2612 +diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
2613 +index a642bb829d09..09442e0f7f67 100644
2614 +--- a/net/bridge/br_fdb.c
2615 ++++ b/net/bridge/br_fdb.c
2616 +@@ -278,6 +278,8 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
2617 + * change from under us.
2618 + */
2619 + list_for_each_entry(v, &vg->vlan_list, vlist) {
2620 ++ if (!br_vlan_should_use(v))
2621 ++ continue;
2622 + f = __br_fdb_get(br, br->dev->dev_addr, v->vid);
2623 + if (f && f->is_local && !f->dst)
2624 + fdb_delete_local(br, NULL, f);
2625 +diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
2626 +index 7dc962b89fa1..3e4184088082 100644
2627 +--- a/net/ipv4/ip_gre.c
2628 ++++ b/net/ipv4/ip_gre.c
2629 +@@ -1247,6 +1247,14 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
2630 + err = ipgre_newlink(net, dev, tb, NULL);
2631 + if (err < 0)
2632 + goto out;
2633 ++
2634 ++ /* openvswitch users expect packet sizes to be unrestricted,
2635 ++ * so set the largest MTU we can.
2636 ++ */
2637 ++ err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
2638 ++ if (err)
2639 ++ goto out;
2640 ++
2641 + return dev;
2642 + out:
2643 + free_netdev(dev);
2644 +diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
2645 +index ce30c8b72457..3310ac75e3f3 100644
2646 +--- a/net/ipv4/ip_tunnel.c
2647 ++++ b/net/ipv4/ip_tunnel.c
2648 +@@ -948,17 +948,31 @@ done:
2649 + }
2650 + EXPORT_SYMBOL_GPL(ip_tunnel_ioctl);
2651 +
2652 +-int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
2653 ++int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
2654 + {
2655 + struct ip_tunnel *tunnel = netdev_priv(dev);
2656 + int t_hlen = tunnel->hlen + sizeof(struct iphdr);
2657 ++ int max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen;
2658 +
2659 +- if (new_mtu < 68 ||
2660 +- new_mtu > 0xFFF8 - dev->hard_header_len - t_hlen)
2661 ++ if (new_mtu < 68)
2662 + return -EINVAL;
2663 ++
2664 ++ if (new_mtu > max_mtu) {
2665 ++ if (strict)
2666 ++ return -EINVAL;
2667 ++
2668 ++ new_mtu = max_mtu;
2669 ++ }
2670 ++
2671 + dev->mtu = new_mtu;
2672 + return 0;
2673 + }
2674 ++EXPORT_SYMBOL_GPL(__ip_tunnel_change_mtu);
2675 ++
2676 ++int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
2677 ++{
2678 ++ return __ip_tunnel_change_mtu(dev, new_mtu, true);
2679 ++}
2680 + EXPORT_SYMBOL_GPL(ip_tunnel_change_mtu);
2681 +
2682 + static void ip_tunnel_dev_free(struct net_device *dev)
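ip_tunnel gains a non-strict MTU path: __ip_tunnel_change_mtu() keeps returning -EINVAL for undersized values, but oversized requests are either rejected (strict, the classic ndo_change_mtu behavior) or clamped to the computed maximum, which is what the openvswitch-style callers in the geneve, gre and vxlan hunks above rely on when they ask for IP_MAX_MTU. The policy split in isolation (68 is the IPv4 minimum MTU; the helper name is illustrative):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    static int change_mtu(int *mtu, int new_mtu, int max_mtu, bool strict)
    {
        if (new_mtu < 68)
            return -EINVAL;
        if (new_mtu > max_mtu) {
            if (strict)
                return -EINVAL;
            new_mtu = max_mtu;   /* non-strict callers take the best we can do */
        }
        *mtu = new_mtu;
        return 0;
    }

    int main(void)
    {
        int mtu = 1500, ret;

        ret = change_mtu(&mtu, 65535, 1472, true);
        printf("%d mtu=%d\n", ret, mtu);   /* -22 mtu=1500: rejected */
        ret = change_mtu(&mtu, 65535, 1472, false);
        printf("%d mtu=%d\n", ret, mtu);   /* 0 mtu=1472: clamped */
        return 0;
    }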
2683 +diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
2684 +index 11dccba474b7..6e3e0e8b1ce3 100644
2685 +--- a/net/ipv4/netfilter/arp_tables.c
2686 ++++ b/net/ipv4/netfilter/arp_tables.c
2687 +@@ -359,11 +359,24 @@ unsigned int arpt_do_table(struct sk_buff *skb,
2688 + }
2689 +
2690 + /* All zeroes == unconditional rule. */
2691 +-static inline bool unconditional(const struct arpt_arp *arp)
2692 ++static inline bool unconditional(const struct arpt_entry *e)
2693 + {
2694 + static const struct arpt_arp uncond;
2695 +
2696 +- return memcmp(arp, &uncond, sizeof(uncond)) == 0;
2697 ++ return e->target_offset == sizeof(struct arpt_entry) &&
2698 ++ memcmp(&e->arp, &uncond, sizeof(uncond)) == 0;
2699 ++}
2700 ++
2701 ++static bool find_jump_target(const struct xt_table_info *t,
2702 ++ const struct arpt_entry *target)
2703 ++{
2704 ++ struct arpt_entry *iter;
2705 ++
2706 ++ xt_entry_foreach(iter, t->entries, t->size) {
2707 ++ if (iter == target)
2708 ++ return true;
2709 ++ }
2710 ++ return false;
2711 + }
2712 +
2713 + /* Figures out from what hook each rule can be called: returns 0 if
2714 +@@ -402,11 +415,10 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
2715 + |= ((1 << hook) | (1 << NF_ARP_NUMHOOKS));
2716 +
2717 + /* Unconditional return/END. */
2718 +- if ((e->target_offset == sizeof(struct arpt_entry) &&
2719 ++ if ((unconditional(e) &&
2720 + (strcmp(t->target.u.user.name,
2721 + XT_STANDARD_TARGET) == 0) &&
2722 +- t->verdict < 0 && unconditional(&e->arp)) ||
2723 +- visited) {
2724 ++ t->verdict < 0) || visited) {
2725 + unsigned int oldpos, size;
2726 +
2727 + if ((strcmp(t->target.u.user.name,
2728 +@@ -439,6 +451,8 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
2729 + size = e->next_offset;
2730 + e = (struct arpt_entry *)
2731 + (entry0 + pos + size);
2732 ++ if (pos + size >= newinfo->size)
2733 ++ return 0;
2734 + e->counters.pcnt = pos;
2735 + pos += size;
2736 + } else {
2737 +@@ -458,9 +472,15 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
2738 + /* This a jump; chase it. */
2739 + duprintf("Jump rule %u -> %u\n",
2740 + pos, newpos);
2741 ++ e = (struct arpt_entry *)
2742 ++ (entry0 + newpos);
2743 ++ if (!find_jump_target(newinfo, e))
2744 ++ return 0;
2745 + } else {
2746 + /* ... this is a fallthru */
2747 + newpos = pos + e->next_offset;
2748 ++ if (newpos >= newinfo->size)
2749 ++ return 0;
2750 + }
2751 + e = (struct arpt_entry *)
2752 + (entry0 + newpos);
2753 +@@ -474,25 +494,6 @@ next:
2754 + return 1;
2755 + }
2756 +
2757 +-static inline int check_entry(const struct arpt_entry *e, const char *name)
2758 +-{
2759 +- const struct xt_entry_target *t;
2760 +-
2761 +- if (!arp_checkentry(&e->arp)) {
2762 +- duprintf("arp_tables: arp check failed %p %s.\n", e, name);
2763 +- return -EINVAL;
2764 +- }
2765 +-
2766 +- if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset)
2767 +- return -EINVAL;
2768 +-
2769 +- t = arpt_get_target_c(e);
2770 +- if (e->target_offset + t->u.target_size > e->next_offset)
2771 +- return -EINVAL;
2772 +-
2773 +- return 0;
2774 +-}
2775 +-
2776 + static inline int check_target(struct arpt_entry *e, const char *name)
2777 + {
2778 + struct xt_entry_target *t = arpt_get_target(e);
2779 +@@ -522,10 +523,6 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
2780 + struct xt_target *target;
2781 + int ret;
2782 +
2783 +- ret = check_entry(e, name);
2784 +- if (ret)
2785 +- return ret;
2786 +-
2787 + e->counters.pcnt = xt_percpu_counter_alloc();
2788 + if (IS_ERR_VALUE(e->counters.pcnt))
2789 + return -ENOMEM;
2790 +@@ -557,7 +554,7 @@ static bool check_underflow(const struct arpt_entry *e)
2791 + const struct xt_entry_target *t;
2792 + unsigned int verdict;
2793 +
2794 +- if (!unconditional(&e->arp))
2795 ++ if (!unconditional(e))
2796 + return false;
2797 + t = arpt_get_target_c(e);
2798 + if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
2799 +@@ -576,9 +573,11 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
2800 + unsigned int valid_hooks)
2801 + {
2802 + unsigned int h;
2803 ++ int err;
2804 +
2805 + if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 ||
2806 +- (unsigned char *)e + sizeof(struct arpt_entry) >= limit) {
2807 ++ (unsigned char *)e + sizeof(struct arpt_entry) >= limit ||
2808 ++ (unsigned char *)e + e->next_offset > limit) {
2809 + duprintf("Bad offset %p\n", e);
2810 + return -EINVAL;
2811 + }
2812 +@@ -590,6 +589,14 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
2813 + return -EINVAL;
2814 + }
2815 +
2816 ++ if (!arp_checkentry(&e->arp))
2817 ++ return -EINVAL;
2818 ++
2819 ++ err = xt_check_entry_offsets(e, e->elems, e->target_offset,
2820 ++ e->next_offset);
2821 ++ if (err)
2822 ++ return err;
2823 ++
2824 + /* Check hooks & underflows */
2825 + for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
2826 + if (!(valid_hooks & (1 << h)))
2827 +@@ -598,9 +605,9 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
2828 + newinfo->hook_entry[h] = hook_entries[h];
2829 + if ((unsigned char *)e - base == underflows[h]) {
2830 + if (!check_underflow(e)) {
2831 +- pr_err("Underflows must be unconditional and "
2832 +- "use the STANDARD target with "
2833 +- "ACCEPT/DROP\n");
2834 ++ pr_debug("Underflows must be unconditional and "
2835 ++ "use the STANDARD target with "
2836 ++ "ACCEPT/DROP\n");
2837 + return -EINVAL;
2838 + }
2839 + newinfo->underflow[h] = underflows[h];
2840 +@@ -691,10 +698,8 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
2841 + }
2842 + }
2843 +
2844 +- if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) {
2845 +- duprintf("Looping hook\n");
2846 ++ if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
2847 + return -ELOOP;
2848 +- }
2849 +
2850 + /* Finally, each sanity check must pass */
2851 + i = 0;
2852 +@@ -1125,55 +1130,17 @@ static int do_add_counters(struct net *net, const void __user *user,
2853 + unsigned int i;
2854 + struct xt_counters_info tmp;
2855 + struct xt_counters *paddc;
2856 +- unsigned int num_counters;
2857 +- const char *name;
2858 +- int size;
2859 +- void *ptmp;
2860 + struct xt_table *t;
2861 + const struct xt_table_info *private;
2862 + int ret = 0;
2863 + struct arpt_entry *iter;
2864 + unsigned int addend;
2865 +-#ifdef CONFIG_COMPAT
2866 +- struct compat_xt_counters_info compat_tmp;
2867 +-
2868 +- if (compat) {
2869 +- ptmp = &compat_tmp;
2870 +- size = sizeof(struct compat_xt_counters_info);
2871 +- } else
2872 +-#endif
2873 +- {
2874 +- ptmp = &tmp;
2875 +- size = sizeof(struct xt_counters_info);
2876 +- }
2877 +
2878 +- if (copy_from_user(ptmp, user, size) != 0)
2879 +- return -EFAULT;
2880 +-
2881 +-#ifdef CONFIG_COMPAT
2882 +- if (compat) {
2883 +- num_counters = compat_tmp.num_counters;
2884 +- name = compat_tmp.name;
2885 +- } else
2886 +-#endif
2887 +- {
2888 +- num_counters = tmp.num_counters;
2889 +- name = tmp.name;
2890 +- }
2891 ++ paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
2892 ++ if (IS_ERR(paddc))
2893 ++ return PTR_ERR(paddc);
2894 +
2895 +- if (len != size + num_counters * sizeof(struct xt_counters))
2896 +- return -EINVAL;
2897 +-
2898 +- paddc = vmalloc(len - size);
2899 +- if (!paddc)
2900 +- return -ENOMEM;
2901 +-
2902 +- if (copy_from_user(paddc, user + size, len - size) != 0) {
2903 +- ret = -EFAULT;
2904 +- goto free;
2905 +- }
2906 +-
2907 +- t = xt_find_table_lock(net, NFPROTO_ARP, name);
2908 ++ t = xt_find_table_lock(net, NFPROTO_ARP, tmp.name);
2909 + if (IS_ERR_OR_NULL(t)) {
2910 + ret = t ? PTR_ERR(t) : -ENOENT;
2911 + goto free;
2912 +@@ -1181,7 +1148,7 @@ static int do_add_counters(struct net *net, const void __user *user,
2913 +
2914 + local_bh_disable();
2915 + private = t->private;
2916 +- if (private->number != num_counters) {
2917 ++ if (private->number != tmp.num_counters) {
2918 + ret = -EINVAL;
2919 + goto unlock_up_free;
2920 + }
2921 +@@ -1208,6 +1175,18 @@ static int do_add_counters(struct net *net, const void __user *user,
2922 + }
2923 +
2924 + #ifdef CONFIG_COMPAT
2925 ++struct compat_arpt_replace {
2926 ++ char name[XT_TABLE_MAXNAMELEN];
2927 ++ u32 valid_hooks;
2928 ++ u32 num_entries;
2929 ++ u32 size;
2930 ++ u32 hook_entry[NF_ARP_NUMHOOKS];
2931 ++ u32 underflow[NF_ARP_NUMHOOKS];
2932 ++ u32 num_counters;
2933 ++ compat_uptr_t counters;
2934 ++ struct compat_arpt_entry entries[0];
2935 ++};
2936 ++
2937 + static inline void compat_release_entry(struct compat_arpt_entry *e)
2938 + {
2939 + struct xt_entry_target *t;
2940 +@@ -1216,24 +1195,22 @@ static inline void compat_release_entry(struct compat_arpt_entry *e)
2941 + module_put(t->u.kernel.target->me);
2942 + }
2943 +
2944 +-static inline int
2945 ++static int
2946 + check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
2947 + struct xt_table_info *newinfo,
2948 + unsigned int *size,
2949 + const unsigned char *base,
2950 +- const unsigned char *limit,
2951 +- const unsigned int *hook_entries,
2952 +- const unsigned int *underflows,
2953 +- const char *name)
2954 ++ const unsigned char *limit)
2955 + {
2956 + struct xt_entry_target *t;
2957 + struct xt_target *target;
2958 + unsigned int entry_offset;
2959 +- int ret, off, h;
2960 ++ int ret, off;
2961 +
2962 + duprintf("check_compat_entry_size_and_hooks %p\n", e);
2963 + if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 ||
2964 +- (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit) {
2965 ++ (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit ||
2966 ++ (unsigned char *)e + e->next_offset > limit) {
2967 + duprintf("Bad offset %p, limit = %p\n", e, limit);
2968 + return -EINVAL;
2969 + }
2970 +@@ -1245,8 +1222,11 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
2971 + return -EINVAL;
2972 + }
2973 +
2974 +- /* For purposes of check_entry casting the compat entry is fine */
2975 +- ret = check_entry((struct arpt_entry *)e, name);
2976 ++ if (!arp_checkentry(&e->arp))
2977 ++ return -EINVAL;
2978 ++
2979 ++ ret = xt_compat_check_entry_offsets(e, e->elems, e->target_offset,
2980 ++ e->next_offset);
2981 + if (ret)
2982 + return ret;
2983 +
2984 +@@ -1270,17 +1250,6 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
2985 + if (ret)
2986 + goto release_target;
2987 +
2988 +- /* Check hooks & underflows */
2989 +- for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
2990 +- if ((unsigned char *)e - base == hook_entries[h])
2991 +- newinfo->hook_entry[h] = hook_entries[h];
2992 +- if ((unsigned char *)e - base == underflows[h])
2993 +- newinfo->underflow[h] = underflows[h];
2994 +- }
2995 +-
2996 +- /* Clear counters and comefrom */
2997 +- memset(&e->counters, 0, sizeof(e->counters));
2998 +- e->comefrom = 0;
2999 + return 0;
3000 +
3001 + release_target:
3002 +@@ -1289,18 +1258,17 @@ out:
3003 + return ret;
3004 + }
3005 +
3006 +-static int
3007 ++static void
3008 + compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
3009 +- unsigned int *size, const char *name,
3010 ++ unsigned int *size,
3011 + struct xt_table_info *newinfo, unsigned char *base)
3012 + {
3013 + struct xt_entry_target *t;
3014 + struct xt_target *target;
3015 + struct arpt_entry *de;
3016 + unsigned int origsize;
3017 +- int ret, h;
3018 ++ int h;
3019 +
3020 +- ret = 0;
3021 + origsize = *size;
3022 + de = (struct arpt_entry *)*dstptr;
3023 + memcpy(de, e, sizeof(struct arpt_entry));
3024 +@@ -1321,148 +1289,82 @@ compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
3025 + if ((unsigned char *)de - base < newinfo->underflow[h])
3026 + newinfo->underflow[h] -= origsize - *size;
3027 + }
3028 +- return ret;
3029 + }
3030 +
3031 +-static int translate_compat_table(const char *name,
3032 +- unsigned int valid_hooks,
3033 +- struct xt_table_info **pinfo,
3034 ++static int translate_compat_table(struct xt_table_info **pinfo,
3035 + void **pentry0,
3036 +- unsigned int total_size,
3037 +- unsigned int number,
3038 +- unsigned int *hook_entries,
3039 +- unsigned int *underflows)
3040 ++ const struct compat_arpt_replace *compatr)
3041 + {
3042 + unsigned int i, j;
3043 + struct xt_table_info *newinfo, *info;
3044 + void *pos, *entry0, *entry1;
3045 + struct compat_arpt_entry *iter0;
3046 +- struct arpt_entry *iter1;
3047 ++ struct arpt_replace repl;
3048 + unsigned int size;
3049 + int ret = 0;
3050 +
3051 + info = *pinfo;
3052 + entry0 = *pentry0;
3053 +- size = total_size;
3054 +- info->number = number;
3055 +-
3056 +- /* Init all hooks to impossible value. */
3057 +- for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
3058 +- info->hook_entry[i] = 0xFFFFFFFF;
3059 +- info->underflow[i] = 0xFFFFFFFF;
3060 +- }
3061 ++ size = compatr->size;
3062 ++ info->number = compatr->num_entries;
3063 +
3064 + duprintf("translate_compat_table: size %u\n", info->size);
3065 + j = 0;
3066 + xt_compat_lock(NFPROTO_ARP);
3067 +- xt_compat_init_offsets(NFPROTO_ARP, number);
3068 ++ xt_compat_init_offsets(NFPROTO_ARP, compatr->num_entries);
3069 + /* Walk through entries, checking offsets. */
3070 +- xt_entry_foreach(iter0, entry0, total_size) {
3071 ++ xt_entry_foreach(iter0, entry0, compatr->size) {
3072 + ret = check_compat_entry_size_and_hooks(iter0, info, &size,
3073 + entry0,
3074 +- entry0 + total_size,
3075 +- hook_entries,
3076 +- underflows,
3077 +- name);
3078 ++ entry0 + compatr->size);
3079 + if (ret != 0)
3080 + goto out_unlock;
3081 + ++j;
3082 + }
3083 +
3084 + ret = -EINVAL;
3085 +- if (j != number) {
3086 ++ if (j != compatr->num_entries) {
3087 + duprintf("translate_compat_table: %u not %u entries\n",
3088 +- j, number);
3089 ++ j, compatr->num_entries);
3090 + goto out_unlock;
3091 + }
3092 +
3093 +- /* Check hooks all assigned */
3094 +- for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
3095 +- /* Only hooks which are valid */
3096 +- if (!(valid_hooks & (1 << i)))
3097 +- continue;
3098 +- if (info->hook_entry[i] == 0xFFFFFFFF) {
3099 +- duprintf("Invalid hook entry %u %u\n",
3100 +- i, hook_entries[i]);
3101 +- goto out_unlock;
3102 +- }
3103 +- if (info->underflow[i] == 0xFFFFFFFF) {
3104 +- duprintf("Invalid underflow %u %u\n",
3105 +- i, underflows[i]);
3106 +- goto out_unlock;
3107 +- }
3108 +- }
3109 +-
3110 + ret = -ENOMEM;
3111 + newinfo = xt_alloc_table_info(size);
3112 + if (!newinfo)
3113 + goto out_unlock;
3114 +
3115 +- newinfo->number = number;
3116 ++ newinfo->number = compatr->num_entries;
3117 + for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
3118 + newinfo->hook_entry[i] = info->hook_entry[i];
3119 + newinfo->underflow[i] = info->underflow[i];
3120 + }
3121 + entry1 = newinfo->entries;
3122 + pos = entry1;
3123 +- size = total_size;
3124 +- xt_entry_foreach(iter0, entry0, total_size) {
3125 +- ret = compat_copy_entry_from_user(iter0, &pos, &size,
3126 +- name, newinfo, entry1);
3127 +- if (ret != 0)
3128 +- break;
3129 +- }
3130 ++ size = compatr->size;
3131 ++ xt_entry_foreach(iter0, entry0, compatr->size)
3132 ++ compat_copy_entry_from_user(iter0, &pos, &size,
3133 ++ newinfo, entry1);
3134 ++
3135 ++ /* all module references in entry0 are now gone */
3136 ++
3137 + xt_compat_flush_offsets(NFPROTO_ARP);
3138 + xt_compat_unlock(NFPROTO_ARP);
3139 +- if (ret)
3140 +- goto free_newinfo;
3141 +
3142 +- ret = -ELOOP;
3143 +- if (!mark_source_chains(newinfo, valid_hooks, entry1))
3144 +- goto free_newinfo;
3145 ++ memcpy(&repl, compatr, sizeof(*compatr));
3146 +
3147 +- i = 0;
3148 +- xt_entry_foreach(iter1, entry1, newinfo->size) {
3149 +- iter1->counters.pcnt = xt_percpu_counter_alloc();
3150 +- if (IS_ERR_VALUE(iter1->counters.pcnt)) {
3151 +- ret = -ENOMEM;
3152 +- break;
3153 +- }
3154 +-
3155 +- ret = check_target(iter1, name);
3156 +- if (ret != 0) {
3157 +- xt_percpu_counter_free(iter1->counters.pcnt);
3158 +- break;
3159 +- }
3160 +- ++i;
3161 +- if (strcmp(arpt_get_target(iter1)->u.user.name,
3162 +- XT_ERROR_TARGET) == 0)
3163 +- ++newinfo->stacksize;
3164 +- }
3165 +- if (ret) {
3166 +- /*
3167 +- * The first i matches need cleanup_entry (calls ->destroy)
3168 +- * because they had called ->check already. The other j-i
3169 +- * entries need only release.
3170 +- */
3171 +- int skip = i;
3172 +- j -= i;
3173 +- xt_entry_foreach(iter0, entry0, newinfo->size) {
3174 +- if (skip-- > 0)
3175 +- continue;
3176 +- if (j-- == 0)
3177 +- break;
3178 +- compat_release_entry(iter0);
3179 +- }
3180 +- xt_entry_foreach(iter1, entry1, newinfo->size) {
3181 +- if (i-- == 0)
3182 +- break;
3183 +- cleanup_entry(iter1);
3184 +- }
3185 +- xt_free_table_info(newinfo);
3186 +- return ret;
3187 ++ for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
3188 ++ repl.hook_entry[i] = newinfo->hook_entry[i];
3189 ++ repl.underflow[i] = newinfo->underflow[i];
3190 + }
3191 +
3192 ++ repl.num_counters = 0;
3193 ++ repl.counters = NULL;
3194 ++ repl.size = newinfo->size;
3195 ++ ret = translate_table(newinfo, entry1, &repl);
3196 ++ if (ret)
3197 ++ goto free_newinfo;
3198 ++
3199 + *pinfo = newinfo;
3200 + *pentry0 = entry1;
3201 + xt_free_table_info(info);
3202 +@@ -1470,31 +1372,18 @@ static int translate_compat_table(const char *name,
3203 +
3204 + free_newinfo:
3205 + xt_free_table_info(newinfo);
3206 +-out:
3207 +- xt_entry_foreach(iter0, entry0, total_size) {
3208 ++ return ret;
3209 ++out_unlock:
3210 ++ xt_compat_flush_offsets(NFPROTO_ARP);
3211 ++ xt_compat_unlock(NFPROTO_ARP);
3212 ++ xt_entry_foreach(iter0, entry0, compatr->size) {
3213 + if (j-- == 0)
3214 + break;
3215 + compat_release_entry(iter0);
3216 + }
3217 + return ret;
3218 +-out_unlock:
3219 +- xt_compat_flush_offsets(NFPROTO_ARP);
3220 +- xt_compat_unlock(NFPROTO_ARP);
3221 +- goto out;
3222 + }
3223 +
3224 +-struct compat_arpt_replace {
3225 +- char name[XT_TABLE_MAXNAMELEN];
3226 +- u32 valid_hooks;
3227 +- u32 num_entries;
3228 +- u32 size;
3229 +- u32 hook_entry[NF_ARP_NUMHOOKS];
3230 +- u32 underflow[NF_ARP_NUMHOOKS];
3231 +- u32 num_counters;
3232 +- compat_uptr_t counters;
3233 +- struct compat_arpt_entry entries[0];
3234 +-};
3235 +-
3236 + static int compat_do_replace(struct net *net, void __user *user,
3237 + unsigned int len)
3238 + {
3239 +@@ -1527,10 +1416,7 @@ static int compat_do_replace(struct net *net, void __user *user,
3240 + goto free_newinfo;
3241 + }
3242 +
3243 +- ret = translate_compat_table(tmp.name, tmp.valid_hooks,
3244 +- &newinfo, &loc_cpu_entry, tmp.size,
3245 +- tmp.num_entries, tmp.hook_entry,
3246 +- tmp.underflow);
3247 ++ ret = translate_compat_table(&newinfo, &loc_cpu_entry, &tmp);
3248 + if (ret != 0)
3249 + goto free_newinfo;
3250 +
3251 +diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
3252 +index b99affad6ba1..a399c5419622 100644
3253 +--- a/net/ipv4/netfilter/ip_tables.c
3254 ++++ b/net/ipv4/netfilter/ip_tables.c
3255 +@@ -168,11 +168,12 @@ get_entry(const void *base, unsigned int offset)
3256 +
3257 + /* All zeroes == unconditional rule. */
3258 + /* Mildly perf critical (only if packet tracing is on) */
3259 +-static inline bool unconditional(const struct ipt_ip *ip)
3260 ++static inline bool unconditional(const struct ipt_entry *e)
3261 + {
3262 + static const struct ipt_ip uncond;
3263 +
3264 +- return memcmp(ip, &uncond, sizeof(uncond)) == 0;
3265 ++ return e->target_offset == sizeof(struct ipt_entry) &&
3266 ++ memcmp(&e->ip, &uncond, sizeof(uncond)) == 0;
3267 + #undef FWINV
3268 + }
3269 +
3270 +@@ -229,11 +230,10 @@ get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
3271 + } else if (s == e) {
3272 + (*rulenum)++;
3273 +
3274 +- if (s->target_offset == sizeof(struct ipt_entry) &&
3275 ++ if (unconditional(s) &&
3276 + strcmp(t->target.u.kernel.target->name,
3277 + XT_STANDARD_TARGET) == 0 &&
3278 +- t->verdict < 0 &&
3279 +- unconditional(&s->ip)) {
3280 ++ t->verdict < 0) {
3281 + /* Tail of chains: STANDARD target (return/policy) */
3282 + *comment = *chainname == hookname
3283 + ? comments[NF_IP_TRACE_COMMENT_POLICY]
3284 +@@ -443,6 +443,18 @@ ipt_do_table(struct sk_buff *skb,
3285 + #endif
3286 + }
3287 +
3288 ++static bool find_jump_target(const struct xt_table_info *t,
3289 ++ const struct ipt_entry *target)
3290 ++{
3291 ++ struct ipt_entry *iter;
3292 ++
3293 ++ xt_entry_foreach(iter, t->entries, t->size) {
3294 ++ if (iter == target)
3295 ++ return true;
3296 ++ }
3297 ++ return false;
3298 ++}
3299 ++
3300 + /* Figures out from what hook each rule can be called: returns 0 if
3301 + there are loops. Puts hook bitmask in comefrom. */
3302 + static int
3303 +@@ -476,11 +488,10 @@ mark_source_chains(const struct xt_table_info *newinfo,
3304 + e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
3305 +
3306 + /* Unconditional return/END. */
3307 +- if ((e->target_offset == sizeof(struct ipt_entry) &&
3308 ++ if ((unconditional(e) &&
3309 + (strcmp(t->target.u.user.name,
3310 + XT_STANDARD_TARGET) == 0) &&
3311 +- t->verdict < 0 && unconditional(&e->ip)) ||
3312 +- visited) {
3313 ++ t->verdict < 0) || visited) {
3314 + unsigned int oldpos, size;
3315 +
3316 + if ((strcmp(t->target.u.user.name,
3317 +@@ -521,6 +532,8 @@ mark_source_chains(const struct xt_table_info *newinfo,
3318 + size = e->next_offset;
3319 + e = (struct ipt_entry *)
3320 + (entry0 + pos + size);
3321 ++ if (pos + size >= newinfo->size)
3322 ++ return 0;
3323 + e->counters.pcnt = pos;
3324 + pos += size;
3325 + } else {
3326 +@@ -539,9 +552,15 @@ mark_source_chains(const struct xt_table_info *newinfo,
3327 + /* This a jump; chase it. */
3328 + duprintf("Jump rule %u -> %u\n",
3329 + pos, newpos);
3330 ++ e = (struct ipt_entry *)
3331 ++ (entry0 + newpos);
3332 ++ if (!find_jump_target(newinfo, e))
3333 ++ return 0;
3334 + } else {
3335 + /* ... this is a fallthru */
3336 + newpos = pos + e->next_offset;
3337 ++ if (newpos >= newinfo->size)
3338 ++ return 0;
3339 + }
3340 + e = (struct ipt_entry *)
3341 + (entry0 + newpos);
3342 +@@ -569,27 +588,6 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net)
3343 + }
3344 +
3345 + static int
3346 +-check_entry(const struct ipt_entry *e, const char *name)
3347 +-{
3348 +- const struct xt_entry_target *t;
3349 +-
3350 +- if (!ip_checkentry(&e->ip)) {
3351 +- duprintf("ip check failed %p %s.\n", e, name);
3352 +- return -EINVAL;
3353 +- }
3354 +-
3355 +- if (e->target_offset + sizeof(struct xt_entry_target) >
3356 +- e->next_offset)
3357 +- return -EINVAL;
3358 +-
3359 +- t = ipt_get_target_c(e);
3360 +- if (e->target_offset + t->u.target_size > e->next_offset)
3361 +- return -EINVAL;
3362 +-
3363 +- return 0;
3364 +-}
3365 +-
3366 +-static int
3367 + check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
3368 + {
3369 + const struct ipt_ip *ip = par->entryinfo;
3370 +@@ -666,10 +664,6 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
3371 + struct xt_mtchk_param mtpar;
3372 + struct xt_entry_match *ematch;
3373 +
3374 +- ret = check_entry(e, name);
3375 +- if (ret)
3376 +- return ret;
3377 +-
3378 + e->counters.pcnt = xt_percpu_counter_alloc();
3379 + if (IS_ERR_VALUE(e->counters.pcnt))
3380 + return -ENOMEM;
3381 +@@ -721,7 +715,7 @@ static bool check_underflow(const struct ipt_entry *e)
3382 + const struct xt_entry_target *t;
3383 + unsigned int verdict;
3384 +
3385 +- if (!unconditional(&e->ip))
3386 ++ if (!unconditional(e))
3387 + return false;
3388 + t = ipt_get_target_c(e);
3389 + if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
3390 +@@ -741,9 +735,11 @@ check_entry_size_and_hooks(struct ipt_entry *e,
3391 + unsigned int valid_hooks)
3392 + {
3393 + unsigned int h;
3394 ++ int err;
3395 +
3396 + if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
3397 +- (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
3398 ++ (unsigned char *)e + sizeof(struct ipt_entry) >= limit ||
3399 ++ (unsigned char *)e + e->next_offset > limit) {
3400 + duprintf("Bad offset %p\n", e);
3401 + return -EINVAL;
3402 + }
3403 +@@ -755,6 +751,14 @@ check_entry_size_and_hooks(struct ipt_entry *e,
3404 + return -EINVAL;
3405 + }
3406 +
3407 ++ if (!ip_checkentry(&e->ip))
3408 ++ return -EINVAL;
3409 ++
3410 ++ err = xt_check_entry_offsets(e, e->elems, e->target_offset,
3411 ++ e->next_offset);
3412 ++ if (err)
3413 ++ return err;
3414 ++
3415 + /* Check hooks & underflows */
3416 + for (h = 0; h < NF_INET_NUMHOOKS; h++) {
3417 + if (!(valid_hooks & (1 << h)))
3418 +@@ -763,9 +767,9 @@ check_entry_size_and_hooks(struct ipt_entry *e,
3419 + newinfo->hook_entry[h] = hook_entries[h];
3420 + if ((unsigned char *)e - base == underflows[h]) {
3421 + if (!check_underflow(e)) {
3422 +- pr_err("Underflows must be unconditional and "
3423 +- "use the STANDARD target with "
3424 +- "ACCEPT/DROP\n");
3425 ++ pr_debug("Underflows must be unconditional and "
3426 ++ "use the STANDARD target with "
3427 ++ "ACCEPT/DROP\n");
3428 + return -EINVAL;
3429 + }
3430 + newinfo->underflow[h] = underflows[h];
3431 +@@ -1309,55 +1313,17 @@ do_add_counters(struct net *net, const void __user *user,
3432 + unsigned int i;
3433 + struct xt_counters_info tmp;
3434 + struct xt_counters *paddc;
3435 +- unsigned int num_counters;
3436 +- const char *name;
3437 +- int size;
3438 +- void *ptmp;
3439 + struct xt_table *t;
3440 + const struct xt_table_info *private;
3441 + int ret = 0;
3442 + struct ipt_entry *iter;
3443 + unsigned int addend;
3444 +-#ifdef CONFIG_COMPAT
3445 +- struct compat_xt_counters_info compat_tmp;
3446 +-
3447 +- if (compat) {
3448 +- ptmp = &compat_tmp;
3449 +- size = sizeof(struct compat_xt_counters_info);
3450 +- } else
3451 +-#endif
3452 +- {
3453 +- ptmp = &tmp;
3454 +- size = sizeof(struct xt_counters_info);
3455 +- }
3456 +-
3457 +- if (copy_from_user(ptmp, user, size) != 0)
3458 +- return -EFAULT;
3459 +-
3460 +-#ifdef CONFIG_COMPAT
3461 +- if (compat) {
3462 +- num_counters = compat_tmp.num_counters;
3463 +- name = compat_tmp.name;
3464 +- } else
3465 +-#endif
3466 +- {
3467 +- num_counters = tmp.num_counters;
3468 +- name = tmp.name;
3469 +- }
3470 +
3471 +- if (len != size + num_counters * sizeof(struct xt_counters))
3472 +- return -EINVAL;
3473 +-
3474 +- paddc = vmalloc(len - size);
3475 +- if (!paddc)
3476 +- return -ENOMEM;
3477 ++ paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
3478 ++ if (IS_ERR(paddc))
3479 ++ return PTR_ERR(paddc);
3480 +
3481 +- if (copy_from_user(paddc, user + size, len - size) != 0) {
3482 +- ret = -EFAULT;
3483 +- goto free;
3484 +- }
3485 +-
3486 +- t = xt_find_table_lock(net, AF_INET, name);
3487 ++ t = xt_find_table_lock(net, AF_INET, tmp.name);
3488 + if (IS_ERR_OR_NULL(t)) {
3489 + ret = t ? PTR_ERR(t) : -ENOENT;
3490 + goto free;
3491 +@@ -1365,7 +1331,7 @@ do_add_counters(struct net *net, const void __user *user,
3492 +
3493 + local_bh_disable();
3494 + private = t->private;
3495 +- if (private->number != num_counters) {
3496 ++ if (private->number != tmp.num_counters) {
3497 + ret = -EINVAL;
3498 + goto unlock_up_free;
3499 + }
3500 +@@ -1444,7 +1410,6 @@ compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
3501 +
3502 + static int
3503 + compat_find_calc_match(struct xt_entry_match *m,
3504 +- const char *name,
3505 + const struct ipt_ip *ip,
3506 + int *size)
3507 + {
3508 +@@ -1479,21 +1444,19 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
3509 + struct xt_table_info *newinfo,
3510 + unsigned int *size,
3511 + const unsigned char *base,
3512 +- const unsigned char *limit,
3513 +- const unsigned int *hook_entries,
3514 +- const unsigned int *underflows,
3515 +- const char *name)
3516 ++ const unsigned char *limit)
3517 + {
3518 + struct xt_entry_match *ematch;
3519 + struct xt_entry_target *t;
3520 + struct xt_target *target;
3521 + unsigned int entry_offset;
3522 + unsigned int j;
3523 +- int ret, off, h;
3524 ++ int ret, off;
3525 +
3526 + duprintf("check_compat_entry_size_and_hooks %p\n", e);
3527 + if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
3528 +- (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
3529 ++ (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit ||
3530 ++ (unsigned char *)e + e->next_offset > limit) {
3531 + duprintf("Bad offset %p, limit = %p\n", e, limit);
3532 + return -EINVAL;
3533 + }
3534 +@@ -1505,8 +1468,11 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
3535 + return -EINVAL;
3536 + }
3537 +
3538 +- /* For purposes of check_entry casting the compat entry is fine */
3539 +- ret = check_entry((struct ipt_entry *)e, name);
3540 ++ if (!ip_checkentry(&e->ip))
3541 ++ return -EINVAL;
3542 ++
3543 ++ ret = xt_compat_check_entry_offsets(e, e->elems,
3544 ++ e->target_offset, e->next_offset);
3545 + if (ret)
3546 + return ret;
3547 +
3548 +@@ -1514,7 +1480,7 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
3549 + entry_offset = (void *)e - (void *)base;
3550 + j = 0;
3551 + xt_ematch_foreach(ematch, e) {
3552 +- ret = compat_find_calc_match(ematch, name, &e->ip, &off);
3553 ++ ret = compat_find_calc_match(ematch, &e->ip, &off);
3554 + if (ret != 0)
3555 + goto release_matches;
3556 + ++j;
3557 +@@ -1537,17 +1503,6 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
3558 + if (ret)
3559 + goto out;
3560 +
3561 +- /* Check hooks & underflows */
3562 +- for (h = 0; h < NF_INET_NUMHOOKS; h++) {
3563 +- if ((unsigned char *)e - base == hook_entries[h])
3564 +- newinfo->hook_entry[h] = hook_entries[h];
3565 +- if ((unsigned char *)e - base == underflows[h])
3566 +- newinfo->underflow[h] = underflows[h];
3567 +- }
3568 +-
3569 +- /* Clear counters and comefrom */
3570 +- memset(&e->counters, 0, sizeof(e->counters));
3571 +- e->comefrom = 0;
3572 + return 0;
3573 +
3574 + out:
3575 +@@ -1561,19 +1516,18 @@ release_matches:
3576 + return ret;
3577 + }
3578 +
3579 +-static int
3580 ++static void
3581 + compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
3582 +- unsigned int *size, const char *name,
3583 ++ unsigned int *size,
3584 + struct xt_table_info *newinfo, unsigned char *base)
3585 + {
3586 + struct xt_entry_target *t;
3587 + struct xt_target *target;
3588 + struct ipt_entry *de;
3589 + unsigned int origsize;
3590 +- int ret, h;
3591 ++ int h;
3592 + struct xt_entry_match *ematch;
3593 +
3594 +- ret = 0;
3595 + origsize = *size;
3596 + de = (struct ipt_entry *)*dstptr;
3597 + memcpy(de, e, sizeof(struct ipt_entry));
3598 +@@ -1582,201 +1536,105 @@ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
3599 + *dstptr += sizeof(struct ipt_entry);
3600 + *size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
3601 +
3602 +- xt_ematch_foreach(ematch, e) {
3603 +- ret = xt_compat_match_from_user(ematch, dstptr, size);
3604 +- if (ret != 0)
3605 +- return ret;
3606 +- }
3607 ++ xt_ematch_foreach(ematch, e)
3608 ++ xt_compat_match_from_user(ematch, dstptr, size);
3609 ++
3610 + de->target_offset = e->target_offset - (origsize - *size);
3611 + t = compat_ipt_get_target(e);
3612 + target = t->u.kernel.target;
3613 + xt_compat_target_from_user(t, dstptr, size);
3614 +
3615 + de->next_offset = e->next_offset - (origsize - *size);
3616 ++
3617 + for (h = 0; h < NF_INET_NUMHOOKS; h++) {
3618 + if ((unsigned char *)de - base < newinfo->hook_entry[h])
3619 + newinfo->hook_entry[h] -= origsize - *size;
3620 + if ((unsigned char *)de - base < newinfo->underflow[h])
3621 + newinfo->underflow[h] -= origsize - *size;
3622 + }
3623 +- return ret;
3624 +-}
3625 +-
3626 +-static int
3627 +-compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
3628 +-{
3629 +- struct xt_entry_match *ematch;
3630 +- struct xt_mtchk_param mtpar;
3631 +- unsigned int j;
3632 +- int ret = 0;
3633 +-
3634 +- e->counters.pcnt = xt_percpu_counter_alloc();
3635 +- if (IS_ERR_VALUE(e->counters.pcnt))
3636 +- return -ENOMEM;
3637 +-
3638 +- j = 0;
3639 +- mtpar.net = net;
3640 +- mtpar.table = name;
3641 +- mtpar.entryinfo = &e->ip;
3642 +- mtpar.hook_mask = e->comefrom;
3643 +- mtpar.family = NFPROTO_IPV4;
3644 +- xt_ematch_foreach(ematch, e) {
3645 +- ret = check_match(ematch, &mtpar);
3646 +- if (ret != 0)
3647 +- goto cleanup_matches;
3648 +- ++j;
3649 +- }
3650 +-
3651 +- ret = check_target(e, net, name);
3652 +- if (ret)
3653 +- goto cleanup_matches;
3654 +- return 0;
3655 +-
3656 +- cleanup_matches:
3657 +- xt_ematch_foreach(ematch, e) {
3658 +- if (j-- == 0)
3659 +- break;
3660 +- cleanup_match(ematch, net);
3661 +- }
3662 +-
3663 +- xt_percpu_counter_free(e->counters.pcnt);
3664 +-
3665 +- return ret;
3666 + }
3667 +
3668 + static int
3669 + translate_compat_table(struct net *net,
3670 +- const char *name,
3671 +- unsigned int valid_hooks,
3672 + struct xt_table_info **pinfo,
3673 + void **pentry0,
3674 +- unsigned int total_size,
3675 +- unsigned int number,
3676 +- unsigned int *hook_entries,
3677 +- unsigned int *underflows)
3678 ++ const struct compat_ipt_replace *compatr)
3679 + {
3680 + unsigned int i, j;
3681 + struct xt_table_info *newinfo, *info;
3682 + void *pos, *entry0, *entry1;
3683 + struct compat_ipt_entry *iter0;
3684 +- struct ipt_entry *iter1;
3685 ++ struct ipt_replace repl;
3686 + unsigned int size;
3687 + int ret;
3688 +
3689 + info = *pinfo;
3690 + entry0 = *pentry0;
3691 +- size = total_size;
3692 +- info->number = number;
3693 +-
3694 +- /* Init all hooks to impossible value. */
3695 +- for (i = 0; i < NF_INET_NUMHOOKS; i++) {
3696 +- info->hook_entry[i] = 0xFFFFFFFF;
3697 +- info->underflow[i] = 0xFFFFFFFF;
3698 +- }
3699 ++ size = compatr->size;
3700 ++ info->number = compatr->num_entries;
3701 +
3702 + duprintf("translate_compat_table: size %u\n", info->size);
3703 + j = 0;
3704 + xt_compat_lock(AF_INET);
3705 +- xt_compat_init_offsets(AF_INET, number);
3706 ++ xt_compat_init_offsets(AF_INET, compatr->num_entries);
3707 + /* Walk through entries, checking offsets. */
3708 +- xt_entry_foreach(iter0, entry0, total_size) {
3709 ++ xt_entry_foreach(iter0, entry0, compatr->size) {
3710 + ret = check_compat_entry_size_and_hooks(iter0, info, &size,
3711 + entry0,
3712 +- entry0 + total_size,
3713 +- hook_entries,
3714 +- underflows,
3715 +- name);
3716 ++ entry0 + compatr->size);
3717 + if (ret != 0)
3718 + goto out_unlock;
3719 + ++j;
3720 + }
3721 +
3722 + ret = -EINVAL;
3723 +- if (j != number) {
3724 ++ if (j != compatr->num_entries) {
3725 + duprintf("translate_compat_table: %u not %u entries\n",
3726 +- j, number);
3727 ++ j, compatr->num_entries);
3728 + goto out_unlock;
3729 + }
3730 +
3731 +- /* Check hooks all assigned */
3732 +- for (i = 0; i < NF_INET_NUMHOOKS; i++) {
3733 +- /* Only hooks which are valid */
3734 +- if (!(valid_hooks & (1 << i)))
3735 +- continue;
3736 +- if (info->hook_entry[i] == 0xFFFFFFFF) {
3737 +- duprintf("Invalid hook entry %u %u\n",
3738 +- i, hook_entries[i]);
3739 +- goto out_unlock;
3740 +- }
3741 +- if (info->underflow[i] == 0xFFFFFFFF) {
3742 +- duprintf("Invalid underflow %u %u\n",
3743 +- i, underflows[i]);
3744 +- goto out_unlock;
3745 +- }
3746 +- }
3747 +-
3748 + ret = -ENOMEM;
3749 + newinfo = xt_alloc_table_info(size);
3750 + if (!newinfo)
3751 + goto out_unlock;
3752 +
3753 +- newinfo->number = number;
3754 ++ newinfo->number = compatr->num_entries;
3755 + for (i = 0; i < NF_INET_NUMHOOKS; i++) {
3756 +- newinfo->hook_entry[i] = info->hook_entry[i];
3757 +- newinfo->underflow[i] = info->underflow[i];
3758 ++ newinfo->hook_entry[i] = compatr->hook_entry[i];
3759 ++ newinfo->underflow[i] = compatr->underflow[i];
3760 + }
3761 + entry1 = newinfo->entries;
3762 + pos = entry1;
3763 +- size = total_size;
3764 +- xt_entry_foreach(iter0, entry0, total_size) {
3765 +- ret = compat_copy_entry_from_user(iter0, &pos, &size,
3766 +- name, newinfo, entry1);
3767 +- if (ret != 0)
3768 +- break;
3769 +- }
3770 ++ size = compatr->size;
3771 ++ xt_entry_foreach(iter0, entry0, compatr->size)
3772 ++ compat_copy_entry_from_user(iter0, &pos, &size,
3773 ++ newinfo, entry1);
3774 ++
3775 ++ /* all module references in entry0 are now gone.
3776 ++ * entry1/newinfo contains a 64bit ruleset that looks exactly as
3777 ++ * generated by 64bit userspace.
3778 ++ *
3779 ++ * Call standard translate_table() to validate all hook_entries,
3780 ++ * underflows, check for loops, etc.
3781 ++ */
3782 + xt_compat_flush_offsets(AF_INET);
3783 + xt_compat_unlock(AF_INET);
3784 +- if (ret)
3785 +- goto free_newinfo;
3786 +
3787 +- ret = -ELOOP;
3788 +- if (!mark_source_chains(newinfo, valid_hooks, entry1))
3789 +- goto free_newinfo;
3790 ++ memcpy(&repl, compatr, sizeof(*compatr));
3791 +
3792 +- i = 0;
3793 +- xt_entry_foreach(iter1, entry1, newinfo->size) {
3794 +- ret = compat_check_entry(iter1, net, name);
3795 +- if (ret != 0)
3796 +- break;
3797 +- ++i;
3798 +- if (strcmp(ipt_get_target(iter1)->u.user.name,
3799 +- XT_ERROR_TARGET) == 0)
3800 +- ++newinfo->stacksize;
3801 +- }
3802 +- if (ret) {
3803 +- /*
3804 +- * The first i matches need cleanup_entry (calls ->destroy)
3805 +- * because they had called ->check already. The other j-i
3806 +- * entries need only release.
3807 +- */
3808 +- int skip = i;
3809 +- j -= i;
3810 +- xt_entry_foreach(iter0, entry0, newinfo->size) {
3811 +- if (skip-- > 0)
3812 +- continue;
3813 +- if (j-- == 0)
3814 +- break;
3815 +- compat_release_entry(iter0);
3816 +- }
3817 +- xt_entry_foreach(iter1, entry1, newinfo->size) {
3818 +- if (i-- == 0)
3819 +- break;
3820 +- cleanup_entry(iter1, net);
3821 +- }
3822 +- xt_free_table_info(newinfo);
3823 +- return ret;
3824 ++ for (i = 0; i < NF_INET_NUMHOOKS; i++) {
3825 ++ repl.hook_entry[i] = newinfo->hook_entry[i];
3826 ++ repl.underflow[i] = newinfo->underflow[i];
3827 + }
3828 +
3829 ++ repl.num_counters = 0;
3830 ++ repl.counters = NULL;
3831 ++ repl.size = newinfo->size;
3832 ++ ret = translate_table(net, newinfo, entry1, &repl);
3833 ++ if (ret)
3834 ++ goto free_newinfo;
3835 ++
3836 + *pinfo = newinfo;
3837 + *pentry0 = entry1;
3838 + xt_free_table_info(info);
3839 +@@ -1784,17 +1642,16 @@ translate_compat_table(struct net *net,
3840 +
3841 + free_newinfo:
3842 + xt_free_table_info(newinfo);
3843 +-out:
3844 +- xt_entry_foreach(iter0, entry0, total_size) {
3845 ++ return ret;
3846 ++out_unlock:
3847 ++ xt_compat_flush_offsets(AF_INET);
3848 ++ xt_compat_unlock(AF_INET);
3849 ++ xt_entry_foreach(iter0, entry0, compatr->size) {
3850 + if (j-- == 0)
3851 + break;
3852 + compat_release_entry(iter0);
3853 + }
3854 + return ret;
3855 +-out_unlock:
3856 +- xt_compat_flush_offsets(AF_INET);
3857 +- xt_compat_unlock(AF_INET);
3858 +- goto out;
3859 + }
3860 +
3861 + static int
3862 +@@ -1830,10 +1687,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
3863 + goto free_newinfo;
3864 + }
3865 +
3866 +- ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
3867 +- &newinfo, &loc_cpu_entry, tmp.size,
3868 +- tmp.num_entries, tmp.hook_entry,
3869 +- tmp.underflow);
3870 ++ ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
3871 + if (ret != 0)
3872 + goto free_newinfo;
3873 +
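
The comment in the translate_compat_table() hunk above captures the whole refactor: the compat path no longer duplicates hook, underflow and loop validation, it merely converts the 32-bit ruleset into native layout and then funnels the result through the same translate_table() the 64-bit path uses. A minimal user-space sketch of that shape follows; every name in it is invented for illustration and nothing here is a kernel API.

    #include <stdint.h>
    #include <stdio.h>

    /* Toy stand-ins for the compat (32-bit userland) and native layouts. */
    struct compat_rule { uint32_t target_offset; uint32_t next_offset; };
    struct rule        { uint64_t target_offset; uint64_t next_offset; };

    /* One native validator shared by both paths -- the role
     * translate_table() plays after this patch. */
    static int validate(const struct rule *r)
    {
        return r->target_offset <= r->next_offset ? 0 : -1;
    }

    /* Compat path: widen the layout first, then reuse the native
     * validator instead of re-implementing every check. */
    static int translate_compat(const struct compat_rule *c)
    {
        struct rule r = {
            .target_offset = c->target_offset,
            .next_offset   = c->next_offset,
        };
        return validate(&r);
    }

    int main(void)
    {
        struct compat_rule c = { .target_offset = 8, .next_offset = 16 };
        printf("compat rule valid: %s\n",
               translate_compat(&c) == 0 ? "yes" : "no");
        return 0;
    }
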
3874 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
3875 +index 21fbb54f11d0..44e1632370dd 100644
3876 +--- a/net/ipv4/udp.c
3877 ++++ b/net/ipv4/udp.c
3878 +@@ -1531,7 +1531,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
3879 +
3880 + /* if we're overly short, let UDP handle it */
3881 + encap_rcv = ACCESS_ONCE(up->encap_rcv);
3882 +- if (skb->len > sizeof(struct udphdr) && encap_rcv) {
3883 ++ if (encap_rcv) {
3884 + int ret;
3885 +
3886 + /* Verify checksum before giving to encap */
3887 +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
3888 +index a175152d3e46..58900c21e4e4 100644
3889 +--- a/net/ipv6/ip6_output.c
3890 ++++ b/net/ipv6/ip6_output.c
3891 +@@ -1072,17 +1072,12 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
3892 + const struct in6_addr *final_dst)
3893 + {
3894 + struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
3895 +- int err;
3896 +
3897 + dst = ip6_sk_dst_check(sk, dst, fl6);
3898 ++ if (!dst)
3899 ++ dst = ip6_dst_lookup_flow(sk, fl6, final_dst);
3900 +
3901 +- err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
3902 +- if (err)
3903 +- return ERR_PTR(err);
3904 +- if (final_dst)
3905 +- fl6->daddr = *final_dst;
3906 +-
3907 +- return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
3908 ++ return dst;
3909 + }
3910 + EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
3911 +
3912 +diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
3913 +index 99425cf2819b..22f39e00bef3 100644
3914 +--- a/net/ipv6/netfilter/ip6_tables.c
3915 ++++ b/net/ipv6/netfilter/ip6_tables.c
3916 +@@ -198,11 +198,12 @@ get_entry(const void *base, unsigned int offset)
3917 +
3918 + /* All zeroes == unconditional rule. */
3919 + /* Mildly perf critical (only if packet tracing is on) */
3920 +-static inline bool unconditional(const struct ip6t_ip6 *ipv6)
3921 ++static inline bool unconditional(const struct ip6t_entry *e)
3922 + {
3923 + static const struct ip6t_ip6 uncond;
3924 +
3925 +- return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
3926 ++ return e->target_offset == sizeof(struct ip6t_entry) &&
3927 ++ memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0;
3928 + }
3929 +
3930 + static inline const struct xt_entry_target *
3931 +@@ -258,11 +259,10 @@ get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
3932 + } else if (s == e) {
3933 + (*rulenum)++;
3934 +
3935 +- if (s->target_offset == sizeof(struct ip6t_entry) &&
3936 ++ if (unconditional(s) &&
3937 + strcmp(t->target.u.kernel.target->name,
3938 + XT_STANDARD_TARGET) == 0 &&
3939 +- t->verdict < 0 &&
3940 +- unconditional(&s->ipv6)) {
3941 ++ t->verdict < 0) {
3942 + /* Tail of chains: STANDARD target (return/policy) */
3943 + *comment = *chainname == hookname
3944 + ? comments[NF_IP6_TRACE_COMMENT_POLICY]
3945 +@@ -455,6 +455,18 @@ ip6t_do_table(struct sk_buff *skb,
3946 + #endif
3947 + }
3948 +
3949 ++static bool find_jump_target(const struct xt_table_info *t,
3950 ++ const struct ip6t_entry *target)
3951 ++{
3952 ++ struct ip6t_entry *iter;
3953 ++
3954 ++ xt_entry_foreach(iter, t->entries, t->size) {
3955 ++ if (iter == target)
3956 ++ return true;
3957 ++ }
3958 ++ return false;
3959 ++}
3960 ++
3961 + /* Figures out from what hook each rule can be called: returns 0 if
3962 + there are loops. Puts hook bitmask in comefrom. */
3963 + static int
3964 +@@ -488,11 +500,10 @@ mark_source_chains(const struct xt_table_info *newinfo,
3965 + e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
3966 +
3967 + /* Unconditional return/END. */
3968 +- if ((e->target_offset == sizeof(struct ip6t_entry) &&
3969 ++ if ((unconditional(e) &&
3970 + (strcmp(t->target.u.user.name,
3971 + XT_STANDARD_TARGET) == 0) &&
3972 +- t->verdict < 0 &&
3973 +- unconditional(&e->ipv6)) || visited) {
3974 ++ t->verdict < 0) || visited) {
3975 + unsigned int oldpos, size;
3976 +
3977 + if ((strcmp(t->target.u.user.name,
3978 +@@ -533,6 +544,8 @@ mark_source_chains(const struct xt_table_info *newinfo,
3979 + size = e->next_offset;
3980 + e = (struct ip6t_entry *)
3981 + (entry0 + pos + size);
3982 ++ if (pos + size >= newinfo->size)
3983 ++ return 0;
3984 + e->counters.pcnt = pos;
3985 + pos += size;
3986 + } else {
3987 +@@ -551,9 +564,15 @@ mark_source_chains(const struct xt_table_info *newinfo,
3988 + /* This a jump; chase it. */
3989 + duprintf("Jump rule %u -> %u\n",
3990 + pos, newpos);
3991 ++ e = (struct ip6t_entry *)
3992 ++ (entry0 + newpos);
3993 ++ if (!find_jump_target(newinfo, e))
3994 ++ return 0;
3995 + } else {
3996 + /* ... this is a fallthru */
3997 + newpos = pos + e->next_offset;
3998 ++ if (newpos >= newinfo->size)
3999 ++ return 0;
4000 + }
4001 + e = (struct ip6t_entry *)
4002 + (entry0 + newpos);
4003 +@@ -580,27 +599,6 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net)
4004 + module_put(par.match->me);
4005 + }
4006 +
4007 +-static int
4008 +-check_entry(const struct ip6t_entry *e, const char *name)
4009 +-{
4010 +- const struct xt_entry_target *t;
4011 +-
4012 +- if (!ip6_checkentry(&e->ipv6)) {
4013 +- duprintf("ip_tables: ip check failed %p %s.\n", e, name);
4014 +- return -EINVAL;
4015 +- }
4016 +-
4017 +- if (e->target_offset + sizeof(struct xt_entry_target) >
4018 +- e->next_offset)
4019 +- return -EINVAL;
4020 +-
4021 +- t = ip6t_get_target_c(e);
4022 +- if (e->target_offset + t->u.target_size > e->next_offset)
4023 +- return -EINVAL;
4024 +-
4025 +- return 0;
4026 +-}
4027 +-
4028 + static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
4029 + {
4030 + const struct ip6t_ip6 *ipv6 = par->entryinfo;
4031 +@@ -679,10 +677,6 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
4032 + struct xt_mtchk_param mtpar;
4033 + struct xt_entry_match *ematch;
4034 +
4035 +- ret = check_entry(e, name);
4036 +- if (ret)
4037 +- return ret;
4038 +-
4039 + e->counters.pcnt = xt_percpu_counter_alloc();
4040 + if (IS_ERR_VALUE(e->counters.pcnt))
4041 + return -ENOMEM;
4042 +@@ -733,7 +727,7 @@ static bool check_underflow(const struct ip6t_entry *e)
4043 + const struct xt_entry_target *t;
4044 + unsigned int verdict;
4045 +
4046 +- if (!unconditional(&e->ipv6))
4047 ++ if (!unconditional(e))
4048 + return false;
4049 + t = ip6t_get_target_c(e);
4050 + if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
4051 +@@ -753,9 +747,11 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
4052 + unsigned int valid_hooks)
4053 + {
4054 + unsigned int h;
4055 ++ int err;
4056 +
4057 + if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
4058 +- (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
4059 ++ (unsigned char *)e + sizeof(struct ip6t_entry) >= limit ||
4060 ++ (unsigned char *)e + e->next_offset > limit) {
4061 + duprintf("Bad offset %p\n", e);
4062 + return -EINVAL;
4063 + }
4064 +@@ -767,6 +763,14 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
4065 + return -EINVAL;
4066 + }
4067 +
4068 ++ if (!ip6_checkentry(&e->ipv6))
4069 ++ return -EINVAL;
4070 ++
4071 ++ err = xt_check_entry_offsets(e, e->elems, e->target_offset,
4072 ++ e->next_offset);
4073 ++ if (err)
4074 ++ return err;
4075 ++
4076 + /* Check hooks & underflows */
4077 + for (h = 0; h < NF_INET_NUMHOOKS; h++) {
4078 + if (!(valid_hooks & (1 << h)))
4079 +@@ -775,9 +779,9 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
4080 + newinfo->hook_entry[h] = hook_entries[h];
4081 + if ((unsigned char *)e - base == underflows[h]) {
4082 + if (!check_underflow(e)) {
4083 +- pr_err("Underflows must be unconditional and "
4084 +- "use the STANDARD target with "
4085 +- "ACCEPT/DROP\n");
4086 ++ pr_debug("Underflows must be unconditional and "
4087 ++ "use the STANDARD target with "
4088 ++ "ACCEPT/DROP\n");
4089 + return -EINVAL;
4090 + }
4091 + newinfo->underflow[h] = underflows[h];
4092 +@@ -1321,55 +1325,16 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
4093 + unsigned int i;
4094 + struct xt_counters_info tmp;
4095 + struct xt_counters *paddc;
4096 +- unsigned int num_counters;
4097 +- char *name;
4098 +- int size;
4099 +- void *ptmp;
4100 + struct xt_table *t;
4101 + const struct xt_table_info *private;
4102 + int ret = 0;
4103 + struct ip6t_entry *iter;
4104 + unsigned int addend;
4105 +-#ifdef CONFIG_COMPAT
4106 +- struct compat_xt_counters_info compat_tmp;
4107 +-
4108 +- if (compat) {
4109 +- ptmp = &compat_tmp;
4110 +- size = sizeof(struct compat_xt_counters_info);
4111 +- } else
4112 +-#endif
4113 +- {
4114 +- ptmp = &tmp;
4115 +- size = sizeof(struct xt_counters_info);
4116 +- }
4117 +-
4118 +- if (copy_from_user(ptmp, user, size) != 0)
4119 +- return -EFAULT;
4120 +
4121 +-#ifdef CONFIG_COMPAT
4122 +- if (compat) {
4123 +- num_counters = compat_tmp.num_counters;
4124 +- name = compat_tmp.name;
4125 +- } else
4126 +-#endif
4127 +- {
4128 +- num_counters = tmp.num_counters;
4129 +- name = tmp.name;
4130 +- }
4131 +-
4132 +- if (len != size + num_counters * sizeof(struct xt_counters))
4133 +- return -EINVAL;
4134 +-
4135 +- paddc = vmalloc(len - size);
4136 +- if (!paddc)
4137 +- return -ENOMEM;
4138 +-
4139 +- if (copy_from_user(paddc, user + size, len - size) != 0) {
4140 +- ret = -EFAULT;
4141 +- goto free;
4142 +- }
4143 +-
4144 +- t = xt_find_table_lock(net, AF_INET6, name);
4145 ++ paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
4146 ++ if (IS_ERR(paddc))
4147 ++ return PTR_ERR(paddc);
4148 ++ t = xt_find_table_lock(net, AF_INET6, tmp.name);
4149 + if (IS_ERR_OR_NULL(t)) {
4150 + ret = t ? PTR_ERR(t) : -ENOENT;
4151 + goto free;
4152 +@@ -1377,7 +1342,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
4153 +
4154 + local_bh_disable();
4155 + private = t->private;
4156 +- if (private->number != num_counters) {
4157 ++ if (private->number != tmp.num_counters) {
4158 + ret = -EINVAL;
4159 + goto unlock_up_free;
4160 + }
4161 +@@ -1456,7 +1421,6 @@ compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
4162 +
4163 + static int
4164 + compat_find_calc_match(struct xt_entry_match *m,
4165 +- const char *name,
4166 + const struct ip6t_ip6 *ipv6,
4167 + int *size)
4168 + {
4169 +@@ -1491,21 +1455,19 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
4170 + struct xt_table_info *newinfo,
4171 + unsigned int *size,
4172 + const unsigned char *base,
4173 +- const unsigned char *limit,
4174 +- const unsigned int *hook_entries,
4175 +- const unsigned int *underflows,
4176 +- const char *name)
4177 ++ const unsigned char *limit)
4178 + {
4179 + struct xt_entry_match *ematch;
4180 + struct xt_entry_target *t;
4181 + struct xt_target *target;
4182 + unsigned int entry_offset;
4183 + unsigned int j;
4184 +- int ret, off, h;
4185 ++ int ret, off;
4186 +
4187 + duprintf("check_compat_entry_size_and_hooks %p\n", e);
4188 + if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
4189 +- (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
4190 ++ (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit ||
4191 ++ (unsigned char *)e + e->next_offset > limit) {
4192 + duprintf("Bad offset %p, limit = %p\n", e, limit);
4193 + return -EINVAL;
4194 + }
4195 +@@ -1517,8 +1479,11 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
4196 + return -EINVAL;
4197 + }
4198 +
4199 +- /* For purposes of check_entry casting the compat entry is fine */
4200 +- ret = check_entry((struct ip6t_entry *)e, name);
4201 ++ if (!ip6_checkentry(&e->ipv6))
4202 ++ return -EINVAL;
4203 ++
4204 ++ ret = xt_compat_check_entry_offsets(e, e->elems,
4205 ++ e->target_offset, e->next_offset);
4206 + if (ret)
4207 + return ret;
4208 +
4209 +@@ -1526,7 +1491,7 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
4210 + entry_offset = (void *)e - (void *)base;
4211 + j = 0;
4212 + xt_ematch_foreach(ematch, e) {
4213 +- ret = compat_find_calc_match(ematch, name, &e->ipv6, &off);
4214 ++ ret = compat_find_calc_match(ematch, &e->ipv6, &off);
4215 + if (ret != 0)
4216 + goto release_matches;
4217 + ++j;
4218 +@@ -1549,17 +1514,6 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
4219 + if (ret)
4220 + goto out;
4221 +
4222 +- /* Check hooks & underflows */
4223 +- for (h = 0; h < NF_INET_NUMHOOKS; h++) {
4224 +- if ((unsigned char *)e - base == hook_entries[h])
4225 +- newinfo->hook_entry[h] = hook_entries[h];
4226 +- if ((unsigned char *)e - base == underflows[h])
4227 +- newinfo->underflow[h] = underflows[h];
4228 +- }
4229 +-
4230 +- /* Clear counters and comefrom */
4231 +- memset(&e->counters, 0, sizeof(e->counters));
4232 +- e->comefrom = 0;
4233 + return 0;
4234 +
4235 + out:
4236 +@@ -1573,18 +1527,17 @@ release_matches:
4237 + return ret;
4238 + }
4239 +
4240 +-static int
4241 ++static void
4242 + compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
4243 +- unsigned int *size, const char *name,
4244 ++ unsigned int *size,
4245 + struct xt_table_info *newinfo, unsigned char *base)
4246 + {
4247 + struct xt_entry_target *t;
4248 + struct ip6t_entry *de;
4249 + unsigned int origsize;
4250 +- int ret, h;
4251 ++ int h;
4252 + struct xt_entry_match *ematch;
4253 +
4254 +- ret = 0;
4255 + origsize = *size;
4256 + de = (struct ip6t_entry *)*dstptr;
4257 + memcpy(de, e, sizeof(struct ip6t_entry));
4258 +@@ -1593,11 +1546,9 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
4259 + *dstptr += sizeof(struct ip6t_entry);
4260 + *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
4261 +
4262 +- xt_ematch_foreach(ematch, e) {
4263 +- ret = xt_compat_match_from_user(ematch, dstptr, size);
4264 +- if (ret != 0)
4265 +- return ret;
4266 +- }
4267 ++ xt_ematch_foreach(ematch, e)
4268 ++ xt_compat_match_from_user(ematch, dstptr, size);
4269 ++
4270 + de->target_offset = e->target_offset - (origsize - *size);
4271 + t = compat_ip6t_get_target(e);
4272 + xt_compat_target_from_user(t, dstptr, size);
4273 +@@ -1609,183 +1560,83 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
4274 + if ((unsigned char *)de - base < newinfo->underflow[h])
4275 + newinfo->underflow[h] -= origsize - *size;
4276 + }
4277 +- return ret;
4278 +-}
4279 +-
4280 +-static int compat_check_entry(struct ip6t_entry *e, struct net *net,
4281 +- const char *name)
4282 +-{
4283 +- unsigned int j;
4284 +- int ret = 0;
4285 +- struct xt_mtchk_param mtpar;
4286 +- struct xt_entry_match *ematch;
4287 +-
4288 +- e->counters.pcnt = xt_percpu_counter_alloc();
4289 +- if (IS_ERR_VALUE(e->counters.pcnt))
4290 +- return -ENOMEM;
4291 +- j = 0;
4292 +- mtpar.net = net;
4293 +- mtpar.table = name;
4294 +- mtpar.entryinfo = &e->ipv6;
4295 +- mtpar.hook_mask = e->comefrom;
4296 +- mtpar.family = NFPROTO_IPV6;
4297 +- xt_ematch_foreach(ematch, e) {
4298 +- ret = check_match(ematch, &mtpar);
4299 +- if (ret != 0)
4300 +- goto cleanup_matches;
4301 +- ++j;
4302 +- }
4303 +-
4304 +- ret = check_target(e, net, name);
4305 +- if (ret)
4306 +- goto cleanup_matches;
4307 +- return 0;
4308 +-
4309 +- cleanup_matches:
4310 +- xt_ematch_foreach(ematch, e) {
4311 +- if (j-- == 0)
4312 +- break;
4313 +- cleanup_match(ematch, net);
4314 +- }
4315 +-
4316 +- xt_percpu_counter_free(e->counters.pcnt);
4317 +-
4318 +- return ret;
4319 + }
4320 +
4321 + static int
4322 + translate_compat_table(struct net *net,
4323 +- const char *name,
4324 +- unsigned int valid_hooks,
4325 + struct xt_table_info **pinfo,
4326 + void **pentry0,
4327 +- unsigned int total_size,
4328 +- unsigned int number,
4329 +- unsigned int *hook_entries,
4330 +- unsigned int *underflows)
4331 ++ const struct compat_ip6t_replace *compatr)
4332 + {
4333 + unsigned int i, j;
4334 + struct xt_table_info *newinfo, *info;
4335 + void *pos, *entry0, *entry1;
4336 + struct compat_ip6t_entry *iter0;
4337 +- struct ip6t_entry *iter1;
4338 ++ struct ip6t_replace repl;
4339 + unsigned int size;
4340 + int ret = 0;
4341 +
4342 + info = *pinfo;
4343 + entry0 = *pentry0;
4344 +- size = total_size;
4345 +- info->number = number;
4346 +-
4347 +- /* Init all hooks to impossible value. */
4348 +- for (i = 0; i < NF_INET_NUMHOOKS; i++) {
4349 +- info->hook_entry[i] = 0xFFFFFFFF;
4350 +- info->underflow[i] = 0xFFFFFFFF;
4351 +- }
4352 ++ size = compatr->size;
4353 ++ info->number = compatr->num_entries;
4354 +
4355 + duprintf("translate_compat_table: size %u\n", info->size);
4356 + j = 0;
4357 + xt_compat_lock(AF_INET6);
4358 +- xt_compat_init_offsets(AF_INET6, number);
4359 ++ xt_compat_init_offsets(AF_INET6, compatr->num_entries);
4360 + /* Walk through entries, checking offsets. */
4361 +- xt_entry_foreach(iter0, entry0, total_size) {
4362 ++ xt_entry_foreach(iter0, entry0, compatr->size) {
4363 + ret = check_compat_entry_size_and_hooks(iter0, info, &size,
4364 + entry0,
4365 +- entry0 + total_size,
4366 +- hook_entries,
4367 +- underflows,
4368 +- name);
4369 ++ entry0 + compatr->size);
4370 + if (ret != 0)
4371 + goto out_unlock;
4372 + ++j;
4373 + }
4374 +
4375 + ret = -EINVAL;
4376 +- if (j != number) {
4377 ++ if (j != compatr->num_entries) {
4378 + duprintf("translate_compat_table: %u not %u entries\n",
4379 +- j, number);
4380 ++ j, compatr->num_entries);
4381 + goto out_unlock;
4382 + }
4383 +
4384 +- /* Check hooks all assigned */
4385 +- for (i = 0; i < NF_INET_NUMHOOKS; i++) {
4386 +- /* Only hooks which are valid */
4387 +- if (!(valid_hooks & (1 << i)))
4388 +- continue;
4389 +- if (info->hook_entry[i] == 0xFFFFFFFF) {
4390 +- duprintf("Invalid hook entry %u %u\n",
4391 +- i, hook_entries[i]);
4392 +- goto out_unlock;
4393 +- }
4394 +- if (info->underflow[i] == 0xFFFFFFFF) {
4395 +- duprintf("Invalid underflow %u %u\n",
4396 +- i, underflows[i]);
4397 +- goto out_unlock;
4398 +- }
4399 +- }
4400 +-
4401 + ret = -ENOMEM;
4402 + newinfo = xt_alloc_table_info(size);
4403 + if (!newinfo)
4404 + goto out_unlock;
4405 +
4406 +- newinfo->number = number;
4407 ++ newinfo->number = compatr->num_entries;
4408 + for (i = 0; i < NF_INET_NUMHOOKS; i++) {
4409 +- newinfo->hook_entry[i] = info->hook_entry[i];
4410 +- newinfo->underflow[i] = info->underflow[i];
4411 ++ newinfo->hook_entry[i] = compatr->hook_entry[i];
4412 ++ newinfo->underflow[i] = compatr->underflow[i];
4413 + }
4414 + entry1 = newinfo->entries;
4415 + pos = entry1;
4416 +- size = total_size;
4417 +- xt_entry_foreach(iter0, entry0, total_size) {
4418 +- ret = compat_copy_entry_from_user(iter0, &pos, &size,
4419 +- name, newinfo, entry1);
4420 +- if (ret != 0)
4421 +- break;
4422 +- }
4423 ++ size = compatr->size;
4424 ++ xt_entry_foreach(iter0, entry0, compatr->size)
4425 ++ compat_copy_entry_from_user(iter0, &pos, &size,
4426 ++ newinfo, entry1);
4427 ++
4428 ++ /* all module references in entry0 are now gone. */
4429 + xt_compat_flush_offsets(AF_INET6);
4430 + xt_compat_unlock(AF_INET6);
4431 +- if (ret)
4432 +- goto free_newinfo;
4433 +
4434 +- ret = -ELOOP;
4435 +- if (!mark_source_chains(newinfo, valid_hooks, entry1))
4436 +- goto free_newinfo;
4437 ++ memcpy(&repl, compatr, sizeof(*compatr));
4438 +
4439 +- i = 0;
4440 +- xt_entry_foreach(iter1, entry1, newinfo->size) {
4441 +- ret = compat_check_entry(iter1, net, name);
4442 +- if (ret != 0)
4443 +- break;
4444 +- ++i;
4445 +- if (strcmp(ip6t_get_target(iter1)->u.user.name,
4446 +- XT_ERROR_TARGET) == 0)
4447 +- ++newinfo->stacksize;
4448 +- }
4449 +- if (ret) {
4450 +- /*
4451 +- * The first i matches need cleanup_entry (calls ->destroy)
4452 +- * because they had called ->check already. The other j-i
4453 +- * entries need only release.
4454 +- */
4455 +- int skip = i;
4456 +- j -= i;
4457 +- xt_entry_foreach(iter0, entry0, newinfo->size) {
4458 +- if (skip-- > 0)
4459 +- continue;
4460 +- if (j-- == 0)
4461 +- break;
4462 +- compat_release_entry(iter0);
4463 +- }
4464 +- xt_entry_foreach(iter1, entry1, newinfo->size) {
4465 +- if (i-- == 0)
4466 +- break;
4467 +- cleanup_entry(iter1, net);
4468 +- }
4469 +- xt_free_table_info(newinfo);
4470 +- return ret;
4471 ++ for (i = 0; i < NF_INET_NUMHOOKS; i++) {
4472 ++ repl.hook_entry[i] = newinfo->hook_entry[i];
4473 ++ repl.underflow[i] = newinfo->underflow[i];
4474 + }
4475 +
4476 ++ repl.num_counters = 0;
4477 ++ repl.counters = NULL;
4478 ++ repl.size = newinfo->size;
4479 ++ ret = translate_table(net, newinfo, entry1, &repl);
4480 ++ if (ret)
4481 ++ goto free_newinfo;
4482 ++
4483 + *pinfo = newinfo;
4484 + *pentry0 = entry1;
4485 + xt_free_table_info(info);
4486 +@@ -1793,17 +1644,16 @@ translate_compat_table(struct net *net,
4487 +
4488 + free_newinfo:
4489 + xt_free_table_info(newinfo);
4490 +-out:
4491 +- xt_entry_foreach(iter0, entry0, total_size) {
4492 ++ return ret;
4493 ++out_unlock:
4494 ++ xt_compat_flush_offsets(AF_INET6);
4495 ++ xt_compat_unlock(AF_INET6);
4496 ++ xt_entry_foreach(iter0, entry0, compatr->size) {
4497 + if (j-- == 0)
4498 + break;
4499 + compat_release_entry(iter0);
4500 + }
4501 + return ret;
4502 +-out_unlock:
4503 +- xt_compat_flush_offsets(AF_INET6);
4504 +- xt_compat_unlock(AF_INET6);
4505 +- goto out;
4506 + }
4507 +
4508 + static int
4509 +@@ -1839,10 +1689,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
4510 + goto free_newinfo;
4511 + }
4512 +
4513 +- ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
4514 +- &newinfo, &loc_cpu_entry, tmp.size,
4515 +- tmp.num_entries, tmp.hook_entry,
4516 +- tmp.underflow);
4517 ++ ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
4518 + if (ret != 0)
4519 + goto free_newinfo;
4520 +
4521 +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
4522 +index b8d405623f4f..1a1cd3938fd0 100644
4523 +--- a/net/ipv6/tcp_ipv6.c
4524 ++++ b/net/ipv6/tcp_ipv6.c
4525 +@@ -1706,7 +1706,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
4526 + destp = ntohs(inet->inet_dport);
4527 + srcp = ntohs(inet->inet_sport);
4528 +
4529 +- if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
4530 ++ if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
4531 ++ icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
4532 ++ icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
4533 + timer_active = 1;
4534 + timer_expires = icsk->icsk_timeout;
4535 + } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
4536 +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
4537 +index 6665e1a0bfe1..275af43306f9 100644
4538 +--- a/net/ipv6/udp.c
4539 ++++ b/net/ipv6/udp.c
4540 +@@ -647,7 +647,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
4541 +
4542 + /* if we're overly short, let UDP handle it */
4543 + encap_rcv = ACCESS_ONCE(up->encap_rcv);
4544 +- if (skb->len > sizeof(struct udphdr) && encap_rcv) {
4545 ++ if (encap_rcv) {
4546 + int ret;
4547 +
4548 + /* Verify checksum before giving to encap */
4549 +diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
4550 +index afca2eb4dfa7..ec17cbe8a02b 100644
4551 +--- a/net/l2tp/l2tp_core.c
4552 ++++ b/net/l2tp/l2tp_core.c
4553 +@@ -1581,7 +1581,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
4554 + /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
4555 + tunnel->encap = encap;
4556 + if (encap == L2TP_ENCAPTYPE_UDP) {
4557 +- struct udp_tunnel_sock_cfg udp_cfg;
4558 ++ struct udp_tunnel_sock_cfg udp_cfg = { };
4559 +
4560 + udp_cfg.sk_user_data = tunnel;
4561 + udp_cfg.encap_type = UDP_ENCAP_L2TPINUDP;
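
The one-line l2tp change above is a classic stack-initialization fix: udp_cfg has more members than the two assigned to it, and without the "= { }" initializer the remaining fields (function pointers among them) reach the UDP tunnel code containing stack garbage. A small self-contained illustration of the idiom; the struct and field names here are made up, only the initializer pattern is the point.

    #include <stdio.h>

    struct tunnel_cfg {
        void *sk_user_data;
        int encap_type;
        void (*encap_destroy)(void);   /* never assigned by this caller */
    };

    int main(void)
    {
        /* '{ 0 }' (kernel style: '{ }') zero-initializes every member,
         * so the unassigned callback is reliably NULL instead of
         * whatever happened to be on the stack. */
        struct tunnel_cfg cfg = { 0 };

        cfg.sk_user_data = &cfg;
        cfg.encap_type = 3;
        printf("encap_destroy is %s\n", cfg.encap_destroy ? "set" : "NULL");
        return 0;
    }
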
4562 +diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
4563 +index d4aaad747ea9..25391fb25516 100644
4564 +--- a/net/netfilter/x_tables.c
4565 ++++ b/net/netfilter/x_tables.c
4566 +@@ -415,6 +415,47 @@ int xt_check_match(struct xt_mtchk_param *par,
4567 + }
4568 + EXPORT_SYMBOL_GPL(xt_check_match);
4569 +
4570 ++/** xt_check_entry_match - check that matches end before start of target
4571 ++ *
4572 ++ * @match: beginning of xt_entry_match
4573 ++ * @target: beginning of this rule's target (alleged end of matches)
4574 ++ * @alignment: alignment requirement of match structures
4575 ++ *
4576 ++ * Validates that all matches add up to the beginning of the target,
4577 ++ * and that each match covers at least the base structure size.
4578 ++ *
4579 ++ * Return: 0 on success, negative errno on failure.
4580 ++ */
4581 ++static int xt_check_entry_match(const char *match, const char *target,
4582 ++ const size_t alignment)
4583 ++{
4584 ++ const struct xt_entry_match *pos;
4585 ++ int length = target - match;
4586 ++
4587 ++ if (length == 0) /* no matches */
4588 ++ return 0;
4589 ++
4590 ++ pos = (struct xt_entry_match *)match;
4591 ++ do {
4592 ++ if ((unsigned long)pos % alignment)
4593 ++ return -EINVAL;
4594 ++
4595 ++ if (length < (int)sizeof(struct xt_entry_match))
4596 ++ return -EINVAL;
4597 ++
4598 ++ if (pos->u.match_size < sizeof(struct xt_entry_match))
4599 ++ return -EINVAL;
4600 ++
4601 ++ if (pos->u.match_size > length)
4602 ++ return -EINVAL;
4603 ++
4604 ++ length -= pos->u.match_size;
4605 ++ pos = ((void *)((char *)(pos) + (pos)->u.match_size));
4606 ++ } while (length > 0);
4607 ++
4608 ++ return 0;
4609 ++}
4610 ++
4611 + #ifdef CONFIG_COMPAT
4612 + int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
4613 + {
4614 +@@ -484,13 +525,14 @@ int xt_compat_match_offset(const struct xt_match *match)
4615 + }
4616 + EXPORT_SYMBOL_GPL(xt_compat_match_offset);
4617 +
4618 +-int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
4619 +- unsigned int *size)
4620 ++void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
4621 ++ unsigned int *size)
4622 + {
4623 + const struct xt_match *match = m->u.kernel.match;
4624 + struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
4625 + int pad, off = xt_compat_match_offset(match);
4626 + u_int16_t msize = cm->u.user.match_size;
4627 ++ char name[sizeof(m->u.user.name)];
4628 +
4629 + m = *dstptr;
4630 + memcpy(m, cm, sizeof(*cm));
4631 +@@ -504,10 +546,12 @@ int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
4632 +
4633 + msize += off;
4634 + m->u.user.match_size = msize;
4635 ++ strlcpy(name, match->name, sizeof(name));
4636 ++ module_put(match->me);
4637 ++ strncpy(m->u.user.name, name, sizeof(m->u.user.name));
4638 +
4639 + *size += off;
4640 + *dstptr += msize;
4641 +- return 0;
4642 + }
4643 + EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
4644 +
4645 +@@ -538,8 +582,125 @@ int xt_compat_match_to_user(const struct xt_entry_match *m,
4646 + return 0;
4647 + }
4648 + EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
4649 ++
4650 ++/* non-compat version may have padding after verdict */
4651 ++struct compat_xt_standard_target {
4652 ++ struct compat_xt_entry_target t;
4653 ++ compat_uint_t verdict;
4654 ++};
4655 ++
4656 ++int xt_compat_check_entry_offsets(const void *base, const char *elems,
4657 ++ unsigned int target_offset,
4658 ++ unsigned int next_offset)
4659 ++{
4660 ++ long size_of_base_struct = elems - (const char *)base;
4661 ++ const struct compat_xt_entry_target *t;
4662 ++ const char *e = base;
4663 ++
4664 ++ if (target_offset < size_of_base_struct)
4665 ++ return -EINVAL;
4666 ++
4667 ++ if (target_offset + sizeof(*t) > next_offset)
4668 ++ return -EINVAL;
4669 ++
4670 ++ t = (void *)(e + target_offset);
4671 ++ if (t->u.target_size < sizeof(*t))
4672 ++ return -EINVAL;
4673 ++
4674 ++ if (target_offset + t->u.target_size > next_offset)
4675 ++ return -EINVAL;
4676 ++
4677 ++ if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
4678 ++ COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset)
4679 ++ return -EINVAL;
4680 ++
4681 ++ /* compat_xt_entry_match has less strict alignment requirements,
4682 ++ * otherwise they are identical. In case of padding differences
4683 ++ * we need to add a compat version of xt_check_entry_match.
4684 ++ */
4685 ++ BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));
4686 ++
4687 ++ return xt_check_entry_match(elems, base + target_offset,
4688 ++ __alignof__(struct compat_xt_entry_match));
4689 ++}
4690 ++EXPORT_SYMBOL(xt_compat_check_entry_offsets);
4691 + #endif /* CONFIG_COMPAT */
4692 +
4693 ++/**
4694 ++ * xt_check_entry_offsets - validate arp/ip/ip6t_entry
4695 ++ *
4696 ++ * @base: pointer to arp/ip/ip6t_entry
4697 ++ * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems
4698 ++ * @target_offset: the arp/ip/ip6_t->target_offset
4699 ++ * @next_offset: the arp/ip/ip6_t->next_offset
4700 ++ *
4701 ++ * validates that target_offset and next_offset are sane and that all
4702 ++ * match sizes (if any) align with the target offset.
4703 ++ *
4704 ++ * This function does not validate the targets or matches themselves, it
4705 ++ * only tests that all the offsets and sizes are correct, that all
4706 ++ * match structures are aligned, and that the last structure ends where
4707 ++ * the target structure begins.
4708 ++ *
4709 ++ * Also see xt_compat_check_entry_offsets() for the CONFIG_COMPAT version.
4710 ++ *
4711 ++ * The arp/ip/ip6t_entry structure @base must have passed the following tests:
4712 ++ * - it must point to a valid memory location
4713 ++ * - base to base + next_offset must be accessible, i.e. not exceed allocated
4714 ++ * length.
4715 ++ *
4716 ++ * A well-formed entry looks like this:
4717 ++ *
4718 ++ * ip(6)t_entry match [mtdata] match [mtdata] target [tgdata] ip(6)t_entry
4719 ++ * e->elems[]-----' | |
4720 ++ * matchsize | |
4721 ++ * matchsize | |
4722 ++ * | |
4723 ++ * target_offset---------------------------------' |
4724 ++ * next_offset---------------------------------------------------'
4725 ++ *
4726 ++ * elems[]: flexible array member at end of ip(6)/arpt_entry struct.
4727 ++ * This is where matches (if any) and the target reside.
4728 ++ * target_offset: beginning of target.
4729 ++ * next_offset: start of the next rule; also: size of this rule.
4730 ++ * Since targets have a minimum size, target_offset + minlen <= next_offset.
4731 ++ *
4732 ++ * Every match stores its size, sum of sizes must not exceed target_offset.
4733 ++ *
4734 ++ * Return: 0 on success, negative errno on failure.
4735 ++ */
4736 ++int xt_check_entry_offsets(const void *base,
4737 ++ const char *elems,
4738 ++ unsigned int target_offset,
4739 ++ unsigned int next_offset)
4740 ++{
4741 ++ long size_of_base_struct = elems - (const char *)base;
4742 ++ const struct xt_entry_target *t;
4743 ++ const char *e = base;
4744 ++
4745 ++ /* target start is within the ip/ip6/arpt_entry struct */
4746 ++ if (target_offset < size_of_base_struct)
4747 ++ return -EINVAL;
4748 ++
4749 ++ if (target_offset + sizeof(*t) > next_offset)
4750 ++ return -EINVAL;
4751 ++
4752 ++ t = (void *)(e + target_offset);
4753 ++ if (t->u.target_size < sizeof(*t))
4754 ++ return -EINVAL;
4755 ++
4756 ++ if (target_offset + t->u.target_size > next_offset)
4757 ++ return -EINVAL;
4758 ++
4759 ++ if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
4760 ++ XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset)
4761 ++ return -EINVAL;
4762 ++
4763 ++ return xt_check_entry_match(elems, base + target_offset,
4764 ++ __alignof__(struct xt_entry_match));
4765 ++}
4766 ++EXPORT_SYMBOL(xt_check_entry_offsets);
4767 ++
4768 + int xt_check_target(struct xt_tgchk_param *par,
4769 + unsigned int size, u_int8_t proto, bool inv_proto)
4770 + {
4771 +@@ -590,6 +751,80 @@ int xt_check_target(struct xt_tgchk_param *par,
4772 + }
4773 + EXPORT_SYMBOL_GPL(xt_check_target);
4774 +
4775 ++/**
4776 ++ * xt_copy_counters_from_user - copy counters and metadata from userspace
4777 ++ *
4778 ++ * @user: src pointer to userspace memory
4779 ++ * @len: alleged size of userspace memory
4780 ++ * @info: where to store the xt_counters_info metadata
4781 ++ * @compat: true if the setsockopt call was made by a 32bit task on a 64bit kernel
4782 ++ *
4783 ++ * Copies counter metadata from @user and stores it in @info.
4784 ++ *
4785 ++ * vmallocs memory to hold the counters, then copies the counter data
4786 ++ * from @user to the new memory and returns a pointer to it.
4787 ++ *
4788 ++ * If @compat is true, @info gets converted automatically to the 64bit
4789 ++ * representation.
4790 ++ *
4791 ++ * The metadata associated with the counters is stored in @info.
4792 ++ *
4793 ++ * Return: returns a pointer that the caller has to test via IS_ERR().
4794 ++ * If IS_ERR() is false, the caller must vfree() the pointer.
4795 ++ */
4796 ++void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
4797 ++ struct xt_counters_info *info, bool compat)
4798 ++{
4799 ++ void *mem;
4800 ++ u64 size;
4801 ++
4802 ++#ifdef CONFIG_COMPAT
4803 ++ if (compat) {
4804 ++ /* structures only differ in size due to alignment */
4805 ++ struct compat_xt_counters_info compat_tmp;
4806 ++
4807 ++ if (len <= sizeof(compat_tmp))
4808 ++ return ERR_PTR(-EINVAL);
4809 ++
4810 ++ len -= sizeof(compat_tmp);
4811 ++ if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
4812 ++ return ERR_PTR(-EFAULT);
4813 ++
4814 ++ strlcpy(info->name, compat_tmp.name, sizeof(info->name));
4815 ++ info->num_counters = compat_tmp.num_counters;
4816 ++ user += sizeof(compat_tmp);
4817 ++ } else
4818 ++#endif
4819 ++ {
4820 ++ if (len <= sizeof(*info))
4821 ++ return ERR_PTR(-EINVAL);
4822 ++
4823 ++ len -= sizeof(*info);
4824 ++ if (copy_from_user(info, user, sizeof(*info)) != 0)
4825 ++ return ERR_PTR(-EFAULT);
4826 ++
4827 ++ info->name[sizeof(info->name) - 1] = '\0';
4828 ++ user += sizeof(*info);
4829 ++ }
4830 ++
4831 ++ size = sizeof(struct xt_counters);
4832 ++ size *= info->num_counters;
4833 ++
4834 ++ if (size != (u64)len)
4835 ++ return ERR_PTR(-EINVAL);
4836 ++
4837 ++ mem = vmalloc(len);
4838 ++ if (!mem)
4839 ++ return ERR_PTR(-ENOMEM);
4840 ++
4841 ++ if (copy_from_user(mem, user, len) == 0)
4842 ++ return mem;
4843 ++
4844 ++ vfree(mem);
4845 ++ return ERR_PTR(-EFAULT);
4846 ++}
4847 ++EXPORT_SYMBOL_GPL(xt_copy_counters_from_user);
4848 ++
4849 + #ifdef CONFIG_COMPAT
4850 + int xt_compat_target_offset(const struct xt_target *target)
4851 + {
4852 +@@ -605,6 +840,7 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
4853 + struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
4854 + int pad, off = xt_compat_target_offset(target);
4855 + u_int16_t tsize = ct->u.user.target_size;
4856 ++ char name[sizeof(t->u.user.name)];
4857 +
4858 + t = *dstptr;
4859 + memcpy(t, ct, sizeof(*ct));
4860 +@@ -618,6 +854,9 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
4861 +
4862 + tsize += off;
4863 + t->u.user.target_size = tsize;
4864 ++ strlcpy(name, target->name, sizeof(name));
4865 ++ module_put(target->me);
4866 ++ strncpy(t->u.user.name, name, sizeof(t->u.user.name));
4867 +
4868 + *size += off;
4869 + *dstptr += tsize;
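
The new xt_check_entry_match() above is the core of the hardening in this release: every match must fit inside the remaining space and the match sizes must add up exactly to target_offset. The following stand-alone sketch walks a match list the same way; the structures are simplified toys (no unions, no alignment checks), not the real xt layouts.

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified toy; the real xt_entry_match carries a union and data. */
    struct match { uint16_t match_size; };

    /* Walk matches the way xt_check_entry_match() does: each match_size
     * must cover at least the header, never overshoot the remaining
     * length, and the sum must land exactly on the target. */
    static int check_matches(const char *pos, const char *target)
    {
        int length = (int)(target - pos);

        while (length > 0) {
            const struct match *m = (const struct match *)pos;

            if (length < (int)sizeof(*m) ||
                m->match_size < sizeof(*m) ||
                m->match_size > length)
                return -1;

            length -= m->match_size;
            pos += m->match_size;
        }
        return 0;
    }

    int main(void)
    {
        uint16_t buf[16] = { 0 };

        buf[0] = 8;                 /* first match: 8 bytes */
        buf[4] = 8;                 /* second match at byte offset 8 */

        /* target begins at byte offset 16 -> sizes add up exactly */
        printf("well-formed: %s\n",
               check_matches((const char *)buf,
                             (const char *)buf + 16) == 0 ? "yes" : "no");
        return 0;
    }
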
4870 +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
4871 +index 992b35fb8615..7a5fa0c98377 100644
4872 +--- a/net/netlink/af_netlink.c
4873 ++++ b/net/netlink/af_netlink.c
4874 +@@ -2784,6 +2784,7 @@ static int netlink_dump(struct sock *sk)
4875 + struct netlink_callback *cb;
4876 + struct sk_buff *skb = NULL;
4877 + struct nlmsghdr *nlh;
4878 ++ struct module *module;
4879 + int len, err = -ENOBUFS;
4880 + int alloc_min_size;
4881 + int alloc_size;
4882 +@@ -2863,9 +2864,11 @@ static int netlink_dump(struct sock *sk)
4883 + cb->done(cb);
4884 +
4885 + nlk->cb_running = false;
4886 ++ module = cb->module;
4887 ++ skb = cb->skb;
4888 + mutex_unlock(nlk->cb_mutex);
4889 +- module_put(cb->module);
4890 +- consume_skb(cb->skb);
4891 ++ module_put(module);
4892 ++ consume_skb(skb);
4893 + return 0;
4894 +
4895 + errout_skb:
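
The netlink_dump() hunk above fixes a use-after-unlock: cb->module and cb->skb were read after cb_mutex was dropped, by which time another thread may already have recycled the callback state. The fix snapshots both values while the lock is still held. Here is the pattern in miniature; pthreads stand in for the kernel mutex and the structs are toys.

    #include <pthread.h>
    #include <stdio.h>

    struct cb   { int module; int skb; };
    struct sock { pthread_mutex_t mu; struct cb cb; int cb_running; };

    static void finish_dump(struct sock *s)
    {
        int module, skb;

        pthread_mutex_lock(&s->mu);
        s->cb_running = 0;
        module = s->cb.module;   /* snapshot while still locked ... */
        skb    = s->cb.skb;
        pthread_mutex_unlock(&s->mu);

        /* ... so that after the unlock only local copies are touched */
        printf("module_put(%d); consume_skb(%d)\n", module, skb);
    }

    int main(void)
    {
        struct sock s = { .mu = PTHREAD_MUTEX_INITIALIZER,
                          .cb = { .module = 1, .skb = 2 } };
        finish_dump(&s);
        return 0;
    }
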
4896 +diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
4897 +index d933cb89efac..5eb7694348b5 100644
4898 +--- a/net/openvswitch/vport-vxlan.c
4899 ++++ b/net/openvswitch/vport-vxlan.c
4900 +@@ -91,6 +91,8 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
4901 + struct vxlan_config conf = {
4902 + .no_share = true,
4903 + .flags = VXLAN_F_COLLECT_METADATA | VXLAN_F_UDP_ZERO_CSUM6_RX,
4904 ++ /* Don't restrict the packets that can be sent by MTU */
4905 ++ .mtu = IP_MAX_MTU,
4906 + };
4907 +
4908 + if (!options) {
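The openvswitch hunk sets conf.mtu explicitly because designated initializers zero every member they do not name; left unset, the field stays 0 and the tunnel device can end up with a small default MTU that clamps forwarded packets. A small illustration with simplified field names:

/* Unnamed members of a designated initializer are zero-initialized. */
#include <stdio.h>

struct tnl_config {
	int no_share;
	unsigned int flags;
	int mtu;
};

int main(void)
{
	struct tnl_config a = { .no_share = 1, .flags = 0x3 };		/* .mtu == 0 */
	struct tnl_config b = { .no_share = 1, .flags = 0x3, .mtu = 65535 };

	printf("a.mtu=%d b.mtu=%d\n", a.mtu, b.mtu);
	return 0;
}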
4909 +diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
4910 +index d5d7132ac847..1b58866175e6 100644
4911 +--- a/net/switchdev/switchdev.c
4912 ++++ b/net/switchdev/switchdev.c
4913 +@@ -1169,6 +1169,7 @@ int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
4914 + .obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB,
4915 + .dst = dst,
4916 + .dst_len = dst_len,
4917 ++ .fi = fi,
4918 + .tos = tos,
4919 + .type = type,
4920 + .nlflags = nlflags,
4921 +@@ -1177,8 +1178,6 @@ int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
4922 + struct net_device *dev;
4923 + int err = 0;
4924 +
4925 +- memcpy(&ipv4_fib.fi, fi, sizeof(ipv4_fib.fi));
4926 +-
4927 + /* Don't offload route if using custom ip rules or if
4928 + * IPv4 FIB offloading has been disabled completely.
4929 + */
4930 +@@ -1222,6 +1221,7 @@ int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
4931 + .obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB,
4932 + .dst = dst,
4933 + .dst_len = dst_len,
4934 ++ .fi = fi,
4935 + .tos = tos,
4936 + .type = type,
4937 + .nlflags = 0,
4938 +@@ -1230,8 +1230,6 @@ int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
4939 + struct net_device *dev;
4940 + int err = 0;
4941 +
4942 +- memcpy(&ipv4_fib.fi, fi, sizeof(ipv4_fib.fi));
4943 +-
4944 + if (!(fi->fib_flags & RTNH_F_OFFLOAD))
4945 + return 0;
4946 +
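Both switchdev hunks replace a by-value copy of the fib_info with the original pointer. struct fib_info ends in a variable-length array of nexthops allocated past the struct, so a sizeof()-bounded memcpy captures only the fixed header and loses the nexthops drivers need. A simplified stand-alone illustration:

/* A struct ending in a flexible array cannot be captured by a
 * sizeof()-bounded memcpy; only the original pointer reaches the
 * variable-length tail. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct nh { int ifindex; };

struct info {
	int nh_count;
	struct nh nhs[];	/* allocated past the end of the struct */
};

int main(void)
{
	struct info *orig = malloc(sizeof(*orig) + sizeof(struct nh));
	struct info copy;
	struct info *ref;

	orig->nh_count = 1;
	orig->nhs[0].ifindex = 42;

	memcpy(&copy, orig, sizeof(copy));	/* header only: nhs[] is lost */
	printf("copy.nh_count=%d, but copy.nhs[] is not valid storage\n",
	       copy.nh_count);

	ref = orig;				/* the pointer keeps the tail */
	printf("ref->nhs[0].ifindex=%d\n", ref->nhs[0].ifindex);

	free(orig);
	return 0;
}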
4947 +diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
4948 +index 1eadc95e1132..2ed732bfe94b 100644
4949 +--- a/net/tipc/netlink_compat.c
4950 ++++ b/net/tipc/netlink_compat.c
4951 +@@ -802,7 +802,7 @@ static int tipc_nl_compat_name_table_dump(struct tipc_nl_compat_msg *msg,
4952 + goto out;
4953 +
4954 + tipc_tlv_sprintf(msg->rep, "%-10u %s",
4955 +- nla_get_u32(publ[TIPC_NLA_PUBL_REF]),
4956 ++ nla_get_u32(publ[TIPC_NLA_PUBL_KEY]),
4957 + scope_str[nla_get_u32(publ[TIPC_NLA_PUBL_SCOPE])]);
4958 + out:
4959 + tipc_tlv_sprintf(msg->rep, "\n");
4960 +diff --git a/net/tipc/socket.c b/net/tipc/socket.c
4961 +index e53003cf7703..9b713e0ce00d 100644
4962 +--- a/net/tipc/socket.c
4963 ++++ b/net/tipc/socket.c
4964 +@@ -2814,6 +2814,9 @@ int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
4965 + if (err)
4966 + return err;
4967 +
4968 ++ if (!attrs[TIPC_NLA_SOCK])
4969 ++ return -EINVAL;
4970 ++
4971 + err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
4972 + attrs[TIPC_NLA_SOCK],
4973 + tipc_nl_sock_policy);
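The guard added to tipc_nl_publ_dump matters because a netlink attribute table has a NULL slot for every attribute the sender omitted; passing that NULL straight into nla_parse_nested() dereferences it. A minimal sketch of the check, with names that are simplified stand-ins for the netlink API:

/* Check an optional attribute slot before handing it to the parser. */
#include <stdio.h>

#define NLA_SOCK	2
#define NLA_MAX		4

static int parse_nested(const char *attr)
{
	/* a real nla_parse_nested() would walk attr's payload here */
	return 0;
}

int main(void)
{
	const char *attrs[NLA_MAX + 1] = { 0 };	/* NLA_SOCK was not sent */

	if (!attrs[NLA_SOCK]) {
		puts("-EINVAL: required attribute absent");
		return 1;
	}
	return parse_nested(attrs[NLA_SOCK]);
}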
4974 +diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
4975 +index b50ee5d622e1..c753211cb83f 100644
4976 +--- a/net/wireless/wext-core.c
4977 ++++ b/net/wireless/wext-core.c
4978 +@@ -955,8 +955,29 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
4979 + return private(dev, iwr, cmd, info, handler);
4980 + }
4981 + /* Old driver API : call driver ioctl handler */
4982 +- if (dev->netdev_ops->ndo_do_ioctl)
4983 +- return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
4984 ++ if (dev->netdev_ops->ndo_do_ioctl) {
4985 ++#ifdef CONFIG_COMPAT
4986 ++ if (info->flags & IW_REQUEST_FLAG_COMPAT) {
4987 ++ int ret = 0;
4988 ++ struct iwreq iwr_lcl;
4989 ++ struct compat_iw_point *iwp_compat = (void *) &iwr->u.data;
4990 ++
4991 ++ memcpy(&iwr_lcl, iwr, sizeof(struct iwreq));
4992 ++ iwr_lcl.u.data.pointer = compat_ptr(iwp_compat->pointer);
4993 ++ iwr_lcl.u.data.length = iwp_compat->length;
4994 ++ iwr_lcl.u.data.flags = iwp_compat->flags;
4995 ++
4996 ++ ret = dev->netdev_ops->ndo_do_ioctl(dev, (void *) &iwr_lcl, cmd);
4997 ++
4998 ++ iwp_compat->pointer = ptr_to_compat(iwr_lcl.u.data.pointer);
4999 ++ iwp_compat->length = iwr_lcl.u.data.length;
5000 ++ iwp_compat->flags = iwr_lcl.u.data.flags;
5001 ++
5002 ++ return ret;
5003 ++ } else
5004 ++#endif
5005 ++ return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
5006 ++ }
5007 + return -EOPNOTSUPP;
5008 + }
5009 +
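The wext hunk translates a 32-bit caller's iw_point in place: the embedded user pointer is widened with compat_ptr() into a native struct iwreq before the driver ioctl runs, and the results are narrowed back afterwards. A user-space sketch of the field shuffling; types are simplified, real compat user pointers fit in 32 bits by construction, and the round-tripped pointer is never dereferenced here:

/* Widen a 32-bit "pointer" field into a native copy, call the handler,
 * then narrow the results back into the compat layout. */
#include <stdint.h>
#include <stdio.h>

struct compat_point { uint32_t pointer; uint16_t length, flags; };
struct native_point { void *pointer; uint16_t length, flags; };

static int do_ioctl(struct native_point *p)
{
	p->length = 7;		/* pretend the driver filled in data */
	return 0;
}

int main(void)
{
	char buf[16];
	struct compat_point cp = { (uint32_t)(uintptr_t)buf, sizeof(buf), 0 };
	struct native_point np;
	int ret;

	np.pointer = (void *)(uintptr_t)cp.pointer;	/* widen, like compat_ptr() */
	np.length = cp.length;
	np.flags = cp.flags;

	ret = do_ioctl(&np);

	cp.pointer = (uint32_t)(uintptr_t)np.pointer;	/* narrow back */
	cp.length = np.length;
	cp.flags = np.flags;

	printf("ret=%d length=%u\n", ret, cp.length);
	return 0;
}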
5010 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
5011 +index 411630e9c034..1475440b70aa 100644
5012 +--- a/sound/pci/hda/hda_intel.c
5013 ++++ b/sound/pci/hda/hda_intel.c
5014 +@@ -359,8 +359,11 @@ enum {
5015 +
5016 + #define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
5017 + #define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
5018 ++#define IS_KBL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa171)
5019 ++#define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71)
5020 + #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
5021 +-#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
5022 ++#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci) || \
5023 ++			IS_KBL(pci) || IS_KBL_LP(pci))
5024 +
5025 + static char *driver_short_names[] = {
5026 + [AZX_DRIVER_ICH] = "HDA Intel",
5027 +@@ -2204,6 +2207,12 @@ static const struct pci_device_id azx_ids[] = {
5028 + /* Sunrise Point-LP */
5029 + { PCI_DEVICE(0x8086, 0x9d70),
5030 + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
5031 ++ /* Kabylake */
5032 ++ { PCI_DEVICE(0x8086, 0xa171),
5033 ++ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
5034 ++ /* Kabylake-LP */
5035 ++ { PCI_DEVICE(0x8086, 0x9d71),
5036 ++ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
5037 + /* Broxton-P(Apollolake) */
5038 + { PCI_DEVICE(0x8086, 0x5a98),
5039 + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
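One subtlety in the IS_SKL_PLUS hunk: all five platform checks have to sit inside a single pair of parentheses, otherwise a negated use such as !IS_SKL_PLUS(pci) binds only to the first group. A stand-alone demonstration with made-up macros:

/* Why a multi-term macro needs outer parentheses. */
#include <stdio.h>

#define BAD(x)	(x == 1) || (x == 2)
#define GOOD(x)	((x == 1) || (x == 2))

int main(void)
{
	int x = 2;

	/* !BAD(x) expands to  !(x == 1) || (x == 2)  -> 1, wrongly */
	printf("!BAD(2)  = %d\n", !BAD(x));
	/* !GOOD(x) expands to !((x == 1) || (x == 2)) -> 0, as intended */
	printf("!GOOD(2) = %d\n", !GOOD(x));
	return 0;
}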
5040 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5041 +index d53c25e7a1c1..0fe18ede3e85 100644
5042 +--- a/sound/pci/hda/patch_realtek.c
5043 ++++ b/sound/pci/hda/patch_realtek.c
5044 +@@ -346,6 +346,9 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
5045 + case 0x10ec0234:
5046 + case 0x10ec0274:
5047 + case 0x10ec0294:
5048 ++ case 0x10ec0700:
5049 ++ case 0x10ec0701:
5050 ++ case 0x10ec0703:
5051 + alc_update_coef_idx(codec, 0x10, 1<<15, 0);
5052 + break;
5053 + case 0x10ec0662:
5054 +@@ -2655,6 +2658,7 @@ enum {
5055 + ALC269_TYPE_ALC256,
5056 + ALC269_TYPE_ALC225,
5057 + ALC269_TYPE_ALC294,
5058 ++ ALC269_TYPE_ALC700,
5059 + };
5060 +
5061 + /*
5062 +@@ -2686,6 +2690,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
5063 + case ALC269_TYPE_ALC256:
5064 + case ALC269_TYPE_ALC225:
5065 + case ALC269_TYPE_ALC294:
5066 ++ case ALC269_TYPE_ALC700:
5067 + ssids = alc269_ssids;
5068 + break;
5069 + default:
5070 +@@ -3618,13 +3623,20 @@ static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec,
5071 + static void alc_headset_mode_unplugged(struct hda_codec *codec)
5072 + {
5073 + static struct coef_fw coef0255[] = {
5074 +- WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
5075 + WRITE_COEF(0x45, 0xd089), /* UAJ function set to manual mode */
5076 + UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/
5077 + WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */
5078 + WRITE_COEFEX(0x57, 0x03, 0x8aa6), /* Direct Drive HP Amp control */
5079 + {}
5080 + };
5081 ++ static struct coef_fw coef0255_1[] = {
5082 ++ WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
5083 ++ {}
5084 ++ };
5085 ++ static struct coef_fw coef0256[] = {
5086 ++ WRITE_COEF(0x1b, 0x0c4b), /* LDO and MISC control */
5087 ++ {}
5088 ++ };
5089 + static struct coef_fw coef0233[] = {
5090 + WRITE_COEF(0x1b, 0x0c0b),
5091 + WRITE_COEF(0x45, 0xc429),
5092 +@@ -3677,7 +3689,11 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
5093 +
5094 + switch (codec->core.vendor_id) {
5095 + case 0x10ec0255:
5096 ++ alc_process_coef_fw(codec, coef0255_1);
5097 ++ alc_process_coef_fw(codec, coef0255);
5098 ++ break;
5099 + case 0x10ec0256:
5100 ++ alc_process_coef_fw(codec, coef0256);
5101 + alc_process_coef_fw(codec, coef0255);
5102 + break;
5103 + case 0x10ec0233:
5104 +@@ -3896,6 +3912,12 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
5105 + WRITE_COEFEX(0x57, 0x03, 0x8ea6),
5106 + {}
5107 + };
5108 ++ static struct coef_fw coef0256[] = {
5109 ++ WRITE_COEF(0x45, 0xd489), /* Set to CTIA type */
5110 ++ WRITE_COEF(0x1b, 0x0c6b),
5111 ++ WRITE_COEFEX(0x57, 0x03, 0x8ea6),
5112 ++ {}
5113 ++ };
5114 + static struct coef_fw coef0233[] = {
5115 + WRITE_COEF(0x45, 0xd429),
5116 + WRITE_COEF(0x1b, 0x0c2b),
5117 +@@ -3936,9 +3958,11 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
5118 +
5119 + switch (codec->core.vendor_id) {
5120 + case 0x10ec0255:
5121 +- case 0x10ec0256:
5122 + alc_process_coef_fw(codec, coef0255);
5123 + break;
5124 ++ case 0x10ec0256:
5125 ++ alc_process_coef_fw(codec, coef0256);
5126 ++ break;
5127 + case 0x10ec0233:
5128 + case 0x10ec0283:
5129 + alc_process_coef_fw(codec, coef0233);
5130 +@@ -3978,6 +4002,12 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
5131 + WRITE_COEFEX(0x57, 0x03, 0x8ea6),
5132 + {}
5133 + };
5134 ++ static struct coef_fw coef0256[] = {
5135 ++ WRITE_COEF(0x45, 0xe489), /* Set to OMTP Type */
5136 ++ WRITE_COEF(0x1b, 0x0c6b),
5137 ++ WRITE_COEFEX(0x57, 0x03, 0x8ea6),
5138 ++ {}
5139 ++ };
5140 + static struct coef_fw coef0233[] = {
5141 + WRITE_COEF(0x45, 0xe429),
5142 + WRITE_COEF(0x1b, 0x0c2b),
5143 +@@ -4018,9 +4048,11 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
5144 +
5145 + switch (codec->core.vendor_id) {
5146 + case 0x10ec0255:
5147 +- case 0x10ec0256:
5148 + alc_process_coef_fw(codec, coef0255);
5149 + break;
5150 ++ case 0x10ec0256:
5151 ++ alc_process_coef_fw(codec, coef0256);
5152 ++ break;
5153 + case 0x10ec0233:
5154 + case 0x10ec0283:
5155 + alc_process_coef_fw(codec, coef0233);
5156 +@@ -4266,7 +4298,7 @@ static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
5157 + static void alc255_set_default_jack_type(struct hda_codec *codec)
5158 + {
5159 + /* Set to iphone type */
5160 +- static struct coef_fw fw[] = {
5161 ++ static struct coef_fw alc255fw[] = {
5162 + WRITE_COEF(0x1b, 0x880b),
5163 + WRITE_COEF(0x45, 0xd089),
5164 + WRITE_COEF(0x1b, 0x080b),
5165 +@@ -4274,7 +4306,22 @@ static void alc255_set_default_jack_type(struct hda_codec *codec)
5166 + WRITE_COEF(0x1b, 0x0c0b),
5167 + {}
5168 + };
5169 +- alc_process_coef_fw(codec, fw);
5170 ++ static struct coef_fw alc256fw[] = {
5171 ++ WRITE_COEF(0x1b, 0x884b),
5172 ++ WRITE_COEF(0x45, 0xd089),
5173 ++ WRITE_COEF(0x1b, 0x084b),
5174 ++ WRITE_COEF(0x46, 0x0004),
5175 ++ WRITE_COEF(0x1b, 0x0c4b),
5176 ++ {}
5177 ++ };
5178 ++ switch (codec->core.vendor_id) {
5179 ++ case 0x10ec0255:
5180 ++ alc_process_coef_fw(codec, alc255fw);
5181 ++ break;
5182 ++ case 0x10ec0256:
5183 ++ alc_process_coef_fw(codec, alc256fw);
5184 ++ break;
5185 ++ }
5186 + msleep(30);
5187 + }
5188 +
5189 +@@ -5587,6 +5634,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5190 + SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
5191 + SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
5192 + SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
5193 ++ SND_PCI_QUIRK(0x17aa, 0x2231, "Thinkpad T560", ALC292_FIXUP_TPT460),
5194 + SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460),
5195 + SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
5196 + SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
5197 +@@ -5775,6 +5823,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
5198 + {0x12, 0x90a60180},
5199 + {0x14, 0x90170130},
5200 + {0x21, 0x02211040}),
5201 ++ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell Inspiron 5565", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5202 ++ {0x12, 0x90a60180},
5203 ++ {0x14, 0x90170120},
5204 ++ {0x21, 0x02211030}),
5205 + SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5206 + {0x12, 0x90a60160},
5207 + {0x14, 0x90170120},
5208 +@@ -6053,6 +6105,14 @@ static int patch_alc269(struct hda_codec *codec)
5209 + case 0x10ec0294:
5210 + spec->codec_variant = ALC269_TYPE_ALC294;
5211 + break;
5212 ++ case 0x10ec0700:
5213 ++ case 0x10ec0701:
5214 ++ case 0x10ec0703:
5215 ++ spec->codec_variant = ALC269_TYPE_ALC700;
5216 ++ spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
5217 ++ alc_update_coef_idx(codec, 0x4a, 0, 1 << 15); /* Combo jack auto trigger control */
5218 ++ break;
5219 ++
5220 + }
5221 +
5222 + if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) {
5223 +@@ -7008,6 +7068,9 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
5224 + HDA_CODEC_ENTRY(0x10ec0670, "ALC670", patch_alc662),
5225 + HDA_CODEC_ENTRY(0x10ec0671, "ALC671", patch_alc662),
5226 + HDA_CODEC_ENTRY(0x10ec0680, "ALC680", patch_alc680),
5227 ++ HDA_CODEC_ENTRY(0x10ec0700, "ALC700", patch_alc269),
5228 ++ HDA_CODEC_ENTRY(0x10ec0701, "ALC701", patch_alc269),
5229 ++ HDA_CODEC_ENTRY(0x10ec0703, "ALC703", patch_alc269),
5230 + HDA_CODEC_ENTRY(0x10ec0867, "ALC891", patch_alc882),
5231 + HDA_CODEC_ENTRY(0x10ec0880, "ALC880", patch_alc880),
5232 + HDA_CODEC_ENTRY(0x10ec0882, "ALC882", patch_alc882),
5233 +diff --git a/virt/kvm/irqchip.c b/virt/kvm/irqchip.c
5234 +index f0b08a2a48ba..7d31d8c5b9ea 100644
5235 +--- a/virt/kvm/irqchip.c
5236 ++++ b/virt/kvm/irqchip.c
5237 +@@ -40,7 +40,7 @@ int kvm_irq_map_gsi(struct kvm *kvm,
5238 +
5239 + irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
5240 + lockdep_is_held(&kvm->irq_lock));
5241 +- if (gsi < irq_rt->nr_rt_entries) {
5242 ++ if (irq_rt && gsi < irq_rt->nr_rt_entries) {
5243 + hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
5244 + entries[n] = *e;
5245 + ++n;