From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.3 commit in: /
Date: Sun, 31 Jan 2016 23:31:13
Message-Id: 1454283071.5c6031723f80c0670aa5fe939f24cbcbbfc2cbcd.mpagano@gentoo
1 commit: 5c6031723f80c0670aa5fe939f24cbcbbfc2cbcd
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Sun Jan 31 23:31:11 2016 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Sun Jan 31 23:31:11 2016 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=5c603172
7
8 Linux patch 4.3.5
9
10 0000_README | 4 +
11 1004_linux-4.3.5.patch | 5981 ++++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 5985 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index 5f4c1bc..74a7d33 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -59,6 +59,10 @@ Patch: 1003_linux-4.3.4.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.3.4
21
22 +Patch: 1004_linux-4.3.5.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.3.5
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1004_linux-4.3.5.patch b/1004_linux-4.3.5.patch
31 new file mode 100644
32 index 0000000..e04b2cb
33 --- /dev/null
34 +++ b/1004_linux-4.3.5.patch
35 @@ -0,0 +1,5981 @@
36 +diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb
37 +index 864637f25bee..01c7a41c18ac 100644
38 +--- a/Documentation/ABI/testing/sysfs-bus-usb
39 ++++ b/Documentation/ABI/testing/sysfs-bus-usb
40 +@@ -114,19 +114,21 @@ Description:
41 + enabled for the device. Developer can write y/Y/1 or n/N/0 to
42 + the file to enable/disable the feature.
43 +
44 +-What: /sys/bus/usb/devices/.../power/usb3_hardware_lpm
45 +-Date: June 2015
46 ++What: /sys/bus/usb/devices/.../power/usb3_hardware_lpm_u1
47 ++ /sys/bus/usb/devices/.../power/usb3_hardware_lpm_u2
48 ++Date: November 2015
49 + Contact: Kevin Strasser <kevin.strasser@×××××××××××.com>
50 ++ Lu Baolu <baolu.lu@×××××××××××.com>
51 + Description:
52 + If CONFIG_PM is set and a USB 3.0 lpm-capable device is plugged
53 + in to a xHCI host which supports link PM, it will check if U1
54 + and U2 exit latencies have been set in the BOS descriptor; if
55 +- the check is is passed and the host supports USB3 hardware LPM,
56 ++ the check is passed and the host supports USB3 hardware LPM,
57 + USB3 hardware LPM will be enabled for the device and the USB
58 +- device directory will contain a file named
59 +- power/usb3_hardware_lpm. The file holds a string value (enable
60 +- or disable) indicating whether or not USB3 hardware LPM is
61 +- enabled for the device.
62 ++ device directory will contain two files named
63 ++ power/usb3_hardware_lpm_u1 and power/usb3_hardware_lpm_u2. These
64 ++ files hold a string value (enable or disable) indicating whether
65 ++ or not USB3 hardware LPM U1 or U2 is enabled for the device.
66 +
67 + What: /sys/bus/usb/devices/.../removable
68 + Date: February 2012
69 +diff --git a/Documentation/usb/power-management.txt b/Documentation/usb/power-management.txt
70 +index 4a15c90bc11d..0a94ffe17ab6 100644
71 +--- a/Documentation/usb/power-management.txt
72 ++++ b/Documentation/usb/power-management.txt
73 +@@ -537,17 +537,18 @@ relevant attribute files are usb2_hardware_lpm and usb3_hardware_lpm.
74 + can write y/Y/1 or n/N/0 to the file to enable/disable
75 + USB2 hardware LPM manually. This is for test purpose mainly.
76 +
77 +- power/usb3_hardware_lpm
78 ++ power/usb3_hardware_lpm_u1
79 ++ power/usb3_hardware_lpm_u2
80 +
81 + When a USB 3.0 lpm-capable device is plugged in to a
82 + xHCI host which supports link PM, it will check if U1
83 + and U2 exit latencies have been set in the BOS
84 + descriptor; if the check is is passed and the host
85 + supports USB3 hardware LPM, USB3 hardware LPM will be
86 +- enabled for the device and this file will be created.
87 +- The file holds a string value (enable or disable)
88 +- indicating whether or not USB3 hardware LPM is
89 +- enabled for the device.
90 ++ enabled for the device and these files will be created.
91 ++ The files hold a string value (enable or disable)
92 ++ indicating whether or not USB3 hardware LPM U1 or U2
93 ++ is enabled for the device.
94 +
95 + USB Port Power Control
96 + ----------------------
97 +diff --git a/Makefile b/Makefile
98 +index 69430ed64270..efc7a766c470 100644
99 +--- a/Makefile
100 ++++ b/Makefile
101 +@@ -1,6 +1,6 @@
102 + VERSION = 4
103 + PATCHLEVEL = 3
104 +-SUBLEVEL = 4
105 ++SUBLEVEL = 5
106 + EXTRAVERSION =
107 + NAME = Blurry Fish Butt
108 +
109 +diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
110 +index 6984342da13d..61d96a645ff3 100644
111 +--- a/arch/arm/kvm/mmu.c
112 ++++ b/arch/arm/kvm/mmu.c
113 +@@ -98,6 +98,11 @@ static void kvm_flush_dcache_pud(pud_t pud)
114 + __kvm_flush_dcache_pud(pud);
115 + }
116 +
117 ++static bool kvm_is_device_pfn(unsigned long pfn)
118 ++{
119 ++ return !pfn_valid(pfn);
120 ++}
121 ++
122 + /**
123 + * stage2_dissolve_pmd() - clear and flush huge PMD entry
124 + * @kvm: pointer to kvm structure.
125 +@@ -213,7 +218,7 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
126 + kvm_tlb_flush_vmid_ipa(kvm, addr);
127 +
128 + /* No need to invalidate the cache for device mappings */
129 +- if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
130 ++ if (!kvm_is_device_pfn(pte_pfn(old_pte)))
131 + kvm_flush_dcache_pte(old_pte);
132 +
133 + put_page(virt_to_page(pte));
134 +@@ -305,8 +310,7 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
135 +
136 + pte = pte_offset_kernel(pmd, addr);
137 + do {
138 +- if (!pte_none(*pte) &&
139 +- (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
140 ++ if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
141 + kvm_flush_dcache_pte(*pte);
142 + } while (pte++, addr += PAGE_SIZE, addr != end);
143 + }
144 +@@ -1037,11 +1041,6 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
145 + return kvm_vcpu_dabt_iswrite(vcpu);
146 + }
147 +
148 +-static bool kvm_is_device_pfn(unsigned long pfn)
149 +-{
150 +- return !pfn_valid(pfn);
151 +-}
152 +-
153 + /**
154 + * stage2_wp_ptes - write protect PMD range
155 + * @pmd: pointer to pmd entry
156 +diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
157 +index b8efb8cd1f73..4d25fd0fae10 100644
158 +--- a/arch/arm/net/bpf_jit_32.c
159 ++++ b/arch/arm/net/bpf_jit_32.c
160 +@@ -182,19 +182,6 @@ static inline int mem_words_used(struct jit_ctx *ctx)
161 + return fls(ctx->seen & SEEN_MEM);
162 + }
163 +
164 +-static inline bool is_load_to_a(u16 inst)
165 +-{
166 +- switch (inst) {
167 +- case BPF_LD | BPF_W | BPF_LEN:
168 +- case BPF_LD | BPF_W | BPF_ABS:
169 +- case BPF_LD | BPF_H | BPF_ABS:
170 +- case BPF_LD | BPF_B | BPF_ABS:
171 +- return true;
172 +- default:
173 +- return false;
174 +- }
175 +-}
176 +-
177 + static void jit_fill_hole(void *area, unsigned int size)
178 + {
179 + u32 *ptr;
180 +@@ -206,7 +193,6 @@ static void jit_fill_hole(void *area, unsigned int size)
181 + static void build_prologue(struct jit_ctx *ctx)
182 + {
183 + u16 reg_set = saved_regs(ctx);
184 +- u16 first_inst = ctx->skf->insns[0].code;
185 + u16 off;
186 +
187 + #ifdef CONFIG_FRAME_POINTER
188 +@@ -236,7 +222,7 @@ static void build_prologue(struct jit_ctx *ctx)
189 + emit(ARM_MOV_I(r_X, 0), ctx);
190 +
191 + /* do not leak kernel data to userspace */
192 +- if ((first_inst != (BPF_RET | BPF_K)) && !(is_load_to_a(first_inst)))
193 ++ if (bpf_needs_clear_a(&ctx->skf->insns[0]))
194 + emit(ARM_MOV_I(r_A, 0), ctx);
195 +
196 + /* stack space for the BPF_MEM words */
197 +diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
198 +index 07d1811aa03f..a92266e634cd 100644
199 +--- a/arch/arm64/Kconfig
200 ++++ b/arch/arm64/Kconfig
201 +@@ -311,6 +311,27 @@ config ARM64_ERRATUM_832075
202 +
203 + If unsure, say Y.
204 +
205 ++config ARM64_ERRATUM_834220
206 ++ bool "Cortex-A57: 834220: Stage 2 translation fault might be incorrectly reported in presence of a Stage 1 fault"
207 ++ depends on KVM
208 ++ default y
209 ++ help
210 ++ This option adds an alternative code sequence to work around ARM
211 ++ erratum 834220 on Cortex-A57 parts up to r1p2.
212 ++
213 ++ Affected Cortex-A57 parts might report a Stage 2 translation
214 ++ fault as a the result of a Stage 1 fault for a load crossing
215 ++ a page boundary when there is a Stage 1 permission or device
216 ++ memory alignment fault and a Stage 2 translation fault
217 ++
218 ++ The workaround is to verify that the Stage-1 translation
219 ++ doesn't generate a fault before handling the Stage-2 fault.
220 ++ Please note that this does not necessarily enable the workaround,
221 ++ as it depends on the alternative framework, which will only patch
222 ++ the kernel if an affected CPU is detected.
223 ++
224 ++ If unsure, say Y.
225 ++
226 + config ARM64_ERRATUM_845719
227 + bool "Cortex-A53: 845719: a load might read incorrect data"
228 + depends on COMPAT
229 +diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
230 +index b3b5c4ae3800..af5b9d5c5c23 100644
231 +--- a/arch/arm64/include/asm/atomic_ll_sc.h
232 ++++ b/arch/arm64/include/asm/atomic_ll_sc.h
233 +@@ -211,7 +211,7 @@ __CMPXCHG_CASE( , , mb_8, dmb ish, l, "memory")
234 + #undef __CMPXCHG_CASE
235 +
236 + #define __CMPXCHG_DBL(name, mb, rel, cl) \
237 +-__LL_SC_INLINE int \
238 ++__LL_SC_INLINE long \
239 + __LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1, \
240 + unsigned long old2, \
241 + unsigned long new1, \
242 +diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
243 +index 55d740e63459..4d548aa54b21 100644
244 +--- a/arch/arm64/include/asm/atomic_lse.h
245 ++++ b/arch/arm64/include/asm/atomic_lse.h
246 +@@ -348,7 +348,7 @@ __CMPXCHG_CASE(x, , mb_8, al, "memory")
247 + #define __LL_SC_CMPXCHG_DBL(op) __LL_SC_CALL(__cmpxchg_double##op)
248 +
249 + #define __CMPXCHG_DBL(name, mb, cl...) \
250 +-static inline int __cmpxchg_double##name(unsigned long old1, \
251 ++static inline long __cmpxchg_double##name(unsigned long old1, \
252 + unsigned long old2, \
253 + unsigned long new1, \
254 + unsigned long new2, \
255 +diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
256 +index 171570702bb8..a1a5981526fe 100644
257 +--- a/arch/arm64/include/asm/cpufeature.h
258 ++++ b/arch/arm64/include/asm/cpufeature.h
259 +@@ -27,8 +27,9 @@
260 + #define ARM64_HAS_SYSREG_GIC_CPUIF 3
261 + #define ARM64_HAS_PAN 4
262 + #define ARM64_HAS_LSE_ATOMICS 5
263 ++#define ARM64_WORKAROUND_834220 6
264 +
265 +-#define ARM64_NCAPS 6
266 ++#define ARM64_NCAPS 7
267 +
268 + #ifndef __ASSEMBLY__
269 +
270 +diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
271 +index 17e92f05b1fe..3ca894ecf699 100644
272 +--- a/arch/arm64/include/asm/kvm_emulate.h
273 ++++ b/arch/arm64/include/asm/kvm_emulate.h
274 +@@ -99,11 +99,13 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
275 + *vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
276 + }
277 +
278 ++/*
279 ++ * vcpu_reg should always be passed a register number coming from a
280 ++ * read of ESR_EL2. Otherwise, it may give the wrong result on AArch32
281 ++ * with banked registers.
282 ++ */
283 + static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
284 + {
285 +- if (vcpu_mode_is_32bit(vcpu))
286 +- return vcpu_reg32(vcpu, reg_num);
287 +-
288 + return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
289 + }
290 +
291 +diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
292 +index 6ffd91438560..dc0df822def3 100644
293 +--- a/arch/arm64/kernel/cpu_errata.c
294 ++++ b/arch/arm64/kernel/cpu_errata.c
295 +@@ -74,6 +74,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
296 + (1 << MIDR_VARIANT_SHIFT) | 2),
297 + },
298 + #endif
299 ++#ifdef CONFIG_ARM64_ERRATUM_834220
300 ++ {
301 ++ /* Cortex-A57 r0p0 - r1p2 */
302 ++ .desc = "ARM erratum 834220",
303 ++ .capability = ARM64_WORKAROUND_834220,
304 ++ MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
305 ++ (1 << MIDR_VARIANT_SHIFT) | 2),
306 ++ },
307 ++#endif
308 + #ifdef CONFIG_ARM64_ERRATUM_845719
309 + {
310 + /* Cortex-A53 r0p[01234] */
311 +diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
312 +index 90d09eddd5b2..b84ef8376471 100644
313 +--- a/arch/arm64/kernel/head.S
314 ++++ b/arch/arm64/kernel/head.S
315 +@@ -524,9 +524,14 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
316 + #endif
317 +
318 + /* EL2 debug */
319 ++ mrs x0, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer
320 ++ sbfx x0, x0, #8, #4
321 ++ cmp x0, #1
322 ++ b.lt 4f // Skip if no PMU present
323 + mrs x0, pmcr_el0 // Disable debug access traps
324 + ubfx x0, x0, #11, #5 // to EL2 and allow access to
325 + msr mdcr_el2, x0 // all PMU counters from EL1
326 ++4:
327 +
328 + /* Stage-2 translation */
329 + msr vttbr_el2, xzr
330 +diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
331 +index f9a74d4fff3b..f325af5b38e3 100644
332 +--- a/arch/arm64/kernel/perf_event.c
333 ++++ b/arch/arm64/kernel/perf_event.c
334 +@@ -1159,9 +1159,6 @@ static void armv8pmu_reset(void *info)
335 +
336 + /* Initialize & Reset PMNC: C and P bits. */
337 + armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C);
338 +-
339 +- /* Disable access from userspace. */
340 +- asm volatile("msr pmuserenr_el0, %0" :: "r" (0));
341 + }
342 +
343 + static int armv8_pmuv3_map_event(struct perf_event *event)
344 +diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
345 +index 1971f491bb90..ff7f13239515 100644
346 +--- a/arch/arm64/kernel/ptrace.c
347 ++++ b/arch/arm64/kernel/ptrace.c
348 +@@ -58,6 +58,12 @@
349 + */
350 + void ptrace_disable(struct task_struct *child)
351 + {
352 ++ /*
353 ++ * This would be better off in core code, but PTRACE_DETACH has
354 ++ * grown its fair share of arch-specific worts and changing it
355 ++ * is likely to cause regressions on obscure architectures.
356 ++ */
357 ++ user_disable_single_step(child);
358 + }
359 +
360 + #ifdef CONFIG_HAVE_HW_BREAKPOINT
361 +diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
362 +index 232247945b1c..9f17dc72645a 100644
363 +--- a/arch/arm64/kernel/setup.c
364 ++++ b/arch/arm64/kernel/setup.c
365 +@@ -558,6 +558,10 @@ static int c_show(struct seq_file *m, void *v)
366 + */
367 + seq_printf(m, "processor\t: %d\n", i);
368 +
369 ++ seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
370 ++ loops_per_jiffy / (500000UL/HZ),
371 ++ loops_per_jiffy / (5000UL/HZ) % 100);
372 ++
373 + /*
374 + * Dump out the common processor features in a single line.
375 + * Userspace should read the hwcaps with getauxval(AT_HWCAP)
376 +diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
377 +index 44ca4143b013..dd6ad81d53aa 100644
378 +--- a/arch/arm64/kernel/suspend.c
379 ++++ b/arch/arm64/kernel/suspend.c
380 +@@ -1,3 +1,4 @@
381 ++#include <linux/ftrace.h>
382 + #include <linux/percpu.h>
383 + #include <linux/slab.h>
384 + #include <asm/cacheflush.h>
385 +@@ -71,6 +72,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
386 + local_dbg_save(flags);
387 +
388 + /*
389 ++ * Function graph tracer state gets incosistent when the kernel
390 ++ * calls functions that never return (aka suspend finishers) hence
391 ++ * disable graph tracing during their execution.
392 ++ */
393 ++ pause_graph_tracing();
394 ++
395 ++ /*
396 + * mm context saved on the stack, it will be restored when
397 + * the cpu comes out of reset through the identity mapped
398 + * page tables, so that the thread address space is properly
399 +@@ -111,6 +119,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
400 + hw_breakpoint_restore(NULL);
401 + }
402 +
403 ++ unpause_graph_tracing();
404 ++
405 + /*
406 + * Restore pstate flags. OS lock and mdscr have been already
407 + * restored, so from this point onwards, debugging is fully
408 +diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
409 +index e5836138ec42..3e840649b133 100644
410 +--- a/arch/arm64/kvm/hyp.S
411 ++++ b/arch/arm64/kvm/hyp.S
412 +@@ -1007,9 +1007,15 @@ el1_trap:
413 + b.ne 1f // Not an abort we care about
414 +
415 + /* This is an abort. Check for permission fault */
416 ++alternative_if_not ARM64_WORKAROUND_834220
417 + and x2, x1, #ESR_ELx_FSC_TYPE
418 + cmp x2, #FSC_PERM
419 + b.ne 1f // Not a permission fault
420 ++alternative_else
421 ++ nop // Force a Stage-1 translation to occur
422 ++ nop // and return to the guest if it failed
423 ++ nop
424 ++alternative_endif
425 +
426 + /*
427 + * Check for Stage-1 page table walk, which is guaranteed
428 +diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
429 +index 85c57158dcd9..648112e90ed5 100644
430 +--- a/arch/arm64/kvm/inject_fault.c
431 ++++ b/arch/arm64/kvm/inject_fault.c
432 +@@ -48,7 +48,7 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
433 +
434 + /* Note: These now point to the banked copies */
435 + *vcpu_spsr(vcpu) = new_spsr_value;
436 +- *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
437 ++ *vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
438 +
439 + /* Branch to exception vector */
440 + if (sctlr & (1 << 13))
441 +diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
442 +index 9211b8527f25..9317974c9b8e 100644
443 +--- a/arch/arm64/mm/mmu.c
444 ++++ b/arch/arm64/mm/mmu.c
445 +@@ -451,6 +451,9 @@ void __init paging_init(void)
446 +
447 + empty_zero_page = virt_to_page(zero_page);
448 +
449 ++ /* Ensure the zero page is visible to the page table walker */
450 ++ dsb(ishst);
451 ++
452 + /*
453 + * TTBR0 is only used for the identity mapping at this stage. Make it
454 + * point to zero page to avoid speculatively fetching new entries.
455 +diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S
456 +index 4c4d93c4bf65..d69dffffaa89 100644
457 +--- a/arch/arm64/mm/proc-macros.S
458 ++++ b/arch/arm64/mm/proc-macros.S
459 +@@ -62,3 +62,15 @@
460 + bfi \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
461 + #endif
462 + .endm
463 ++
464 ++/*
465 ++ * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
466 ++ */
467 ++ .macro reset_pmuserenr_el0, tmpreg
468 ++ mrs \tmpreg, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer
469 ++ sbfx \tmpreg, \tmpreg, #8, #4
470 ++ cmp \tmpreg, #1 // Skip if no PMU present
471 ++ b.lt 9000f
472 ++ msr pmuserenr_el0, xzr // Disable PMU access from EL0
473 ++9000:
474 ++ .endm
475 +diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
476 +index e4ee7bd8830a..b722d3e26185 100644
477 +--- a/arch/arm64/mm/proc.S
478 ++++ b/arch/arm64/mm/proc.S
479 +@@ -115,6 +115,7 @@ ENTRY(cpu_do_resume)
480 + */
481 + ubfx x11, x11, #1, #1
482 + msr oslar_el1, x11
483 ++ reset_pmuserenr_el0 x0 // Disable PMU access from EL0
484 + mov x0, x12
485 + dsb nsh // Make sure local tlb invalidation completed
486 + isb
487 +@@ -153,6 +154,7 @@ ENTRY(__cpu_setup)
488 + msr cpacr_el1, x0 // Enable FP/ASIMD
489 + mov x0, #1 << 12 // Reset mdscr_el1 and disable
490 + msr mdscr_el1, x0 // access to the DCC from EL0
491 ++ reset_pmuserenr_el0 x0 // Disable PMU access from EL0
492 + /*
493 + * Memory region attributes for LPAE:
494 + *
495 +diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
496 +index 98a26ce82d26..aee5637ea436 100644
497 +--- a/arch/arm64/net/bpf_jit.h
498 ++++ b/arch/arm64/net/bpf_jit.h
499 +@@ -1,7 +1,7 @@
500 + /*
501 + * BPF JIT compiler for ARM64
502 + *
503 +- * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@×××××.com>
504 ++ * Copyright (C) 2014-2015 Zi Shen Lim <zlim.lnx@×××××.com>
505 + *
506 + * This program is free software; you can redistribute it and/or modify
507 + * it under the terms of the GNU General Public License version 2 as
508 +@@ -35,6 +35,7 @@
509 + aarch64_insn_gen_comp_branch_imm(0, offset, Rt, A64_VARIANT(sf), \
510 + AARCH64_INSN_BRANCH_COMP_##type)
511 + #define A64_CBZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, ZERO)
512 ++#define A64_CBNZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, NONZERO)
513 +
514 + /* Conditional branch (immediate) */
515 + #define A64_COND_BRANCH(cond, offset) \
516 +diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
517 +index c047598b09e0..6217f80702d2 100644
518 +--- a/arch/arm64/net/bpf_jit_comp.c
519 ++++ b/arch/arm64/net/bpf_jit_comp.c
520 +@@ -1,7 +1,7 @@
521 + /*
522 + * BPF JIT compiler for ARM64
523 + *
524 +- * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@×××××.com>
525 ++ * Copyright (C) 2014-2015 Zi Shen Lim <zlim.lnx@×××××.com>
526 + *
527 + * This program is free software; you can redistribute it and/or modify
528 + * it under the terms of the GNU General Public License version 2 as
529 +@@ -225,6 +225,17 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
530 + u8 jmp_cond;
531 + s32 jmp_offset;
532 +
533 ++#define check_imm(bits, imm) do { \
534 ++ if ((((imm) > 0) && ((imm) >> (bits))) || \
535 ++ (((imm) < 0) && (~(imm) >> (bits)))) { \
536 ++ pr_info("[%2d] imm=%d(0x%x) out of range\n", \
537 ++ i, imm, imm); \
538 ++ return -EINVAL; \
539 ++ } \
540 ++} while (0)
541 ++#define check_imm19(imm) check_imm(19, imm)
542 ++#define check_imm26(imm) check_imm(26, imm)
543 ++
544 + switch (code) {
545 + /* dst = src */
546 + case BPF_ALU | BPF_MOV | BPF_X:
547 +@@ -258,15 +269,33 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
548 + break;
549 + case BPF_ALU | BPF_DIV | BPF_X:
550 + case BPF_ALU64 | BPF_DIV | BPF_X:
551 +- emit(A64_UDIV(is64, dst, dst, src), ctx);
552 +- break;
553 + case BPF_ALU | BPF_MOD | BPF_X:
554 + case BPF_ALU64 | BPF_MOD | BPF_X:
555 +- ctx->tmp_used = 1;
556 +- emit(A64_UDIV(is64, tmp, dst, src), ctx);
557 +- emit(A64_MUL(is64, tmp, tmp, src), ctx);
558 +- emit(A64_SUB(is64, dst, dst, tmp), ctx);
559 ++ {
560 ++ const u8 r0 = bpf2a64[BPF_REG_0];
561 ++
562 ++ /* if (src == 0) return 0 */
563 ++ jmp_offset = 3; /* skip ahead to else path */
564 ++ check_imm19(jmp_offset);
565 ++ emit(A64_CBNZ(is64, src, jmp_offset), ctx);
566 ++ emit(A64_MOVZ(1, r0, 0, 0), ctx);
567 ++ jmp_offset = epilogue_offset(ctx);
568 ++ check_imm26(jmp_offset);
569 ++ emit(A64_B(jmp_offset), ctx);
570 ++ /* else */
571 ++ switch (BPF_OP(code)) {
572 ++ case BPF_DIV:
573 ++ emit(A64_UDIV(is64, dst, dst, src), ctx);
574 ++ break;
575 ++ case BPF_MOD:
576 ++ ctx->tmp_used = 1;
577 ++ emit(A64_UDIV(is64, tmp, dst, src), ctx);
578 ++ emit(A64_MUL(is64, tmp, tmp, src), ctx);
579 ++ emit(A64_SUB(is64, dst, dst, tmp), ctx);
580 ++ break;
581 ++ }
582 + break;
583 ++ }
584 + case BPF_ALU | BPF_LSH | BPF_X:
585 + case BPF_ALU64 | BPF_LSH | BPF_X:
586 + emit(A64_LSLV(is64, dst, dst, src), ctx);
587 +@@ -393,17 +422,6 @@ emit_bswap_uxt:
588 + emit(A64_ASR(is64, dst, dst, imm), ctx);
589 + break;
590 +
591 +-#define check_imm(bits, imm) do { \
592 +- if ((((imm) > 0) && ((imm) >> (bits))) || \
593 +- (((imm) < 0) && (~(imm) >> (bits)))) { \
594 +- pr_info("[%2d] imm=%d(0x%x) out of range\n", \
595 +- i, imm, imm); \
596 +- return -EINVAL; \
597 +- } \
598 +-} while (0)
599 +-#define check_imm19(imm) check_imm(19, imm)
600 +-#define check_imm26(imm) check_imm(26, imm)
601 +-
602 + /* JUMP off */
603 + case BPF_JMP | BPF_JA:
604 + jmp_offset = bpf2a64_offset(i + off, i, ctx);
605 +diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
606 +index 0c4a133f6216..26e947d61040 100644
607 +--- a/arch/mips/net/bpf_jit.c
608 ++++ b/arch/mips/net/bpf_jit.c
609 +@@ -521,19 +521,6 @@ static inline u16 align_sp(unsigned int num)
610 + return num;
611 + }
612 +
613 +-static bool is_load_to_a(u16 inst)
614 +-{
615 +- switch (inst) {
616 +- case BPF_LD | BPF_W | BPF_LEN:
617 +- case BPF_LD | BPF_W | BPF_ABS:
618 +- case BPF_LD | BPF_H | BPF_ABS:
619 +- case BPF_LD | BPF_B | BPF_ABS:
620 +- return true;
621 +- default:
622 +- return false;
623 +- }
624 +-}
625 +-
626 + static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset)
627 + {
628 + int i = 0, real_off = 0;
629 +@@ -614,7 +601,6 @@ static unsigned int get_stack_depth(struct jit_ctx *ctx)
630 +
631 + static void build_prologue(struct jit_ctx *ctx)
632 + {
633 +- u16 first_inst = ctx->skf->insns[0].code;
634 + int sp_off;
635 +
636 + /* Calculate the total offset for the stack pointer */
637 +@@ -641,7 +627,7 @@ static void build_prologue(struct jit_ctx *ctx)
638 + emit_jit_reg_move(r_X, r_zero, ctx);
639 +
640 + /* Do not leak kernel data to userspace */
641 +- if ((first_inst != (BPF_RET | BPF_K)) && !(is_load_to_a(first_inst)))
642 ++ if (bpf_needs_clear_a(&ctx->skf->insns[0]))
643 + emit_jit_reg_move(r_A, r_zero, ctx);
644 + }
645 +
646 +diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
647 +index 4434b54e1d87..78ae5552fdb8 100644
648 +--- a/arch/mn10300/Kconfig
649 ++++ b/arch/mn10300/Kconfig
650 +@@ -1,6 +1,7 @@
651 + config MN10300
652 + def_bool y
653 + select HAVE_OPROFILE
654 ++ select HAVE_UID16
655 + select GENERIC_IRQ_SHOW
656 + select ARCH_WANT_IPC_PARSE_VERSION
657 + select HAVE_ARCH_TRACEHOOK
658 +@@ -37,9 +38,6 @@ config HIGHMEM
659 + config NUMA
660 + def_bool n
661 +
662 +-config UID16
663 +- def_bool y
664 +-
665 + config RWSEM_GENERIC_SPINLOCK
666 + def_bool y
667 +
668 +diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
669 +index ad6263cffb0f..d1a8d93cccfd 100644
670 +--- a/arch/powerpc/include/asm/cmpxchg.h
671 ++++ b/arch/powerpc/include/asm/cmpxchg.h
672 +@@ -18,12 +18,12 @@ __xchg_u32(volatile void *p, unsigned long val)
673 + unsigned long prev;
674 +
675 + __asm__ __volatile__(
676 +- PPC_RELEASE_BARRIER
677 ++ PPC_ATOMIC_ENTRY_BARRIER
678 + "1: lwarx %0,0,%2 \n"
679 + PPC405_ERR77(0,%2)
680 + " stwcx. %3,0,%2 \n\
681 + bne- 1b"
682 +- PPC_ACQUIRE_BARRIER
683 ++ PPC_ATOMIC_EXIT_BARRIER
684 + : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
685 + : "r" (p), "r" (val)
686 + : "cc", "memory");
687 +@@ -61,12 +61,12 @@ __xchg_u64(volatile void *p, unsigned long val)
688 + unsigned long prev;
689 +
690 + __asm__ __volatile__(
691 +- PPC_RELEASE_BARRIER
692 ++ PPC_ATOMIC_ENTRY_BARRIER
693 + "1: ldarx %0,0,%2 \n"
694 + PPC405_ERR77(0,%2)
695 + " stdcx. %3,0,%2 \n\
696 + bne- 1b"
697 +- PPC_ACQUIRE_BARRIER
698 ++ PPC_ATOMIC_EXIT_BARRIER
699 + : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
700 + : "r" (p), "r" (val)
701 + : "cc", "memory");
702 +@@ -151,14 +151,14 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
703 + unsigned int prev;
704 +
705 + __asm__ __volatile__ (
706 +- PPC_RELEASE_BARRIER
707 ++ PPC_ATOMIC_ENTRY_BARRIER
708 + "1: lwarx %0,0,%2 # __cmpxchg_u32\n\
709 + cmpw 0,%0,%3\n\
710 + bne- 2f\n"
711 + PPC405_ERR77(0,%2)
712 + " stwcx. %4,0,%2\n\
713 + bne- 1b"
714 +- PPC_ACQUIRE_BARRIER
715 ++ PPC_ATOMIC_EXIT_BARRIER
716 + "\n\
717 + 2:"
718 + : "=&r" (prev), "+m" (*p)
719 +@@ -197,13 +197,13 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
720 + unsigned long prev;
721 +
722 + __asm__ __volatile__ (
723 +- PPC_RELEASE_BARRIER
724 ++ PPC_ATOMIC_ENTRY_BARRIER
725 + "1: ldarx %0,0,%2 # __cmpxchg_u64\n\
726 + cmpd 0,%0,%3\n\
727 + bne- 2f\n\
728 + stdcx. %4,0,%2\n\
729 + bne- 1b"
730 +- PPC_ACQUIRE_BARRIER
731 ++ PPC_ATOMIC_EXIT_BARRIER
732 + "\n\
733 + 2:"
734 + : "=&r" (prev), "+m" (*p)
735 +diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
736 +index a908ada8e0a5..2220f7a60def 100644
737 +--- a/arch/powerpc/include/asm/reg.h
738 ++++ b/arch/powerpc/include/asm/reg.h
739 +@@ -108,6 +108,7 @@
740 + #define MSR_TS_T __MASK(MSR_TS_T_LG) /* Transaction Transactional */
741 + #define MSR_TS_MASK (MSR_TS_T | MSR_TS_S) /* Transaction State bits */
742 + #define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */
743 ++#define MSR_TM_RESV(x) (((x) & MSR_TS_MASK) == MSR_TS_MASK) /* Reserved */
744 + #define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T)
745 + #define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S)
746 +
747 +diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
748 +index e682a7143edb..c50868681f9e 100644
749 +--- a/arch/powerpc/include/asm/synch.h
750 ++++ b/arch/powerpc/include/asm/synch.h
751 +@@ -44,7 +44,7 @@ static inline void isync(void)
752 + MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup);
753 + #define PPC_ACQUIRE_BARRIER "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
754 + #define PPC_RELEASE_BARRIER stringify_in_c(LWSYNC) "\n"
755 +-#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(LWSYNC) "\n"
756 ++#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(sync) "\n"
757 + #define PPC_ATOMIC_EXIT_BARRIER "\n" stringify_in_c(sync) "\n"
758 + #else
759 + #define PPC_ACQUIRE_BARRIER
760 +diff --git a/arch/powerpc/include/uapi/asm/elf.h b/arch/powerpc/include/uapi/asm/elf.h
761 +index 59dad113897b..c2d21d11c2d2 100644
762 +--- a/arch/powerpc/include/uapi/asm/elf.h
763 ++++ b/arch/powerpc/include/uapi/asm/elf.h
764 +@@ -295,6 +295,8 @@ do { \
765 + #define R_PPC64_TLSLD 108
766 + #define R_PPC64_TOCSAVE 109
767 +
768 ++#define R_PPC64_ENTRY 118
769 ++
770 + #define R_PPC64_REL16 249
771 + #define R_PPC64_REL16_LO 250
772 + #define R_PPC64_REL16_HI 251
773 +diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
774 +index 68384514506b..59663af9315f 100644
775 +--- a/arch/powerpc/kernel/module_64.c
776 ++++ b/arch/powerpc/kernel/module_64.c
777 +@@ -635,6 +635,33 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
778 + */
779 + break;
780 +
781 ++ case R_PPC64_ENTRY:
782 ++ /*
783 ++ * Optimize ELFv2 large code model entry point if
784 ++ * the TOC is within 2GB range of current location.
785 ++ */
786 ++ value = my_r2(sechdrs, me) - (unsigned long)location;
787 ++ if (value + 0x80008000 > 0xffffffff)
788 ++ break;
789 ++ /*
790 ++ * Check for the large code model prolog sequence:
791 ++ * ld r2, ...(r12)
792 ++ * add r2, r2, r12
793 ++ */
794 ++ if ((((uint32_t *)location)[0] & ~0xfffc)
795 ++ != 0xe84c0000)
796 ++ break;
797 ++ if (((uint32_t *)location)[1] != 0x7c426214)
798 ++ break;
799 ++ /*
800 ++ * If found, replace it with:
801 ++ * addis r2, r12, (.TOC.-func)@ha
802 ++ * addi r2, r12, (.TOC.-func)@l
803 ++ */
804 ++ ((uint32_t *)location)[0] = 0x3c4c0000 + PPC_HA(value);
805 ++ ((uint32_t *)location)[1] = 0x38420000 + PPC_LO(value);
806 ++ break;
807 ++
808 + case R_PPC64_REL16_HA:
809 + /* Subtract location pointer */
810 + value -= (unsigned long)location;
811 +diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
812 +index 75b6676c1a0b..646bf4d222c1 100644
813 +--- a/arch/powerpc/kernel/process.c
814 ++++ b/arch/powerpc/kernel/process.c
815 +@@ -551,6 +551,24 @@ static void tm_reclaim_thread(struct thread_struct *thr,
816 + msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1;
817 + }
818 +
819 ++ /*
820 ++ * Use the current MSR TM suspended bit to track if we have
821 ++ * checkpointed state outstanding.
822 ++ * On signal delivery, we'd normally reclaim the checkpointed
823 ++ * state to obtain stack pointer (see:get_tm_stackpointer()).
824 ++ * This will then directly return to userspace without going
825 ++ * through __switch_to(). However, if the stack frame is bad,
826 ++ * we need to exit this thread which calls __switch_to() which
827 ++ * will again attempt to reclaim the already saved tm state.
828 ++ * Hence we need to check that we've not already reclaimed
829 ++ * this state.
830 ++ * We do this using the current MSR, rather tracking it in
831 ++ * some specific thread_struct bit, as it has the additional
832 ++ * benifit of checking for a potential TM bad thing exception.
833 ++ */
834 ++ if (!MSR_TM_SUSPENDED(mfmsr()))
835 ++ return;
836 ++
837 + tm_reclaim(thr, thr->regs->msr, cause);
838 +
839 + /* Having done the reclaim, we now have the checkpointed
840 +diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
841 +index 0dbee465af7a..ef7c24e84a62 100644
842 +--- a/arch/powerpc/kernel/signal_32.c
843 ++++ b/arch/powerpc/kernel/signal_32.c
844 +@@ -875,6 +875,15 @@ static long restore_tm_user_regs(struct pt_regs *regs,
845 + return 1;
846 + #endif /* CONFIG_SPE */
847 +
848 ++ /* Get the top half of the MSR from the user context */
849 ++ if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
850 ++ return 1;
851 ++ msr_hi <<= 32;
852 ++ /* If TM bits are set to the reserved value, it's an invalid context */
853 ++ if (MSR_TM_RESV(msr_hi))
854 ++ return 1;
855 ++ /* Pull in the MSR TM bits from the user context */
856 ++ regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
857 + /* Now, recheckpoint. This loads up all of the checkpointed (older)
858 + * registers, including FP and V[S]Rs. After recheckpointing, the
859 + * transactional versions should be loaded.
860 +@@ -884,11 +893,6 @@ static long restore_tm_user_regs(struct pt_regs *regs,
861 + current->thread.tm_texasr |= TEXASR_FS;
862 + /* This loads the checkpointed FP/VEC state, if used */
863 + tm_recheckpoint(&current->thread, msr);
864 +- /* Get the top half of the MSR */
865 +- if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
866 +- return 1;
867 +- /* Pull in MSR TM from user context */
868 +- regs->msr = (regs->msr & ~MSR_TS_MASK) | ((msr_hi<<32) & MSR_TS_MASK);
869 +
870 + /* This loads the speculative FP/VEC state, if used */
871 + if (msr & MSR_FP) {
872 +diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
873 +index 20756dfb9f34..c676ecec0869 100644
874 +--- a/arch/powerpc/kernel/signal_64.c
875 ++++ b/arch/powerpc/kernel/signal_64.c
876 +@@ -438,6 +438,10 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
877 +
878 + /* get MSR separately, transfer the LE bit if doing signal return */
879 + err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
880 ++ /* Don't allow reserved mode. */
881 ++ if (MSR_TM_RESV(msr))
882 ++ return -EINVAL;
883 ++
884 + /* pull in MSR TM from user context */
885 + regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
886 +
887 +diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
888 +index 9c26c5a96ea2..a7352b59e6f9 100644
889 +--- a/arch/powerpc/kvm/book3s_hv.c
890 ++++ b/arch/powerpc/kvm/book3s_hv.c
891 +@@ -224,6 +224,12 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
892 +
893 + static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
894 + {
895 ++ /*
896 ++ * Check for illegal transactional state bit combination
897 ++ * and if we find it, force the TS field to a safe state.
898 ++ */
899 ++ if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
900 ++ msr &= ~MSR_TS_MASK;
901 + vcpu->arch.shregs.msr = msr;
902 + kvmppc_end_cede(vcpu);
903 + }
904 +@@ -2019,7 +2025,7 @@ static bool can_split_piggybacked_subcores(struct core_info *cip)
905 + return false;
906 + n_subcores += (cip->subcore_threads[sub] - 1) >> 1;
907 + }
908 +- if (n_subcores > 3 || large_sub < 0)
909 ++ if (large_sub < 0 || !subcore_config_ok(n_subcores + 1, 2))
910 + return false;
911 +
912 + /*
913 +diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
914 +index 17cea18a09d3..264c473c1b3c 100644
915 +--- a/arch/powerpc/net/bpf_jit_comp.c
916 ++++ b/arch/powerpc/net/bpf_jit_comp.c
917 +@@ -78,18 +78,9 @@ static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
918 + PPC_LI(r_X, 0);
919 + }
920 +
921 +- switch (filter[0].code) {
922 +- case BPF_RET | BPF_K:
923 +- case BPF_LD | BPF_W | BPF_LEN:
924 +- case BPF_LD | BPF_W | BPF_ABS:
925 +- case BPF_LD | BPF_H | BPF_ABS:
926 +- case BPF_LD | BPF_B | BPF_ABS:
927 +- /* first instruction sets A register (or is RET 'constant') */
928 +- break;
929 +- default:
930 +- /* make sure we dont leak kernel information to user */
931 ++ /* make sure we dont leak kernel information to user */
932 ++ if (bpf_needs_clear_a(&filter[0]))
933 + PPC_LI(r_A, 0);
934 +- }
935 + }
936 +
937 + static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
938 +diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c
939 +index 2c91ee7800b9..e96027d27151 100644
940 +--- a/arch/powerpc/platforms/powernv/opal-irqchip.c
941 ++++ b/arch/powerpc/platforms/powernv/opal-irqchip.c
942 +@@ -43,11 +43,34 @@ static unsigned int opal_irq_count;
943 + static unsigned int *opal_irqs;
944 +
945 + static void opal_handle_irq_work(struct irq_work *work);
946 +-static __be64 last_outstanding_events;
947 ++static u64 last_outstanding_events;
948 + static struct irq_work opal_event_irq_work = {
949 + .func = opal_handle_irq_work,
950 + };
951 +
952 ++void opal_handle_events(uint64_t events)
953 ++{
954 ++ int virq, hwirq = 0;
955 ++ u64 mask = opal_event_irqchip.mask;
956 ++
957 ++ if (!in_irq() && (events & mask)) {
958 ++ last_outstanding_events = events;
959 ++ irq_work_queue(&opal_event_irq_work);
960 ++ return;
961 ++ }
962 ++
963 ++ while (events & mask) {
964 ++ hwirq = fls64(events) - 1;
965 ++ if (BIT_ULL(hwirq) & mask) {
966 ++ virq = irq_find_mapping(opal_event_irqchip.domain,
967 ++ hwirq);
968 ++ if (virq)
969 ++ generic_handle_irq(virq);
970 ++ }
971 ++ events &= ~BIT_ULL(hwirq);
972 ++ }
973 ++}
974 ++
975 + static void opal_event_mask(struct irq_data *d)
976 + {
977 + clear_bit(d->hwirq, &opal_event_irqchip.mask);
978 +@@ -55,9 +78,21 @@ static void opal_event_mask(struct irq_data *d)
979 +
980 + static void opal_event_unmask(struct irq_data *d)
981 + {
982 ++ __be64 events;
983 ++
984 + set_bit(d->hwirq, &opal_event_irqchip.mask);
985 +
986 +- opal_poll_events(&last_outstanding_events);
987 ++ opal_poll_events(&events);
988 ++ last_outstanding_events = be64_to_cpu(events);
989 ++
990 ++ /*
991 ++ * We can't just handle the events now with opal_handle_events().
992 ++ * If we did we would deadlock when opal_event_unmask() is called from
993 ++ * handle_level_irq() with the irq descriptor lock held, because
994 ++ * calling opal_handle_events() would call generic_handle_irq() and
995 ++ * then handle_level_irq() which would try to take the descriptor lock
996 ++ * again. Instead queue the events for later.
997 ++ */
998 + if (last_outstanding_events & opal_event_irqchip.mask)
999 + /* Need to retrigger the interrupt */
1000 + irq_work_queue(&opal_event_irq_work);
1001 +@@ -96,29 +131,6 @@ static int opal_event_map(struct irq_domain *d, unsigned int irq,
1002 + return 0;
1003 + }
1004 +
1005 +-void opal_handle_events(uint64_t events)
1006 +-{
1007 +- int virq, hwirq = 0;
1008 +- u64 mask = opal_event_irqchip.mask;
1009 +-
1010 +- if (!in_irq() && (events & mask)) {
1011 +- last_outstanding_events = events;
1012 +- irq_work_queue(&opal_event_irq_work);
1013 +- return;
1014 +- }
1015 +-
1016 +- while (events & mask) {
1017 +- hwirq = fls64(events) - 1;
1018 +- if (BIT_ULL(hwirq) & mask) {
1019 +- virq = irq_find_mapping(opal_event_irqchip.domain,
1020 +- hwirq);
1021 +- if (virq)
1022 +- generic_handle_irq(virq);
1023 +- }
1024 +- events &= ~BIT_ULL(hwirq);
1025 +- }
1026 +-}
1027 +-
1028 + static irqreturn_t opal_interrupt(int irq, void *data)
1029 + {
1030 + __be64 events;
1031 +@@ -131,7 +143,7 @@ static irqreturn_t opal_interrupt(int irq, void *data)
1032 +
1033 + static void opal_handle_irq_work(struct irq_work *work)
1034 + {
1035 +- opal_handle_events(be64_to_cpu(last_outstanding_events));
1036 ++ opal_handle_events(last_outstanding_events);
1037 + }
1038 +
1039 + static int opal_event_match(struct irq_domain *h, struct device_node *node,
1040 +diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
1041 +index 4296d55e88f3..57cffb80bc36 100644
1042 +--- a/arch/powerpc/platforms/powernv/opal.c
1043 ++++ b/arch/powerpc/platforms/powernv/opal.c
1044 +@@ -278,7 +278,7 @@ static void opal_handle_message(void)
1045 +
1046 + /* Sanity check */
1047 + if (type >= OPAL_MSG_TYPE_MAX) {
1048 +- pr_warning("%s: Unknown message type: %u\n", __func__, type);
1049 ++ pr_warn_once("%s: Unknown message type: %u\n", __func__, type);
1050 + return;
1051 + }
1052 + opal_message_do_notify(type, (void *)&msg);
1053 +diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
1054 +index f8b9f71b9a2b..17e71d2d96e5 100644
1055 +--- a/arch/sparc/net/bpf_jit_comp.c
1056 ++++ b/arch/sparc/net/bpf_jit_comp.c
1057 +@@ -420,22 +420,9 @@ void bpf_jit_compile(struct bpf_prog *fp)
1058 + }
1059 + emit_reg_move(O7, r_saved_O7);
1060 +
1061 +- switch (filter[0].code) {
1062 +- case BPF_RET | BPF_K:
1063 +- case BPF_LD | BPF_W | BPF_LEN:
1064 +- case BPF_LD | BPF_W | BPF_ABS:
1065 +- case BPF_LD | BPF_H | BPF_ABS:
1066 +- case BPF_LD | BPF_B | BPF_ABS:
1067 +- /* The first instruction sets the A register (or is
1068 +- * a "RET 'constant'")
1069 +- */
1070 +- break;
1071 +- default:
1072 +- /* Make sure we dont leak kernel information to the
1073 +- * user.
1074 +- */
1075 ++ /* Make sure we dont leak kernel information to the user. */
1076 ++ if (bpf_needs_clear_a(&filter[0]))
1077 + emit_clear(r_A); /* A = 0 */
1078 +- }
1079 +
1080 + for (i = 0; i < flen; i++) {
1081 + unsigned int K = filter[i].k;
1082 +diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
1083 +index 4fa687a47a62..6b8d6e8cd449 100644
1084 +--- a/arch/x86/include/asm/boot.h
1085 ++++ b/arch/x86/include/asm/boot.h
1086 +@@ -27,7 +27,7 @@
1087 + #define BOOT_HEAP_SIZE 0x400000
1088 + #else /* !CONFIG_KERNEL_BZIP2 */
1089 +
1090 +-#define BOOT_HEAP_SIZE 0x8000
1091 ++#define BOOT_HEAP_SIZE 0x10000
1092 +
1093 + #endif /* !CONFIG_KERNEL_BZIP2 */
1094 +
1095 +diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
1096 +index 379cd3658799..bfd9b2a35a0b 100644
1097 +--- a/arch/x86/include/asm/mmu_context.h
1098 ++++ b/arch/x86/include/asm/mmu_context.h
1099 +@@ -116,8 +116,36 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
1100 + #endif
1101 + cpumask_set_cpu(cpu, mm_cpumask(next));
1102 +
1103 +- /* Re-load page tables */
1104 ++ /*
1105 ++ * Re-load page tables.
1106 ++ *
1107 ++ * This logic has an ordering constraint:
1108 ++ *
1109 ++ * CPU 0: Write to a PTE for 'next'
1110 ++ * CPU 0: load bit 1 in mm_cpumask. if nonzero, send IPI.
1111 ++ * CPU 1: set bit 1 in next's mm_cpumask
1112 ++ * CPU 1: load from the PTE that CPU 0 writes (implicit)
1113 ++ *
1114 ++ * We need to prevent an outcome in which CPU 1 observes
1115 ++ * the new PTE value and CPU 0 observes bit 1 clear in
1116 ++ * mm_cpumask. (If that occurs, then the IPI will never
1117 ++ * be sent, and CPU 0's TLB will contain a stale entry.)
1118 ++ *
1119 ++ * The bad outcome can occur if either CPU's load is
1120 ++ * reordered before that CPU's store, so both CPUs must
1121 ++ * execute full barriers to prevent this from happening.
1122 ++ *
1123 ++ * Thus, switch_mm needs a full barrier between the
1124 ++ * store to mm_cpumask and any operation that could load
1125 ++ * from next->pgd. TLB fills are special and can happen
1126 ++ * due to instruction fetches or for no reason at all,
1127 ++ * and neither LOCK nor MFENCE orders them.
1128 ++ * Fortunately, load_cr3() is serializing and gives the
1129 ++ * ordering guarantee we need.
1130 ++ *
1131 ++ */
1132 + load_cr3(next->pgd);
1133 ++
1134 + trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
1135 +
1136 + /* Stop flush ipis for the previous mm */
1137 +@@ -156,10 +184,14 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
1138 + * schedule, protecting us from simultaneous changes.
1139 + */
1140 + cpumask_set_cpu(cpu, mm_cpumask(next));
1141 ++
1142 + /*
1143 + * We were in lazy tlb mode and leave_mm disabled
1144 + * tlb flush IPI delivery. We must reload CR3
1145 + * to make sure to use no freed page tables.
1146 ++ *
1147 ++ * As above, load_cr3() is serializing and orders TLB
1148 ++ * fills with respect to the mm_cpumask write.
1149 + */
1150 + load_cr3(next->pgd);
1151 + trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
1152 +diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
1153 +index 10d0596433f8..c759b3cca663 100644
1154 +--- a/arch/x86/include/asm/paravirt.h
1155 ++++ b/arch/x86/include/asm/paravirt.h
1156 +@@ -19,6 +19,12 @@ static inline int paravirt_enabled(void)
1157 + return pv_info.paravirt_enabled;
1158 + }
1159 +
1160 ++static inline int paravirt_has_feature(unsigned int feature)
1161 ++{
1162 ++ WARN_ON_ONCE(!pv_info.paravirt_enabled);
1163 ++ return (pv_info.features & feature);
1164 ++}
1165 ++
1166 + static inline void load_sp0(struct tss_struct *tss,
1167 + struct thread_struct *thread)
1168 + {
1169 +diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
1170 +index 31247b5bff7c..3d44191185f8 100644
1171 +--- a/arch/x86/include/asm/paravirt_types.h
1172 ++++ b/arch/x86/include/asm/paravirt_types.h
1173 +@@ -70,9 +70,14 @@ struct pv_info {
1174 + #endif
1175 +
1176 + int paravirt_enabled;
1177 ++ unsigned int features; /* valid only if paravirt_enabled is set */
1178 + const char *name;
1179 + };
1180 +
1181 ++#define paravirt_has(x) paravirt_has_feature(PV_SUPPORTED_##x)
1182 ++/* Supported features */
1183 ++#define PV_SUPPORTED_RTC (1<<0)
1184 ++
1185 + struct pv_init_ops {
1186 + /*
1187 + * Patch may replace one of the defined code sequences with
1188 +diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
1189 +index 19577dd325fa..b7692daeaf92 100644
1190 +--- a/arch/x86/include/asm/processor.h
1191 ++++ b/arch/x86/include/asm/processor.h
1192 +@@ -472,6 +472,7 @@ static inline unsigned long current_top_of_stack(void)
1193 + #else
1194 + #define __cpuid native_cpuid
1195 + #define paravirt_enabled() 0
1196 ++#define paravirt_has(x) 0
1197 +
1198 + static inline void load_sp0(struct tss_struct *tss,
1199 + struct thread_struct *thread)
1200 +diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
1201 +index 9d014b82a124..6b2c8229f9da 100644
1202 +--- a/arch/x86/kernel/cpu/mcheck/mce.c
1203 ++++ b/arch/x86/kernel/cpu/mcheck/mce.c
1204 +@@ -999,6 +999,17 @@ void do_machine_check(struct pt_regs *regs, long error_code)
1205 + int flags = MF_ACTION_REQUIRED;
1206 + int lmce = 0;
1207 +
1208 ++ /* If this CPU is offline, just bail out. */
1209 ++ if (cpu_is_offline(smp_processor_id())) {
1210 ++ u64 mcgstatus;
1211 ++
1212 ++ mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
1213 ++ if (mcgstatus & MCG_STATUS_RIPV) {
1214 ++ mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
1215 ++ return;
1216 ++ }
1217 ++ }
1218 ++
1219 + ist_enter(regs);
1220 +
1221 + this_cpu_inc(mce_exception_count);
1222 +diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
1223 +index 02693dd9a079..f660d63f40fe 100644
1224 +--- a/arch/x86/kernel/reboot.c
1225 ++++ b/arch/x86/kernel/reboot.c
1226 +@@ -182,6 +182,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
1227 + DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
1228 + },
1229 + },
1230 ++ { /* Handle problems with rebooting on the iMac10,1. */
1231 ++ .callback = set_pci_reboot,
1232 ++ .ident = "Apple iMac10,1",
1233 ++ .matches = {
1234 ++ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
1235 ++ DMI_MATCH(DMI_PRODUCT_NAME, "iMac10,1"),
1236 ++ },
1237 ++ },
1238 +
1239 + /* ASRock */
1240 + { /* Handle problems with rebooting on ASRock Q1900DC-ITX */
1241 +diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
1242 +index cd9685235df9..4af8d063fb36 100644
1243 +--- a/arch/x86/kernel/rtc.c
1244 ++++ b/arch/x86/kernel/rtc.c
1245 +@@ -200,6 +200,9 @@ static __init int add_rtc_cmos(void)
1246 + }
1247 + #endif
1248 +
1249 ++ if (paravirt_enabled() && !paravirt_has(RTC))
1250 ++ return -ENODEV;
1251 ++
1252 + platform_device_register(&rtc_device);
1253 + dev_info(&rtc_device.dev,
1254 + "registered platform RTC device (no PNP device found)\n");
1255 +diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
1256 +index da52e6bb5c7f..7d2b2ed33dee 100644
1257 +--- a/arch/x86/kernel/signal.c
1258 ++++ b/arch/x86/kernel/signal.c
1259 +@@ -688,12 +688,15 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
1260 + signal_setup_done(failed, ksig, stepping);
1261 + }
1262 +
1263 +-#ifdef CONFIG_X86_32
1264 +-#define NR_restart_syscall __NR_restart_syscall
1265 +-#else /* !CONFIG_X86_32 */
1266 +-#define NR_restart_syscall \
1267 +- test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall : __NR_restart_syscall
1268 +-#endif /* CONFIG_X86_32 */
1269 ++static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
1270 ++{
1271 ++#if defined(CONFIG_X86_32) || !defined(CONFIG_X86_64)
1272 ++ return __NR_restart_syscall;
1273 ++#else /* !CONFIG_X86_32 && CONFIG_X86_64 */
1274 ++ return test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall :
1275 ++ __NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT);
1276 ++#endif /* CONFIG_X86_32 || !CONFIG_X86_64 */
1277 ++}
1278 +
1279 + /*
1280 + * Note that 'init' is a special process: it doesn't get signals it doesn't
1281 +@@ -722,7 +725,7 @@ void do_signal(struct pt_regs *regs)
1282 + break;
1283 +
1284 + case -ERESTART_RESTARTBLOCK:
1285 +- regs->ax = NR_restart_syscall;
1286 ++ regs->ax = get_nr_restart_syscall(regs);
1287 + regs->ip -= 2;
1288 + break;
1289 + }
1290 +diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
1291 +index 892ee2e5ecbc..fbabe4fcc7fb 100644
1292 +--- a/arch/x86/kernel/smpboot.c
1293 ++++ b/arch/x86/kernel/smpboot.c
1294 +@@ -509,7 +509,7 @@ void __inquire_remote_apic(int apicid)
1295 + */
1296 + #define UDELAY_10MS_DEFAULT 10000
1297 +
1298 +-static unsigned int init_udelay = INT_MAX;
1299 ++static unsigned int init_udelay = UINT_MAX;
1300 +
1301 + static int __init cpu_init_udelay(char *str)
1302 + {
1303 +@@ -522,14 +522,15 @@ early_param("cpu_init_udelay", cpu_init_udelay);
1304 + static void __init smp_quirk_init_udelay(void)
1305 + {
1306 + /* if cmdline changed it from default, leave it alone */
1307 +- if (init_udelay != INT_MAX)
1308 ++ if (init_udelay != UINT_MAX)
1309 + return;
1310 +
1311 + /* if modern processor, use no delay */
1312 + if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
1313 +- ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF)))
1314 ++ ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) {
1315 + init_udelay = 0;
1316 +-
1317 ++ return;
1318 ++ }
1319 + /* else, use legacy delay */
1320 + init_udelay = UDELAY_10MS_DEFAULT;
1321 + }
1322 +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
1323 +index d7f89387ba0c..22d181350ec9 100644
1324 +--- a/arch/x86/kvm/svm.c
1325 ++++ b/arch/x86/kvm/svm.c
1326 +@@ -1108,6 +1108,7 @@ static void init_vmcb(struct vcpu_svm *svm)
1327 + set_exception_intercept(svm, UD_VECTOR);
1328 + set_exception_intercept(svm, MC_VECTOR);
1329 + set_exception_intercept(svm, AC_VECTOR);
1330 ++ set_exception_intercept(svm, DB_VECTOR);
1331 +
1332 + set_intercept(svm, INTERCEPT_INTR);
1333 + set_intercept(svm, INTERCEPT_NMI);
1334 +@@ -1642,20 +1643,13 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
1335 + mark_dirty(svm->vmcb, VMCB_SEG);
1336 + }
1337 +
1338 +-static void update_db_bp_intercept(struct kvm_vcpu *vcpu)
1339 ++static void update_bp_intercept(struct kvm_vcpu *vcpu)
1340 + {
1341 + struct vcpu_svm *svm = to_svm(vcpu);
1342 +
1343 +- clr_exception_intercept(svm, DB_VECTOR);
1344 + clr_exception_intercept(svm, BP_VECTOR);
1345 +
1346 +- if (svm->nmi_singlestep)
1347 +- set_exception_intercept(svm, DB_VECTOR);
1348 +-
1349 + if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
1350 +- if (vcpu->guest_debug &
1351 +- (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
1352 +- set_exception_intercept(svm, DB_VECTOR);
1353 + if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
1354 + set_exception_intercept(svm, BP_VECTOR);
1355 + } else
1356 +@@ -1761,7 +1755,6 @@ static int db_interception(struct vcpu_svm *svm)
1357 + if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
1358 + svm->vmcb->save.rflags &=
1359 + ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
1360 +- update_db_bp_intercept(&svm->vcpu);
1361 + }
1362 +
1363 + if (svm->vcpu.guest_debug &
1364 +@@ -3761,7 +3754,6 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu)
1365 + */
1366 + svm->nmi_singlestep = true;
1367 + svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
1368 +- update_db_bp_intercept(vcpu);
1369 + }
1370 +
1371 + static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
1372 +@@ -4383,7 +4375,7 @@ static struct kvm_x86_ops svm_x86_ops = {
1373 + .vcpu_load = svm_vcpu_load,
1374 + .vcpu_put = svm_vcpu_put,
1375 +
1376 +- .update_db_bp_intercept = update_db_bp_intercept,
1377 ++ .update_db_bp_intercept = update_bp_intercept,
1378 + .get_msr = svm_get_msr,
1379 + .set_msr = svm_set_msr,
1380 + .get_segment_base = svm_get_segment_base,
1381 +diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
1382 +index 4eae7c35ddf5..08b668cb3462 100644
1383 +--- a/arch/x86/kvm/trace.h
1384 ++++ b/arch/x86/kvm/trace.h
1385 +@@ -250,7 +250,7 @@ TRACE_EVENT(kvm_inj_virq,
1386 + #define kvm_trace_sym_exc \
1387 + EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM), \
1388 + EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF), \
1389 +- EXS(MF), EXS(MC)
1390 ++ EXS(MF), EXS(AC), EXS(MC)
1391 +
1392 + /*
1393 + * Tracepoint for kvm interrupt injection:
1394 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
1395 +index 343d3692dd65..2e0bd4884652 100644
1396 +--- a/arch/x86/kvm/vmx.c
1397 ++++ b/arch/x86/kvm/vmx.c
1398 +@@ -3644,20 +3644,21 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1399 + if (!is_paging(vcpu)) {
1400 + hw_cr4 &= ~X86_CR4_PAE;
1401 + hw_cr4 |= X86_CR4_PSE;
1402 +- /*
1403 +- * SMEP/SMAP is disabled if CPU is in non-paging mode
1404 +- * in hardware. However KVM always uses paging mode to
1405 +- * emulate guest non-paging mode with TDP.
1406 +- * To emulate this behavior, SMEP/SMAP needs to be
1407 +- * manually disabled when guest switches to non-paging
1408 +- * mode.
1409 +- */
1410 +- hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP);
1411 + } else if (!(cr4 & X86_CR4_PAE)) {
1412 + hw_cr4 &= ~X86_CR4_PAE;
1413 + }
1414 + }
1415 +
1416 ++ if (!enable_unrestricted_guest && !is_paging(vcpu))
1417 ++ /*
1418 ++ * SMEP/SMAP is disabled if CPU is in non-paging mode in
1419 ++ * hardware. However KVM always uses paging mode without
1420 ++ * unrestricted guest.
1421 ++ * To emulate this behavior, SMEP/SMAP needs to be manually
1422 ++ * disabled when guest switches to non-paging mode.
1423 ++ */
1424 ++ hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP);
1425 ++
1426 + vmcs_writel(CR4_READ_SHADOW, cr4);
1427 + vmcs_writel(GUEST_CR4, hw_cr4);
1428 + return 0;
1429 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1430 +index 43609af03283..37bbbf842350 100644
1431 +--- a/arch/x86/kvm/x86.c
1432 ++++ b/arch/x86/kvm/x86.c
1433 +@@ -942,7 +942,7 @@ static u32 msrs_to_save[] = {
1434 + MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
1435 + #endif
1436 + MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
1437 +- MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS
1438 ++ MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
1439 + };
1440 +
1441 + static unsigned num_msrs_to_save;
1442 +@@ -3847,16 +3847,17 @@ static void kvm_init_msr_list(void)
1443 +
1444 + /*
1445 + * Even MSRs that are valid in the host may not be exposed
1446 +- * to the guests in some cases. We could work around this
1447 +- * in VMX with the generic MSR save/load machinery, but it
1448 +- * is not really worthwhile since it will really only
1449 +- * happen with nested virtualization.
1450 ++ * to the guests in some cases.
1451 + */
1452 + switch (msrs_to_save[i]) {
1453 + case MSR_IA32_BNDCFGS:
1454 + if (!kvm_x86_ops->mpx_supported())
1455 + continue;
1456 + break;
1457 ++ case MSR_TSC_AUX:
1458 ++ if (!kvm_x86_ops->rdtscp_supported())
1459 ++ continue;
1460 ++ break;
1461 + default:
1462 + break;
1463 + }
1464 +diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
1465 +index a0d09f6c6533..a43b2eafc466 100644
1466 +--- a/arch/x86/lguest/boot.c
1467 ++++ b/arch/x86/lguest/boot.c
1468 +@@ -1414,6 +1414,7 @@ __init void lguest_init(void)
1469 + pv_info.kernel_rpl = 1;
1470 + /* Everyone except Xen runs with this set. */
1471 + pv_info.shared_kernel_pmd = 1;
1472 ++ pv_info.features = 0;
1473 +
1474 + /*
1475 + * We set up all the lguest overrides for sensitive operations. These
1476 +diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
1477 +index 71fc79a58a15..78e47ff74f9d 100644
1478 +--- a/arch/x86/mm/mpx.c
1479 ++++ b/arch/x86/mm/mpx.c
1480 +@@ -101,19 +101,19 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
1481 + switch (type) {
1482 + case REG_TYPE_RM:
1483 + regno = X86_MODRM_RM(insn->modrm.value);
1484 +- if (X86_REX_B(insn->rex_prefix.value) == 1)
1485 ++ if (X86_REX_B(insn->rex_prefix.value))
1486 + regno += 8;
1487 + break;
1488 +
1489 + case REG_TYPE_INDEX:
1490 + regno = X86_SIB_INDEX(insn->sib.value);
1491 +- if (X86_REX_X(insn->rex_prefix.value) == 1)
1492 ++ if (X86_REX_X(insn->rex_prefix.value))
1493 + regno += 8;
1494 + break;
1495 +
1496 + case REG_TYPE_BASE:
1497 + regno = X86_SIB_BASE(insn->sib.value);
1498 +- if (X86_REX_B(insn->rex_prefix.value) == 1)
1499 ++ if (X86_REX_B(insn->rex_prefix.value))
1500 + regno += 8;
1501 + break;
1502 +
1503 +diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
1504 +index 8ddb5d0d66fb..8f4cc3dfac32 100644
1505 +--- a/arch/x86/mm/tlb.c
1506 ++++ b/arch/x86/mm/tlb.c
1507 +@@ -161,7 +161,10 @@ void flush_tlb_current_task(void)
1508 + preempt_disable();
1509 +
1510 + count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
1511 ++
1512 ++ /* This is an implicit full barrier that synchronizes with switch_mm. */
1513 + local_flush_tlb();
1514 ++
1515 + trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
1516 + if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
1517 + flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
1518 +@@ -188,17 +191,29 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
1519 + unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
1520 +
1521 + preempt_disable();
1522 +- if (current->active_mm != mm)
1523 ++ if (current->active_mm != mm) {
1524 ++ /* Synchronize with switch_mm. */
1525 ++ smp_mb();
1526 ++
1527 + goto out;
1528 ++ }
1529 +
1530 + if (!current->mm) {
1531 + leave_mm(smp_processor_id());
1532 ++
1533 ++ /* Synchronize with switch_mm. */
1534 ++ smp_mb();
1535 ++
1536 + goto out;
1537 + }
1538 +
1539 + if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
1540 + base_pages_to_flush = (end - start) >> PAGE_SHIFT;
1541 +
1542 ++ /*
1543 ++ * Both branches below are implicit full barriers (MOV to CR or
1544 ++ * INVLPG) that synchronize with switch_mm.
1545 ++ */
1546 + if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
1547 + base_pages_to_flush = TLB_FLUSH_ALL;
1548 + count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
1549 +@@ -228,10 +243,18 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
1550 + preempt_disable();
1551 +
1552 + if (current->active_mm == mm) {
1553 +- if (current->mm)
1554 ++ if (current->mm) {
1555 ++ /*
1556 ++ * Implicit full barrier (INVLPG) that synchronizes
1557 ++ * with switch_mm.
1558 ++ */
1559 + __flush_tlb_one(start);
1560 +- else
1561 ++ } else {
1562 + leave_mm(smp_processor_id());
1563 ++
1564 ++ /* Synchronize with switch_mm. */
1565 ++ smp_mb();
1566 ++ }
1567 + }
1568 +
1569 + if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
1570 +diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
1571 +index 993b7a71386d..aeb385d86e95 100644
1572 +--- a/arch/x86/xen/enlighten.c
1573 ++++ b/arch/x86/xen/enlighten.c
1574 +@@ -1191,7 +1191,7 @@ static const struct pv_info xen_info __initconst = {
1575 + #ifdef CONFIG_X86_64
1576 + .extra_user_64bit_cs = FLAT_USER_CS64,
1577 + #endif
1578 +-
1579 ++ .features = 0,
1580 + .name = "Xen",
1581 + };
1582 +
1583 +@@ -1534,6 +1534,8 @@ asmlinkage __visible void __init xen_start_kernel(void)
1584 +
1585 + /* Install Xen paravirt ops */
1586 + pv_info = xen_info;
1587 ++ if (xen_initial_domain())
1588 ++ pv_info.features |= PV_SUPPORTED_RTC;
1589 + pv_init_ops = xen_init_ops;
1590 + pv_apic_ops = xen_apic_ops;
1591 + if (!xen_pvh_domain()) {
1592 +diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
1593 +index feddabdab448..4299aa924b9f 100644
1594 +--- a/arch/x86/xen/suspend.c
1595 ++++ b/arch/x86/xen/suspend.c
1596 +@@ -33,7 +33,8 @@ static void xen_hvm_post_suspend(int suspend_cancelled)
1597 + {
1598 + #ifdef CONFIG_XEN_PVHVM
1599 + int cpu;
1600 +- xen_hvm_init_shared_info();
1601 ++ if (!suspend_cancelled)
1602 ++ xen_hvm_init_shared_info();
1603 + xen_callback_vector();
1604 + xen_unplug_emulated_devices();
1605 + if (xen_feature(XENFEAT_hvm_safe_pvclock)) {
1606 +diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
1607 +index 654f6f36a071..54bccf7db592 100644
1608 +--- a/drivers/char/ipmi/ipmi_si_intf.c
1609 ++++ b/drivers/char/ipmi/ipmi_si_intf.c
1610 +@@ -412,18 +412,42 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
1611 + return rv;
1612 + }
1613 +
1614 +-static void start_check_enables(struct smi_info *smi_info)
1615 ++static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
1616 ++{
1617 ++ smi_info->last_timeout_jiffies = jiffies;
1618 ++ mod_timer(&smi_info->si_timer, new_val);
1619 ++ smi_info->timer_running = true;
1620 ++}
1621 ++
1622 ++/*
1623 ++ * Start a new message and (re)start the timer and thread.
1624 ++ */
1625 ++static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
1626 ++ unsigned int size)
1627 ++{
1628 ++ smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
1629 ++
1630 ++ if (smi_info->thread)
1631 ++ wake_up_process(smi_info->thread);
1632 ++
1633 ++ smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
1634 ++}
1635 ++
1636 ++static void start_check_enables(struct smi_info *smi_info, bool start_timer)
1637 + {
1638 + unsigned char msg[2];
1639 +
1640 + msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
1641 + msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
1642 +
1643 +- smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
1644 ++ if (start_timer)
1645 ++ start_new_msg(smi_info, msg, 2);
1646 ++ else
1647 ++ smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
1648 + smi_info->si_state = SI_CHECKING_ENABLES;
1649 + }
1650 +
1651 +-static void start_clear_flags(struct smi_info *smi_info)
1652 ++static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
1653 + {
1654 + unsigned char msg[3];
1655 +
1656 +@@ -432,7 +456,10 @@ static void start_clear_flags(struct smi_info *smi_info)
1657 + msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
1658 + msg[2] = WDT_PRE_TIMEOUT_INT;
1659 +
1660 +- smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
1661 ++ if (start_timer)
1662 ++ start_new_msg(smi_info, msg, 3);
1663 ++ else
1664 ++ smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
1665 + smi_info->si_state = SI_CLEARING_FLAGS;
1666 + }
1667 +
1668 +@@ -442,10 +469,8 @@ static void start_getting_msg_queue(struct smi_info *smi_info)
1669 + smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
1670 + smi_info->curr_msg->data_size = 2;
1671 +
1672 +- smi_info->handlers->start_transaction(
1673 +- smi_info->si_sm,
1674 +- smi_info->curr_msg->data,
1675 +- smi_info->curr_msg->data_size);
1676 ++ start_new_msg(smi_info, smi_info->curr_msg->data,
1677 ++ smi_info->curr_msg->data_size);
1678 + smi_info->si_state = SI_GETTING_MESSAGES;
1679 + }
1680 +
1681 +@@ -455,20 +480,11 @@ static void start_getting_events(struct smi_info *smi_info)
1682 + smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
1683 + smi_info->curr_msg->data_size = 2;
1684 +
1685 +- smi_info->handlers->start_transaction(
1686 +- smi_info->si_sm,
1687 +- smi_info->curr_msg->data,
1688 +- smi_info->curr_msg->data_size);
1689 ++ start_new_msg(smi_info, smi_info->curr_msg->data,
1690 ++ smi_info->curr_msg->data_size);
1691 + smi_info->si_state = SI_GETTING_EVENTS;
1692 + }
1693 +
1694 +-static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
1695 +-{
1696 +- smi_info->last_timeout_jiffies = jiffies;
1697 +- mod_timer(&smi_info->si_timer, new_val);
1698 +- smi_info->timer_running = true;
1699 +-}
1700 +-
1701 + /*
1702 + * When we have a situtaion where we run out of memory and cannot
1703 + * allocate messages, we just leave them in the BMC and run the system
1704 +@@ -478,11 +494,11 @@ static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
1705 + * Note that we cannot just use disable_irq(), since the interrupt may
1706 + * be shared.
1707 + */
1708 +-static inline bool disable_si_irq(struct smi_info *smi_info)
1709 ++static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer)
1710 + {
1711 + if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
1712 + smi_info->interrupt_disabled = true;
1713 +- start_check_enables(smi_info);
1714 ++ start_check_enables(smi_info, start_timer);
1715 + return true;
1716 + }
1717 + return false;
1718 +@@ -492,7 +508,7 @@ static inline bool enable_si_irq(struct smi_info *smi_info)
1719 + {
1720 + if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
1721 + smi_info->interrupt_disabled = false;
1722 +- start_check_enables(smi_info);
1723 ++ start_check_enables(smi_info, true);
1724 + return true;
1725 + }
1726 + return false;
1727 +@@ -510,7 +526,7 @@ static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
1728 +
1729 + msg = ipmi_alloc_smi_msg();
1730 + if (!msg) {
1731 +- if (!disable_si_irq(smi_info))
1732 ++ if (!disable_si_irq(smi_info, true))
1733 + smi_info->si_state = SI_NORMAL;
1734 + } else if (enable_si_irq(smi_info)) {
1735 + ipmi_free_smi_msg(msg);
1736 +@@ -526,7 +542,7 @@ static void handle_flags(struct smi_info *smi_info)
1737 + /* Watchdog pre-timeout */
1738 + smi_inc_stat(smi_info, watchdog_pretimeouts);
1739 +
1740 +- start_clear_flags(smi_info);
1741 ++ start_clear_flags(smi_info, true);
1742 + smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
1743 + if (smi_info->intf)
1744 + ipmi_smi_watchdog_pretimeout(smi_info->intf);
1745 +@@ -879,8 +895,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
1746 + msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
1747 + msg[1] = IPMI_GET_MSG_FLAGS_CMD;
1748 +
1749 +- smi_info->handlers->start_transaction(
1750 +- smi_info->si_sm, msg, 2);
1751 ++ start_new_msg(smi_info, msg, 2);
1752 + smi_info->si_state = SI_GETTING_FLAGS;
1753 + goto restart;
1754 + }
1755 +@@ -910,7 +925,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
1756 + * disable and messages disabled.
1757 + */
1758 + if (smi_info->supports_event_msg_buff || smi_info->irq) {
1759 +- start_check_enables(smi_info);
1760 ++ start_check_enables(smi_info, true);
1761 + } else {
1762 + smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
1763 + if (!smi_info->curr_msg)
1764 +@@ -1208,14 +1223,14 @@ static int smi_start_processing(void *send_info,
1765 +
1766 + new_smi->intf = intf;
1767 +
1768 +- /* Try to claim any interrupts. */
1769 +- if (new_smi->irq_setup)
1770 +- new_smi->irq_setup(new_smi);
1771 +-
1772 + /* Set up the timer that drives the interface. */
1773 + setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
1774 + smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
1775 +
1776 ++ /* Try to claim any interrupts. */
1777 ++ if (new_smi->irq_setup)
1778 ++ new_smi->irq_setup(new_smi);
1779 ++
1780 + /*
1781 + * Check if the user forcefully enabled the daemon.
1782 + */
1783 +@@ -3613,7 +3628,7 @@ static int try_smi_init(struct smi_info *new_smi)
1784 + * Start clearing the flags before we enable interrupts or the
1785 + * timer to avoid racing with the timer.
1786 + */
1787 +- start_clear_flags(new_smi);
1788 ++ start_clear_flags(new_smi, false);
1789 +
1790 + /*
1791 + * IRQ is defined to be set when non-zero. req_events will
1792 +@@ -3908,7 +3923,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
1793 + poll(to_clean);
1794 + schedule_timeout_uninterruptible(1);
1795 + }
1796 +- disable_si_irq(to_clean);
1797 ++ disable_si_irq(to_clean, false);
1798 + while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
1799 + poll(to_clean);
1800 + schedule_timeout_uninterruptible(1);
1801 +diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
1802 +index 30f522848c73..c19e7fc717c3 100644
1803 +--- a/drivers/connector/connector.c
1804 ++++ b/drivers/connector/connector.c
1805 +@@ -178,26 +178,21 @@ static int cn_call_callback(struct sk_buff *skb)
1806 + *
1807 + * It checks skb, netlink header and msg sizes, and calls callback helper.
1808 + */
1809 +-static void cn_rx_skb(struct sk_buff *__skb)
1810 ++static void cn_rx_skb(struct sk_buff *skb)
1811 + {
1812 + struct nlmsghdr *nlh;
1813 +- struct sk_buff *skb;
1814 + int len, err;
1815 +
1816 +- skb = skb_get(__skb);
1817 +-
1818 + if (skb->len >= NLMSG_HDRLEN) {
1819 + nlh = nlmsg_hdr(skb);
1820 + len = nlmsg_len(nlh);
1821 +
1822 + if (len < (int)sizeof(struct cn_msg) ||
1823 + skb->len < nlh->nlmsg_len ||
1824 +- len > CONNECTOR_MAX_MSG_SIZE) {
1825 +- kfree_skb(skb);
1826 ++ len > CONNECTOR_MAX_MSG_SIZE)
1827 + return;
1828 +- }
1829 +
1830 +- err = cn_call_callback(skb);
1831 ++ err = cn_call_callback(skb_get(skb));
1832 + if (err < 0)
1833 + kfree_skb(skb);
1834 + }
1835 +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
1836 +index 70a11ac38119..c0fbf4ed58ec 100644
1837 +--- a/drivers/hid/hid-core.c
1838 ++++ b/drivers/hid/hid-core.c
1839 +@@ -1611,7 +1611,7 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
1840 + "Multi-Axis Controller"
1841 + };
1842 + const char *type, *bus;
1843 +- char buf[64];
1844 ++ char buf[64] = "";
1845 + unsigned int i;
1846 + int len;
1847 + int ret;
1848 +diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
1849 +index 0215ab62bb93..cba008ac9cff 100644
1850 +--- a/drivers/hid/wacom_wac.c
1851 ++++ b/drivers/hid/wacom_wac.c
1852 +@@ -1628,6 +1628,7 @@ static void wacom_wac_finger_usage_mapping(struct hid_device *hdev,
1853 + wacom_map_usage(input, usage, field, EV_KEY, BTN_TOUCH, 0);
1854 + break;
1855 + case HID_DG_CONTACTCOUNT:
1856 ++ wacom_wac->hid_data.cc_report = field->report->id;
1857 + wacom_wac->hid_data.cc_index = field->index;
1858 + wacom_wac->hid_data.cc_value_index = usage->usage_index;
1859 + break;
1860 +@@ -1715,7 +1716,32 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev,
1861 + struct wacom_wac *wacom_wac = &wacom->wacom_wac;
1862 + struct hid_data* hid_data = &wacom_wac->hid_data;
1863 +
1864 +- if (hid_data->cc_index >= 0) {
1865 ++ if (hid_data->cc_report != 0 &&
1866 ++ hid_data->cc_report != report->id) {
1867 ++ int i;
1868 ++
1869 ++ hid_data->cc_report = report->id;
1870 ++ hid_data->cc_index = -1;
1871 ++ hid_data->cc_value_index = -1;
1872 ++
1873 ++ for (i = 0; i < report->maxfield; i++) {
1874 ++ struct hid_field *field = report->field[i];
1875 ++ int j;
1876 ++
1877 ++ for (j = 0; j < field->maxusage; j++) {
1878 ++ if (field->usage[j].hid == HID_DG_CONTACTCOUNT) {
1879 ++ hid_data->cc_index = i;
1880 ++ hid_data->cc_value_index = j;
1881 ++
1882 ++ /* break */
1883 ++ i = report->maxfield;
1884 ++ j = field->maxusage;
1885 ++ }
1886 ++ }
1887 ++ }
1888 ++ }
1889 ++ if (hid_data->cc_report != 0 &&
1890 ++ hid_data->cc_index >= 0) {
1891 + struct hid_field *field = report->field[hid_data->cc_index];
1892 + int value = field->value[hid_data->cc_value_index];
1893 + if (value)
1894 +diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
1895 +index 1e270d401e18..809c03e34f74 100644
1896 +--- a/drivers/hid/wacom_wac.h
1897 ++++ b/drivers/hid/wacom_wac.h
1898 +@@ -198,6 +198,7 @@ struct hid_data {
1899 + int width;
1900 + int height;
1901 + int id;
1902 ++ int cc_report;
1903 + int cc_index;
1904 + int cc_value_index;
1905 + int num_expected;
1906 +diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
1907 +index 2d0dbbf38ceb..558c1e784613 100644
1908 +--- a/drivers/infiniband/hw/mlx5/cq.c
1909 ++++ b/drivers/infiniband/hw/mlx5/cq.c
1910 +@@ -756,7 +756,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
1911 + int uninitialized_var(index);
1912 + int uninitialized_var(inlen);
1913 + int cqe_size;
1914 +- int irqn;
1915 ++ unsigned int irqn;
1916 + int eqn;
1917 + int err;
1918 +
1919 +diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
1920 +index 286e890e7d64..ef7862056978 100644
1921 +--- a/drivers/iommu/arm-smmu-v3.c
1922 ++++ b/drivers/iommu/arm-smmu-v3.c
1923 +@@ -1427,7 +1427,7 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
1924 + struct io_pgtable_cfg *pgtbl_cfg)
1925 + {
1926 + int ret;
1927 +- u16 asid;
1928 ++ int asid;
1929 + struct arm_smmu_device *smmu = smmu_domain->smmu;
1930 + struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
1931 +
1932 +@@ -1439,10 +1439,11 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
1933 + &cfg->cdptr_dma, GFP_KERNEL);
1934 + if (!cfg->cdptr) {
1935 + dev_warn(smmu->dev, "failed to allocate context descriptor\n");
1936 ++ ret = -ENOMEM;
1937 + goto out_free_asid;
1938 + }
1939 +
1940 +- cfg->cd.asid = asid;
1941 ++ cfg->cd.asid = (u16)asid;
1942 + cfg->cd.ttbr = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
1943 + cfg->cd.tcr = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
1944 + cfg->cd.mair = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
1945 +@@ -1456,7 +1457,7 @@ out_free_asid:
1946 + static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
1947 + struct io_pgtable_cfg *pgtbl_cfg)
1948 + {
1949 +- u16 vmid;
1950 ++ int vmid;
1951 + struct arm_smmu_device *smmu = smmu_domain->smmu;
1952 + struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
1953 +
1954 +@@ -1464,7 +1465,7 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
1955 + if (IS_ERR_VALUE(vmid))
1956 + return vmid;
1957 +
1958 +- cfg->vmid = vmid;
1959 ++ cfg->vmid = (u16)vmid;
1960 + cfg->vttbr = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
1961 + cfg->vtcr = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
1962 + return 0;
1963 +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
1964 +index d65cf42399e8..dfc64662b386 100644
1965 +--- a/drivers/iommu/intel-iommu.c
1966 ++++ b/drivers/iommu/intel-iommu.c
1967 +@@ -4194,14 +4194,17 @@ int dmar_find_matched_atsr_unit(struct pci_dev *dev)
1968 + dev = pci_physfn(dev);
1969 + for (bus = dev->bus; bus; bus = bus->parent) {
1970 + bridge = bus->self;
1971 +- if (!bridge || !pci_is_pcie(bridge) ||
1972 ++ /* If it's an integrated device, allow ATS */
1973 ++ if (!bridge)
1974 ++ return 1;
1975 ++ /* Connected via non-PCIe: no ATS */
1976 ++ if (!pci_is_pcie(bridge) ||
1977 + pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
1978 + return 0;
1979 ++ /* If we found the root port, look it up in the ATSR */
1980 + if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
1981 + break;
1982 + }
1983 +- if (!bridge)
1984 +- return 0;
1985 +
1986 + rcu_read_lock();
1987 + list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
1988 +diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
1989 +index c4198fa490bf..9c1e8adaf4fc 100644
1990 +--- a/drivers/isdn/i4l/isdn_ppp.c
1991 ++++ b/drivers/isdn/i4l/isdn_ppp.c
1992 +@@ -301,6 +301,8 @@ isdn_ppp_open(int min, struct file *file)
1993 + is->compflags = 0;
1994 +
1995 + is->reset = isdn_ppp_ccp_reset_alloc(is);
1996 ++ if (!is->reset)
1997 ++ return -ENOMEM;
1998 +
1999 + is->lp = NULL;
2000 + is->mp_seqno = 0; /* MP sequence number */
2001 +@@ -320,6 +322,10 @@ isdn_ppp_open(int min, struct file *file)
2002 + * VJ header compression init
2003 + */
2004 + is->slcomp = slhc_init(16, 16); /* not necessary for 2. link in bundle */
2005 ++ if (IS_ERR(is->slcomp)) {
2006 ++ isdn_ppp_ccp_reset_free(is);
2007 ++ return PTR_ERR(is->slcomp);
2008 ++ }
2009 + #endif
2010 + #ifdef CONFIG_IPPP_FILTER
2011 + is->pass_filter = NULL;
2012 +@@ -567,10 +573,8 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
2013 + is->maxcid = val;
2014 + #ifdef CONFIG_ISDN_PPP_VJ
2015 + sltmp = slhc_init(16, val);
2016 +- if (!sltmp) {
2017 +- printk(KERN_ERR "ippp, can't realloc slhc struct\n");
2018 +- return -ENOMEM;
2019 +- }
2020 ++ if (IS_ERR(sltmp))
2021 ++ return PTR_ERR(sltmp);
2022 + if (is->slcomp)
2023 + slhc_free(is->slcomp);
2024 + is->slcomp = sltmp;
2025 +diff --git a/drivers/media/platform/vivid/vivid-osd.c b/drivers/media/platform/vivid/vivid-osd.c
2026 +index 084d346fb4c4..e15eef6a94e5 100644
2027 +--- a/drivers/media/platform/vivid/vivid-osd.c
2028 ++++ b/drivers/media/platform/vivid/vivid-osd.c
2029 +@@ -85,6 +85,7 @@ static int vivid_fb_ioctl(struct fb_info *info, unsigned cmd, unsigned long arg)
2030 + case FBIOGET_VBLANK: {
2031 + struct fb_vblank vblank;
2032 +
2033 ++ memset(&vblank, 0, sizeof(vblank));
2034 + vblank.flags = FB_VBLANK_HAVE_COUNT | FB_VBLANK_HAVE_VCOUNT |
2035 + FB_VBLANK_HAVE_VSYNC;
2036 + vblank.count = 0;
2037 +diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c
2038 +index 8f2e1c277c5f..7b91327bd472 100644
2039 +--- a/drivers/media/usb/airspy/airspy.c
2040 ++++ b/drivers/media/usb/airspy/airspy.c
2041 +@@ -132,7 +132,7 @@ struct airspy {
2042 + int urbs_submitted;
2043 +
2044 + /* USB control message buffer */
2045 +- #define BUF_SIZE 24
2046 ++ #define BUF_SIZE 128
2047 + u8 buf[BUF_SIZE];
2048 +
2049 + /* Current configuration */
2050 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
2051 +index bcd7bddbe312..509440cb6411 100644
2052 +--- a/drivers/net/bonding/bond_main.c
2053 ++++ b/drivers/net/bonding/bond_main.c
2054 +@@ -1207,7 +1207,6 @@ static int bond_master_upper_dev_link(struct net_device *bond_dev,
2055 + err = netdev_master_upper_dev_link_private(slave_dev, bond_dev, slave);
2056 + if (err)
2057 + return err;
2058 +- slave_dev->flags |= IFF_SLAVE;
2059 + rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE, GFP_KERNEL);
2060 + return 0;
2061 + }
2062 +@@ -1465,6 +1464,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
2063 + }
2064 + }
2065 +
2066 ++ /* set slave flag before open to prevent IPv6 addrconf */
2067 ++ slave_dev->flags |= IFF_SLAVE;
2068 ++
2069 + /* open the slave since the application closed it */
2070 + res = dev_open(slave_dev);
2071 + if (res) {
2072 +@@ -1725,6 +1727,7 @@ err_close:
2073 + dev_close(slave_dev);
2074 +
2075 + err_restore_mac:
2076 ++ slave_dev->flags &= ~IFF_SLAVE;
2077 + if (!bond->params.fail_over_mac ||
2078 + BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
2079 + /* XXX TODO - fom follow mode needs to change master's
2080 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2081 +index 443632df2010..394744bfbf89 100644
2082 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2083 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2084 +@@ -746,7 +746,7 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
2085 + struct mlx5_core_dev *mdev = priv->mdev;
2086 + struct mlx5_core_cq *mcq = &cq->mcq;
2087 + int eqn_not_used;
2088 +- int irqn;
2089 ++ unsigned int irqn;
2090 + int err;
2091 + u32 i;
2092 +
2093 +@@ -800,7 +800,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
2094 + void *in;
2095 + void *cqc;
2096 + int inlen;
2097 +- int irqn_not_used;
2098 ++ unsigned int irqn_not_used;
2099 + int eqn;
2100 + int err;
2101 +
2102 +@@ -1498,7 +1498,7 @@ static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
2103 + struct mlx5_core_dev *mdev = priv->mdev;
2104 + struct mlx5_core_cq *mcq = &cq->mcq;
2105 + int eqn_not_used;
2106 +- int irqn;
2107 ++ unsigned int irqn;
2108 + int err;
2109 +
2110 + err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
2111 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
2112 +index 03aabdd79abe..af9593baf1bb 100644
2113 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
2114 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
2115 +@@ -520,7 +520,8 @@ static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev)
2116 + mlx5_irq_clear_affinity_hint(mdev, i);
2117 + }
2118 +
2119 +-int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
2120 ++int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
2121 ++ unsigned int *irqn)
2122 + {
2123 + struct mlx5_eq_table *table = &dev->priv.eq_table;
2124 + struct mlx5_eq *eq, *n;
2125 +diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
2126 +index 85b3326775b8..37640e11afa6 100644
2127 +--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
2128 ++++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
2129 +@@ -2107,7 +2107,7 @@ static int dwceqos_tx_frags(struct sk_buff *skb, struct net_local *lp,
2130 + dd = &lp->tx_descs[lp->tx_next];
2131 +
2132 + /* Set DMA Descriptor fields */
2133 +- dd->des0 = dma_handle;
2134 ++ dd->des0 = dma_handle + consumed_size;
2135 + dd->des1 = 0;
2136 + dd->des2 = dma_size;
2137 +
2138 +diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
2139 +index ed00446759b2..9a863c6a6a33 100644
2140 +--- a/drivers/net/ppp/ppp_generic.c
2141 ++++ b/drivers/net/ppp/ppp_generic.c
2142 +@@ -721,10 +721,8 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2143 + val &= 0xffff;
2144 + }
2145 + vj = slhc_init(val2+1, val+1);
2146 +- if (!vj) {
2147 +- netdev_err(ppp->dev,
2148 +- "PPP: no memory (VJ compressor)\n");
2149 +- err = -ENOMEM;
2150 ++ if (IS_ERR(vj)) {
2151 ++ err = PTR_ERR(vj);
2152 + break;
2153 + }
2154 + ppp_lock(ppp);
2155 +diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
2156 +index 079f7adfcde5..27ed25252aac 100644
2157 +--- a/drivers/net/slip/slhc.c
2158 ++++ b/drivers/net/slip/slhc.c
2159 +@@ -84,8 +84,9 @@ static long decode(unsigned char **cpp);
2160 + static unsigned char * put16(unsigned char *cp, unsigned short x);
2161 + static unsigned short pull16(unsigned char **cpp);
2162 +
2163 +-/* Initialize compression data structure
2164 ++/* Allocate compression data structure
2165 + * slots must be in range 0 to 255 (zero meaning no compression)
2166 ++ * Returns pointer to structure or ERR_PTR() on error.
2167 + */
2168 + struct slcompress *
2169 + slhc_init(int rslots, int tslots)
2170 +@@ -94,11 +95,14 @@ slhc_init(int rslots, int tslots)
2171 + register struct cstate *ts;
2172 + struct slcompress *comp;
2173 +
2174 ++ if (rslots < 0 || rslots > 255 || tslots < 0 || tslots > 255)
2175 ++ return ERR_PTR(-EINVAL);
2176 ++
2177 + comp = kzalloc(sizeof(struct slcompress), GFP_KERNEL);
2178 + if (! comp)
2179 + goto out_fail;
2180 +
2181 +- if ( rslots > 0 && rslots < 256 ) {
2182 ++ if (rslots > 0) {
2183 + size_t rsize = rslots * sizeof(struct cstate);
2184 + comp->rstate = kzalloc(rsize, GFP_KERNEL);
2185 + if (! comp->rstate)
2186 +@@ -106,7 +110,7 @@ slhc_init(int rslots, int tslots)
2187 + comp->rslot_limit = rslots - 1;
2188 + }
2189 +
2190 +- if ( tslots > 0 && tslots < 256 ) {
2191 ++ if (tslots > 0) {
2192 + size_t tsize = tslots * sizeof(struct cstate);
2193 + comp->tstate = kzalloc(tsize, GFP_KERNEL);
2194 + if (! comp->tstate)
2195 +@@ -141,7 +145,7 @@ out_free2:
2196 + out_free:
2197 + kfree(comp);
2198 + out_fail:
2199 +- return NULL;
2200 ++ return ERR_PTR(-ENOMEM);
2201 + }
2202 +
2203 +
2204 +diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
2205 +index 05387b1e2e95..a17d86a57734 100644
2206 +--- a/drivers/net/slip/slip.c
2207 ++++ b/drivers/net/slip/slip.c
2208 +@@ -164,7 +164,7 @@ static int sl_alloc_bufs(struct slip *sl, int mtu)
2209 + if (cbuff == NULL)
2210 + goto err_exit;
2211 + slcomp = slhc_init(16, 16);
2212 +- if (slcomp == NULL)
2213 ++ if (IS_ERR(slcomp))
2214 + goto err_exit;
2215 + #endif
2216 + spin_lock_bh(&sl->lock);
2217 +diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
2218 +index 651d35ea22c5..59fefca74263 100644
2219 +--- a/drivers/net/team/team.c
2220 ++++ b/drivers/net/team/team.c
2221 +@@ -1845,10 +1845,10 @@ static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
2222 + struct team *team = netdev_priv(dev);
2223 + struct team_port *port;
2224 +
2225 +- rcu_read_lock();
2226 +- list_for_each_entry_rcu(port, &team->port_list, list)
2227 ++ mutex_lock(&team->lock);
2228 ++ list_for_each_entry(port, &team->port_list, list)
2229 + vlan_vid_del(port->dev, proto, vid);
2230 +- rcu_read_unlock();
2231 ++ mutex_unlock(&team->lock);
2232 +
2233 + return 0;
2234 + }
2235 +diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
2236 +index b6ea6ff7fb7b..d87b4acdfa5b 100644
2237 +--- a/drivers/net/usb/cdc_mbim.c
2238 ++++ b/drivers/net/usb/cdc_mbim.c
2239 +@@ -100,7 +100,7 @@ static const struct net_device_ops cdc_mbim_netdev_ops = {
2240 + .ndo_stop = usbnet_stop,
2241 + .ndo_start_xmit = usbnet_start_xmit,
2242 + .ndo_tx_timeout = usbnet_tx_timeout,
2243 +- .ndo_change_mtu = usbnet_change_mtu,
2244 ++ .ndo_change_mtu = cdc_ncm_change_mtu,
2245 + .ndo_set_mac_address = eth_mac_addr,
2246 + .ndo_validate_addr = eth_validate_addr,
2247 + .ndo_vlan_rx_add_vid = cdc_mbim_rx_add_vid,
2248 +diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
2249 +index fa41a6d2a3e5..e278a7a4956d 100644
2250 +--- a/drivers/net/usb/cdc_ncm.c
2251 ++++ b/drivers/net/usb/cdc_ncm.c
2252 +@@ -41,6 +41,7 @@
2253 + #include <linux/module.h>
2254 + #include <linux/netdevice.h>
2255 + #include <linux/ctype.h>
2256 ++#include <linux/etherdevice.h>
2257 + #include <linux/ethtool.h>
2258 + #include <linux/workqueue.h>
2259 + #include <linux/mii.h>
2260 +@@ -689,6 +690,33 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
2261 + kfree(ctx);
2262 + }
2263 +
2264 ++/* we need to override the usbnet change_mtu ndo for two reasons:
2265 ++ * - respect the negotiated maximum datagram size
2266 ++ * - avoid unwanted changes to rx and tx buffers
2267 ++ */
2268 ++int cdc_ncm_change_mtu(struct net_device *net, int new_mtu)
2269 ++{
2270 ++ struct usbnet *dev = netdev_priv(net);
2271 ++ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
2272 ++ int maxmtu = ctx->max_datagram_size - cdc_ncm_eth_hlen(dev);
2273 ++
2274 ++ if (new_mtu <= 0 || new_mtu > maxmtu)
2275 ++ return -EINVAL;
2276 ++ net->mtu = new_mtu;
2277 ++ return 0;
2278 ++}
2279 ++EXPORT_SYMBOL_GPL(cdc_ncm_change_mtu);
2280 ++
2281 ++static const struct net_device_ops cdc_ncm_netdev_ops = {
2282 ++ .ndo_open = usbnet_open,
2283 ++ .ndo_stop = usbnet_stop,
2284 ++ .ndo_start_xmit = usbnet_start_xmit,
2285 ++ .ndo_tx_timeout = usbnet_tx_timeout,
2286 ++ .ndo_change_mtu = cdc_ncm_change_mtu,
2287 ++ .ndo_set_mac_address = eth_mac_addr,
2288 ++ .ndo_validate_addr = eth_validate_addr,
2289 ++};
2290 ++
2291 + int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags)
2292 + {
2293 + const struct usb_cdc_union_desc *union_desc = NULL;
2294 +@@ -874,6 +902,9 @@ advance:
2295 + /* add our sysfs attrs */
2296 + dev->net->sysfs_groups[0] = &cdc_ncm_sysfs_attr_group;
2297 +
2298 ++ /* must handle MTU changes */
2299 ++ dev->net->netdev_ops = &cdc_ncm_netdev_ops;
2300 ++
2301 + return 0;
2302 +
2303 + error2:
2304 +diff --git a/drivers/net/veth.c b/drivers/net/veth.c
2305 +index 0ef4a5ad5557..ba21d072be31 100644
2306 +--- a/drivers/net/veth.c
2307 ++++ b/drivers/net/veth.c
2308 +@@ -117,12 +117,6 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
2309 + kfree_skb(skb);
2310 + goto drop;
2311 + }
2312 +- /* don't change ip_summed == CHECKSUM_PARTIAL, as that
2313 +- * will cause bad checksum on forwarded packets
2314 +- */
2315 +- if (skb->ip_summed == CHECKSUM_NONE &&
2316 +- rcv->features & NETIF_F_RXCSUM)
2317 +- skb->ip_summed = CHECKSUM_UNNECESSARY;
2318 +
2319 + if (likely(dev_forward_skb(rcv, skb) == NET_RX_SUCCESS)) {
2320 + struct pcpu_vstats *stats = this_cpu_ptr(dev->vstats);
2321 +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
2322 +index c1587ece28cf..40b5f8af47a3 100644
2323 +--- a/drivers/net/vxlan.c
2324 ++++ b/drivers/net/vxlan.c
2325 +@@ -2660,7 +2660,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
2326 + struct vxlan_config *conf)
2327 + {
2328 + struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
2329 +- struct vxlan_dev *vxlan = netdev_priv(dev);
2330 ++ struct vxlan_dev *vxlan = netdev_priv(dev), *tmp;
2331 + struct vxlan_rdst *dst = &vxlan->default_dst;
2332 + int err;
2333 + bool use_ipv6 = false;
2334 +@@ -2725,9 +2725,15 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
2335 + if (!vxlan->cfg.age_interval)
2336 + vxlan->cfg.age_interval = FDB_AGE_DEFAULT;
2337 +
2338 +- if (vxlan_find_vni(src_net, conf->vni, use_ipv6 ? AF_INET6 : AF_INET,
2339 +- vxlan->cfg.dst_port, vxlan->flags))
2340 ++ list_for_each_entry(tmp, &vn->vxlan_list, next) {
2341 ++ if (tmp->cfg.vni == conf->vni &&
2342 ++ (tmp->default_dst.remote_ip.sa.sa_family == AF_INET6 ||
2343 ++ tmp->cfg.saddr.sa.sa_family == AF_INET6) == use_ipv6 &&
2344 ++ tmp->cfg.dst_port == vxlan->cfg.dst_port &&
2345 ++ (tmp->flags & VXLAN_F_RCV_FLAGS) ==
2346 ++ (vxlan->flags & VXLAN_F_RCV_FLAGS))
2347 + return -EEXIST;
2348 ++ }
2349 +
2350 + dev->ethtool_ops = &vxlan_ethtool_ops;
2351 +
2352 +diff --git a/drivers/parisc/iommu-helpers.h b/drivers/parisc/iommu-helpers.h
2353 +index 761e77bfce5d..e56f1569f6c3 100644
2354 +--- a/drivers/parisc/iommu-helpers.h
2355 ++++ b/drivers/parisc/iommu-helpers.h
2356 +@@ -104,7 +104,11 @@ iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
2357 + struct scatterlist *contig_sg; /* contig chunk head */
2358 + unsigned long dma_offset, dma_len; /* start/len of DMA stream */
2359 + unsigned int n_mappings = 0;
2360 +- unsigned int max_seg_size = dma_get_max_seg_size(dev);
2361 ++ unsigned int max_seg_size = min(dma_get_max_seg_size(dev),
2362 ++ (unsigned)DMA_CHUNK_SIZE);
2363 ++ unsigned int max_seg_boundary = dma_get_seg_boundary(dev) + 1;
2364 ++ if (max_seg_boundary) /* check if the addition above didn't overflow */
2365 ++ max_seg_size = min(max_seg_size, max_seg_boundary);
2366 +
2367 + while (nents > 0) {
2368 +
2369 +@@ -138,14 +142,11 @@ iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
2370 +
2371 + /*
2372 + ** First make sure current dma stream won't
2373 +- ** exceed DMA_CHUNK_SIZE if we coalesce the
2374 ++ ** exceed max_seg_size if we coalesce the
2375 + ** next entry.
2376 + */
2377 +- if(unlikely(ALIGN(dma_len + dma_offset + startsg->length,
2378 +- IOVP_SIZE) > DMA_CHUNK_SIZE))
2379 +- break;
2380 +-
2381 +- if (startsg->length + dma_len > max_seg_size)
2382 ++ if (unlikely(ALIGN(dma_len + dma_offset + startsg->length, IOVP_SIZE) >
2383 ++ max_seg_size))
2384 + break;
2385 +
2386 + /*
2387 +diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
2388 +index 27bd170c3a28..ef2c5e032f10 100644
2389 +--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
2390 ++++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
2391 +@@ -1268,6 +1268,7 @@ static int
2392 + echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
2393 + {
2394 + struct lov_stripe_md *ulsm = _ulsm;
2395 ++ struct lov_oinfo **p;
2396 + int nob, i;
2397 +
2398 + nob = offsetof(struct lov_stripe_md, lsm_oinfo[lsm->lsm_stripe_count]);
2399 +@@ -1277,9 +1278,10 @@ echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
2400 + if (copy_to_user(ulsm, lsm, sizeof(*ulsm)))
2401 + return -EFAULT;
2402 +
2403 +- for (i = 0; i < lsm->lsm_stripe_count; i++) {
2404 +- if (copy_to_user(ulsm->lsm_oinfo[i], lsm->lsm_oinfo[i],
2405 +- sizeof(lsm->lsm_oinfo[0])))
2406 ++ for (i = 0, p = lsm->lsm_oinfo; i < lsm->lsm_stripe_count; i++, p++) {
2407 ++ struct lov_oinfo __user *up;
2408 ++ if (get_user(up, ulsm->lsm_oinfo + i) ||
2409 ++ copy_to_user(up, *p, sizeof(struct lov_oinfo)))
2410 + return -EFAULT;
2411 + }
2412 + return 0;
2413 +@@ -1287,9 +1289,10 @@ echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
2414 +
2415 + static int
2416 + echo_copyin_lsm(struct echo_device *ed, struct lov_stripe_md *lsm,
2417 +- void *ulsm, int ulsm_nob)
2418 ++ struct lov_stripe_md __user *ulsm, int ulsm_nob)
2419 + {
2420 + struct echo_client_obd *ec = ed->ed_ec;
2421 ++ struct lov_oinfo **p;
2422 + int i;
2423 +
2424 + if (ulsm_nob < sizeof(*lsm))
2425 +@@ -1305,11 +1308,10 @@ echo_copyin_lsm(struct echo_device *ed, struct lov_stripe_md *lsm,
2426 + return -EINVAL;
2427 +
2428 +
2429 +- for (i = 0; i < lsm->lsm_stripe_count; i++) {
2430 +- if (copy_from_user(lsm->lsm_oinfo[i],
2431 +- ((struct lov_stripe_md *)ulsm)-> \
2432 +- lsm_oinfo[i],
2433 +- sizeof(lsm->lsm_oinfo[0])))
2434 ++ for (i = 0, p = lsm->lsm_oinfo; i < lsm->lsm_stripe_count; i++, p++) {
2435 ++ struct lov_oinfo __user *up;
2436 ++ if (get_user(up, ulsm->lsm_oinfo + i) ||
2437 ++ copy_from_user(*p, up, sizeof(struct lov_oinfo)))
2438 + return -EFAULT;
2439 + }
2440 + return 0;
2441 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
2442 +index 522f766a7d07..62084335a608 100644
2443 +--- a/drivers/usb/core/hub.c
2444 ++++ b/drivers/usb/core/hub.c
2445 +@@ -1035,10 +1035,20 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
2446 + unsigned delay;
2447 +
2448 + /* Continue a partial initialization */
2449 +- if (type == HUB_INIT2)
2450 +- goto init2;
2451 +- if (type == HUB_INIT3)
2452 ++ if (type == HUB_INIT2 || type == HUB_INIT3) {
2453 ++ device_lock(hub->intfdev);
2454 ++
2455 ++ /* Was the hub disconnected while we were waiting? */
2456 ++ if (hub->disconnected) {
2457 ++ device_unlock(hub->intfdev);
2458 ++ kref_put(&hub->kref, hub_release);
2459 ++ return;
2460 ++ }
2461 ++ if (type == HUB_INIT2)
2462 ++ goto init2;
2463 + goto init3;
2464 ++ }
2465 ++ kref_get(&hub->kref);
2466 +
2467 + /* The superspeed hub except for root hub has to use Hub Depth
2468 + * value as an offset into the route string to locate the bits
2469 +@@ -1236,6 +1246,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
2470 + queue_delayed_work(system_power_efficient_wq,
2471 + &hub->init_work,
2472 + msecs_to_jiffies(delay));
2473 ++ device_unlock(hub->intfdev);
2474 + return; /* Continues at init3: below */
2475 + } else {
2476 + msleep(delay);
2477 +@@ -1257,6 +1268,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
2478 + /* Allow autosuspend if it was suppressed */
2479 + if (type <= HUB_INIT3)
2480 + usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));
2481 ++
2482 ++ if (type == HUB_INIT2 || type == HUB_INIT3)
2483 ++ device_unlock(hub->intfdev);
2484 ++
2485 ++ kref_put(&hub->kref, hub_release);
2486 + }
2487 +
2488 + /* Implement the continuations for the delays above */
2489 +@@ -3870,17 +3886,30 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
2490 + return;
2491 + }
2492 +
2493 +- if (usb_set_lpm_timeout(udev, state, timeout))
2494 ++ if (usb_set_lpm_timeout(udev, state, timeout)) {
2495 + /* If we can't set the parent hub U1/U2 timeout,
2496 + * device-initiated LPM won't be allowed either, so let the xHCI
2497 + * host know that this link state won't be enabled.
2498 + */
2499 + hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state);
2500 ++ } else {
2501 ++ /* Only a configured device will accept the Set Feature
2502 ++ * U1/U2_ENABLE
2503 ++ */
2504 ++ if (udev->actconfig)
2505 ++ usb_set_device_initiated_lpm(udev, state, true);
2506 +
2507 +- /* Only a configured device will accept the Set Feature U1/U2_ENABLE */
2508 +- else if (udev->actconfig)
2509 +- usb_set_device_initiated_lpm(udev, state, true);
2510 +-
2511 ++ /* As soon as usb_set_lpm_timeout(timeout) returns 0, the
2512 ++ * hub-initiated LPM is enabled. Thus, LPM is enabled no
2513 ++ * matter the result of usb_set_device_initiated_lpm().
2514 ++ * The only difference is whether device is able to initiate
2515 ++ * LPM.
2516 ++ */
2517 ++ if (state == USB3_LPM_U1)
2518 ++ udev->usb3_lpm_u1_enabled = 1;
2519 ++ else if (state == USB3_LPM_U2)
2520 ++ udev->usb3_lpm_u2_enabled = 1;
2521 ++ }
2522 + }
2523 +
2524 + /*
2525 +@@ -3920,6 +3949,18 @@ static int usb_disable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
2526 + dev_warn(&udev->dev, "Could not disable xHCI %s timeout, "
2527 + "bus schedule bandwidth may be impacted.\n",
2528 + usb3_lpm_names[state]);
2529 ++
2530 ++ /* As soon as usb_set_lpm_timeout(0) return 0, hub initiated LPM
2531 ++ * is disabled. Hub will disallows link to enter U1/U2 as well,
2532 ++ * even device is initiating LPM. Hence LPM is disabled if hub LPM
2533 ++ * timeout set to 0, no matter device-initiated LPM is disabled or
2534 ++ * not.
2535 ++ */
2536 ++ if (state == USB3_LPM_U1)
2537 ++ udev->usb3_lpm_u1_enabled = 0;
2538 ++ else if (state == USB3_LPM_U2)
2539 ++ udev->usb3_lpm_u2_enabled = 0;
2540 ++
2541 + return 0;
2542 + }
2543 +
2544 +@@ -3954,8 +3995,6 @@ int usb_disable_lpm(struct usb_device *udev)
2545 + if (usb_disable_link_state(hcd, udev, USB3_LPM_U2))
2546 + goto enable_lpm;
2547 +
2548 +- udev->usb3_lpm_enabled = 0;
2549 +-
2550 + return 0;
2551 +
2552 + enable_lpm:
2553 +@@ -4013