Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Sat, 21 Sep 2019 15:57:46
Message-Id: 1569081445.aec61ff0dfcae7b39a0bb0d68fbe2b6c23d93db9.mpagano@gentoo
1 commit: aec61ff0dfcae7b39a0bb0d68fbe2b6c23d93db9
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Sat Sep 21 15:57:25 2019 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Sat Sep 21 15:57:25 2019 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=aec61ff0
7
8 Linux patch 4.9.194
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1193_linux-4.9.194.patch | 1922 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 1926 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 5442280..97e4a0d 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -815,6 +815,10 @@ Patch: 1192_linux-4.9.193.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.9.193
23
24 +Patch: 1193_linux-4.9.194.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.9.194
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1193_linux-4.9.194.patch b/1193_linux-4.9.194.patch
33 new file mode 100644
34 index 0000000..a49275b
35 --- /dev/null
36 +++ b/1193_linux-4.9.194.patch
37 @@ -0,0 +1,1922 @@
38 +diff --git a/Makefile b/Makefile
39 +index 48f79c6729ad..6e3c81c3bf40 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,6 +1,6 @@
43 + VERSION = 4
44 + PATCHLEVEL = 9
45 +-SUBLEVEL = 193
46 ++SUBLEVEL = 194
47 + EXTRAVERSION =
48 + NAME = Roaring Lionus
49 +
50 +diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c
51 +index 2fb0cd39a31c..cd6e3615e3d1 100644
52 +--- a/arch/arc/kernel/traps.c
53 ++++ b/arch/arc/kernel/traps.c
54 +@@ -163,3 +163,4 @@ void abort(void)
55 + {
56 + __asm__ __volatile__("trap_s 5\n");
57 + }
58 ++EXPORT_SYMBOL(abort);
59 +diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
60 +index cf65ab8bb004..e5dcbda20129 100644
61 +--- a/arch/arm/mach-omap2/omap4-common.c
62 ++++ b/arch/arm/mach-omap2/omap4-common.c
63 +@@ -131,6 +131,9 @@ static int __init omap4_sram_init(void)
64 + struct device_node *np;
65 + struct gen_pool *sram_pool;
66 +
67 ++ if (!soc_is_omap44xx() && !soc_is_omap54xx())
68 ++ return 0;
69 ++
70 + np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
71 + if (!np)
72 + pr_warn("%s:Unable to allocate sram needed to handle errata I688\n",
73 +diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
74 +index 1ab7096af8e2..f850fc3a91e8 100644
75 +--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
76 ++++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
77 +@@ -387,7 +387,8 @@ static struct omap_hwmod dra7xx_dcan2_hwmod = {
78 + static struct omap_hwmod_class_sysconfig dra7xx_epwmss_sysc = {
79 + .rev_offs = 0x0,
80 + .sysc_offs = 0x4,
81 +- .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET,
82 ++ .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
83 ++ SYSC_HAS_RESET_STATUS,
84 + .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
85 + .sysc_fields = &omap_hwmod_sysc_type2,
86 + };
87 +diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
88 +index 1565d6b67163..0fe4a7025e46 100644
89 +--- a/arch/arm/mm/init.c
90 ++++ b/arch/arm/mm/init.c
91 +@@ -192,6 +192,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
92 + #ifdef CONFIG_HAVE_ARCH_PFN_VALID
93 + int pfn_valid(unsigned long pfn)
94 + {
95 ++ phys_addr_t addr = __pfn_to_phys(pfn);
96 ++
97 ++ if (__phys_to_pfn(addr) != pfn)
98 ++ return 0;
99 ++
100 + return memblock_is_map_memory(__pfn_to_phys(pfn));
101 + }
102 + EXPORT_SYMBOL(pfn_valid);
103 +@@ -698,7 +703,8 @@ static void update_sections_early(struct section_perm perms[], int n)
104 + if (t->flags & PF_KTHREAD)
105 + continue;
106 + for_each_thread(t, s)
107 +- set_section_perms(perms, n, true, s->mm);
108 ++ if (s->mm)
109 ++ set_section_perms(perms, n, true, s->mm);
110 + }
111 + read_unlock(&tasklist_lock);
112 + set_section_perms(perms, n, true, current->active_mm);
113 +diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
114 +index 6cd230434f32..92bcde046b6b 100644
115 +--- a/arch/mips/Kconfig
116 ++++ b/arch/mips/Kconfig
117 +@@ -792,7 +792,6 @@ config SIBYTE_SWARM
118 + select SYS_SUPPORTS_HIGHMEM
119 + select SYS_SUPPORTS_LITTLE_ENDIAN
120 + select ZONE_DMA32 if 64BIT
121 +- select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
122 +
123 + config SIBYTE_LITTLESUR
124 + bool "Sibyte BCM91250C2-LittleSur"
125 +@@ -815,7 +814,6 @@ config SIBYTE_SENTOSA
126 + select SYS_HAS_CPU_SB1
127 + select SYS_SUPPORTS_BIG_ENDIAN
128 + select SYS_SUPPORTS_LITTLE_ENDIAN
129 +- select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
130 +
131 + config SIBYTE_BIGSUR
132 + bool "Sibyte BCM91480B-BigSur"
133 +@@ -829,7 +827,6 @@ config SIBYTE_BIGSUR
134 + select SYS_SUPPORTS_HIGHMEM
135 + select SYS_SUPPORTS_LITTLE_ENDIAN
136 + select ZONE_DMA32 if 64BIT
137 +- select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
138 +
139 + config SNI_RM
140 + bool "SNI RM200/300/400"
141 +diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
142 +index 060f23ff1817..258158c34df1 100644
143 +--- a/arch/mips/include/asm/smp.h
144 ++++ b/arch/mips/include/asm/smp.h
145 +@@ -25,7 +25,17 @@ extern cpumask_t cpu_sibling_map[];
146 + extern cpumask_t cpu_core_map[];
147 + extern cpumask_t cpu_foreign_map[];
148 +
149 +-#define raw_smp_processor_id() (current_thread_info()->cpu)
150 ++static inline int raw_smp_processor_id(void)
151 ++{
152 ++#if defined(__VDSO__)
153 ++ extern int vdso_smp_processor_id(void)
154 ++ __compiletime_error("VDSO should not call smp_processor_id()");
155 ++ return vdso_smp_processor_id();
156 ++#else
157 ++ return current_thread_info()->cpu;
158 ++#endif
159 ++}
160 ++#define raw_smp_processor_id raw_smp_processor_id
161 +
162 + /* Map from cpu id to sequential logical cpu number. This will only
163 + not be idempotent when cpus failed to come on-line. */
164 +diff --git a/arch/mips/sibyte/common/Makefile b/arch/mips/sibyte/common/Makefile
165 +index 3ef3fb658136..b3d6bf23a662 100644
166 +--- a/arch/mips/sibyte/common/Makefile
167 ++++ b/arch/mips/sibyte/common/Makefile
168 +@@ -1,5 +1,4 @@
169 + obj-y := cfe.o
170 +-obj-$(CONFIG_SWIOTLB) += dma.o
171 + obj-$(CONFIG_SIBYTE_BUS_WATCHER) += bus_watcher.o
172 + obj-$(CONFIG_SIBYTE_CFE_CONSOLE) += cfe_console.o
173 + obj-$(CONFIG_SIBYTE_TBPROF) += sb_tbprof.o
174 +diff --git a/arch/mips/sibyte/common/dma.c b/arch/mips/sibyte/common/dma.c
175 +deleted file mode 100644
176 +index eb47a94f3583..000000000000
177 +--- a/arch/mips/sibyte/common/dma.c
178 ++++ /dev/null
179 +@@ -1,14 +0,0 @@
180 +-// SPDX-License-Identifier: GPL-2.0+
181 +-/*
182 +- * DMA support for Broadcom SiByte platforms.
183 +- *
184 +- * Copyright (c) 2018 Maciej W. Rozycki
185 +- */
186 +-
187 +-#include <linux/swiotlb.h>
188 +-#include <asm/bootinfo.h>
189 +-
190 +-void __init plat_swiotlb_setup(void)
191 +-{
192 +- swiotlb_init(1);
193 +-}
194 +diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
195 +index 0b845cc7fbdc..247ca2e9add9 100644
196 +--- a/arch/mips/vdso/Makefile
197 ++++ b/arch/mips/vdso/Makefile
198 +@@ -6,7 +6,9 @@ ccflags-vdso := \
199 + $(filter -I%,$(KBUILD_CFLAGS)) \
200 + $(filter -E%,$(KBUILD_CFLAGS)) \
201 + $(filter -mmicromips,$(KBUILD_CFLAGS)) \
202 +- $(filter -march=%,$(KBUILD_CFLAGS))
203 ++ $(filter -march=%,$(KBUILD_CFLAGS)) \
204 ++ $(filter -m%-float,$(KBUILD_CFLAGS)) \
205 ++ -D__VDSO__
206 + cflags-vdso := $(ccflags-vdso) \
207 + $(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \
208 + -O2 -g -fPIC -fno-strict-aliasing -fno-common -fno-builtin -G 0 \
209 +diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
210 +index 44c33ee397a0..2525f23da4be 100644
211 +--- a/arch/powerpc/mm/pgtable-radix.c
212 ++++ b/arch/powerpc/mm/pgtable-radix.c
213 +@@ -287,14 +287,6 @@ void __init radix__early_init_devtree(void)
214 + mmu_psize_defs[MMU_PAGE_64K].shift = 16;
215 + mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
216 + found:
217 +-#ifdef CONFIG_SPARSEMEM_VMEMMAP
218 +- if (mmu_psize_defs[MMU_PAGE_2M].shift) {
219 +- /*
220 +- * map vmemmap using 2M if available
221 +- */
222 +- mmu_vmemmap_psize = MMU_PAGE_2M;
223 +- }
224 +-#endif /* CONFIG_SPARSEMEM_VMEMMAP */
225 + return;
226 + }
227 +
228 +@@ -337,7 +329,13 @@ void __init radix__early_init_mmu(void)
229 +
230 + #ifdef CONFIG_SPARSEMEM_VMEMMAP
231 + /* vmemmap mapping */
232 +- mmu_vmemmap_psize = mmu_virtual_psize;
233 ++ if (mmu_psize_defs[MMU_PAGE_2M].shift) {
234 ++ /*
235 ++ * map vmemmap using 2M if available
236 ++ */
237 ++ mmu_vmemmap_psize = MMU_PAGE_2M;
238 ++ } else
239 ++ mmu_vmemmap_psize = mmu_virtual_psize;
240 + #endif
241 + /*
242 + * initialize page table size
243 +diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
244 +index be4db07f70d3..95126d25aed5 100644
245 +--- a/arch/s390/kvm/interrupt.c
246 ++++ b/arch/s390/kvm/interrupt.c
247 +@@ -1652,6 +1652,16 @@ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
248 + case KVM_S390_MCHK:
249 + irq->u.mchk.mcic = s390int->parm64;
250 + break;
251 ++ case KVM_S390_INT_PFAULT_INIT:
252 ++ irq->u.ext.ext_params = s390int->parm;
253 ++ irq->u.ext.ext_params2 = s390int->parm64;
254 ++ break;
255 ++ case KVM_S390_RESTART:
256 ++ case KVM_S390_INT_CLOCK_COMP:
257 ++ case KVM_S390_INT_CPU_TIMER:
258 ++ break;
259 ++ default:
260 ++ return -EINVAL;
261 + }
262 + return 0;
263 + }
264 +diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
265 +index 07f571900676..ea20b60edde7 100644
266 +--- a/arch/s390/kvm/kvm-s390.c
267 ++++ b/arch/s390/kvm/kvm-s390.c
268 +@@ -3105,7 +3105,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
269 + }
270 + case KVM_S390_INTERRUPT: {
271 + struct kvm_s390_interrupt s390int;
272 +- struct kvm_s390_irq s390irq;
273 ++ struct kvm_s390_irq s390irq = {};
274 +
275 + r = -EFAULT;
276 + if (copy_from_user(&s390int, argp, sizeof(s390int)))
277 +diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
278 +index 896344b6e036..9b15a1dc6628 100644
279 +--- a/arch/s390/net/bpf_jit_comp.c
280 ++++ b/arch/s390/net/bpf_jit_comp.c
281 +@@ -881,7 +881,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
282 + break;
283 + case BPF_ALU64 | BPF_NEG: /* dst = -dst */
284 + /* lcgr %dst,%dst */
285 +- EMIT4(0xb9130000, dst_reg, dst_reg);
286 ++ EMIT4(0xb9030000, dst_reg, dst_reg);
287 + break;
288 + /*
289 + * BPF_FROM_BE/LE
290 +@@ -1062,8 +1062,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
291 + /* llgf %w1,map.max_entries(%b2) */
292 + EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
293 + offsetof(struct bpf_array, map.max_entries));
294 +- /* clgrj %b3,%w1,0xa,label0: if %b3 >= %w1 goto out */
295 +- EMIT6_PCREL_LABEL(0xec000000, 0x0065, BPF_REG_3,
296 ++ /* clrj %b3,%w1,0xa,label0: if (u32)%b3 >= (u32)%w1 goto out */
297 ++ EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3,
298 + REG_W1, 0, 0xa);
299 +
300 + /*
301 +@@ -1089,8 +1089,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
302 + * goto out;
303 + */
304 +
305 +- /* sllg %r1,%b3,3: %r1 = index * 8 */
306 +- EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, BPF_REG_3, REG_0, 3);
307 ++ /* llgfr %r1,%b3: %r1 = (u32) index */
308 ++ EMIT4(0xb9160000, REG_1, BPF_REG_3);
309 ++ /* sllg %r1,%r1,3: %r1 *= 8 */
310 ++ EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3);
311 + /* lg %r1,prog(%b2,%r1) */
312 + EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2,
313 + REG_1, offsetof(struct bpf_array, ptrs));
314 +diff --git a/arch/x86/Makefile b/arch/x86/Makefile
315 +index 2996a1d0a410..940ed27a6212 100644
316 +--- a/arch/x86/Makefile
317 ++++ b/arch/x86/Makefile
318 +@@ -38,6 +38,7 @@ REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -D__KERNEL__ \
319 +
320 + REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -ffreestanding)
321 + REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -fno-stack-protector)
322 ++REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member)
323 + REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
324 + export REALMODE_CFLAGS
325 +
326 +diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
327 +index fd4484ae3ffc..112e3c4636b4 100644
328 +--- a/arch/x86/events/amd/ibs.c
329 ++++ b/arch/x86/events/amd/ibs.c
330 +@@ -671,10 +671,17 @@ fail:
331 +
332 + throttle = perf_event_overflow(event, &data, &regs);
333 + out:
334 +- if (throttle)
335 ++ if (throttle) {
336 + perf_ibs_stop(event, 0);
337 +- else
338 +- perf_ibs_enable_event(perf_ibs, hwc, period >> 4);
339 ++ } else {
340 ++ period >>= 4;
341 ++
342 ++ if ((ibs_caps & IBS_CAPS_RDWROPCNT) &&
343 ++ (*config & IBS_OP_CNT_CTL))
344 ++ period |= *config & IBS_OP_CUR_CNT_RAND;
345 ++
346 ++ perf_ibs_enable_event(perf_ibs, hwc, period);
347 ++ }
348 +
349 + perf_event_update_userpage(event);
350 +
351 +diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
352 +index e98e238d3775..55e362f9dbfa 100644
353 +--- a/arch/x86/events/intel/core.c
354 ++++ b/arch/x86/events/intel/core.c
355 +@@ -3075,6 +3075,11 @@ static u64 bdw_limit_period(struct perf_event *event, u64 left)
356 + return left;
357 + }
358 +
359 ++static u64 nhm_limit_period(struct perf_event *event, u64 left)
360 ++{
361 ++ return max(left, 32ULL);
362 ++}
363 ++
364 + PMU_FORMAT_ATTR(event, "config:0-7" );
365 + PMU_FORMAT_ATTR(umask, "config:8-15" );
366 + PMU_FORMAT_ATTR(edge, "config:18" );
367 +@@ -3734,6 +3739,7 @@ __init int intel_pmu_init(void)
368 + x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
369 + x86_pmu.enable_all = intel_pmu_nhm_enable_all;
370 + x86_pmu.extra_regs = intel_nehalem_extra_regs;
371 ++ x86_pmu.limit_period = nhm_limit_period;
372 +
373 + x86_pmu.cpu_events = nhm_events_attrs;
374 +
375 +diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
376 +index 0232b5a2a2d9..588d8fbd1e6d 100644
377 +--- a/arch/x86/include/asm/bootparam_utils.h
378 ++++ b/arch/x86/include/asm/bootparam_utils.h
379 +@@ -71,6 +71,7 @@ static void sanitize_boot_params(struct boot_params *boot_params)
380 + BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
381 + BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
382 + BOOT_PARAM_PRESERVE(hdr),
383 ++ BOOT_PARAM_PRESERVE(e820_map),
384 + BOOT_PARAM_PRESERVE(eddbuf),
385 + };
386 +
387 +diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
388 +index f353061bba1d..81d5ea71bbe9 100644
389 +--- a/arch/x86/include/asm/perf_event.h
390 ++++ b/arch/x86/include/asm/perf_event.h
391 +@@ -200,16 +200,20 @@ struct x86_pmu_capability {
392 + #define IBSCTL_LVT_OFFSET_VALID (1ULL<<8)
393 + #define IBSCTL_LVT_OFFSET_MASK 0x0F
394 +
395 +-/* ibs fetch bits/masks */
396 ++/* IBS fetch bits/masks */
397 + #define IBS_FETCH_RAND_EN (1ULL<<57)
398 + #define IBS_FETCH_VAL (1ULL<<49)
399 + #define IBS_FETCH_ENABLE (1ULL<<48)
400 + #define IBS_FETCH_CNT 0xFFFF0000ULL
401 + #define IBS_FETCH_MAX_CNT 0x0000FFFFULL
402 +
403 +-/* ibs op bits/masks */
404 +-/* lower 4 bits of the current count are ignored: */
405 +-#define IBS_OP_CUR_CNT (0xFFFF0ULL<<32)
406 ++/*
407 ++ * IBS op bits/masks
408 ++ * The lower 7 bits of the current count are random bits
409 ++ * preloaded by hardware and ignored in software
410 ++ */
411 ++#define IBS_OP_CUR_CNT (0xFFF80ULL<<32)
412 ++#define IBS_OP_CUR_CNT_RAND (0x0007FULL<<32)
413 + #define IBS_OP_CNT_CTL (1ULL<<19)
414 + #define IBS_OP_VAL (1ULL<<18)
415 + #define IBS_OP_ENABLE (1ULL<<17)
416 +diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
417 +index 2177c7551ff7..9db8d8758ed3 100644
418 +--- a/arch/x86/include/asm/uaccess.h
419 ++++ b/arch/x86/include/asm/uaccess.h
420 +@@ -438,8 +438,10 @@ do { \
421 + ({ \
422 + int __gu_err; \
423 + __inttype(*(ptr)) __gu_val; \
424 ++ __typeof__(ptr) __gu_ptr = (ptr); \
425 ++ __typeof__(size) __gu_size = (size); \
426 + __uaccess_begin_nospec(); \
427 +- __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
428 ++ __get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err, -EFAULT); \
429 + __uaccess_end(); \
430 + (x) = (__force __typeof__(*(ptr)))__gu_val; \
431 + __builtin_expect(__gu_err, 0); \
432 +diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
433 +index d34629d70421..09dd95cabfc2 100644
434 +--- a/arch/x86/kernel/apic/io_apic.c
435 ++++ b/arch/x86/kernel/apic/io_apic.c
436 +@@ -2346,7 +2346,13 @@ unsigned int arch_dynirq_lower_bound(unsigned int from)
437 + * dmar_alloc_hwirq() may be called before setup_IO_APIC(), so use
438 + * gsi_top if ioapic_dynirq_base hasn't been initialized yet.
439 + */
440 +- return ioapic_initialized ? ioapic_dynirq_base : gsi_top;
441 ++ if (!ioapic_initialized)
442 ++ return gsi_top;
443 ++ /*
444 ++ * For DT enabled machines ioapic_dynirq_base is irrelevant and not
445 ++ * updated. So simply return @from if ioapic_dynirq_base == 0.
446 ++ */
447 ++ return ioapic_dynirq_base ? : from;
448 + }
449 +
450 + #ifdef CONFIG_X86_32
451 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
452 +index 8feb4f7e2e59..7ab13ad53a59 100644
453 +--- a/arch/x86/kvm/vmx.c
454 ++++ b/arch/x86/kvm/vmx.c
455 +@@ -7639,6 +7639,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
456 + unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
457 + u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
458 + gva_t gva = 0;
459 ++ struct x86_exception e;
460 +
461 + if (!nested_vmx_check_permission(vcpu) ||
462 + !nested_vmx_check_vmcs12(vcpu))
463 +@@ -7665,8 +7666,10 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
464 + vmx_instruction_info, true, &gva))
465 + return 1;
466 + /* _system ok, as nested_vmx_check_permission verified cpl=0 */
467 +- kvm_write_guest_virt_system(vcpu, gva, &field_value,
468 +- (is_long_mode(vcpu) ? 8 : 4), NULL);
469 ++ if (kvm_write_guest_virt_system(vcpu, gva, &field_value,
470 ++ (is_long_mode(vcpu) ? 8 : 4),
471 ++ NULL))
472 ++ kvm_inject_page_fault(vcpu, &e);
473 + }
474 +
475 + nested_vmx_succeed(vcpu);
476 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
477 +index bbecbf2b1f5e..aabfc141d2f1 100644
478 +--- a/arch/x86/kvm/x86.c
479 ++++ b/arch/x86/kvm/x86.c
480 +@@ -4620,6 +4620,13 @@ int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
481 + /* kvm_write_guest_virt_system can pull in tons of pages. */
482 + vcpu->arch.l1tf_flush_l1d = true;
483 +
484 ++ /*
485 ++ * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
486 ++ * is returned, but our callers are not ready for that and they blindly
487 ++ * call kvm_inject_page_fault. Ensure that they at least do not leak
488 ++ * uninitialized kernel stack memory into cr2 and error code.
489 ++ */
490 ++ memset(exception, 0, sizeof(*exception));
491 + return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
492 + PFERR_WRITE_MASK, exception);
493 + }
494 +diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig
495 +index 31c60101a69a..7fa840170151 100644
496 +--- a/drivers/atm/Kconfig
497 ++++ b/drivers/atm/Kconfig
498 +@@ -199,7 +199,7 @@ config ATM_NICSTAR_USE_SUNI
499 + make the card work).
500 +
501 + config ATM_NICSTAR_USE_IDT77105
502 +- bool "Use IDT77015 PHY driver (25Mbps)"
503 ++ bool "Use IDT77105 PHY driver (25Mbps)"
504 + depends on ATM_NICSTAR
505 + help
506 + Support for the PHYsical layer chip in ForeRunner LE25 cards. In
507 +diff --git a/drivers/base/core.c b/drivers/base/core.c
508 +index 901aec4bb01d..3dc483f00060 100644
509 +--- a/drivers/base/core.c
510 ++++ b/drivers/base/core.c
511 +@@ -857,12 +857,63 @@ static inline struct kobject *get_glue_dir(struct device *dev)
512 + */
513 + static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
514 + {
515 ++ unsigned int ref;
516 ++
517 + /* see if we live in a "glue" directory */
518 + if (!live_in_glue_dir(glue_dir, dev))
519 + return;
520 +
521 + mutex_lock(&gdp_mutex);
522 +- if (!kobject_has_children(glue_dir))
523 ++ /**
524 ++ * There is a race condition between removing glue directory
525 ++ * and adding a new device under the glue directory.
526 ++ *
527 ++ * CPU1: CPU2:
528 ++ *
529 ++ * device_add()
530 ++ * get_device_parent()
531 ++ * class_dir_create_and_add()
532 ++ * kobject_add_internal()
533 ++ * create_dir() // create glue_dir
534 ++ *
535 ++ * device_add()
536 ++ * get_device_parent()
537 ++ * kobject_get() // get glue_dir
538 ++ *
539 ++ * device_del()
540 ++ * cleanup_glue_dir()
541 ++ * kobject_del(glue_dir)
542 ++ *
543 ++ * kobject_add()
544 ++ * kobject_add_internal()
545 ++ * create_dir() // in glue_dir
546 ++ * sysfs_create_dir_ns()
547 ++ * kernfs_create_dir_ns(sd)
548 ++ *
549 ++ * sysfs_remove_dir() // glue_dir->sd=NULL
550 ++ * sysfs_put() // free glue_dir->sd
551 ++ *
552 ++ * // sd is freed
553 ++ * kernfs_new_node(sd)
554 ++ * kernfs_get(glue_dir)
555 ++ * kernfs_add_one()
556 ++ * kernfs_put()
557 ++ *
558 ++ * Before CPU1 remove last child device under glue dir, if CPU2 add
559 ++ * a new device under glue dir, the glue_dir kobject reference count
560 ++ * will be increase to 2 in kobject_get(k). And CPU2 has been called
561 ++ * kernfs_create_dir_ns(). Meanwhile, CPU1 call sysfs_remove_dir()
562 ++ * and sysfs_put(). This result in glue_dir->sd is freed.
563 ++ *
564 ++ * Then the CPU2 will see a stale "empty" but still potentially used
565 ++ * glue dir around in kernfs_new_node().
566 ++ *
567 ++ * In order to avoid this happening, we also should make sure that
568 ++ * kernfs_node for glue_dir is released in CPU1 only when refcount
569 ++ * for glue_dir kobj is 1.
570 ++ */
571 ++ ref = atomic_read(&glue_dir->kref.refcount);
572 ++ if (!kobject_has_children(glue_dir) && !--ref)
573 + kobject_del(glue_dir);
574 + kobject_put(glue_dir);
575 + mutex_unlock(&gdp_mutex);
576 +diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
577 +index 6930abef42b3..ece4f706b38f 100644
578 +--- a/drivers/block/floppy.c
579 ++++ b/drivers/block/floppy.c
580 +@@ -3784,7 +3784,7 @@ static int compat_getdrvprm(int drive,
581 + v.native_format = UDP->native_format;
582 + mutex_unlock(&floppy_mutex);
583 +
584 +- if (copy_from_user(arg, &v, sizeof(struct compat_floppy_drive_params)))
585 ++ if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_params)))
586 + return -EFAULT;
587 + return 0;
588 + }
589 +@@ -3820,7 +3820,7 @@ static int compat_getdrvstat(int drive, bool poll,
590 + v.bufblocks = UDRS->bufblocks;
591 + mutex_unlock(&floppy_mutex);
592 +
593 +- if (copy_from_user(arg, &v, sizeof(struct compat_floppy_drive_struct)))
594 ++ if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_struct)))
595 + return -EFAULT;
596 + return 0;
597 + Eintr:
598 +diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
599 +index fe7d9ed1d436..b0a18bc1a27f 100644
600 +--- a/drivers/clk/rockchip/clk-mmc-phase.c
601 ++++ b/drivers/clk/rockchip/clk-mmc-phase.c
602 +@@ -59,10 +59,8 @@ static int rockchip_mmc_get_phase(struct clk_hw *hw)
603 + u32 delay_num = 0;
604 +
605 + /* See the comment for rockchip_mmc_set_phase below */
606 +- if (!rate) {
607 +- pr_err("%s: invalid clk rate\n", __func__);
608 ++ if (!rate)
609 + return -EINVAL;
610 +- }
611 +
612 + raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);
613 +
614 +diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
615 +index ea8595d2c3d8..30f8bbe757b7 100644
616 +--- a/drivers/crypto/talitos.c
617 ++++ b/drivers/crypto/talitos.c
618 +@@ -943,11 +943,13 @@ static void talitos_sg_unmap(struct device *dev,
619 +
620 + static void ipsec_esp_unmap(struct device *dev,
621 + struct talitos_edesc *edesc,
622 +- struct aead_request *areq)
623 ++ struct aead_request *areq, bool encrypt)
624 + {
625 + struct crypto_aead *aead = crypto_aead_reqtfm(areq);
626 + struct talitos_ctx *ctx = crypto_aead_ctx(aead);
627 + unsigned int ivsize = crypto_aead_ivsize(aead);
628 ++ unsigned int authsize = crypto_aead_authsize(aead);
629 ++ unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
630 +
631 + if (edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP)
632 + unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
633 +@@ -956,7 +958,7 @@ static void ipsec_esp_unmap(struct device *dev,
634 + unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
635 + unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
636 +
637 +- talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
638 ++ talitos_sg_unmap(dev, edesc, areq->src, areq->dst, cryptlen,
639 + areq->assoclen);
640 +
641 + if (edesc->dma_len)
642 +@@ -967,7 +969,7 @@ static void ipsec_esp_unmap(struct device *dev,
643 + unsigned int dst_nents = edesc->dst_nents ? : 1;
644 +
645 + sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
646 +- areq->assoclen + areq->cryptlen - ivsize);
647 ++ areq->assoclen + cryptlen - ivsize);
648 + }
649 + }
650 +
651 +@@ -988,7 +990,7 @@ static void ipsec_esp_encrypt_done(struct device *dev,
652 +
653 + edesc = container_of(desc, struct talitos_edesc, desc);
654 +
655 +- ipsec_esp_unmap(dev, edesc, areq);
656 ++ ipsec_esp_unmap(dev, edesc, areq, true);
657 +
658 + /* copy the generated ICV to dst */
659 + if (edesc->icv_ool) {
660 +@@ -1020,7 +1022,7 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
661 +
662 + edesc = container_of(desc, struct talitos_edesc, desc);
663 +
664 +- ipsec_esp_unmap(dev, edesc, req);
665 ++ ipsec_esp_unmap(dev, edesc, req, false);
666 +
667 + if (!err) {
668 + char icvdata[SHA512_DIGEST_SIZE];
669 +@@ -1066,7 +1068,7 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
670 +
671 + edesc = container_of(desc, struct talitos_edesc, desc);
672 +
673 +- ipsec_esp_unmap(dev, edesc, req);
674 ++ ipsec_esp_unmap(dev, edesc, req, false);
675 +
676 + /* check ICV auth status */
677 + if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
678 +@@ -1173,6 +1175,7 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
679 + * fill in and submit ipsec_esp descriptor
680 + */
681 + static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
682 ++ bool encrypt,
683 + void (*callback)(struct device *dev,
684 + struct talitos_desc *desc,
685 + void *context, int error))
686 +@@ -1182,7 +1185,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
687 + struct talitos_ctx *ctx = crypto_aead_ctx(aead);
688 + struct device *dev = ctx->dev;
689 + struct talitos_desc *desc = &edesc->desc;
690 +- unsigned int cryptlen = areq->cryptlen;
691 ++ unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
692 + unsigned int ivsize = crypto_aead_ivsize(aead);
693 + int tbl_off = 0;
694 + int sg_count, ret;
695 +@@ -1324,7 +1327,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
696 +
697 + ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
698 + if (ret != -EINPROGRESS) {
699 +- ipsec_esp_unmap(dev, edesc, areq);
700 ++ ipsec_esp_unmap(dev, edesc, areq, encrypt);
701 + kfree(edesc);
702 + }
703 + return ret;
704 +@@ -1433,9 +1436,10 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
705 + unsigned int authsize = crypto_aead_authsize(authenc);
706 + struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
707 + unsigned int ivsize = crypto_aead_ivsize(authenc);
708 ++ unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
709 +
710 + return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
711 +- iv, areq->assoclen, areq->cryptlen,
712 ++ iv, areq->assoclen, cryptlen,
713 + authsize, ivsize, icv_stashing,
714 + areq->base.flags, encrypt);
715 + }
716 +@@ -1454,7 +1458,7 @@ static int aead_encrypt(struct aead_request *req)
717 + /* set encrypt */
718 + edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
719 +
720 +- return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
721 ++ return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
722 + }
723 +
724 + static int aead_decrypt(struct aead_request *req)
725 +@@ -1466,14 +1470,13 @@ static int aead_decrypt(struct aead_request *req)
726 + struct talitos_edesc *edesc;
727 + void *icvdata;
728 +
729 +- req->cryptlen -= authsize;
730 +-
731 + /* allocate extended descriptor */
732 + edesc = aead_edesc_alloc(req, req->iv, 1, false);
733 + if (IS_ERR(edesc))
734 + return PTR_ERR(edesc);
735 +
736 +- if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
737 ++ if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
738 ++ (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
739 + ((!edesc->src_nents && !edesc->dst_nents) ||
740 + priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
741 +
742 +@@ -1485,7 +1488,8 @@ static int aead_decrypt(struct aead_request *req)
743 + /* reset integrity check result bits */
744 + edesc->desc.hdr_lo = 0;
745 +
746 +- return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
747 ++ return ipsec_esp(edesc, req, false,
748 ++ ipsec_esp_decrypt_hwauth_done);
749 + }
750 +
751 + /* Have to check the ICV with software */
752 +@@ -1501,7 +1505,7 @@ static int aead_decrypt(struct aead_request *req)
753 + sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
754 + req->assoclen + req->cryptlen - authsize);
755 +
756 +- return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
757 ++ return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
758 + }
759 +
760 + static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
761 +@@ -1528,6 +1532,18 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
762 + return 0;
763 + }
764 +
765 ++static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher,
766 ++ const u8 *key, unsigned int keylen)
767 ++{
768 ++ if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
769 ++ keylen == AES_KEYSIZE_256)
770 ++ return ablkcipher_setkey(cipher, key, keylen);
771 ++
772 ++ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
773 ++
774 ++ return -EINVAL;
775 ++}
776 ++
777 + static void common_nonsnoop_unmap(struct device *dev,
778 + struct talitos_edesc *edesc,
779 + struct ablkcipher_request *areq)
780 +@@ -1656,6 +1672,14 @@ static int ablkcipher_encrypt(struct ablkcipher_request *areq)
781 + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
782 + struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
783 + struct talitos_edesc *edesc;
784 ++ unsigned int blocksize =
785 ++ crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
786 ++
787 ++ if (!areq->nbytes)
788 ++ return 0;
789 ++
790 ++ if (areq->nbytes % blocksize)
791 ++ return -EINVAL;
792 +
793 + /* allocate extended descriptor */
794 + edesc = ablkcipher_edesc_alloc(areq, true);
795 +@@ -1673,6 +1697,14 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq)
796 + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
797 + struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
798 + struct talitos_edesc *edesc;
799 ++ unsigned int blocksize =
800 ++ crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
801 ++
802 ++ if (!areq->nbytes)
803 ++ return 0;
804 ++
805 ++ if (areq->nbytes % blocksize)
806 ++ return -EINVAL;
807 +
808 + /* allocate extended descriptor */
809 + edesc = ablkcipher_edesc_alloc(areq, false);
810 +@@ -2621,6 +2653,7 @@ static struct talitos_alg_template driver_algs[] = {
811 + .min_keysize = AES_MIN_KEY_SIZE,
812 + .max_keysize = AES_MAX_KEY_SIZE,
813 + .ivsize = AES_BLOCK_SIZE,
814 ++ .setkey = ablkcipher_aes_setkey,
815 + }
816 + },
817 + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
818 +@@ -2631,13 +2664,13 @@ static struct talitos_alg_template driver_algs[] = {
819 + .alg.crypto = {
820 + .cra_name = "ctr(aes)",
821 + .cra_driver_name = "ctr-aes-talitos",
822 +- .cra_blocksize = AES_BLOCK_SIZE,
823 ++ .cra_blocksize = 1,
824 + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
825 + CRYPTO_ALG_ASYNC,
826 + .cra_ablkcipher = {
827 + .min_keysize = AES_MIN_KEY_SIZE,
828 + .max_keysize = AES_MAX_KEY_SIZE,
829 +- .ivsize = AES_BLOCK_SIZE,
830 ++ .setkey = ablkcipher_aes_setkey,
831 + }
832 + },
833 + .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
834 +diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
835 +index 6b16ce390dce..9f901f16bddc 100644
836 +--- a/drivers/dma/omap-dma.c
837 ++++ b/drivers/dma/omap-dma.c
838 +@@ -1429,8 +1429,10 @@ static int omap_dma_probe(struct platform_device *pdev)
839 +
840 + rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
841 + IRQF_SHARED, "omap-dma-engine", od);
842 +- if (rc)
843 ++ if (rc) {
844 ++ omap_dma_free(od);
845 + return rc;
846 ++ }
847 + }
848 +
849 + if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123)
850 +diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
851 +index 8c3c588834d2..a7e1f6e17e3d 100644
852 +--- a/drivers/dma/ti-dma-crossbar.c
853 ++++ b/drivers/dma/ti-dma-crossbar.c
854 +@@ -395,8 +395,10 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
855 +
856 + ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events,
857 + nelm * 2);
858 +- if (ret)
859 ++ if (ret) {
860 ++ kfree(rsv_events);
861 + return ret;
862 ++ }
863 +
864 + for (i = 0; i < nelm; i++) {
865 + ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1],
866 +diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
867 +index 3b0d77b2fdc5..6008a30a17d0 100644
868 +--- a/drivers/gpio/gpiolib.c
869 ++++ b/drivers/gpio/gpiolib.c
870 +@@ -426,12 +426,23 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
871 + struct linehandle_state *lh;
872 + struct file *file;
873 + int fd, i, count = 0, ret;
874 ++ u32 lflags;
875 +
876 + if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
877 + return -EFAULT;
878 + if ((handlereq.lines == 0) || (handlereq.lines > GPIOHANDLES_MAX))
879 + return -EINVAL;
880 +
881 ++ lflags = handlereq.flags;
882 ++
883 ++ /*
884 ++ * Do not allow both INPUT & OUTPUT flags to be set as they are
885 ++ * contradictory.
886 ++ */
887 ++ if ((lflags & GPIOHANDLE_REQUEST_INPUT) &&
888 ++ (lflags & GPIOHANDLE_REQUEST_OUTPUT))
889 ++ return -EINVAL;
890 ++
891 + lh = kzalloc(sizeof(*lh), GFP_KERNEL);
892 + if (!lh)
893 + return -ENOMEM;
894 +@@ -452,7 +463,6 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
895 + /* Request each GPIO */
896 + for (i = 0; i < handlereq.lines; i++) {
897 + u32 offset = handlereq.lineoffsets[i];
898 +- u32 lflags = handlereq.flags;
899 + struct gpio_desc *desc;
900 +
901 + if (offset >= gdev->ngpio) {
902 +@@ -787,7 +797,9 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
903 + }
904 +
905 + /* This is just wrong: we don't look for events on output lines */
906 +- if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
907 ++ if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
908 ++ (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
909 ++ (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)) {
910 + ret = -EINVAL;
911 + goto out_free_label;
912 + }
913 +@@ -801,10 +813,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
914 +
915 + if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
916 + set_bit(FLAG_ACTIVE_LOW, &desc->flags);
917 +- if (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN)
918 +- set_bit(FLAG_OPEN_DRAIN, &desc->flags);
919 +- if (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)
920 +- set_bit(FLAG_OPEN_SOURCE, &desc->flags);
921 +
922 + ret = gpiod_direction_input(desc);
923 + if (ret)
924 +diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
925 +index 48dfc163233e..286587607931 100644
926 +--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
927 ++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
928 +@@ -423,12 +423,15 @@ static int mtk_drm_probe(struct platform_device *pdev)
929 + comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
930 + if (!comp) {
931 + ret = -ENOMEM;
932 ++ of_node_put(node);
933 + goto err_node;
934 + }
935 +
936 + ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL);
937 +- if (ret)
938 ++ if (ret) {
939 ++ of_node_put(node);
940 + goto err_node;
941 ++ }
942 +
943 + private->ddp_comp[comp_id] = comp;
944 + }
945 +diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
946 +index c1233d0288a0..dd7880de7e4e 100644
947 +--- a/drivers/iommu/amd_iommu.c
948 ++++ b/drivers/iommu/amd_iommu.c
949 +@@ -1321,18 +1321,21 @@ static void domain_flush_devices(struct protection_domain *domain)
950 + * another level increases the size of the address space by 9 bits to a size up
951 + * to 64 bits.
952 + */
953 +-static bool increase_address_space(struct protection_domain *domain,
954 ++static void increase_address_space(struct protection_domain *domain,
955 + gfp_t gfp)
956 + {
957 ++ unsigned long flags;
958 + u64 *pte;
959 +
960 +- if (domain->mode == PAGE_MODE_6_LEVEL)
961 ++ spin_lock_irqsave(&domain->lock, flags);
962 ++
963 ++ if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
964 + /* address space already 64 bit large */
965 +- return false;
966 ++ goto out;
967 +
968 + pte = (void *)get_zeroed_page(gfp);
969 + if (!pte)
970 +- return false;
971 ++ goto out;
972 +
973 + *pte = PM_LEVEL_PDE(domain->mode,
974 + virt_to_phys(domain->pt_root));
975 +@@ -1340,7 +1343,10 @@ static bool increase_address_space(struct protection_domain *domain,
976 + domain->mode += 1;
977 + domain->updated = true;
978 +
979 +- return true;
980 ++out:
981 ++ spin_unlock_irqrestore(&domain->lock, flags);
982 ++
983 ++ return;
984 + }
985 +
986 + static u64 *alloc_pte(struct protection_domain *domain,
987 +diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
988 +index 6a2df3297e77..691ad069444d 100644
989 +--- a/drivers/isdn/capi/capi.c
990 ++++ b/drivers/isdn/capi/capi.c
991 +@@ -687,6 +687,9 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
992 + if (!cdev->ap.applid)
993 + return -ENODEV;
994 +
995 ++ if (count < CAPIMSG_BASELEN)
996 ++ return -EINVAL;
997 ++
998 + skb = alloc_skb(count, GFP_USER);
999 + if (!skb)
1000 + return -ENOMEM;
1001 +@@ -697,7 +700,8 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
1002 + }
1003 + mlen = CAPIMSG_LEN(skb->data);
1004 + if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) {
1005 +- if ((size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) {
1006 ++ if (count < CAPI_DATA_B3_REQ_LEN ||
1007 ++ (size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) {
1008 + kfree_skb(skb);
1009 + return -EINVAL;
1010 + }
1011 +@@ -710,6 +714,10 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
1012 + CAPIMSG_SETAPPID(skb->data, cdev->ap.applid);
1013 +
1014 + if (CAPIMSG_CMD(skb->data) == CAPI_DISCONNECT_B3_RESP) {
1015 ++ if (count < CAPI_DISCONNECT_B3_RESP_LEN) {
1016 ++ kfree_skb(skb);
1017 ++ return -EINVAL;
1018 ++ }
1019 + mutex_lock(&cdev->lock);
1020 + capincci_free(cdev, CAPIMSG_NCCI(skb->data));
1021 + mutex_unlock(&cdev->lock);
1022 +diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
1023 +index 4706628a3ed5..10bccce22858 100644
1024 +--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
1025 ++++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
1026 +@@ -612,10 +612,9 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
1027 + static int technisat_usb2_get_ir(struct dvb_usb_device *d)
1028 + {
1029 + struct technisat_usb2_state *state = d->priv;
1030 +- u8 *buf = state->buf;
1031 +- u8 *b;
1032 +- int ret;
1033 + struct ir_raw_event ev;
1034 ++ u8 *buf = state->buf;
1035 ++ int i, ret;
1036 +
1037 + buf[0] = GET_IR_DATA_VENDOR_REQUEST;
1038 + buf[1] = 0x08;
1039 +@@ -651,26 +650,25 @@ unlock:
1040 + return 0; /* no key pressed */
1041 +
1042 + /* decoding */
1043 +- b = buf+1;
1044 +
1045 + #if 0
1046 + deb_rc("RC: %d ", ret);
1047 +- debug_dump(b, ret, deb_rc);
1048 ++ debug_dump(buf + 1, ret, deb_rc);
1049 + #endif
1050 +
1051 + ev.pulse = 0;
1052 +- while (1) {
1053 +- ev.pulse = !ev.pulse;
1054 +- ev.duration = (*b * FIRMWARE_CLOCK_DIVISOR * FIRMWARE_CLOCK_TICK) / 1000;
1055 +- ir_raw_event_store(d->rc_dev, &ev);
1056 +-
1057 +- b++;
1058 +- if (*b == 0xff) {
1059 ++ for (i = 1; i < ARRAY_SIZE(state->buf); i++) {
1060 ++ if (buf[i] == 0xff) {
1061 + ev.pulse = 0;
1062 + ev.duration = 888888*2;
1063 + ir_raw_event_store(d->rc_dev, &ev);
1064 + break;
1065 + }
1066 ++
1067 ++ ev.pulse = !ev.pulse;
1068 ++ ev.duration = (buf[i] * FIRMWARE_CLOCK_DIVISOR *
1069 ++ FIRMWARE_CLOCK_TICK) / 1000;
1070 ++ ir_raw_event_store(d->rc_dev, &ev);
1071 + }
1072 +
1073 + ir_raw_event_handle(d->rc_dev);
1074 +diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c
1075 +index ee88ae83230c..185c8079d0f9 100644
1076 +--- a/drivers/media/usb/tm6000/tm6000-dvb.c
1077 ++++ b/drivers/media/usb/tm6000/tm6000-dvb.c
1078 +@@ -111,6 +111,7 @@ static void tm6000_urb_received(struct urb *urb)
1079 + printk(KERN_ERR "tm6000: error %s\n", __func__);
1080 + kfree(urb->transfer_buffer);
1081 + usb_free_urb(urb);
1082 ++ dev->dvb->bulk_urb = NULL;
1083 + }
1084 + }
1085 + }
1086 +@@ -141,6 +142,7 @@ static int tm6000_start_stream(struct tm6000_core *dev)
1087 + dvb->bulk_urb->transfer_buffer = kzalloc(size, GFP_KERNEL);
1088 + if (dvb->bulk_urb->transfer_buffer == NULL) {
1089 + usb_free_urb(dvb->bulk_urb);
1090 ++ dvb->bulk_urb = NULL;
1091 + printk(KERN_ERR "tm6000: couldn't allocate transfer buffer!\n");
1092 + return -ENOMEM;
1093 + }
1094 +@@ -168,6 +170,7 @@ static int tm6000_start_stream(struct tm6000_core *dev)
1095 +
1096 + kfree(dvb->bulk_urb->transfer_buffer);
1097 + usb_free_urb(dvb->bulk_urb);
1098 ++ dvb->bulk_urb = NULL;
1099 + return ret;
1100 + }
1101 +
1102 +diff --git a/drivers/mtd/nand/mtk_nand.c b/drivers/mtd/nand/mtk_nand.c
1103 +index 5223a2182ee4..ca95ae00215e 100644
1104 +--- a/drivers/mtd/nand/mtk_nand.c
1105 ++++ b/drivers/mtd/nand/mtk_nand.c
1106 +@@ -810,19 +810,21 @@ static int mtk_nfc_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
1107 + return ret & NAND_STATUS_FAIL ? -EIO : 0;
1108 + }
1109 +
1110 +-static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors)
1111 ++static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 start,
1112 ++ u32 sectors)
1113 + {
1114 + struct nand_chip *chip = mtd_to_nand(mtd);
1115 + struct mtk_nfc *nfc = nand_get_controller_data(chip);
1116 + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
1117 + struct mtk_ecc_stats stats;
1118 ++ u32 reg_size = mtk_nand->fdm.reg_size;
1119 + int rc, i;
1120 +
1121 + rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE;
1122 + if (rc) {
1123 + memset(buf, 0xff, sectors * chip->ecc.size);
1124 + for (i = 0; i < sectors; i++)
1125 +- memset(oob_ptr(chip, i), 0xff, mtk_nand->fdm.reg_size);
1126 ++ memset(oob_ptr(chip, start + i), 0xff, reg_size);
1127 + return 0;
1128 + }
1129 +
1130 +@@ -842,7 +844,7 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1131 + u32 spare = mtk_nand->spare_per_sector;
1132 + u32 column, sectors, start, end, reg;
1133 + dma_addr_t addr;
1134 +- int bitflips;
1135 ++ int bitflips = 0;
1136 + size_t len;
1137 + u8 *buf;
1138 + int rc;
1139 +@@ -910,14 +912,11 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1140 + if (rc < 0) {
1141 + dev_err(nfc->dev, "subpage done timeout\n");
1142 + bitflips = -EIO;
1143 +- } else {
1144 +- bitflips = 0;
1145 +- if (!raw) {
1146 +- rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
1147 +- bitflips = rc < 0 ? -ETIMEDOUT :
1148 +- mtk_nfc_update_ecc_stats(mtd, buf, sectors);
1149 +- mtk_nfc_read_fdm(chip, start, sectors);
1150 +- }
1151 ++ } else if (!raw) {
1152 ++ rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
1153 ++ bitflips = rc < 0 ? -ETIMEDOUT :
1154 ++ mtk_nfc_update_ecc_stats(mtd, buf, start, sectors);
1155 ++ mtk_nfc_read_fdm(chip, start, sectors);
1156 + }
1157 +
1158 + dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);
1159 +diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
1160 +index 59dbecd19c93..49f692907a30 100644
1161 +--- a/drivers/net/ethernet/marvell/sky2.c
1162 ++++ b/drivers/net/ethernet/marvell/sky2.c
1163 +@@ -4946,6 +4946,13 @@ static const struct dmi_system_id msi_blacklist[] = {
1164 + DMI_MATCH(DMI_BOARD_NAME, "P6T"),
1165 + },
1166 + },
1167 ++ {
1168 ++ .ident = "ASUS P6X",
1169 ++ .matches = {
1170 ++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
1171 ++ DMI_MATCH(DMI_BOARD_NAME, "P6X"),
1172 ++ },
1173 ++ },
1174 + {}
1175 + };
1176 +
1177 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
1178 +index a769196628d9..708117fc6f73 100644
1179 +--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
1180 ++++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
1181 +@@ -958,7 +958,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
1182 + &drv_version);
1183 + if (rc) {
1184 + DP_NOTICE(cdev, "Failed sending drv version command\n");
1185 +- return rc;
1186 ++ goto err4;
1187 + }
1188 + }
1189 +
1190 +@@ -966,6 +966,8 @@ static int qed_slowpath_start(struct qed_dev *cdev,
1191 +
1192 + return 0;
1193 +
1194 ++err4:
1195 ++ qed_ll2_dealloc_if(cdev);
1196 + err3:
1197 + qed_hw_stop(cdev);
1198 + err2:
1199 +diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
1200 +index c2bd5378ffda..3527962f0bda 100644
1201 +--- a/drivers/net/ethernet/seeq/sgiseeq.c
1202 ++++ b/drivers/net/ethernet/seeq/sgiseeq.c
1203 +@@ -792,15 +792,16 @@ static int sgiseeq_probe(struct platform_device *pdev)
1204 + printk(KERN_ERR "Sgiseeq: Cannot register net device, "
1205 + "aborting.\n");
1206 + err = -ENODEV;
1207 +- goto err_out_free_page;
1208 ++ goto err_out_free_attrs;
1209 + }
1210 +
1211 + printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr);
1212 +
1213 + return 0;
1214 +
1215 +-err_out_free_page:
1216 +- free_page((unsigned long) sp->srings);
1217 ++err_out_free_attrs:
1218 ++ dma_free_attrs(&pdev->dev, sizeof(*sp->srings), sp->srings,
1219 ++ sp->srings_dma, DMA_ATTR_NON_CONSISTENT);
1220 + err_out_free_dev:
1221 + free_netdev(dev);
1222 +
1223 +diff --git a/drivers/net/tun.c b/drivers/net/tun.c
1224 +index 36136a147867..17be1f6a813f 100644
1225 +--- a/drivers/net/tun.c
1226 ++++ b/drivers/net/tun.c
1227 +@@ -627,7 +627,8 @@ static void tun_detach_all(struct net_device *dev)
1228 + module_put(THIS_MODULE);
1229 + }
1230 +
1231 +-static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filter)
1232 ++static int tun_attach(struct tun_struct *tun, struct file *file,
1233 ++ bool skip_filter, bool publish_tun)
1234 + {
1235 + struct tun_file *tfile = file->private_data;
1236 + struct net_device *dev = tun->dev;
1237 +@@ -669,7 +670,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
1238 +
1239 + tfile->queue_index = tun->numqueues;
1240 + tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
1241 +- rcu_assign_pointer(tfile->tun, tun);
1242 ++ if (publish_tun)
1243 ++ rcu_assign_pointer(tfile->tun, tun);
1244 + rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
1245 + tun->numqueues++;
1246 +
1247 +@@ -1751,7 +1753,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1248 + if (err < 0)
1249 + return err;
1250 +
1251 +- err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER);
1252 ++ err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER, true);
1253 + if (err < 0)
1254 + return err;
1255 +
1256 +@@ -1839,13 +1841,17 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1257 + NETIF_F_HW_VLAN_STAG_TX);
1258 +
1259 + INIT_LIST_HEAD(&tun->disabled);
1260 +- err = tun_attach(tun, file, false);
1261 ++ err = tun_attach(tun, file, false, false);
1262 + if (err < 0)
1263 + goto err_free_flow;
1264 +
1265 + err = register_netdevice(tun->dev);
1266 + if (err < 0)
1267 + goto err_detach;
1268 ++ /* free_netdev() won't check refcnt; to avoid a race
1269 ++ * with dev_put() we need to publish tun after registration.
1270 ++ */
1271 ++ rcu_assign_pointer(tfile->tun, tun);
1272 + }
1273 +
1274 + netif_carrier_on(tun->dev);
1275 +@@ -1989,7 +1995,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
1276 + ret = security_tun_dev_attach_queue(tun->security);
1277 + if (ret < 0)
1278 + goto unlock;
1279 +- ret = tun_attach(tun, file, false);
1280 ++ ret = tun_attach(tun, file, false, true);
1281 + } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
1282 + tun = rtnl_dereference(tfile->tun);
1283 + if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
1284 +diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
1285 +index 99424c87b464..8f03cc52ddda 100644
1286 +--- a/drivers/net/usb/cdc_ether.c
1287 ++++ b/drivers/net/usb/cdc_ether.c
1288 +@@ -212,9 +212,16 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
1289 + goto bad_desc;
1290 + }
1291 + skip:
1292 +- if ( rndis &&
1293 +- header.usb_cdc_acm_descriptor &&
1294 +- header.usb_cdc_acm_descriptor->bmCapabilities) {
1295 ++ /* Communication class functions with bmCapabilities are not
1296 ++ * RNDIS. But some Wireless class RNDIS functions use
1297 ++ * bmCapabilities for their own purpose. The failsafe is
1298 ++ * therefore applied only to Communication class RNDIS
1299 ++ * functions. The rndis test is redundant, but a cheap
1300 ++ * optimization.
1301 ++ */
1302 ++ if (rndis && is_rndis(&intf->cur_altsetting->desc) &&
1303 ++ header.usb_cdc_acm_descriptor &&
1304 ++ header.usb_cdc_acm_descriptor->bmCapabilities) {
1305 + dev_dbg(&intf->dev,
1306 + "ACM capabilities %02x, not really RNDIS?\n",
1307 + header.usb_cdc_acm_descriptor->bmCapabilities);
1308 +diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
1309 +index 02e29562d254..15dc70c11857 100644
1310 +--- a/drivers/net/usb/r8152.c
1311 ++++ b/drivers/net/usb/r8152.c
1312 +@@ -689,8 +689,11 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
1313 + ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0),
1314 + RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
1315 + value, index, tmp, size, 500);
1316 ++ if (ret < 0)
1317 ++ memset(data, 0xff, size);
1318 ++ else
1319 ++ memcpy(data, tmp, size);
1320 +
1321 +- memcpy(data, tmp, size);
1322 + kfree(tmp);
1323 +
1324 + return ret;
1325 +diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c
1326 +index 0f977dc556ca..c67e08fa1aaf 100644
1327 +--- a/drivers/net/wireless/marvell/mwifiex/ie.c
1328 ++++ b/drivers/net/wireless/marvell/mwifiex/ie.c
1329 +@@ -240,6 +240,9 @@ static int mwifiex_update_vs_ie(const u8 *ies, int ies_len,
1330 + }
1331 +
1332 + vs_ie = (struct ieee_types_header *)vendor_ie;
1333 ++ if (le16_to_cpu(ie->ie_length) + vs_ie->len + 2 >
1334 ++ IEEE_MAX_IE_SIZE)
1335 ++ return -EINVAL;
1336 + memcpy(ie->ie_buffer + le16_to_cpu(ie->ie_length),
1337 + vs_ie, vs_ie->len + 2);
1338 + le16_add_cpu(&ie->ie_length, vs_ie->len + 2);
1339 +diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
1340 +index a7e9f544f219..f2ef1464e20c 100644
1341 +--- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
1342 ++++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
1343 +@@ -287,6 +287,8 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
1344 +
1345 + rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len);
1346 + if (rate_ie) {
1347 ++ if (rate_ie->len > MWIFIEX_SUPPORTED_RATES)
1348 ++ return;
1349 + memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len);
1350 + rate_len = rate_ie->len;
1351 + }
1352 +@@ -294,8 +296,11 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
1353 + rate_ie = (void *)cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
1354 + params->beacon.tail,
1355 + params->beacon.tail_len);
1356 +- if (rate_ie)
1357 ++ if (rate_ie) {
1358 ++ if (rate_ie->len > MWIFIEX_SUPPORTED_RATES - rate_len)
1359 ++ return;
1360 + memcpy(bss_cfg->rates + rate_len, rate_ie + 1, rate_ie->len);
1361 ++ }
1362 +
1363 + return;
1364 + }
1365 +@@ -413,6 +418,8 @@ mwifiex_set_wmm_params(struct mwifiex_private *priv,
1366 + params->beacon.tail_len);
1367 + if (vendor_ie) {
1368 + wmm_ie = (struct ieee_types_header *)vendor_ie;
1369 ++ if (*(vendor_ie + 1) > sizeof(struct mwifiex_types_wmm_info))
1370 ++ return;
1371 + memcpy(&bss_cfg->wmm_info, wmm_ie + 1,
1372 + sizeof(bss_cfg->wmm_info));
1373 + priv->wmm_enabled = 1;
1374 +diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
1375 +index 14ceeaaa7fe5..c31c564b8eab 100644
1376 +--- a/drivers/net/xen-netfront.c
1377 ++++ b/drivers/net/xen-netfront.c
1378 +@@ -907,7 +907,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
1379 + __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
1380 + }
1381 + if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
1382 +- queue->rx.rsp_cons = ++cons;
1383 ++ queue->rx.rsp_cons = ++cons + skb_queue_len(list);
1384 + kfree_skb(nskb);
1385 + return ~0U;
1386 + }
1387 +diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
1388 +index bb2f79933b17..9ca24e4d5d49 100644
1389 +--- a/drivers/nvmem/core.c
1390 ++++ b/drivers/nvmem/core.c
1391 +@@ -401,10 +401,17 @@ static int nvmem_setup_compat(struct nvmem_device *nvmem,
1392 + if (!config->base_dev)
1393 + return -EINVAL;
1394 +
1395 +- if (nvmem->read_only)
1396 +- nvmem->eeprom = bin_attr_ro_root_nvmem;
1397 +- else
1398 +- nvmem->eeprom = bin_attr_rw_root_nvmem;
1399 ++ if (nvmem->read_only) {
1400 ++ if (config->root_only)
1401 ++ nvmem->eeprom = bin_attr_ro_root_nvmem;
1402 ++ else
1403 ++ nvmem->eeprom = bin_attr_ro_nvmem;
1404 ++ } else {
1405 ++ if (config->root_only)
1406 ++ nvmem->eeprom = bin_attr_rw_root_nvmem;
1407 ++ else
1408 ++ nvmem->eeprom = bin_attr_rw_nvmem;
1409 ++ }
1410 + nvmem->eeprom.attr.name = "eeprom";
1411 + nvmem->eeprom.size = nvmem->size;
1412 + #ifdef CONFIG_DEBUG_LOCK_ALLOC
1413 +diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
1414 +index ef688aadb032..578242239daa 100644
1415 +--- a/drivers/tty/serial/atmel_serial.c
1416 ++++ b/drivers/tty/serial/atmel_serial.c
1417 +@@ -1279,7 +1279,6 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending)
1418 +
1419 + atmel_port->hd_start_rx = false;
1420 + atmel_start_rx(port);
1421 +- return;
1422 + }
1423 +
1424 + atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
1425 +diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
1426 +index 747560feb63e..2e34239ac8a9 100644
1427 +--- a/drivers/tty/serial/sprd_serial.c
1428 ++++ b/drivers/tty/serial/sprd_serial.c
1429 +@@ -240,7 +240,7 @@ static inline void sprd_rx(struct uart_port *port)
1430 +
1431 + if (lsr & (SPRD_LSR_BI | SPRD_LSR_PE |
1432 + SPRD_LSR_FE | SPRD_LSR_OE))
1433 +- if (handle_lsr_errors(port, &lsr, &flag))
1434 ++ if (handle_lsr_errors(port, &flag, &lsr))
1435 + continue;
1436 + if (uart_handle_sysrq_char(port, ch))
1437 + continue;
1438 +diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
1439 +index eb12eea13770..94ec2dc27748 100644
1440 +--- a/drivers/usb/core/config.c
1441 ++++ b/drivers/usb/core/config.c
1442 +@@ -920,7 +920,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
1443 + struct usb_bos_descriptor *bos;
1444 + struct usb_dev_cap_header *cap;
1445 + struct usb_ssp_cap_descriptor *ssp_cap;
1446 +- unsigned char *buffer;
1447 ++ unsigned char *buffer, *buffer0;
1448 + int length, total_len, num, i, ssac;
1449 + __u8 cap_type;
1450 + int ret;
1451 +@@ -965,10 +965,12 @@ int usb_get_bos_descriptor(struct usb_device *dev)
1452 + ret = -ENOMSG;
1453 + goto err;
1454 + }
1455 ++
1456 ++ buffer0 = buffer;
1457 + total_len -= length;
1458 ++ buffer += length;
1459 +
1460 + for (i = 0; i < num; i++) {
1461 +- buffer += length;
1462 + cap = (struct usb_dev_cap_header *)buffer;
1463 +
1464 + if (total_len < sizeof(*cap) || total_len < cap->bLength) {
1465 +@@ -982,8 +984,6 @@ int usb_get_bos_descriptor(struct usb_device *dev)
1466 + break;
1467 + }
1468 +
1469 +- total_len -= length;
1470 +-
1471 + if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
1472 + dev_warn(ddev, "descriptor type invalid, skip\n");
1473 + continue;
1474 +@@ -1018,7 +1018,11 @@ int usb_get_bos_descriptor(struct usb_device *dev)
1475 + default:
1476 + break;
1477 + }
1478 ++
1479 ++ total_len -= length;
1480 ++ buffer += length;
1481 + }
1482 ++ dev->bos->desc->wTotalLength = cpu_to_le16(buffer - buffer0);
1483 +
1484 + return 0;
1485 +
1486 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
1487 +index 02bb7b52cb36..65e1eaa5df84 100644
1488 +--- a/fs/btrfs/tree-log.c
1489 ++++ b/fs/btrfs/tree-log.c
1490 +@@ -4846,7 +4846,7 @@ again:
1491 + err = btrfs_log_inode(trans, root, other_inode,
1492 + LOG_OTHER_INODE,
1493 + 0, LLONG_MAX, ctx);
1494 +- iput(other_inode);
1495 ++ btrfs_add_delayed_iput(other_inode);
1496 + if (err)
1497 + goto out_unlock;
1498 + else
1499 +@@ -5264,7 +5264,7 @@ process_leaf:
1500 + }
1501 +
1502 + if (btrfs_inode_in_log(di_inode, trans->transid)) {
1503 +- iput(di_inode);
1504 ++ btrfs_add_delayed_iput(di_inode);
1505 + break;
1506 + }
1507 +
1508 +@@ -5276,7 +5276,7 @@ process_leaf:
1509 + if (!ret &&
1510 + btrfs_must_commit_transaction(trans, di_inode))
1511 + ret = 1;
1512 +- iput(di_inode);
1513 ++ btrfs_add_delayed_iput(di_inode);
1514 + if (ret)
1515 + goto next_dir_inode;
1516 + if (ctx->log_new_dentries) {
1517 +@@ -5422,7 +5422,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
1518 + if (!ret && ctx && ctx->log_new_dentries)
1519 + ret = log_new_dir_dentries(trans, root,
1520 + dir_inode, ctx);
1521 +- iput(dir_inode);
1522 ++ btrfs_add_delayed_iput(dir_inode);
1523 + if (ret)
1524 + goto out;
1525 + }
1526 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
1527 +index f291ed0c155d..e43ba6db2bdd 100644
1528 +--- a/fs/cifs/connect.c
1529 ++++ b/fs/cifs/connect.c
1530 +@@ -2447,6 +2447,7 @@ static int
1531 + cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
1532 + {
1533 + int rc = 0;
1534 ++ int is_domain = 0;
1535 + const char *delim, *payload;
1536 + char *desc;
1537 + ssize_t len;
1538 +@@ -2494,6 +2495,7 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
1539 + rc = PTR_ERR(key);
1540 + goto out_err;
1541 + }
1542 ++ is_domain = 1;
1543 + }
1544 +
1545 + down_read(&key->sem);
1546 +@@ -2551,6 +2553,26 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
1547 + goto out_key_put;
1548 + }
1549 +
1550 ++ /*
1551 ++ * If we have a domain key then we must set the domainName in the
1552 ++ * volume info used for the request.
1553 ++ */
1554 ++ if (is_domain && ses->domainName) {
1555 ++ vol->domainname = kstrndup(ses->domainName,
1556 ++ strlen(ses->domainName),
1557 ++ GFP_KERNEL);
1558 ++ if (!vol->domainname) {
1559 ++ cifs_dbg(FYI, "Unable to allocate %zd bytes for "
1560 ++ "domain\n", len);
1561 ++ rc = -ENOMEM;
1562 ++ kfree(vol->username);
1563 ++ vol->username = NULL;
1564 ++ kzfree(vol->password);
1565 ++ vol->password = NULL;
1566 ++ goto out_key_put;
1567 ++ }
1568 ++ }
1569 ++
1570 + out_key_put:
1571 + up_read(&key->sem);
1572 + key_put(key);
1573 +diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
1574 +index 8a0c301b0c69..7138383382ff 100644
1575 +--- a/fs/nfs/nfs4file.c
1576 ++++ b/fs/nfs/nfs4file.c
1577 +@@ -73,13 +73,13 @@ nfs4_file_open(struct inode *inode, struct file *filp)
1578 + if (IS_ERR(inode)) {
1579 + err = PTR_ERR(inode);
1580 + switch (err) {
1581 +- case -EPERM:
1582 +- case -EACCES:
1583 +- case -EDQUOT:
1584 +- case -ENOSPC:
1585 +- case -EROFS:
1586 +- goto out_put_ctx;
1587 + default:
1588 ++ goto out_put_ctx;
1589 ++ case -ENOENT:
1590 ++ case -ESTALE:
1591 ++ case -EISDIR:
1592 ++ case -ENOTDIR:
1593 ++ case -ELOOP:
1594 + goto out_drop;
1595 + }
1596 + }
1597 +diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
1598 +index fad4d5188aaf..b6e25126a0b0 100644
1599 +--- a/fs/nfs/pagelist.c
1600 ++++ b/fs/nfs/pagelist.c
1601 +@@ -562,7 +562,7 @@ static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
1602 + }
1603 +
1604 + hdr->res.fattr = &hdr->fattr;
1605 +- hdr->res.count = count;
1606 ++ hdr->res.count = 0;
1607 + hdr->res.eof = 0;
1608 + hdr->res.verf = &hdr->verf;
1609 + nfs_fattr_init(&hdr->fattr);
1610 +diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
1611 +index b7bca8303989..06e72229be12 100644
1612 +--- a/fs/nfs/proc.c
1613 ++++ b/fs/nfs/proc.c
1614 +@@ -588,7 +588,8 @@ static int nfs_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
1615 + /* Emulate the eof flag, which isn't normally needed in NFSv2
1616 + * as it is guaranteed to always return the file attributes
1617 + */
1618 +- if (hdr->args.offset + hdr->res.count >= hdr->res.fattr->size)
1619 ++ if ((hdr->res.count == 0 && hdr->args.count > 0) ||
1620 ++ hdr->args.offset + hdr->res.count >= hdr->res.fattr->size)
1621 + hdr->res.eof = 1;
1622 + }
1623 + return 0;
1624 +@@ -609,8 +610,10 @@ static int nfs_proc_pgio_rpc_prepare(struct rpc_task *task,
1625 +
1626 + static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
1627 + {
1628 +- if (task->tk_status >= 0)
1629 ++ if (task->tk_status >= 0) {
1630 ++ hdr->res.count = hdr->args.count;
1631 + nfs_writeback_update_inode(hdr);
1632 ++ }
1633 + return 0;
1634 + }
1635 +
1636 +diff --git a/include/uapi/linux/isdn/capicmd.h b/include/uapi/linux/isdn/capicmd.h
1637 +index b58635f722da..ae1e1fba2e13 100644
1638 +--- a/include/uapi/linux/isdn/capicmd.h
1639 ++++ b/include/uapi/linux/isdn/capicmd.h
1640 +@@ -15,6 +15,7 @@
1641 + #define CAPI_MSG_BASELEN 8
1642 + #define CAPI_DATA_B3_REQ_LEN (CAPI_MSG_BASELEN+4+4+2+2+2)
1643 + #define CAPI_DATA_B3_RESP_LEN (CAPI_MSG_BASELEN+4+2)
1644 ++#define CAPI_DISCONNECT_B3_RESP_LEN (CAPI_MSG_BASELEN+4)
1645 +
1646 + /*----- CAPI commands -----*/
1647 + #define CAPI_ALERT 0x01
1648 +diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
1649 +index b86886beee4f..867fb0ed4aa6 100644
1650 +--- a/kernel/irq/resend.c
1651 ++++ b/kernel/irq/resend.c
1652 +@@ -37,6 +37,8 @@ static void resend_irqs(unsigned long arg)
1653 + irq = find_first_bit(irqs_resend, nr_irqs);
1654 + clear_bit(irq, irqs_resend);
1655 + desc = irq_to_desc(irq);
1656 ++ if (!desc)
1657 ++ continue;
1658 + local_irq_disable();
1659 + desc->handle_irq(desc);
1660 + local_irq_enable();
1661 +diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
1662 +index 1aeeadca620c..f435435b447e 100644
1663 +--- a/net/batman-adv/bat_v_ogm.c
1664 ++++ b/net/batman-adv/bat_v_ogm.c
1665 +@@ -618,17 +618,23 @@ batadv_v_ogm_process_per_outif(struct batadv_priv *bat_priv,
1666 + * batadv_v_ogm_aggr_packet - checks if there is another OGM aggregated
1667 + * @buff_pos: current position in the skb
1668 + * @packet_len: total length of the skb
1669 +- * @tvlv_len: tvlv length of the previously considered OGM
1670 ++ * @ogm2_packet: potential OGM2 in buffer
1671 + *
1672 + * Return: true if there is enough space for another OGM, false otherwise.
1673 + */
1674 +-static bool batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
1675 +- __be16 tvlv_len)
1676 ++static bool
1677 ++batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
1678 ++ const struct batadv_ogm2_packet *ogm2_packet)
1679 + {
1680 + int next_buff_pos = 0;
1681 +
1682 +- next_buff_pos += buff_pos + BATADV_OGM2_HLEN;
1683 +- next_buff_pos += ntohs(tvlv_len);
1684 ++ /* check if there is enough space for the header */
1685 ++ next_buff_pos += buff_pos + sizeof(*ogm2_packet);
1686 ++ if (next_buff_pos > packet_len)
1687 ++ return false;
1688 ++
1689 ++ /* check if there is enough space for the optional TVLV */
1690 ++ next_buff_pos += ntohs(ogm2_packet->tvlv_len);
1691 +
1692 + return (next_buff_pos <= packet_len) &&
1693 + (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
1694 +@@ -775,7 +781,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb,
1695 + ogm_packet = (struct batadv_ogm2_packet *)skb->data;
1696 +
1697 + while (batadv_v_ogm_aggr_packet(ogm_offset, skb_headlen(skb),
1698 +- ogm_packet->tvlv_len)) {
1699 ++ ogm_packet)) {
1700 + batadv_v_ogm_process(skb, ogm_offset, if_incoming);
1701 +
1702 + ogm_offset += BATADV_OGM2_HLEN;
1703 +diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
1704 +index 6406010e155b..7007683973b4 100644
1705 +--- a/net/bridge/br_mdb.c
1706 ++++ b/net/bridge/br_mdb.c
1707 +@@ -372,7 +372,7 @@ static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
1708 + struct nlmsghdr *nlh;
1709 + struct nlattr *nest;
1710 +
1711 +- nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
1712 ++ nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
1713 + if (!nlh)
1714 + return -EMSGSIZE;
1715 +
1716 +diff --git a/net/core/dev.c b/net/core/dev.c
1717 +index 08bcbce16e12..547b4daae5ca 100644
1718 +--- a/net/core/dev.c
1719 ++++ b/net/core/dev.c
1720 +@@ -7353,6 +7353,8 @@ int register_netdevice(struct net_device *dev)
1721 + ret = notifier_to_errno(ret);
1722 + if (ret) {
1723 + rollback_registered(dev);
1724 ++ rcu_barrier();
1725 ++
1726 + dev->reg_state = NETREG_UNREGISTERED;
1727 + }
1728 + /*
1729 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
1730 +index 4a71d78d0c6a..7164569c1ec8 100644
1731 +--- a/net/core/skbuff.c
1732 ++++ b/net/core/skbuff.c
1733 +@@ -3094,6 +3094,25 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
1734 + int pos;
1735 + int dummy;
1736 +
1737 ++ if (list_skb && !list_skb->head_frag && skb_headlen(list_skb) &&
1738 ++ (skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY)) {
1739 ++ /* gso_size is untrusted, and we have a frag_list with a linear
1740 ++ * non head_frag head.
1741 ++ *
1742 ++ * (we assume checking the first list_skb member suffices;
1743 ++ * i.e. if any of the list_skb members has a non-head_frag
1744 ++ * head, then the first one does too).
1745 ++ *
1746 ++ * If head_skb's headlen does not fit requested gso_size, it
1747 ++ * means that the frag_list members do NOT terminate on exact
1748 ++ * gso_size boundaries. Hence we cannot perform skb_frag_t page
1749 ++ * sharing. Therefore we must fallback to copying the frag_list
1750 ++ * skbs; we do so by disabling SG.
1751 ++ */
1752 ++ if (mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb))
1753 ++ features &= ~NETIF_F_SG;
1754 ++ }
1755 ++
1756 + __skb_push(head_skb, doffset);
1757 + proto = skb_network_protocol(head_skb, &dummy);
1758 + if (unlikely(!proto))
1759 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
1760 +index e2e58bc42ba4..84ff36a6d4e3 100644
1761 +--- a/net/ipv4/tcp_input.c
1762 ++++ b/net/ipv4/tcp_input.c
1763 +@@ -247,7 +247,7 @@ static void tcp_ecn_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb)
1764 +
1765 + static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
1766 + {
1767 +- tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
1768 ++ tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
1769 + }
1770 +
1771 + static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
1772 +diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
1773 +index 982868193dbb..e209ae19fe78 100644
1774 +--- a/net/ipv6/ping.c
1775 ++++ b/net/ipv6/ping.c
1776 +@@ -239,7 +239,7 @@ static int __net_init ping_v6_proc_init_net(struct net *net)
1777 + return ping_proc_register(net, &ping_v6_seq_afinfo);
1778 + }
1779 +
1780 +-static void __net_init ping_v6_proc_exit_net(struct net *net)
1781 ++static void __net_exit ping_v6_proc_exit_net(struct net *net)
1782 + {
1783 + return ping_proc_unregister(net, &ping_v6_seq_afinfo);
1784 + }
1785 +diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
1786 +index e3ed20060878..562b54524249 100644
1787 +--- a/net/netfilter/nf_conntrack_ftp.c
1788 ++++ b/net/netfilter/nf_conntrack_ftp.c
1789 +@@ -323,7 +323,7 @@ static int find_pattern(const char *data, size_t dlen,
1790 + i++;
1791 + }
1792 +
1793 +- pr_debug("Skipped up to `%c'!\n", skip);
1794 ++ pr_debug("Skipped up to 0x%hhx delimiter!\n", skip);
1795 +
1796 + *numoff = i;
1797 + *numlen = getnum(data + i, dlen - i, cmd, term, numoff);
1798 +diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
1799 +index 9016c8baf2aa..88ce8edf1261 100644
1800 +--- a/net/sched/sch_generic.c
1801 ++++ b/net/sched/sch_generic.c
1802 +@@ -699,7 +699,11 @@ static void qdisc_rcu_free(struct rcu_head *head)
1803 +
1804 + void qdisc_destroy(struct Qdisc *qdisc)
1805 + {
1806 +- const struct Qdisc_ops *ops = qdisc->ops;
1807 ++ const struct Qdisc_ops *ops;
1808 ++
1809 ++ if (!qdisc)
1810 ++ return;
1811 ++ ops = qdisc->ops;
1812 +
1813 + if (qdisc->flags & TCQ_F_BUILTIN ||
1814 + !atomic_dec_and_test(&qdisc->refcnt))
1815 +diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
1816 +index f4b2d69973c3..fe32239253a6 100644
1817 +--- a/net/sched/sch_hhf.c
1818 ++++ b/net/sched/sch_hhf.c
1819 +@@ -543,7 +543,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
1820 + new_hhf_non_hh_weight = nla_get_u32(tb[TCA_HHF_NON_HH_WEIGHT]);
1821 +
1822 + non_hh_quantum = (u64)new_quantum * new_hhf_non_hh_weight;
1823 +- if (non_hh_quantum > INT_MAX)
1824 ++ if (non_hh_quantum == 0 || non_hh_quantum > INT_MAX)
1825 + return -EINVAL;
1826 +
1827 + sch_tree_lock(sch);
1828 +diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
1829 +index d6af93a24aa0..833283c8fe11 100644
1830 +--- a/net/sctp/protocol.c
1831 ++++ b/net/sctp/protocol.c
1832 +@@ -1336,7 +1336,7 @@ static int __net_init sctp_ctrlsock_init(struct net *net)
1833 + return status;
1834 + }
1835 +
1836 +-static void __net_init sctp_ctrlsock_exit(struct net *net)
1837 ++static void __net_exit sctp_ctrlsock_exit(struct net *net)
1838 + {
1839 + /* Free the control endpoint. */
1840 + inet_ctl_sock_destroy(net->sctp.ctl_sock);
1841 +diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
1842 +index b1ead1776e81..8b4cf78987e4 100644
1843 +--- a/net/sctp/sm_sideeffect.c
1844 ++++ b/net/sctp/sm_sideeffect.c
1845 +@@ -509,7 +509,7 @@ static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands,
1846 + if (net->sctp.pf_enable &&
1847 + (transport->state == SCTP_ACTIVE) &&
1848 + (transport->error_count < transport->pathmaxrxt) &&
1849 +- (transport->error_count > asoc->pf_retrans)) {
1850 ++ (transport->error_count > transport->pf_retrans)) {
1851 +
1852 + sctp_assoc_control_transport(asoc, transport,
1853 + SCTP_TRANSPORT_PF,
1854 +diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
1855 +index 23f8899e0f8c..7ebcaff8c1c4 100644
1856 +--- a/net/tipc/name_distr.c
1857 ++++ b/net/tipc/name_distr.c
1858 +@@ -224,7 +224,8 @@ static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr)
1859 + publ->key);
1860 + }
1861 +
1862 +- kfree_rcu(p, rcu);
1863 ++ if (p)
1864 ++ kfree_rcu(p, rcu);
1865 + }
1866 +
1867 + /**
1868 +diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
1869 +index f60baeb338e5..b47445022d5c 100644
1870 +--- a/security/keys/request_key_auth.c
1871 ++++ b/security/keys/request_key_auth.c
1872 +@@ -71,6 +71,9 @@ static void request_key_auth_describe(const struct key *key,
1873 + {
1874 + struct request_key_auth *rka = key->payload.data[0];
1875 +
1876 ++ if (!rka)
1877 ++ return;
1878 ++
1879 + seq_puts(m, "key:");
1880 + seq_puts(m, key->description);
1881 + if (key_is_positive(key))
1882 +@@ -88,6 +91,9 @@ static long request_key_auth_read(const struct key *key,
1883 + size_t datalen;
1884 + long ret;
1885 +
1886 ++ if (!rka)
1887 ++ return -EKEYREVOKED;
1888 ++
1889 + datalen = rka->callout_len;
1890 + ret = datalen;
1891 +
1892 +diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
1893 +index b4c5d96e54c1..7c2c8e74aa9a 100644
1894 +--- a/tools/power/x86/turbostat/turbostat.c
1895 ++++ b/tools/power/x86/turbostat/turbostat.c
1896 +@@ -3593,7 +3593,7 @@ int initialize_counters(int cpu_id)
1897 +
1898 + void allocate_output_buffer()
1899 + {
1900 +- output_buffer = calloc(1, (1 + topo.num_cpus) * 1024);
1901 ++ output_buffer = calloc(1, (1 + topo.num_cpus) * 2048);
1902 + outp = output_buffer;
1903 + if (outp == NULL)
1904 + err(-1, "calloc output buffer");
1905 +diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
1906 +index 571c1ce37d15..5c1efb869df2 100644
1907 +--- a/virt/kvm/coalesced_mmio.c
1908 ++++ b/virt/kvm/coalesced_mmio.c
1909 +@@ -39,7 +39,7 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
1910 + return 1;
1911 + }
1912 +
1913 +-static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
1914 ++static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
1915 + {
1916 + struct kvm_coalesced_mmio_ring *ring;
1917 + unsigned avail;
1918 +@@ -51,7 +51,7 @@ static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
1919 + * there is always one unused entry in the buffer
1920 + */
1921 + ring = dev->kvm->coalesced_mmio_ring;
1922 +- avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
1923 ++ avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
1924 + if (avail == 0) {
1925 + /* full */
1926 + return 0;
1927 +@@ -66,24 +66,27 @@ static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
1928 + {
1929 + struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
1930 + struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
1931 ++ __u32 insert;
1932 +
1933 + if (!coalesced_mmio_in_range(dev, addr, len))
1934 + return -EOPNOTSUPP;
1935 +
1936 + spin_lock(&dev->kvm->ring_lock);
1937 +
1938 +- if (!coalesced_mmio_has_room(dev)) {
1939 ++ insert = READ_ONCE(ring->last);
1940 ++ if (!coalesced_mmio_has_room(dev, insert) ||
1941 ++ insert >= KVM_COALESCED_MMIO_MAX) {
1942 + spin_unlock(&dev->kvm->ring_lock);
1943 + return -EOPNOTSUPP;
1944 + }
1945 +
1946 + /* copy data in first free entry of the ring */
1947 +
1948 +- ring->coalesced_mmio[ring->last].phys_addr = addr;
1949 +- ring->coalesced_mmio[ring->last].len = len;
1950 +- memcpy(ring->coalesced_mmio[ring->last].data, val, len);
1951 ++ ring->coalesced_mmio[insert].phys_addr = addr;
1952 ++ ring->coalesced_mmio[insert].len = len;
1953 ++ memcpy(ring->coalesced_mmio[insert].data, val, len);
1954 + smp_wmb();
1955 +- ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
1956 ++ ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
1957 + spin_unlock(&dev->kvm->ring_lock);
1958 + return 0;
1959 + }