
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:3.10 commit in: /
Date: Sun, 05 Nov 2017 18:50:14
Message-Id: 1509907792.679479168a68ec71fc95e7a66dac25305220e45f.mpagano@gentoo
1 commit: 679479168a68ec71fc95e7a66dac25305220e45f
2 Author: Mike Pagano <mpagano@gentoo.org>
3 AuthorDate: Sun Nov 5 18:49:52 2017 +0000
4 Commit: Mike Pagano <mpagano@gentoo.org>
5 CommitDate: Sun Nov 5 18:49:52 2017 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=67947916
7
8 Linux patch 3.10.108
9
10 0000_README | 4 +
11 1107_linux-3.10.108.patch | 3467 +++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 3471 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index e949f2e..e964bd4 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -470,6 +470,10 @@ Patch: 1106_linux-3.10.107.patch
19 From: http://www.kernel.org
20 Desc: Linux 3.10.107
21
22 +Patch: 1107_linux-3.10.108.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 3.10.108
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1107_linux-3.10.108.patch b/1107_linux-3.10.108.patch
31 new file mode 100644
32 index 0000000..f8e4e90
33 --- /dev/null
34 +++ b/1107_linux-3.10.108.patch
35 @@ -0,0 +1,3467 @@
36 +diff --git a/Makefile b/Makefile
37 +index 752b1c67daa0..924f98a4bc0f 100644
38 +--- a/Makefile
39 ++++ b/Makefile
40 +@@ -1,8 +1,8 @@
41 + VERSION = 3
42 + PATCHLEVEL = 10
43 +-SUBLEVEL = 107
44 ++SUBLEVEL = 108
45 + EXTRAVERSION =
46 +-NAME = TOSSUG Baby Fish
47 ++NAME = END-OF-LIFE
48 +
49 + # *DOCUMENTATION*
50 + # To see a list of typical targets execute "make help"
51 +diff --git a/arch/mips/include/asm/branch.h b/arch/mips/include/asm/branch.h
52 +index e28a3e0eb3cb..582d8b61ce5c 100644
53 +--- a/arch/mips/include/asm/branch.h
54 ++++ b/arch/mips/include/asm/branch.h
55 +@@ -44,10 +44,7 @@ static inline int compute_return_epc(struct pt_regs *regs)
56 + return __microMIPS_compute_return_epc(regs);
57 + if (cpu_has_mips16)
58 + return __MIPS16e_compute_return_epc(regs);
59 +- return regs->cp0_epc;
60 +- }
61 +-
62 +- if (!delay_slot(regs)) {
63 ++ } else if (!delay_slot(regs)) {
64 + regs->cp0_epc += 4;
65 + return 0;
66 + }
67 +diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
68 +index 46c2ad0703a0..63b942f613c4 100644
69 +--- a/arch/mips/kernel/branch.c
70 ++++ b/arch/mips/kernel/branch.c
71 +@@ -200,7 +200,7 @@ int __MIPS16e_compute_return_epc(struct pt_regs *regs)
72 + *
73 + * @regs: Pointer to pt_regs
74 + * @insn: branch instruction to decode
75 +- * @returns: -EFAULT on error and forces SIGBUS, and on success
76 ++ * @returns: -EFAULT on error and forces SIGILL, and on success
77 + * returns 0 or BRANCH_LIKELY_TAKEN as appropriate after
78 + * evaluating the branch.
79 + */
80 +@@ -297,6 +297,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
81 + /*
82 + * These are unconditional and in j_format.
83 + */
84 ++ case jalx_op:
85 + case jal_op:
86 + regs->regs[31] = regs->cp0_epc + 8;
87 + case j_op:
88 +@@ -436,8 +437,9 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
89 + return ret;
90 +
91 + sigill:
92 +- printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm);
93 +- force_sig(SIGBUS, current);
94 ++ pr_info("%s: DSP branch but not DSP ASE - sending SIGILL.\n",
95 ++ current->comm);
96 ++ force_sig(SIGILL, current);
97 + return -EFAULT;
98 + }
99 + EXPORT_SYMBOL_GPL(__compute_return_epc_for_insn);
100 +diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
101 +index b79d13f95bf0..eb0f4dfb385c 100644
102 +--- a/arch/mips/kernel/syscall.c
103 ++++ b/arch/mips/kernel/syscall.c
104 +@@ -140,7 +140,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
105 + "1: ll %[old], (%[addr]) \n"
106 + " move %[tmp], %[new] \n"
107 + "2: sc %[tmp], (%[addr]) \n"
108 +- " bnez %[tmp], 4f \n"
109 ++ " beqz %[tmp], 4f \n"
110 + "3: \n"
111 + " .subsection 2 \n"
112 + "4: b 1b \n"
113 +diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
114 +index 3d492a823a55..dbddc9ccf270 100644
115 +--- a/arch/mips/math-emu/cp1emu.c
116 ++++ b/arch/mips/math-emu/cp1emu.c
117 +@@ -2002,6 +2002,35 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
118 + return 0;
119 + }
120 +
121 ++/*
122 ++ * Emulate FPU instructions.
123 ++ *
124 ++ * If we use FPU hardware, then we have been typically called to handle
125 ++ * an unimplemented operation, such as where an operand is a NaN or
126 ++ * denormalized. In that case exit the emulation loop after a single
127 ++ * iteration so as to let hardware execute any subsequent instructions.
128 ++ *
129 ++ * If we have no FPU hardware or it has been disabled, then continue
130 ++ * emulating floating-point instructions until one of these conditions
131 ++ * has occurred:
132 ++ *
133 ++ * - a non-FPU instruction has been encountered,
134 ++ *
135 ++ * - an attempt to emulate has ended with a signal,
136 ++ *
137 ++ * - the ISA mode has been switched.
138 ++ *
139 ++ * We need to terminate the emulation loop if we got switched to the
140 ++ * MIPS16 mode, whether supported or not, so that we do not attempt
141 ++ * to emulate a MIPS16 instruction as a regular MIPS FPU instruction.
142 ++ * Similarly if we got switched to the microMIPS mode and only the
143 ++ * regular MIPS mode is supported, so that we do not attempt to emulate
144 ++ * a microMIPS instruction as a regular MIPS FPU instruction. Or if
145 ++ * we got switched to the regular MIPS mode and only the microMIPS mode
146 ++ * is supported, so that we do not attempt to emulate a regular MIPS
147 ++ * instruction that should cause an Address Error exception instead.
148 ++ * For simplicity we always terminate upon an ISA mode switch.
149 ++ */
150 + int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
151 + int has_fpu, void *__user *fault_addr)
152 + {
153 +@@ -2093,6 +2122,15 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
154 + break;
155 + if (sig)
156 + break;
157 ++ /*
158 ++ * We have to check for the ISA bit explicitly here,
159 ++ * because `get_isa16_mode' may return 0 if support
160 ++ * for code compression has been globally disabled,
161 ++ * or otherwise we may produce the wrong signal or
162 ++ * even proceed successfully where we must not.
163 ++ */
164 ++ if ((xcp->cp0_epc ^ prevepc) & 0x1)
165 ++ break;
166 +
167 + cond_resched();
168 + } while (xcp->cp0_epc > prevepc);
169 +diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
170 +index e3b1d41c89be..84bcdfa410ff 100644
171 +--- a/arch/powerpc/include/asm/atomic.h
172 ++++ b/arch/powerpc/include/asm/atomic.h
173 +@@ -501,7 +501,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
174 + * Atomically increments @v by 1, so long as @v is non-zero.
175 + * Returns non-zero if @v was non-zero, and zero otherwise.
176 + */
177 +-static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
178 ++static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
179 + {
180 + long t1, t2;
181 +
182 +@@ -520,7 +520,7 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
183 + : "r" (&v->counter)
184 + : "cc", "xer", "memory");
185 +
186 +- return t1;
187 ++ return t1 != 0;
188 + }
189 +
190 + #endif /* __powerpc64__ */
191 +diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
192 +index 469d7715d6aa..954168be7878 100644
193 +--- a/arch/powerpc/include/asm/reg.h
194 ++++ b/arch/powerpc/include/asm/reg.h
195 +@@ -1136,7 +1136,7 @@
196 + " .llong 0\n" \
197 + " .llong 0\n" \
198 + ".previous" \
199 +- : "=r" (rval) : "i" (CPU_FTR_CELL_TB_BUG)); rval;})
200 ++ : "=r" (rval) : "i" (CPU_FTR_CELL_TB_BUG) : "cr0"); rval;})
201 + #else
202 + #define mftb() ({unsigned long rval; \
203 + asm volatile("mftb %0" : "=r" (rval)); rval;})
204 +diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
205 +index 11f5b03a0b06..762c10d46d66 100644
206 +--- a/arch/powerpc/kernel/kprobes.c
207 ++++ b/arch/powerpc/kernel/kprobes.c
208 +@@ -529,6 +529,15 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
209 + regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
210 + #endif
211 +
212 ++ /*
213 ++ * jprobes use jprobe_return() which skips the normal return
214 ++ * path of the function, and this messes up the accounting of the
215 ++ * function graph tracer.
216 ++ *
217 ++ * Pause function graph tracing while performing the jprobe function.
218 ++ */
219 ++ pause_graph_tracing();
220 ++
221 + return 1;
222 + }
223 +
224 +@@ -551,6 +560,8 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
225 + * saved regs...
226 + */
227 + memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
228 ++ /* It's OK to start function graph tracing again */
229 ++ unpause_graph_tracing();
230 + preempt_enable_no_resched();
231 + return 1;
232 + }
233 +diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
234 +index 08490ecc465e..23da15ff7796 100644
235 +--- a/arch/powerpc/lib/sstep.c
236 ++++ b/arch/powerpc/lib/sstep.c
237 +@@ -863,6 +863,19 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
238 + goto instr_done;
239 + #endif
240 + case 19: /* mfcr */
241 ++ if ((instr >> 20) & 1) {
242 ++ imm = 0xf0000000UL;
243 ++ for (sh = 0; sh < 8; ++sh) {
244 ++ if (instr & (0x80000 >> sh)) {
245 ++ regs->gpr[rd] = regs->ccr & imm;
246 ++ break;
247 ++ }
248 ++ imm >>= 4;
249 ++ }
250 ++
251 ++ goto instr_done;
252 ++ }
253 ++
254 + regs->gpr[rd] = regs->ccr;
255 + regs->gpr[rd] &= 0xffffffffUL;
256 + goto instr_done;
257 +diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
258 +index d8e8eefbe24c..86ec87dc1f57 100644
259 +--- a/arch/x86/include/asm/io.h
260 ++++ b/arch/x86/include/asm/io.h
261 +@@ -296,13 +296,13 @@ static inline unsigned type in##bwl##_p(int port) \
262 + static inline void outs##bwl(int port, const void *addr, unsigned long count) \
263 + { \
264 + asm volatile("rep; outs" #bwl \
265 +- : "+S"(addr), "+c"(count) : "d"(port)); \
266 ++ : "+S"(addr), "+c"(count) : "d"(port) : "memory"); \
267 + } \
268 + \
269 + static inline void ins##bwl(int port, void *addr, unsigned long count) \
270 + { \
271 + asm volatile("rep; ins" #bwl \
272 +- : "+D"(addr), "+c"(count) : "d"(port)); \
273 ++ : "+D"(addr), "+c"(count) : "d"(port) : "memory"); \
274 + }
275 +
276 + BUILDIO(b, b, char)
277 +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
278 +index 3cd8bfc3c4b6..bc37ddeaa627 100644
279 +--- a/arch/x86/kernel/apic/apic.c
280 ++++ b/arch/x86/kernel/apic/apic.c
281 +@@ -1581,8 +1581,10 @@ void __init enable_IR_x2apic(void)
282 + int ret, x2apic_enabled = 0;
283 + int hardware_init_ret;
284 +
285 ++#ifdef CONFIG_X86_IO_APIC
286 + if (skip_ioapic_setup)
287 + return;
288 ++#endif
289 +
290 + /* Make sure irq_remap_ops are initialized */
291 + setup_irq_remapping_ops();
292 +diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
293 +index c4ff2a916139..c95ece93f359 100644
294 +--- a/arch/x86/kernel/kvm.c
295 ++++ b/arch/x86/kernel/kvm.c
296 +@@ -159,8 +159,8 @@ void kvm_async_pf_task_wait(u32 token)
297 + */
298 + rcu_irq_exit();
299 + native_safe_halt();
300 +- rcu_irq_enter();
301 + local_irq_disable();
302 ++ rcu_irq_enter();
303 + }
304 + }
305 + if (!n.halted)
306 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
307 +index d9016e4a80f9..be1389527284 100644
308 +--- a/arch/x86/kvm/vmx.c
309 ++++ b/arch/x86/kvm/vmx.c
310 +@@ -8014,7 +8014,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
311 + * (KVM doesn't change it)- no reason to call set_cr4_guest_host_mask();
312 + */
313 + vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
314 +- kvm_set_cr4(vcpu, vmcs12->host_cr4);
315 ++ vmx_set_cr4(vcpu, vmcs12->host_cr4);
316 +
317 + /* shadow page tables on either EPT or shadow page tables */
318 + kvm_set_cr3(vcpu, vmcs12->host_cr3);
319 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
320 +index b70b67bde90d..3d316cafff91 100644
321 +--- a/arch/x86/kvm/x86.c
322 ++++ b/arch/x86/kvm/x86.c
323 +@@ -4596,6 +4596,8 @@ static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
324 +
325 + if (var.unusable) {
326 + memset(desc, 0, sizeof(*desc));
327 ++ if (base3)
328 ++ *base3 = 0;
329 + return false;
330 + }
331 +
332 +diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
333 +index 73a6d7395bd3..58e7e9d4bbc7 100644
334 +--- a/arch/x86/mm/numa_32.c
335 ++++ b/arch/x86/mm/numa_32.c
336 +@@ -100,5 +100,6 @@ void __init initmem_init(void)
337 + printk(KERN_DEBUG "High memory starts at vaddr %08lx\n",
338 + (ulong) pfn_to_kaddr(highstart_pfn));
339 +
340 ++ __vmalloc_start_set = true;
341 + setup_bootmem_allocator();
342 + }
343 +diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
344 +index ea05c531db26..8e2747401d34 100644
345 +--- a/crypto/algif_skcipher.c
346 ++++ b/crypto/algif_skcipher.c
347 +@@ -92,8 +92,10 @@ static int skcipher_alloc_sgl(struct sock *sk)
348 + sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
349 + sgl->cur = 0;
350 +
351 +- if (sg)
352 ++ if (sg) {
353 + scatterwalk_sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
354 ++ sg_unmark_end(sg + (MAX_SGL_ENTS - 1));
355 ++ }
356 +
357 + list_add_tail(&sgl->list, &ctx->tsgl);
358 + }
359 +diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
360 +index 070b843c37ee..8cff7cae7331 100644
361 +--- a/drivers/acpi/apei/ghes.c
362 ++++ b/drivers/acpi/apei/ghes.c
363 +@@ -988,6 +988,7 @@ static int ghes_remove(struct platform_device *ghes_dev)
364 + if (list_empty(&ghes_sci))
365 + unregister_acpi_hed_notifier(&ghes_notifier_sci);
366 + mutex_unlock(&ghes_list_mutex);
367 ++ synchronize_rcu();
368 + break;
369 + case ACPI_HEST_NOTIFY_NMI:
370 + mutex_lock(&ghes_list_mutex);
371 +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
372 +index f3f0801a0e81..aa4e36b3a599 100644
373 +--- a/drivers/ata/libata-scsi.c
374 ++++ b/drivers/ata/libata-scsi.c
375 +@@ -2794,10 +2794,12 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
376 + static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
377 + {
378 + if (!sata_pmp_attached(ap)) {
379 +- if (likely(devno < ata_link_max_devices(&ap->link)))
380 ++ if (likely(devno >= 0 &&
381 ++ devno < ata_link_max_devices(&ap->link)))
382 + return &ap->link.device[devno];
383 + } else {
384 +- if (likely(devno < ap->nr_pmp_links))
385 ++ if (likely(devno >= 0 &&
386 ++ devno < ap->nr_pmp_links))
387 + return &ap->pmp_link[devno].device[0];
388 + }
389 +
390 +diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
391 +index 7072404c8b6d..8d73f999f40f 100644
392 +--- a/drivers/base/power/domain.c
393 ++++ b/drivers/base/power/domain.c
394 +@@ -1692,7 +1692,7 @@ int pm_genpd_add_subdomain_names(const char *master_name,
395 + int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
396 + struct generic_pm_domain *subdomain)
397 + {
398 +- struct gpd_link *link;
399 ++ struct gpd_link *l, *link;
400 + int ret = -EINVAL;
401 +
402 + if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
403 +@@ -1701,7 +1701,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
404 + start:
405 + genpd_acquire_lock(genpd);
406 +
407 +- list_for_each_entry(link, &genpd->master_links, master_node) {
408 ++ list_for_each_entry_safe(link, l, &genpd->master_links, master_node) {
409 + if (link->slave != subdomain)
410 + continue;
411 +
412 +diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
413 +index f97cb3d8c5a2..f34e8191665f 100644
414 +--- a/drivers/cpufreq/cpufreq_conservative.c
415 ++++ b/drivers/cpufreq/cpufreq_conservative.c
416 +@@ -212,8 +212,8 @@ static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
417 + int ret;
418 + ret = sscanf(buf, "%u", &input);
419 +
420 +- /* cannot be lower than 11 otherwise freq will not fall */
421 +- if (ret != 1 || input < 11 || input > 100 ||
422 ++ /* cannot be lower than 1 otherwise freq will not fall */
423 ++ if (ret != 1 || input < 1 || input > 100 ||
424 + input >= cs_tuners->up_threshold)
425 + return -EINVAL;
426 +
427 +diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
428 +index 4f1881eee3f1..6da4fbd4eef4 100644
429 +--- a/drivers/cpufreq/s3c2416-cpufreq.c
430 ++++ b/drivers/cpufreq/s3c2416-cpufreq.c
431 +@@ -434,7 +434,6 @@ static int __init s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
432 + rate = clk_get_rate(s3c_freq->hclk);
433 + if (rate < 133 * 1000 * 1000) {
434 + pr_err("cpufreq: HCLK not at 133MHz\n");
435 +- clk_put(s3c_freq->hclk);
436 + ret = -EINVAL;
437 + goto err_armclk;
438 + }
439 +diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
440 +index e9d8b235f68d..34815a74d900 100644
441 +--- a/drivers/crypto/caam/caamhash.c
442 ++++ b/drivers/crypto/caam/caamhash.c
443 +@@ -476,7 +476,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
444 + ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
445 + if (!ret) {
446 + /* in progress */
447 +- wait_for_completion_interruptible(&result.completion);
448 ++ wait_for_completion(&result.completion);
449 + ret = result.err;
450 + #ifdef DEBUG
451 + print_hex_dump(KERN_ERR, "digested key@"xstr(__LINE__)": ",
452 +diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
453 +index 87138d2adb5f..fd6bc0bc56c5 100644
454 +--- a/drivers/crypto/caam/key_gen.c
455 ++++ b/drivers/crypto/caam/key_gen.c
456 +@@ -107,7 +107,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
457 + ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
458 + if (!ret) {
459 + /* in progress */
460 +- wait_for_completion_interruptible(&result.completion);
461 ++ wait_for_completion(&result.completion);
462 + ret = result.err;
463 + #ifdef DEBUG
464 + print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
465 +diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
466 +index 057d894eee66..6e5ba44dfaac 100644
467 +--- a/drivers/crypto/talitos.c
468 ++++ b/drivers/crypto/talitos.c
469 +@@ -623,7 +623,7 @@ static void talitos_unregister_rng(struct device *dev)
470 + * crypto alg
471 + */
472 + #define TALITOS_CRA_PRIORITY 3000
473 +-#define TALITOS_MAX_KEY_SIZE 96
474 ++#define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
475 + #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
476 +
477 + #define MD5_BLOCK_SIZE 64
478 +@@ -1380,6 +1380,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
479 + {
480 + struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
481 +
482 ++ if (keylen > TALITOS_MAX_KEY_SIZE) {
483 ++ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
484 ++ return -EINVAL;
485 ++ }
486 ++
487 + memcpy(&ctx->key, key, keylen);
488 + ctx->keylen = keylen;
489 +
490 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
491 +index 89664933861f..a3a70283bded 100644
492 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
493 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
494 +@@ -368,6 +368,8 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
495 + return fifo_state->static_buffer;
496 + else {
497 + fifo_state->dynamic_buffer = vmalloc(bytes);
498 ++ if (!fifo_state->dynamic_buffer)
499 ++ goto out_err;
500 + return fifo_state->dynamic_buffer;
501 + }
502 + }
503 +diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
504 +index 5f5f20f42231..c61f3e7aa353 100644
505 +--- a/drivers/infiniband/hw/qib/qib_iba7322.c
506 ++++ b/drivers/infiniband/hw/qib/qib_iba7322.c
507 +@@ -6670,7 +6670,7 @@ static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
508 + unsigned long flags;
509 +
510 + while (wait) {
511 +- unsigned long shadow;
512 ++ unsigned long shadow = 0;
513 + int cstart, previ = -1;
514 +
515 + /*
516 +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
517 +index 8292554bccb5..7604ae54d7bc 100644
518 +--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
519 ++++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
520 +@@ -165,11 +165,11 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
521 + out:
522 + mutex_unlock(&ppriv->vlan_mutex);
523 +
524 ++ rtnl_unlock();
525 ++
526 + if (result)
527 + free_netdev(priv->dev);
528 +
529 +- rtnl_unlock();
530 +-
531 + return result;
532 + }
533 +
534 +diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
535 +index 0e7cd14bf7bb..88ba9649bd1f 100644
536 +--- a/drivers/iommu/amd_iommu.c
537 ++++ b/drivers/iommu/amd_iommu.c
538 +@@ -3402,6 +3402,7 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
539 + mutex_unlock(&domain->api_lock);
540 +
541 + domain_flush_tlb_pde(domain);
542 ++ domain_flush_complete(domain);
543 +
544 + return unmap_size;
545 + }
546 +diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
547 +index 37470ee7c850..4d874427101d 100644
548 +--- a/drivers/md/bitmap.c
549 ++++ b/drivers/md/bitmap.c
550 +@@ -1806,6 +1806,11 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
551 + long pages;
552 + struct bitmap_page *new_bp;
553 +
554 ++ if (bitmap->storage.file && !init) {
555 ++ pr_info("md: cannot resize file-based bitmap\n");
556 ++ return -EINVAL;
557 ++ }
558 ++
559 + if (chunksize == 0) {
560 + /* If there is enough space, leave the chunk size unchanged,
561 + * else increase by factor of two until there is enough space.
562 +diff --git a/drivers/md/md.c b/drivers/md/md.c
563 +index 7c45286e2662..95eb53f68413 100644
564 +--- a/drivers/md/md.c
565 ++++ b/drivers/md/md.c
566 +@@ -1898,7 +1898,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
567 + }
568 + sb = page_address(rdev->sb_page);
569 + sb->data_size = cpu_to_le64(num_sectors);
570 +- sb->super_offset = rdev->sb_start;
571 ++ sb->super_offset = cpu_to_le64(rdev->sb_start);
572 + sb->sb_csum = calc_sb_1_csum(sb);
573 + md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
574 + rdev->sb_page);
575 +diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
576 +index f53f4f895502..b4de9c3e5ca4 100644
577 +--- a/drivers/md/raid10.c
578 ++++ b/drivers/md/raid10.c
579 +@@ -1569,11 +1569,24 @@ retry_write:
580 + mbio->bi_private = r10_bio;
581 +
582 + atomic_inc(&r10_bio->remaining);
583 ++
584 ++ cb = blk_check_plugged(raid10_unplug, mddev,
585 ++ sizeof(*plug));
586 ++ if (cb)
587 ++ plug = container_of(cb, struct raid10_plug_cb,
588 ++ cb);
589 ++ else
590 ++ plug = NULL;
591 + spin_lock_irqsave(&conf->device_lock, flags);
592 +- bio_list_add(&conf->pending_bio_list, mbio);
593 +- conf->pending_count++;
594 ++ if (plug) {
595 ++ bio_list_add(&plug->pending, mbio);
596 ++ plug->pending_cnt++;
597 ++ } else {
598 ++ bio_list_add(&conf->pending_bio_list, mbio);
599 ++ conf->pending_count++;
600 ++ }
601 + spin_unlock_irqrestore(&conf->device_lock, flags);
602 +- if (!mddev_check_plugged(mddev))
603 ++ if (!plug)
604 + md_wakeup_thread(mddev->thread);
605 + }
606 + }
607 +diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
608 +index 93609091cb23..9dad717fb78c 100644
609 +--- a/drivers/media/platform/davinci/vpfe_capture.c
610 ++++ b/drivers/media/platform/davinci/vpfe_capture.c
611 +@@ -1706,27 +1706,9 @@ static long vpfe_param_handler(struct file *file, void *priv,
612 +
613 + switch (cmd) {
614 + case VPFE_CMD_S_CCDC_RAW_PARAMS:
615 ++ ret = -EINVAL;
616 + v4l2_warn(&vpfe_dev->v4l2_dev,
617 +- "VPFE_CMD_S_CCDC_RAW_PARAMS: experimental ioctl\n");
618 +- if (ccdc_dev->hw_ops.set_params) {
619 +- ret = ccdc_dev->hw_ops.set_params(param);
620 +- if (ret) {
621 +- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
622 +- "Error setting parameters in CCDC\n");
623 +- goto unlock_out;
624 +- }
625 +- ret = vpfe_get_ccdc_image_format(vpfe_dev,
626 +- &vpfe_dev->fmt);
627 +- if (ret < 0) {
628 +- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
629 +- "Invalid image format at CCDC\n");
630 +- goto unlock_out;
631 +- }
632 +- } else {
633 +- ret = -EINVAL;
634 +- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
635 +- "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
636 +- }
637 ++ "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
638 + break;
639 + default:
640 + ret = -ENOTTY;
641 +diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
642 +index 72e3fa652481..257bb7a6ff54 100644
643 +--- a/drivers/media/rc/imon.c
644 ++++ b/drivers/media/rc/imon.c
645 +@@ -1530,7 +1530,7 @@ static void imon_incoming_packet(struct imon_context *ictx,
646 + if (kc == KEY_KEYBOARD && !ictx->release_code) {
647 + ictx->last_keycode = kc;
648 + if (!nomouse) {
649 +- ictx->pad_mouse = ~(ictx->pad_mouse) & 0x1;
650 ++ ictx->pad_mouse = !ictx->pad_mouse;
651 + dev_dbg(dev, "toggling to %s mode\n",
652 + ictx->pad_mouse ? "mouse" : "keyboard");
653 + spin_unlock_irqrestore(&ictx->kc_lock, flags);
654 +diff --git a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
655 +index 9515f3a68f8f..122815e1cb65 100644
656 +--- a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
657 ++++ b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
658 +@@ -123,15 +123,10 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
659 + memset(&tvdata,0,sizeof(tvdata));
660 +
661 + eeprom = pvr2_eeprom_fetch(hdw);
662 +- if (!eeprom) return -EINVAL;
663 +-
664 +- {
665 +- struct i2c_client fake_client;
666 +- /* Newer version expects a useless client interface */
667 +- fake_client.addr = hdw->eeprom_addr;
668 +- fake_client.adapter = &hdw->i2c_adap;
669 +- tveeprom_hauppauge_analog(&fake_client,&tvdata,eeprom);
670 +- }
671 ++ if (!eeprom)
672 ++ return -EINVAL;
673 ++
674 ++ tveeprom_hauppauge_analog(NULL, &tvdata, eeprom);
675 +
676 + trace_eeprom("eeprom assumed v4l tveeprom module");
677 + trace_eeprom("eeprom direct call results:");
678 +diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
679 +index c7576a503e5b..2afadd00d3fd 100644
680 +--- a/drivers/mfd/omap-usb-tll.c
681 ++++ b/drivers/mfd/omap-usb-tll.c
682 +@@ -380,8 +380,8 @@ int omap_tll_init(struct usbhs_omap_platform_data *pdata)
683 + * and use SDR Mode
684 + */
685 + reg &= ~(OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE
686 +- | OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF
687 + | OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE);
688 ++ reg |= OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF;
689 + } else if (pdata->port_mode[i] ==
690 + OMAP_EHCI_PORT_MODE_HSIC) {
691 + /*
692 +diff --git a/drivers/misc/c2port/c2port-duramar2150.c b/drivers/misc/c2port/c2port-duramar2150.c
693 +index 5484301d57d9..3dc61ea7dc64 100644
694 +--- a/drivers/misc/c2port/c2port-duramar2150.c
695 ++++ b/drivers/misc/c2port/c2port-duramar2150.c
696 +@@ -129,8 +129,8 @@ static int __init duramar2150_c2port_init(void)
697 +
698 + duramar2150_c2port_dev = c2port_device_register("uc",
699 + &duramar2150_c2port_ops, NULL);
700 +- if (!duramar2150_c2port_dev) {
701 +- ret = -ENODEV;
702 ++ if (IS_ERR(duramar2150_c2port_dev)) {
703 ++ ret = PTR_ERR(duramar2150_c2port_dev);
704 + goto free_region;
705 + }
706 +
707 +diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
708 +index d5455c760618..503f37850fb3 100644
709 +--- a/drivers/net/can/usb/esd_usb2.c
710 ++++ b/drivers/net/can/usb/esd_usb2.c
711 +@@ -335,7 +335,7 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
712 + }
713 +
714 + cf->can_id = id & ESD_IDMASK;
715 +- cf->can_dlc = get_can_dlc(msg->msg.rx.dlc);
716 ++ cf->can_dlc = get_can_dlc(msg->msg.rx.dlc & ~ESD_RTR);
717 +
718 + if (id & ESD_EXTID)
719 + cf->can_id |= CAN_EFF_FLAG;
720 +diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
721 +index 5409fe876a44..69bc0a0eb423 100644
722 +--- a/drivers/net/ethernet/korina.c
723 ++++ b/drivers/net/ethernet/korina.c
724 +@@ -905,10 +905,10 @@ static void korina_restart_task(struct work_struct *work)
725 + DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR,
726 + &lp->rx_dma_regs->dmasm);
727 +
728 +- korina_free_ring(dev);
729 +-
730 + napi_disable(&lp->napi);
731 +
732 ++ korina_free_ring(dev);
733 ++
734 + if (korina_init(dev) < 0) {
735 + printk(KERN_ERR "%s: cannot restart device\n", dev->name);
736 + return;
737 +@@ -1069,12 +1069,12 @@ static int korina_close(struct net_device *dev)
738 + tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR;
739 + writel(tmp, &lp->rx_dma_regs->dmasm);
740 +
741 +- korina_free_ring(dev);
742 +-
743 + napi_disable(&lp->napi);
744 +
745 + cancel_work_sync(&lp->restart_task);
746 +
747 ++ korina_free_ring(dev);
748 ++
749 + free_irq(lp->rx_irq, dev);
750 + free_irq(lp->tx_irq, dev);
751 + free_irq(lp->ovr_irq, dev);
752 +diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
753 +index 31d02649be41..d22482b49744 100644
754 +--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
755 ++++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
756 +@@ -113,8 +113,13 @@ static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
757 + if (!buf)
758 + return -ENOMEM;
759 +
760 ++ if (offset_in_page(buf)) {
761 ++ dma_free_coherent(dev, PAGE_SIZE << order,
762 ++ buf, sg_dma_address(mem));
763 ++ return -ENOMEM;
764 ++ }
765 ++
766 + sg_set_buf(mem, buf, PAGE_SIZE << order);
767 +- BUG_ON(mem->offset);
768 + sg_dma_len(mem) = PAGE_SIZE << order;
769 + return 0;
770 + }
771 +diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
772 +index 3fb2643d05b4..8c58001aff1d 100644
773 +--- a/drivers/net/ethernet/mellanox/mlx4/main.c
774 ++++ b/drivers/net/ethernet/mellanox/mlx4/main.c
775 +@@ -511,8 +511,6 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
776 + return -ENOSYS;
777 + }
778 +
779 +- mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;
780 +-
781 + dev->caps.hca_core_clock = hca_param.hca_core_clock;
782 +
783 + memset(&dev_cap, 0, sizeof(dev_cap));
784 +diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
785 +index 10093f0c4c0f..00a80587b47d 100644
786 +--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
787 ++++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
788 +@@ -724,7 +724,7 @@ static void ql_build_coredump_seg_header(
789 + seg_hdr->cookie = MPI_COREDUMP_COOKIE;
790 + seg_hdr->segNum = seg_number;
791 + seg_hdr->segSize = seg_size;
792 +- memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
793 ++ strncpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
794 + }
795 +
796 + /*
797 +diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
798 +index b7268b3dae77..5f5f84ad0697 100644
799 +--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
800 ++++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
801 +@@ -398,7 +398,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
802 + *
803 + * Return: Total number of bytes received
804 + */
805 +-static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
806 ++static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
807 + {
808 + void __iomem *addr;
809 + u16 length, proto_type;
810 +@@ -438,7 +438,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
811 +
812 + /* Check if received ethernet frame is a raw ethernet frame
813 + * or an IP packet or an ARP packet */
814 +- if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
815 ++ if (proto_type > ETH_DATA_LEN) {
816 +
817 + if (proto_type == ETH_P_IP) {
818 + length = ((ntohl(in_be32(addr +
819 +@@ -446,6 +446,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
820 + XEL_RXBUFF_OFFSET)) >>
821 + XEL_HEADER_SHIFT) &
822 + XEL_RPLR_LENGTH_MASK);
823 ++ length = min_t(u16, length, ETH_DATA_LEN);
824 + length += ETH_HLEN + ETH_FCS_LEN;
825 +
826 + } else if (proto_type == ETH_P_ARP)
827 +@@ -458,6 +459,9 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
828 + /* Use the length in the frame, plus the header and trailer */
829 + length = proto_type + ETH_HLEN + ETH_FCS_LEN;
830 +
831 ++ if (WARN_ON(length > maxlen))
832 ++ length = maxlen;
833 ++
834 + /* Read from the EmacLite device */
835 + xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET),
836 + data, length);
837 +@@ -632,7 +636,7 @@ static void xemaclite_rx_handler(struct net_device *dev)
838 +
839 + skb_reserve(skb, 2);
840 +
841 +- len = xemaclite_recv_data(lp, (u8 *) skb->data);
842 ++ len = xemaclite_recv_data(lp, (u8 *) skb->data, len);
843 +
844 + if (!len) {
845 + dev->stats.rx_errors++;
846 +diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
847 +index 202fe1ff1987..b23f36a5b0dd 100644
848 +--- a/drivers/net/phy/marvell.c
849 ++++ b/drivers/net/phy/marvell.c
850 +@@ -656,8 +656,6 @@ static int marvell_read_status(struct phy_device *phydev)
851 + if (adv < 0)
852 + return adv;
853 +
854 +- lpa &= adv;
855 +-
856 + if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
857 + phydev->duplex = DUPLEX_FULL;
858 + else
859 +diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
860 +index 5225d4321e7c..0a3ad7ba2bea 100644
861 +--- a/drivers/net/team/team.c
862 ++++ b/drivers/net/team/team.c
863 +@@ -2121,8 +2121,10 @@ start_again:
864 +
865 + hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
866 + TEAM_CMD_OPTIONS_GET);
867 +- if (!hdr)
868 ++ if (!hdr) {
869 ++ nlmsg_free(skb);
870 + return -EMSGSIZE;
871 ++ }
872 +
873 + if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
874 + goto nla_put_failure;
875 +@@ -2389,8 +2391,10 @@ start_again:
876 +
877 + hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
878 + TEAM_CMD_PORT_LIST_GET);
879 +- if (!hdr)
880 ++ if (!hdr) {
881 ++ nlmsg_free(skb);
882 + return -EMSGSIZE;
883 ++ }
884 +
885 + if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
886 + goto nla_put_failure;
887 +diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
888 +index 2c524305589f..8afb60925332 100644
889 +--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
890 ++++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
891 +@@ -4019,6 +4019,11 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
892 + cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true,
893 + GFP_KERNEL);
894 + } else if (ieee80211_is_action(mgmt->frame_control)) {
895 ++ if (len > BRCMF_FIL_ACTION_FRAME_SIZE + DOT11_MGMT_HDR_LEN) {
896 ++ brcmf_err("invalid action frame length\n");
897 ++ err = -EINVAL;
898 ++ goto exit;
899 ++ }
900 + af_params = kzalloc(sizeof(*af_params), GFP_KERNEL);
901 + if (af_params == NULL) {
902 + brcmf_err("unable to allocate frame\n");
903 +diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
904 +index bf13e73ecabc..0f3581b7a2e4 100644
905 +--- a/drivers/s390/scsi/zfcp_dbf.c
906 ++++ b/drivers/s390/scsi/zfcp_dbf.c
907 +@@ -556,19 +556,32 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
908 +
909 + if (fsf) {
910 + rec->fsf_req_id = fsf->req_id;
911 ++ rec->pl_len = FCP_RESP_WITH_EXT;
912 + fcp_rsp = (struct fcp_resp_with_ext *)
913 + &(fsf->qtcb->bottom.io.fcp_rsp);
914 ++ /* mandatory parts of FCP_RSP IU in this SCSI record */
915 + memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
916 + if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
917 + fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
918 + rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
919 ++ rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_rsp_len);
920 + }
921 + if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
922 +- rec->pl_len = min((u16)SCSI_SENSE_BUFFERSIZE,
923 +- (u16)ZFCP_DBF_PAY_MAX_REC);
924 +- zfcp_dbf_pl_write(dbf, sc->sense_buffer, rec->pl_len,
925 +- "fcp_sns", fsf->req_id);
926 ++ rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_sns_len);
927 + }
928 ++ /* complete FCP_RSP IU in associated PAYload record
929 ++ * but only if there are optional parts
930 ++ */
931 ++ if (fcp_rsp->resp.fr_flags != 0)
932 ++ zfcp_dbf_pl_write(
933 ++ dbf, fcp_rsp,
934 ++ /* at least one full PAY record
935 ++ * but not beyond hardware response field
936 ++ */
937 ++ min_t(u16, max_t(u16, rec->pl_len,
938 ++ ZFCP_DBF_PAY_MAX_REC),
939 ++ FSF_FCP_RSP_SIZE),
940 ++ "fcp_riu", fsf->req_id);
941 + }
942 +
943 + debug_event(dbf->scsi, level, rec, sizeof(*rec));
944 +diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
945 +index a8165f142550..712a8484a7b3 100644
946 +--- a/drivers/s390/scsi/zfcp_dbf.h
947 ++++ b/drivers/s390/scsi/zfcp_dbf.h
948 +@@ -323,7 +323,11 @@ void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
949 + {
950 + struct fsf_qtcb *qtcb = req->qtcb;
951 +
952 +- if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
953 ++ if (unlikely(req->status & (ZFCP_STATUS_FSFREQ_DISMISSED |
954 ++ ZFCP_STATUS_FSFREQ_ERROR))) {
955 ++ zfcp_dbf_hba_fsf_resp("fs_rerr", 3, req);
956 ++
957 ++ } else if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
958 + (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
959 + zfcp_dbf_hba_fsf_resp("fs_perr", 1, req);
960 +
961 +diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
962 +index b1d2024ed513..c2e40e10b293 100644
963 +--- a/drivers/s390/scsi/zfcp_fc.h
964 ++++ b/drivers/s390/scsi/zfcp_fc.h
965 +@@ -4,7 +4,7 @@
966 + * Fibre Channel related definitions and inline functions for the zfcp
967 + * device driver
968 + *
969 +- * Copyright IBM Corp. 2009
970 ++ * Copyright IBM Corp. 2009, 2017
971 + */
972 +
973 + #ifndef ZFCP_FC_H
974 +@@ -291,6 +291,10 @@ void zfcp_fc_eval_fcp_rsp(struct fcp_resp_with_ext *fcp_rsp,
975 + !(rsp_flags & FCP_SNS_LEN_VAL) &&
976 + fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
977 + set_host_byte(scsi, DID_ERROR);
978 ++ } else if (unlikely(rsp_flags & FCP_RESID_OVER)) {
979 ++ /* FCP_DL was not sufficient for SCSI data length */
980 ++ if (fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
981 ++ set_host_byte(scsi, DID_ERROR);
982 + }
983 + }
984 +
985 +diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
986 +index ad5718401eab..d27b49194d68 100644
987 +--- a/drivers/s390/scsi/zfcp_fsf.c
988 ++++ b/drivers/s390/scsi/zfcp_fsf.c
989 +@@ -2286,7 +2286,8 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
990 + fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
991 + zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);
992 +
993 +- if (scsi_prot_sg_count(scsi_cmnd)) {
994 ++ if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) &&
995 ++ scsi_prot_sg_count(scsi_cmnd)) {
996 + zfcp_qdio_set_data_div(qdio, &req->qdio_req,
997 + scsi_prot_sg_count(scsi_cmnd));
998 + retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
999 +diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
1000 +index 66c37e77ac7c..8ec101a4a5eb 100644
1001 +--- a/drivers/s390/scsi/zfcp_scsi.c
1002 ++++ b/drivers/s390/scsi/zfcp_scsi.c
1003 +@@ -294,8 +294,10 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
1004 +
1005 + zfcp_erp_wait(adapter);
1006 + ret = fc_block_scsi_eh(scpnt);
1007 +- if (ret)
1008 ++ if (ret) {
1009 ++ zfcp_dbf_scsi_devreset("fiof", scpnt, tm_flags);
1010 + return ret;
1011 ++ }
1012 +
1013 + if (!(atomic_read(&adapter->status) &
1014 + ZFCP_STATUS_COMMON_RUNNING)) {
1015 +@@ -303,8 +305,10 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
1016 + return SUCCESS;
1017 + }
1018 + }
1019 +- if (!fsf_req)
1020 ++ if (!fsf_req) {
1021 ++ zfcp_dbf_scsi_devreset("reqf", scpnt, tm_flags);
1022 + return FAILED;
1023 ++ }
1024 +
1025 + wait_for_completion(&fsf_req->completion);
1026 +
1027 +diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
1028 +index e1c8be06de9d..f94fcda1285d 100644
1029 +--- a/drivers/scsi/device_handler/scsi_dh_emc.c
1030 ++++ b/drivers/scsi/device_handler/scsi_dh_emc.c
1031 +@@ -464,7 +464,7 @@ static int clariion_prep_fn(struct scsi_device *sdev, struct request *req)
1032 + static int clariion_std_inquiry(struct scsi_device *sdev,
1033 + struct clariion_dh_data *csdev)
1034 + {
1035 +- int err;
1036 ++ int err = SCSI_DH_OK;
1037 + char *sp_model;
1038 +
1039 + err = send_inquiry_cmd(sdev, 0, csdev);
1040 +diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
1041 +index bf60c631abb5..3b0f02c146d5 100644
1042 +--- a/drivers/scsi/qla2xxx/qla_attr.c
1043 ++++ b/drivers/scsi/qla2xxx/qla_attr.c
1044 +@@ -299,6 +299,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
1045 + return -EINVAL;
1046 + if (start > ha->optrom_size)
1047 + return -EINVAL;
1048 ++ if (size > ha->optrom_size - start)
1049 ++ size = ha->optrom_size - start;
1050 +
1051 + switch (val) {
1052 + case 0:
1053 +@@ -320,8 +322,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
1054 + return -EINVAL;
1055 +
1056 + ha->optrom_region_start = start;
1057 +- ha->optrom_region_size = start + size > ha->optrom_size ?
1058 +- ha->optrom_size - start : size;
1059 ++ ha->optrom_region_size = start + size;
1060 +
1061 + ha->optrom_state = QLA_SREADING;
1062 + ha->optrom_buffer = vmalloc(ha->optrom_region_size);
1063 +@@ -388,8 +389,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
1064 + }
1065 +
1066 + ha->optrom_region_start = start;
1067 +- ha->optrom_region_size = start + size > ha->optrom_size ?
1068 +- ha->optrom_size - start : size;
1069 ++ ha->optrom_region_size = start + size;
1070 +
1071 + ha->optrom_state = QLA_SWRITING;
1072 + ha->optrom_buffer = vmalloc(ha->optrom_region_size);
1073 +diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
1074 +index 40fe8a77236a..c11b82e70956 100644
1075 +--- a/drivers/scsi/qla2xxx/qla_os.c
1076 ++++ b/drivers/scsi/qla2xxx/qla_os.c
1077 +@@ -2342,10 +2342,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1078 +
1079 + if (mem_only) {
1080 + if (pci_enable_device_mem(pdev))
1081 +- goto probe_out;
1082 ++ return ret;
1083 + } else {
1084 + if (pci_enable_device(pdev))
1085 +- goto probe_out;
1086 ++ return ret;
1087 + }
1088 +
1089 + /* This may fail but that's ok */
1090 +@@ -2355,7 +2355,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1091 + if (!ha) {
1092 + ql_log_pci(ql_log_fatal, pdev, 0x0009,
1093 + "Unable to allocate memory for ha.\n");
1094 +- goto probe_out;
1095 ++ goto disable_device;
1096 + }
1097 + ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
1098 + "Memory allocated for ha=%p.\n", ha);
1099 +@@ -2899,7 +2899,7 @@ iospace_config_failed:
1100 + kfree(ha);
1101 + ha = NULL;
1102 +
1103 +-probe_out:
1104 ++disable_device:
1105 + pci_disable_device(pdev);
1106 + return ret;
1107 + }
1108 +diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
1109 +index 0ae406a47507..2aba2f75fb87 100644
1110 +--- a/drivers/staging/comedi/comedi_fops.c
1111 ++++ b/drivers/staging/comedi/comedi_fops.c
1112 +@@ -2557,15 +2557,13 @@ static int __init comedi_init(void)
1113 +
1114 + comedi_class->dev_attrs = comedi_dev_attrs;
1115 +
1116 +- /* XXX requires /proc interface */
1117 +- comedi_proc_init();
1118 +-
1119 + /* create devices files for legacy/manual use */
1120 + for (i = 0; i < comedi_num_legacy_minors; i++) {
1121 + struct comedi_device *dev;
1122 + dev = comedi_alloc_board_minor(NULL);
1123 + if (IS_ERR(dev)) {
1124 + comedi_cleanup_board_minors();
1125 ++ class_destroy(comedi_class);
1126 + cdev_del(&comedi_cdev);
1127 + unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0),
1128 + COMEDI_NUM_MINORS);
1129 +@@ -2576,6 +2574,9 @@ static int __init comedi_init(void)
1130 + }
1131 + }
1132 +
1133 ++ /* XXX requires /proc interface */
1134 ++ comedi_proc_init();
1135 ++
1136 + return 0;
1137 + }
1138 + module_init(comedi_init);
1139 +diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
1140 +index 0d3356d4b7d2..3c0b16fe172b 100644
1141 +--- a/drivers/staging/iio/resolver/ad2s1210.c
1142 ++++ b/drivers/staging/iio/resolver/ad2s1210.c
1143 +@@ -477,7 +477,7 @@ static int ad2s1210_read_raw(struct iio_dev *indio_dev,
1144 + long m)
1145 + {
1146 + struct ad2s1210_state *st = iio_priv(indio_dev);
1147 +- bool negative;
1148 ++ u16 negative;
1149 + int ret = 0;
1150 + u16 pos;
1151 + s16 vel;
1152 +diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
1153 +index 04c775cb3e65..179f7810d398 100644
1154 +--- a/drivers/target/target_core_fabric_configfs.c
1155 ++++ b/drivers/target/target_core_fabric_configfs.c
1156 +@@ -84,6 +84,11 @@ static int target_fabric_mappedlun_link(
1157 + "_tpg does not exist\n");
1158 + return -EINVAL;
1159 + }
1160 ++ if (lun->lun_shutdown) {
1161 ++ pr_err("Unable to create mappedlun symlink because"
1162 ++ " lun->lun_shutdown=true\n");
1163 ++ return -EINVAL;
1164 ++ }
1165 + se_tpg = lun->lun_sep->sep_tpg;
1166 +
1167 + nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
1168 +diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
1169 +index 8572207e3d4d..bc3092f032b0 100644
1170 +--- a/drivers/target/target_core_tpg.c
1171 ++++ b/drivers/target/target_core_tpg.c
1172 +@@ -839,6 +839,8 @@ static void core_tpg_shutdown_lun(
1173 + struct se_portal_group *tpg,
1174 + struct se_lun *lun)
1175 + {
1176 ++ lun->lun_shutdown = true;
1177 ++
1178 + core_clear_lun_from_tpg(lun, tpg);
1179 + transport_clear_lun_from_sessions(lun);
1180 + }
1181 +@@ -868,6 +870,7 @@ struct se_lun *core_tpg_pre_dellun(
1182 + spin_unlock(&tpg->tpg_lun_lock);
1183 + return ERR_PTR(-ENODEV);
1184 + }
1185 ++ lun->lun_shutdown = false;
1186 + spin_unlock(&tpg->tpg_lun_lock);
1187 +
1188 + return lun;
1189 +diff --git a/drivers/tty/serial/efm32-uart.c b/drivers/tty/serial/efm32-uart.c
1190 +index 7d199c8e1a75..c9635f1c7108 100644
1191 +--- a/drivers/tty/serial/efm32-uart.c
1192 ++++ b/drivers/tty/serial/efm32-uart.c
1193 +@@ -27,6 +27,7 @@
1194 + #define UARTn_FRAME 0x04
1195 + #define UARTn_FRAME_DATABITS__MASK 0x000f
1196 + #define UARTn_FRAME_DATABITS(n) ((n) - 3)
1197 ++#define UARTn_FRAME_PARITY__MASK 0x0300
1198 + #define UARTn_FRAME_PARITY_NONE 0x0000
1199 + #define UARTn_FRAME_PARITY_EVEN 0x0200
1200 + #define UARTn_FRAME_PARITY_ODD 0x0300
1201 +@@ -578,12 +579,16 @@ static void efm32_uart_console_get_options(struct efm32_uart_port *efm_port,
1202 + 16 * (4 + (clkdiv >> 6)));
1203 +
1204 + frame = efm32_uart_read32(efm_port, UARTn_FRAME);
1205 +- if (frame & UARTn_FRAME_PARITY_ODD)
1206 ++ switch (frame & UARTn_FRAME_PARITY__MASK) {
1207 ++ case UARTn_FRAME_PARITY_ODD:
1208 + *parity = 'o';
1209 +- else if (frame & UARTn_FRAME_PARITY_EVEN)
1210 ++ break;
1211 ++ case UARTn_FRAME_PARITY_EVEN:
1212 + *parity = 'e';
1213 +- else
1214 ++ break;
1215 ++ default:
1216 + *parity = 'n';
1217 ++ }
1218 +
1219 + *bits = (frame & UARTn_FRAME_DATABITS__MASK) -
1220 + UARTn_FRAME_DATABITS(4) + 4;
1221 +diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
1222 +index 8b1534c424af..be3dc751dfbb 100644
1223 +--- a/drivers/tty/serial/ifx6x60.c
1224 ++++ b/drivers/tty/serial/ifx6x60.c
1225 +@@ -1379,9 +1379,9 @@ static struct spi_driver ifx_spi_driver = {
1226 + static void __exit ifx_spi_exit(void)
1227 + {
1228 + /* unregister */
1229 ++ spi_unregister_driver((void *)&ifx_spi_driver);
1230 + tty_unregister_driver(tty_drv);
1231 + put_tty_driver(tty_drv);
1232 +- spi_unregister_driver((void *)&ifx_spi_driver);
1233 + unregister_reboot_notifier(&ifx_modem_reboot_notifier_block);
1234 + }
1235 +
1236 +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
1237 +index 010ec70d59fb..3390a39f5a78 100644
1238 +--- a/drivers/tty/vt/vt.c
1239 ++++ b/drivers/tty/vt/vt.c
1240 +@@ -2601,13 +2601,13 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
1241 + * related to the kernel should not use this.
1242 + */
1243 + data = vt_get_shift_state();
1244 +- ret = __put_user(data, p);
1245 ++ ret = put_user(data, p);
1246 + break;
1247 + case TIOCL_GETMOUSEREPORTING:
1248 + console_lock(); /* May be overkill */
1249 + data = mouse_reporting();
1250 + console_unlock();
1251 +- ret = __put_user(data, p);
1252 ++ ret = put_user(data, p);
1253 + break;
1254 + case TIOCL_SETVESABLANK:
1255 + console_lock();
1256 +@@ -2616,7 +2616,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
1257 + break;
1258 + case TIOCL_GETKMSGREDIRECT:
1259 + data = vt_get_kmsg_redirect();
1260 +- ret = __put_user(data, p);
1261 ++ ret = put_user(data, p);
1262 + break;
1263 + case TIOCL_SETKMSGREDIRECT:
1264 + if (!capable(CAP_SYS_ADMIN)) {
1265 +diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
1266 +index 36a7063a6cba..5a38ca87f406 100644
1267 +--- a/drivers/usb/chipidea/debug.c
1268 ++++ b/drivers/usb/chipidea/debug.c
1269 +@@ -203,7 +203,8 @@ static int ci_role_show(struct seq_file *s, void *data)
1270 + {
1271 + struct ci13xxx *ci = s->private;
1272 +
1273 +- seq_printf(s, "%s\n", ci_role(ci)->name);
1274 ++ if (ci->role != CI_ROLE_END)
1275 ++ seq_printf(s, "%s\n", ci_role(ci)->name);
1276 +
1277 + return 0;
1278 + }
1279 +diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
1280 +index a9142a46ae82..2cbb26c20082 100644
1281 +--- a/drivers/usb/gadget/composite.c
1282 ++++ b/drivers/usb/gadget/composite.c
1283 +@@ -1522,6 +1522,8 @@ static DEVICE_ATTR(suspended, 0444, composite_show_suspended, NULL);
1284 + static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver)
1285 + {
1286 + struct usb_composite_dev *cdev = get_gadget_data(gadget);
1287 ++ struct usb_gadget_strings *gstr = cdev->driver->strings[0];
1288 ++ struct usb_string *dev_str = gstr->strings;
1289 +
1290 + /* composite_disconnect() must already have been called
1291 + * by the underlying peripheral controller driver!
1292 +@@ -1541,6 +1543,9 @@ static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver)
1293 +
1294 + composite_dev_cleanup(cdev);
1295 +
1296 ++ if (dev_str[USB_GADGET_MANUFACTURER_IDX].s == cdev->def_manufacturer)
1297 ++ dev_str[USB_GADGET_MANUFACTURER_IDX].s = "";
1298 ++
1299 + kfree(cdev->def_manufacturer);
1300 + kfree(cdev);
1301 + set_gadget_data(gadget, NULL);
1302 +diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
1303 +index 6656dfda5665..0fa139081b16 100644
1304 +--- a/drivers/usb/host/r8a66597-hcd.c
1305 ++++ b/drivers/usb/host/r8a66597-hcd.c
1306 +@@ -1270,7 +1270,7 @@ static void set_td_timer(struct r8a66597 *r8a66597, struct r8a66597_td *td)
1307 + time = 30;
1308 + break;
1309 + default:
1310 +- time = 300;
1311 ++ time = 50;
1312 + break;
1313 + }
1314 +
1315 +@@ -1786,6 +1786,7 @@ static void r8a66597_td_timer(unsigned long _r8a66597)
1316 + pipe = td->pipe;
1317 + pipe_stop(r8a66597, pipe);
1318 +
1319 ++ /* Select a different address or endpoint */
1320 + new_td = td;
1321 + do {
1322 + list_move_tail(&new_td->queue,
1323 +@@ -1795,7 +1796,8 @@ static void r8a66597_td_timer(unsigned long _r8a66597)
1324 + new_td = td;
1325 + break;
1326 + }
1327 +- } while (td != new_td && td->address == new_td->address);
1328 ++ } while (td != new_td && td->address == new_td->address &&
1329 ++ td->pipe->info.epnum == new_td->pipe->info.epnum);
1330 +
1331 + start_transfer(r8a66597, new_td);
1332 +
1333 +diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
1334 +index cfd205036aba..a4b027342589 100644
1335 +--- a/drivers/usb/renesas_usbhs/common.c
1336 ++++ b/drivers/usb/renesas_usbhs/common.c
1337 +@@ -600,8 +600,10 @@ static int usbhsc_resume(struct device *dev)
1338 + struct usbhs_priv *priv = dev_get_drvdata(dev);
1339 + struct platform_device *pdev = usbhs_priv_to_pdev(priv);
1340 +
1341 +- if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL))
1342 ++ if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL)) {
1343 + usbhsc_power_ctrl(priv, 1);
1344 ++ usbhs_mod_autonomy_mode(priv);
1345 ++ }
1346 +
1347 + usbhs_platform_call(priv, phy_reset, pdev);
1348 +
1349 +diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
1350 +index 157a9f9afc2d..0c962ff5eed2 100644
1351 +--- a/drivers/usb/renesas_usbhs/fifo.c
1352 ++++ b/drivers/usb/renesas_usbhs/fifo.c
1353 +@@ -261,11 +261,26 @@ static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
1354 + struct usbhs_fifo *fifo)
1355 + {
1356 + struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
1357 ++ int ret = 0;
1358 +
1359 +- if (!usbhs_pipe_is_dcp(pipe))
1360 +- usbhsf_fifo_barrier(priv, fifo);
1361 ++ if (!usbhs_pipe_is_dcp(pipe)) {
1362 ++ /*
1363 ++ * This driver checks the pipe condition first to avoid -EBUSY
1364 ++ * from usbhsf_fifo_barrier() with about 10 msec delay in
1365 ++ * the interrupt handler if the pipe is RX direction and empty.
1366 ++ */
1367 ++ if (usbhs_pipe_is_dir_in(pipe))
1368 ++ ret = usbhs_pipe_is_accessible(pipe);
1369 ++ if (!ret)
1370 ++ ret = usbhsf_fifo_barrier(priv, fifo);
1371 ++ }
1372 +
1373 +- usbhs_write(priv, fifo->ctr, BCLR);
1374 ++ /*
1375 ++ * if non-DCP pipe, this driver should set BCLR when
1376 ++ * usbhsf_fifo_barrier() returns 0.
1377 ++ */
1378 ++ if (!ret)
1379 ++ usbhs_write(priv, fifo->ctr, BCLR);
1380 + }
1381 +
1382 + static int usbhsf_fifo_rcv_len(struct usbhs_priv *priv,
1383 +@@ -545,6 +560,7 @@ static int usbhsf_pio_try_push(struct usbhs_pkt *pkt, int *is_done)
1384 + usbhsf_send_terminator(pipe, fifo);
1385 +
1386 + usbhsf_tx_irq_ctrl(pipe, !*is_done);
1387 ++ usbhs_pipe_running(pipe, !*is_done);
1388 + usbhs_pipe_enable(pipe);
1389 +
1390 + dev_dbg(dev, " send %d (%d/ %d/ %d/ %d)\n",
1391 +@@ -571,12 +587,21 @@ usbhs_fifo_write_busy:
1392 + * retry in interrupt
1393 + */
1394 + usbhsf_tx_irq_ctrl(pipe, 1);
1395 ++ usbhs_pipe_running(pipe, 1);
1396 +
1397 + return ret;
1398 + }
1399 +
1400 ++static int usbhsf_pio_prepare_push(struct usbhs_pkt *pkt, int *is_done)
1401 ++{
1402 ++ if (usbhs_pipe_is_running(pkt->pipe))
1403 ++ return 0;
1404 ++
1405 ++ return usbhsf_pio_try_push(pkt, is_done);
1406 ++}
1407 ++
1408 + struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = {
1409 +- .prepare = usbhsf_pio_try_push,
1410 ++ .prepare = usbhsf_pio_prepare_push,
1411 + .try_run = usbhsf_pio_try_push,
1412 + };
1413 +
1414 +@@ -590,6 +615,9 @@ static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
1415 + if (usbhs_pipe_is_busy(pipe))
1416 + return 0;
1417 +
1418 ++ if (usbhs_pipe_is_running(pipe))
1419 ++ return 0;
1420 ++
1421 + /*
1422 + * pipe enable to prepare packet receive
1423 + */
1424 +@@ -598,6 +626,7 @@ static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
1425 +
1426 + usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);
1427 + usbhs_pipe_enable(pipe);
1428 ++ usbhs_pipe_running(pipe, 1);
1429 + usbhsf_rx_irq_ctrl(pipe, 1);
1430 +
1431 + return 0;
1432 +@@ -643,6 +672,7 @@ static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done)
1433 + (total_len < maxp)) { /* short packet */
1434 + *is_done = 1;
1435 + usbhsf_rx_irq_ctrl(pipe, 0);
1436 ++ usbhs_pipe_running(pipe, 0);
1437 + usbhs_pipe_disable(pipe); /* disable pipe first */
1438 + }
1439 +
1440 +@@ -798,10 +828,11 @@ static void xfer_work(struct work_struct *work)
1441 + dev_dbg(dev, " %s %d (%d/ %d)\n",
1442 + fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);
1443 +
1444 ++ usbhs_pipe_running(pipe, 1);
1445 + usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
1446 +- usbhs_pipe_enable(pipe);
1447 +- usbhsf_dma_start(pipe, fifo);
1448 + dma_async_issue_pending(chan);
1449 ++ usbhsf_dma_start(pipe, fifo);
1450 ++ usbhs_pipe_enable(pipe);
1451 + }
1452 +
1453 + /*
1454 +@@ -829,6 +860,10 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
1455 + if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
1456 + goto usbhsf_pio_prepare_push;
1457 +
1458 ++ /* return at this time if the pipe is running */
1459 ++ if (usbhs_pipe_is_running(pipe))
1460 ++ return 0;
1461 ++
1462 + /* get enable DMA fifo */
1463 + fifo = usbhsf_get_dma_fifo(priv, pkt);
1464 + if (!fifo)
1465 +@@ -866,6 +901,7 @@ static int usbhsf_dma_push_done(struct usbhs_pkt *pkt, int *is_done)
1466 + pkt->actual = pkt->trans;
1467 +
1468 + *is_done = !pkt->zero; /* send zero packet ? */
1469 ++ usbhs_pipe_running(pipe, !*is_done);
1470 +
1471 + usbhsf_dma_stop(pipe, pipe->fifo);
1472 + usbhsf_dma_unmap(pkt);
1473 +@@ -966,8 +1002,10 @@ static int usbhsf_dma_pop_done(struct usbhs_pkt *pkt, int *is_done)
1474 + if ((pkt->actual == pkt->length) || /* receive all data */
1475 + (pkt->trans < maxp)) { /* short packet */
1476 + *is_done = 1;
1477 ++ usbhs_pipe_running(pipe, 0);
1478 + } else {
1479 + /* re-enable */
1480 ++ usbhs_pipe_running(pipe, 0);
1481 + usbhsf_prepare_pop(pkt, is_done);
1482 + }
1483 +
1484 +diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c
1485 +index 7926e1c700f1..85e30e1d5e82 100644
1486 +--- a/drivers/usb/renesas_usbhs/pipe.c
1487 ++++ b/drivers/usb/renesas_usbhs/pipe.c
1488 +@@ -578,6 +578,19 @@ int usbhs_pipe_is_dir_host(struct usbhs_pipe *pipe)
1489 + return usbhsp_flags_has(pipe, IS_DIR_HOST);
1490 + }
1491 +
1492 ++int usbhs_pipe_is_running(struct usbhs_pipe *pipe)
1493 ++{
1494 ++ return usbhsp_flags_has(pipe, IS_RUNNING);
1495 ++}
1496 ++
1497 ++void usbhs_pipe_running(struct usbhs_pipe *pipe, int running)
1498 ++{
1499 ++ if (running)
1500 ++ usbhsp_flags_set(pipe, IS_RUNNING);
1501 ++ else
1502 ++ usbhsp_flags_clr(pipe, IS_RUNNING);
1503 ++}
1504 ++
1505 + void usbhs_pipe_data_sequence(struct usbhs_pipe *pipe, int sequence)
1506 + {
1507 + u16 mask = (SQCLR | SQSET);
1508 +diff --git a/drivers/usb/renesas_usbhs/pipe.h b/drivers/usb/renesas_usbhs/pipe.h
1509 +index b476fde955bf..b18a794922d3 100644
1510 +--- a/drivers/usb/renesas_usbhs/pipe.h
1511 ++++ b/drivers/usb/renesas_usbhs/pipe.h
1512 +@@ -36,6 +36,7 @@ struct usbhs_pipe {
1513 + #define USBHS_PIPE_FLAGS_IS_USED (1 << 0)
1514 + #define USBHS_PIPE_FLAGS_IS_DIR_IN (1 << 1)
1515 + #define USBHS_PIPE_FLAGS_IS_DIR_HOST (1 << 2)
1516 ++#define USBHS_PIPE_FLAGS_IS_RUNNING (1 << 3)
1517 +
1518 + struct usbhs_pkt_handle *handler;
1519 +
1520 +@@ -79,6 +80,9 @@ int usbhs_pipe_probe(struct usbhs_priv *priv);
1521 + void usbhs_pipe_remove(struct usbhs_priv *priv);
1522 + int usbhs_pipe_is_dir_in(struct usbhs_pipe *pipe);
1523 + int usbhs_pipe_is_dir_host(struct usbhs_pipe *pipe);
1524 ++int usbhs_pipe_is_running(struct usbhs_pipe *pipe);
1525 ++void usbhs_pipe_running(struct usbhs_pipe *pipe, int running);
1526 ++
1527 + void usbhs_pipe_init(struct usbhs_priv *priv,
1528 + int (*dma_map_ctrl)(struct usbhs_pkt *pkt, int map));
1529 + int usbhs_pipe_get_maxpacket(struct usbhs_pipe *pipe);
1530 +diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
1531 +index 5f3bcd31e204..f3bbe210119d 100644
1532 +--- a/drivers/usb/serial/console.c
1533 ++++ b/drivers/usb/serial/console.c
1534 +@@ -188,6 +188,7 @@ static int usb_console_setup(struct console *co, char *options)
1535 + kfree(tty);
1536 + reset_open_count:
1537 + port->port.count = 0;
1538 ++ info->port = NULL;
1539 + usb_autopm_put_interface(serial->interface);
1540 + error_get_interface:
1541 + usb_serial_put(serial);
1542 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
1543 +index 296cc1b49446..7831e6865f16 100644
1544 +--- a/fs/btrfs/ioctl.c
1545 ++++ b/fs/btrfs/ioctl.c
1546 +@@ -2974,6 +2974,10 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
1547 + ret = PTR_ERR(new_root);
1548 + goto out;
1549 + }
1550 ++ if (!is_fstree(new_root->objectid)) {
1551 ++ ret = -ENOENT;
1552 ++ goto out;
1553 ++ }
1554 +
1555 + if (btrfs_root_refs(&new_root->root_item) == 0) {
1556 + ret = -ENOENT;
1557 +diff --git a/fs/direct-io.c b/fs/direct-io.c
1558 +index 7ab90f5081ee..4007749a478e 100644
1559 +--- a/fs/direct-io.c
1560 ++++ b/fs/direct-io.c
1561 +@@ -759,7 +759,8 @@ out:
1562 + */
1563 + if (sdio->boundary) {
1564 + ret = dio_send_cur_page(dio, sdio, map_bh);
1565 +- dio_bio_submit(dio, sdio);
1566 ++ if (sdio->bio)
1567 ++ dio_bio_submit(dio, sdio);
1568 + page_cache_release(sdio->cur_page);
1569 + sdio->cur_page = NULL;
1570 + }
1571 +@@ -933,6 +934,7 @@ do_holes:
1572 + i_size_aligned >> blkbits) {
1573 + /* We hit eof */
1574 + page_cache_release(page);
1575 ++ dio_cleanup(dio, sdio);
1576 + goto out;
1577 + }
1578 + zero_user(page, block_in_page << blkbits,
1579 +diff --git a/fs/exec.c b/fs/exec.c
1580 +index c945a555eb25..e3abc8e3d58f 100644
1581 +--- a/fs/exec.c
1582 ++++ b/fs/exec.c
1583 +@@ -196,8 +196,26 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
1584 +
1585 + if (write) {
1586 + unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
1587 ++ unsigned long ptr_size;
1588 + struct rlimit *rlim;
1589 +
1590 ++ /*
1591 ++ * Since the stack will hold pointers to the strings, we
1592 ++ * must account for them as well.
1593 ++ *
1594 ++ * The size calculation is the entire vma while each arg page is
1595 ++ * built, so each time we get here it's calculating how far it
1596 ++ * is currently (rather than each call being just the newly
1597 ++ * added size from the arg page). As a result, we need to
1598 ++ * always add the entire size of the pointers, so that on the
1599 ++ * last call to get_arg_page() we'll actually have the entire
1600 ++ * correct size.
1601 ++ */
1602 ++ ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
1603 ++ if (ptr_size > ULONG_MAX - size)
1604 ++ goto fail;
1605 ++ size += ptr_size;
1606 ++
1607 + acct_arg_size(bprm, size / PAGE_SIZE);
1608 +
1609 + /*
1610 +@@ -215,13 +233,15 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
1611 + * to work from.
1612 + */
1613 + rlim = current->signal->rlim;
1614 +- if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
1615 +- put_page(page);
1616 +- return NULL;
1617 +- }
1618 ++ if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4)
1619 ++ goto fail;
1620 + }
1621 +
1622 + return page;
1623 ++
1624 ++fail:
1625 ++ put_page(page);
1626 ++ return NULL;
1627 + }
1628 +
1629 + static void put_arg_page(struct page *page)
1630 +diff --git a/fs/ext4/file.c b/fs/ext4/file.c
1631 +index ec9770f42538..ed2badabebf0 100644
1632 +--- a/fs/ext4/file.c
1633 ++++ b/fs/ext4/file.c
1634 +@@ -325,47 +325,27 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
1635 + num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
1636 + nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
1637 + (pgoff_t)num);
1638 +- if (nr_pages == 0) {
1639 +- if (whence == SEEK_DATA)
1640 +- break;
1641 +-
1642 +- BUG_ON(whence != SEEK_HOLE);
1643 +- /*
1644 +- * If this is the first time to go into the loop and
1645 +- * offset is not beyond the end offset, it will be a
1646 +- * hole at this offset
1647 +- */
1648 +- if (lastoff == startoff || lastoff < endoff)
1649 +- found = 1;
1650 ++ if (nr_pages == 0)
1651 + break;
1652 +- }
1653 +-
1654 +- /*
1655 +- * If this is the first time to go into the loop and
1656 +- * offset is smaller than the first page offset, it will be a
1657 +- * hole at this offset.
1658 +- */
1659 +- if (lastoff == startoff && whence == SEEK_HOLE &&
1660 +- lastoff < page_offset(pvec.pages[0])) {
1661 +- found = 1;
1662 +- break;
1663 +- }
1664 +
1665 + for (i = 0; i < nr_pages; i++) {
1666 + struct page *page = pvec.pages[i];
1667 + struct buffer_head *bh, *head;
1668 +
1669 + /*
1670 +- * If the current offset is not beyond the end of given
1671 +- * range, it will be a hole.
1672 ++ * If current offset is smaller than the page offset,
1673 ++ * there is a hole at this offset.
1674 + */
1675 +- if (lastoff < endoff && whence == SEEK_HOLE &&
1676 +- page->index > end) {
1677 ++ if (whence == SEEK_HOLE && lastoff < endoff &&
1678 ++ lastoff < page_offset(pvec.pages[i])) {
1679 + found = 1;
1680 + *offset = lastoff;
1681 + goto out;
1682 + }
1683 +
1684 ++ if (page->index > end)
1685 ++ goto out;
1686 ++
1687 + lock_page(page);
1688 +
1689 + if (unlikely(page->mapping != inode->i_mapping)) {
1690 +@@ -382,6 +362,8 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
1691 + lastoff = page_offset(page);
1692 + bh = head = page_buffers(page);
1693 + do {
1694 ++ if (lastoff + bh->b_size <= startoff)
1695 ++ goto next;
1696 + if (buffer_uptodate(bh) ||
1697 + buffer_unwritten(bh)) {
1698 + if (whence == SEEK_DATA)
1699 +@@ -396,6 +378,7 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
1700 + unlock_page(page);
1701 + goto out;
1702 + }
1703 ++next:
1704 + lastoff += bh->b_size;
1705 + bh = bh->b_this_page;
1706 + } while (bh != head);
1707 +@@ -405,20 +388,18 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
1708 + unlock_page(page);
1709 + }
1710 +
1711 +- /*
1712 +- * The no. of pages is less than our desired, that would be a
1713 +- * hole in there.
1714 +- */
1715 +- if (nr_pages < num && whence == SEEK_HOLE) {
1716 +- found = 1;
1717 +- *offset = lastoff;
1718 ++ /* The no. of pages is less than our desired, we are done. */
1719 ++ if (nr_pages < num)
1720 + break;
1721 +- }
1722 +
1723 + index = pvec.pages[i - 1]->index + 1;
1724 + pagevec_release(&pvec);
1725 + } while (index <= end);
1726 +
1727 ++ if (whence == SEEK_HOLE && lastoff < endoff) {
1728 ++ found = 1;
1729 ++ *offset = lastoff;
1730 ++ }
1731 + out:
1732 + pagevec_release(&pvec);
1733 + return found;
1734 +@@ -440,7 +421,7 @@ static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
1735 + mutex_lock(&inode->i_mutex);
1736 +
1737 + isize = i_size_read(inode);
1738 +- if (offset >= isize) {
1739 ++ if (offset < 0 || offset >= isize) {
1740 + mutex_unlock(&inode->i_mutex);
1741 + return -ENXIO;
1742 + }
1743 +@@ -523,7 +504,7 @@ static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
1744 + mutex_lock(&inode->i_mutex);
1745 +
1746 + isize = i_size_read(inode);
1747 +- if (offset >= isize) {
1748 ++ if (offset < 0 || offset >= isize) {
1749 + mutex_unlock(&inode->i_mutex);
1750 + return -ENXIO;
1751 + }
1752 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
1753 +index 1095d77c2a9d..26054c19e6cd 100644
1754 +--- a/fs/ext4/inode.c
1755 ++++ b/fs/ext4/inode.c
1756 +@@ -5045,8 +5045,9 @@ static int ext4_expand_extra_isize(struct inode *inode,
1757 + /* No extended attributes present */
1758 + if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
1759 + header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
1760 +- memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
1761 +- new_extra_isize);
1762 ++ memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
1763 ++ EXT4_I(inode)->i_extra_isize, 0,
1764 ++ new_extra_isize - EXT4_I(inode)->i_extra_isize);
1765 + EXT4_I(inode)->i_extra_isize = new_extra_isize;
1766 + return 0;
1767 + }
1768 +@@ -5097,8 +5098,6 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
1769 + sbi->s_want_extra_isize,
1770 + iloc, handle);
1771 + if (ret) {
1772 +- ext4_set_inode_state(inode,
1773 +- EXT4_STATE_NO_EXPAND);
1774 + if (mnt_count !=
1775 + le16_to_cpu(sbi->s_es->s_mnt_count)) {
1776 + ext4_warning(inode->i_sb,
1777 +diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
1778 +index cf0a70486618..f6190fdfd8ce 100644
1779 +--- a/fs/ext4/resize.c
1780 ++++ b/fs/ext4/resize.c
1781 +@@ -1911,7 +1911,8 @@ retry:
1782 + n_desc_blocks = o_desc_blocks +
1783 + le16_to_cpu(es->s_reserved_gdt_blocks);
1784 + n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
1785 +- n_blocks_count = n_group * EXT4_BLOCKS_PER_GROUP(sb);
1786 ++ n_blocks_count = (ext4_fsblk_t)n_group *
1787 ++ EXT4_BLOCKS_PER_GROUP(sb);
1788 + n_group--; /* set to last group number */
1789 + }
1790 +
1791 +diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
1792 +index 92850bab4513..dde00d1e2994 100644
1793 +--- a/fs/ext4/xattr.c
1794 ++++ b/fs/ext4/xattr.c
1795 +@@ -1266,11 +1266,13 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
1796 + int s_min_extra_isize = le16_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize);
1797 +
1798 + down_write(&EXT4_I(inode)->xattr_sem);
1799 ++ /*
1800 ++ * Set EXT4_STATE_NO_EXPAND to avoid recursion when marking inode dirty
1801 ++ */
1802 ++ ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
1803 + retry:
1804 +- if (EXT4_I(inode)->i_extra_isize >= new_extra_isize) {
1805 +- up_write(&EXT4_I(inode)->xattr_sem);
1806 +- return 0;
1807 +- }
1808 ++ if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
1809 ++ goto out;
1810 +
1811 + header = IHDR(inode, raw_inode);
1812 + entry = IFIRST(header);
1813 +@@ -1295,8 +1297,7 @@ retry:
1814 + (void *)header, total_ino,
1815 + inode->i_sb->s_blocksize);
1816 + EXT4_I(inode)->i_extra_isize = new_extra_isize;
1817 +- error = 0;
1818 +- goto cleanup;
1819 ++ goto out;
1820 + }
1821 +
1822 + /*
1823 +@@ -1457,6 +1458,8 @@ retry:
1824 + kfree(bs);
1825 + }
1826 + brelse(bh);
1827 ++out:
1828 ++ ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
1829 + up_write(&EXT4_I(inode)->xattr_sem);
1830 + return 0;
1831 +
1832 +@@ -1468,6 +1471,10 @@ cleanup:
1833 + kfree(is);
1834 + kfree(bs);
1835 + brelse(bh);
1836 ++ /*
1837 ++ * We deliberately leave EXT4_STATE_NO_EXPAND set here since inode
1838 ++ * size expansion failed.
1839 ++ */
1840 + up_write(&EXT4_I(inode)->xattr_sem);
1841 + return error;
1842 + }
1843 +diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
1844 +index f27c89d17885..e7cf8c5f2677 100644
1845 +--- a/fs/fscache/object-list.c
1846 ++++ b/fs/fscache/object-list.c
1847 +@@ -338,6 +338,13 @@ static void fscache_objlist_config(struct fscache_objlist_data *data)
1848 + rcu_read_lock();
1849 +
1850 + confkey = key->payload.data;
1851 ++ if (!confkey) {
1852 ++ /* key was revoked */
1853 ++ rcu_read_unlock();
1854 ++ key_put(key);
1855 ++ goto no_config;
1856 ++ }
1857 ++
1858 + buf = confkey->data;
1859 +
1860 + for (len = confkey->datalen - 1; len >= 0; len--) {
1861 +diff --git a/fs/fuse/file.c b/fs/fuse/file.c
1862 +index 1dce93041012..ee5c3e9a5983 100644
1863 +--- a/fs/fuse/file.c
1864 ++++ b/fs/fuse/file.c
1865 +@@ -54,7 +54,7 @@ struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
1866 + {
1867 + struct fuse_file *ff;
1868 +
1869 +- ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
1870 ++ ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL);
1871 + if (unlikely(!ff))
1872 + return NULL;
1873 +
1874 +diff --git a/fs/udf/inode.c b/fs/udf/inode.c
1875 +index 5c1120a5fa42..0ead8bed774a 100644
1876 +--- a/fs/udf/inode.c
1877 ++++ b/fs/udf/inode.c
1878 +@@ -1237,8 +1237,8 @@ int udf_setsize(struct inode *inode, loff_t newsize)
1879 + return err;
1880 + }
1881 + set_size:
1882 +- truncate_setsize(inode, newsize);
1883 + up_write(&iinfo->i_data_sem);
1884 ++ truncate_setsize(inode, newsize);
1885 + } else {
1886 + if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
1887 + down_write(&iinfo->i_data_sem);
1888 +@@ -1255,9 +1255,9 @@ set_size:
1889 + udf_get_block);
1890 + if (err)
1891 + return err;
1892 ++ truncate_setsize(inode, newsize);
1893 + down_write(&iinfo->i_data_sem);
1894 + udf_clear_extent_cache(inode);
1895 +- truncate_setsize(inode, newsize);
1896 + udf_truncate_extents(inode);
1897 + up_write(&iinfo->i_data_sem);
1898 + }
1899 +diff --git a/include/linux/key.h b/include/linux/key.h
1900 +index 4dfde1161c5e..66633b5f2f65 100644
1901 +--- a/include/linux/key.h
1902 ++++ b/include/linux/key.h
1903 +@@ -162,6 +162,7 @@ struct key {
1904 + #define KEY_FLAG_NEGATIVE 5 /* set if key is negative */
1905 + #define KEY_FLAG_ROOT_CAN_CLEAR 6 /* set if key can be cleared by root without permission */
1906 + #define KEY_FLAG_INVALIDATED 7 /* set if key has been invalidated */
1907 ++#define KEY_FLAG_UID_KEYRING 11 /* set if key is a user or user session keyring */
1908 +
1909 + /* the description string
1910 + * - this is used to match a key against search criteria
1911 +@@ -203,6 +204,7 @@ extern struct key *key_alloc(struct key_type *type,
1912 + #define KEY_ALLOC_IN_QUOTA 0x0000 /* add to quota, reject if would overrun */
1913 + #define KEY_ALLOC_QUOTA_OVERRUN 0x0001 /* add to quota, permit even if overrun */
1914 + #define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */
1915 ++#define KEY_ALLOC_UID_KEYRING 0x0010 /* allocating a user or user session keyring */
1916 +
1917 + extern void key_revoke(struct key *key);
1918 + extern void key_invalidate(struct key *key);
1919 +diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
1920 +index 120dd354849d..4dab847a1d75 100644
1921 +--- a/include/linux/workqueue.h
1922 ++++ b/include/linux/workqueue.h
1923 +@@ -306,6 +306,7 @@ enum {
1924 +
1925 + __WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */
1926 + __WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */
1927 ++ __WQ_ORDERED_EXPLICIT = 1 << 18, /* internal: alloc_ordered_workqueue() */
1928 +
1929 + WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
1930 + WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
1931 +@@ -408,7 +409,8 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
1932 + * Pointer to the allocated workqueue on success, %NULL on failure.
1933 + */
1934 + #define alloc_ordered_workqueue(fmt, flags, args...) \
1935 +- alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
1936 ++ alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | \
1937 ++ __WQ_ORDERED_EXPLICIT | (flags), 1, ##args)
1938 +
1939 + #define create_workqueue(name) \
1940 + alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
1941 +diff --git a/include/net/ipv6.h b/include/net/ipv6.h
1942 +index 413e23be60d1..1c96547c2a3f 100644
1943 +--- a/include/net/ipv6.h
1944 ++++ b/include/net/ipv6.h
1945 +@@ -822,6 +822,7 @@ extern int inet6_hash_connect(struct inet_timewait_death_row *death_row,
1946 + */
1947 + extern const struct proto_ops inet6_stream_ops;
1948 + extern const struct proto_ops inet6_dgram_ops;
1949 ++extern const struct proto_ops inet6_sockraw_ops;
1950 +
1951 + struct group_source_req;
1952 + struct group_filter;
1953 +diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h
1954 +index 5d5a6a4732ef..5af07a1ab0c2 100644
1955 +--- a/include/net/iw_handler.h
1956 ++++ b/include/net/iw_handler.h
1957 +@@ -551,7 +551,8 @@ iwe_stream_add_point(struct iw_request_info *info, char *stream, char *ends,
1958 + memcpy(stream + lcp_len,
1959 + ((char *) &iwe->u) + IW_EV_POINT_OFF,
1960 + IW_EV_POINT_PK_LEN - IW_EV_LCP_PK_LEN);
1961 +- memcpy(stream + point_len, extra, iwe->u.data.length);
1962 ++ if (iwe->u.data.length && extra)
1963 ++ memcpy(stream + point_len, extra, iwe->u.data.length);
1964 + stream += event_len;
1965 + }
1966 + return stream;
1967 +diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
1968 +index 845ab6decc45..ee81c68f24a6 100644
1969 +--- a/include/net/sctp/sctp.h
1970 ++++ b/include/net/sctp/sctp.h
1971 +@@ -555,6 +555,8 @@ _sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member)
1972 +
1973 + #define _sctp_walk_params(pos, chunk, end, member)\
1974 + for (pos.v = chunk->member;\
1975 ++ (pos.v + offsetof(struct sctp_paramhdr, length) + sizeof(pos.p->length) <=\
1976 ++ (void *)chunk + end) &&\
1977 + pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
1978 + ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\
1979 + pos.v += WORD_ROUND(ntohs(pos.p->length)))
1980 +@@ -565,6 +567,8 @@ _sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length))
1981 + #define _sctp_walk_errors(err, chunk_hdr, end)\
1982 + for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
1983 + sizeof(sctp_chunkhdr_t));\
1984 ++ ((void *)err + offsetof(sctp_errhdr_t, length) + sizeof(err->length) <=\
1985 ++ (void *)chunk_hdr + end) &&\
1986 + (void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\
1987 + ntohs(err->length) >= sizeof(sctp_errhdr_t); \
1988 + err = (sctp_errhdr_t *)((void *)err + WORD_ROUND(ntohs(err->length))))
1989 +diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
1990 +index ca4693b4e09e..00c0e5bf5d3e 100644
1991 +--- a/include/net/sctp/ulpevent.h
1992 ++++ b/include/net/sctp/ulpevent.h
1993 +@@ -143,8 +143,12 @@ __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event);
1994 + static inline int sctp_ulpevent_type_enabled(__u16 sn_type,
1995 + struct sctp_event_subscribe *mask)
1996 + {
1997 ++ int offset = sn_type - SCTP_SN_TYPE_BASE;
1998 + char *amask = (char *) mask;
1999 +- return amask[sn_type - SCTP_SN_TYPE_BASE];
2000 ++
2001 ++ if (offset >= sizeof(struct sctp_event_subscribe))
2002 ++ return 0;
2003 ++ return amask[offset];
2004 + }
2005 +
2006 + /* Given an event subscription, is this event enabled? */
2007 +diff --git a/include/net/tcp.h b/include/net/tcp.h
2008 +index 79cd118d5994..c4db9acefa9c 100644
2009 +--- a/include/net/tcp.h
2010 ++++ b/include/net/tcp.h
2011 +@@ -1592,4 +1592,14 @@ struct tcp_request_sock_ops {
2012 + extern void tcp_v4_init(void);
2013 + extern void tcp_init(void);
2014 +
2015 ++/* At how many jiffies into the future should the RTO fire? */
2016 ++static inline s32 tcp_rto_delta(const struct sock *sk)
2017 ++{
2018 ++ const struct sk_buff *skb = tcp_write_queue_head(sk);
2019 ++ const u32 rto = inet_csk(sk)->icsk_rto;
2020 ++ const u32 rto_time_stamp = TCP_SKB_CB(skb)->when + rto;
2021 ++
2022 ++ return (s32)(rto_time_stamp - tcp_time_stamp);
2023 ++}
2024 ++
2025 + #endif /* _TCP_H */
2026 +diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
2027 +index 7d99c0b5b789..8e271438f77c 100644
2028 +--- a/include/target/target_core_base.h
2029 ++++ b/include/target/target_core_base.h
2030 +@@ -729,6 +729,7 @@ struct se_port_stat_grps {
2031 + struct se_lun {
2032 + #define SE_LUN_LINK_MAGIC 0xffff7771
2033 + u32 lun_link_magic;
2034 ++ bool lun_shutdown;
2035 + /* See transport_lun_status_table */
2036 + enum transport_lun_status_table lun_status;
2037 + u32 lun_access;
2038 +diff --git a/kernel/extable.c b/kernel/extable.c
2039 +index 67460b93b1a1..5ec4b6f861d1 100644
2040 +--- a/kernel/extable.c
2041 ++++ b/kernel/extable.c
2042 +@@ -66,7 +66,7 @@ static inline int init_kernel_text(unsigned long addr)
2043 + return 0;
2044 + }
2045 +
2046 +-int core_kernel_text(unsigned long addr)
2047 ++int notrace core_kernel_text(unsigned long addr)
2048 + {
2049 + if (addr >= (unsigned long)_stext &&
2050 + addr <= (unsigned long)_etext)
2051 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
2052 +index edffb6781c0e..359fbd32dc9e 100644
2053 +--- a/kernel/trace/trace.c
2054 ++++ b/kernel/trace/trace.c
2055 +@@ -3061,11 +3061,17 @@ static int tracing_open(struct inode *inode, struct file *file)
2056 + /* If this file was open for write, then erase contents */
2057 + if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
2058 + int cpu = tracing_get_cpu(inode);
2059 ++ struct trace_buffer *trace_buf = &tr->trace_buffer;
2060 ++
2061 ++#ifdef CONFIG_TRACER_MAX_TRACE
2062 ++ if (tr->current_trace->print_max)
2063 ++ trace_buf = &tr->max_buffer;
2064 ++#endif
2065 +
2066 + if (cpu == RING_BUFFER_ALL_CPUS)
2067 +- tracing_reset_online_cpus(&tr->trace_buffer);
2068 ++ tracing_reset_online_cpus(trace_buf);
2069 + else
2070 +- tracing_reset(&tr->trace_buffer, cpu);
2071 ++ tracing_reset(trace_buf, cpu);
2072 + }
2073 +
2074 + if (file->f_mode & FMODE_READ) {
2075 +@@ -4654,7 +4660,7 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
2076 + tracing_reset_online_cpus(&tr->trace_buffer);
2077 +
2078 + #ifdef CONFIG_TRACER_MAX_TRACE
2079 +- if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
2080 ++ if (tr->max_buffer.buffer)
2081 + ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
2082 + tracing_reset_online_cpus(&tr->max_buffer);
2083 + #endif
2084 +diff --git a/kernel/workqueue.c b/kernel/workqueue.c
2085 +index 66972ac0c6c0..f55fbfa7feda 100644
2086 +--- a/kernel/workqueue.c
2087 ++++ b/kernel/workqueue.c
2088 +@@ -3399,7 +3399,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
2089 + * attributes breaks ordering guarantee. Disallow exposing ordered
2090 + * workqueues.
2091 + */
2092 +- if (WARN_ON(wq->flags & __WQ_ORDERED))
2093 ++ if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
2094 + return -EINVAL;
2095 +
2096 + wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
2097 +@@ -3964,8 +3964,12 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
2098 + return -EINVAL;
2099 +
2100 + /* creating multiple pwqs breaks ordering guarantee */
2101 +- if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
2102 +- return -EINVAL;
2103 ++ if (!list_empty(&wq->pwqs)) {
2104 ++ if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
2105 ++ return -EINVAL;
2106 ++
2107 ++ wq->flags &= ~__WQ_ORDERED;
2108 ++ }
2109 +
2110 + pwq_tbl = kzalloc(wq_numa_tbl_len * sizeof(pwq_tbl[0]), GFP_KERNEL);
2111 + new_attrs = alloc_workqueue_attrs(GFP_KERNEL);
2112 +@@ -4213,6 +4217,16 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
2113 + struct workqueue_struct *wq;
2114 + struct pool_workqueue *pwq;
2115 +
2116 ++ /*
2117 ++ * Unbound && max_active == 1 used to imply ordered, which is no
2118 ++ * longer the case on NUMA machines due to per-node pools. While
2119 ++ * alloc_ordered_workqueue() is the right way to create an ordered
2120 ++ * workqueue, keep the previous behavior to avoid subtle breakages
2121 ++ * on NUMA.
2122 ++ */
2123 ++ if ((flags & WQ_UNBOUND) && max_active == 1)
2124 ++ flags |= __WQ_ORDERED;
2125 ++
2126 + /* allocate wq and format name */
2127 + if (flags & WQ_UNBOUND)
2128 + tbl_size = wq_numa_tbl_len * sizeof(wq->numa_pwq_tbl[0]);
2129 +@@ -4401,13 +4415,14 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
2130 + struct pool_workqueue *pwq;
2131 +
2132 + /* disallow meddling with max_active for ordered workqueues */
2133 +- if (WARN_ON(wq->flags & __WQ_ORDERED))
2134 ++ if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
2135 + return;
2136 +
2137 + max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
2138 +
2139 + mutex_lock(&wq->mutex);
2140 +
2141 ++ wq->flags &= ~__WQ_ORDERED;
2142 + wq->saved_max_active = max_active;
2143 +
2144 + for_each_pwq(pwq, wq)
2145 +diff --git a/lib/cmdline.c b/lib/cmdline.c
2146 +index eb6791188cf5..efc35fbce780 100644
2147 +--- a/lib/cmdline.c
2148 ++++ b/lib/cmdline.c
2149 +@@ -22,14 +22,14 @@
2150 + * the values[M, M+1, ..., N] into the ints array in get_options.
2151 + */
2152 +
2153 +-static int get_range(char **str, int *pint)
2154 ++static int get_range(char **str, int *pint, int n)
2155 + {
2156 + int x, inc_counter, upper_range;
2157 +
2158 + (*str)++;
2159 + upper_range = simple_strtol((*str), NULL, 0);
2160 + inc_counter = upper_range - *pint;
2161 +- for (x = *pint; x < upper_range; x++)
2162 ++ for (x = *pint; n && x < upper_range; x++, n--)
2163 + *pint++ = x;
2164 + return inc_counter;
2165 + }
2166 +@@ -95,7 +95,7 @@ char *get_options(const char *str, int nints, int *ints)
2167 + break;
2168 + if (res == 3) {
2169 + int range_nums;
2170 +- range_nums = get_range((char **)&str, ints + i);
2171 ++ range_nums = get_range((char **)&str, ints + i, nints - i);
2172 + if (range_nums < 0)
2173 + break;
2174 + /*
2175 +diff --git a/lib/digsig.c b/lib/digsig.c
2176 +index 2f31e6a45f0a..ae703dfc9731 100644
2177 +--- a/lib/digsig.c
2178 ++++ b/lib/digsig.c
2179 +@@ -86,6 +86,12 @@ static int digsig_verify_rsa(struct key *key,
2180 + down_read(&key->sem);
2181 + ukp = key->payload.data;
2182 +
2183 ++ if (!ukp) {
2184 ++ /* key was revoked before we acquired its semaphore */
2185 ++ err = -EKEYREVOKED;
2186 ++ goto err1;
2187 ++ }
2188 ++
2189 + if (ukp->datalen < sizeof(*pkh))
2190 + goto err1;
2191 +
2192 +diff --git a/mm/mmap.c b/mm/mmap.c
2193 +index 3c4e4d7ae54e..d042e254a163 100644
2194 +--- a/mm/mmap.c
2195 ++++ b/mm/mmap.c
2196 +@@ -2132,7 +2132,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
2197 +
2198 + /* Guard against exceeding limits of the address space. */
2199 + address &= PAGE_MASK;
2200 +- if (address >= TASK_SIZE)
2201 ++ if (address >= (TASK_SIZE & PAGE_MASK))
2202 + return -ENOMEM;
2203 + address += PAGE_SIZE;
2204 +
2205 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
2206 +index 4e8927539299..829ee76d5521 100644
2207 +--- a/mm/page_alloc.c
2208 ++++ b/mm/page_alloc.c
2209 +@@ -5177,8 +5177,8 @@ unsigned long free_reserved_area(unsigned long start, unsigned long end,
2210 + }
2211 +
2212 + if (pages && s)
2213 +- pr_info("Freeing %s memory: %ldK (%lx - %lx)\n",
2214 +- s, pages << (PAGE_SHIFT - 10), start, end);
2215 ++ pr_info("Freeing %s memory: %ldK\n",
2216 ++ s, pages << (PAGE_SHIFT - 10));
2217 +
2218 + return pages;
2219 + }
2220 +diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
2221 +index 86abb2e59aea..82fdb35154fc 100644
2222 +--- a/net/8021q/vlan.c
2223 ++++ b/net/8021q/vlan.c
2224 +@@ -274,7 +274,8 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
2225 + return 0;
2226 +
2227 + out_free_newdev:
2228 +- free_netdev(new_dev);
2229 ++ if (new_dev->reg_state == NETREG_UNINITIALIZED)
2230 ++ free_netdev(new_dev);
2231 + return err;
2232 + }
2233 +
2234 +diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
2235 +index e430b1abcd2f..e387e6719fa2 100644
2236 +--- a/net/bluetooth/bnep/core.c
2237 ++++ b/net/bluetooth/bnep/core.c
2238 +@@ -32,6 +32,7 @@
2239 + #include <asm/unaligned.h>
2240 +
2241 + #include <net/bluetooth/bluetooth.h>
2242 ++#include <net/bluetooth/l2cap.h>
2243 + #include <net/bluetooth/hci_core.h>
2244 +
2245 + #include "bnep.h"
2246 +@@ -539,6 +540,9 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
2247 +
2248 + BT_DBG("");
2249 +
2250 ++ if (!l2cap_is_socket(sock))
2251 ++ return -EBADFD;
2252 ++
2253 + baswap((void *) dst, &bt_sk(sock->sk)->dst);
2254 + baswap((void *) src, &bt_sk(sock->sk)->src);
2255 +
2256 +diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
2257 +index e0a6ebf2baa6..84460f623fc8 100644
2258 +--- a/net/bluetooth/cmtp/core.c
2259 ++++ b/net/bluetooth/cmtp/core.c
2260 +@@ -334,6 +334,9 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
2261 +
2262 + BT_DBG("");
2263 +
2264 ++ if (!l2cap_is_socket(sock))
2265 ++ return -EBADFD;
2266 ++
2267 + session = kzalloc(sizeof(struct cmtp_session), GFP_KERNEL);
2268 + if (!session)
2269 + return -ENOMEM;
2270 +diff --git a/net/core/dev.c b/net/core/dev.c
2271 +index 682bf5ad63a0..d69d8ec11383 100644
2272 +--- a/net/core/dev.c
2273 ++++ b/net/core/dev.c
2274 +@@ -2342,9 +2342,10 @@ EXPORT_SYMBOL(skb_mac_gso_segment);
2275 + static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2276 + {
2277 + if (tx_path)
2278 +- return skb->ip_summed != CHECKSUM_PARTIAL;
2279 +- else
2280 +- return skb->ip_summed == CHECKSUM_NONE;
2281 ++ return skb->ip_summed != CHECKSUM_PARTIAL &&
2282 ++ skb->ip_summed != CHECKSUM_NONE;
2283 ++
2284 ++ return skb->ip_summed == CHECKSUM_NONE;
2285 + }
2286 +
2287 + /**
2288 +@@ -2361,11 +2362,12 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2289 + struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2290 + netdev_features_t features, bool tx_path)
2291 + {
2292 ++ struct sk_buff *segs;
2293 ++
2294 + if (unlikely(skb_needs_check(skb, tx_path))) {
2295 + int err;
2296 +
2297 +- skb_warn_bad_offload(skb);
2298 +-
2299 ++ /* We're going to init ->check field in TCP or UDP header */
2300 + if (skb_header_cloned(skb) &&
2301 + (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
2302 + return ERR_PTR(err);
2303 +@@ -2375,7 +2377,12 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2304 + skb_reset_mac_header(skb);
2305 + skb_reset_mac_len(skb);
2306 +
2307 +- return skb_mac_gso_segment(skb, features);
2308 ++ segs = skb_mac_gso_segment(skb, features);
2309 ++
2310 ++ if (unlikely(skb_needs_check(skb, tx_path)))
2311 ++ skb_warn_bad_offload(skb);
2312 ++
2313 ++ return segs;
2314 + }
2315 + EXPORT_SYMBOL(__skb_gso_segment);
2316 +
2317 +@@ -5636,7 +5643,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
2318 + } else {
2319 + netdev_stats_to_stats64(storage, &dev->stats);
2320 + }
2321 +- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
2322 ++ storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
2323 + return storage;
2324 + }
2325 + EXPORT_SYMBOL(dev_get_stats);
2326 +diff --git a/net/core/sock.c b/net/core/sock.c
2327 +index 96e125919324..104784ee4bbd 100644
2328 +--- a/net/core/sock.c
2329 ++++ b/net/core/sock.c
2330 +@@ -1470,6 +1470,8 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
2331 +
2332 + sock_copy(newsk, sk);
2333 +
2334 ++ newsk->sk_prot_creator = sk->sk_prot;
2335 ++
2336 + /* SANITY */
2337 + get_net(sock_net(newsk));
2338 + sk_node_init(&newsk->sk_node);
2339 +diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
2340 +index 975c369d4e6d..03610ebd8d0b 100644
2341 +--- a/net/ipv4/af_inet.c
2342 ++++ b/net/ipv4/af_inet.c
2343 +@@ -1053,7 +1053,7 @@ static struct inet_protosw inetsw_array[] =
2344 + .type = SOCK_DGRAM,
2345 + .protocol = IPPROTO_ICMP,
2346 + .prot = &ping_prot,
2347 +- .ops = &inet_dgram_ops,
2348 ++ .ops = &inet_sockraw_ops,
2349 + .no_check = UDP_CSUM_DEFAULT,
2350 + .flags = INET_PROTOSW_REUSE,
2351 + },
2352 +diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
2353 +index 017b4792cd44..bcd0a05d6002 100644
2354 +--- a/net/ipv4/fib_frontend.c
2355 ++++ b/net/ipv4/fib_frontend.c
2356 +@@ -1170,13 +1170,14 @@ static struct pernet_operations fib_net_ops = {
2357 +
2358 + void __init ip_fib_init(void)
2359 + {
2360 +- rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
2361 +- rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
2362 +- rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
2363 ++ fib_trie_init();
2364 +
2365 + register_pernet_subsys(&fib_net_ops);
2366 ++
2367 + register_netdevice_notifier(&fib_netdev_notifier);
2368 + register_inetaddr_notifier(&fib_inetaddr_notifier);
2369 +
2370 +- fib_trie_init();
2371 ++ rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
2372 ++ rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
2373 ++ rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
2374 + }
2375 +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
2376 +index 5f077efad29d..40faf48cb10c 100644
2377 +--- a/net/ipv4/ip_output.c
2378 ++++ b/net/ipv4/ip_output.c
2379 +@@ -846,10 +846,12 @@ static int __ip_append_data(struct sock *sk,
2380 + csummode = CHECKSUM_PARTIAL;
2381 +
2382 + cork->length += length;
2383 +- if (((length > mtu) || (skb && skb_has_frags(skb))) &&
2384 ++ if ((skb && skb_has_frags(skb)) ||
2385 ++ ((length > mtu) &&
2386 ++ (skb_queue_len(queue) <= 1) &&
2387 + (sk->sk_protocol == IPPROTO_UDP) &&
2388 + (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
2389 +- (sk->sk_type == SOCK_DGRAM)) {
2390 ++ (sk->sk_type == SOCK_DGRAM))) {
2391 + err = ip_ufo_append_data(sk, queue, getfrag, from, length,
2392 + hh_len, fragheaderlen, transhdrlen,
2393 + maxfraglen, flags);
2394 +@@ -1160,6 +1162,7 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
2395 +
2396 + cork->length += size;
2397 + if ((size + skb->len > mtu) &&
2398 ++ (skb_queue_len(&sk->sk_write_queue) == 1) &&
2399 + (sk->sk_protocol == IPPROTO_UDP) &&
2400 + (rt->dst.dev->features & NETIF_F_UFO)) {
2401 + skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
2402 +diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
2403 +index 5f011cc89cd9..1e82bdb0f07e 100644
2404 +--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
2405 ++++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
2406 +@@ -1305,6 +1305,7 @@ static int __init nf_nat_snmp_basic_init(void)
2407 + static void __exit nf_nat_snmp_basic_fini(void)
2408 + {
2409 + RCU_INIT_POINTER(nf_nat_snmp_hook, NULL);
2410 ++ synchronize_rcu();
2411 + nf_conntrack_helper_unregister(&snmp_trap_helper);
2412 + }
2413 +
2414 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
2415 +index d1e04221c275..b80b399f2377 100644
2416 +--- a/net/ipv4/tcp.c
2417 ++++ b/net/ipv4/tcp.c
2418 +@@ -2313,9 +2313,15 @@ int tcp_disconnect(struct sock *sk, int flags)
2419 + tcp_set_ca_state(sk, TCP_CA_Open);
2420 + tcp_clear_retrans(tp);
2421 + inet_csk_delack_init(sk);
2422 ++ /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0
2423 ++ * issue in __tcp_select_window()
2424 ++ */
2425 ++ icsk->icsk_ack.rcv_mss = TCP_MIN_MSS;
2426 + tcp_init_send_head(sk);
2427 + memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
2428 + __sk_dst_reset(sk);
2429 ++ dst_release(sk->sk_rx_dst);
2430 ++ sk->sk_rx_dst = NULL;
2431 +
2432 + WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
2433 +
2434 +diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
2435 +index 019c2389a341..2ca6c080a4bc 100644
2436 +--- a/net/ipv4/tcp_cong.c
2437 ++++ b/net/ipv4/tcp_cong.c
2438 +@@ -95,6 +95,7 @@ void tcp_init_congestion_control(struct sock *sk)
2439 + rcu_read_unlock();
2440 + }
2441 +
2442 ++ tcp_sk(sk)->prior_ssthresh = 0;
2443 + if (icsk->icsk_ca_ops->init)
2444 + icsk->icsk_ca_ops->init(sk);
2445 + }
2446 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
2447 +index 0680058fe693..85dd09be1618 100644
2448 +--- a/net/ipv4/tcp_input.c
2449 ++++ b/net/ipv4/tcp_input.c
2450 +@@ -111,6 +111,7 @@ int sysctl_tcp_early_retrans __read_mostly = 3;
2451 + #define FLAG_ORIG_SACK_ACKED 0x200 /* Never retransmitted data are (s)acked */
2452 + #define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
2453 + #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */
2454 ++#define FLAG_SET_XMIT_TIMER 0x1000 /* Set TLP or RTO timer */
2455 + #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */
2456 + #define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */
2457 +
2458 +@@ -2553,8 +2554,8 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk)
2459 + struct tcp_sock *tp = tcp_sk(sk);
2460 +
2461 + /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
2462 +- if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR ||
2463 +- (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) {
2464 ++ if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH &&
2465 ++ (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) {
2466 + tp->snd_cwnd = tp->snd_ssthresh;
2467 + tp->snd_cwnd_stamp = tcp_time_stamp;
2468 + }
2469 +@@ -2972,14 +2973,11 @@ void tcp_rearm_rto(struct sock *sk)
2470 + /* Offset the time elapsed after installing regular RTO */
2471 + if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2472 + icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2473 +- struct sk_buff *skb = tcp_write_queue_head(sk);
2474 +- const u32 rto_time_stamp = TCP_SKB_CB(skb)->when + rto;
2475 +- s32 delta = (s32)(rto_time_stamp - tcp_time_stamp);
2476 ++ s32 delta = tcp_rto_delta(sk);
2477 + /* delta may not be positive if the socket is locked
2478 + * when the retrans timer fires and is rescheduled.
2479 + */
2480 +- if (delta > 0)
2481 +- rto = delta;
2482 ++ rto = max_t(int, delta, 1);
2483 + }
2484 + inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
2485 + TCP_RTO_MAX);
2486 +@@ -3004,6 +3002,13 @@ void tcp_resume_early_retransmit(struct sock *sk)
2487 + tcp_xmit_retransmit_queue(sk);
2488 + }
2489 +
2490 ++/* Try to schedule a loss probe; if that doesn't work, then schedule an RTO. */
2491 ++static void tcp_set_xmit_timer(struct sock *sk)
2492 ++{
2493 ++ if (!tcp_schedule_loss_probe(sk))
2494 ++ tcp_rearm_rto(sk);
2495 ++}
2496 ++
2497 + /* If we get here, the whole TSO packet has not been acked. */
2498 + static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
2499 + {
2500 +@@ -3134,7 +3139,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
2501 + }
2502 +
2503 + tcp_ack_update_rtt(sk, flag, seq_rtt);
2504 +- tcp_rearm_rto(sk);
2505 ++ flag |= FLAG_SET_XMIT_TIMER; /* set TLP or RTO timer */
2506 +
2507 + if (tcp_is_reno(tp)) {
2508 + tcp_remove_reno_sacks(sk, pkts_acked);
2509 +@@ -3394,10 +3399,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
2510 + if (after(ack, tp->snd_nxt))
2511 + goto invalid_ack;
2512 +
2513 +- if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2514 +- icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
2515 +- tcp_rearm_rto(sk);
2516 +-
2517 + if (after(ack, prior_snd_una))
2518 + flag |= FLAG_SND_UNA_ADVANCED;
2519 +
2520 +@@ -3454,6 +3455,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
2521 +
2522 + pkts_acked = previous_packets_out - tp->packets_out;
2523 +
2524 ++ if (tp->tlp_high_seq)
2525 ++ tcp_process_tlp_ack(sk, ack, flag);
2526 ++ /* If needed, reset TLP/RTO timer; RACK may later override this. */
2527 ++ if (flag & FLAG_SET_XMIT_TIMER)
2528 ++ tcp_set_xmit_timer(sk);
2529 ++
2530 + if (tcp_ack_is_dubious(sk, flag)) {
2531 + /* Advance CWND, if state allows this. */
2532 + if ((flag & FLAG_DATA_ACKED) && tcp_may_raise_cwnd(sk, flag))
2533 +@@ -3466,17 +3473,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
2534 + tcp_cong_avoid(sk, ack, prior_in_flight);
2535 + }
2536 +
2537 +- if (tp->tlp_high_seq)
2538 +- tcp_process_tlp_ack(sk, ack, flag);
2539 +-
2540 + if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) {
2541 + struct dst_entry *dst = __sk_dst_get(sk);
2542 + if (dst)
2543 + dst_confirm(dst);
2544 + }
2545 +
2546 +- if (icsk->icsk_pending == ICSK_TIME_RETRANS)
2547 +- tcp_schedule_loss_probe(sk);
2548 + if (tp->srtt != prior_rtt || tp->snd_cwnd != prior_cwnd)
2549 + tcp_update_pacing_rate(sk);
2550 + return 1;
2551 +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
2552 +index 8729a934124f..f5d670ccd403 100644
2553 +--- a/net/ipv4/tcp_output.c
2554 ++++ b/net/ipv4/tcp_output.c
2555 +@@ -1945,28 +1945,16 @@ repair:
2556 +
2557 + bool tcp_schedule_loss_probe(struct sock *sk)
2558 + {
2559 +- struct inet_connection_sock *icsk = inet_csk(sk);
2560 + struct tcp_sock *tp = tcp_sk(sk);
2561 +- u32 timeout, tlp_time_stamp, rto_time_stamp;
2562 + u32 rtt = tp->srtt >> 3;
2563 ++ u32 timeout, rto_delta;
2564 +
2565 +- if (WARN_ON(icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS))
2566 +- return false;
2567 +- /* No consecutive loss probes. */
2568 +- if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) {
2569 +- tcp_rearm_rto(sk);
2570 +- return false;
2571 +- }
2572 + /* Don't do any loss probe on a Fast Open connection before 3WHS
2573 + * finishes.
2574 + */
2575 + if (sk->sk_state == TCP_SYN_RECV)
2576 + return false;
2577 +
2578 +- /* TLP is only scheduled when next timer event is RTO. */
2579 +- if (icsk->icsk_pending != ICSK_TIME_RETRANS)
2580 +- return false;
2581 +-
2582 + /* Schedule a loss probe in 2*RTT for SACK capable connections
2583 + * in Open state, that are either limited by cwnd or application.
2584 + */
2585 +@@ -1987,14 +1975,10 @@ bool tcp_schedule_loss_probe(struct sock *sk)
2586 + (rtt + (rtt >> 1) + TCP_DELACK_MAX));
2587 + timeout = max_t(u32, timeout, msecs_to_jiffies(10));
2588 +
2589 +- /* If RTO is shorter, just schedule TLP in its place. */
2590 +- tlp_time_stamp = tcp_time_stamp + timeout;
2591 +- rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout;
2592 +- if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) {
2593 +- s32 delta = rto_time_stamp - tcp_time_stamp;
2594 +- if (delta > 0)
2595 +- timeout = delta;
2596 +- }
2597 ++ /* If the RTO formula yields an earlier time, then use that time. */
2598 ++ rto_delta = tcp_rto_delta(sk); /* How far in future is RTO? */
2599 ++ if (rto_delta > 0)
2600 ++ timeout = min_t(u32, timeout, rto_delta);
2601 +
2602 + inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
2603 + TCP_RTO_MAX);
2604 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
2605 +index 68174e4d88c7..882b23e8e777 100644
2606 +--- a/net/ipv4/udp.c
2607 ++++ b/net/ipv4/udp.c
2608 +@@ -763,7 +763,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
2609 + if (is_udplite) /* UDP-Lite */
2610 + csum = udplite_csum(skb);
2611 +
2612 +- else if (sk->sk_no_check == UDP_CSUM_NOXMIT) { /* UDP csum disabled */
2613 ++ else if (sk->sk_no_check == UDP_CSUM_NOXMIT && !skb_has_frags(skb)) { /* UDP csum off */
2614 +
2615 + skb->ip_summed = CHECKSUM_NONE;
2616 + goto send;
2617 +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
2618 +index 9c4aa2e22448..5ea5f77c0ec1 100644
2619 +--- a/net/ipv6/addrconf.c
2620 ++++ b/net/ipv6/addrconf.c
2621 +@@ -2892,6 +2892,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
2622 + {
2623 + struct net_device *dev = (struct net_device *) data;
2624 + struct inet6_dev *idev = __in6_dev_get(dev);
2625 ++ struct net *net = dev_net(dev);
2626 + int run_pending = 0;
2627 + int err;
2628 +
2629 +@@ -2988,7 +2989,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
2630 + * IPV6_MIN_MTU stop IPv6 on this interface.
2631 + */
2632 + if (dev->mtu < IPV6_MIN_MTU)
2633 +- addrconf_ifdown(dev, 1);
2634 ++ addrconf_ifdown(dev, dev != net->loopback_dev);
2635 + }
2636 + break;
2637 +
2638 +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
2639 +index 46458ee31939..6de0d44a9429 100644
2640 +--- a/net/ipv6/ip6_fib.c
2641 ++++ b/net/ipv6/ip6_fib.c
2642 +@@ -167,6 +167,12 @@ static __inline__ void rt6_release(struct rt6_info *rt)
2643 + dst_free(&rt->dst);
2644 + }
2645 +
2646 ++static void fib6_free_table(struct fib6_table *table)
2647 ++{
2648 ++ inetpeer_invalidate_tree(&table->tb6_peers);
2649 ++ kfree(table);
2650 ++}
2651 ++
2652 + static void fib6_link_table(struct net *net, struct fib6_table *tb)
2653 + {
2654 + unsigned int h;
2655 +@@ -1738,15 +1744,22 @@ out_timer:
2656 +
2657 + static void fib6_net_exit(struct net *net)
2658 + {
2659 ++ unsigned int i;
2660 ++
2661 + rt6_ifdown(net, NULL);
2662 + del_timer_sync(&net->ipv6.ip6_fib_timer);
2663 +
2664 +-#ifdef CONFIG_IPV6_MULTIPLE_TABLES
2665 +- inetpeer_invalidate_tree(&net->ipv6.fib6_local_tbl->tb6_peers);
2666 +- kfree(net->ipv6.fib6_local_tbl);
2667 +-#endif
2668 +- inetpeer_invalidate_tree(&net->ipv6.fib6_main_tbl->tb6_peers);
2669 +- kfree(net->ipv6.fib6_main_tbl);
2670 ++ for (i = 0; i < FIB6_TABLE_HASHSZ; i++) {
2671 ++ struct hlist_head *head = &net->ipv6.fib_table_hash[i];
2672 ++ struct hlist_node *tmp;
2673 ++ struct fib6_table *tb;
2674 ++
2675 ++ hlist_for_each_entry_safe(tb, tmp, head, tb6_hlist) {
2676 ++ hlist_del(&tb->tb6_hlist);
2677 ++ fib6_free_table(tb);
2678 ++ }
2679 ++ }
2680 ++
2681 + kfree(net->ipv6.fib_table_hash);
2682 + kfree(net->ipv6.rt6_stats);
2683 + }
2684 +diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
2685 +index ae88e17f5c72..529348e6a98b 100644
2686 +--- a/net/ipv6/ip6_gre.c
2687 ++++ b/net/ipv6/ip6_gre.c
2688 +@@ -419,7 +419,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
2689 + if (code == ICMPV6_HDR_FIELD)
2690 + teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
2691 +
2692 +- if (teli && teli == info - 2) {
2693 ++ if (teli && teli == be32_to_cpu(info) - 2) {
2694 + tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
2695 + if (tel->encap_limit == 0) {
2696 + net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
2697 +@@ -431,7 +431,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
2698 + }
2699 + break;
2700 + case ICMPV6_PKT_TOOBIG:
2701 +- mtu = info - offset;
2702 ++ mtu = be32_to_cpu(info) - offset;
2703 + if (mtu < IPV6_MIN_MTU)
2704 + mtu = IPV6_MIN_MTU;
2705 + t->dev->mtu = mtu;
2706 +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
2707 +index 17a88ebcc845..3a65b9a9cb4d 100644
2708 +--- a/net/ipv6/ip6_output.c
2709 ++++ b/net/ipv6/ip6_output.c
2710 +@@ -1288,11 +1288,12 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
2711 +
2712 + skb = skb_peek_tail(&sk->sk_write_queue);
2713 + cork->length += length;
2714 +- if (((length > mtu) ||
2715 +- (skb && skb_has_frags(skb))) &&
2716 ++ if ((skb && skb_has_frags(skb)) ||
2717 ++ (((length + fragheaderlen) > mtu) &&
2718 ++ (skb_queue_len(&sk->sk_write_queue) <= 1) &&
2719 + (sk->sk_protocol == IPPROTO_UDP) &&
2720 + (rt->dst.dev->features & NETIF_F_UFO) &&
2721 +- (sk->sk_type == SOCK_DGRAM)) {
2722 ++ (sk->sk_type == SOCK_DGRAM))) {
2723 + err = ip6_ufo_append_data(sk, getfrag, from, length,
2724 + hh_len, fragheaderlen,
2725 + transhdrlen, mtu, flags, rt);
2726 +diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
2727 +index c7ce2be09d90..a05e1f1a1a38 100644
2728 +--- a/net/ipv6/raw.c
2729 ++++ b/net/ipv6/raw.c
2730 +@@ -1319,7 +1319,7 @@ void raw6_proc_exit(void)
2731 + #endif /* CONFIG_PROC_FS */
2732 +
2733 + /* Same as inet6_dgram_ops, sans udp_poll. */
2734 +-static const struct proto_ops inet6_sockraw_ops = {
2735 ++const struct proto_ops inet6_sockraw_ops = {
2736 + .family = PF_INET6,
2737 + .owner = THIS_MODULE,
2738 + .release = inet6_release,
2739 +diff --git a/net/key/af_key.c b/net/key/af_key.c
2740 +index 66f51c5a8a3a..3ff567fb90ee 100644
2741 +--- a/net/key/af_key.c
2742 ++++ b/net/key/af_key.c
2743 +@@ -1135,6 +1135,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
2744 + goto out;
2745 + }
2746 +
2747 ++ err = -ENOBUFS;
2748 + key = ext_hdrs[SADB_EXT_KEY_AUTH - 1];
2749 + if (sa->sadb_sa_auth) {
2750 + int keysize = 0;
2751 +@@ -1146,8 +1147,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
2752 + if (key)
2753 + keysize = (key->sadb_key_bits + 7) / 8;
2754 + x->aalg = kmalloc(sizeof(*x->aalg) + keysize, GFP_KERNEL);
2755 +- if (!x->aalg)
2756 ++ if (!x->aalg) {
2757 ++ err = -ENOMEM;
2758 + goto out;
2759 ++ }
2760 + strcpy(x->aalg->alg_name, a->name);
2761 + x->aalg->alg_key_len = 0;
2762 + if (key) {
2763 +@@ -1166,8 +1169,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
2764 + goto out;
2765 + }
2766 + x->calg = kmalloc(sizeof(*x->calg), GFP_KERNEL);
2767 +- if (!x->calg)
2768 ++ if (!x->calg) {
2769 ++ err = -ENOMEM;
2770 + goto out;
2771 ++ }
2772 + strcpy(x->calg->alg_name, a->name);
2773 + x->props.calgo = sa->sadb_sa_encrypt;
2774 + } else {
2775 +@@ -1181,8 +1186,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
2776 + if (key)
2777 + keysize = (key->sadb_key_bits + 7) / 8;
2778 + x->ealg = kmalloc(sizeof(*x->ealg) + keysize, GFP_KERNEL);
2779 +- if (!x->ealg)
2780 ++ if (!x->ealg) {
2781 ++ err = -ENOMEM;
2782 + goto out;
2783 ++ }
2784 + strcpy(x->ealg->alg_name, a->name);
2785 + x->ealg->alg_key_len = 0;
2786 + if (key) {
2787 +@@ -1230,8 +1237,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
2788 + struct xfrm_encap_tmpl *natt;
2789 +
2790 + x->encap = kmalloc(sizeof(*x->encap), GFP_KERNEL);
2791 +- if (!x->encap)
2792 ++ if (!x->encap) {
2793 ++ err = -ENOMEM;
2794 + goto out;
2795 ++ }
2796 +
2797 + natt = x->encap;
2798 + n_type = ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1];
2799 +diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
2800 +index 1c6a71c41e62..ca66520b8942 100644
2801 +--- a/net/netfilter/ipvs/ip_vs_core.c
2802 ++++ b/net/netfilter/ipvs/ip_vs_core.c
2803 +@@ -795,10 +795,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
2804 + {
2805 + unsigned int verdict = NF_DROP;
2806 +
2807 +- if (IP_VS_FWD_METHOD(cp) != 0) {
2808 +- pr_err("shouldn't reach here, because the box is on the "
2809 +- "half connection in the tun/dr module.\n");
2810 +- }
2811 ++ if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
2812 ++ goto ignore_cp;
2813 +
2814 + /* Ensure the checksum is correct */
2815 + if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
2816 +@@ -832,6 +830,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
2817 + ip_vs_notrack(skb);
2818 + else
2819 + ip_vs_update_conntrack(skb, cp, 0);
2820 ++
2821 ++ignore_cp:
2822 + verdict = NF_ACCEPT;
2823 +
2824 + out:
2825 +@@ -1182,8 +1182,11 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
2826 + */
2827 + cp = pp->conn_out_get(af, skb, &iph, 0);
2828 +
2829 +- if (likely(cp))
2830 ++ if (likely(cp)) {
2831 ++ if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
2832 ++ goto ignore_cp;
2833 + return handle_response(af, skb, pd, cp, &iph, hooknum);
2834 ++ }
2835 + if (sysctl_nat_icmp_send(net) &&
2836 + (pp->protocol == IPPROTO_TCP ||
2837 + pp->protocol == IPPROTO_UDP ||
2838 +@@ -1225,9 +1228,15 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
2839 + }
2840 + }
2841 + }
2842 ++
2843 ++out:
2844 + IP_VS_DBG_PKT(12, af, pp, skb, 0,
2845 + "ip_vs_out: packet continues traversal as normal");
2846 + return NF_ACCEPT;
2847 ++
2848 ++ignore_cp:
2849 ++ __ip_vs_conn_put(cp);
2850 ++ goto out;
2851 + }
2852 +
2853 + /*
2854 +diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
2855 +index 1df176146567..c9f131fc4bf3 100644
2856 +--- a/net/netfilter/nf_conntrack_ecache.c
2857 ++++ b/net/netfilter/nf_conntrack_ecache.c
2858 +@@ -116,6 +116,7 @@ void nf_conntrack_unregister_notifier(struct net *net,
2859 + BUG_ON(notify != new);
2860 + RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL);
2861 + mutex_unlock(&nf_ct_ecache_mutex);
2862 ++ /* synchronize_rcu() is called from ctnetlink_exit. */
2863 + }
2864 + EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
2865 +
2866 +@@ -152,6 +153,7 @@ void nf_ct_expect_unregister_notifier(struct net *net,
2867 + BUG_ON(notify != new);
2868 + RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL);
2869 + mutex_unlock(&nf_ct_ecache_mutex);
2870 ++ /* synchronize_rcu() is called from ctnetlink_exit. */
2871 + }
2872 + EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
2873 +
2874 +diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
2875 +index 1a9545965c0d..531ca55f1af6 100644
2876 +--- a/net/netfilter/nf_conntrack_extend.c
2877 ++++ b/net/netfilter/nf_conntrack_extend.c
2878 +@@ -53,7 +53,11 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id,
2879 +
2880 + rcu_read_lock();
2881 + t = rcu_dereference(nf_ct_ext_types[id]);
2882 +- BUG_ON(t == NULL);
2883 ++ if (!t) {
2884 ++ rcu_read_unlock();
2885 ++ return NULL;
2886 ++ }
2887 ++
2888 + off = ALIGN(sizeof(struct nf_ct_ext), t->align);
2889 + len = off + t->len + var_alloc_len;
2890 + alloc_size = t->alloc_size + var_alloc_len;
2891 +@@ -88,7 +92,10 @@ void *__nf_ct_ext_add_length(struct nf_conn *ct, enum nf_ct_ext_id id,
2892 +
2893 + rcu_read_lock();
2894 + t = rcu_dereference(nf_ct_ext_types[id]);
2895 +- BUG_ON(t == NULL);
2896 ++ if (!t) {
2897 ++ rcu_read_unlock();
2898 ++ return NULL;
2899 ++ }
2900 +
2901 + newoff = ALIGN(old->len, t->align);
2902 + newlen = newoff + t->len + var_alloc_len;
2903 +@@ -186,6 +193,6 @@ void nf_ct_extend_unregister(struct nf_ct_ext_type *type)
2904 + RCU_INIT_POINTER(nf_ct_ext_types[type->id], NULL);
2905 + update_alloc_size(type);
2906 + mutex_unlock(&nf_ct_ext_type_mutex);
2907 +- rcu_barrier(); /* Wait for completion of call_rcu()'s */
2908 ++ synchronize_rcu();
2909 + }
2910 + EXPORT_SYMBOL_GPL(nf_ct_extend_unregister);
2911 +diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
2912 +index ecf065f94032..df65d52ba768 100644
2913 +--- a/net/netfilter/nf_conntrack_netlink.c
2914 ++++ b/net/netfilter/nf_conntrack_netlink.c
2915 +@@ -3132,6 +3132,7 @@ static void __exit ctnetlink_exit(void)
2916 + #ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
2917 + RCU_INIT_POINTER(nfq_ct_hook, NULL);
2918 + #endif
2919 ++ synchronize_rcu();
2920 + }
2921 +
2922 + module_init(ctnetlink_init);
2923 +diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
2924 +index 2bb801e3ee8c..7658d0181050 100644
2925 +--- a/net/netfilter/nf_nat_core.c
2926 ++++ b/net/netfilter/nf_nat_core.c
2927 +@@ -853,6 +853,8 @@ static void __exit nf_nat_cleanup(void)
2928 + #ifdef CONFIG_XFRM
2929 + RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL);
2930 + #endif
2931 ++ synchronize_rcu();
2932 ++
2933 + for (i = 0; i < NFPROTO_NUMPROTO; i++)
2934 + kfree(nf_nat_l4protos[i]);
2935 + synchronize_net();
2936 +diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
2937 +index 65074dfb9383..10d78dc0d2c6 100644
2938 +--- a/net/netfilter/nfnetlink_cttimeout.c
2939 ++++ b/net/netfilter/nfnetlink_cttimeout.c
2940 +@@ -431,6 +431,7 @@ static void __exit cttimeout_exit(void)
2941 + #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
2942 + RCU_INIT_POINTER(nf_ct_timeout_find_get_hook, NULL);
2943 + RCU_INIT_POINTER(nf_ct_timeout_put_hook, NULL);
2944 ++ synchronize_rcu();
2945 + #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
2946 + }
2947 +
2948 +diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
2949 +index 7011c71646f0..c656269c4cf0 100644
2950 +--- a/net/netfilter/xt_TCPMSS.c
2951 ++++ b/net/netfilter/xt_TCPMSS.c
2952 +@@ -68,7 +68,7 @@ tcpmss_mangle_packet(struct sk_buff *skb,
2953 + tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
2954 +
2955 + /* Header cannot be larger than the packet */
2956 +- if (tcplen < tcph->doff*4)
2957 ++ if (tcplen < tcph->doff*4 || tcph->doff*4 < sizeof(struct tcphdr))
2958 + return -1;
2959 +
2960 + if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
2961 +@@ -117,6 +117,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
2962 + if (tcplen > tcph->doff*4)
2963 + return 0;
2964 +
2965 ++ /* tcph->doff has 4 bits, do not wrap it to 0 */
2966 ++ if (tcph->doff >= 15)
2967 ++ return 0;
2968 ++
2969 + /*
2970 + * MSS Option not found ?! add it..
2971 + */
2972 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
2973 +index 0bbb3470fa78..2f22b0759f2c 100644
2974 +--- a/net/packet/af_packet.c
2975 ++++ b/net/packet/af_packet.c
2976 +@@ -3183,14 +3183,19 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
2977 +
2978 + if (optlen != sizeof(val))
2979 + return -EINVAL;
2980 +- if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
2981 +- return -EBUSY;
2982 + if (copy_from_user(&val, optval, sizeof(val)))
2983 + return -EFAULT;
2984 + if (val > INT_MAX)
2985 + return -EINVAL;
2986 +- po->tp_reserve = val;
2987 +- return 0;
2988 ++ lock_sock(sk);
2989 ++ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
2990 ++ ret = -EBUSY;
2991 ++ } else {
2992 ++ po->tp_reserve = val;
2993 ++ ret = 0;
2994 ++ }
2995 ++ release_sock(sk);
2996 ++ return ret;
2997 + }
2998 + case PACKET_LOSS:
2999 + {
3000 +@@ -3338,6 +3343,8 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
3001 + case PACKET_HDRLEN:
3002 + if (len > sizeof(int))
3003 + len = sizeof(int);
3004 ++ if (len < sizeof(int))
3005 ++ return -EINVAL;
3006 + if (copy_from_user(&val, optval, len))
3007 + return -EFAULT;
3008 + switch (val) {
3009 +diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
3010 +index 7633a752c65e..10e6e5de36e1 100644
3011 +--- a/net/rxrpc/ar-key.c
3012 ++++ b/net/rxrpc/ar-key.c
3013 +@@ -213,7 +213,7 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
3014 + unsigned int *_toklen)
3015 + {
3016 + const __be32 *xdr = *_xdr;
3017 +- unsigned int toklen = *_toklen, n_parts, loop, tmp;
3018 ++ unsigned int toklen = *_toklen, n_parts, loop, tmp, paddedlen;
3019 +
3020 + /* there must be at least one name, and at least #names+1 length
3021 + * words */
3022 +@@ -243,16 +243,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
3023 + toklen -= 4;
3024 + if (tmp <= 0 || tmp > AFSTOKEN_STRING_MAX)
3025 + return -EINVAL;
3026 +- if (tmp > toklen)
3027 ++ paddedlen = (tmp + 3) & ~3;
3028 ++ if (paddedlen > toklen)
3029 + return -EINVAL;
3030 + princ->name_parts[loop] = kmalloc(tmp + 1, GFP_KERNEL);
3031 + if (!princ->name_parts[loop])
3032 + return -ENOMEM;
3033 + memcpy(princ->name_parts[loop], xdr, tmp);
3034 + princ->name_parts[loop][tmp] = 0;
3035 +- tmp = (tmp + 3) & ~3;
3036 +- toklen -= tmp;
3037 +- xdr += tmp >> 2;
3038 ++ toklen -= paddedlen;
3039 ++ xdr += paddedlen >> 2;
3040 + }
3041 +
3042 + if (toklen < 4)
3043 +@@ -261,16 +261,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
3044 + toklen -= 4;
3045 + if (tmp <= 0 || tmp > AFSTOKEN_K5_REALM_MAX)
3046 + return -EINVAL;
3047 +- if (tmp > toklen)
3048 ++ paddedlen = (tmp + 3) & ~3;
3049 ++ if (paddedlen > toklen)
3050 + return -EINVAL;
3051 + princ->realm = kmalloc(tmp + 1, GFP_KERNEL);
3052 + if (!princ->realm)
3053 + return -ENOMEM;
3054 + memcpy(princ->realm, xdr, tmp);
3055 + princ->realm[tmp] = 0;
3056 +- tmp = (tmp + 3) & ~3;
3057 +- toklen -= tmp;
3058 +- xdr += tmp >> 2;
3059 ++ toklen -= paddedlen;
3060 ++ xdr += paddedlen >> 2;
3061 +
3062 + _debug("%s/...@%s", princ->name_parts[0], princ->realm);
3063 +
3064 +@@ -289,7 +289,7 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
3065 + unsigned int *_toklen)
3066 + {
3067 + const __be32 *xdr = *_xdr;
3068 +- unsigned int toklen = *_toklen, len;
3069 ++ unsigned int toklen = *_toklen, len, paddedlen;
3070 +
3071 + /* there must be at least one tag and one length word */
3072 + if (toklen <= 8)
3073 +@@ -303,15 +303,17 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
3074 + toklen -= 8;
3075 + if (len > max_data_size)
3076 + return -EINVAL;
3077 ++ paddedlen = (len + 3) & ~3;
3078 ++ if (paddedlen > toklen)
3079 ++ return -EINVAL;
3080 + td->data_len = len;
3081 +
3082 + if (len > 0) {
3083 + td->data = kmemdup(xdr, len, GFP_KERNEL);
3084 + if (!td->data)
3085 + return -ENOMEM;
3086 +- len = (len + 3) & ~3;
3087 +- toklen -= len;
3088 +- xdr += len >> 2;
3089 ++ toklen -= paddedlen;
3090 ++ xdr += paddedlen >> 2;
3091 + }
3092 +
3093 + _debug("tag %x len %x", td->tag, td->data_len);
3094 +@@ -383,7 +385,7 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
3095 + const __be32 **_xdr, unsigned int *_toklen)
3096 + {
3097 + const __be32 *xdr = *_xdr;
3098 +- unsigned int toklen = *_toklen, len;
3099 ++ unsigned int toklen = *_toklen, len, paddedlen;
3100 +
3101 + /* there must be at least one length word */
3102 + if (toklen <= 4)
3103 +@@ -395,6 +397,9 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
3104 + toklen -= 4;
3105 + if (len > AFSTOKEN_K5_TIX_MAX)
3106 + return -EINVAL;
3107 ++ paddedlen = (len + 3) & ~3;
3108 ++ if (paddedlen > toklen)
3109 ++ return -EINVAL;
3110 + *_tktlen = len;
3111 +
3112 + _debug("ticket len %u", len);
3113 +@@ -403,9 +408,8 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
3114 + *_ticket = kmemdup(xdr, len, GFP_KERNEL);
3115 + if (!*_ticket)
3116 + return -ENOMEM;
3117 +- len = (len + 3) & ~3;
3118 +- toklen -= len;
3119 +- xdr += len >> 2;
3120 ++ toklen -= paddedlen;
3121 ++ xdr += paddedlen >> 2;
3122 + }
3123 +
3124 + *_xdr = xdr;
3125 +@@ -549,7 +553,7 @@ static int rxrpc_instantiate_xdr(struct key *key, const void *data, size_t datal
3126 + {
3127 + const __be32 *xdr = data, *token;
3128 + const char *cp;
3129 +- unsigned int len, tmp, loop, ntoken, toklen, sec_ix;
3130 ++ unsigned int len, paddedlen, loop, ntoken, toklen, sec_ix;
3131 + int ret;
3132 +
3133 + _enter(",{%x,%x,%x,%x},%zu",
3134 +@@ -574,22 +578,21 @@ static int rxrpc_instantiate_xdr(struct key *key, const void *data, size_t datal
3135 + if (len < 1 || len > AFSTOKEN_CELL_MAX)
3136 + goto not_xdr;
3137 + datalen -= 4;
3138 +- tmp = (len + 3) & ~3;
3139 +- if (tmp > datalen)
3140 ++ paddedlen = (len + 3) & ~3;
3141 ++ if (paddedlen > datalen)
3142 + goto not_xdr;
3143 +
3144 + cp = (const char *) xdr;
3145 + for (loop = 0; loop < len; loop++)
3146 + if (!isprint(cp[loop]))
3147 + goto not_xdr;
3148 +- if (len < tmp)
3149 +- for (; loop < tmp; loop++)
3150 +- if (cp[loop])
3151 +- goto not_xdr;
3152 ++ for (; loop < paddedlen; loop++)
3153 ++ if (cp[loop])
3154 ++ goto not_xdr;
3155 + _debug("cellname: [%u/%u] '%*.*s'",
3156 +- len, tmp, len, len, (const char *) xdr);
3157 +- datalen -= tmp;
3158 +- xdr += tmp >> 2;
3159 ++ len, paddedlen, len, len, (const char *) xdr);
3160 ++ datalen -= paddedlen;
3161 ++ xdr += paddedlen >> 2;
3162 +
3163 + /* get the token count */
3164 + if (datalen < 12)
3165 +@@ -610,10 +613,11 @@ static int rxrpc_instantiate_xdr(struct key *key, const void *data, size_t datal
3166 + sec_ix = ntohl(*xdr);
3167 + datalen -= 4;
3168 + _debug("token: [%x/%zx] %x", toklen, datalen, sec_ix);
3169 +- if (toklen < 20 || toklen > datalen)
3170 ++ paddedlen = (toklen + 3) & ~3;
3171 ++ if (toklen < 20 || toklen > datalen || paddedlen > datalen)
3172 + goto not_xdr;
3173 +- datalen -= (toklen + 3) & ~3;
3174 +- xdr += (toklen + 3) >> 2;
3175 ++ datalen -= paddedlen;
3176 ++ xdr += paddedlen >> 2;
3177 +
3178 + } while (--loop > 0);
3179 +
3180 +diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
3181 +index 10d3e2874dd1..7c2fea69c832 100644
3182 +--- a/net/sctp/ipv6.c
3183 ++++ b/net/sctp/ipv6.c
3184 +@@ -492,7 +492,9 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
3185 + {
3186 + addr->sa.sa_family = AF_INET6;
3187 + addr->v6.sin6_port = port;
3188 ++ addr->v6.sin6_flowinfo = 0;
3189 + addr->v6.sin6_addr = *saddr;
3190 ++ addr->v6.sin6_scope_id = 0;
3191 + }
3192 +
3193 + /* Compare addresses exactly.
3194 +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
3195 +index dd3dbed89c8f..da79f9b86dfd 100644
3196 +--- a/net/wireless/nl80211.c
3197 ++++ b/net/wireless/nl80211.c
3198 +@@ -310,8 +310,7 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
3199 + [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
3200 + [NL80211_ATTR_PID] = { .type = NLA_U32 },
3201 + [NL80211_ATTR_4ADDR] = { .type = NLA_U8 },
3202 +- [NL80211_ATTR_PMKID] = { .type = NLA_BINARY,
3203 +- .len = WLAN_PMKID_LEN },
3204 ++ [NL80211_ATTR_PMKID] = { .len = WLAN_PMKID_LEN },
3205 + [NL80211_ATTR_DURATION] = { .type = NLA_U32 },
3206 + [NL80211_ATTR_COOKIE] = { .type = NLA_U64 },
3207 + [NL80211_ATTR_TX_RATES] = { .type = NLA_NESTED },
3208 +@@ -5044,6 +5043,10 @@ static int validate_scan_freqs(struct nlattr *freqs)
3209 + struct nlattr *attr1, *attr2;
3210 + int n_channels = 0, tmp1, tmp2;
3211 +
3212 ++ nla_for_each_nested(attr1, freqs, tmp1)
3213 ++ if (nla_len(attr1) != sizeof(u32))
3214 ++ return 0;
3215 ++
3216 + nla_for_each_nested(attr1, freqs, tmp1) {
3217 + n_channels++;
3218 + /*
3219 +@@ -8010,6 +8013,9 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info)
3220 + if (err)
3221 + return err;
3222 +
3223 ++ if (!tb[NL80211_REKEY_DATA_REPLAY_CTR] || !tb[NL80211_REKEY_DATA_KEK] ||
3224 ++ !tb[NL80211_REKEY_DATA_KCK])
3225 ++ return -EINVAL;
3226 + if (nla_len(tb[NL80211_REKEY_DATA_REPLAY_CTR]) != NL80211_REPLAY_CTR_LEN)
3227 + return -ERANGE;
3228 + if (nla_len(tb[NL80211_REKEY_DATA_KEK]) != NL80211_KEK_LEN)
3229 +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
3230 +index ea970b8002a2..10c556e373b0 100644
3231 +--- a/net/xfrm/xfrm_policy.c
3232 ++++ b/net/xfrm/xfrm_policy.c
3233 +@@ -3201,9 +3201,15 @@ int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
3234 + struct xfrm_state *x_new[XFRM_MAX_DEPTH];
3235 + struct xfrm_migrate *mp;
3236 +
3237 ++ /* Stage 0 - sanity checks */
3238 + if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
3239 + goto out;
3240 +
3241 ++ if (dir >= XFRM_POLICY_MAX) {
3242 ++ err = -EINVAL;
3243 ++ goto out;
3244 ++ }
3245 ++
3246 + /* Stage 1 - find policy */
3247 + if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) {
3248 + err = -ENOENT;
3249 +diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
3250 +index c4c8df4b214d..b7d7cffe7349 100644
3251 +--- a/security/keys/encrypted-keys/encrypted.c
3252 ++++ b/security/keys/encrypted-keys/encrypted.c
3253 +@@ -315,6 +315,13 @@ static struct key *request_user_key(const char *master_desc, u8 **master_key,
3254 +
3255 + down_read(&ukey->sem);
3256 + upayload = ukey->payload.data;
3257 ++ if (!upayload) {
3258 ++ /* key was revoked before we acquired its semaphore */
3259 ++ up_read(&ukey->sem);
3260 ++ key_put(ukey);
3261 ++ ukey = ERR_PTR(-EKEYREVOKED);
3262 ++ goto error;
3263 ++ }
3264 + *master_key = upayload->data;
3265 + *master_keylen = upayload->datalen;
3266 + error:
3267 +@@ -428,7 +435,7 @@ static int init_blkcipher_desc(struct blkcipher_desc *desc, const u8 *key,
3268 + static struct key *request_master_key(struct encrypted_key_payload *epayload,
3269 + u8 **master_key, size_t *master_keylen)
3270 + {
3271 +- struct key *mkey = NULL;
3272 ++ struct key *mkey = ERR_PTR(-EINVAL);
3273 +
3274 + if (!strncmp(epayload->master_desc, KEY_TRUSTED_PREFIX,
3275 + KEY_TRUSTED_PREFIX_LEN)) {
3276 +diff --git a/security/keys/internal.h b/security/keys/internal.h
3277 +index d4f1468b9b50..ce6d4634a840 100644
3278 +--- a/security/keys/internal.h
3279 ++++ b/security/keys/internal.h
3280 +@@ -126,7 +126,7 @@ extern key_ref_t search_process_keyrings(struct key_type *type,
3281 + key_match_func_t match,
3282 + const struct cred *cred);
3283 +
3284 +-extern struct key *find_keyring_by_name(const char *name, bool skip_perm_check);
3285 ++extern struct key *find_keyring_by_name(const char *name, bool uid_keyring);
3286 +
3287 + extern int install_user_keyrings(void);
3288 + extern int install_thread_keyring_to_cred(struct cred *);
3289 +diff --git a/security/keys/key.c b/security/keys/key.c
3290 +index 6595b2dd89fe..248c2e731375 100644
3291 +--- a/security/keys/key.c
3292 ++++ b/security/keys/key.c
3293 +@@ -299,6 +299,8 @@ struct key *key_alloc(struct key_type *type, const char *desc,
3294 +
3295 + if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
3296 + key->flags |= 1 << KEY_FLAG_IN_QUOTA;
3297 ++ if (flags & KEY_ALLOC_UID_KEYRING)
3298 ++ key->flags |= 1 << KEY_FLAG_UID_KEYRING;
3299 +
3300 + memset(&key->type_data, 0, sizeof(key->type_data));
3301 +
3302 +@@ -897,6 +899,16 @@ error:
3303 + */
3304 + __key_link_end(keyring, ktype, prealloc);
3305 +
3306 ++ key = key_ref_to_ptr(key_ref);
3307 ++ if (test_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) {
3308 ++ ret = wait_for_key_construction(key, true);
3309 ++ if (ret < 0) {
3310 ++ key_ref_put(key_ref);
3311 ++ key_ref = ERR_PTR(ret);
3312 ++ goto error_free_prep;
3313 ++ }
3314 ++ }
3315 ++
3316 + key_ref = __key_update(key_ref, &prep);
3317 + goto error_free_prep;
3318 + }
3319 +diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
3320 +index 066baa1926bb..7576f49eeb34 100644
3321 +--- a/security/keys/keyctl.c
3322 ++++ b/security/keys/keyctl.c
3323 +@@ -93,7 +93,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
3324 + payload = NULL;
3325 +
3326 + vm = false;
3327 +- if (_payload) {
3328 ++ if (plen) {
3329 + ret = -ENOMEM;
3330 + payload = kmalloc(plen, GFP_KERNEL | __GFP_NOWARN);
3331 + if (!payload) {
3332 +@@ -327,7 +327,7 @@ long keyctl_update_key(key_serial_t id,
3333 +
3334 + /* pull the payload in if one was supplied */
3335 + payload = NULL;
3336 +- if (_payload) {
3337 ++ if (plen) {
3338 + ret = -ENOMEM;
3339 + payload = kmalloc(plen, GFP_KERNEL);
3340 + if (!payload)
3341 +diff --git a/security/keys/keyring.c b/security/keys/keyring.c
3342 +index 6ece7f2e5707..b0cabf68c678 100644
3343 +--- a/security/keys/keyring.c
3344 ++++ b/security/keys/keyring.c
3345 +@@ -583,15 +583,15 @@ found:
3346 + /*
3347 + * Find a keyring with the specified name.
3348 + *
3349 +- * All named keyrings in the current user namespace are searched, provided they
3350 +- * grant Search permission directly to the caller (unless this check is
3351 +- * skipped). Keyrings whose usage points have reached zero or who have been
3352 +- * revoked are skipped.
3353 ++ * Only keyrings that have nonzero refcount, are not revoked, and are owned by a
3354 ++ * user in the current user namespace are considered. If @uid_keyring is %true,
3355 ++ * the keyring additionally must have been allocated as a user or user session
3356 ++ * keyring; otherwise, it must grant Search permission directly to the caller.
3357 + *
3358 + * Returns a pointer to the keyring with the keyring's refcount having being
3359 + * incremented on success. -ENOKEY is returned if a key could not be found.
3360 + */
3361 +-struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
3362 ++struct key *find_keyring_by_name(const char *name, bool uid_keyring)
3363 + {
3364 + struct key *keyring;
3365 + int bucket;
3366 +@@ -619,10 +619,15 @@ struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
3367 + if (strcmp(keyring->description, name) != 0)
3368 + continue;
3369 +
3370 +- if (!skip_perm_check &&
3371 +- key_permission(make_key_ref(keyring, 0),
3372 +- KEY_SEARCH) < 0)
3373 +- continue;
3374 ++ if (uid_keyring) {
3375 ++ if (!test_bit(KEY_FLAG_UID_KEYRING,
3376 ++ &keyring->flags))
3377 ++ continue;
3378 ++ } else {
3379 ++ if (key_permission(make_key_ref(keyring, 0),
3380 ++ KEY_SEARCH) < 0)
3381 ++ continue;
3382 ++ }
3383 +
3384 + /* we've got a match but we might end up racing with
3385 + * key_cleanup() if the keyring is currently 'dead'
3386 +diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
3387 +index 33384662fc82..f58a5aa05fa4 100644
3388 +--- a/security/keys/process_keys.c
3389 ++++ b/security/keys/process_keys.c
3390 +@@ -76,7 +76,9 @@ int install_user_keyrings(void)
3391 + if (IS_ERR(uid_keyring)) {
3392 + uid_keyring = keyring_alloc(buf, user->uid, INVALID_GID,
3393 + cred, user_keyring_perm,
3394 +- KEY_ALLOC_IN_QUOTA, NULL);
3395 ++ KEY_ALLOC_UID_KEYRING |
3396 ++ KEY_ALLOC_IN_QUOTA,
3397 ++ NULL);
3398 + if (IS_ERR(uid_keyring)) {
3399 + ret = PTR_ERR(uid_keyring);
3400 + goto error;
3401 +@@ -92,7 +94,9 @@ int install_user_keyrings(void)
3402 + session_keyring =
3403 + keyring_alloc(buf, user->uid, INVALID_GID,
3404 + cred, user_keyring_perm,
3405 +- KEY_ALLOC_IN_QUOTA, NULL);
3406 ++ KEY_ALLOC_UID_KEYRING |
3407 ++ KEY_ALLOC_IN_QUOTA,
3408 ++ NULL);
3409 + if (IS_ERR(session_keyring)) {
3410 + ret = PTR_ERR(session_keyring);
3411 + goto error_release;
3412 +diff --git a/sound/core/control.c b/sound/core/control.c
3413 +index 251bc575f5c3..c39282611368 100644
3414 +--- a/sound/core/control.c
3415 ++++ b/sound/core/control.c
3416 +@@ -1088,7 +1088,7 @@ static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol,
3417 + mutex_lock(&ue->card->user_ctl_lock);
3418 + change = ue->tlv_data_size != size;
3419 + if (!change)
3420 +- change = memcmp(ue->tlv_data, new_data, size);
3421 ++ change = memcmp(ue->tlv_data, new_data, size) != 0;
3422 + kfree(ue->tlv_data);
3423 + ue->tlv_data = new_data;
3424 + ue->tlv_data_size = size;
3425 +diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
3426 +index d449dde1bf50..7b5a7902b7a2 100644
3427 +--- a/sound/core/seq/seq_clientmgr.c
3428 ++++ b/sound/core/seq/seq_clientmgr.c
3429 +@@ -1248,6 +1248,7 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client,
3430 + struct snd_seq_client_port *port;
3431 + struct snd_seq_port_info info;
3432 + struct snd_seq_port_callback *callback;
3433 ++ int port_idx;
3434 +
3435 + if (copy_from_user(&info, arg, sizeof(info)))
3436 + return -EFAULT;
3437 +@@ -1261,7 +1262,9 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client,
3438 + return -ENOMEM;
3439 +
3440 + if (client->type == USER_CLIENT && info.kernel) {
3441 +- snd_seq_delete_port(client, port->addr.port);
3442 ++ port_idx = port->addr.port;
3443 ++ snd_seq_port_unlock(port);
3444 ++ snd_seq_delete_port(client, port_idx);
3445 + return -EINVAL;
3446 + }
3447 + if (client->type == KERNEL_CLIENT) {
3448 +@@ -1283,6 +1286,7 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client,
3449 +
3450 + snd_seq_set_port_info(port, &info);
3451 + snd_seq_system_client_ev_port_start(port->addr.client, port->addr.port);
3452 ++ snd_seq_port_unlock(port);
3453 +
3454 + if (copy_to_user(arg, &info, sizeof(info)))
3455 + return -EFAULT;
3456 +diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
3457 +index ee0522a8f730..a28d1acad574 100644
3458 +--- a/sound/core/seq/seq_ports.c
3459 ++++ b/sound/core/seq/seq_ports.c
3460 +@@ -122,7 +122,9 @@ static void port_subs_info_init(struct snd_seq_port_subs_info *grp)
3461 + }
3462 +
3463 +
3464 +-/* create a port, port number is returned (-1 on failure) */
3465 ++/* create a port, port number is returned (-1 on failure);
3466 ++ * the caller needs to unref the port via snd_seq_port_unlock() appropriately
3467 ++ */
3468 + struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
3469 + int port)
3470 + {
3471 +@@ -153,6 +155,7 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
3472 + snd_use_lock_init(&new_port->use_lock);
3473 + port_subs_info_init(&new_port->c_src);
3474 + port_subs_info_init(&new_port->c_dest);
3475 ++ snd_use_lock_use(&new_port->use_lock);
3476 +
3477 + num = port >= 0 ? port : 0;
3478 + mutex_lock(&client->ports_mutex);
3479 +@@ -167,9 +170,9 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
3480 + list_add_tail(&new_port->list, &p->list);
3481 + client->num_ports++;
3482 + new_port->addr.port = num; /* store the port number in the port */
3483 ++ sprintf(new_port->name, "port-%d", num);
3484 + write_unlock_irqrestore(&client->ports_lock, flags);
3485 + mutex_unlock(&client->ports_mutex);
3486 +- sprintf(new_port->name, "port-%d", num);
3487 +
3488 + return new_port;
3489 + }
3490 +diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c
3491 +index bbc782e364b0..9118fb8cc100 100644
3492 +--- a/tools/perf/ui/browser.c
3493 ++++ b/tools/perf/ui/browser.c
3494 +@@ -672,7 +672,7 @@ static void __ui_browser__line_arrow_down(struct ui_browser *browser,
3495 + ui_browser__gotorc(browser, row, column + 1);
3496 + SLsmg_draw_hline(2);
3497 +
3498 +- if (row++ == 0)
3499 ++ if (++row == 0)
3500 + goto out;
3501 + } else
3502 + row = 0;