
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:3.12 commit in: /
Date: Fri, 10 Mar 2017 00:38:11
Message-Id: 1489106280.5a3c2c16a6d0eb0c726b98af9ed28cca9c25a872.mpagano@gentoo
commit: 5a3c2c16a6d0eb0c726b98af9ed28cca9c25a872
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Mar 10 00:38:00 2017 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Mar 10 00:38:00 2017 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=5a3c2c16

Linux patch 3.12.71

0000_README | 4 +
1070_linux-3.12.71.patch | 3728 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 3732 insertions(+)

diff --git a/0000_README b/0000_README
index 89b165d..8a27c91 100644
--- a/0000_README
+++ b/0000_README
@@ -326,6 +326,10 @@ Patch: 1069_linux-3.12.70.patch
From: http://www.kernel.org
Desc: Linux 3.12.70

+Patch: 1070_linux-3.12.71.patch
+From: http://www.kernel.org
+Desc: Linux 3.12.71
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1070_linux-3.12.71.patch b/1070_linux-3.12.71.patch
new file mode 100644
index 0000000..e6e53a2
--- /dev/null
+++ b/1070_linux-3.12.71.patch
@@ -0,0 +1,3728 @@
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 64c6734da6d8..1ebce8682832 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -1013,6 +1013,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ When zero, profiling data is discarded and associated
+ debugfs files are removed at module unload time.
+
++ goldfish [X86] Enable the goldfish android emulator platform.
++ Don't use this when you are not running on the
++ android emulator
++
+ gpt [EFI] Forces disk with valid GPT signature but
+ invalid Protective MBR to be treated as GPT.
+
+diff --git a/Makefile b/Makefile
+index d0e6e38ee77b..f9da868f99a8 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 70
++SUBLEVEL = 71
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+
+diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
+index 7ff5b5c183bb..2cc82b6ec23d 100644
+--- a/arch/arc/kernel/unaligned.c
++++ b/arch/arc/kernel/unaligned.c
+@@ -240,8 +240,9 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
+ if (state.fault)
+ goto fault;
+
++ /* clear any remanants of delay slot */
+ if (delay_mode(regs)) {
+- regs->ret = regs->bta;
++ regs->ret = regs->bta & ~1U;
+ regs->status32 &= ~STATUS_DE_MASK;
+ } else {
+ regs->ret += state.instr_len;
+diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
+index ec33df500f86..93e6b7ea81b9 100644
+--- a/arch/arm/kernel/ptrace.c
++++ b/arch/arm/kernel/ptrace.c
+@@ -600,7 +600,7 @@ static int gpr_set(struct task_struct *target,
+ const void *kbuf, const void __user *ubuf)
+ {
+ int ret;
+- struct pt_regs newregs;
++ struct pt_regs newregs = *task_pt_regs(target);
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &newregs,
+diff --git a/arch/parisc/include/asm/bitops.h b/arch/parisc/include/asm/bitops.h
+index 8c9b631d2a78..8c00e6c06266 100644
+--- a/arch/parisc/include/asm/bitops.h
++++ b/arch/parisc/include/asm/bitops.h
+@@ -6,7 +6,7 @@
+ #endif
+
+ #include <linux/compiler.h>
+-#include <asm/types.h> /* for BITS_PER_LONG/SHIFT_PER_LONG */
++#include <asm/types.h>
+ #include <asm/byteorder.h>
+ #include <linux/atomic.h>
+
+@@ -16,6 +16,12 @@
+ * to include/asm-i386/bitops.h or kerneldoc
+ */
+
++#if __BITS_PER_LONG == 64
++#define SHIFT_PER_LONG 6
++#else
++#define SHIFT_PER_LONG 5
++#endif
++
+ #define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
+
+
+diff --git a/arch/parisc/include/uapi/asm/bitsperlong.h b/arch/parisc/include/uapi/asm/bitsperlong.h
+index 75196b415d3f..540c94de4427 100644
+--- a/arch/parisc/include/uapi/asm/bitsperlong.h
++++ b/arch/parisc/include/uapi/asm/bitsperlong.h
+@@ -9,10 +9,8 @@
+ */
+ #if (defined(__KERNEL__) && defined(CONFIG_64BIT)) || defined (__LP64__)
+ #define __BITS_PER_LONG 64
+-#define SHIFT_PER_LONG 6
+ #else
+ #define __BITS_PER_LONG 32
+-#define SHIFT_PER_LONG 5
+ #endif
+
+ #include <asm-generic/bitsperlong.h>
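
These two hunks move SHIFT_PER_LONG out of the exported uapi header and instead derive it from __BITS_PER_LONG in the kernel-only bitops.h. A minimal userspace sketch of the same derivation and of how such a shift is used for word indexing (the macro names mirror the kernel's but this is illustrative only, not the kernel code):

#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))
#define SHIFT_PER_LONG (BITS_PER_LONG == 64 ? 6 : 5) /* log2(BITS_PER_LONG) */

int main(void)
{
	unsigned int bit = 200;

	/* word index and in-word offset, as CHOP_SHIFTCOUNT-style code uses them */
	printf("bit %u -> word %u, offset %u\n",
	       bit, bit >> SHIFT_PER_LONG, bit & (BITS_PER_LONG - 1));
	return 0;
}
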
+diff --git a/arch/parisc/include/uapi/asm/swab.h b/arch/parisc/include/uapi/asm/swab.h
+index e78403b129ef..928e1bbac98f 100644
+--- a/arch/parisc/include/uapi/asm/swab.h
++++ b/arch/parisc/include/uapi/asm/swab.h
+@@ -1,6 +1,7 @@
+ #ifndef _PARISC_SWAB_H
+ #define _PARISC_SWAB_H
+
++#include <asm/bitsperlong.h>
+ #include <linux/types.h>
+ #include <linux/compiler.h>
+
+@@ -38,7 +39,7 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
+ }
+ #define __arch_swab32 __arch_swab32
+
+-#if BITS_PER_LONG > 32
++#if __BITS_PER_LONG > 32
+ /*
+ ** From "PA-RISC 2.0 Architecture", HP Professional Books.
+ ** See Appendix I page 8 , "Endian Byte Swapping".
+@@ -61,6 +62,6 @@ static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
+ return x;
+ }
+ #define __arch_swab64 __arch_swab64
+-#endif /* BITS_PER_LONG > 32 */
++#endif /* __BITS_PER_LONG > 32 */
+
+ #endif /* _PARISC_SWAB_H */
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index 906fba63b66d..45f3d31c8e5e 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -811,7 +811,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
+ return NULL;
+ memset(header, 0, sz);
+ header->pages = sz / PAGE_SIZE;
+- hole = sz - (bpfsize + sizeof(*header));
++ hole = min(sz - (bpfsize + sizeof(*header)), PAGE_SIZE - sizeof(*header));
+ /* Insert random number of illegal instructions before BPF code
+ * and make sure the first instruction starts at an even address.
+ */
+diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
+index de98c6ddf136..2343126c4ad2 100644
+--- a/arch/tile/kernel/ptrace.c
++++ b/arch/tile/kernel/ptrace.c
+@@ -110,7 +110,7 @@ static int tile_gpr_set(struct task_struct *target,
+ const void *kbuf, const void __user *ubuf)
+ {
+ int ret;
+- struct pt_regs regs;
++ struct pt_regs regs = *task_pt_regs(target);
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0,
+ sizeof(regs));
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 0cda30450825..7255e3dee799 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -894,7 +894,7 @@ config X86_LOCAL_APIC
+
+ config X86_IO_APIC
+ def_bool y
+- depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC || PCI_MSI
++ depends on X86_LOCAL_APIC || X86_UP_IOAPIC
+
+ config X86_VISWS_APIC
+ def_bool y
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index 1b72000b6be2..1fed139f8eae 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -171,7 +171,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
+ memset(header, 0xcc, sz); /* fill whole space with int3 instructions */
+
+ header->pages = sz / PAGE_SIZE;
+- hole = sz - (proglen + sizeof(*header));
++ hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header));
+
+ /* insert a random number of int3 instructions before BPF code */
+ *image_ptr = &header->image[prandom_u32() % hole];
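
Both JIT hunks clamp the randomization window so that the randomly chosen image start always lands inside the first page of the page-rounded allocation. A small standalone sketch of the arithmetic (PAGE_SIZE, the header size, and the program length are assumed example values, not the kernel's):

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long hdr = 32;           /* stand-in for sizeof(*header) */
	unsigned long proglen = 100;      /* a short JITed program */
	unsigned long sz = 2 * PAGE_SIZE; /* page-rounded allocation */

	unsigned long hole = sz - (proglen + hdr);
	unsigned long clamped = hole < PAGE_SIZE - hdr ? hole : PAGE_SIZE - hdr;

	/* image starts at hdr + (prandom % hole); show the worst case */
	printf("unclamped: start can reach %lu (beyond page 0)\n", hdr + hole - 1);
	printf("clamped:   start at most  %lu (within page 0)\n", hdr + clamped - 1);
	return 0;
}
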
+diff --git a/arch/x86/platform/goldfish/goldfish.c b/arch/x86/platform/goldfish/goldfish.c
+index 1693107a518e..0d17c0aafeb1 100644
+--- a/arch/x86/platform/goldfish/goldfish.c
++++ b/arch/x86/platform/goldfish/goldfish.c
+@@ -42,10 +42,22 @@ static struct resource goldfish_pdev_bus_resources[] = {
+ }
+ };
+
++static bool goldfish_enable __initdata;
++
++static int __init goldfish_setup(char *str)
++{
++ goldfish_enable = true;
++ return 0;
++}
++__setup("goldfish", goldfish_setup);
++
+ static int __init goldfish_init(void)
+ {
++ if (!goldfish_enable)
++ return -ENODEV;
++
+ platform_device_register_simple("goldfish_pdev_bus", -1,
+- goldfish_pdev_bus_resources, 2);
++ goldfish_pdev_bus_resources, 2);
+ return 0;
+ }
+ device_initcall(goldfish_init);
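
With this change the x86 goldfish platform devices are registered only when "goldfish" appears on the kernel command line, instead of unconditionally on every boot. A hedged userspace sketch of that gating (a plain strstr() match stands in for the kernel's __setup() parameter hook):

#include <stdio.h>
#include <string.h>

static int goldfish_enable;

static void parse_cmdline(const char *cmdline)
{
	if (strstr(cmdline, "goldfish")) /* simplified token match */
		goldfish_enable = 1;
}

static int goldfish_init(void)
{
	if (!goldfish_enable)
		return -1; /* the driver returns -ENODEV here */
	puts("registering goldfish_pdev_bus");
	return 0;
}

int main(void)
{
	parse_cmdline("console=ttyS0 goldfish");
	return goldfish_init() ? 1 : 0;
}
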
+diff --git a/crypto/algapi.c b/crypto/algapi.c
+index daf2f653b131..8ea7a5dc3839 100644
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -337,6 +337,7 @@ int crypto_register_alg(struct crypto_alg *alg)
+ struct crypto_larval *larval;
+ int err;
+
++ alg->cra_flags &= ~CRYPTO_ALG_DEAD;
+ err = crypto_check_alg(alg);
+ if (err)
+ return err;
+diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
+index dc9d4b1ea4ec..90a71cc5c910 100644
+--- a/drivers/ata/sata_mv.c
++++ b/drivers/ata/sata_mv.c
+@@ -4098,6 +4098,9 @@ static int mv_platform_probe(struct platform_device *pdev)
+ host->iomap = NULL;
+ hpriv->base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
++ if (!hpriv->base)
++ return -ENOMEM;
++
+ hpriv->base -= SATAHC0_REG_BASE;
+
+ hpriv->clk = clk_get(&pdev->dev, NULL);
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 8356b481e339..a7b2a5f53b2b 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -860,9 +860,6 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
+
+ /* set default policy */
+ ret = __cpufreq_set_policy(policy, &new_policy);
+- policy->user_policy.policy = policy->policy;
+- policy->user_policy.governor = policy->governor;
+-
+ if (ret) {
+ pr_debug("setting policy failed\n");
+ if (cpufreq_driver->exit)
+@@ -872,8 +869,7 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
+
+ #ifdef CONFIG_HOTPLUG_CPU
+ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
+- unsigned int cpu, struct device *dev,
+- bool frozen)
++ unsigned int cpu, struct device *dev)
+ {
+ int ret = 0, has_target = !!cpufreq_driver->target;
+ unsigned long flags;
+@@ -904,11 +900,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
+ }
+ }
+
+- /* Don't touch sysfs links during light-weight init */
+- if (!frozen)
+- ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
+-
+- return ret;
++ return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
+ }
+ #endif
+
+@@ -951,6 +943,27 @@ err_free_policy:
+ return NULL;
+ }
+
++static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
++{
++ struct kobject *kobj;
++ struct completion *cmp;
++
++ lock_policy_rwsem_read(policy->cpu);
++ kobj = &policy->kobj;
++ cmp = &policy->kobj_unregister;
++ unlock_policy_rwsem_read(policy->cpu);
++ kobject_put(kobj);
++
++ /*
++ * We need to make sure that the underlying kobj is
++ * actually not referenced anymore by anybody before we
++ * proceed with unloading.
++ */
++ pr_debug("waiting for dropping of refcount\n");
++ wait_for_completion(cmp);
++ pr_debug("wait complete\n");
++}
++
+ static void cpufreq_policy_free(struct cpufreq_policy *policy)
+ {
+ free_cpumask_var(policy->related_cpus);
+@@ -1020,7 +1033,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
+ list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
+ if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
+ read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+- ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev, frozen);
++ ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
+ up_read(&cpufreq_rwsem);
+ return ret;
+ }
+@@ -1028,15 +1041,17 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
+ read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ #endif
+
+- if (frozen)
+- /* Restore the saved policy when doing light-weight init */
+- policy = cpufreq_policy_restore(cpu);
+- else
++ /*
++ * Restore the saved policy when doing light-weight init and fall back
++ * to the full init if that fails.
++ */
++ policy = frozen ? cpufreq_policy_restore(cpu) : NULL;
++ if (!policy) {
++ frozen = false;
+ policy = cpufreq_policy_alloc();
+-
+- if (!policy)
+- goto nomem_out;
+-
++ if (!policy)
++ goto nomem_out;
++ }
+
+ /*
+ * In the resume path, since we restore a saved policy, the assignment
+@@ -1073,8 +1088,10 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
+ */
+ cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
+
+- policy->user_policy.min = policy->min;
+- policy->user_policy.max = policy->max;
++ if (!frozen) {
++ policy->user_policy.min = policy->min;
++ policy->user_policy.max = policy->max;
++ }
+
+ blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+ CPUFREQ_START, policy);
+@@ -1105,6 +1122,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
+
+ cpufreq_init_policy(policy);
+
++ if (!frozen) {
++ policy->user_policy.policy = policy->policy;
++ policy->user_policy.governor = policy->governor;
++ }
++
+ kobject_uevent(&policy->kobj, KOBJ_ADD);
+ up_read(&cpufreq_rwsem);
+
+@@ -1119,7 +1141,13 @@ err_out_unregister:
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ err_set_policy_cpu:
++ if (frozen) {
++ /* Do not leave stale fallback data behind. */
++ per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
++ cpufreq_policy_put_kobj(policy);
++ }
+ cpufreq_policy_free(policy);
++
+ nomem_out:
+ up_read(&cpufreq_rwsem);
+
+@@ -1141,7 +1169,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
+ }
+
+ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
+- unsigned int old_cpu, bool frozen)
++ unsigned int old_cpu)
+ {
+ struct device *cpu_dev;
+ int ret;
+@@ -1149,10 +1177,6 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
+ /* first sibling now owns the new sysfs dir */
+ cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
+
+- /* Don't touch sysfs files during light-weight tear-down */
+- if (frozen)
+- return cpu_dev->id;
+-
+ sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
+ ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
+ if (ret) {
+@@ -1220,7 +1244,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
+ sysfs_remove_link(&dev->kobj, "cpufreq");
+ } else if (cpus > 1) {
+
+- new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
++ new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
+ if (new_cpu >= 0) {
+ update_policy_cpu(policy, new_cpu);
+
+@@ -1242,8 +1266,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
+ int ret;
+ unsigned long flags;
+ struct cpufreq_policy *policy;
+- struct kobject *kobj;
+- struct completion *cmp;
+
+ read_lock_irqsave(&cpufreq_driver_lock, flags);
+ policy = per_cpu(cpufreq_cpu_data, cpu);
+@@ -1273,22 +1295,8 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
+ }
+ }
+
+- if (!frozen) {
+- lock_policy_rwsem_read(cpu);
+- kobj = &policy->kobj;
+- cmp = &policy->kobj_unregister;
+- unlock_policy_rwsem_read(cpu);
+- kobject_put(kobj);
+-
+- /*
+- * We need to make sure that the underlying kobj is
+- * actually not referenced anymore by anybody before we
+- * proceed with unloading.
+- */
+- pr_debug("waiting for dropping of refcount\n");
+- wait_for_completion(cmp);
+- pr_debug("wait complete\n");
+- }
++ if (!frozen)
++ cpufreq_policy_put_kobj(policy);
+
+ /*
+ * Perform the ->exit() even during light-weight tear-down,
+@@ -2062,9 +2070,6 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
+ dev = get_cpu_device(cpu);
+ if (dev) {
+
+- if (action & CPU_TASKS_FROZEN)
+- frozen = true;
+-
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_ONLINE:
+ __cpufreq_add_dev(dev, NULL, frozen);
+diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
+index 92d2116bf1ad..170df51257ea 100644
+--- a/drivers/crypto/caam/caamhash.c
++++ b/drivers/crypto/caam/caamhash.c
+@@ -1799,6 +1799,7 @@ caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template,
+ template->name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->driver_name);
++ t_alg->ahash_alg.setkey = NULL;
+ }
+ alg->cra_module = THIS_MODULE;
+ alg->cra_init = caam_hash_cra_init;
+diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
+index 74ef54a4645f..62a0e501057b 100644
+--- a/drivers/gpu/drm/i915/intel_crt.c
++++ b/drivers/gpu/drm/i915/intel_crt.c
+@@ -475,6 +475,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
+ struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
+ struct edid *edid;
+ struct i2c_adapter *i2c;
++ bool ret = false;
+
+ BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
+
+@@ -491,17 +492,17 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
+ */
+ if (!is_digital) {
+ DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n")
+- return true;
++ ret = true;
++ } else {
++ DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
+ }
+-
+- DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
+ } else {
+ DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
+ }
+
+ kfree(edid);
+
+- return false;
++ return ret;
+ }
+
+ static enum drm_connector_status
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 57d5abc420d1..bfb054d1d5b0 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -7696,9 +7696,9 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
+
+ wake_up_all(&dev_priv->pending_flip_queue);
+
+- queue_work(dev_priv->wq, &work->work);
+-
+ trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
++
++ queue_work(dev_priv->wq, &work->work);
+ }
+
+ void intel_finish_page_flip(struct drm_device *dev, int pipe)
+diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
+index 973056b86207..b16e051e48f0 100644
+--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
++++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
+@@ -224,6 +224,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
+ uint32_t mpllP;
+
+ pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
++ mpllP = (mpllP >> 8) & 0xf;
+ if (!mpllP)
+ mpllP = 4;
+
+@@ -234,7 +235,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
+ uint32_t clock;
+
+ pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
+- return clock;
++ return clock / 1000;
+ }
+
+ ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals);
+diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
+index f8e66c08b11a..4e384a2f99c3 100644
+--- a/drivers/gpu/drm/nouveau/nv50_display.c
++++ b/drivers/gpu/drm/nouveau/nv50_display.c
+@@ -1265,7 +1265,7 @@ nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+ uint32_t start, uint32_t size)
+ {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+- u32 end = max(start + size, (u32)256);
++ u32 end = min_t(u32, start + size, 256);
+ u32 i;
+
+ for (i = start; i < end; i++) {
+diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
+index a82e542ffc21..fecbf1d2f60b 100644
+--- a/drivers/isdn/hardware/eicon/message.c
++++ b/drivers/isdn/hardware/eicon/message.c
+@@ -11304,7 +11304,8 @@ static void mixer_notify_update(PLCI *plci, byte others)
+ ((CAPI_MSG *) msg)->header.ncci = 0;
+ ((CAPI_MSG *) msg)->info.facility_req.Selector = SELECTOR_LINE_INTERCONNECT;
+ ((CAPI_MSG *) msg)->info.facility_req.structs[0] = 3;
+- PUT_WORD(&(((CAPI_MSG *) msg)->info.facility_req.structs[1]), LI_REQ_SILENT_UPDATE);
++ ((CAPI_MSG *) msg)->info.facility_req.structs[1] = LI_REQ_SILENT_UPDATE & 0xff;
++ ((CAPI_MSG *) msg)->info.facility_req.structs[2] = LI_REQ_SILENT_UPDATE >> 8;
+ ((CAPI_MSG *) msg)->info.facility_req.structs[3] = 0;
+ w = api_put(notify_plci->appl, (CAPI_MSG *) msg);
+ if (w != _QUEUE_FULL)
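
The eicon hunk replaces a 16-bit PUT_WORD store into the byte array with two explicit byte stores in little-endian order. A tiny sketch of that two-byte store (0x0205 is a placeholder value; the real LI_REQ_SILENT_UPDATE constant is not shown in this patch):

#include <stdio.h>

static void put_word_bytes(unsigned char *p, unsigned short v)
{
	p[0] = v & 0xff; /* low byte first */
	p[1] = v >> 8;   /* then high byte */
}

int main(void)
{
	unsigned char structs[4] = { 3, 0, 0, 0 };

	put_word_bytes(&structs[1], 0x0205);
	printf("%02x %02x %02x %02x\n",
	       structs[0], structs[1], structs[2], structs[3]);
	return 0;
}
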
+diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
+index d18be19c96cd..db62d7ede7fe 100644
+--- a/drivers/media/i2c/Kconfig
++++ b/drivers/media/i2c/Kconfig
+@@ -590,6 +590,7 @@ config VIDEO_S5K6AA
+ config VIDEO_S5K4ECGX
+ tristate "Samsung S5K4ECGX sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
++ select CRC32
+ ---help---
+ This is a V4L2 sensor-level driver for Samsung S5K4ECGX 5M
+ camera sensor with an embedded SoC image signal processor.
+diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
+index 03761c6f472f..8e7c78567138 100644
+--- a/drivers/media/usb/siano/smsusb.c
++++ b/drivers/media/usb/siano/smsusb.c
+@@ -206,20 +206,28 @@ static int smsusb_start_streaming(struct smsusb_device_t *dev)
+ static int smsusb_sendrequest(void *context, void *buffer, size_t size)
+ {
+ struct smsusb_device_t *dev = (struct smsusb_device_t *) context;
+- struct sms_msg_hdr *phdr = (struct sms_msg_hdr *) buffer;
+- int dummy;
++ struct sms_msg_hdr *phdr;
++ int dummy, ret;
+
+ if (dev->state != SMSUSB_ACTIVE)
+ return -ENOENT;
+
++ phdr = kmalloc(size, GFP_KERNEL);
++ if (!phdr)
++ return -ENOMEM;
++ memcpy(phdr, buffer, size);
++
+ sms_debug("sending %s(%d) size: %d",
+ smscore_translate_msg(phdr->msg_type), phdr->msg_type,
+ phdr->msg_length);
+
+ smsendian_handle_tx_message((struct sms_msg_data *) phdr);
+- smsendian_handle_message_header((struct sms_msg_hdr *)buffer);
+- return usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2),
+- buffer, size, &dummy, 1000);
++ smsendian_handle_message_header((struct sms_msg_hdr *)phdr);
++ ret = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2),
++ phdr, size, &dummy, 1000);
++
++ kfree(phdr);
++ return ret;
+ }
+
+ static char *smsusb1_fw_lkup[] = {
+diff --git a/drivers/mfd/pm8921-core.c b/drivers/mfd/pm8921-core.c
+index a6841f77aa5e..484fe66e6c88 100644
+--- a/drivers/mfd/pm8921-core.c
++++ b/drivers/mfd/pm8921-core.c
+@@ -171,11 +171,12 @@ static int pm8921_remove(struct platform_device *pdev)
+ drvdata = platform_get_drvdata(pdev);
+ if (drvdata)
+ pmic = drvdata->pm_chip_data;
+- if (pmic)
++ if (pmic) {
+ mfd_remove_devices(pmic->dev);
+- if (pmic->irq_chip) {
+- pm8xxx_irq_exit(pmic->irq_chip);
+- pmic->irq_chip = NULL;
++ if (pmic->irq_chip) {
++ pm8xxx_irq_exit(pmic->irq_chip);
++ pmic->irq_chip = NULL;
++ }
+ }
+
+ return 0;
+diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
+index b374be7891a2..b905e5e840f7 100644
+--- a/drivers/net/can/c_can/c_can_pci.c
++++ b/drivers/net/can/c_can/c_can_pci.c
+@@ -109,6 +109,7 @@ static int c_can_pci_probe(struct pci_dev *pdev,
+
+ dev->irq = pdev->irq;
+ priv->base = addr;
++ priv->device = &pdev->dev;
+
+ if (!c_can_pci_data->freq) {
+ dev_err(&pdev->dev, "no clock frequency defined\n");
+diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
+index 3a349a22d5bc..0269e41b7659 100644
+--- a/drivers/net/can/ti_hecc.c
++++ b/drivers/net/can/ti_hecc.c
+@@ -962,7 +962,12 @@ static int ti_hecc_probe(struct platform_device *pdev)
+ netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll,
+ HECC_DEF_NAPI_WEIGHT);
+
+- clk_enable(priv->clk);
++ err = clk_prepare_enable(priv->clk);
++ if (err) {
++ dev_err(&pdev->dev, "clk_prepare_enable() failed\n");
++ goto probe_exit_clk;
++ }
++
+ err = register_candev(ndev);
+ if (err) {
+ dev_err(&pdev->dev, "register_candev() failed\n");
+@@ -995,7 +1000,7 @@ static int ti_hecc_remove(struct platform_device *pdev)
+ struct ti_hecc_priv *priv = netdev_priv(ndev);
+
+ unregister_candev(ndev);
+- clk_disable(priv->clk);
++ clk_disable_unprepare(priv->clk);
+ clk_put(priv->clk);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ iounmap(priv->base);
+@@ -1020,7 +1025,7 @@ static int ti_hecc_suspend(struct platform_device *pdev, pm_message_t state)
+ hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
+ priv->can.state = CAN_STATE_SLEEPING;
+
+- clk_disable(priv->clk);
++ clk_disable_unprepare(priv->clk);
+
+ return 0;
+ }
+@@ -1029,8 +1034,11 @@ static int ti_hecc_resume(struct platform_device *pdev)
+ {
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct ti_hecc_priv *priv = netdev_priv(dev);
++ int err;
+
+- clk_enable(priv->clk);
++ err = clk_prepare_enable(priv->clk);
++ if (err)
++ return err;
+
+ hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
+index d3d7ede27ef1..c0f7328adb13 100644
+--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
++++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
+@@ -553,6 +553,9 @@ static int gfar_spauseparam(struct net_device *dev,
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 oldadv, newadv;
+
++ if (!phydev)
++ return -ENODEV;
++
+ if (!(phydev->supported & SUPPORTED_Pause) ||
+ (!(phydev->supported & SUPPORTED_Asym_Pause) &&
+ (epause->rx_pause != epause->tx_pause)))
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 98ce4feb9a79..2f6da225fab4 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -655,7 +655,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ size_t linear;
+
+ if (q->flags & IFF_VNET_HDR) {
+- vnet_hdr_len = q->vnet_hdr_sz;
++ vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
+
+ err = -EINVAL;
+ if (len < vnet_hdr_len)
+@@ -792,7 +792,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
+
+ if (q->flags & IFF_VNET_HDR) {
+ struct virtio_net_hdr vnet_hdr;
+- vnet_hdr_len = q->vnet_hdr_sz;
++ vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
+ if ((len -= vnet_hdr_len) < 0)
+ return -EINVAL;
+
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 813750d09680..ade348b7b19e 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -997,9 +997,11 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+ }
+
+ if (tun->flags & TUN_VNET_HDR) {
+- if (len < tun->vnet_hdr_sz)
++ int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
++
++ if (len < vnet_hdr_sz)
+ return -EINVAL;
+- len -= tun->vnet_hdr_sz;
++ len -= vnet_hdr_sz;
+
+ if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
+ return -EFAULT;
+@@ -1010,7 +1012,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+
+ if (gso.hdr_len > len)
+ return -EINVAL;
+- offset += tun->vnet_hdr_sz;
++ offset += vnet_hdr_sz;
+ }
+
+ if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
+@@ -1187,15 +1189,19 @@ static ssize_t tun_put_user(struct tun_struct *tun,
+ ssize_t total = 0;
+ int vlan_offset = 0, copied;
+ int vlan_hlen = 0;
++ int vnet_hdr_sz = 0;
+
+ if (vlan_tx_tag_present(skb))
+ vlan_hlen = VLAN_HLEN;
+
++ if (tun->flags & TUN_VNET_HDR)
++ vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
++
+ if (!(tun->flags & TUN_NO_PI)) {
+ if ((len -= sizeof(pi)) < 0)
+ return -EINVAL;
+
+- if (len < skb->len) {
++ if (len < skb->len + vlan_hlen + vnet_hdr_sz) {
+ /* Packet will be striped */
+ pi.flags |= TUN_PKT_STRIP;
+ }
+@@ -1205,9 +1211,9 @@ static ssize_t tun_put_user(struct tun_struct *tun,
+ total += sizeof(pi);
+ }
+
+- if (tun->flags & TUN_VNET_HDR) {
++ if (vnet_hdr_sz) {
+ struct virtio_net_hdr gso = { 0 }; /* no info leak */
+- if ((len -= tun->vnet_hdr_sz) < 0)
++ if ((len -= vnet_hdr_sz) < 0)
+ return -EINVAL;
+
+ if (skb_is_gso(skb)) {
+@@ -1251,7 +1257,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
+ if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
+ sizeof(gso))))
+ return -EFAULT;
+- total += tun->vnet_hdr_sz;
++ total += vnet_hdr_sz;
+ }
+
+ copied = total;
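
The tun and macvtap hunks snapshot vnet_hdr_sz exactly once per operation, so the bounds check and the later size arithmetic can never observe two different values if another thread changes the header size in between. A minimal sketch of that single-snapshot pattern, with a C11 atomic standing in for the kernel's READ_ONCE():

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int vnet_hdr_sz = 10; /* may be updated concurrently */

static int payload_len(int len)
{
	/* one snapshot, like READ_ONCE(tun->vnet_hdr_sz) */
	int hdr = atomic_load_explicit(&vnet_hdr_sz, memory_order_relaxed);

	if (len < hdr)    /* check against the snapshot ... */
		return -1;
	return len - hdr; /* ... and compute with the same snapshot */
}

int main(void)
{
	printf("payload = %d\n", payload_len(64));
	return 0;
}
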
+diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
+index 756bb3a8e02c..3651f3cd474e 100644
+--- a/drivers/net/usb/cdc_ether.c
++++ b/drivers/net/usb/cdc_ether.c
+@@ -487,6 +487,7 @@ static const struct driver_info wwan_info = {
+ #define ZTE_VENDOR_ID 0x19D2
+ #define DELL_VENDOR_ID 0x413C
+ #define REALTEK_VENDOR_ID 0x0bda
++#define HP_VENDOR_ID 0x03f0
+
+ static const struct usb_device_id products[] = {
+ /* BLACKLIST !!
+@@ -633,6 +634,13 @@ static const struct usb_device_id products[] = {
+ .driver_info = 0,
+ },
+
++/* HP lt2523 (Novatel E371) - handled by qmi_wwan */
++{
++ USB_DEVICE_AND_INTERFACE_INFO(HP_VENDOR_ID, 0x421d, USB_CLASS_COMM,
++ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
++ .driver_info = 0,
++},
++
+ /* AnyDATA ADU960S - handled by qmi_wwan */
+ {
+ USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM,
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 415bbe0365c6..40eabbb4bcd7 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -560,6 +560,13 @@ static const struct usb_device_id products[] = {
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
++ { /* HP lt2523 (Novatel E371) */
++ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d,
++ USB_CLASS_COMM,
++ USB_CDC_SUBCLASS_ETHERNET,
++ USB_CDC_PROTO_NONE),
++ .driver_info = (unsigned long)&qmi_wwan_info,
++ },
+ { /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
+ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
+index 832560aa2274..2719ca31b469 100644
+--- a/drivers/net/wireless/rtlwifi/usb.c
++++ b/drivers/net/wireless/rtlwifi/usb.c
+@@ -830,6 +830,7 @@ static void rtl_usb_stop(struct ieee80211_hw *hw)
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
++ struct urb *urb;
+
+ /* should after adapter start and interrupt enable. */
+ set_hal_stop(rtlhal);
+@@ -837,6 +838,23 @@ static void rtl_usb_stop(struct ieee80211_hw *hw)
+ /* Enable software */
+ SET_USB_STOP(rtlusb);
+ rtl_usb_deinit(hw);
++
++ /* free pre-allocated URBs from rtl_usb_start() */
++ usb_kill_anchored_urbs(&rtlusb->rx_submitted);
++
++ tasklet_kill(&rtlusb->rx_work_tasklet);
++ cancel_work_sync(&rtlpriv->works.lps_change_work);
++
++ flush_workqueue(rtlpriv->works.rtl_wq);
++
++ skb_queue_purge(&rtlusb->rx_queue);
++
++ while ((urb = usb_get_from_anchor(&rtlusb->rx_cleanup_urbs))) {
++ usb_free_coherent(urb->dev, urb->transfer_buffer_length,
++ urb->transfer_buffer, urb->transfer_dma);
++ usb_free_urb(urb);
++ }
++
+ rtlpriv->cfg->ops->hw_disable(hw);
+ }
+
+diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
+index 1324c3b93ee5..d2698834d446 100644
+--- a/drivers/pci/host/pci-mvebu.c
++++ b/drivers/pci/host/pci-mvebu.c
+@@ -266,6 +266,58 @@ static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port,
+ return ret;
+ }
+
++/*
++ * Remove windows, starting from the largest ones to the smallest
++ * ones.
++ */
++static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port,
++ phys_addr_t base, size_t size)
++{
++ while (size) {
++ size_t sz = 1 << (fls(size) - 1);
++
++ mvebu_mbus_del_window(base, sz);
++ base += sz;
++ size -= sz;
++ }
++}
++
++/*
++ * MBus windows can only have a power of two size, but PCI BARs do not
++ * have this constraint. Therefore, we have to split the PCI BAR into
++ * areas each having a power of two size. We start from the largest
++ * one (i.e highest order bit set in the size).
++ */
++static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
++ unsigned int target, unsigned int attribute,
++ phys_addr_t base, size_t size,
++ phys_addr_t remap)
++{
++ size_t size_mapped = 0;
++
++ while (size) {
++ size_t sz = 1 << (fls(size) - 1);
++ int ret;
++
++ ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base,
++ sz, remap);
++ if (ret) {
++ dev_err(&port->pcie->pdev->dev,
+"Could not create MBus window at 0x%x, size 0x%x: %d\n",
++ base, sz, ret);
++ mvebu_pcie_del_windows(port, base - size_mapped,
++ size_mapped);
++ return;
++ }
++
++ size -= sz;
++ size_mapped += sz;
++ base += sz;
++ if (remap != MVEBU_MBUS_NO_REMAP)
++ remap += sz;
++ }
++}
++
+ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
+ {
+ phys_addr_t iobase;
+@@ -276,8 +328,8 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
+
+ /* If a window was configured, remove it */
+ if (port->iowin_base) {
+- mvebu_mbus_del_window(port->iowin_base,
+- port->iowin_size);
++ mvebu_pcie_del_windows(port, port->iowin_base,
++ port->iowin_size);
+ port->iowin_base = 0;
+ port->iowin_size = 0;
+ }
+@@ -299,9 +351,9 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
+ (port->bridge.iolimitupper << 16)) -
+ iobase) + 1;
+
+- mvebu_mbus_add_window_remap_by_id(port->io_target, port->io_attr,
+- port->iowin_base, port->iowin_size,
+- iobase);
++ mvebu_pcie_add_windows(port, port->io_target, port->io_attr,
++ port->iowin_base, port->iowin_size,
++ iobase);
+
+ pci_ioremap_io(iobase, port->iowin_base);
+ }
+@@ -313,8 +365,8 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
+
+ /* If a window was configured, remove it */
+ if (port->memwin_base) {
+- mvebu_mbus_del_window(port->memwin_base,
+- port->memwin_size);
++ mvebu_pcie_del_windows(port, port->memwin_base,
++ port->memwin_size);
+ port->memwin_base = 0;
+ port->memwin_size = 0;
+ }
+@@ -333,8 +385,9 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
+ (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) -
+ port->memwin_base + 1;
+
+- mvebu_mbus_add_window_by_id(port->mem_target, port->mem_attr,
+- port->memwin_base, port->memwin_size);
++ mvebu_pcie_add_windows(port, port->mem_target, port->mem_attr,
++ port->memwin_base, port->memwin_size,
++ MVEBU_MBUS_NO_REMAP);
+ }
+
+ /*
+@@ -677,14 +730,21 @@ resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
+
+ /*
+ * On the PCI-to-PCI bridge side, the I/O windows must have at
+- * least a 64 KB size and be aligned on their size, and the
+- * memory windows must have at least a 1 MB size and be
+- * aligned on their size
++ * least a 64 KB size and the memory windows must have at
++ * least a 1 MB size. Moreover, MBus windows need to have a
++ * base address aligned on their size, and their size must be
++ * a power of two. This means that if the BAR doesn't have a
++ * power of two size, several MBus windows will actually be
++ * created. We need to ensure that the biggest MBus window
++ * (which will be the first one) is aligned on its size, which
++ * explains the rounddown_pow_of_two() being done here.
+ */
+ if (res->flags & IORESOURCE_IO)
+- return round_up(start, max((resource_size_t)SZ_64K, size));
++ return round_up(start, max_t(resource_size_t, SZ_64K,
++ rounddown_pow_of_two(size)));
+ else if (res->flags & IORESOURCE_MEM)
+- return round_up(start, max((resource_size_t)SZ_1M, size));
++ return round_up(start, max_t(resource_size_t, SZ_1M,
++ rounddown_pow_of_two(size)));
+ else
+ return start;
+ }
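
mvebu_pcie_add_windows() carves an arbitrarily sized BAR into power-of-two MBus windows, largest chunk first, exactly as the new comment describes. A runnable sketch of the same carving loop (fls() is emulated with a GCC builtin; the base and size are made-up example values):

#include <stdio.h>

static int fls_ulong(unsigned long x)
{
	return x ? 8 * (int)sizeof(long) - __builtin_clzl(x) : 0;
}

int main(void)
{
	unsigned long base = 0xe0000000UL;
	unsigned long size = 0xb00000UL; /* 11 MB: not a power of two */

	while (size) {
		unsigned long sz = 1UL << (fls_ulong(size) - 1);

		printf("window at 0x%08lx, size 0x%lx\n", base, sz);
		base += sz;
		size -= sz;
	}
	return 0;
}

For 11 MB this prints an 8 MB, a 2 MB and a 1 MB window, which is why the alignment hook above rounds the start down to the largest power of two contained in the size.
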
+diff --git a/drivers/platform/goldfish/pdev_bus.c b/drivers/platform/goldfish/pdev_bus.c
+index 92cc4cfafde5..6bcd57cb2f75 100644
+--- a/drivers/platform/goldfish/pdev_bus.c
++++ b/drivers/platform/goldfish/pdev_bus.c
+@@ -153,23 +153,26 @@ static int goldfish_new_pdev(void)
+ static irqreturn_t goldfish_pdev_bus_interrupt(int irq, void *dev_id)
+ {
+ irqreturn_t ret = IRQ_NONE;
++
+ while (1) {
+ u32 op = readl(pdev_bus_base + PDEV_BUS_OP);
+- switch (op) {
+- case PDEV_BUS_OP_DONE:
+- return IRQ_NONE;
+
++ switch (op) {
+ case PDEV_BUS_OP_REMOVE_DEV:
+ goldfish_pdev_remove();
++ ret = IRQ_HANDLED;
+ break;
+
+ case PDEV_BUS_OP_ADD_DEV:
+ goldfish_new_pdev();
++ ret = IRQ_HANDLED;
+ break;
++
++ case PDEV_BUS_OP_DONE:
++ default:
++ return ret;
+ }
+- ret = IRQ_HANDLED;
+ }
+- return ret;
+ }
+
+ static int goldfish_pdev_bus_probe(struct platform_device *pdev)
+diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
+index 6b18aba82cfa..018abbe3ea07 100644
+--- a/drivers/platform/x86/intel_mid_powerbtn.c
++++ b/drivers/platform/x86/intel_mid_powerbtn.c
+@@ -78,8 +78,8 @@ static int mfld_pb_probe(struct platform_device *pdev)
+
+ input_set_capability(input, EV_KEY, KEY_POWER);
+
+- error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_NO_SUSPEND,
+- DRIVER_NAME, input);
++ error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_NO_SUSPEND |
++ IRQF_ONESHOT, DRIVER_NAME, input);
+ if (error) {
+ dev_err(&pdev->dev, "Unable to request irq %d for mfld power"
+ "button\n", irq);
+diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
+index ff20d90ea8e7..2062937a3e0e 100644
+--- a/drivers/rtc/interface.c
++++ b/drivers/rtc/interface.c
+@@ -773,9 +773,23 @@ EXPORT_SYMBOL_GPL(rtc_irq_set_freq);
+ */
+ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
+ {
++ struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
++ struct rtc_time tm;
++ ktime_t now;
++
+ timer->enabled = 1;
++ __rtc_read_time(rtc, &tm);
++ now = rtc_tm_to_ktime(tm);
++
++ /* Skip over expired timers */
++ while (next) {
++ if (next->expires.tv64 >= now.tv64)
++ break;
++ next = timerqueue_iterate_next(next);
++ }
++
+ timerqueue_add(&rtc->timerqueue, &timer->node);
+- if (&timer->node == timerqueue_getnext(&rtc->timerqueue)) {
++ if (!next) {
+ struct rtc_wkalrm alarm;
+ int err;
+ alarm.time = rtc_ktime_to_tm(timer->node.expires);
+diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
+index 6065212fdeed..36cf11cafee7 100644
+--- a/drivers/s390/scsi/zfcp_fsf.c
++++ b/drivers/s390/scsi/zfcp_fsf.c
+@@ -1584,7 +1584,7 @@ out:
+ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
+ {
+ struct zfcp_qdio *qdio = wka_port->adapter->qdio;
+- struct zfcp_fsf_req *req = NULL;
++ struct zfcp_fsf_req *req;
+ int retval = -EIO;
+
+ spin_lock_irq(&qdio->req_q_lock);
+@@ -1613,7 +1613,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
+ zfcp_fsf_req_free(req);
+ out:
+ spin_unlock_irq(&qdio->req_q_lock);
+- if (req && !IS_ERR(req))
++ if (!retval)
+ zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
+ return retval;
+ }
+@@ -1639,7 +1639,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
+ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
+ {
+ struct zfcp_qdio *qdio = wka_port->adapter->qdio;
+- struct zfcp_fsf_req *req = NULL;
++ struct zfcp_fsf_req *req;
+ int retval = -EIO;
+
+ spin_lock_irq(&qdio->req_q_lock);
+@@ -1668,7 +1668,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
+ zfcp_fsf_req_free(req);
+ out:
+ spin_unlock_irq(&qdio->req_q_lock);
+- if (req && !IS_ERR(req))
++ if (!retval)
+ zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
+ return retval;
+ }
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index aeff39767588..f3f2dc86fda7 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -1025,8 +1025,12 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
+ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
+ {
+ struct request *rq = cmd->request;
++ int error;
+
+- int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
++ if (WARN_ON_ONCE(!rq->nr_phys_segments))
++ return -EINVAL;
++
++ error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
+ if (error)
+ goto err_exit;
+
+@@ -1128,11 +1132,7 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
+ * submit a request without an attached bio.
+ */
+ if (req->bio) {
+- int ret;
+-
+- BUG_ON(!req->nr_phys_segments);
+-
+- ret = scsi_init_io(cmd, GFP_ATOMIC);
++ int ret = scsi_init_io(cmd, GFP_ATOMIC);
+ if (unlikely(ret))
+ return ret;
+ } else {
+@@ -1176,11 +1176,6 @@ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
+ return ret;
+ }
+
+- /*
+- * Filesystem requests must transfer data.
+- */
+- BUG_ON(!req->nr_phys_segments);
+-
+ cmd = scsi_get_cmd_from_req(sdev, req);
+ if (unlikely(!cmd))
+ return BLKPREP_DEFER;
+diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
+index 8acff44a9e75..3f6c96cf8ebe 100644
+--- a/drivers/staging/vt6655/hostap.c
++++ b/drivers/staging/vt6655/hostap.c
+@@ -143,7 +143,8 @@ static int hostap_disable_hostapd(PSDevice pDevice, int rtnl_locked)
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n",
+ pDevice->dev->name, pDevice->apdev->name);
+ }
+- free_netdev(pDevice->apdev);
++ if (pDevice->apdev)
++ free_netdev(pDevice->apdev);
+ pDevice->apdev = NULL;
+ pDevice->bEnable8021x = false;
+ pDevice->bEnableHostWEP = false;
+diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
+index c699a3058b39..cfffdd20e435 100644
+--- a/drivers/staging/vt6656/hostap.c
++++ b/drivers/staging/vt6656/hostap.c
+@@ -133,7 +133,8 @@ static int hostap_disable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n",
+ pDevice->dev->name, pDevice->apdev->name);
+ }
+- free_netdev(pDevice->apdev);
++ if (pDevice->apdev)
++ free_netdev(pDevice->apdev);
+ pDevice->apdev = NULL;
+ pDevice->bEnable8021x = false;
+ pDevice->bEnableHostWEP = false;
+diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
+index 401fc7097935..552ac2d6fdc4 100644
+--- a/drivers/target/target_core_sbc.c
++++ b/drivers/target/target_core_sbc.c
+@@ -367,6 +367,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
+ int *post_ret)
+ {
+ struct se_device *dev = cmd->se_dev;
++ sense_reason_t ret = TCM_NO_SENSE;
+
+ /*
+ * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
+@@ -374,9 +375,12 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
+ * sent to the backend driver.
+ */
+ spin_lock_irq(&cmd->t_state_lock);
+- if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) {
++ if (cmd->transport_state & CMD_T_SENT) {
+ cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
+ *post_ret = 1;
++
++ if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
++ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+ spin_unlock_irq(&cmd->t_state_lock);
+
+@@ -386,7 +390,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
+ */
+ up(&dev->caw_sem);
+
+- return TCM_NO_SENSE;
++ return ret;
+ }
+
+ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
+diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
+index c0f2b3e5452f..90ed37e45006 100644
+--- a/drivers/tty/serial/msm_serial.c
++++ b/drivers/tty/serial/msm_serial.c
+@@ -973,6 +973,7 @@ static struct of_device_id msm_match_table[] = {
+ { .compatible = "qcom,msm-uartdm" },
+ {}
+ };
++MODULE_DEVICE_TABLE(of, msm_match_table);
+
+ static struct platform_driver msm_platform_driver = {
+ .remove = msm_serial_remove,
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index c78c4f7efb40..ea93b35b1c6d 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -514,19 +514,18 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
+ acm->control->needs_remote_wakeup = 1;
+
+ acm->ctrlurb->dev = acm->dev;
+- if (usb_submit_urb(acm->ctrlurb, GFP_KERNEL)) {
++ retval = usb_submit_urb(acm->ctrlurb, GFP_KERNEL);
++ if (retval) {
+ dev_err(&acm->control->dev,
+ "%s - usb_submit_urb(ctrl irq) failed\n", __func__);
+ goto error_submit_urb;
+ }
+
+ acm->ctrlout = ACM_CTRL_DTR | ACM_CTRL_RTS;
+- if (acm_set_control(acm, acm->ctrlout) < 0 &&
+- (acm->ctrl_caps & USB_CDC_CAP_LINE))
++ retval = acm_set_control(acm, acm->ctrlout);
++ if (retval < 0 && (acm->ctrl_caps & USB_CDC_CAP_LINE))
+ goto error_set_control;
+
+- usb_autopm_put_interface(acm->control);
+-
+ /*
+ * Unthrottle device in case the TTY was closed while throttled.
+ */
+@@ -535,9 +534,12 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
+ acm->throttle_req = 0;
+ spin_unlock_irq(&acm->read_lock);
+
+- if (acm_submit_read_urbs(acm, GFP_KERNEL))
++ retval = acm_submit_read_urbs(acm, GFP_KERNEL);
++ if (retval)
+ goto error_submit_read_urbs;
+
++ usb_autopm_put_interface(acm->control);
++
+ mutex_unlock(&acm->mutex);
+
+ return 0;
+@@ -554,7 +556,8 @@ error_submit_urb:
+ error_get_interface:
+ disconnected:
+ mutex_unlock(&acm->mutex);
+- return retval;
++
++ return usb_translate_errors(retval);
+ }
+
+ static void acm_port_destruct(struct tty_port *port)
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index ba39d978583c..094fe92ac21f 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -37,6 +37,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+ /* CBM - Flash disk */
+ { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME },
+
++ /* WORLDE easy key (easykey.25) MIDI controller */
++ { USB_DEVICE(0x0218, 0x0401), .driver_info =
++ USB_QUIRK_CONFIG_INTF_STRINGS },
++
+ /* HP 5300/5370C scanner */
+ { USB_DEVICE(0x03f0, 0x0701), .driver_info =
+ USB_QUIRK_STRING_FETCH_255 },
+diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
+index bc77e955cbef..1f4c116843fc 100644
+--- a/drivers/usb/serial/ark3116.c
++++ b/drivers/usb/serial/ark3116.c
+@@ -100,10 +100,17 @@ static int ark3116_read_reg(struct usb_serial *serial,
+ usb_rcvctrlpipe(serial->dev, 0),
+ 0xfe, 0xc0, 0, reg,
+ buf, 1, ARK_TIMEOUT);
+- if (result < 0)
++ if (result < 1) {
++ dev_err(&serial->interface->dev,
++ "failed to read register %u: %d\n",
++ reg, result);
++ if (result >= 0)
++ result = -EIO;
++
+ return result;
+- else
+- return buf[0];
++ }
++
++ return buf[0];
+ }
+
+ static inline int calc_divisor(int bps)
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 8b3e77716c4a..95544c6323a7 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -171,6 +171,8 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
+ { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
+ { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
++ { USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */
++ { USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */
+ { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
+ { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+ { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index d1b76b0a67df..a099f8eafd9a 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -1829,8 +1829,6 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
+
+ mutex_init(&priv->cfg_lock);
+
+- priv->flags = ASYNC_LOW_LATENCY;
+-
+ if (quirk && quirk->port_probe)
+ quirk->port_probe(priv);
+
+@@ -2104,6 +2102,20 @@ static int ftdi_process_packet(struct usb_serial_port *port,
+ priv->prev_status = status;
+ }
+
++ /* save if the transmitter is empty or not */
++ if (packet[1] & FTDI_RS_TEMT)
++ priv->transmit_empty = 1;
++ else
++ priv->transmit_empty = 0;
++
++ len -= 2;
++ if (!len)
++ return 0; /* status only */
++
++ /*
++ * Break and error status must only be processed for packets with
++ * data payload to avoid over-reporting.
++ */
+ flag = TTY_NORMAL;
+ if (packet[1] & FTDI_RS_ERR_MASK) {
+ /* Break takes precedence over parity, which takes precedence
+@@ -2126,15 +2138,6 @@ static int ftdi_process_packet(struct usb_serial_port *port,
+ }
+ }
+
+- /* save if the transmitter is empty or not */
+- if (packet[1] & FTDI_RS_TEMT)
+- priv->transmit_empty = 1;
+- else
+- priv->transmit_empty = 0;
+-
+- len -= 2;
+- if (!len)
+- return 0; /* status only */
+ port->icount.rx += len;
+ ch = packet + 2;
+
+@@ -2465,8 +2468,12 @@ static int ftdi_get_modem_status(struct usb_serial_port *port,
+ FTDI_SIO_GET_MODEM_STATUS_REQUEST_TYPE,
+ 0, priv->interface,
+ buf, len, WDR_TIMEOUT);
+- if (ret < 0) {
++
++ /* NOTE: We allow short responses and handle that below. */
++ if (ret < 1) {
+ dev_err(&port->dev, "failed to get modem status: %d\n", ret);
++ if (ret >= 0)
++ ret = -EIO;
+ ret = usb_translate_errors(ret);
+ goto out;
+ }
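
The reordered ftdi_sio code records the transmitter-empty bit and returns early for two-byte status-only packets before any break/error handling runs, so line errors are only reported for packets that actually carry data. A condensed sketch of that ordering (the bit masks are stand-ins for FTDI_RS_TEMT and FTDI_RS_ERR_MASK):

#include <stdio.h>

/* returns the number of data bytes in the packet; 0 means status only */
static int process_packet(const unsigned char *packet, int len, int *temt)
{
	*temt = (packet[1] & 0x40) != 0; /* transmitter empty, always saved */

	len -= 2;
	if (!len)
		return 0; /* status only: no error processing */

	if (packet[1] & 0x1e) /* OE|PE|FE|BI, only checked with data */
		printf("line error on %d data bytes\n", len);
	return len;
}

int main(void)
{
	unsigned char status_only[2] = { 0x01, 0x40 };
	unsigned char with_data[4] = { 0x01, 0x10, 'h', 'i' };
	int temt;

	printf("%d\n", process_packet(status_only, 2, &temt));
	printf("%d\n", process_packet(with_data, 4, &temt));
	return 0;
}
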
+diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
+index 0b1659026d85..fc052e4cc5b2 100644
+--- a/drivers/usb/serial/mos7840.c
++++ b/drivers/usb/serial/mos7840.c
+@@ -1031,6 +1031,7 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
+ * (can't set it up in mos7840_startup as the structures *
+ * were not set up at that time.) */
+ if (port0->open_ports == 1) {
++ /* FIXME: Buffer never NULL, so URB is not submitted. */
+ if (serial->port[0]->interrupt_in_buffer == NULL) {
+ /* set up interrupt urb */
+ usb_fill_int_urb(serial->port[0]->interrupt_in_urb,
+@@ -2195,7 +2196,8 @@ static int mos7840_calc_num_ports(struct usb_serial *serial)
+ static int mos7840_attach(struct usb_serial *serial)
+ {
+ if (serial->num_bulk_in < serial->num_ports ||
+- serial->num_bulk_out < serial->num_ports) {
++ serial->num_bulk_out < serial->num_ports ||
++ serial->num_interrupt_in < 1) {
+ dev_err(&serial->interface->dev, "missing endpoints\n");
+ return -ENODEV;
+ }
+diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
+index df495ea0d977..bb9c07a79b4f 100644
+--- a/drivers/usb/serial/opticon.c
++++ b/drivers/usb/serial/opticon.c
+@@ -143,7 +143,7 @@ static int opticon_open(struct tty_struct *tty, struct usb_serial_port *port)
+ usb_clear_halt(port->serial->dev, port->read_urb->pipe);
+
+ res = usb_serial_generic_open(tty, port);
+- if (!res)
++ if (res)
+ return res;
+
+ /* Request CTS line state, sometimes during opening the current
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 99dff08b560b..49b668da6cf0 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -527,6 +527,12 @@ static void option_instat_callback(struct urb *urb);
+ #define VIATELECOM_VENDOR_ID 0x15eb
+ #define VIATELECOM_PRODUCT_CDS7 0x0001
+
++/* WeTelecom products */
++#define WETELECOM_VENDOR_ID 0x22de
++#define WETELECOM_PRODUCT_WMD200 0x6801
++#define WETELECOM_PRODUCT_6802 0x6802
++#define WETELECOM_PRODUCT_WMD300 0x6803
++
+ /* some devices interfaces need special handling due to a number of reasons */
+ enum option_blacklist_reason {
+ OPTION_BLACKLIST_NONE = 0,
+@@ -1648,7 +1654,79 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffe9, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff42, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff43, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff44, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff45, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff46, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff47, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff48, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff49, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4a, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4b, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4c, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4d, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4e, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4f, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff50, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff51, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff52, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff53, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff54, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff55, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff56, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff57, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff58, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff59, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5a, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5b, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5c, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5d, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5e, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5f, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff60, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff61, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff62, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff63, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff64, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff65, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff66, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff67, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff68, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff69, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6a, 0xff, 0xff, 0xff) },
1548 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6b, 0xff, 0xff, 0xff) },
1549 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6c, 0xff, 0xff, 0xff) },
1550 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6d, 0xff, 0xff, 0xff) },
1551 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6e, 0xff, 0xff, 0xff) },
1552 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6f, 0xff, 0xff, 0xff) },
1553 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff70, 0xff, 0xff, 0xff) },
1554 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff71, 0xff, 0xff, 0xff) },
1555 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff72, 0xff, 0xff, 0xff) },
1556 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff73, 0xff, 0xff, 0xff) },
1557 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff74, 0xff, 0xff, 0xff) },
1558 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff75, 0xff, 0xff, 0xff) },
1559 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff76, 0xff, 0xff, 0xff) },
1560 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff77, 0xff, 0xff, 0xff) },
1561 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff78, 0xff, 0xff, 0xff) },
1562 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff79, 0xff, 0xff, 0xff) },
1563 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7a, 0xff, 0xff, 0xff) },
1564 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7b, 0xff, 0xff, 0xff) },
1565 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7c, 0xff, 0xff, 0xff) },
1566 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7d, 0xff, 0xff, 0xff) },
1567 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7e, 0xff, 0xff, 0xff) },
1568 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7f, 0xff, 0xff, 0xff) },
1569 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff80, 0xff, 0xff, 0xff) },
1570 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff81, 0xff, 0xff, 0xff) },
1571 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff82, 0xff, 0xff, 0xff) },
1572 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff83, 0xff, 0xff, 0xff) },
1573 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff84, 0xff, 0xff, 0xff) },
1574 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff85, 0xff, 0xff, 0xff) },
1575 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff86, 0xff, 0xff, 0xff) },
1576 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff87, 0xff, 0xff, 0xff) },
1577 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff88, 0xff, 0xff, 0xff) },
1578 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff89, 0xff, 0xff, 0xff) },
1579 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8a, 0xff, 0xff, 0xff) },
1580 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8b, 0xff, 0xff, 0xff) },
1581 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8c, 0xff, 0xff, 0xff) },
1582 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8d, 0xff, 0xff, 0xff) },
1583 +@@ -1659,7 +1737,61 @@ static const struct usb_device_id option_ids[] = {
1584 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff92, 0xff, 0xff, 0xff) },
1585 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff93, 0xff, 0xff, 0xff) },
1586 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff94, 0xff, 0xff, 0xff) },
1587 +-
1588 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff9f, 0xff, 0xff, 0xff) },
1589 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa0, 0xff, 0xff, 0xff) },
1590 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa1, 0xff, 0xff, 0xff) },
1591 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa2, 0xff, 0xff, 0xff) },
1592 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa3, 0xff, 0xff, 0xff) },
1593 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa4, 0xff, 0xff, 0xff) },
1594 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa5, 0xff, 0xff, 0xff) },
1595 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa6, 0xff, 0xff, 0xff) },
1596 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa7, 0xff, 0xff, 0xff) },
1597 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa8, 0xff, 0xff, 0xff) },
1598 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa9, 0xff, 0xff, 0xff) },
1599 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffaa, 0xff, 0xff, 0xff) },
1600 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffab, 0xff, 0xff, 0xff) },
1601 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffac, 0xff, 0xff, 0xff) },
1602 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffae, 0xff, 0xff, 0xff) },
1603 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffaf, 0xff, 0xff, 0xff) },
1604 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb0, 0xff, 0xff, 0xff) },
1605 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb1, 0xff, 0xff, 0xff) },
1606 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb2, 0xff, 0xff, 0xff) },
1607 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb3, 0xff, 0xff, 0xff) },
1608 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb4, 0xff, 0xff, 0xff) },
1609 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb5, 0xff, 0xff, 0xff) },
1610 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb6, 0xff, 0xff, 0xff) },
1611 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb7, 0xff, 0xff, 0xff) },
1612 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb8, 0xff, 0xff, 0xff) },
1613 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb9, 0xff, 0xff, 0xff) },
1614 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffba, 0xff, 0xff, 0xff) },
1615 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbb, 0xff, 0xff, 0xff) },
1616 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbc, 0xff, 0xff, 0xff) },
1617 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbd, 0xff, 0xff, 0xff) },
1618 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbe, 0xff, 0xff, 0xff) },
1619 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbf, 0xff, 0xff, 0xff) },
1620 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc0, 0xff, 0xff, 0xff) },
1621 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc1, 0xff, 0xff, 0xff) },
1622 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc2, 0xff, 0xff, 0xff) },
1623 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc3, 0xff, 0xff, 0xff) },
1624 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc4, 0xff, 0xff, 0xff) },
1625 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc5, 0xff, 0xff, 0xff) },
1626 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc6, 0xff, 0xff, 0xff) },
1627 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc7, 0xff, 0xff, 0xff) },
1628 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc8, 0xff, 0xff, 0xff) },
1629 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc9, 0xff, 0xff, 0xff) },
1630 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffca, 0xff, 0xff, 0xff) },
1631 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcb, 0xff, 0xff, 0xff) },
1632 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcc, 0xff, 0xff, 0xff) },
1633 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcd, 0xff, 0xff, 0xff) },
1634 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffce, 0xff, 0xff, 0xff) },
1635 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcf, 0xff, 0xff, 0xff) },
1636 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd0, 0xff, 0xff, 0xff) },
1637 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd1, 0xff, 0xff, 0xff) },
1638 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd2, 0xff, 0xff, 0xff) },
1639 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd3, 0xff, 0xff, 0xff) },
1640 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd4, 0xff, 0xff, 0xff) },
1641 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd5, 0xff, 0xff, 0xff) },
1642 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffe9, 0xff, 0xff, 0xff) },
1643 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
1644 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
1645 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) },
1646 +@@ -1871,6 +2003,10 @@ static const struct usb_device_id option_ids[] = {
1647 + { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
1648 + { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
1649 + { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
1650 ++ { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
1651 ++ { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
1652 ++ { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
1653 ++ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
1654 + { } /* Terminating entry */
1655 + };
1656 + MODULE_DEVICE_TABLE(usb, option_ids);
1657 +diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
1658 +index 23f11751e05a..3438146b3ddc 100644
1659 +--- a/drivers/usb/serial/pl2303.c
1660 ++++ b/drivers/usb/serial/pl2303.c
1661 +@@ -52,6 +52,7 @@ static const struct usb_device_id id_table[] = {
1662 + { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
1663 + { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
1664 + { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
1665 ++ { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) },
1666 + { USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) },
1667 + { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },
1668 + { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID_UCSGT) },
1669 +diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
1670 +index e3b7af8adfb7..09d9be88209e 100644
1671 +--- a/drivers/usb/serial/pl2303.h
1672 ++++ b/drivers/usb/serial/pl2303.h
1673 +@@ -27,6 +27,7 @@
1674 + #define ATEN_VENDOR_ID 0x0557
1675 + #define ATEN_VENDOR_ID2 0x0547
1676 + #define ATEN_PRODUCT_ID 0x2008
1677 ++#define ATEN_PRODUCT_ID2 0x2118
1678 +
1679 + #define IODATA_VENDOR_ID 0x04bb
1680 + #define IODATA_PRODUCT_ID 0x0a03
1681 +diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
1682 +index 3e96d1a9cbdb..d2e8eee46ef7 100644
1683 +--- a/drivers/usb/serial/qcserial.c
1684 ++++ b/drivers/usb/serial/qcserial.c
1685 +@@ -119,6 +119,7 @@ static const struct usb_device_id id_table[] = {
1686 + {USB_DEVICE(0x1410, 0xa021)}, /* Novatel Gobi 3000 Composite */
1687 + {USB_DEVICE(0x413c, 0x8193)}, /* Dell Gobi 3000 QDL */
1688 + {USB_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */
1689 ++ {USB_DEVICE(0x413c, 0x81a6)}, /* Dell DW5570 QDL (MC8805) */
1690 + {USB_DEVICE(0x1199, 0x68a4)}, /* Sierra Wireless QDL */
1691 + {USB_DEVICE(0x1199, 0x68a5)}, /* Sierra Wireless Modem */
1692 + {USB_DEVICE(0x1199, 0x68a8)}, /* Sierra Wireless QDL */
1693 +diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
1694 +index ab754d23244c..5fe33cc6a8e3 100644
1695 +--- a/drivers/usb/serial/spcp8x5.c
1696 ++++ b/drivers/usb/serial/spcp8x5.c
1697 +@@ -233,11 +233,17 @@ static int spcp8x5_get_msr(struct usb_serial_port *port, u8 *status)
1698 + ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
1699 + GET_UART_STATUS, GET_UART_STATUS_TYPE,
1700 + 0, GET_UART_STATUS_MSR, buf, 1, 100);
1701 +- if (ret < 0)
1702 ++ if (ret < 1) {
1703 + dev_err(&port->dev, "failed to get modem status: %d", ret);
1704 ++ if (ret >= 0)
1705 ++ ret = -EIO;
1706 ++ goto out;
1707 ++ }
1708 +
1709 + dev_dbg(&port->dev, "0xc0:0x22:0:6 %d - 0x02%x", ret, *buf);
1710 + *status = *buf;
1711 ++ ret = 0;
1712 ++out:
1713 + kfree(buf);
1714 +
1715 + return ret;
1716 +diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
1717 +index f89245b8ba8e..68a113594808 100644
1718 +--- a/drivers/video/fbcmap.c
1719 ++++ b/drivers/video/fbcmap.c
1720 +@@ -163,17 +163,18 @@ void fb_dealloc_cmap(struct fb_cmap *cmap)
1721 +
1722 + int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
1723 + {
1724 +- int tooff = 0, fromoff = 0;
1725 +- int size;
1726 ++ unsigned int tooff = 0, fromoff = 0;
1727 ++ size_t size;
1728 +
1729 + if (to->start > from->start)
1730 + fromoff = to->start - from->start;
1731 + else
1732 + tooff = from->start - to->start;
1733 +- size = to->len - tooff;
1734 +- if (size > (int) (from->len - fromoff))
1735 +- size = from->len - fromoff;
1736 +- if (size <= 0)
1737 ++ if (fromoff >= from->len || tooff >= to->len)
1738 ++ return -EINVAL;
1739 ++
1740 ++ size = min_t(size_t, to->len - tooff, from->len - fromoff);
1741 ++ if (size == 0)
1742 + return -EINVAL;
1743 + size *= sizeof(u16);
1744 +
1745 +@@ -187,17 +188,18 @@ int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
1746 +
1747 + int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to)
1748 + {
1749 +- int tooff = 0, fromoff = 0;
1750 +- int size;
1751 ++ unsigned int tooff = 0, fromoff = 0;
1752 ++ size_t size;
1753 +
1754 + if (to->start > from->start)
1755 + fromoff = to->start - from->start;
1756 + else
1757 + tooff = from->start - to->start;
1758 +- size = to->len - tooff;
1759 +- if (size > (int) (from->len - fromoff))
1760 +- size = from->len - fromoff;
1761 +- if (size <= 0)
1762 ++ if (fromoff >= from->len || tooff >= to->len)
1763 ++ return -EINVAL;
1764 ++
1765 ++ size = min_t(size_t, to->len - tooff, from->len - fromoff);
1766 ++ if (size == 0)
1767 + return -EINVAL;
1768 + size *= sizeof(u16);
1769 +
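
The two fbcmap hunks trade signed int arithmetic for unsigned offsets plus up-front range checks: with user-influenced colormap lengths, the old int-typed size could wrap in ways the (int) cast and the size <= 0 test did not reliably catch, so the new code rejects any offset at or past either map's length before taking the minimum of the two remainders. A standalone model of the corrected overlap computation (min_t is re-created locally; overlap_len is an invented name):

    #include <stdio.h>
    #include <stddef.h>

    #define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

    /* number of u16 entries safe to copy, or 0 if the maps do not overlap */
    static size_t overlap_len(unsigned int to_start, unsigned int to_len,
                              unsigned int from_start, unsigned int from_len)
    {
            unsigned int tooff = 0, fromoff = 0;

            if (to_start > from_start)
                    fromoff = to_start - from_start;
            else
                    tooff = from_start - to_start;
            if (fromoff >= from_len || tooff >= to_len)
                    return 0;
            return min_t(size_t, to_len - tooff, from_len - fromoff);
    }

    int main(void)
    {
            printf("%zu\n", overlap_len(0, 16, 8, 16)); /* 8           */
            printf("%zu\n", overlap_len(0, 4, 8, 16));  /* 0: disjoint */
            return 0;
    }
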
1770 +diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
1771 +index a4e276e65b0a..467aca9c64e5 100644
1772 +--- a/fs/cifs/readdir.c
1773 ++++ b/fs/cifs/readdir.c
1774 +@@ -280,6 +280,7 @@ initiate_cifs_search(const unsigned int xid, struct file *file)
1775 + rc = -ENOMEM;
1776 + goto error_exit;
1777 + }
1778 ++ spin_lock_init(&cifsFile->file_info_lock);
1779 + file->private_data = cifsFile;
1780 + cifsFile->tlink = cifs_get_tlink(tlink);
1781 + tcon = tlink_tcon(tlink);
1782 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
1783 +index 6362896f5875..7bc05f7bb2a7 100644
1784 +--- a/fs/ext4/super.c
1785 ++++ b/fs/ext4/super.c
1786 +@@ -3852,6 +3852,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
1787 + (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
1788 + db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
1789 + EXT4_DESC_PER_BLOCK(sb);
1790 ++ if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG)) {
1791 ++ if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
1792 ++ ext4_msg(sb, KERN_WARNING,
1793 ++ "first meta block group too large: %u "
1794 ++ "(group descriptor block count %u)",
1795 ++ le32_to_cpu(es->s_first_meta_bg), db_count);
1796 ++ goto failed_mount;
1797 ++ }
1798 ++ }
1799 + sbi->s_group_desc = ext4_kvmalloc(db_count *
1800 + sizeof(struct buffer_head *),
1801 + GFP_KERNEL);
1802 +diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
1803 +index fa32ce9b455d..71e249201bcd 100644
1804 +--- a/fs/ocfs2/ioctl.c
1805 ++++ b/fs/ocfs2/ioctl.c
1806 +@@ -34,9 +34,8 @@
1807 + copy_to_user((typeof(a) __user *)b, &(a), sizeof(a))
1808 +
1809 + /*
1810 +- * This call is void because we are already reporting an error that may
1811 +- * be -EFAULT. The error will be returned from the ioctl(2) call. It's
1812 +- * just a best-effort to tell userspace that this request caused the error.
1813 ++ * This is just a best-effort to tell userspace that this request
1814 ++ * caused the error.
1815 + */
1816 + static inline void o2info_set_request_error(struct ocfs2_info_request *kreq,
1817 + struct ocfs2_info_request __user *req)
1818 +@@ -145,136 +144,105 @@ bail:
1819 + int ocfs2_info_handle_blocksize(struct inode *inode,
1820 + struct ocfs2_info_request __user *req)
1821 + {
1822 +- int status = -EFAULT;
1823 + struct ocfs2_info_blocksize oib;
1824 +
1825 + if (o2info_from_user(oib, req))
1826 +- goto bail;
1827 ++ return -EFAULT;
1828 +
1829 + oib.ib_blocksize = inode->i_sb->s_blocksize;
1830 +
1831 + o2info_set_request_filled(&oib.ib_req);
1832 +
1833 + if (o2info_to_user(oib, req))
1834 +- goto bail;
1835 +-
1836 +- status = 0;
1837 +-bail:
1838 +- if (status)
1839 +- o2info_set_request_error(&oib.ib_req, req);
1840 ++ return -EFAULT;
1841 +
1842 +- return status;
1843 ++ return 0;
1844 + }
1845 +
1846 + int ocfs2_info_handle_clustersize(struct inode *inode,
1847 + struct ocfs2_info_request __user *req)
1848 + {
1849 +- int status = -EFAULT;
1850 + struct ocfs2_info_clustersize oic;
1851 + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1852 +
1853 + if (o2info_from_user(oic, req))
1854 +- goto bail;
1855 ++ return -EFAULT;
1856 +
1857 + oic.ic_clustersize = osb->s_clustersize;
1858 +
1859 + o2info_set_request_filled(&oic.ic_req);
1860 +
1861 + if (o2info_to_user(oic, req))
1862 +- goto bail;
1863 +-
1864 +- status = 0;
1865 +-bail:
1866 +- if (status)
1867 +- o2info_set_request_error(&oic.ic_req, req);
1868 ++ return -EFAULT;
1869 +
1870 +- return status;
1871 ++ return 0;
1872 + }
1873 +
1874 + int ocfs2_info_handle_maxslots(struct inode *inode,
1875 + struct ocfs2_info_request __user *req)
1876 + {
1877 +- int status = -EFAULT;
1878 + struct ocfs2_info_maxslots oim;
1879 + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1880 +
1881 + if (o2info_from_user(oim, req))
1882 +- goto bail;
1883 ++ return -EFAULT;
1884 +
1885 + oim.im_max_slots = osb->max_slots;
1886 +
1887 + o2info_set_request_filled(&oim.im_req);
1888 +
1889 + if (o2info_to_user(oim, req))
1890 +- goto bail;
1891 ++ return -EFAULT;
1892 +
1893 +- status = 0;
1894 +-bail:
1895 +- if (status)
1896 +- o2info_set_request_error(&oim.im_req, req);
1897 +-
1898 +- return status;
1899 ++ return 0;
1900 + }
1901 +
1902 + int ocfs2_info_handle_label(struct inode *inode,
1903 + struct ocfs2_info_request __user *req)
1904 + {
1905 +- int status = -EFAULT;
1906 + struct ocfs2_info_label oil;
1907 + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1908 +
1909 + if (o2info_from_user(oil, req))
1910 +- goto bail;
1911 ++ return -EFAULT;
1912 +
1913 + memcpy(oil.il_label, osb->vol_label, OCFS2_MAX_VOL_LABEL_LEN);
1914 +
1915 + o2info_set_request_filled(&oil.il_req);
1916 +
1917 + if (o2info_to_user(oil, req))
1918 +- goto bail;
1919 ++ return -EFAULT;
1920 +
1921 +- status = 0;
1922 +-bail:
1923 +- if (status)
1924 +- o2info_set_request_error(&oil.il_req, req);
1925 +-
1926 +- return status;
1927 ++ return 0;
1928 + }
1929 +
1930 + int ocfs2_info_handle_uuid(struct inode *inode,
1931 + struct ocfs2_info_request __user *req)
1932 + {
1933 +- int status = -EFAULT;
1934 + struct ocfs2_info_uuid oiu;
1935 + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1936 +
1937 + if (o2info_from_user(oiu, req))
1938 +- goto bail;
1939 ++ return -EFAULT;
1940 +
1941 + memcpy(oiu.iu_uuid_str, osb->uuid_str, OCFS2_TEXT_UUID_LEN + 1);
1942 +
1943 + o2info_set_request_filled(&oiu.iu_req);
1944 +
1945 + if (o2info_to_user(oiu, req))
1946 +- goto bail;
1947 +-
1948 +- status = 0;
1949 +-bail:
1950 +- if (status)
1951 +- o2info_set_request_error(&oiu.iu_req, req);
1952 ++ return -EFAULT;
1953 +
1954 +- return status;
1955 ++ return 0;
1956 + }
1957 +
1958 + int ocfs2_info_handle_fs_features(struct inode *inode,
1959 + struct ocfs2_info_request __user *req)
1960 + {
1961 +- int status = -EFAULT;
1962 + struct ocfs2_info_fs_features oif;
1963 + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1964 +
1965 + if (o2info_from_user(oif, req))
1966 +- goto bail;
1967 ++ return -EFAULT;
1968 +
1969 + oif.if_compat_features = osb->s_feature_compat;
1970 + oif.if_incompat_features = osb->s_feature_incompat;
1971 +@@ -283,39 +251,28 @@ int ocfs2_info_handle_fs_features(struct inode *inode,
1972 + o2info_set_request_filled(&oif.if_req);
1973 +
1974 + if (o2info_to_user(oif, req))
1975 +- goto bail;
1976 ++ return -EFAULT;
1977 +
1978 +- status = 0;
1979 +-bail:
1980 +- if (status)
1981 +- o2info_set_request_error(&oif.if_req, req);
1982 +-
1983 +- return status;
1984 ++ return 0;
1985 + }
1986 +
1987 + int ocfs2_info_handle_journal_size(struct inode *inode,
1988 + struct ocfs2_info_request __user *req)
1989 + {
1990 +- int status = -EFAULT;
1991 + struct ocfs2_info_journal_size oij;
1992 + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1993 +
1994 + if (o2info_from_user(oij, req))
1995 +- goto bail;
1996 ++ return -EFAULT;
1997 +
1998 + oij.ij_journal_size = i_size_read(osb->journal->j_inode);
1999 +
2000 + o2info_set_request_filled(&oij.ij_req);
2001 +
2002 + if (o2info_to_user(oij, req))
2003 +- goto bail;
2004 ++ return -EFAULT;
2005 +
2006 +- status = 0;
2007 +-bail:
2008 +- if (status)
2009 +- o2info_set_request_error(&oij.ij_req, req);
2010 +-
2011 +- return status;
2012 ++ return 0;
2013 + }
2014 +
2015 + int ocfs2_info_scan_inode_alloc(struct ocfs2_super *osb,
2016 +@@ -371,7 +328,7 @@ int ocfs2_info_handle_freeinode(struct inode *inode,
2017 + u32 i;
2018 + u64 blkno = -1;
2019 + char namebuf[40];
2020 +- int status = -EFAULT, type = INODE_ALLOC_SYSTEM_INODE;
2021 ++ int status, type = INODE_ALLOC_SYSTEM_INODE;
2022 + struct ocfs2_info_freeinode *oifi = NULL;
2023 + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2024 + struct inode *inode_alloc = NULL;
2025 +@@ -383,8 +340,10 @@ int ocfs2_info_handle_freeinode(struct inode *inode,
2026 + goto out_err;
2027 + }
2028 +
2029 +- if (o2info_from_user(*oifi, req))
2030 +- goto bail;
2031 ++ if (o2info_from_user(*oifi, req)) {
2032 ++ status = -EFAULT;
2033 ++ goto out_free;
2034 ++ }
2035 +
2036 + oifi->ifi_slotnum = osb->max_slots;
2037 +
2038 +@@ -421,14 +380,16 @@ int ocfs2_info_handle_freeinode(struct inode *inode,
2039 +
2040 + o2info_set_request_filled(&oifi->ifi_req);
2041 +
2042 +- if (o2info_to_user(*oifi, req))
2043 +- goto bail;
2044 ++ if (o2info_to_user(*oifi, req)) {
2045 ++ status = -EFAULT;
2046 ++ goto out_free;
2047 ++ }
2048 +
2049 + status = 0;
2050 + bail:
2051 + if (status)
2052 + o2info_set_request_error(&oifi->ifi_req, req);
2053 +-
2054 ++out_free:
2055 + kfree(oifi);
2056 + out_err:
2057 + return status;
2058 +@@ -655,7 +616,7 @@ int ocfs2_info_handle_freefrag(struct inode *inode,
2059 + {
2060 + u64 blkno = -1;
2061 + char namebuf[40];
2062 +- int status = -EFAULT, type = GLOBAL_BITMAP_SYSTEM_INODE;
2063 ++ int status, type = GLOBAL_BITMAP_SYSTEM_INODE;
2064 +
2065 + struct ocfs2_info_freefrag *oiff;
2066 + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2067 +@@ -668,8 +629,10 @@ int ocfs2_info_handle_freefrag(struct inode *inode,
2068 + goto out_err;
2069 + }
2070 +
2071 +- if (o2info_from_user(*oiff, req))
2072 +- goto bail;
2073 ++ if (o2info_from_user(*oiff, req)) {
2074 ++ status = -EFAULT;
2075 ++ goto out_free;
2076 ++ }
2077 + /*
2078 + * chunksize from userspace should be power of 2.
2079 + */
2080 +@@ -708,14 +671,14 @@ int ocfs2_info_handle_freefrag(struct inode *inode,
2081 +
2082 + if (o2info_to_user(*oiff, req)) {
2083 + status = -EFAULT;
2084 +- goto bail;
2085 ++ goto out_free;
2086 + }
2087 +
2088 + status = 0;
2089 + bail:
2090 + if (status)
2091 + o2info_set_request_error(&oiff->iff_req, req);
2092 +-
2093 ++out_free:
2094 + kfree(oiff);
2095 + out_err:
2096 + return status;
2097 +@@ -724,23 +687,17 @@ out_err:
2098 + int ocfs2_info_handle_unknown(struct inode *inode,
2099 + struct ocfs2_info_request __user *req)
2100 + {
2101 +- int status = -EFAULT;
2102 + struct ocfs2_info_request oir;
2103 +
2104 + if (o2info_from_user(oir, req))
2105 +- goto bail;
2106 ++ return -EFAULT;
2107 +
2108 + o2info_clear_request_filled(&oir);
2109 +
2110 + if (o2info_to_user(oir, req))
2111 +- goto bail;
2112 ++ return -EFAULT;
2113 +
2114 +- status = 0;
2115 +-bail:
2116 +- if (status)
2117 +- o2info_set_request_error(&oir, req);
2118 +-
2119 +- return status;
2120 ++ return 0;
2121 + }
2122 +
2123 + /*
2124 +diff --git a/fs/splice.c b/fs/splice.c
2125 +index 51ce51b9af6a..2e012472f97b 100644
2126 +--- a/fs/splice.c
2127 ++++ b/fs/splice.c
2128 +@@ -215,6 +215,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
2129 + buf->len = spd->partial[page_nr].len;
2130 + buf->private = spd->partial[page_nr].private;
2131 + buf->ops = spd->ops;
2132 ++ buf->flags = 0;
2133 + if (spd->flags & SPLICE_F_GIFT)
2134 + buf->flags |= PIPE_BUF_FLAG_GIFT;
2135 +
2136 +diff --git a/include/linux/can/core.h b/include/linux/can/core.h
2137 +index 78c6c52073ad..6bdc00b6df01 100644
2138 +--- a/include/linux/can/core.h
2139 ++++ b/include/linux/can/core.h
2140 +@@ -45,10 +45,9 @@ struct can_proto {
2141 + extern int can_proto_register(const struct can_proto *cp);
2142 + extern void can_proto_unregister(const struct can_proto *cp);
2143 +
2144 +-extern int can_rx_register(struct net_device *dev, canid_t can_id,
2145 +- canid_t mask,
2146 +- void (*func)(struct sk_buff *, void *),
2147 +- void *data, char *ident);
2148 ++int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
2149 ++ void (*func)(struct sk_buff *, void *),
2150 ++ void *data, char *ident, struct sock *sk);
2151 +
2152 + extern void can_rx_unregister(struct net_device *dev, canid_t can_id,
2153 + canid_t mask,
2154 +diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
2155 +index 3859ddbecb5f..985e180a5d9a 100644
2156 +--- a/include/linux/nfs4.h
2157 ++++ b/include/linux/nfs4.h
2158 +@@ -240,7 +240,7 @@ enum nfsstat4 {
2159 +
2160 + static inline bool seqid_mutating_err(u32 err)
2161 + {
2162 +- /* rfc 3530 section 8.1.5: */
2163 ++ /* See RFC 7530, section 9.1.7 */
2164 + switch (err) {
2165 + case NFS4ERR_STALE_CLIENTID:
2166 + case NFS4ERR_STALE_STATEID:
2167 +@@ -249,6 +249,7 @@ static inline bool seqid_mutating_err(u32 err)
2168 + case NFS4ERR_BADXDR:
2169 + case NFS4ERR_RESOURCE:
2170 + case NFS4ERR_NOFILEHANDLE:
2171 ++ case NFS4ERR_MOVED:
2172 + return false;
2173 + };
2174 + return true;
2175 +diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
2176 +index 6740801aa71a..5a51d3e5646c 100644
2177 +--- a/include/linux/sunrpc/clnt.h
2178 ++++ b/include/linux/sunrpc/clnt.h
2179 +@@ -168,5 +168,6 @@ size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t);
2180 + const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t);
2181 + int rpc_localaddr(struct rpc_clnt *, struct sockaddr *, size_t);
2182 +
2183 ++void rpc_cleanup_clids(void);
2184 + #endif /* __KERNEL__ */
2185 + #endif /* _LINUX_SUNRPC_CLNT_H */
2186 +diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h
2187 +index a8c2ef6d3b93..9078b31d336f 100644
2188 +--- a/include/net/cipso_ipv4.h
2189 ++++ b/include/net/cipso_ipv4.h
2190 +@@ -303,6 +303,10 @@ static inline int cipso_v4_validate(const struct sk_buff *skb,
2191 + }
2192 +
2193 + for (opt_iter = 6; opt_iter < opt_len;) {
2194 ++ if (opt_iter + 1 == opt_len) {
2195 ++ err_offset = opt_iter;
2196 ++ goto out;
2197 ++ }
2198 + tag_len = opt[opt_iter + 1];
2199 + if ((tag_len == 0) || (opt[opt_iter + 1] > (opt_len - opt_iter))) {
2200 + err_offset = opt_iter + 1;
2201 +diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
2202 +index 02ef7727bb55..587e9dd3e3b4 100644
2203 +--- a/include/net/if_inet6.h
2204 ++++ b/include/net/if_inet6.h
2205 +@@ -166,7 +166,6 @@ struct inet6_dev {
2206 + struct net_device *dev;
2207 +
2208 + struct list_head addr_list;
2209 +- int valid_ll_addr_cnt;
2210 +
2211 + struct ifmcaddr6 *mc_list;
2212 + struct ifmcaddr6 *mc_tomb;
2213 +diff --git a/include/net/sock.h b/include/net/sock.h
2214 +index 238e934dd3c3..467d2f810fb3 100644
2215 +--- a/include/net/sock.h
2216 ++++ b/include/net/sock.h
2217 +@@ -1554,6 +1554,7 @@ extern struct sk_buff *sock_rmalloc(struct sock *sk,
2218 + extern void sock_wfree(struct sk_buff *skb);
2219 + extern void skb_orphan_partial(struct sk_buff *skb);
2220 + extern void sock_rfree(struct sk_buff *skb);
2221 ++void sock_efree(struct sk_buff *skb);
2222 + extern void sock_edemux(struct sk_buff *skb);
2223 +
2224 + extern int sock_setsockopt(struct socket *sock, int level,
2225 +diff --git a/kernel/futex.c b/kernel/futex.c
2226 +index 509bdd404414..9c6394afd10f 100644
2227 +--- a/kernel/futex.c
2228 ++++ b/kernel/futex.c
2229 +@@ -2905,4 +2905,4 @@ static int __init futex_init(void)
2230 +
2231 + return 0;
2232 + }
2233 +-__initcall(futex_init);
2234 ++core_initcall(futex_init);
2235 +diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
2236 +index 44a8df70c0ec..1c0315709806 100644
2237 +--- a/kernel/printk/printk.c
2238 ++++ b/kernel/printk/printk.c
2239 +@@ -1261,7 +1261,7 @@ static void call_console_drivers(int level, const char *text, size_t len)
2240 + {
2241 + struct console *con;
2242 +
2243 +- trace_console(text, len);
2244 ++ trace_console_rcuidle(text, len);
2245 +
2246 + if (level >= console_loglevel && !ignore_loglevel)
2247 + return;
2248 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
2249 +index fe080adbe5a8..426193802b1f 100644
2250 +--- a/kernel/sched/core.c
2251 ++++ b/kernel/sched/core.c
2252 +@@ -4233,7 +4233,8 @@ void show_state_filter(unsigned long state_filter)
2253 + touch_all_softlockup_watchdogs();
2254 +
2255 + #ifdef CONFIG_SCHED_DEBUG
2256 +- sysrq_sched_debug_show();
2257 ++ if (!state_filter)
2258 ++ sysrq_sched_debug_show();
2259 + #endif
2260 + rcu_read_unlock();
2261 + /*
2262 +diff --git a/kernel/sysctl.c b/kernel/sysctl.c
2263 +index 37b95a2982af..2488148a66d7 100644
2264 +--- a/kernel/sysctl.c
2265 ++++ b/kernel/sysctl.c
2266 +@@ -2229,6 +2229,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
2267 + break;
2268 + if (neg)
2269 + continue;
2270 ++ val = convmul * val / convdiv;
2271 + if ((min && val < *min) || (max && val > *max))
2272 + continue;
2273 + *i = val;
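
The one-line sysctl change converts the parsed value into the stored unit before the min/max comparison, so the bounds, which are already expressed in the stored unit, are compared against a like quantity rather than against the raw user input. A self-contained illustration using the milliseconds-to-jiffies case; the HZ value and the bounds here are made up:

    #include <stdio.h>

    /* user writes milliseconds, the kernel stores jiffies; min/max are
     * in jiffies, so convert first, then range-check */
    int main(void)
    {
            unsigned long convmul = 250, convdiv = 1000; /* HZ = 250 */
            unsigned long min = 25, max = 250;           /* jiffies  */
            unsigned long val_ms = 400;

            unsigned long val = convmul * val_ms / convdiv; /* 100 jiffies */
            if (val < min || val > max)
                    printf("rejected\n");
            else
                    printf("stored %lu jiffies\n", val); /* stored 100 jiffies */
            return 0;
    }
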
2274 +diff --git a/mm/filemap.c b/mm/filemap.c
2275 +index 9fa5c3f40cd6..5fce50a0c898 100644
2276 +--- a/mm/filemap.c
2277 ++++ b/mm/filemap.c
2278 +@@ -1338,6 +1338,11 @@ static void do_generic_file_read(struct file *filp, loff_t *ppos,
2279 +
2280 + cond_resched();
2281 + find_page:
2282 ++ if (fatal_signal_pending(current)) {
2283 ++ error = -EINTR;
2284 ++ goto out;
2285 ++ }
2286 ++
2287 + page = find_get_page(mapping, index);
2288 + if (!page) {
2289 + page_cache_sync_readahead(mapping,
2290 +diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
2291 +index 723978c6f8ab..8b2e127b6af4 100644
2292 +--- a/mm/memory_hotplug.c
2293 ++++ b/mm/memory_hotplug.c
2294 +@@ -1205,7 +1205,7 @@ int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
2295 + }
2296 +
2297 + /*
2298 +- * Confirm all pages in a range [start, end) is belongs to the same zone.
2299 ++ * Confirm all pages in a range [start, end) belong to the same zone.
2300 + */
2301 + static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
2302 + {
2303 +@@ -1213,9 +1213,9 @@ static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
2304 + struct zone *zone = NULL;
2305 + struct page *page;
2306 + int i;
2307 +- for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn);
2308 ++ for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
2309 + pfn < end_pfn;
2310 +- pfn = sec_end_pfn + 1, sec_end_pfn += PAGES_PER_SECTION) {
2311 ++ pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
2312 + /* Make sure the memory section is present first */
2313 + if (!present_section_nr(pfn_to_section_nr(pfn)))
2314 + continue;
2315 +@@ -1234,7 +1234,11 @@ static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
2316 + zone = page_zone(page);
2317 + }
2318 + }
2319 +- return 1;
2320 ++
2321 ++ if (zone)
2322 ++ return 1;
2323 ++ else
2324 ++ return 0;
2325 + }
2326 +
2327 + /*
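
The test_pages_in_a_zone() loop rewrite fixes two off-by-ones: when start_pfn is already section-aligned, SECTION_ALIGN_UP(start_pfn) equals start_pfn and the first chunk came out empty, and advancing with sec_end_pfn + 1 then skipped the first pfn of every following section. The compressed demo below (with a tiny made-up section size) prints the chunks each variant walks; the old one misses pfn 8 and 16, the new one covers [8, 24) exactly once:

    #include <stdio.h>

    #define PAGES_PER_SECTION 8UL
    #define SECTION_ALIGN_UP(x) \
            (((x) + PAGES_PER_SECTION - 1) & ~(PAGES_PER_SECTION - 1))

    int main(void)
    {
            unsigned long start = 8, end = 24; /* start is section-aligned */
            unsigned long pfn, sec_end;

            /* old loop: first chunk is empty, then pfn 8 and 16 are skipped */
            for (pfn = start, sec_end = SECTION_ALIGN_UP(start);
                 pfn < end;
                 pfn = sec_end + 1, sec_end += PAGES_PER_SECTION)
                    printf("old [%lu, %lu)\n", pfn, sec_end < end ? sec_end : end);

            /* new loop: every pfn in [start, end) is covered exactly once */
            for (pfn = start, sec_end = SECTION_ALIGN_UP(start + 1);
                 pfn < end;
                 pfn = sec_end, sec_end += PAGES_PER_SECTION)
                    printf("new [%lu, %lu)\n", pfn, sec_end < end ? sec_end : end);
            return 0;
    }
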
2328 +diff --git a/net/can/af_can.c b/net/can/af_can.c
2329 +index 5a668268f7ff..86f88598a102 100644
2330 +--- a/net/can/af_can.c
2331 ++++ b/net/can/af_can.c
2332 +@@ -425,6 +425,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
2333 + * @func: callback function on filter match
2334 + * @data: returned parameter for callback function
2335 + * @ident: string for calling module identification
2336 ++ * @sk: socket pointer (might be NULL)
2337 + *
2338 + * Description:
2339 + * Invokes the callback function with the received sk_buff and the given
2340 +@@ -448,7 +449,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
2341 + */
2342 + int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
2343 + void (*func)(struct sk_buff *, void *), void *data,
2344 +- char *ident)
2345 ++ char *ident, struct sock *sk)
2346 + {
2347 + struct receiver *r;
2348 + struct hlist_head *rl;
2349 +@@ -476,6 +477,7 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
2350 + r->func = func;
2351 + r->data = data;
2352 + r->ident = ident;
2353 ++ r->sk = sk;
2354 +
2355 + hlist_add_head_rcu(&r->list, rl);
2356 + d->entries++;
2357 +@@ -500,8 +502,11 @@ EXPORT_SYMBOL(can_rx_register);
2358 + static void can_rx_delete_receiver(struct rcu_head *rp)
2359 + {
2360 + struct receiver *r = container_of(rp, struct receiver, rcu);
2361 ++ struct sock *sk = r->sk;
2362 +
2363 + kmem_cache_free(rcv_cache, r);
2364 ++ if (sk)
2365 ++ sock_put(sk);
2366 + }
2367 +
2368 + /**
2369 +@@ -576,8 +581,11 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
2370 + spin_unlock(&can_rcvlists_lock);
2371 +
2372 + /* schedule the receiver item for deletion */
2373 +- if (r)
2374 ++ if (r) {
2375 ++ if (r->sk)
2376 ++ sock_hold(r->sk);
2377 + call_rcu(&r->rcu, can_rx_delete_receiver);
2378 ++ }
2379 + }
2380 + EXPORT_SYMBOL(can_rx_unregister);
2381 +
2382 +diff --git a/net/can/af_can.h b/net/can/af_can.h
2383 +index 1dccb4c33894..0e95be423587 100644
2384 +--- a/net/can/af_can.h
2385 ++++ b/net/can/af_can.h
2386 +@@ -50,13 +50,14 @@
2387 +
2388 + struct receiver {
2389 + struct hlist_node list;
2390 +- struct rcu_head rcu;
2391 + canid_t can_id;
2392 + canid_t mask;
2393 + unsigned long matches;
2394 + void (*func)(struct sk_buff *, void *);
2395 + void *data;
2396 + char *ident;
2397 ++ struct sock *sk;
2398 ++ struct rcu_head rcu;
2399 + };
2400 +
2401 + enum { RX_ERR, RX_ALL, RX_FIL, RX_INV, RX_EFF, RX_MAX };
2402 +diff --git a/net/can/bcm.c b/net/can/bcm.c
2403 +index 392a687d3ca6..d64e8bab7c1a 100644
2404 +--- a/net/can/bcm.c
2405 ++++ b/net/can/bcm.c
2406 +@@ -706,14 +706,23 @@ static struct bcm_op *bcm_find_op(struct list_head *ops, canid_t can_id,
2407 +
2408 + static void bcm_remove_op(struct bcm_op *op)
2409 + {
2410 +- hrtimer_cancel(&op->timer);
2411 +- hrtimer_cancel(&op->thrtimer);
2412 +-
2413 +- if (op->tsklet.func)
2414 +- tasklet_kill(&op->tsklet);
2415 ++ if (op->tsklet.func) {
2416 ++ while (test_bit(TASKLET_STATE_SCHED, &op->tsklet.state) ||
2417 ++ test_bit(TASKLET_STATE_RUN, &op->tsklet.state) ||
2418 ++ hrtimer_active(&op->timer)) {
2419 ++ hrtimer_cancel(&op->timer);
2420 ++ tasklet_kill(&op->tsklet);
2421 ++ }
2422 ++ }
2423 +
2424 +- if (op->thrtsklet.func)
2425 +- tasklet_kill(&op->thrtsklet);
2426 ++ if (op->thrtsklet.func) {
2427 ++ while (test_bit(TASKLET_STATE_SCHED, &op->thrtsklet.state) ||
2428 ++ test_bit(TASKLET_STATE_RUN, &op->thrtsklet.state) ||
2429 ++ hrtimer_active(&op->thrtimer)) {
2430 ++ hrtimer_cancel(&op->thrtimer);
2431 ++ tasklet_kill(&op->thrtsklet);
2432 ++ }
2433 ++ }
2434 +
2435 + if ((op->frames) && (op->frames != &op->sframe))
2436 + kfree(op->frames);
2437 +@@ -1169,7 +1178,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
2438 + err = can_rx_register(dev, op->can_id,
2439 + REGMASK(op->can_id),
2440 + bcm_rx_handler, op,
2441 +- "bcm");
2442 ++ "bcm", sk);
2443 +
2444 + op->rx_reg_dev = dev;
2445 + dev_put(dev);
2446 +@@ -1178,7 +1187,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
2447 + } else
2448 + err = can_rx_register(NULL, op->can_id,
2449 + REGMASK(op->can_id),
2450 +- bcm_rx_handler, op, "bcm");
2451 ++ bcm_rx_handler, op, "bcm", sk);
2452 + if (err) {
2453 + /* this bcm rx op is broken -> remove it */
2454 + list_del(&op->list);
2455 +diff --git a/net/can/gw.c b/net/can/gw.c
2456 +index 233ce53c1852..3ce56716041d 100644
2457 +--- a/net/can/gw.c
2458 ++++ b/net/can/gw.c
2459 +@@ -441,7 +441,7 @@ static inline int cgw_register_filter(struct cgw_job *gwj)
2460 + {
2461 + return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id,
2462 + gwj->ccgw.filter.can_mask, can_can_gw_rcv,
2463 +- gwj, "gw");
2464 ++ gwj, "gw", NULL);
2465 + }
2466 +
2467 + static inline void cgw_unregister_filter(struct cgw_job *gwj)
2468 +diff --git a/net/can/raw.c b/net/can/raw.c
2469 +index e10699cc72bd..65a0553bc14b 100644
2470 +--- a/net/can/raw.c
2471 ++++ b/net/can/raw.c
2472 +@@ -168,7 +168,7 @@ static int raw_enable_filters(struct net_device *dev, struct sock *sk,
2473 + for (i = 0; i < count; i++) {
2474 + err = can_rx_register(dev, filter[i].can_id,
2475 + filter[i].can_mask,
2476 +- raw_rcv, sk, "raw");
2477 ++ raw_rcv, sk, "raw", sk);
2478 + if (err) {
2479 + /* clean up successfully registered filters */
2480 + while (--i >= 0)
2481 +@@ -189,7 +189,7 @@ static int raw_enable_errfilter(struct net_device *dev, struct sock *sk,
2482 +
2483 + if (err_mask)
2484 + err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG,
2485 +- raw_rcv, sk, "raw");
2486 ++ raw_rcv, sk, "raw", sk);
2487 +
2488 + return err;
2489 + }
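
The common thread of the af_can, bcm and raw changes above: a receiver's memory is freed from an RCU callback, so it can briefly outlive the socket whose bcm_rx_handler() or raw_rcv() it would invoke. Registration now records the owning sock, and can_rx_unregister() takes a reference that the callback drops only after the receiver is gone. A toy refcount model of that lifetime extension, with plain function calls standing in for RCU deferral:

    #include <stdio.h>

    struct sock { int refcnt; };

    static void sock_hold(struct sock *sk) { sk->refcnt++; }

    static void sock_put(struct sock *sk)
    {
            if (--sk->refcnt == 0)
                    printf("socket freed\n");
    }

    struct receiver { struct sock *sk; };

    /* the deferred (RCU) callback: free the receiver, then drop the ref */
    static void rx_delete(struct receiver *r)
    {
            struct sock *sk = r->sk;

            /* kmem_cache_free(rcv_cache, r) in the real code */
            if (sk)
                    sock_put(sk);
    }

    int main(void)
    {
            struct sock sk = { .refcnt = 1 };
            struct receiver r = { .sk = &sk };

            sock_hold(r.sk);  /* taken in can_rx_unregister()            */
            sock_put(&sk);    /* owner closes the socket in the meantime */
            rx_delete(&r);    /* only now does "socket freed" print      */
            return 0;
    }
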
2490 +diff --git a/net/core/dev.c b/net/core/dev.c
2491 +index 6b0ddf661f92..349ee899b3f0 100644
2492 +--- a/net/core/dev.c
2493 ++++ b/net/core/dev.c
2494 +@@ -1594,24 +1594,19 @@ EXPORT_SYMBOL(call_netdevice_notifiers);
2495 +
2496 + static struct static_key netstamp_needed __read_mostly;
2497 + #ifdef HAVE_JUMP_LABEL
2498 +-/* We are not allowed to call static_key_slow_dec() from irq context
2499 +- * If net_disable_timestamp() is called from irq context, defer the
2500 +- * static_key_slow_dec() calls.
2501 +- */
2502 + static atomic_t netstamp_needed_deferred;
2503 +-#endif
2504 +-
2505 +-void net_enable_timestamp(void)
2506 ++static void netstamp_clear(struct work_struct *work)
2507 + {
2508 +-#ifdef HAVE_JUMP_LABEL
2509 + int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
2510 +
2511 +- if (deferred) {
2512 +- while (--deferred)
2513 +- static_key_slow_dec(&netstamp_needed);
2514 +- return;
2515 +- }
2516 ++ while (deferred--)
2517 ++ static_key_slow_dec(&netstamp_needed);
2518 ++}
2519 ++static DECLARE_WORK(netstamp_work, netstamp_clear);
2520 + #endif
2521 ++
2522 ++void net_enable_timestamp(void)
2523 ++{
2524 + static_key_slow_inc(&netstamp_needed);
2525 + }
2526 + EXPORT_SYMBOL(net_enable_timestamp);
2527 +@@ -1619,12 +1614,12 @@ EXPORT_SYMBOL(net_enable_timestamp);
2528 + void net_disable_timestamp(void)
2529 + {
2530 + #ifdef HAVE_JUMP_LABEL
2531 +- if (in_interrupt()) {
2532 +- atomic_inc(&netstamp_needed_deferred);
2533 +- return;
2534 +- }
2535 +-#endif
2536 ++ /* net_disable_timestamp() can be called from non process context */
2537 ++ atomic_inc(&netstamp_needed_deferred);
2538 ++ schedule_work(&netstamp_work);
2539 ++#else
2540 + static_key_slow_dec(&netstamp_needed);
2541 ++#endif
2542 + }
2543 + EXPORT_SYMBOL(net_disable_timestamp);
2544 +
2545 +@@ -2489,9 +2484,9 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
2546 + if (skb->ip_summed != CHECKSUM_NONE &&
2547 + !can_checksum_protocol(features, skb_network_protocol(skb))) {
2548 + features &= ~NETIF_F_ALL_CSUM;
2549 +- } else if (illegal_highdma(dev, skb)) {
2550 +- features &= ~NETIF_F_SG;
2551 + }
2552 ++ if (illegal_highdma(dev, skb))
2553 ++ features &= ~NETIF_F_SG;
2554 +
2555 + return features;
2556 + }
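
The net/core/dev.c timestamp rework drops the old "defer only when in_interrupt()" special case: net_disable_timestamp() now just bumps an atomic counter and schedules a work item, and the work item, which always runs in process context, drains the counter with static_key_slow_dec() calls. A sketch of that counter-drain pattern in C11 atomics, with a plain int standing in for the jump label:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int deferred;
    static int key = 3; /* stand-in for the netstamp_needed jump label */

    /* the work item: always runs in process context */
    static void netstamp_clear(void)
    {
            int n = atomic_exchange(&deferred, 0);

            while (n--)
                    key--; /* static_key_slow_dec() in the real code */
    }

    /* safe from any context: nothing heavyweight happens here */
    static void net_disable_timestamp(void)
    {
            atomic_fetch_add(&deferred, 1);
            /* real code: schedule_work(&netstamp_work); */
    }

    int main(void)
    {
            net_disable_timestamp();
            net_disable_timestamp();
            netstamp_clear();          /* the scheduled work runs later */
            printf("key = %d\n", key); /* key = 1 */
            return 0;
    }
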
2557 +diff --git a/net/core/dst.c b/net/core/dst.c
2558 +index 31344009de25..08c9a8f7b885 100644
2559 +--- a/net/core/dst.c
2560 ++++ b/net/core/dst.c
2561 +@@ -280,12 +280,13 @@ void dst_release(struct dst_entry *dst)
2562 + {
2563 + if (dst) {
2564 + int newrefcnt;
2565 ++ unsigned short nocache = dst->flags & DST_NOCACHE;
2566 +
2567 + newrefcnt = atomic_dec_return(&dst->__refcnt);
2568 + if (unlikely(newrefcnt < 0))
2569 + net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
2570 + __func__, dst, newrefcnt);
2571 +- if (!newrefcnt && unlikely(dst->flags & DST_NOCACHE))
2572 ++ if (!newrefcnt && unlikely(nocache))
2573 + call_rcu(&dst->rcu_head, dst_destroy_rcu);
2574 + }
2575 + }
2576 +diff --git a/net/core/sock.c b/net/core/sock.c
2577 +index 7fa427ed41bc..d765d6411a5b 100644
2578 +--- a/net/core/sock.c
2579 ++++ b/net/core/sock.c
2580 +@@ -1656,6 +1656,12 @@ void sock_rfree(struct sk_buff *skb)
2581 + }
2582 + EXPORT_SYMBOL(sock_rfree);
2583 +
2584 ++void sock_efree(struct sk_buff *skb)
2585 ++{
2586 ++ sock_put(skb->sk);
2587 ++}
2588 ++EXPORT_SYMBOL(sock_efree);
2589 ++
2590 + void sock_edemux(struct sk_buff *skb)
2591 + {
2592 + struct sock *sk = skb->sk;
2593 +diff --git a/net/dccp/input.c b/net/dccp/input.c
2594 +index 14cdafad7a90..e511ccc74a07 100644
2595 +--- a/net/dccp/input.c
2596 ++++ b/net/dccp/input.c
2597 +@@ -606,7 +606,8 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
2598 + if (inet_csk(sk)->icsk_af_ops->conn_request(sk,
2599 + skb) < 0)
2600 + return 1;
2601 +- goto discard;
2602 ++ consume_skb(skb);
2603 ++ return 0;
2604 + }
2605 + if (dh->dccph_type == DCCP_PKT_RESET)
2606 + goto discard;
2607 +diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
2608 +index ceabe6f13216..a377d435756e 100644
2609 +--- a/net/ieee802154/6lowpan.c
2610 ++++ b/net/ieee802154/6lowpan.c
2611 +@@ -548,7 +548,7 @@ static int lowpan_header_create(struct sk_buff *skb,
2612 + hc06_ptr += 3;
2613 + } else {
2614 + /* compress nothing */
2615 +- memcpy(hc06_ptr, &hdr, 4);
2616 ++ memcpy(hc06_ptr, hdr, 4);
2617 + /* replace the top byte with new ECN | DSCP format */
2618 + *hc06_ptr = tmp;
2619 + hc06_ptr += 4;
2620 +@@ -1392,8 +1392,10 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
2621 + real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
2622 + if (!real_dev)
2623 + return -ENODEV;
2624 +- if (real_dev->type != ARPHRD_IEEE802154)
2625 ++ if (real_dev->type != ARPHRD_IEEE802154) {
2626 ++ dev_put(real_dev);
2627 + return -EINVAL;
2628 ++ }
2629 +
2630 + lowpan_dev_info(dev)->real_dev = real_dev;
2631 + lowpan_dev_info(dev)->fragment_tag = 0;
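
The first 6lowpan hunk fixes a one-character slip with outsized effect: hdr is already a pointer to the IPv6 header, so memcpy(hc06_ptr, &hdr, 4) copied the first four bytes of the pointer value itself rather than the header it points at. A minimal reproduction:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            unsigned char hdr_bytes[4] = { 0x60, 0x00, 0x12, 0x34 };
            unsigned char *hdr = hdr_bytes; /* like the ipv6hdr pointer */
            unsigned char out[4];

            memcpy(out, &hdr, 4); /* bug: copies bytes of the pointer value */
            printf("buggy: %02x (address bytes, not packet data)\n", out[0]);

            memcpy(out, hdr, 4);  /* fix: copies the header itself */
            printf("fixed: %02x\n", out[0]); /* 60 */
            return 0;
    }
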
2632 +diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
2633 +index 667c1d4ca984..4322372dddbe 100644
2634 +--- a/net/ipv4/cipso_ipv4.c
2635 ++++ b/net/ipv4/cipso_ipv4.c
2636 +@@ -1649,6 +1649,10 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
2637 + goto validate_return_locked;
2638 + }
2639 +
2640 ++ if (opt_iter + 1 == opt_len) {
2641 ++ err_offset = opt_iter;
2642 ++ goto validate_return_locked;
2643 ++ }
2644 + tag_len = tag[1];
2645 + if (tag_len > (opt_len - opt_iter)) {
2646 + err_offset = opt_iter + 1;
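
The cipso_ipv4.h and cipso_ipv4.c hunks add the same guard to the CIPSO option walk: when exactly one byte of option data remains, reading the tag length at opt[opt_iter + 1] would run one byte past the end of the buffer, so the loop now reports that offset as the error position instead. A self-contained sketch of the hardened walk, with the tag layout reduced to bare type/length pairs:

    #include <stdio.h>

    /* walk type/length tags; return the offset of the first bad byte, or -1 */
    static int validate_tags(const unsigned char *opt, int opt_len)
    {
            int opt_iter = 0;

            while (opt_iter < opt_len) {
                    if (opt_iter + 1 == opt_len) /* length byte past the end */
                            return opt_iter;
                    int tag_len = opt[opt_iter + 1];
                    if (tag_len == 0 || tag_len > opt_len - opt_iter)
                            return opt_iter + 1;
                    opt_iter += tag_len;
            }
            return -1;
    }

    int main(void)
    {
            /* two well-formed tags (4 + 2 bytes), then one stray byte */
            unsigned char opt[] = { 1, 4, 0, 0, 2, 2, 9 };

            printf("%d\n", validate_tags(opt, sizeof(opt))); /* 6 */
            return 0;
    }
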
2647 +diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
2648 +index 9e4f832aaf13..5a7bb6cb22bb 100644
2649 +--- a/net/ipv4/ip_sockglue.c
2650 ++++ b/net/ipv4/ip_sockglue.c
2651 +@@ -1044,7 +1044,14 @@ void ipv4_pktinfo_prepare(struct sk_buff *skb)
2652 + pktinfo->ipi_ifindex = 0;
2653 + pktinfo->ipi_spec_dst.s_addr = 0;
2654 + }
2655 +- skb_dst_drop(skb);
2656 ++ /* We need to keep the dst for __ip_options_echo()
2657 ++ * We could restrict the test to opt.ts_needtime || opt.srr,
2658 ++ * but the following is good enough as IP options are not often used.
2659 ++ */
2660 ++ if (unlikely(IPCB(skb)->opt.optlen))
2661 ++ skb_dst_force(skb);
2662 ++ else
2663 ++ skb_dst_drop(skb);
2664 + }
2665 +
2666 + int ip_setsockopt(struct sock *sk, int level,
2667 +diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
2668 +index 33e2bf806249..e8e662331720 100644
2669 +--- a/net/ipv4/ip_vti.c
2670 ++++ b/net/ipv4/ip_vti.c
2671 +@@ -283,7 +283,6 @@ static int vti_tunnel_init(struct net_device *dev)
2672 + memcpy(dev->dev_addr, &iph->saddr, 4);
2673 + memcpy(dev->broadcast, &iph->daddr, 4);
2674 +
2675 +- dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr);
2676 + dev->mtu = ETH_DATA_LEN;
2677 + dev->flags = IFF_NOARP;
2678 + dev->iflink = 0;
2679 +diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
2680 +index 6de66893a488..6be49858c86f 100644
2681 +--- a/net/ipv4/ping.c
2682 ++++ b/net/ipv4/ping.c
2683 +@@ -640,6 +640,8 @@ static int ping_v4_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh,
2684 + {
2685 + struct sk_buff *skb = skb_peek(&sk->sk_write_queue);
2686 +
2687 ++ if (!skb)
2688 ++ return 0;
2689 + pfh->wcheck = csum_partial((char *)&pfh->icmph,
2690 + sizeof(struct icmphdr), pfh->wcheck);
2691 + pfh->icmph.checksum = csum_fold(pfh->wcheck);
2692 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
2693 +index 3e63b5fb2121..3d2e55c5458e 100644
2694 +--- a/net/ipv4/tcp.c
2695 ++++ b/net/ipv4/tcp.c
2696 +@@ -722,6 +722,12 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
2697 + ret = -EAGAIN;
2698 + break;
2699 + }
2700 ++ /* if __tcp_splice_read() got nothing while we have
2701 ++ * an skb in receive queue, we do not want to loop.
2702 ++ * This might happen with URG data.
2703 ++ */
2704 ++ if (!skb_queue_empty(&sk->sk_receive_queue))
2705 ++ break;
2706 + sk_wait_data(sk, &timeo);
2707 + if (signal_pending(current)) {
2708 + ret = sock_intr_errno(timeo);
2709 +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
2710 +index 57f5bad5650c..12504f57fd7b 100644
2711 +--- a/net/ipv4/tcp_ipv4.c
2712 ++++ b/net/ipv4/tcp_ipv4.c
2713 +@@ -1408,6 +1408,7 @@ static int tcp_v4_conn_req_fastopen(struct sock *sk,
2714 + * scaled. So correct it appropriately.
2715 + */
2716 + tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
2717 ++ tp->max_window = tp->snd_wnd;
2718 +
2719 + /* Activate the retrans timer so that SYNACK can be retransmitted.
2720 + * The request socket is not added to the SYN table of the parent
2721 +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
2722 +index c807d5790ca1..d92c4b69f7ea 100644
2723 +--- a/net/ipv4/tcp_output.c
2724 ++++ b/net/ipv4/tcp_output.c
2725 +@@ -2163,9 +2163,11 @@ u32 __tcp_select_window(struct sock *sk)
2726 + int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
2727 + int window;
2728 +
2729 +- if (mss > full_space)
2730 ++ if (unlikely(mss > full_space)) {
2731 + mss = full_space;
2732 +-
2733 ++ if (mss <= 0)
2734 ++ return 0;
2735 ++ }
2736 + if (free_space < (full_space >> 1)) {
2737 + icsk->icsk_ack.quick = 0;
2738 +
2739 +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
2740 +index 1e31fc5477e8..1452e113e8e4 100644
2741 +--- a/net/ipv6/addrconf.c
2742 ++++ b/net/ipv6/addrconf.c
2743 +@@ -3237,6 +3237,22 @@ out:
2744 + in6_ifa_put(ifp);
2745 + }
2746 +
2747 ++/* ifp->idev must be at least read locked */
2748 ++static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp)
2749 ++{
2750 ++ struct inet6_ifaddr *ifpiter;
2751 ++ struct inet6_dev *idev = ifp->idev;
2752 ++
2753 ++ list_for_each_entry(ifpiter, &idev->addr_list, if_list) {
2754 ++ if (ifp != ifpiter && ifpiter->scope == IFA_LINK &&
2755 ++ (ifpiter->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|
2756 ++ IFA_F_OPTIMISTIC|IFA_F_DADFAILED)) ==
2757 ++ IFA_F_PERMANENT)
2758 ++ return false;
2759 ++ }
2760 ++ return true;
2761 ++}
2762 ++
2763 + static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
2764 + {
2765 + struct net_device *dev = ifp->idev->dev;
2766 +@@ -3256,14 +3272,11 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
2767 + */
2768 +
2769 + read_lock_bh(&ifp->idev->lock);
2770 +- spin_lock(&ifp->lock);
2771 +- send_mld = ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL &&
2772 +- ifp->idev->valid_ll_addr_cnt == 1;
2773 ++ send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp);
2774 + send_rs = send_mld &&
2775 + ipv6_accept_ra(ifp->idev) &&
2776 + ifp->idev->cnf.rtr_solicits > 0 &&
2777 + (dev->flags&IFF_LOOPBACK) == 0;
2778 +- spin_unlock(&ifp->lock);
2779 + read_unlock_bh(&ifp->idev->lock);
2780 +
2781 + /* While dad is in progress mld report's source address is in6_addrany.
2782 +@@ -4558,19 +4571,6 @@ errout:
2783 + rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err);
2784 + }
2785 +
2786 +-static void update_valid_ll_addr_cnt(struct inet6_ifaddr *ifp, int count)
2787 +-{
2788 +- write_lock_bh(&ifp->idev->lock);
2789 +- spin_lock(&ifp->lock);
2790 +- if (((ifp->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|
2791 +- IFA_F_DADFAILED)) == IFA_F_PERMANENT) &&
2792 +- (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL))
2793 +- ifp->idev->valid_ll_addr_cnt += count;
2794 +- WARN_ON(ifp->idev->valid_ll_addr_cnt < 0);
2795 +- spin_unlock(&ifp->lock);
2796 +- write_unlock_bh(&ifp->idev->lock);
2797 +-}
2798 +-
2799 + static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
2800 + {
2801 + struct net *net = dev_net(ifp->idev->dev);
2802 +@@ -4579,8 +4579,6 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
2803 +
2804 + switch (event) {
2805 + case RTM_NEWADDR:
2806 +- update_valid_ll_addr_cnt(ifp, 1);
2807 +-
2808 + /*
2809 + * If the address was optimistic
2810 + * we inserted the route at the start of
2811 +@@ -4596,8 +4594,6 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
2812 + ifp->idev->dev, 0, 0);
2813 + break;
2814 + case RTM_DELADDR:
2815 +- update_valid_ll_addr_cnt(ifp, -1);
2816 +-
2817 + if (ifp->idev->cnf.forwarding)
2818 + addrconf_leave_anycast(ifp);
2819 + addrconf_leave_solict(ifp->idev, &ifp->addr);
2820 +@@ -4693,8 +4689,7 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
2821 + struct net_device *dev;
2822 + struct inet6_dev *idev;
2823 +
2824 +- rcu_read_lock();
2825 +- for_each_netdev_rcu(net, dev) {
2826 ++ for_each_netdev(net, dev) {
2827 + idev = __in6_dev_get(dev);
2828 + if (idev) {
2829 + int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
2830 +@@ -4703,7 +4698,6 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
2831 + dev_disable_change(idev);
2832 + }
2833 + }
2834 +- rcu_read_unlock();
2835 + }
2836 +
2837 + static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
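
The addrconf change retires the per-device valid_ll_addr_cnt counter, which was updated and read under inconsistent locking, in favor of walking addr_list and testing each entry with a mask-equality check: a link-local address counts only if it is permanent and none of tentative, optimistic or dadfailed are set. A small demonstration of that idiom; the flag values match include/uapi/linux/if_addr.h, but usable_ll() is an invented helper:

    #include <stdio.h>

    #define IFA_F_OPTIMISTIC 0x04
    #define IFA_F_DADFAILED  0x08
    #define IFA_F_DEPRECATED 0x20
    #define IFA_F_TENTATIVE  0x40
    #define IFA_F_PERMANENT  0x80

    static int usable_ll(unsigned int flags)
    {
            return (flags & (IFA_F_PERMANENT | IFA_F_TENTATIVE |
                             IFA_F_OPTIMISTIC | IFA_F_DADFAILED)) ==
                   IFA_F_PERMANENT;
    }

    int main(void)
    {
            printf("%d\n", usable_ll(IFA_F_PERMANENT));                   /* 1 */
            printf("%d\n", usable_ll(IFA_F_PERMANENT | IFA_F_TENTATIVE)); /* 0 */
            printf("%d\n", usable_ll(IFA_F_PERMANENT | IFA_F_DEPRECATED));/* 1 */
            return 0;
    }
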
2838 +diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
2839 +index 6b5acd50103f..bb3e8326cacb 100644
2840 +--- a/net/ipv6/ip6_gre.c
2841 ++++ b/net/ipv6/ip6_gre.c
2842 +@@ -55,6 +55,7 @@
2843 + #include <net/ip6_fib.h>
2844 + #include <net/ip6_route.h>
2845 + #include <net/ip6_tunnel.h>
2846 ++#include <net/gre.h>
2847 +
2848 +
2849 + static bool log_ecn_error = true;
2850 +@@ -366,35 +367,37 @@ static void ip6gre_tunnel_uninit(struct net_device *dev)
2851 +
2852 +
2853 + static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
2854 +- u8 type, u8 code, int offset, __be32 info)
2855 ++ u8 type, u8 code, int offset, __be32 info)
2856 + {
2857 +- const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
2858 +- __be16 *p = (__be16 *)(skb->data + offset);
2859 +- int grehlen = offset + 4;
2860 ++ const struct gre_base_hdr *greh;
2861 ++ const struct ipv6hdr *ipv6h;
2862 ++ int grehlen = sizeof(*greh);
2863 + struct ip6_tnl *t;
2864 ++ int key_off = 0;
2865 + __be16 flags;
2866 ++ __be32 key;
2867 +
2868 +- flags = p[0];
2869 +- if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
2870 +- if (flags&(GRE_VERSION|GRE_ROUTING))
2871 +- return;
2872 +- if (flags&GRE_KEY) {
2873 +- grehlen += 4;
2874 +- if (flags&GRE_CSUM)
2875 +- grehlen += 4;
2876 +- }
2877 ++ if (!pskb_may_pull(skb, offset + grehlen))
2878 ++ return;
2879 ++ greh = (const struct gre_base_hdr *)(skb->data + offset);
2880 ++ flags = greh->flags;
2881 ++ if (flags & (GRE_VERSION | GRE_ROUTING))
2882 ++ return;
2883 ++ if (flags & GRE_CSUM)
2884 ++ grehlen += 4;
2885 ++ if (flags & GRE_KEY) {
2886 ++ key_off = grehlen + offset;
2887 ++ grehlen += 4;
2888 + }
2889 +
2890 +- /* If only 8 bytes returned, keyed message will be dropped here */
2891 +- if (!pskb_may_pull(skb, grehlen))
2892 ++ if (!pskb_may_pull(skb, offset + grehlen))
2893 + return;
2894 + ipv6h = (const struct ipv6hdr *)skb->data;
2895 +- p = (__be16 *)(skb->data + offset);
2896 ++ greh = (const struct gre_base_hdr *)(skb->data + offset);
2897 ++ key = key_off ? *(__be32 *)(skb->data + key_off) : 0;
2898 +
2899 + t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
2900 +- flags & GRE_KEY ?
2901 +- *(((__be32 *)p) + (grehlen / 4) - 1) : 0,
2902 +- p[1]);
2903 ++ key, greh->protocol);
2904 + if (t == NULL)
2905 + return;
2906 +
2907 +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
2908 +index 9a625b1ae10f..509fbc805017 100644
2909 +--- a/net/ipv6/ip6_tunnel.c
2910 ++++ b/net/ipv6/ip6_tunnel.c
2911 +@@ -104,16 +104,25 @@ struct ip6_tnl_net {
2912 +
2913 + static struct net_device_stats *ip6_get_stats(struct net_device *dev)
2914 + {
2915 +- struct pcpu_tstats sum = { 0 };
2916 ++ struct pcpu_tstats tmp, sum = { 0 };
2917 + int i;
2918 +
2919 + for_each_possible_cpu(i) {
2920 ++ unsigned int start;
2921 + const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
2922 +
2923 +- sum.rx_packets += tstats->rx_packets;
2924 +- sum.rx_bytes += tstats->rx_bytes;
2925 +- sum.tx_packets += tstats->tx_packets;
2926 +- sum.tx_bytes += tstats->tx_bytes;
2927 ++ do {
2928 ++ start = u64_stats_fetch_begin_bh(&tstats->syncp);
2929 ++ tmp.rx_packets = tstats->rx_packets;
2930 ++ tmp.rx_bytes = tstats->rx_bytes;
2931 ++ tmp.tx_packets = tstats->tx_packets;
2932 ++ tmp.tx_bytes = tstats->tx_bytes;
2933 ++ } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
2934 ++
2935 ++ sum.rx_packets += tmp.rx_packets;
2936 ++ sum.rx_bytes += tmp.rx_bytes;
2937 ++ sum.tx_packets += tmp.tx_packets;
2938 ++ sum.tx_bytes += tmp.tx_bytes;
2939 + }
2940 + dev->stats.rx_packets = sum.rx_packets;
2941 + dev->stats.rx_bytes = sum.rx_bytes;
2942 +@@ -396,18 +405,19 @@ ip6_tnl_dev_uninit(struct net_device *dev)
2943 +
2944 + __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
2945 + {
2946 +- const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw;
2947 +- __u8 nexthdr = ipv6h->nexthdr;
2948 +- __u16 off = sizeof (*ipv6h);
2949 ++ const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
2950 ++ unsigned int nhoff = raw - skb->data;
2951 ++ unsigned int off = nhoff + sizeof(*ipv6h);
2952 ++ u8 next, nexthdr = ipv6h->nexthdr;
2953 +
2954 + while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
2955 +- __u16 optlen = 0;
2956 + struct ipv6_opt_hdr *hdr;
2957 +- if (raw + off + sizeof (*hdr) > skb->data &&
2958 +- !pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr)))
2959 ++ u16 optlen;
2960 ++
2961 ++ if (!pskb_may_pull(skb, off + sizeof(*hdr)))
2962 + break;
2963 +
2964 +- hdr = (struct ipv6_opt_hdr *) (raw + off);
2965 ++ hdr = (struct ipv6_opt_hdr *)(skb->data + off);
2966 + if (nexthdr == NEXTHDR_FRAGMENT) {
2967 + struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
2968 + if (frag_hdr->frag_off)
2969 +@@ -418,20 +428,29 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
2970 + } else {
2971 + optlen = ipv6_optlen(hdr);
2972 + }
2973 ++ /* cache hdr->nexthdr, since pskb_may_pull() might
2974 ++ * invalidate hdr
2975 ++ */
2976 ++ next = hdr->nexthdr;
2977 + if (nexthdr == NEXTHDR_DEST) {
2978 +- __u16 i = off + 2;
2979 ++ u16 i = 2;
2980 ++
2981 ++ /* Remember: hdr is no longer valid at this point. */
2982 ++ if (!pskb_may_pull(skb, off + optlen))
2983 ++ break;
2984 ++
2985 + while (1) {
2986 + struct ipv6_tlv_tnl_enc_lim *tel;
2987 +
2988 + /* No more room for encapsulation limit */
2989 +- if (i + sizeof (*tel) > off + optlen)
2990 ++ if (i + sizeof(*tel) > optlen)
2991 + break;
2992 +
2993 +- tel = (struct ipv6_tlv_tnl_enc_lim *) &raw[i];
2994 ++ tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
2995 + /* return index of option if found and valid */
2996 + if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
2997 + tel->length == 1)
2998 +- return i;
2999 ++ return i + off - nhoff;
3000 + /* else jump to next option */
3001 + if (tel->type)
3002 + i += tel->length + 2;
3003 +@@ -439,7 +458,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
3004 + i++;
3005 + }
3006 + }
3007 +- nexthdr = hdr->nexthdr;
3008 ++ nexthdr = next;
3009 + off += optlen;
3010 + }
3011 + return 0;
3012 +@@ -822,8 +841,10 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
3013 + }
3014 +
3015 + tstats = this_cpu_ptr(t->dev->tstats);
3016 ++ u64_stats_update_begin(&tstats->syncp);
3017 + tstats->rx_packets++;
3018 + tstats->rx_bytes += skb->len;
3019 ++ u64_stats_update_end(&tstats->syncp);
3020 +
3021 + netif_rx(skb);
3022 +
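[Editorial note] Two independent fixes land in ip6_tunnel.c above: ip6_get_stats() now samples each per-CPU counter block inside a u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh() loop, so a 64-bit counter torn mid-update on a 32-bit host is simply re-read, and ip6_tnl_parse_tlv_enc_lim() re-derives its pointers from skb->data after every pskb_may_pull(), which may reallocate the linear header and invalidate cached pointers. Below is a rough userspace analogue of the sequence-counter read side, under the kernel's assumption of one writer per counter block; the names are illustrative, not the kernel API.

    #include <stdatomic.h>
    #include <stdint.h>

    struct tstats {
        atomic_uint seq;        /* even: stable; odd: update in progress */
        uint64_t rx_packets;
        uint64_t rx_bytes;
    };

    /* writer side: bracket the updates with two increments */
    static void stats_add(struct tstats *s, uint64_t pkts, uint64_t bytes)
    {
        atomic_fetch_add_explicit(&s->seq, 1, memory_order_acq_rel);
        s->rx_packets += pkts;
        s->rx_bytes += bytes;
        atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);
    }

    /* reader side: retry whenever the counter moved underneath us */
    static void stats_read(struct tstats *s, uint64_t *pkts, uint64_t *bytes)
    {
        unsigned int start;

        do {
            do {
                start = atomic_load_explicit(&s->seq, memory_order_acquire);
            } while (start & 1);    /* writer mid-update: spin */
            *pkts = s->rx_packets;
            *bytes = s->rx_bytes;
        } while (atomic_load_explicit(&s->seq, memory_order_acquire) != start);
    }

A strictly conforming C11 version would make the counters themselves atomic as well; the kernel instead relies on per-CPU exclusivity and its own barriers.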
3023 +diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c
3024 +index 7152624ed5f1..26ccd65cdcab 100644
3025 +--- a/net/irda/irqueue.c
3026 ++++ b/net/irda/irqueue.c
3027 +@@ -385,9 +385,6 @@ EXPORT_SYMBOL(hashbin_new);
3028 + * for deallocating this structure if it's complex. If not the user can
3029 + * just supply kfree, which should take care of the job.
3030 + */
3031 +-#ifdef CONFIG_LOCKDEP
3032 +-static int hashbin_lock_depth = 0;
3033 +-#endif
3034 + int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
3035 + {
3036 + irda_queue_t* queue;
3037 +@@ -398,22 +395,27 @@ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
3038 + IRDA_ASSERT(hashbin->magic == HB_MAGIC, return -1;);
3039 +
3040 + /* Synchronize */
3041 +- if ( hashbin->hb_type & HB_LOCK ) {
3042 +- spin_lock_irqsave_nested(&hashbin->hb_spinlock, flags,
3043 +- hashbin_lock_depth++);
3044 +- }
3045 ++ if (hashbin->hb_type & HB_LOCK)
3046 ++ spin_lock_irqsave(&hashbin->hb_spinlock, flags);
3047 +
3048 + /*
3049 + * Free the entries in the hashbin, TODO: use hashbin_clear when
3050 + * it has been shown to work
3051 + */
3052 + for (i = 0; i < HASHBIN_SIZE; i ++ ) {
3053 +- queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]);
3054 +- while (queue ) {
3055 +- if (free_func)
3056 +- (*free_func)(queue);
3057 +- queue = dequeue_first(
3058 +- (irda_queue_t**) &hashbin->hb_queue[i]);
3059 ++ while (1) {
3060 ++ queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]);
3061 ++
3062 ++ if (!queue)
3063 ++ break;
3064 ++
3065 ++ if (free_func) {
3066 ++ if (hashbin->hb_type & HB_LOCK)
3067 ++ spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
3068 ++ free_func(queue);
3069 ++ if (hashbin->hb_type & HB_LOCK)
3070 ++ spin_lock_irqsave(&hashbin->hb_spinlock, flags);
3071 ++ }
3072 + }
3073 + }
3074 +
3075 +@@ -422,12 +424,8 @@ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
3076 + hashbin->magic = ~HB_MAGIC;
3077 +
3078 + /* Release lock */
3079 +- if ( hashbin->hb_type & HB_LOCK) {
3080 ++ if (hashbin->hb_type & HB_LOCK)
3081 + spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
3082 +-#ifdef CONFIG_LOCKDEP
3083 +- hashbin_lock_depth--;
3084 +-#endif
3085 +- }
3086 +
3087 + /*
3088 + * Free the hashbin structure
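[Editorial note] The hashbin_delete() change above retires the old spin_lock_irqsave_nested() plus hashbin_lock_depth workaround and applies the real fix: detach each entry first, then drop the spinlock for the duration of the caller-supplied free_func(), which may itself allocate, log, or take locks of the same class. A pthread sketch of the pattern, assuming (as the kernel code does at teardown) that no new entries appear once deletion starts:

    #include <pthread.h>

    struct node { struct node *next; };

    static void destroy_all(struct node **head, pthread_mutex_t *lock,
                            void (*free_func)(struct node *))
    {
        pthread_mutex_lock(lock);
        for (;;) {
            struct node *n = *head;
            if (!n)
                break;
            *head = n->next;                 /* detach before the callback */
            if (free_func) {
                pthread_mutex_unlock(lock);  /* callback may lock/sleep */
                free_func(n);
                pthread_mutex_lock(lock);
            }
        }
        pthread_mutex_unlock(lock);
    }

Detaching the element before releasing the lock is what makes the drop safe: no other walker can reach a node that is already off the list.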
3089 +diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
3090 +index 6f251cbc2ed7..f8f1089ee8f2 100644
3091 +--- a/net/l2tp/l2tp_core.h
3092 ++++ b/net/l2tp/l2tp_core.h
3093 +@@ -261,6 +261,7 @@ extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int
3094 +
3095 + extern int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops);
3096 + extern void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
3097 ++int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg);
3098 +
3099 + /* Session reference counts. Incremented when code obtains a reference
3100 + * to a session.
3101 +diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
3102 +index 81f317f841b4..b69b762159ad 100644
3103 +--- a/net/l2tp/l2tp_ip.c
3104 ++++ b/net/l2tp/l2tp_ip.c
3105 +@@ -11,6 +11,7 @@
3106 +
3107 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3108 +
3109 ++#include <asm/ioctls.h>
3110 + #include <linux/icmp.h>
3111 + #include <linux/module.h>
3112 + #include <linux/skbuff.h>
3113 +@@ -555,6 +556,30 @@ out:
3114 + return err ? err : copied;
3115 + }
3116 +
3117 ++int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg)
3118 ++{
3119 ++ struct sk_buff *skb;
3120 ++ int amount;
3121 ++
3122 ++ switch (cmd) {
3123 ++ case SIOCOUTQ:
3124 ++ amount = sk_wmem_alloc_get(sk);
3125 ++ break;
3126 ++ case SIOCINQ:
3127 ++ spin_lock_bh(&sk->sk_receive_queue.lock);
3128 ++ skb = skb_peek(&sk->sk_receive_queue);
3129 ++ amount = skb ? skb->len : 0;
3130 ++ spin_unlock_bh(&sk->sk_receive_queue.lock);
3131 ++ break;
3132 ++
3133 ++ default:
3134 ++ return -ENOIOCTLCMD;
3135 ++ }
3136 ++
3137 ++ return put_user(amount, (int __user *)arg);
3138 ++}
3139 ++EXPORT_SYMBOL(l2tp_ioctl);
3140 ++
3141 + static struct proto l2tp_ip_prot = {
3142 + .name = "L2TP/IP",
3143 + .owner = THIS_MODULE,
3144 +@@ -563,7 +588,7 @@ static struct proto l2tp_ip_prot = {
3145 + .bind = l2tp_ip_bind,
3146 + .connect = l2tp_ip_connect,
3147 + .disconnect = l2tp_ip_disconnect,
3148 +- .ioctl = udp_ioctl,
3149 ++ .ioctl = l2tp_ioctl,
3150 + .destroy = l2tp_ip_destroy_sock,
3151 + .setsockopt = ip_setsockopt,
3152 + .getsockopt = ip_getsockopt,
3153 +diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
3154 +index 7c1a288f0b20..8783dfe5ac6c 100644
3155 +--- a/net/l2tp/l2tp_ip6.c
3156 ++++ b/net/l2tp/l2tp_ip6.c
3157 +@@ -721,7 +721,7 @@ static struct proto l2tp_ip6_prot = {
3158 + .bind = l2tp_ip6_bind,
3159 + .connect = l2tp_ip6_connect,
3160 + .disconnect = l2tp_ip6_disconnect,
3161 +- .ioctl = udp_ioctl,
3162 ++ .ioctl = l2tp_ioctl,
3163 + .destroy = l2tp_ip6_destroy_sock,
3164 + .setsockopt = ipv6_setsockopt,
3165 + .getsockopt = ipv6_getsockopt,
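[Editorial note] Routing .ioctl to the new l2tp_ioctl() instead of udp_ioctl() matters because udp_ioctl()'s SIOCINQ path inspects queued skbs assuming UDP framing, which L2TP/IP datagrams do not carry. From userspace the visible contract is just SIOCINQ/SIOCOUTQ; a usage sketch (parameters are the usual ones for L2TP/IP sockets, and socket() fails with EPROTONOSUPPORT unless the l2tp_ip module is available):

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <linux/sockios.h>      /* SIOCINQ, SIOCOUTQ */

    #ifndef IPPROTO_L2TP
    #define IPPROTO_L2TP 115
    #endif

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_L2TP);
        int inq = 0, outq = 0;

        if (fd < 0) {
            perror("socket");
            return 1;
        }
        if (ioctl(fd, SIOCINQ, &inq) == 0 &&    /* length of next datagram */
            ioctl(fd, SIOCOUTQ, &outq) == 0)    /* unsent bytes */
            printf("inq=%d outq=%d\n", inq, outq);
        return 0;
    }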
3166 +diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
3167 +index cd8724177965..6d36b3241b98 100644
3168 +--- a/net/llc/llc_conn.c
3169 ++++ b/net/llc/llc_conn.c
3170 +@@ -821,7 +821,10 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
3171 + * another trick required to cope with how the PROCOM state
3172 + * machine works. -acme
3173 + */
3174 ++ skb_orphan(skb);
3175 ++ sock_hold(sk);
3176 + skb->sk = sk;
3177 ++ skb->destructor = sock_efree;
3178 + }
3179 + if (!sock_owned_by_user(sk))
3180 + llc_conn_rcv(sk, skb);
3181 +diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
3182 +index e5850699098e..4ee1e1142e8e 100644
3183 +--- a/net/llc/llc_sap.c
3184 ++++ b/net/llc/llc_sap.c
3185 +@@ -290,7 +290,10 @@ static void llc_sap_rcv(struct llc_sap *sap, struct sk_buff *skb,
3186 +
3187 + ev->type = LLC_SAP_EV_TYPE_PDU;
3188 + ev->reason = 0;
3189 ++ skb_orphan(skb);
3190 ++ sock_hold(sk);
3191 + skb->sk = sk;
3192 ++ skb->destructor = sock_efree;
3193 + llc_sap_state_process(sap, skb);
3194 + }
3195 +
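[Editorial note] Both LLC receive paths above had the same flaw: they stored sk in skb->sk without taking a reference or setting a destructor, so the socket could be freed while the skb still pointed at it, and an skb already owned by another socket was never orphaned first. The fix is the standard ownership handshake: skb_orphan(), sock_hold(), assign, and let sock_efree() drop the reference when the skb dies. A toy (single-threaded, illustrative) model of the pairing:

    #include <stdlib.h>

    struct sock { int refcnt; };
    struct skb  { struct sock *sk; void (*destructor)(struct skb *); };

    static void sock_hold(struct sock *sk) { sk->refcnt++; }
    static void sock_put(struct sock *sk)
    {
        if (--sk->refcnt == 0)
            free(sk);
    }

    static void sock_efree(struct skb *skb) { sock_put(skb->sk); }

    static void attach(struct skb *skb, struct sock *sk)
    {
        sock_hold(sk);           /* pairs with sock_put() in the destructor */
        skb->sk = sk;
        skb->destructor = sock_efree;
    }

    static void skb_free(struct skb *skb)
    {
        if (skb->destructor)
            skb->destructor(skb);   /* drops the socket reference */
        free(skb);
    }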
3196 +diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
3197 +index 67559f7a7832..732cc22fbe26 100644
3198 +--- a/net/mac80211/mesh.c
3199 ++++ b/net/mac80211/mesh.c
3200 +@@ -345,7 +345,7 @@ int mesh_add_vendor_ies(struct ieee80211_sub_if_data *sdata,
3201 + /* fast-forward to vendor IEs */
3202 + offset = ieee80211_ie_split_vendor(ifmsh->ie, ifmsh->ie_len, 0);
3203 +
3204 +- if (offset) {
3205 ++ if (offset < ifmsh->ie_len) {
3206 + len = ifmsh->ie_len - offset;
3207 + data = ifmsh->ie + offset;
3208 + if (skb_tailroom(skb) < len)
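[Editorial note] The one-line mesh.c fix above is easy to misread. ieee80211_ie_split_vendor() returns the length of the leading run of non-vendor IEs, so vendor IEs, if any, occupy [offset, ifmsh->ie_len). When the buffer begins with a vendor IE the offset is 0, and the old "if (offset)" test wrongly skipped copying them; "offset < ifmsh->ie_len" is true exactly when a vendor tail exists. A sketch over a raw IE buffer (the splitter here is a simplified stand-in for the mac80211 helper, which also reorders IEs):

    #include <stddef.h>

    #define EID_VENDOR 221      /* WLAN_EID_VENDOR_SPECIFIC */

    /* simplified stand-in: length of the leading run of non-vendor IEs */
    static size_t split_vendor(const unsigned char *ie, size_t len)
    {
        size_t off = 0;

        while (off + 2 <= len && ie[off] != EID_VENDOR)
            off += 2 + ie[off + 1];         /* id, length, payload */
        return off;
    }

    /* old test "off != 0" missed vendor IEs at the very start (off == 0) */
    static size_t vendor_tail_len(const unsigned char *ie, size_t len)
    {
        size_t off = split_vendor(ie, len);

        return off < len ? len - off : 0;
    }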
3209 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
3210 +index 40d82575adc1..dfea5968a582 100644
3211 +--- a/net/packet/af_packet.c
3212 ++++ b/net/packet/af_packet.c
3213 +@@ -1268,6 +1268,8 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po)
3214 + f->arr[f->num_members] = sk;
3215 + smp_wmb();
3216 + f->num_members++;
3217 ++ if (f->num_members == 1)
3218 ++ dev_add_pack(&f->prot_hook);
3219 + spin_unlock(&f->lock);
3220 + }
3221 +
3222 +@@ -1284,6 +1286,8 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
3223 + BUG_ON(i >= f->num_members);
3224 + f->arr[i] = f->arr[f->num_members - 1];
3225 + f->num_members--;
3226 ++ if (f->num_members == 0)
3227 ++ __dev_remove_pack(&f->prot_hook);
3228 + spin_unlock(&f->lock);
3229 + }
3230 +
3231 +@@ -1316,13 +1320,16 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
3232 + return -EINVAL;
3233 + }
3234 +
3235 ++ mutex_lock(&fanout_mutex);
3236 ++
3237 ++ err = -EINVAL;
3238 + if (!po->running)
3239 +- return -EINVAL;
3240 ++ goto out;
3241 +
3242 ++ err = -EALREADY;
3243 + if (po->fanout)
3244 +- return -EALREADY;
3245 ++ goto out;
3246 +
3247 +- mutex_lock(&fanout_mutex);
3248 + match = NULL;
3249 + list_for_each_entry(f, &fanout_list, list) {
3250 + if (f->id == id &&
3251 +@@ -1352,7 +1359,6 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
3252 + match->prot_hook.func = packet_rcv_fanout;
3253 + match->prot_hook.af_packet_priv = match;
3254 + match->prot_hook.id_match = match_fanout_group;
3255 +- dev_add_pack(&match->prot_hook);
3256 + list_add(&match->list, &fanout_list);
3257 + }
3258 + err = -EINVAL;
3259 +@@ -1373,24 +1379,29 @@ out:
3260 + return err;
3261 + }
3262 +
3263 +-static void fanout_release(struct sock *sk)
3264 ++/* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
3265 ++ * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
3266 ++ * It is the responsibility of the caller to call fanout_release_data() and
3267 ++ * free the returned packet_fanout (after synchronize_net())
3268 ++ */
3269 ++static struct packet_fanout *fanout_release(struct sock *sk)
3270 + {
3271 + struct packet_sock *po = pkt_sk(sk);
3272 + struct packet_fanout *f;
3273 +
3274 +- f = po->fanout;
3275 +- if (!f)
3276 +- return;
3277 +-
3278 + mutex_lock(&fanout_mutex);
3279 +- po->fanout = NULL;
3280 ++ f = po->fanout;
3281 ++ if (f) {
3282 ++ po->fanout = NULL;
3283 +
3284 +- if (atomic_dec_and_test(&f->sk_ref)) {
3285 +- list_del(&f->list);
3286 +- dev_remove_pack(&f->prot_hook);
3287 +- kfree(f);
3288 ++ if (atomic_dec_and_test(&f->sk_ref))
3289 ++ list_del(&f->list);
3290 ++ else
3291 ++ f = NULL;
3292 + }
3293 + mutex_unlock(&fanout_mutex);
3294 ++
3295 ++ return f;
3296 + }
3297 +
3298 + static const struct proto_ops packet_ops;
3299 +@@ -2255,7 +2266,7 @@ static int packet_snd(struct socket *sock,
3300 + int vnet_hdr_len;
3301 + struct packet_sock *po = pkt_sk(sk);
3302 + unsigned short gso_type = 0;
3303 +- int hlen, tlen;
3304 ++ int hlen, tlen, linear;
3305 + int extra_len = 0;
3306 +
3307 + /*
3308 +@@ -2349,7 +2360,9 @@ static int packet_snd(struct socket *sock,
3309 + err = -ENOBUFS;
3310 + hlen = LL_RESERVED_SPACE(dev);
3311 + tlen = dev->needed_tailroom;
3312 +- skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, vnet_hdr.hdr_len,
3313 ++ linear = vnet_hdr.hdr_len;
3314 ++ linear = max(linear, min_t(int, len, dev->hard_header_len));
3315 ++ skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
3316 + msg->msg_flags & MSG_DONTWAIT, &err);
3317 + if (skb == NULL)
3318 + goto out_unlock;
3319 +@@ -2452,6 +2465,7 @@ static int packet_release(struct socket *sock)
3320 + {
3321 + struct sock *sk = sock->sk;
3322 + struct packet_sock *po;
3323 ++ struct packet_fanout *f;
3324 + struct net *net;
3325 + union tpacket_req_u req_u;
3326 +
3327 +@@ -2491,9 +2505,13 @@ static int packet_release(struct socket *sock)
3328 + packet_set_ring(sk, &req_u, 1, 1);
3329 + }
3330 +
3331 +- fanout_release(sk);
3332 ++ f = fanout_release(sk);
3333 +
3334 + synchronize_net();
3335 ++
3336 ++ if (f) {
3337 ++ kfree(f);
3338 ++ }
3339 + /*
3340 + * Now the socket is dead. No more input will appear.
3341 + */
3342 +@@ -3371,7 +3389,6 @@ static int packet_notifier(struct notifier_block *this,
3343 + }
3344 + if (msg == NETDEV_UNREGISTER) {
3345 + packet_cached_dev_reset(po);
3346 +- fanout_release(sk);
3347 + po->ifindex = -1;
3348 + if (po->prot_hook.dev)
3349 + dev_put(po->prot_hook.dev);
3350 +@@ -3660,7 +3677,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
3351 + */
3352 + if (!tx_ring)
3353 + init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
3354 +- break;
3355 ++ break;
3356 + default:
3357 + break;
3358 + }
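[Editorial note] The af_packet changes above fix fanout-group lifetime bugs: the prot_hook is registered only while the group has members (added on the first __fanout_link(), removed on the last unlink), the po->running and po->fanout checks in fanout_add() now happen under fanout_mutex, the notifier path no longer tears fanout state down on device unregister, and the group itself is freed only after synchronize_net() in packet_release(), since packet_rcv_fanout() may still be running on another CPU. (The packet_snd() hunk is independent: it sizes the skb's linear area to at least the device's hard_header_len so the link-layer header can never land in fragments.) Below is a userspace sketch of the unlink-then-grace-period shape, using liburcu's synchronize_rcu() as a stand-in for synchronize_net(); the structures are hypothetical simplifications, and the program links with -lurcu.

    #include <urcu.h>               /* userspace RCU: synchronize_rcu() */
    #include <pthread.h>
    #include <stdlib.h>

    struct fanout {
        struct fanout *next;
        int sk_ref;
    };

    static pthread_mutex_t fanout_mutex = PTHREAD_MUTEX_INITIALIZER;
    static struct fanout *fanout_list;

    /* Drop one membership; unlink and return the group if it was the
     * last. Mirrors the split in the patch: the caller frees, later. */
    static struct fanout *fanout_release(struct fanout *f)
    {
        struct fanout **pp, *dead = NULL;

        pthread_mutex_lock(&fanout_mutex);
        if (--f->sk_ref == 0) {
            for (pp = &fanout_list; *pp; pp = &(*pp)->next) {
                if (*pp == f) {
                    *pp = f->next;      /* unlink under the mutex */
                    break;
                }
            }
            dead = f;
        }
        pthread_mutex_unlock(&fanout_mutex);
        return dead;
    }

    static void fanout_put(struct fanout *f)
    {
        struct fanout *dead = fanout_release(f);

        if (dead) {
            synchronize_rcu();          /* wait out lockless readers */
            free(dead);
        }
    }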
3359 +diff --git a/net/sctp/associola.c b/net/sctp/associola.c
3360 +index 88ca530f1d1a..1c58a980f0c2 100644
3361 +--- a/net/sctp/associola.c
3362 ++++ b/net/sctp/associola.c
3363 +@@ -1286,78 +1286,107 @@ void sctp_assoc_update(struct sctp_association *asoc,
3364 + }
3365 +
3366 + /* Update the retran path for sending a retransmitted packet.
3367 +- * Round-robin through the active transports, else round-robin
3368 +- * through the inactive transports as this is the next best thing
3369 +- * we can try.
3370 ++ * See also RFC4960, 6.4. Multi-Homed SCTP Endpoints:
3371 ++ *
3372 ++ * When there is outbound data to send and the primary path
3373 ++ * becomes inactive (e.g., due to failures), or where the
3374 ++ * SCTP user explicitly requests to send data to an
3375 ++ * inactive destination transport address, before reporting
3376 ++ * an error to its ULP, the SCTP endpoint should try to send
3377 ++ * the data to an alternate active destination transport
3378 ++ * address if one exists.
3379 ++ *
3380 ++ * When retransmitting data that timed out, if the endpoint
3381 ++ * is multihomed, it should consider each source-destination
3382 ++ * address pair in its retransmission selection policy.
3383 ++ * When retransmitting timed-out data, the endpoint should
3384 ++ * attempt to pick the most divergent source-destination
3385 ++ * pair from the original source-destination pair to which
3386 ++ * the packet was transmitted.
3387 ++ *
3388 ++ * Note: Rules for picking the most divergent source-destination
3389 ++ * pair are an implementation decision and are not specified
3390 ++ * within this document.
3391 ++ *
3392 ++ * Our basic strategy is to round-robin transports in priorities
3393 ++ * according to sctp_state_prio_map[] e.g., if no such
3394 ++ * transport with state SCTP_ACTIVE exists, round-robin through
3395 ++ * SCTP_UNKNOWN, etc. You get the picture.
3396 + */
3397 +-void sctp_assoc_update_retran_path(struct sctp_association *asoc)
3398 ++static const u8 sctp_trans_state_to_prio_map[] = {
3399 ++ [SCTP_ACTIVE] = 3, /* best case */
3400 ++ [SCTP_UNKNOWN] = 2,
3401 ++ [SCTP_PF] = 1,
3402 ++ [SCTP_INACTIVE] = 0, /* worst case */
3403 ++};
3404 ++
3405 ++static u8 sctp_trans_score(const struct sctp_transport *trans)
3406 + {
3407 +- struct sctp_transport *t, *next;
3408 +- struct list_head *head = &asoc->peer.transport_addr_list;
3409 +- struct list_head *pos;
3410 ++ return sctp_trans_state_to_prio_map[trans->state];
3411 ++}
3412 +
3413 +- if (asoc->peer.transport_count == 1)
3414 +- return;
3415 ++static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr,
3416 ++ struct sctp_transport *best)
3417 ++{
3418 ++ if (best == NULL)
3419 ++ return curr;
3420 +
3421 +- /* Find the next transport in a round-robin fashion. */
3422 +- t = asoc->peer.retran_path;
3423 +- pos = &t->transports;
3424 +- next = NULL;
3425 ++ return sctp_trans_score(curr) > sctp_trans_score(best) ? curr : best;
3426 ++}
3427 +
3428 +- while (1) {
3429 +- /* Skip the head. */
3430 +- if (pos->next == head)
3431 +- pos = head->next;
3432 +- else
3433 +- pos = pos->next;
3434 ++void sctp_assoc_update_retran_path(struct sctp_association *asoc)
3435 ++{
3436 ++ struct sctp_transport *trans = asoc->peer.retran_path;
3437 ++ struct sctp_transport *trans_next = NULL;
3438 +
3439 +- t = list_entry(pos, struct sctp_transport, transports);
3440 ++ /* We're done as we only have the one and only path. */
3441 ++ if (asoc->peer.transport_count == 1)
3442 ++ return;
3443 ++ /* If active_path and retran_path are the same and active,
3444 ++ * then this is the only active path. Use it.
3445 ++ */
3446 ++ if (asoc->peer.active_path == asoc->peer.retran_path &&
3447 ++ asoc->peer.active_path->state == SCTP_ACTIVE)
3448 ++ return;
3449 +
3450 +- /* We have exhausted the list, but didn't find any
3451 +- * other active transports. If so, use the next
3452 +- * transport.
3453 +- */
3454 +- if (t == asoc->peer.retran_path) {
3455 +- t = next;
3456 ++ /* Iterate from retran_path's successor back to retran_path. */
3457 ++ for (trans = list_next_entry(trans, transports); 1;
3458 ++ trans = list_next_entry(trans, transports)) {
3459 ++ /* Manually skip the head element. */
3460 ++ if (&trans->transports == &asoc->peer.transport_addr_list)
3461 ++ continue;
3462 ++ if (trans->state == SCTP_UNCONFIRMED)
3463 ++ continue;
3464 ++ trans_next = sctp_trans_elect_best(trans, trans_next);
3465 ++ /* Active is good enough for immediate return. */
3466 ++ if (trans_next->state == SCTP_ACTIVE)
3467 + break;
3468 +- }
3469 +-
3470 +- /* Try to find an active transport. */
3471 +-
3472 +- if ((t->state == SCTP_ACTIVE) ||
3473 +- (t->state == SCTP_UNKNOWN)) {
3474 ++ /* We've reached the end, time to update path. */
3475 ++ if (trans == asoc->peer.retran_path)
3476 + break;
3477 +- } else {
3478 +- /* Keep track of the next transport in case
3479 +- * we don't find any active transport.
3480 +- */
3481 +- if (t->state != SCTP_UNCONFIRMED && !next)
3482 +- next = t;
3483 +- }
3484 + }
3485 +
3486 +- if (t)
3487 +- asoc->peer.retran_path = t;
3488 +- else
3489 +- t = asoc->peer.retran_path;
3490 ++ if (trans_next != NULL)
3491 ++ asoc->peer.retran_path = trans_next;
3492 +
3493 +- pr_debug("%s: association:%p addr:%pISpc\n", __func__, asoc,
3494 +- &t->ipaddr.sa);
3495 ++ pr_debug("%s: association:%p updated new path to addr:%pISpc\n",
3496 ++ __func__, asoc, &asoc->peer.retran_path->ipaddr.sa);
3497 + }
3498 +
3499 +-/* Choose the transport for sending retransmit packet. */
3500 +-struct sctp_transport *sctp_assoc_choose_alter_transport(
3501 +- struct sctp_association *asoc, struct sctp_transport *last_sent_to)
3502 ++struct sctp_transport *
3503 ++sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
3504 ++ struct sctp_transport *last_sent_to)
3505 + {
3506 + /* If this is the first time packet is sent, use the active path,
3507 + * else use the retran path. If the last packet was sent over the
3508 + * retran path, update the retran path and use it.
3509 + */
3510 +- if (!last_sent_to)
3511 ++ if (last_sent_to == NULL) {
3512 + return asoc->peer.active_path;
3513 +- else {
3514 ++ } else {
3515 + if (last_sent_to == asoc->peer.retran_path)
3516 + sctp_assoc_update_retran_path(asoc);
3517 ++
3518 + return asoc->peer.retran_path;
3519 + }
3520 + }
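[Editorial note] The rewritten retransmission-path selection above replaces the old open-coded round-robin with a scored walk: each transport state maps to a priority, the loop keeps the best-scoring candidate seen so far, and an SCTP_ACTIVE hit short-circuits the search. A stand-alone rendering of the policy over a plain array; enum values and names are illustrative, and the kernel additionally walks a circular list and skips SCTP_UNCONFIRMED entries.

    #include <stddef.h>

    enum trans_state { T_INACTIVE, T_PF, T_UNKNOWN, T_ACTIVE };

    static const unsigned char trans_prio[] = {
        [T_ACTIVE]   = 3,   /* best case */
        [T_UNKNOWN]  = 2,
        [T_PF]       = 1,
        [T_INACTIVE] = 0,   /* worst case */
    };

    struct transport { enum trans_state state; };

    static struct transport *elect_best(struct transport *t, size_t n)
    {
        struct transport *best = NULL;
        size_t i;

        for (i = 0; i < n; i++) {
            if (!best || trans_prio[t[i].state] > trans_prio[best->state])
                best = &t[i];
            if (best->state == T_ACTIVE)
                break;      /* active is good enough: stop the walk early */
        }
        return best;
    }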
3521 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
3522 +index 2c5cb6d2787d..8e7cc3e2b08b 100644
3523 +--- a/net/sctp/socket.c
3524 ++++ b/net/sctp/socket.c
3525 +@@ -6712,7 +6712,8 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
3526 + */
3527 + sctp_release_sock(sk);
3528 + current_timeo = schedule_timeout(current_timeo);
3529 +- BUG_ON(sk != asoc->base.sk);
3530 ++ if (sk != asoc->base.sk)
3531 ++ goto do_error;
3532 + sctp_lock_sock(sk);
3533 +
3534 + *timeo_p = current_timeo;
3535 +diff --git a/net/socket.c b/net/socket.c
3536 +index 64c47cd62e14..bc3f3f726d47 100644
3537 +--- a/net/socket.c
3538 ++++ b/net/socket.c
3539 +@@ -2334,8 +2334,10 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
3540 + return err;
3541 +
3542 + err = sock_error(sock->sk);
3543 +- if (err)
3544 ++ if (err) {
3545 ++ datagrams = err;
3546 + goto out_put;
3547 ++ }
3548 +
3549 + entry = mmsg;
3550 + compat_entry = (struct compat_mmsghdr __user *)mmsg;
3551 +diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
3552 +index e0062c544ac8..a9ca70579eb9 100644
3553 +--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
3554 ++++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
3555 +@@ -260,7 +260,7 @@ static int gssx_dec_option_array(struct xdr_stream *xdr,
3556 + if (!oa->data)
3557 + return -ENOMEM;
3558 +
3559 +- creds = kmalloc(sizeof(struct svc_cred), GFP_KERNEL);
3560 ++ creds = kzalloc(sizeof(struct svc_cred), GFP_KERNEL);
3561 + if (!creds) {
3562 + kfree(oa->data);
3563 + return -ENOMEM;
3564 +diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
3565 +index 8724ef857360..8ac0f2ec323b 100644
3566 +--- a/net/sunrpc/clnt.c
3567 ++++ b/net/sunrpc/clnt.c
3568 +@@ -315,6 +315,11 @@ out:
3569 +
3570 + static DEFINE_IDA(rpc_clids);
3571 +
3572 ++void rpc_cleanup_clids(void)
3573 ++{
3574 ++ ida_destroy(&rpc_clids);
3575 ++}
3576 ++
3577 + static int rpc_alloc_clid(struct rpc_clnt *clnt)
3578 + {
3579 + int clid;
3580 +diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
3581 +index 3d6498af9adc..c13279459407 100644
3582 +--- a/net/sunrpc/sunrpc_syms.c
3583 ++++ b/net/sunrpc/sunrpc_syms.c
3584 +@@ -111,6 +111,7 @@ out:
3585 + static void __exit
3586 + cleanup_sunrpc(void)
3587 + {
3588 ++ rpc_cleanup_clids();
3589 + rpcauth_remove_module();
3590 + cleanup_socket_xprt();
3591 + svc_cleanup_xprt_sock();
3592 +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
3593 +index 3974413f78e7..339532b15223 100644
3594 +--- a/net/unix/af_unix.c
3595 ++++ b/net/unix/af_unix.c
3596 +@@ -978,6 +978,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3597 + unsigned int hash;
3598 + struct unix_address *addr;
3599 + struct hlist_head *list;
3600 ++ struct path path = { NULL, NULL };
3601 +
3602 + err = -EINVAL;
3603 + if (sunaddr->sun_family != AF_UNIX)
3604 +@@ -993,9 +994,20 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3605 + goto out;
3606 + addr_len = err;
3607 +
3608 ++ if (sun_path[0]) {
3609 ++ umode_t mode = S_IFSOCK |
3610 ++ (SOCK_INODE(sock)->i_mode & ~current_umask());
3611 ++ err = unix_mknod(sun_path, mode, &path);
3612 ++ if (err) {
3613 ++ if (err == -EEXIST)
3614 ++ err = -EADDRINUSE;
3615 ++ goto out;
3616 ++ }
3617 ++ }
3618 ++
3619 + err = mutex_lock_interruptible(&u->readlock);
3620 + if (err)
3621 +- goto out;
3622 ++ goto out_put;
3623 +
3624 + err = -EINVAL;
3625 + if (u->addr)
3626 +@@ -1012,16 +1024,6 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3627 + atomic_set(&addr->refcnt, 1);
3628 +
3629 + if (sun_path[0]) {
3630 +- struct path path;
3631 +- umode_t mode = S_IFSOCK |
3632 +- (SOCK_INODE(sock)->i_mode & ~current_umask());
3633 +- err = unix_mknod(sun_path, mode, &path);
3634 +- if (err) {
3635 +- if (err == -EEXIST)
3636 +- err = -EADDRINUSE;
3637 +- unix_release_addr(addr);
3638 +- goto out_up;
3639 +- }
3640 + addr->hash = UNIX_HASH_SIZE;
3641 + hash = path.dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1);
3642 + spin_lock(&unix_table_lock);
3643 +@@ -1048,6 +1050,9 @@ out_unlock:
3644 + spin_unlock(&unix_table_lock);
3645 + out_up:
3646 + mutex_unlock(&u->readlock);
3647 ++out_put:
3648 ++ if (err)
3649 ++ path_put(&path);
3650 + out:
3651 + return err;
3652 + }
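[Editorial note] The unix_bind() rework above moves the filesystem side effect, unix_mknod(), which takes VFS locks and can sleep, in front of the u->readlock acquisition, and funnels every post-creation failure through a common exit that path_put()s the node reference. This is the classic shape of acquiring the heavyweight resource first and unwinding it on any later error. A compilable miniature with stubbed-out helpers; every name here is a stand-in:

    #include <pthread.h>

    struct path { int held; };

    static pthread_mutex_t u_readlock = PTHREAD_MUTEX_INITIALIZER;

    static int  unix_mknod_stub(struct path *p) { p->held = 1; return 0; }
    static void path_put_stub(struct path *p)   { p->held = 0; }
    static int  insert_into_hash_stub(void)     { return 0; }

    static int bind_like(int has_pathname)
    {
        struct path path = { 0 };
        int err = 0;

        if (has_pathname) {
            err = unix_mknod_stub(&path);   /* may sleep / take VFS locks:
                                             * do it before our own mutex */
            if (err)
                goto out;
        }
        pthread_mutex_lock(&u_readlock);
        err = insert_into_hash_stub();
        pthread_mutex_unlock(&u_readlock);
    out:
        if (err && path.held)
            path_put_stub(&path);           /* unwind on any later failure */
        return err;
    }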
3653 +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
3654 +index 3ba608a61bbf..bcae35aa0557 100644
3655 +--- a/security/selinux/hooks.c
3656 ++++ b/security/selinux/hooks.c
3657 +@@ -5511,7 +5511,7 @@ static int selinux_setprocattr(struct task_struct *p,
3658 + return error;
3659 +
3660 + /* Obtain a SID for the context, if one was specified. */
3661 +- if (size && str[1] && str[1] != '\n') {
3662 ++ if (size && str[0] && str[0] != '\n') {
3663 + if (str[size-1] == '\n') {
3664 + str[size-1] = 0;
3665 + size--;
3666 +diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
3667 +index 652350e2533f..7204c0f1700b 100644
3668 +--- a/sound/core/seq/seq_memory.c
3669 ++++ b/sound/core/seq/seq_memory.c
3670 +@@ -419,7 +419,6 @@ int snd_seq_pool_done(struct snd_seq_pool *pool)
3671 + {
3672 + unsigned long flags;
3673 + struct snd_seq_event_cell *ptr;
3674 +- int max_count = 5 * HZ;
3675 +
3676 + if (snd_BUG_ON(!pool))
3677 + return -EINVAL;
3678 +@@ -432,14 +431,8 @@ int snd_seq_pool_done(struct snd_seq_pool *pool)
3679 + if (waitqueue_active(&pool->output_sleep))
3680 + wake_up(&pool->output_sleep);
3681 +
3682 +- while (atomic_read(&pool->counter) > 0) {
3683 +- if (max_count == 0) {
3684 +- snd_printk(KERN_WARNING "snd_seq_pool_done timeout: %d cells remain\n", atomic_read(&pool->counter));
3685 +- break;
3686 +- }
3687 ++ while (atomic_read(&pool->counter) > 0)
3688 + schedule_timeout_uninterruptible(1);
3689 +- max_count--;
3690 +- }
3691 +
3692 + /* release all resources */
3693 + spin_lock_irqsave(&pool->lock, flags);
3694 +diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
3695 +index 4c9aa462de9b..17fe04d892f9 100644
3696 +--- a/sound/core/seq/seq_queue.c
3697 ++++ b/sound/core/seq/seq_queue.c
3698 +@@ -183,6 +183,8 @@ void __exit snd_seq_queues_delete(void)
3699 + }
3700 + }
3701 +
3702 ++static void queue_use(struct snd_seq_queue *queue, int client, int use);
3703 ++
3704 + /* allocate a new queue -
3705 + * return queue index value or negative value for error
3706 + */
3707 +@@ -194,11 +196,11 @@ int snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
3708 + if (q == NULL)
3709 + return -ENOMEM;
3710 + q->info_flags = info_flags;
3711 ++ queue_use(q, client, 1);
3712 + if (queue_list_add(q) < 0) {
3713 + queue_delete(q);
3714 + return -ENOMEM;
3715 + }
3716 +- snd_seq_queue_use(q->queue, client, 1); /* use this queue */
3717 + return q->queue;
3718 + }
3719 +
3720 +@@ -504,19 +506,9 @@ int snd_seq_queue_timer_set_tempo(int queueid, int client,
3721 + return result;
3722 + }
3723 +
3724 +-
3725 +-/* use or unuse this queue -
3726 +- * if it is the first client, starts the timer.
3727 +- * if it is not longer used by any clients, stop the timer.
3728 +- */
3729 +-int snd_seq_queue_use(int queueid, int client, int use)
3730 ++/* use or unuse this queue */
3731 ++static void queue_use(struct snd_seq_queue *queue, int client, int use)
3732 + {
3733 +- struct snd_seq_queue *queue;
3734 +-
3735 +- queue = queueptr(queueid);
3736 +- if (queue == NULL)
3737 +- return -EINVAL;
3738 +- mutex_lock(&queue->timer_mutex);
3739 + if (use) {
3740 + if (!test_and_set_bit(client, queue->clients_bitmap))
3741 + queue->clients++;
3742 +@@ -531,6 +523,21 @@ int snd_seq_queue_use(int queueid, int client, int use)
3743 + } else {
3744 + snd_seq_timer_close(queue);
3745 + }
3746 ++}
3747 ++
3748 ++/* use or unuse this queue -
3749 ++ * if it is the first client, start the timer.
3750 ++ * if it is no longer used by any clients, stop the timer.
3751 ++ */
3752 ++int snd_seq_queue_use(int queueid, int client, int use)
3753 ++{
3754 ++ struct snd_seq_queue *queue;
3755 ++
3756 ++ queue = queueptr(queueid);
3757 ++ if (queue == NULL)
3758 ++ return -EINVAL;
3759 ++ mutex_lock(&queue->timer_mutex);
3760 ++ queue_use(queue, client, use);
3761 + mutex_unlock(&queue->timer_mutex);
3762 + queuefree(queue);
3763 + return 0;