Gentoo Archives: gentoo-commits

From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:3.12 commit in: /
Date: Wed, 01 Feb 2017 12:48:55
Message-Id: 1485953311.687a84c83863dcb0662f4f62c707ce18fcee51c1.alicef@gentoo
1 commit: 687a84c83863dcb0662f4f62c707ce18fcee51c1
2 Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
3 AuthorDate: Wed Feb 1 12:48:31 2017 +0000
4 Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
5 CommitDate: Wed Feb 1 12:48:31 2017 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=687a84c8
7
8 Linux kernel 3.12.70
9
10 0000_README | 8 +
11 1069_linux-3.12.70.patch | 7026 ++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 7034 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index 9b876d4..89b165d 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -318,6 +318,14 @@ Patch: 1067_linux-3.12.68.patch
19 From: http://www.kernel.org
20 Desc: Linux 3.12.68
21
22 +Patch: 1068_linux-3.12.69.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 3.12.69
25 +
26 +Patch: 1069_linux-3.12.70.patch
27 +From: http://www.kernel.org
28 +Desc: Linux 3.12.70
29 +
30 Patch: 1500_XATTR_USER_PREFIX.patch
31 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
32 Desc: Support for namespace user.pax.* on tmpfs.
33
34 diff --git a/1069_linux-3.12.70.patch b/1069_linux-3.12.70.patch
35 new file mode 100644
36 index 0000000..01821f5
37 --- /dev/null
38 +++ b/1069_linux-3.12.70.patch
39 @@ -0,0 +1,7026 @@
40 +diff --git a/Documentation/devicetree/bindings/clock/imx31-clock.txt b/Documentation/devicetree/bindings/clock/imx31-clock.txt
41 +index 19df842c694f..8163d565f697 100644
42 +--- a/Documentation/devicetree/bindings/clock/imx31-clock.txt
43 ++++ b/Documentation/devicetree/bindings/clock/imx31-clock.txt
44 +@@ -77,7 +77,7 @@ Examples:
45 + clks: ccm@53f80000{
46 + compatible = "fsl,imx31-ccm";
47 + reg = <0x53f80000 0x4000>;
48 +- interrupts = <0 31 0x04 0 53 0x04>;
49 ++ interrupts = <31>, <53>;
50 + #clock-cells = <1>;
51 + };
52 +
53 +diff --git a/Makefile b/Makefile
54 +index f355c0e24cd6..d0e6e38ee77b 100644
55 +--- a/Makefile
56 ++++ b/Makefile
57 +@@ -1,6 +1,6 @@
58 + VERSION = 3
59 + PATCHLEVEL = 12
60 +-SUBLEVEL = 69
61 ++SUBLEVEL = 70
62 + EXTRAVERSION =
63 + NAME = One Giant Leap for Frogkind
64 +
65 +diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts
66 +index 588ce58a2959..bd81f1da17a6 100644
67 +--- a/arch/arm/boot/dts/da850-evm.dts
68 ++++ b/arch/arm/boot/dts/da850-evm.dts
69 +@@ -59,6 +59,7 @@
70 + #size-cells = <1>;
71 + compatible = "m25p64";
72 + spi-max-frequency = <30000000>;
73 ++ m25p,fast-read;
74 + reg = <0>;
75 + partition@0 {
76 + label = "U-Boot-SPL";
77 +diff --git a/arch/arm/boot/dts/imx31.dtsi b/arch/arm/boot/dts/imx31.dtsi
78 +index c34f82581248..626e5e374572 100644
79 +--- a/arch/arm/boot/dts/imx31.dtsi
80 ++++ b/arch/arm/boot/dts/imx31.dtsi
81 +@@ -30,11 +30,11 @@
82 + };
83 + };
84 +
85 +- avic: avic-interrupt-controller@60000000 {
86 ++ avic: interrupt-controller@68000000 {
87 + compatible = "fsl,imx31-avic", "fsl,avic";
88 + interrupt-controller;
89 + #interrupt-cells = <1>;
90 +- reg = <0x60000000 0x100000>;
91 ++ reg = <0x68000000 0x100000>;
92 + };
93 +
94 + soc {
95 +@@ -110,13 +110,6 @@
96 + interrupts = <19>;
97 + clocks = <&clks 25>;
98 + };
99 +-
100 +- clks: ccm@53f80000{
101 +- compatible = "fsl,imx31-ccm";
102 +- reg = <0x53f80000 0x4000>;
103 +- interrupts = <0 31 0x04 0 53 0x04>;
104 +- #clock-cells = <1>;
105 +- };
106 + };
107 +
108 + aips@53f00000 { /* AIPS2 */
109 +@@ -126,6 +119,13 @@
110 + reg = <0x53f00000 0x100000>;
111 + ranges;
112 +
113 ++ clks: ccm@53f80000{
114 ++ compatible = "fsl,imx31-ccm";
115 ++ reg = <0x53f80000 0x4000>;
116 ++ interrupts = <31>, <53>;
117 ++ #clock-cells = <1>;
118 ++ };
119 ++
120 + gpt: timer@53f90000 {
121 + compatible = "fsl,imx31-gpt";
122 + reg = <0x53f90000 0x4000>;
123 +diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
124 +index 9672e978d50d..569549079bc7 100644
125 +--- a/arch/arm/include/asm/cputype.h
126 ++++ b/arch/arm/include/asm/cputype.h
127 +@@ -76,6 +76,9 @@
128 + #define ARM_CPU_XSCALE_ARCH_V2 0x4000
129 + #define ARM_CPU_XSCALE_ARCH_V3 0x6000
130 +
131 ++/* Qualcomm implemented cores */
132 ++#define ARM_CPU_PART_SCORPION 0x510002d0
133 ++
134 + extern unsigned int processor_id;
135 +
136 + #ifdef CONFIG_CPU_CP15
137 +diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
138 +index 7b95de601357..b3ebae328fac 100644
139 +--- a/arch/arm/kernel/hw_breakpoint.c
140 ++++ b/arch/arm/kernel/hw_breakpoint.c
141 +@@ -1066,6 +1066,22 @@ static int __init arch_hw_breakpoint_init(void)
142 + return 0;
143 + }
144 +
145 ++ /*
146 ++ * Scorpion CPUs (at least those in APQ8060) seem to set DBGPRSR.SPD
147 ++ * whenever a WFI is issued, even if the core is not powered down, in
148 ++ * violation of the architecture. When DBGPRSR.SPD is set, accesses to
149 ++ * breakpoint and watchpoint registers are treated as undefined, so
150 ++ * this results in boot time and runtime failures when these are
151 ++ * accessed and we unexpectedly take a trap.
152 ++ *
153 ++ * It's not clear if/how this can be worked around, so we blacklist
154 ++ * Scorpion CPUs to avoid these issues.
155 ++ */
156 ++ if ((read_cpuid_id() & 0xff00fff0) == ARM_CPU_PART_SCORPION) {
157 ++ pr_info("Scorpion CPU detected. Hardware breakpoints and watchpoints disabled\n");
158 ++ return 0;
159 ++ }
160 ++
161 + has_ossr = core_has_os_save_restore();
162 +
163 + /* Determine how many BRPs/WRPs are available. */
164 +diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
165 +index f56e5fbfa2fd..25f11492c33f 100644
166 +--- a/arch/arm/mach-davinci/da850.c
167 ++++ b/arch/arm/mach-davinci/da850.c
168 +@@ -297,6 +297,16 @@ static struct clk emac_clk = {
169 + .gpsc = 1,
170 + };
171 +
172 ++/*
173 ++ * In order to avoid adding the emac_clk to the clock lookup table twice (and
174 ++ * screwing up the linked list in the process) create a separate clock for
175 ++ * mdio inheriting the rate from emac_clk.
176 ++ */
177 ++static struct clk mdio_clk = {
178 ++ .name = "mdio",
179 ++ .parent = &emac_clk,
180 ++};
181 ++
182 + static struct clk mcasp_clk = {
183 + .name = "mcasp",
184 + .parent = &pll0_sysclk2,
185 +@@ -461,7 +471,7 @@ static struct clk_lookup da850_clks[] = {
186 + CLK(NULL, "arm", &arm_clk),
187 + CLK(NULL, "rmii", &rmii_clk),
188 + CLK("davinci_emac.1", NULL, &emac_clk),
189 +- CLK("davinci_mdio.0", "fck", &emac_clk),
190 ++ CLK("davinci_mdio.0", "fck", &mdio_clk),
191 + CLK("davinci-mcasp.0", NULL, &mcasp_clk),
192 + CLK("da8xx_lcdc.0", "fck", &lcdc_clk),
193 + CLK("da830-mmc.0", NULL, &mmcsd0_clk),
194 +diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
195 +index 1a468f0fd22e..9d532568b8b3 100644
196 +--- a/arch/arm/mach-ux500/pm.c
197 ++++ b/arch/arm/mach-ux500/pm.c
198 +@@ -128,8 +128,8 @@ bool prcmu_pending_irq(void)
199 + */
200 + bool prcmu_is_cpu_in_wfi(int cpu)
201 + {
202 +- return readl(PRCM_ARM_WFI_STANDBY) & cpu ? PRCM_ARM_WFI_STANDBY_WFI1 :
203 +- PRCM_ARM_WFI_STANDBY_WFI0;
204 ++ return readl(PRCM_ARM_WFI_STANDBY) &
205 ++ (cpu ? PRCM_ARM_WFI_STANDBY_WFI1 : PRCM_ARM_WFI_STANDBY_WFI0);
206 + }
207 +
208 + /*
209 +diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
210 +index 83e4f959ee47..0cad698cdd3c 100644
211 +--- a/arch/arm/xen/enlighten.c
212 ++++ b/arch/arm/xen/enlighten.c
213 +@@ -260,8 +260,7 @@ static int __init xen_guest_init(void)
214 + * for secondary CPUs as they are brought up.
215 + * For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
216 + */
217 +- xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
218 +- sizeof(struct vcpu_info));
219 ++ xen_vcpu_info = alloc_percpu(struct vcpu_info);
220 + if (xen_vcpu_info == NULL)
221 + return -ENOMEM;
222 +
223 +diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
224 +index 6913643bbe54..c136fd53c847 100644
225 +--- a/arch/arm64/include/uapi/asm/ptrace.h
226 ++++ b/arch/arm64/include/uapi/asm/ptrace.h
227 +@@ -75,6 +75,7 @@ struct user_fpsimd_state {
228 + __uint128_t vregs[32];
229 + __u32 fpsr;
230 + __u32 fpcr;
231 ++ __u32 __reserved[2];
232 + };
233 +
234 + struct user_hwdebug_state {
235 +diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
236 +index 028a1b91e2b3..c405e2421fd8 100644
237 +--- a/arch/arm64/kernel/entry.S
238 ++++ b/arch/arm64/kernel/entry.S
239 +@@ -493,7 +493,7 @@ el0_inv:
240 + mov x0, sp
241 + mov x1, #BAD_SYNC
242 + mrs x2, esr_el1
243 +- b bad_mode
244 ++ b bad_el0_sync
245 + ENDPROC(el0_sync)
246 +
247 + .align 6
248 +diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
249 +index 9b9d651446ba..cdf1ec11c015 100644
250 +--- a/arch/arm64/kernel/ptrace.c
251 ++++ b/arch/arm64/kernel/ptrace.c
252 +@@ -442,6 +442,8 @@ static int hw_break_set(struct task_struct *target,
253 + /* (address, ctrl) registers */
254 + limit = regset->n * regset->size;
255 + while (count && offset < limit) {
256 ++ if (count < PTRACE_HBP_ADDR_SZ)
257 ++ return -EINVAL;
258 + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
259 + offset, offset + PTRACE_HBP_ADDR_SZ);
260 + if (ret)
261 +@@ -451,6 +453,8 @@ static int hw_break_set(struct task_struct *target,
262 + return ret;
263 + offset += PTRACE_HBP_ADDR_SZ;
264 +
265 ++ if (!count)
266 ++ break;
267 + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
268 + offset, offset + PTRACE_HBP_CTRL_SZ);
269 + if (ret)
270 +@@ -487,7 +491,7 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
271 + const void *kbuf, const void __user *ubuf)
272 + {
273 + int ret;
274 +- struct user_pt_regs newregs;
275 ++ struct user_pt_regs newregs = task_pt_regs(target)->user_regs;
276 +
277 + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
278 + if (ret)
279 +@@ -517,7 +521,8 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
280 + const void *kbuf, const void __user *ubuf)
281 + {
282 + int ret;
283 +- struct user_fpsimd_state newstate;
284 ++ struct user_fpsimd_state newstate =
285 ++ target->thread.fpsimd_state.user_fpsimd;
286 +
287 + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
288 + if (ret)
289 +@@ -540,7 +545,7 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset,
290 + const void *kbuf, const void __user *ubuf)
291 + {
292 + int ret;
293 +- unsigned long tls;
294 ++ unsigned long tls = target->thread.tp_value;
295 +
296 + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
297 + if (ret)
298 +diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
299 +index 7ffadddb645d..7d1f6c5cfa65 100644
300 +--- a/arch/arm64/kernel/traps.c
301 ++++ b/arch/arm64/kernel/traps.c
302 +@@ -306,16 +306,33 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
303 + }
304 +
305 + /*
306 +- * bad_mode handles the impossible case in the exception vector.
307 ++ * bad_mode handles the impossible case in the exception vector. This is always
308 ++ * fatal.
309 + */
310 + asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
311 + {
312 +- siginfo_t info;
313 +- void __user *pc = (void __user *)instruction_pointer(regs);
314 + console_verbose();
315 +
316 + pr_crit("Bad mode in %s handler detected, code 0x%08x\n",
317 + handler[reason], esr);
318 ++
319 ++ die("Oops - bad mode", regs, 0);
320 ++ local_irq_disable();
321 ++ panic("bad mode");
322 ++}
323 ++
324 ++/*
325 ++ * bad_el0_sync handles unexpected, but potentially recoverable synchronous
326 ++ * exceptions taken from EL0. Unlike bad_mode, this returns.
327 ++ */
328 ++asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
329 ++{
330 ++ siginfo_t info;
331 ++ void __user *pc = (void __user *)instruction_pointer(regs);
332 ++ console_verbose();
333 ++
334 ++ pr_crit("Bad EL0 synchronous exception detected on CPU%d, code 0x%08x\n",
335 ++ smp_processor_id(), esr);
336 + __show_regs(regs);
337 +
338 + info.si_signo = SIGILL;
339 +@@ -323,7 +340,7 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
340 + info.si_code = ILL_ILLOPC;
341 + info.si_addr = pc;
342 +
343 +- arm64_notify_die("Oops - bad mode", regs, &info, 0);
344 ++ force_sig_info(info.si_signo, &info, current);
345 + }
346 +
347 + void __pte_error(const char *file, int line, unsigned long val)
348 +diff --git a/arch/cris/boot/rescue/Makefile b/arch/cris/boot/rescue/Makefile
349 +index 52bd0bd1dd22..d98edbb30a18 100644
350 +--- a/arch/cris/boot/rescue/Makefile
351 ++++ b/arch/cris/boot/rescue/Makefile
352 +@@ -10,6 +10,9 @@
353 +
354 + asflags-y += $(LINUXINCLUDE)
355 + ccflags-y += -O2 $(LINUXINCLUDE)
356 ++
357 ++ifdef CONFIG_ETRAX_AXISFLASHMAP
358 ++
359 + arch-$(CONFIG_ETRAX_ARCH_V10) = v10
360 + arch-$(CONFIG_ETRAX_ARCH_V32) = v32
361 +
362 +@@ -28,6 +31,11 @@ $(obj)/rescue.bin: $(obj)/rescue.o FORCE
363 + $(call if_changed,objcopy)
364 + cp -p $(obj)/rescue.bin $(objtree)
365 +
366 ++else
367 ++$(obj)/rescue.bin:
368 ++
369 ++endif
370 ++
371 + $(obj)/testrescue.bin: $(obj)/testrescue.o
372 + $(OBJCOPY) $(OBJCOPYFLAGS) $(obj)/testrescue.o tr.bin
373 + # Pad it to 784 bytes
374 +diff --git a/arch/m68k/include/asm/delay.h b/arch/m68k/include/asm/delay.h
375 +index d28fa8fe26fe..c598d847d56b 100644
376 +--- a/arch/m68k/include/asm/delay.h
377 ++++ b/arch/m68k/include/asm/delay.h
378 +@@ -114,6 +114,6 @@ static inline void __udelay(unsigned long usecs)
379 + */
380 + #define HZSCALE (268435456 / (1000000 / HZ))
381 +
382 +-#define ndelay(n) __delay(DIV_ROUND_UP((n) * ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6), 1000));
383 ++#define ndelay(n) __delay(DIV_ROUND_UP((n) * ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6), 1000))
384 +
385 + #endif /* defined(_M68K_DELAY_H) */
386 +diff --git a/arch/powerpc/boot/ps3-head.S b/arch/powerpc/boot/ps3-head.S
387 +index b6fcbaf5027b..3dc44b05fb97 100644
388 +--- a/arch/powerpc/boot/ps3-head.S
389 ++++ b/arch/powerpc/boot/ps3-head.S
390 +@@ -57,11 +57,6 @@ __system_reset_overlay:
391 + bctr
392 +
393 + 1:
394 +- /* Save the value at addr zero for a null pointer write check later. */
395 +-
396 +- li r4, 0
397 +- lwz r3, 0(r4)
398 +-
399 + /* Primary delays then goes to _zimage_start in wrapper. */
400 +
401 + or 31, 31, 31 /* db16cyc */
402 +diff --git a/arch/powerpc/boot/ps3.c b/arch/powerpc/boot/ps3.c
403 +index 9954d98871d0..029ea3ce1588 100644
404 +--- a/arch/powerpc/boot/ps3.c
405 ++++ b/arch/powerpc/boot/ps3.c
406 +@@ -119,13 +119,12 @@ void ps3_copy_vectors(void)
407 + flush_cache((void *)0x100, 512);
408 + }
409 +
410 +-void platform_init(unsigned long null_check)
411 ++void platform_init(void)
412 + {
413 + const u32 heapsize = 0x1000000 - (u32)_end; /* 16MiB */
414 + void *chosen;
415 + unsigned long ft_addr;
416 + u64 rm_size;
417 +- unsigned long val;
418 +
419 + console_ops.write = ps3_console_write;
420 + platform_ops.exit = ps3_exit;
421 +@@ -153,11 +152,6 @@ void platform_init(unsigned long null_check)
422 +
423 + printf(" flat tree at 0x%lx\n\r", ft_addr);
424 +
425 +- val = *(unsigned long *)0;
426 +-
427 +- if (val != null_check)
428 +- printf("null check failed: %lx != %lx\n\r", val, null_check);
429 +-
430 + ((kernel_entry_t)0)(ft_addr, 0, NULL);
431 +
432 + ps3_exit();
433 +diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
434 +index 16a7c2326d48..bc47b7986e37 100644
435 +--- a/arch/powerpc/kernel/ibmebus.c
436 ++++ b/arch/powerpc/kernel/ibmebus.c
437 +@@ -180,6 +180,7 @@ static int ibmebus_create_device(struct device_node *dn)
438 + static int ibmebus_create_devices(const struct of_device_id *matches)
439 + {
440 + struct device_node *root, *child;
441 ++ struct device *dev;
442 + int ret = 0;
443 +
444 + root = of_find_node_by_path("/");
445 +@@ -188,9 +189,12 @@ static int ibmebus_create_devices(const struct of_device_id *matches)
446 + if (!of_match_node(matches, child))
447 + continue;
448 +
449 +- if (bus_find_device(&ibmebus_bus_type, NULL, child,
450 +- ibmebus_match_node))
451 ++ dev = bus_find_device(&ibmebus_bus_type, NULL, child,
452 ++ ibmebus_match_node);
453 ++ if (dev) {
454 ++ put_device(dev);
455 + continue;
456 ++ }
457 +
458 + ret = ibmebus_create_device(child);
459 + if (ret) {
460 +@@ -262,6 +266,7 @@ static ssize_t ibmebus_store_probe(struct bus_type *bus,
461 + const char *buf, size_t count)
462 + {
463 + struct device_node *dn = NULL;
464 ++ struct device *dev;
465 + char *path;
466 + ssize_t rc = 0;
467 +
468 +@@ -269,8 +274,10 @@ static ssize_t ibmebus_store_probe(struct bus_type *bus,
469 + if (!path)
470 + return -ENOMEM;
471 +
472 +- if (bus_find_device(&ibmebus_bus_type, NULL, path,
473 +- ibmebus_match_path)) {
474 ++ dev = bus_find_device(&ibmebus_bus_type, NULL, path,
475 ++ ibmebus_match_path);
476 ++ if (dev) {
477 ++ put_device(dev);
478 + printk(KERN_WARNING "%s: %s has already been probed\n",
479 + __func__, path);
480 + rc = -EEXIST;
481 +@@ -306,6 +313,7 @@ static ssize_t ibmebus_store_remove(struct bus_type *bus,
482 + if ((dev = bus_find_device(&ibmebus_bus_type, NULL, path,
483 + ibmebus_match_path))) {
484 + of_device_unregister(to_platform_device(dev));
485 ++ put_device(dev);
486 +
487 + kfree(path);
488 + return count;
489 +diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
490 +index df930727f73b..6ff0f4ef08be 100644
491 +--- a/arch/powerpc/kernel/idle_power7.S
492 ++++ b/arch/powerpc/kernel/idle_power7.S
493 +@@ -110,7 +110,7 @@ power7_enter_nap_mode:
494 + std r0,0(r1)
495 + ptesync
496 + ld r0,0(r1)
497 +-1: cmp cr0,r0,r0
498 ++1: cmpd cr0,r0,r0
499 + bne 1b
500 + PPC_NAP
501 + b .
502 +diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
503 +index ace34137a501..e23298f065df 100644
504 +--- a/arch/powerpc/kernel/misc_32.S
505 ++++ b/arch/powerpc/kernel/misc_32.S
506 +@@ -313,7 +313,7 @@ _GLOBAL(flush_instruction_cache)
507 + lis r3, KERNELBASE@h
508 + iccci 0,r3
509 + #endif
510 +-#elif CONFIG_FSL_BOOKE
511 ++#elif defined(CONFIG_FSL_BOOKE)
512 + BEGIN_FTR_SECTION
513 + mfspr r3,SPRN_L1CSR0
514 + ori r3,r3,L1CSR0_CFI|L1CSR0_CLFC
515 +diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
516 +index 29559831c94f..43849c3d6275 100644
517 +--- a/arch/x86/include/asm/apic.h
518 ++++ b/arch/x86/include/asm/apic.h
519 +@@ -710,9 +710,8 @@ static inline void exiting_irq(void)
520 +
521 + static inline void exiting_ack_irq(void)
522 + {
523 +- irq_exit();
524 +- /* Ack only at the end to avoid potential reentry */
525 + ack_APIC_irq();
526 ++ irq_exit();
527 + }
528 +
529 + extern void ioapic_zap_locks(void);
530 +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
531 +index 9364936b47c2..f415fd820c86 100644
532 +--- a/arch/x86/kernel/cpu/common.c
533 ++++ b/arch/x86/kernel/cpu/common.c
534 +@@ -1067,7 +1067,7 @@ static __init int setup_disablecpuid(char *arg)
535 + {
536 + int bit;
537 +
538 +- if (get_option(&arg, &bit) && bit < NCAPINTS*32)
539 ++ if (get_option(&arg, &bit) && bit >= 0 && bit < NCAPINTS * 32)
540 + setup_clear_cpu_cap(bit);
541 + else
542 + return 0;
543 +diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
544 +index 0271272d55d0..050784bcd71f 100644
545 +--- a/arch/x86/kernel/cpu/perf_event.c
546 ++++ b/arch/x86/kernel/cpu/perf_event.c
547 +@@ -64,7 +64,7 @@ u64 x86_perf_event_update(struct perf_event *event)
548 + int shift = 64 - x86_pmu.cntval_bits;
549 + u64 prev_raw_count, new_raw_count;
550 + int idx = hwc->idx;
551 +- s64 delta;
552 ++ u64 delta;
553 +
554 + if (idx == INTEL_PMC_IDX_FIXED_BTS)
555 + return 0;
556 +diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
557 +index 04e7df068f0e..0c6527a168f0 100644
558 +--- a/arch/x86/kernel/cpu/perf_event_intel.c
559 ++++ b/arch/x86/kernel/cpu/perf_event_intel.c
560 +@@ -2578,7 +2578,7 @@ __init int intel_pmu_init(void)
561 +
562 + /* Support full width counters using alternative MSR range */
563 + if (x86_pmu.intel_cap.full_width_write) {
564 +- x86_pmu.max_period = x86_pmu.cntval_mask;
565 ++ x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
566 + x86_pmu.perfctr = MSR_IA32_PMC0;
567 + pr_cont("full-width counters, ");
568 + }
569 +diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
570 +index 1f1c33d0a13c..a78db5ed8b3f 100644
571 +--- a/arch/x86/kernel/entry_32.S
572 ++++ b/arch/x86/kernel/entry_32.S
573 +@@ -1113,8 +1113,8 @@ ftrace_graph_call:
574 + jmp ftrace_stub
575 + #endif
576 +
577 +-.globl ftrace_stub
578 +-ftrace_stub:
579 ++/* This is weak to keep gas from relaxing the jumps */
580 ++WEAK(ftrace_stub)
581 + ret
582 + END(ftrace_caller)
583 +
584 +diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
585 +index ead3e7c9672e..ceb8d113938b 100644
586 +--- a/arch/x86/kernel/entry_64.S
587 ++++ b/arch/x86/kernel/entry_64.S
588 +@@ -122,7 +122,8 @@ GLOBAL(ftrace_graph_call)
589 + jmp ftrace_stub
590 + #endif
591 +
592 +-GLOBAL(ftrace_stub)
593 ++/* This is weak to keep gas from relaxing the jumps */
594 ++WEAK(ftrace_stub)
595 + retq
596 + END(ftrace_caller)
597 +
598 +diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
599 +index 7c3a5a61f2e4..e5d895fa1fe0 100644
600 +--- a/arch/x86/kernel/smp.c
601 ++++ b/arch/x86/kernel/smp.c
602 +@@ -267,8 +267,8 @@ __visible void smp_reschedule_interrupt(struct pt_regs *regs)
603 +
604 + static inline void smp_entering_irq(void)
605 + {
606 +- ack_APIC_irq();
607 + irq_enter();
608 ++ ack_APIC_irq();
609 + }
610 +
611 + __visible void smp_trace_reschedule_interrupt(struct pt_regs *regs)
612 +diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
613 +index 77d373211053..0b45efc5318f 100644
614 +--- a/arch/x86/kvm/emulate.c
615 ++++ b/arch/x86/kvm/emulate.c
616 +@@ -744,6 +744,20 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
617 + return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
618 + }
619 +
620 ++static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
621 ++ struct segmented_address addr,
622 ++ void *data,
623 ++ unsigned int size)
624 ++{
625 ++ int rc;
626 ++ ulong linear;
627 ++
628 ++ rc = linearize(ctxt, addr, size, true, &linear);
629 ++ if (rc != X86EMUL_CONTINUE)
630 ++ return rc;
631 ++ return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
632 ++}
633 ++
634 + /*
635 + * Fetch the next byte of the instruction being emulated which is pointed to
636 + * by ctxt->_eip, then increment ctxt->_eip.
637 +@@ -1444,7 +1458,6 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
638 + &ctxt->exception);
639 + }
640 +
641 +-/* Does not support long mode */
642 + static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
643 + u16 selector, int seg,
644 + struct desc_struct *desc)
645 +@@ -1458,6 +1471,21 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
646 + int ret;
647 + u16 dummy;
648 +
649 ++
650 ++ /*
651 ++ * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
652 ++ * they can load it at CPL<3 (Intel's manual says only LSS can,
653 ++ * but it's wrong).
654 ++ *
655 ++ * However, the Intel manual says that putting IST=1/DPL=3 in
656 ++ * an interrupt gate will result in SS=3 (the AMD manual instead
657 ++ * says it doesn't), so allow SS=3 in __load_segment_descriptor
658 ++ * and only forbid it here.
659 ++ */
660 ++ if (seg == VCPU_SREG_SS && selector == 3 &&
661 ++ ctxt->mode == X86EMUL_MODE_PROT64)
662 ++ return emulate_exception(ctxt, GP_VECTOR, 0, true);
663 ++
664 + memset(&seg_desc, 0, sizeof seg_desc);
665 +
666 + if (ctxt->mode == X86EMUL_MODE_REAL) {
667 +@@ -1480,20 +1508,34 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
668 + rpl = selector & 3;
669 + cpl = ctxt->ops->cpl(ctxt);
670 +
671 +- /* NULL selector is not valid for TR, CS and SS (except for long mode) */
672 +- if ((seg == VCPU_SREG_CS
673 +- || (seg == VCPU_SREG_SS
674 +- && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
675 +- || seg == VCPU_SREG_TR)
676 +- && null_selector)
677 +- goto exception;
678 +-
679 + /* TR should be in GDT only */
680 + if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
681 + goto exception;
682 +
683 +- if (null_selector) /* for NULL selector skip all following checks */
684 ++ /* NULL selector is not valid for TR, CS and (except for long mode) SS */
685 ++ if (null_selector) {
686 ++ if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
687 ++ goto exception;
688 ++
689 ++ if (seg == VCPU_SREG_SS) {
690 ++ if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
691 ++ goto exception;
692 ++
693 ++ /*
694 ++ * ctxt->ops->set_segment expects the CPL to be in
695 ++ * SS.DPL, so fake an expand-up 32-bit data segment.
696 ++ */
697 ++ seg_desc.type = 3;
698 ++ seg_desc.p = 1;
699 ++ seg_desc.s = 1;
700 ++ seg_desc.dpl = cpl;
701 ++ seg_desc.d = 1;
702 ++ seg_desc.g = 1;
703 ++ }
704 ++
705 ++ /* Skip all following checks */
706 + goto load;
707 ++ }
708 +
709 + ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
710 + if (ret != X86EMUL_CONTINUE)
711 +@@ -3179,8 +3221,8 @@ static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
712 + }
713 + /* Disable writeback. */
714 + ctxt->dst.type = OP_NONE;
715 +- return segmented_write(ctxt, ctxt->dst.addr.mem,
716 +- &desc_ptr, 2 + ctxt->op_bytes);
717 ++ return segmented_write_std(ctxt, ctxt->dst.addr.mem,
718 ++ &desc_ptr, 2 + ctxt->op_bytes);
719 + }
720 +
721 + static int em_sgdt(struct x86_emulate_ctxt *ctxt)
722 +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
723 +index a4ce2b2f1418..33d479540373 100644
724 +--- a/arch/x86/kvm/lapic.c
725 ++++ b/arch/x86/kvm/lapic.c
726 +@@ -1908,3 +1908,9 @@ void kvm_lapic_init(void)
727 + jump_label_rate_limit(&apic_hw_disabled, HZ);
728 + jump_label_rate_limit(&apic_sw_disabled, HZ);
729 + }
730 ++
731 ++void kvm_lapic_exit(void)
732 ++{
733 ++ static_key_deferred_flush(&apic_hw_disabled);
734 ++ static_key_deferred_flush(&apic_sw_disabled);
735 ++}
736 +diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
737 +index fc87568fc409..f1fd0753b6ba 100644
738 +--- a/arch/x86/kvm/lapic.h
739 ++++ b/arch/x86/kvm/lapic.h
740 +@@ -93,6 +93,7 @@ static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu)
741 +
742 + int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data);
743 + void kvm_lapic_init(void);
744 ++void kvm_lapic_exit(void);
745 +
746 + static inline u32 kvm_apic_get_reg(struct kvm_lapic *apic, int reg_off)
747 + {
748 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
749 +index b81c81bce181..c7f2b3c52d92 100644
750 +--- a/arch/x86/kvm/vmx.c
751 ++++ b/arch/x86/kvm/vmx.c
752 +@@ -1052,10 +1052,10 @@ static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
753 + return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
754 + }
755 +
756 +-static inline bool is_exception(u32 intr_info)
757 ++static inline bool is_nmi(u32 intr_info)
758 + {
759 + return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
760 +- == (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK);
761 ++ == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
762 + }
763 +
764 + static void nested_vmx_vmexit(struct kvm_vcpu *vcpu);
765 +@@ -4769,7 +4769,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
766 + if (is_machine_check(intr_info))
767 + return handle_machine_check(vcpu);
768 +
769 +- if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
770 ++ if (is_nmi(intr_info))
771 + return 1; /* already handled by vmx_vcpu_run() */
772 +
773 + if (is_no_device(intr_info)) {
774 +@@ -6653,7 +6653,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
775 +
776 + switch (exit_reason) {
777 + case EXIT_REASON_EXCEPTION_NMI:
778 +- if (!is_exception(intr_info))
779 ++ if (is_nmi(intr_info))
780 + return 0;
781 + else if (is_page_fault(intr_info))
782 + return enable_ept;
783 +@@ -6962,8 +6962,7 @@ static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
784 + kvm_machine_check();
785 +
786 + /* We need to handle NMIs before interrupts are enabled */
787 +- if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
788 +- (exit_intr_info & INTR_INFO_VALID_MASK)) {
789 ++ if (is_nmi(exit_intr_info)) {
790 + kvm_before_handle_nmi(&vmx->vcpu);
791 + asm("int $2");
792 + kvm_after_handle_nmi(&vmx->vcpu);
793 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
794 +index 8562aff68884..69e7b0b9a6bb 100644
795 +--- a/arch/x86/kvm/x86.c
796 ++++ b/arch/x86/kvm/x86.c
797 +@@ -5573,6 +5573,7 @@ out:
798 +
799 + void kvm_arch_exit(void)
800 + {
801 ++ kvm_lapic_exit();
802 + perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
803 +
804 + if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
805 +diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
806 +index a24e9c2e95da..a33c61c5e34a 100644
807 +--- a/arch/x86/pci/acpi.c
808 ++++ b/arch/x86/pci/acpi.c
809 +@@ -118,6 +118,16 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = {
810 + DMI_MATCH(DMI_BIOS_VERSION, "6JET85WW (1.43 )"),
811 + },
812 + },
813 ++ /* https://bugzilla.kernel.org/show_bug.cgi?id=42606 */
814 ++ {
815 ++ .callback = set_nouse_crs,
816 ++ .ident = "Supermicro X8DTH",
817 ++ .matches = {
818 ++ DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
819 ++ DMI_MATCH(DMI_PRODUCT_NAME, "X8DTH-i/6/iF/6F"),
820 ++ DMI_MATCH(DMI_BIOS_VERSION, "2.0a"),
821 ++ },
822 ++ },
823 +
824 + /* https://bugzilla.kernel.org/show_bug.cgi?id=15362 */
825 + {
826 +diff --git a/block/bsg.c b/block/bsg.c
827 +index 420a5a9f1b23..76801e57f556 100644
828 +--- a/block/bsg.c
829 ++++ b/block/bsg.c
830 +@@ -675,6 +675,9 @@ bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
831 +
832 + dprintk("%s: write %Zd bytes\n", bd->name, count);
833 +
834 ++ if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
835 ++ return -EINVAL;
836 ++
837 + bsg_set_block(bd, file);
838 +
839 + bytes_written = 0;
840 +diff --git a/drivers/base/core.c b/drivers/base/core.c
841 +index 944fecd32e9f..449f7096974d 100644
842 +--- a/drivers/base/core.c
843 ++++ b/drivers/base/core.c
844 +@@ -874,11 +874,29 @@ static struct kobject *get_device_parent(struct device *dev,
845 + return NULL;
846 + }
847 +
848 ++static inline bool live_in_glue_dir(struct kobject *kobj,
849 ++ struct device *dev)
850 ++{
851 ++ if (!kobj || !dev->class ||
852 ++ kobj->kset != &dev->class->p->glue_dirs)
853 ++ return false;
854 ++ return true;
855 ++}
856 ++
857 ++static inline struct kobject *get_glue_dir(struct device *dev)
858 ++{
859 ++ return dev->kobj.parent;
860 ++}
861 ++
862 ++/*
863 ++ * make sure cleaning up dir as the last step, we need to make
864 ++ * sure .release handler of kobject is run with holding the
865 ++ * global lock
866 ++ */
867 + static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
868 + {
869 + /* see if we live in a "glue" directory */
870 +- if (!glue_dir || !dev->class ||
871 +- glue_dir->kset != &dev->class->p->glue_dirs)
872 ++ if (!live_in_glue_dir(glue_dir, dev))
873 + return;
874 +
875 + mutex_lock(&gdp_mutex);
876 +@@ -886,11 +904,6 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
877 + mutex_unlock(&gdp_mutex);
878 + }
879 +
880 +-static void cleanup_device_parent(struct device *dev)
881 +-{
882 +- cleanup_glue_dir(dev, dev->kobj.parent);
883 +-}
884 +-
885 + static int device_add_class_symlinks(struct device *dev)
886 + {
887 + int error;
888 +@@ -1054,6 +1067,7 @@ int device_add(struct device *dev)
889 + struct kobject *kobj;
890 + struct class_interface *class_intf;
891 + int error = -EINVAL;
892 ++ struct kobject *glue_dir = NULL;
893 +
894 + dev = get_device(dev);
895 + if (!dev)
896 +@@ -1098,8 +1112,10 @@ int device_add(struct device *dev)
897 + /* first, register with generic layer. */
898 + /* we require the name to be set before, and pass NULL */
899 + error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
900 +- if (error)
901 ++ if (error) {
902 ++ glue_dir = get_glue_dir(dev);
903 + goto Error;
904 ++ }
905 +
906 + /* notify platform of device entry */
907 + if (platform_notify)
908 +@@ -1182,11 +1198,11 @@ done:
909 + device_remove_file(dev, &dev_attr_uevent);
910 + attrError:
911 + kobject_uevent(&dev->kobj, KOBJ_REMOVE);
912 ++ glue_dir = get_glue_dir(dev);
913 + kobject_del(&dev->kobj);
914 + Error:
915 +- cleanup_device_parent(dev);
916 +- if (parent)
917 +- put_device(parent);
918 ++ cleanup_glue_dir(dev, glue_dir);
919 ++ put_device(parent);
920 + name_error:
921 + kfree(dev->p);
922 + dev->p = NULL;
923 +@@ -1261,6 +1277,7 @@ EXPORT_SYMBOL_GPL(put_device);
924 + void device_del(struct device *dev)
925 + {
926 + struct device *parent = dev->parent;
927 ++ struct kobject *glue_dir = NULL;
928 + struct class_interface *class_intf;
929 +
930 + /* Notify clients of device removal. This call must come
931 +@@ -1302,8 +1319,9 @@ void device_del(struct device *dev)
932 + if (platform_notify_remove)
933 + platform_notify_remove(dev);
934 + kobject_uevent(&dev->kobj, KOBJ_REMOVE);
935 +- cleanup_device_parent(dev);
936 ++ glue_dir = get_glue_dir(dev);
937 + kobject_del(&dev->kobj);
938 ++ cleanup_glue_dir(dev, glue_dir);
939 + put_device(parent);
940 + }
941 + EXPORT_SYMBOL_GPL(device_del);
942 +diff --git a/drivers/clk/clk-wm831x.c b/drivers/clk/clk-wm831x.c
943 +index 805b4c344006..ee5f2c985f4d 100644
944 +--- a/drivers/clk/clk-wm831x.c
945 ++++ b/drivers/clk/clk-wm831x.c
946 +@@ -248,7 +248,7 @@ static int wm831x_clkout_is_prepared(struct clk_hw *hw)
947 + if (ret < 0) {
948 + dev_err(wm831x->dev, "Unable to read CLOCK_CONTROL_1: %d\n",
949 + ret);
950 +- return true;
951 ++ return false;
952 + }
953 +
954 + return (ret & WM831X_CLKOUT_ENA) != 0;
955 +diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
956 +index fc0e502022de..26bfe09ce0fb 100644
957 +--- a/drivers/clocksource/exynos_mct.c
958 ++++ b/drivers/clocksource/exynos_mct.c
959 +@@ -398,13 +398,11 @@ static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
960 + return IRQ_HANDLED;
961 + }
962 +
963 +-static int exynos4_local_timer_setup(struct clock_event_device *evt)
964 ++static int exynos4_local_timer_setup(struct mct_clock_event_device *mevt)
965 + {
966 +- struct mct_clock_event_device *mevt;
967 ++ struct clock_event_device *evt = &mevt->evt;
968 + unsigned int cpu = smp_processor_id();
969 +
970 +- mevt = container_of(evt, struct mct_clock_event_device, evt);
971 +-
972 + mevt->base = EXYNOS4_MCT_L_BASE(cpu);
973 + sprintf(mevt->name, "mct_tick%d", cpu);
974 +
975 +@@ -433,12 +431,15 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
976 + return 0;
977 + }
978 +
979 +-static void exynos4_local_timer_stop(struct clock_event_device *evt)
980 ++static void exynos4_local_timer_stop(struct mct_clock_event_device *mevt)
981 + {
982 ++ struct clock_event_device *evt = &mevt->evt;
983 ++
984 + evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
985 + if (mct_int_type == MCT_INT_SPI) {
986 + if (evt->irq != -1)
987 + disable_irq_nosync(evt->irq);
988 ++ exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
989 + } else {
990 + disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
991 + }
992 +@@ -456,11 +457,11 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
993 + switch (action & ~CPU_TASKS_FROZEN) {
994 + case CPU_STARTING:
995 + mevt = this_cpu_ptr(&percpu_mct_tick);
996 +- exynos4_local_timer_setup(&mevt->evt);
997 ++ exynos4_local_timer_setup(mevt);
998 + break;
999 + case CPU_DYING:
1000 + mevt = this_cpu_ptr(&percpu_mct_tick);
1001 +- exynos4_local_timer_stop(&mevt->evt);
1002 ++ exynos4_local_timer_stop(mevt);
1003 + break;
1004 + }
1005 +
1006 +@@ -526,7 +527,7 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem
1007 + goto out_irq;
1008 +
1009 + /* Immediately configure the timer on the boot CPU */
1010 +- exynos4_local_timer_setup(&mevt->evt);
1011 ++ exynos4_local_timer_setup(mevt);
1012 + return;
1013 +
1014 + out_irq:
1015 +diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
1016 +index 7c63b72ecd75..66f549399dc4 100644
1017 +--- a/drivers/crypto/caam/caamalg.c
1018 ++++ b/drivers/crypto/caam/caamalg.c
1019 +@@ -418,7 +418,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
1020 +
1021 + /* Will read cryptlen */
1022 + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
1023 +- aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
1024 ++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
1025 ++ FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
1026 ++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
1027 +
1028 + /* Write ICV */
1029 + append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
1030 +diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
1031 +index 88fc3a5fa7c4..32be5cb1f797 100644
1032 +--- a/drivers/gpu/drm/ast/ast_main.c
1033 ++++ b/drivers/gpu/drm/ast/ast_main.c
1034 +@@ -120,7 +120,8 @@ static int ast_get_dram_info(struct drm_device *dev)
1035 + ast_write32(ast, 0x10000, 0xfc600309);
1036 +
1037 + do {
1038 +- ;
1039 ++ if (pci_channel_offline(dev->pdev))
1040 ++ return -EIO;
1041 + } while (ast_read32(ast, 0x10000) != 0x01);
1042 + data = ast_read32(ast, 0x10004);
1043 +
1044 +@@ -343,7 +344,9 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
1045 + ast_detect_chip(dev);
1046 +
1047 + if (ast->chip != AST1180) {
1048 +- ast_get_dram_info(dev);
1049 ++ ret = ast_get_dram_info(dev);
1050 ++ if (ret)
1051 ++ goto out_free;
1052 + ast->vram_size = ast_get_vram_info(dev);
1053 + DRM_INFO("dram %d %d %d %08x\n", ast->mclk, ast->dram_type, ast->dram_bus_width, ast->vram_size);
1054 + }
1055 +diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
1056 +index fcb4e9ff1f20..09c155737daf 100644
1057 +--- a/drivers/gpu/drm/gma500/psb_drv.c
1058 ++++ b/drivers/gpu/drm/gma500/psb_drv.c
1059 +@@ -620,6 +620,9 @@ static const struct file_operations psb_gem_fops = {
1060 + .open = drm_open,
1061 + .release = drm_release,
1062 + .unlocked_ioctl = psb_unlocked_ioctl,
1063 ++#ifdef CONFIG_COMPAT
1064 ++ .compat_ioctl = drm_compat_ioctl,
1065 ++#endif
1066 + .mmap = drm_gem_mmap,
1067 + .poll = drm_poll,
1068 + .read = drm_read,
1069 +diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
1070 +index 3265792f1990..f7af7a8e4cd0 100644
1071 +--- a/drivers/gpu/drm/radeon/si_dpm.c
1072 ++++ b/drivers/gpu/drm/radeon/si_dpm.c
1073 +@@ -2943,24 +2943,12 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
1074 + (rdev->pdev->device == 0x6817) ||
1075 + (rdev->pdev->device == 0x6806))
1076 + max_mclk = 120000;
1077 +- } else if (rdev->family == CHIP_VERDE) {
1078 +- if ((rdev->pdev->revision == 0x81) ||
1079 +- (rdev->pdev->revision == 0x83) ||
1080 +- (rdev->pdev->revision == 0x87) ||
1081 +- (rdev->pdev->device == 0x6820) ||
1082 +- (rdev->pdev->device == 0x6821) ||
1083 +- (rdev->pdev->device == 0x6822) ||
1084 +- (rdev->pdev->device == 0x6823) ||
1085 +- (rdev->pdev->device == 0x682A) ||
1086 +- (rdev->pdev->device == 0x682B)) {
1087 +- max_sclk = 75000;
1088 +- max_mclk = 80000;
1089 +- }
1090 + } else if (rdev->family == CHIP_OLAND) {
1091 + if ((rdev->pdev->revision == 0xC7) ||
1092 + (rdev->pdev->revision == 0x80) ||
1093 + (rdev->pdev->revision == 0x81) ||
1094 + (rdev->pdev->revision == 0x83) ||
1095 ++ (rdev->pdev->revision == 0x87) ||
1096 + (rdev->pdev->device == 0x6604) ||
1097 + (rdev->pdev->device == 0x6605)) {
1098 + max_sclk = 75000;
1099 +diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c
1100 +index c4ef3bc726e3..e299576004ce 100644
1101 +--- a/drivers/hid/hid-cypress.c
1102 ++++ b/drivers/hid/hid-cypress.c
1103 +@@ -39,6 +39,9 @@ static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
1104 + if (!(quirks & CP_RDESC_SWAPPED_MIN_MAX))
1105 + return rdesc;
1106 +
1107 ++ if (*rsize < 4)
1108 ++ return rdesc;
1109 ++
1110 + for (i = 0; i < *rsize - 4; i++)
1111 + if (rdesc[i] == 0x29 && rdesc[i + 2] == 0x19) {
1112 + __u8 tmp;
1113 +diff --git a/drivers/hwmon/ds620.c b/drivers/hwmon/ds620.c
1114 +index 0918b9136588..2a50ab613238 100644
1115 +--- a/drivers/hwmon/ds620.c
1116 ++++ b/drivers/hwmon/ds620.c
1117 +@@ -166,7 +166,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
1118 + if (res)
1119 + return res;
1120 +
1121 +- val = (val * 10 / 625) * 8;
1122 ++ val = (clamp_val(val, -128000, 128000) * 10 / 625) * 8;
1123 +
1124 + mutex_lock(&data->update_lock);
1125 + data->temp[attr->index] = val;
1126 +diff --git a/drivers/hwmon/g762.c b/drivers/hwmon/g762.c
1127 +index b4b8b5bef718..3bc0e8224b33 100644
1128 +--- a/drivers/hwmon/g762.c
1129 ++++ b/drivers/hwmon/g762.c
1130 +@@ -193,14 +193,17 @@ static inline unsigned int rpm_from_cnt(u8 cnt, u32 clk_freq, u16 p,
1131 + * Convert fan RPM value from sysfs into count value for fan controller
1132 + * register (FAN_SET_CNT).
1133 + */
1134 +-static inline unsigned char cnt_from_rpm(u32 rpm, u32 clk_freq, u16 p,
1135 ++static inline unsigned char cnt_from_rpm(unsigned long rpm, u32 clk_freq, u16 p,
1136 + u8 clk_div, u8 gear_mult)
1137 + {
1138 +- if (!rpm) /* to stop the fan, set cnt to 255 */
1139 ++ unsigned long f1 = clk_freq * 30 * gear_mult;
1140 ++ unsigned long f2 = p * clk_div;
1141 ++
1142 ++ if (!rpm) /* to stop the fan, set cnt to 255 */
1143 + return 0xff;
1144 +
1145 +- return clamp_val(((clk_freq * 30 * gear_mult) / (rpm * p * clk_div)),
1146 +- 0, 255);
1147 ++ rpm = clamp_val(rpm, f1 / (255 * f2), ULONG_MAX / f2);
1148 ++ return DIV_ROUND_CLOSEST(f1, rpm * f2);
1149 + }
1150 +
1151 + /* helper to grab and cache data, at most one time per second */
1152 +diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
1153 +index c3ccdea3d180..fa3ecec524fa 100644
1154 +--- a/drivers/i2c/i2c-dev.c
1155 ++++ b/drivers/i2c/i2c-dev.c
1156 +@@ -328,7 +328,7 @@ static noinline int i2cdev_ioctl_smbus(struct i2c_client *client,
1157 + unsigned long arg)
1158 + {
1159 + struct i2c_smbus_ioctl_data data_arg;
1160 +- union i2c_smbus_data temp;
1161 ++ union i2c_smbus_data temp = {};
1162 + int datasize, res;
1163 +
1164 + if (copy_from_user(&data_arg,
1165 +diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
1166 +index 4c837e66516b..f93fca41464f 100644
1167 +--- a/drivers/infiniband/core/mad.c
1168 ++++ b/drivers/infiniband/core/mad.c
1169 +@@ -1598,7 +1598,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
1170 + if (!class)
1171 + goto out;
1172 + if (convert_mgmt_class(mad->mad_hdr.mgmt_class) >=
1173 +- IB_MGMT_MAX_METHODS)
1174 ++ ARRAY_SIZE(class->method_table))
1175 + goto out;
1176 + method = class->method_table[convert_mgmt_class(
1177 + mad->mad_hdr.mgmt_class)];
1178 +diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
1179 +index 180d7f436ed5..2f861b59cbc1 100644
1180 +--- a/drivers/infiniband/core/multicast.c
1181 ++++ b/drivers/infiniband/core/multicast.c
1182 +@@ -516,8 +516,11 @@ static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
1183 + if (status)
1184 + process_join_error(group, status);
1185 + else {
1186 +- ib_find_pkey(group->port->dev->device, group->port->port_num,
1187 +- be16_to_cpu(rec->pkey), &pkey_index);
1188 ++
1189 ++ if (ib_find_pkey(group->port->dev->device,
1190 ++ group->port->port_num, be16_to_cpu(rec->pkey),
1191 ++ &pkey_index))
1192 ++ pkey_index = MCAST_INVALID_PKEY_INDEX;
1193 +
1194 + spin_lock_irq(&group->port->lock);
1195 + group->rec = *rec;
1196 +diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
1197 +index f55d69500a5f..3a85e7669068 100644
1198 +--- a/drivers/infiniband/hw/mlx4/ah.c
1199 ++++ b/drivers/infiniband/hw/mlx4/ah.c
1200 +@@ -118,7 +118,9 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
1201 + !(1 << ah->av.eth.stat_rate & dev->caps.stat_rate_support))
1202 + --ah->av.eth.stat_rate;
1203 + }
1204 +-
1205 ++ ah->av.eth.sl_tclass_flowlabel |=
1206 ++ cpu_to_be32((ah_attr->grh.traffic_class << 20) |
1207 ++ ah_attr->grh.flow_label);
1208 + /*
1209 + * HW requires multicast LID so we just choose one.
1210 + */
1211 +@@ -126,7 +128,7 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
1212 + ah->av.ib.dlid = cpu_to_be16(0xc000);
1213 +
1214 + memcpy(ah->av.eth.dgid, ah_attr->grh.dgid.raw, 16);
1215 +- ah->av.eth.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 29);
1216 ++ ah->av.eth.sl_tclass_flowlabel |= cpu_to_be32(ah_attr->sl << 29);
1217 +
1218 + return &ah->ibah;
1219 + }
1220 +diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
1221 +index f0612645de99..9407a31afe20 100644
1222 +--- a/drivers/infiniband/hw/mlx4/main.c
1223 ++++ b/drivers/infiniband/hw/mlx4/main.c
1224 +@@ -335,9 +335,11 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
1225 + if (err)
1226 + goto out;
1227 +
1228 +- props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ?
1229 +- IB_WIDTH_4X : IB_WIDTH_1X;
1230 +- props->active_speed = IB_SPEED_QDR;
1231 ++ props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ||
1232 ++ (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
1233 ++ IB_WIDTH_4X : IB_WIDTH_1X;
1234 ++ props->active_speed = (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
1235 ++ IB_SPEED_FDR : IB_SPEED_QDR;
1236 + props->port_cap_flags = IB_PORT_CM_SUP;
1237 + props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
1238 + props->max_msg_sz = mdev->dev->caps.max_msg_sz;
1239 +diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
1240 +index 5be10fb2edf2..a711aab97ae7 100644
1241 +--- a/drivers/input/joystick/xpad.c
1242 ++++ b/drivers/input/joystick/xpad.c
1243 +@@ -1094,6 +1094,12 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
1244 + input_dev->name = xpad_device[i].name;
1245 + input_dev->phys = xpad->phys;
1246 + usb_to_input_id(udev, &input_dev->id);
1247 ++
1248 ++ if (xpad->xtype == XTYPE_XBOX360W) {
1249 ++ /* x360w controllers and the receiver have different ids */
1250 ++ input_dev->id.product = 0x02a1;
1251 ++ }
1252 ++
1253 + input_dev->dev.parent = &intf->dev;
1254 +
1255 + input_set_drvdata(input_dev, xpad);
1256 +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
1257 +index ccb36fb565de..3f3c517f2039 100644
1258 +--- a/drivers/input/serio/i8042-x86ia64io.h
1259 ++++ b/drivers/input/serio/i8042-x86ia64io.h
1260 +@@ -211,6 +211,12 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
1261 + DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"),
1262 + },
1263 + },
1264 ++ {
1265 ++ .matches = {
1266 ++ DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
1267 ++ DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
1268 ++ },
1269 ++ },
1270 + { }
1271 + };
1272 +
1273 +diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
1274 +index 71f9cd108590..557824a7e5b8 100644
1275 +--- a/drivers/iommu/amd_iommu.c
1276 ++++ b/drivers/iommu/amd_iommu.c
1277 +@@ -1044,7 +1044,7 @@ again:
1278 + next_tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
1279 + left = (head - next_tail) % iommu->cmd_buf_size;
1280 +
1281 +- if (left <= 2) {
1282 ++ if (left <= 0x20) {
1283 + struct iommu_cmd sync_cmd;
1284 + volatile u64 sem = 0;
1285 + int ret;
1286 +diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
1287 +index 3ac9c4194814..53dfe1693e50 100644
1288 +--- a/drivers/isdn/gigaset/ser-gigaset.c
1289 ++++ b/drivers/isdn/gigaset/ser-gigaset.c
1290 +@@ -787,8 +787,10 @@ static int __init ser_gigaset_init(void)
1291 + driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
1292 + GIGASET_MODULENAME, GIGASET_DEVNAME,
1293 + &ops, THIS_MODULE);
1294 +- if (!driver)
1295 ++ if (!driver) {
1296 ++ rc = -ENOMEM;
1297 + goto error;
1298 ++ }
1299 +
1300 + rc = tty_register_ldisc(N_GIGASET_M101, &gigaset_ldisc);
1301 + if (rc != 0) {
1302 +diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
1303 +index 0f64dc596bce..c1b36e208669 100644
1304 +--- a/drivers/md/dm-crypt.c
1305 ++++ b/drivers/md/dm-crypt.c
1306 +@@ -1283,12 +1283,15 @@ static int crypt_set_key(struct crypt_config *cc, char *key)
1307 + if (!cc->key_size && strcmp(key, "-"))
1308 + goto out;
1309 +
1310 ++ /* clear the flag since following operations may invalidate previously valid key */
1311 ++ clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
1312 ++
1313 + if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
1314 + goto out;
1315 +
1316 +- set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
1317 +-
1318 + r = crypt_setkey_allcpus(cc);
1319 ++ if (!r)
1320 ++ set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
1321 +
1322 + out:
1323 + /* Hex key string not needed after here, so wipe it. */
1324 +diff --git a/drivers/md/md.c b/drivers/md/md.c
1325 +index 81bf511b3182..87e8cd29ca5f 100644
1326 +--- a/drivers/md/md.c
1327 ++++ b/drivers/md/md.c
1328 +@@ -6431,7 +6431,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
1329 + /* need to ensure recovery thread has run */
1330 + wait_event_interruptible_timeout(mddev->sb_wait,
1331 + !test_bit(MD_RECOVERY_NEEDED,
1332 +- &mddev->flags),
1333 ++ &mddev->recovery),
1334 + msecs_to_jiffies(5000));
1335 + if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
1336 + /* Need to flush page cache, and ensure no-one else opens
1337 +diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
1338 +index 8a8f06bcde60..1543f37c272a 100644
1339 +--- a/drivers/md/persistent-data/dm-space-map-metadata.c
1340 ++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
1341 +@@ -773,15 +773,13 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
1342 + memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
1343 +
1344 + r = sm_ll_new_metadata(&smm->ll, tm);
1345 ++ if (!r) {
1346 ++ r = sm_ll_extend(&smm->ll, nr_blocks);
1347 ++ }
1348 ++ memcpy(&smm->sm, &ops, sizeof(smm->sm));
1349 + if (r)
1350 + return r;
1351 +
1352 +- r = sm_ll_extend(&smm->ll, nr_blocks);
1353 +- if (r)
1354 +- return r;
1355 +-
1356 +- memcpy(&smm->sm, &ops, sizeof(smm->sm));
1357 +-
1358 + /*
1359 + * Now we need to update the newly created data structures with the
1360 + * allocated blocks that they were built from.
1361 +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
1362 +index 9fbc77c6e132..01757b23e1fc 100644
1363 +--- a/drivers/md/raid5.c
1364 ++++ b/drivers/md/raid5.c
1365 +@@ -5943,6 +5943,15 @@ static int run(struct mddev *mddev)
1366 + stripe = (stripe | (stripe-1)) + 1;
1367 + mddev->queue->limits.discard_alignment = stripe;
1368 + mddev->queue->limits.discard_granularity = stripe;
1369 ++
1370 ++ /*
1371 ++ * We use 16-bit counter of active stripes in bi_phys_segments
1372 ++ * (minus one for over-loaded initialization)
1373 ++ */
1374 ++ blk_queue_max_hw_sectors(mddev->queue, 0xfffe * STRIPE_SECTORS);
1375 ++ blk_queue_max_discard_sectors(mddev->queue,
1376 ++ 0xfffe * STRIPE_SECTORS);
1377 ++
1378 + /*
1379 + * unaligned part of discard request will be ignored, so can't
1380 + * guarantee discard_zeroes_data
1381 +diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
1382 +index 63b42252166a..7a754ec826ac 100644
1383 +--- a/drivers/media/rc/ite-cir.c
1384 ++++ b/drivers/media/rc/ite-cir.c
1385 +@@ -263,6 +263,8 @@ static void ite_set_carrier_params(struct ite_dev *dev)
1386 +
1387 + if (allowance > ITE_RXDCR_MAX)
1388 + allowance = ITE_RXDCR_MAX;
1389 ++
1390 ++ use_demodulator = true;
1391 + }
1392 + }
1393 +
1394 +diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c
1395 +index 9771cd83c06e..3a615e4c4991 100644
1396 +--- a/drivers/media/tuners/tuner-xc2028.c
1397 ++++ b/drivers/media/tuners/tuner-xc2028.c
1398 +@@ -289,6 +289,14 @@ static void free_firmware(struct xc2028_data *priv)
1399 + int i;
1400 + tuner_dbg("%s called\n", __func__);
1401 +
1402 ++ /* free allocated f/w string */
1403 ++ if (priv->fname != firmware_name)
1404 ++ kfree(priv->fname);
1405 ++ priv->fname = NULL;
1406 ++
1407 ++ priv->state = XC2028_NO_FIRMWARE;
1408 ++ memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
1409 ++
1410 + if (!priv->firm)
1411 + return;
1412 +
1413 +@@ -299,9 +307,6 @@ static void free_firmware(struct xc2028_data *priv)
1414 +
1415 + priv->firm = NULL;
1416 + priv->firm_size = 0;
1417 +- priv->state = XC2028_NO_FIRMWARE;
1418 +-
1419 +- memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
1420 + }
1421 +
1422 + static int load_all_firmwares(struct dvb_frontend *fe,
1423 +@@ -890,9 +895,9 @@ read_not_reliable:
1424 + return 0;
1425 +
1426 + fail:
1427 ++ free_firmware(priv);
1428 + priv->state = XC2028_SLEEP;
1429 +
1430 +- memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
1431 + if (retry_count < 8) {
1432 + msleep(50);
1433 + retry_count++;
1434 +@@ -1314,11 +1319,8 @@ static int xc2028_dvb_release(struct dvb_frontend *fe)
1435 + mutex_lock(&xc2028_list_mutex);
1436 +
1437 + /* only perform final cleanup if this is the last instance */
1438 +- if (hybrid_tuner_report_instance_count(priv) == 1) {
1439 ++ if (hybrid_tuner_report_instance_count(priv) == 1)
1440 + free_firmware(priv);
1441 +- kfree(priv->ctrl.fname);
1442 +- priv->ctrl.fname = NULL;
1443 +- }
1444 +
1445 + if (priv)
1446 + hybrid_tuner_release_state(priv);
1447 +@@ -1381,16 +1383,8 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
1448 +
1449 + /*
1450 + * Copy the config data.
1451 +- * For the firmware name, keep a local copy of the string,
1452 +- * in order to avoid troubles during device release.
1453 + */
1454 +- kfree(priv->ctrl.fname);
1455 + memcpy(&priv->ctrl, p, sizeof(priv->ctrl));
1456 +- if (p->fname) {
1457 +- priv->ctrl.fname = kstrdup(p->fname, GFP_KERNEL);
1458 +- if (priv->ctrl.fname == NULL)
1459 +- rc = -ENOMEM;
1460 +- }
1461 +
1462 + /*
1463 + * If firmware name changed, frees firmware. As free_firmware will
1464 +@@ -1405,10 +1399,15 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
1465 +
1466 + if (priv->state == XC2028_NO_FIRMWARE) {
1467 + if (!firmware_name[0])
1468 +- priv->fname = priv->ctrl.fname;
1469 ++ priv->fname = kstrdup(p->fname, GFP_KERNEL);
1470 + else
1471 + priv->fname = firmware_name;
1472 +
1473 ++ if (!priv->fname) {
1474 ++ rc = -ENOMEM;
1475 ++ goto unlock;
1476 ++ }
1477 ++
1478 + rc = request_firmware_nowait(THIS_MODULE, 1,
1479 + priv->fname,
1480 + priv->i2c_props.adap->dev.parent,
1481 +@@ -1421,6 +1420,7 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
1482 + } else
1483 + priv->state = XC2028_WAITING_FIRMWARE;
1484 + }
1485 ++unlock:
1486 + mutex_unlock(&priv->lock);
1487 +
1488 + return rc;
1489 +diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
1490 +index 0c0fc52d42c5..b2ef5f2b4c53 100644
1491 +--- a/drivers/mmc/card/mmc_test.c
1492 ++++ b/drivers/mmc/card/mmc_test.c
1493 +@@ -795,7 +795,7 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
1494 + struct mmc_async_req *cur_areq = &test_areq[0].areq;
1495 + struct mmc_async_req *other_areq = &test_areq[1].areq;
1496 + int i;
1497 +- int ret;
1498 ++ int ret = RESULT_OK;
1499 +
1500 + test_areq[0].test = test;
1501 + test_areq[1].test = test;
1502 +diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
1503 +index f8aac3044670..f87e6e9ce386 100644
1504 +--- a/drivers/mmc/host/mxs-mmc.c
1505 ++++ b/drivers/mmc/host/mxs-mmc.c
1506 +@@ -315,6 +315,9 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host)
1507 + cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
1508 + cmd1 = cmd->arg;
1509 +
1510 ++ if (cmd->opcode == MMC_STOP_TRANSMISSION)
1511 ++ cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
1512 ++
1513 + if (host->sdio_irq_en) {
1514 + ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
1515 + cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
1516 +@@ -423,8 +426,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
1517 + ssp->base + HW_SSP_BLOCK_SIZE);
1518 + }
1519 +
1520 +- if ((cmd->opcode == MMC_STOP_TRANSMISSION) ||
1521 +- (cmd->opcode == SD_IO_RW_EXTENDED))
1522 ++ if (cmd->opcode == SD_IO_RW_EXTENDED)
1523 + cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
1524 +
1525 + cmd1 = cmd->arg;
1526 +diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
1527 +index d88529841d3f..2bb9c04cb2c5 100644
1528 +--- a/drivers/mtd/nand/Kconfig
1529 ++++ b/drivers/mtd/nand/Kconfig
1530 +@@ -531,7 +531,7 @@ config MTD_NAND_FSMC
1531 + Flexible Static Memory Controller (FSMC)
1532 +
1533 + config MTD_NAND_XWAY
1534 +- tristate "Support for NAND on Lantiq XWAY SoC"
1535 ++ bool "Support for NAND on Lantiq XWAY SoC"
1536 + depends on LANTIQ && SOC_TYPE_XWAY
1537 + select MTD_NAND_PLATFORM
1538 + help
1539 +diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
1540 +index 03e7f0cbda8c..47f0dcbf42ca 100644
1541 +--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
1542 ++++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
1543 +@@ -824,23 +824,25 @@ lbl_free_candev:
1544 + static void peak_usb_disconnect(struct usb_interface *intf)
1545 + {
1546 + struct peak_usb_device *dev;
1547 ++ struct peak_usb_device *dev_prev_siblings;
1548 +
1549 + /* unregister as many netdev devices as siblings */
1550 +- for (dev = usb_get_intfdata(intf); dev; dev = dev->prev_siblings) {
1551 ++ for (dev = usb_get_intfdata(intf); dev; dev = dev_prev_siblings) {
1552 + struct net_device *netdev = dev->netdev;
1553 + char name[IFNAMSIZ];
1554 +
1555 ++ dev_prev_siblings = dev->prev_siblings;
1556 + dev->state &= ~PCAN_USB_STATE_CONNECTED;
1557 + strncpy(name, netdev->name, IFNAMSIZ);
1558 +
1559 + unregister_netdev(netdev);
1560 +- free_candev(netdev);
1561 +
1562 + kfree(dev->cmd_buf);
1563 + dev->next_siblings = NULL;
1564 + if (dev->adapter->dev_free)
1565 + dev->adapter->dev_free(dev);
1566 +
1567 ++ free_candev(netdev);
1568 + dev_info(&intf->dev, "%s removed\n", name);
1569 + }
1570 +
1571 +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
1572 +index 97fe8e6dba79..5ef133a5a48b 100644
1573 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
1574 ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
1575 +@@ -1776,8 +1776,16 @@ static void bnx2x_get_ringparam(struct net_device *dev,
1576 +
1577 + ering->rx_max_pending = MAX_RX_AVAIL;
1578 +
1579 ++ /* If size isn't already set, we give an estimation of the number
1580 ++ * of buffers we'll have. We're neglecting some possible conditions
1581 ++ * [we couldn't know for certain at this point if number of queues
1582 ++ * might shrink] but the number would be correct for the likely
1583 ++ * scenario.
1584 ++ */
1585 + if (bp->rx_ring_size)
1586 + ering->rx_pending = bp->rx_ring_size;
1587 ++ else if (BNX2X_NUM_RX_QUEUES(bp))
1588 ++ ering->rx_pending = MAX_RX_AVAIL / BNX2X_NUM_RX_QUEUES(bp);
1589 + else
1590 + ering->rx_pending = MAX_RX_AVAIL;
1591 +
1592 +diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
1593 +index 45ce6e2214b3..2deabae1d66e 100644
1594 +--- a/drivers/net/ethernet/brocade/bna/bnad.c
1595 ++++ b/drivers/net/ethernet/brocade/bna/bnad.c
1596 +@@ -193,6 +193,7 @@ bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
1597 + return 0;
1598 +
1599 + hw_cons = *(tcb->hw_consumer_index);
1600 ++ rmb();
1601 + cons = tcb->consumer_index;
1602 + q_depth = tcb->q_depth;
1603 +
1604 +@@ -2906,13 +2907,12 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
1605 + BNA_QE_INDX_INC(prod, q_depth);
1606 + tcb->producer_index = prod;
1607 +
1608 +- smp_mb();
1609 ++ wmb();
1610 +
1611 + if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
1612 + return NETDEV_TX_OK;
1613 +
1614 + bna_txq_prod_indx_doorbell(tcb);
1615 +- smp_mb();
1616 +
1617 + return NETDEV_TX_OK;
1618 + }
1619 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1620 +index 3b5459696310..4ce28987c3c1 100644
1621 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1622 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1623 +@@ -2723,12 +2723,6 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
1624 + spin_lock_init(&priv->lock);
1625 + spin_lock_init(&priv->tx_lock);
1626 +
1627 +- ret = register_netdev(ndev);
1628 +- if (ret) {
1629 +- pr_err("%s: ERROR %i registering the device\n", __func__, ret);
1630 +- goto error_netdev_register;
1631 +- }
1632 +-
1633 + priv->stmmac_clk = clk_get(priv->device, STMMAC_RESOURCE_NAME);
1634 + if (IS_ERR(priv->stmmac_clk)) {
1635 + pr_warn("%s: warning: cannot get CSR clock\n", __func__);
1636 +@@ -2759,13 +2753,23 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
1637 + }
1638 + }
1639 +
1640 ++ ret = register_netdev(ndev);
1641 ++ if (ret) {
1642 ++ netdev_err(priv->dev, "%s: ERROR %i registering the device\n",
1643 ++ __func__, ret);
1644 ++ goto error_netdev_register;
1645 ++ }
1646 ++
1647 + return priv;
1648 +
1649 ++error_netdev_register:
1650 ++ if (priv->pcs != STMMAC_PCS_RGMII &&
1651 ++ priv->pcs != STMMAC_PCS_TBI &&
1652 ++ priv->pcs != STMMAC_PCS_RTBI)
1653 ++ stmmac_mdio_unregister(ndev);
1654 + error_mdio_register:
1655 + clk_put(priv->stmmac_clk);
1656 + error_clk_get:
1657 +- unregister_netdev(ndev);
1658 +-error_netdev_register:
1659 + netif_napi_del(&priv->napi);
1660 + error_free_netdev:
1661 + free_netdev(ndev);
1662 +diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
1663 +index 2dc16b6efaf0..97f3e626b535 100644
1664 +--- a/drivers/net/ethernet/ti/cpmac.c
1665 ++++ b/drivers/net/ethernet/ti/cpmac.c
1666 +@@ -557,7 +557,8 @@ fatal_error:
1667 +
1668 + static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
1669 + {
1670 +- int queue, len;
1671 ++ int queue;
1672 ++ unsigned int len;
1673 + struct cpmac_desc *desc;
1674 + struct cpmac_priv *priv = netdev_priv(dev);
1675 +
1676 +@@ -567,7 +568,7 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
1677 + if (unlikely(skb_padto(skb, ETH_ZLEN)))
1678 + return NETDEV_TX_OK;
1679 +
1680 +- len = max(skb->len, ETH_ZLEN);
1681 ++ len = max_t(unsigned int, skb->len, ETH_ZLEN);
1682 + queue = skb_get_queue_mapping(skb);
1683 + netif_stop_subqueue(dev, queue);
1684 +
1685 +diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
1686 +index 616b4e1dd44c..eb6d0d8a3e06 100644
1687 +--- a/drivers/net/hyperv/netvsc_drv.c
1688 ++++ b/drivers/net/hyperv/netvsc_drv.c
1689 +@@ -48,6 +48,9 @@ struct net_device_context {
1690 + struct work_struct work;
1691 + };
1692 +
1693 ++/* Restrict GSO size to account for NVGRE */
1694 ++#define NETVSC_GSO_MAX_SIZE 62768
1695 ++
1696 + #define RING_SIZE_MIN 64
1697 + static int ring_size = 128;
1698 + module_param(ring_size, int, S_IRUGO);
1699 +@@ -435,6 +438,7 @@ static int netvsc_probe(struct hv_device *dev,
1700 +
1701 + SET_ETHTOOL_OPS(net, &ethtool_ops);
1702 + SET_NETDEV_DEV(net, &dev->device);
1703 ++ netif_set_gso_max_size(net, NETVSC_GSO_MAX_SIZE);
1704 +
1705 + ret = register_netdev(net);
1706 + if (ret != 0) {
1707 +diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
1708 +index 55d89390b4bc..59dcdfcd0c28 100644
1709 +--- a/drivers/net/vmxnet3/vmxnet3_drv.c
1710 ++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
1711 +@@ -2890,7 +2890,6 @@ vmxnet3_tx_timeout(struct net_device *netdev)
1712 +
1713 + netdev_err(adapter->netdev, "tx hang\n");
1714 + schedule_work(&adapter->work);
1715 +- netif_wake_queue(adapter->netdev);
1716 + }
1717 +
1718 +
1719 +@@ -2917,6 +2916,7 @@ vmxnet3_reset_work(struct work_struct *data)
1720 + }
1721 + rtnl_unlock();
1722 +
1723 ++ netif_wake_queue(adapter->netdev);
1724 + clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
1725 + }
1726 +
1727 +diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
1728 +index bb7af78e4eed..6a995e0919dd 100644
1729 +--- a/drivers/pci/hotplug/rpadlpar_core.c
1730 ++++ b/drivers/pci/hotplug/rpadlpar_core.c
1731 +@@ -259,8 +259,13 @@ static int dlpar_add_phb(char *drc_name, struct device_node *dn)
1732 +
1733 + static int dlpar_add_vio_slot(char *drc_name, struct device_node *dn)
1734 + {
1735 +- if (vio_find_node(dn))
1736 ++ struct vio_dev *vio_dev;
1737 ++
1738 ++ vio_dev = vio_find_node(dn);
1739 ++ if (vio_dev) {
1740 ++ put_device(&vio_dev->dev);
1741 + return -EINVAL;
1742 ++ }
1743 +
1744 + if (!vio_register_device_node(dn)) {
1745 + printk(KERN_ERR
1746 +@@ -336,6 +341,9 @@ static int dlpar_remove_vio_slot(char *drc_name, struct device_node *dn)
1747 + return -EINVAL;
1748 +
1749 + vio_unregister_device(vio_dev);
1750 ++
1751 ++ put_device(&vio_dev->dev);
1752 ++
1753 + return 0;
1754 + }
1755 +
1756 +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
1757 +index 36c3e71d54b5..1b9548fb9102 100644
1758 +--- a/drivers/pci/pci.c
1759 ++++ b/drivers/pci/pci.c
1760 +@@ -1906,6 +1906,10 @@ bool pci_dev_run_wake(struct pci_dev *dev)
1761 + if (!dev->pme_support)
1762 + return false;
1763 +
1764 ++ /* PME-capable in principle, but not from the intended sleep state */
1765 ++ if (!pci_pme_capable(dev, pci_target_state(dev)))
1766 ++ return false;
1767 ++
1768 + while (bus->parent) {
1769 + struct pci_dev *bridge = bus->self;
1770 +
1771 +diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c
1772 +index e758af95c209..b625a1f062bf 100644
1773 +--- a/drivers/pinctrl/sh-pfc/pinctrl.c
1774 ++++ b/drivers/pinctrl/sh-pfc/pinctrl.c
1775 +@@ -479,7 +479,8 @@ static bool sh_pfc_pinconf_validate(struct sh_pfc *pfc, unsigned int _pin,
1776 +
1777 + switch (param) {
1778 + case PIN_CONFIG_BIAS_DISABLE:
1779 +- return true;
1780 ++ return pin->configs &
1781 ++ (SH_PFC_PIN_CFG_PULL_UP | SH_PFC_PIN_CFG_PULL_DOWN);
1782 +
1783 + case PIN_CONFIG_BIAS_PULL_UP:
1784 + return pin->configs & SH_PFC_PIN_CFG_PULL_UP;
1785 +diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
1786 +index cf31d3321dab..a7f44f30273b 100644
1787 +--- a/drivers/s390/char/vmlogrdr.c
1788 ++++ b/drivers/s390/char/vmlogrdr.c
1789 +@@ -873,7 +873,7 @@ static int __init vmlogrdr_init(void)
1790 + goto cleanup;
1791 +
1792 + for (i=0; i < MAXMINOR; ++i ) {
1793 +- sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL);
1794 ++ sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
1795 + if (!sys_ser[i].buffer) {
1796 + rc = -ENOMEM;
1797 + break;
1798 +diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
1799 +index 371aed75eb83..79f0f2e096cb 100644
1800 +--- a/drivers/s390/scsi/zfcp_dbf.c
1801 ++++ b/drivers/s390/scsi/zfcp_dbf.c
1802 +@@ -289,11 +289,12 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
1803 +
1804 +
1805 + /**
1806 +- * zfcp_dbf_rec_run - trace event related to running recovery
1807 ++ * zfcp_dbf_rec_run_lvl - trace event related to running recovery
1808 ++ * @level: trace level to be used for event
1809 + * @tag: identifier for event
1810 + * @erp: erp_action running
1811 + */
1812 +-void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
1813 ++void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp)
1814 + {
1815 + struct zfcp_dbf *dbf = erp->adapter->dbf;
1816 + struct zfcp_dbf_rec *rec = &dbf->rec_buf;
1817 +@@ -319,11 +320,21 @@ void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
1818 + else
1819 + rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter);
1820 +
1821 +- debug_event(dbf->rec, 1, rec, sizeof(*rec));
1822 ++ debug_event(dbf->rec, level, rec, sizeof(*rec));
1823 + spin_unlock_irqrestore(&dbf->rec_lock, flags);
1824 + }
1825 +
1826 + /**
1827 ++ * zfcp_dbf_rec_run - trace event related to running recovery
1828 ++ * @tag: identifier for event
1829 ++ * @erp: erp_action running
1830 ++ */
1831 ++void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
1832 ++{
1833 ++ zfcp_dbf_rec_run_lvl(1, tag, erp);
1834 ++}
1835 ++
1836 ++/**
1837 + * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery
1838 + * @tag: identifier for event
1839 + * @wka_port: well known address port
1840 +diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
1841 +index 440aa619da1d..a8165f142550 100644
1842 +--- a/drivers/s390/scsi/zfcp_dbf.h
1843 ++++ b/drivers/s390/scsi/zfcp_dbf.h
1844 +@@ -2,7 +2,7 @@
1845 + * zfcp device driver
1846 + * debug feature declarations
1847 + *
1848 +- * Copyright IBM Corp. 2008, 2015
1849 ++ * Copyright IBM Corp. 2008, 2016
1850 + */
1851 +
1852 + #ifndef ZFCP_DBF_H
1853 +@@ -283,6 +283,30 @@ struct zfcp_dbf {
1854 + struct zfcp_dbf_scsi scsi_buf;
1855 + };
1856 +
1857 ++/**
1858 ++ * zfcp_dbf_hba_fsf_resp_suppress - true if we should not trace by default
1859 ++ * @req: request that has been completed
1860 ++ *
1861 ++ * Returns true if FCP response with only benign residual under count.
1862 ++ */
1863 ++static inline
1864 ++bool zfcp_dbf_hba_fsf_resp_suppress(struct zfcp_fsf_req *req)
1865 ++{
1866 ++ struct fsf_qtcb *qtcb = req->qtcb;
1867 ++ u32 fsf_stat = qtcb->header.fsf_status;
1868 ++ struct fcp_resp *fcp_rsp;
1869 ++ u8 rsp_flags, fr_status;
1870 ++
1871 ++ if (qtcb->prefix.qtcb_type != FSF_IO_COMMAND)
1872 ++ return false; /* not an FCP response */
1873 ++ fcp_rsp = (struct fcp_resp *)&qtcb->bottom.io.fcp_rsp;
1874 ++ rsp_flags = fcp_rsp->fr_flags;
1875 ++ fr_status = fcp_rsp->fr_status;
1876 ++ return (fsf_stat == FSF_FCP_RSP_AVAILABLE) &&
1877 ++ (rsp_flags == FCP_RESID_UNDER) &&
1878 ++ (fr_status == SAM_STAT_GOOD);
1879 ++}
1880 ++
1881 + static inline
1882 + void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
1883 + {
1884 +@@ -304,7 +328,9 @@ void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
1885 + zfcp_dbf_hba_fsf_resp("fs_perr", 1, req);
1886 +
1887 + } else if (qtcb->header.fsf_status != FSF_GOOD) {
1888 +- zfcp_dbf_hba_fsf_resp("fs_ferr", 1, req);
1889 ++ zfcp_dbf_hba_fsf_resp("fs_ferr",
1890 ++ zfcp_dbf_hba_fsf_resp_suppress(req)
1891 ++ ? 5 : 1, req);
1892 +
1893 + } else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
1894 + (req->fsf_command == FSF_QTCB_OPEN_LUN)) {
1895 +@@ -388,4 +414,15 @@ void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
1896 + _zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL);
1897 + }
1898 +
1899 ++/**
1900 ++ * zfcp_dbf_scsi_nullcmnd() - trace NULLify of SCSI command in dev/tgt-reset.
1901 ++ * @scmnd: SCSI command that was NULLified.
1902 ++ * @fsf_req: request that owned @scmnd.
1903 ++ */
1904 ++static inline void zfcp_dbf_scsi_nullcmnd(struct scsi_cmnd *scmnd,
1905 ++ struct zfcp_fsf_req *fsf_req)
1906 ++{
1907 ++ _zfcp_dbf_scsi("scfc__1", 3, scmnd, fsf_req);
1908 ++}
1909 ++
1910 + #endif /* ZFCP_DBF_H */
1911 +diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
1912 +index ac86ff90c897..acb0b8c3989d 100644
1913 +--- a/drivers/s390/scsi/zfcp_erp.c
1914 ++++ b/drivers/s390/scsi/zfcp_erp.c
1915 +@@ -3,7 +3,7 @@
1916 + *
1917 + * Error Recovery Procedures (ERP).
1918 + *
1919 +- * Copyright IBM Corp. 2002, 2015
1920 ++ * Copyright IBM Corp. 2002, 2016
1921 + */
1922 +
1923 + #define KMSG_COMPONENT "zfcp"
1924 +@@ -1211,6 +1211,62 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
1925 + }
1926 + }
1927 +
1928 ++/**
1929 ++ * zfcp_erp_try_rport_unblock - unblock rport if no more/new recovery
1930 ++ * @port: zfcp_port whose fc_rport we should try to unblock
1931 ++ */
1932 ++static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
1933 ++{
1934 ++ unsigned long flags;
1935 ++ struct zfcp_adapter *adapter = port->adapter;
1936 ++ int port_status;
1937 ++ struct Scsi_Host *shost = adapter->scsi_host;
1938 ++ struct scsi_device *sdev;
1939 ++
1940 ++ write_lock_irqsave(&adapter->erp_lock, flags);
1941 ++ port_status = atomic_read(&port->status);
1942 ++ if ((port_status & ZFCP_STATUS_COMMON_UNBLOCKED) == 0 ||
1943 ++ (port_status & (ZFCP_STATUS_COMMON_ERP_INUSE |
1944 ++ ZFCP_STATUS_COMMON_ERP_FAILED)) != 0) {
1945 ++ /* new ERP of severity >= port triggered elsewhere meanwhile or
1946 ++ * local link down (adapter erp_failed but not clear unblock)
1947 ++ */
1948 ++ zfcp_dbf_rec_run_lvl(4, "ertru_p", &port->erp_action);
1949 ++ write_unlock_irqrestore(&adapter->erp_lock, flags);
1950 ++ return;
1951 ++ }
1952 ++ spin_lock(shost->host_lock);
1953 ++ __shost_for_each_device(sdev, shost) {
1954 ++ struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
1955 ++ int lun_status;
1956 ++
1957 ++ if (zsdev->port != port)
1958 ++ continue;
1959 ++ /* LUN under port of interest */
1960 ++ lun_status = atomic_read(&zsdev->status);
1961 ++ if ((lun_status & ZFCP_STATUS_COMMON_ERP_FAILED) != 0)
1962 ++ continue; /* unblock rport despite failed LUNs */
1963 ++ /* LUN recovery not given up yet [maybe follow-up pending] */
1964 ++ if ((lun_status & ZFCP_STATUS_COMMON_UNBLOCKED) == 0 ||
1965 ++ (lun_status & ZFCP_STATUS_COMMON_ERP_INUSE) != 0) {
1966 ++ /* LUN blocked:
1967 ++ * not yet unblocked [LUN recovery pending]
1968 ++ * or meanwhile blocked [new LUN recovery triggered]
1969 ++ */
1970 ++ zfcp_dbf_rec_run_lvl(4, "ertru_l", &zsdev->erp_action);
1971 ++ spin_unlock(shost->host_lock);
1972 ++ write_unlock_irqrestore(&adapter->erp_lock, flags);
1973 ++ return;
1974 ++ }
1975 ++ }
1976 ++ /* now port has no child or all children have completed recovery,
1977 ++ * and no ERP of severity >= port was meanwhile triggered elsewhere
1978 ++ */
1979 ++ zfcp_scsi_schedule_rport_register(port);
1980 ++ spin_unlock(shost->host_lock);
1981 ++ write_unlock_irqrestore(&adapter->erp_lock, flags);
1982 ++}
1983 ++
1984 + static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
1985 + {
1986 + struct zfcp_adapter *adapter = act->adapter;
1987 +@@ -1221,6 +1277,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
1988 + case ZFCP_ERP_ACTION_REOPEN_LUN:
1989 + if (!(act->status & ZFCP_STATUS_ERP_NO_REF))
1990 + scsi_device_put(sdev);
1991 ++ zfcp_erp_try_rport_unblock(port);
1992 + break;
1993 +
1994 + case ZFCP_ERP_ACTION_REOPEN_PORT:
1995 +@@ -1231,7 +1288,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
1996 + */
1997 + if (act->step != ZFCP_ERP_STEP_UNINITIALIZED)
1998 + if (result == ZFCP_ERP_SUCCEEDED)
1999 +- zfcp_scsi_schedule_rport_register(port);
2000 ++ zfcp_erp_try_rport_unblock(port);
2001 + /* fall through */
2002 + case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
2003 + put_device(&port->dev);
2004 +diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
2005 +index 1f1fe41ecb97..0c8c8b8fc1de 100644
2006 +--- a/drivers/s390/scsi/zfcp_ext.h
2007 ++++ b/drivers/s390/scsi/zfcp_ext.h
2008 +@@ -3,7 +3,7 @@
2009 + *
2010 + * External function declarations.
2011 + *
2012 +- * Copyright IBM Corp. 2002, 2015
2013 ++ * Copyright IBM Corp. 2002, 2016
2014 + */
2015 +
2016 + #ifndef ZFCP_EXT_H
2017 +@@ -35,6 +35,8 @@ extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
2018 + extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
2019 + struct zfcp_port *, struct scsi_device *, u8, u8);
2020 + extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
2021 ++extern void zfcp_dbf_rec_run_lvl(int level, char *tag,
2022 ++ struct zfcp_erp_action *erp);
2023 + extern void zfcp_dbf_rec_run_wka(char *, struct zfcp_fc_wka_port *, u64);
2024 + extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
2025 + extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *);
2026 +diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
2027 +index be1c04b334c5..ea3c76ac0de1 100644
2028 +--- a/drivers/s390/scsi/zfcp_fsf.h
2029 ++++ b/drivers/s390/scsi/zfcp_fsf.h
2030 +@@ -3,7 +3,7 @@
2031 + *
2032 + * Interface to the FSF support functions.
2033 + *
2034 +- * Copyright IBM Corp. 2002, 2015
2035 ++ * Copyright IBM Corp. 2002, 2016
2036 + */
2037 +
2038 + #ifndef FSF_H
2039 +@@ -78,6 +78,7 @@
2040 + #define FSF_APP_TAG_CHECK_FAILURE 0x00000082
2041 + #define FSF_REF_TAG_CHECK_FAILURE 0x00000083
2042 + #define FSF_ADAPTER_STATUS_AVAILABLE 0x000000AD
2043 ++#define FSF_FCP_RSP_AVAILABLE 0x000000AF
2044 + #define FSF_UNKNOWN_COMMAND 0x000000E2
2045 + #define FSF_UNKNOWN_OP_SUBTYPE 0x000000E3
2046 + #define FSF_INVALID_COMMAND_OPTION 0x000000E5
2047 +diff --git a/drivers/s390/scsi/zfcp_reqlist.h b/drivers/s390/scsi/zfcp_reqlist.h
2048 +index 7c2c6194dfca..703fce59befe 100644
2049 +--- a/drivers/s390/scsi/zfcp_reqlist.h
2050 ++++ b/drivers/s390/scsi/zfcp_reqlist.h
2051 +@@ -4,7 +4,7 @@
2052 + * Data structure and helper functions for tracking pending FSF
2053 + * requests.
2054 + *
2055 +- * Copyright IBM Corp. 2009
2056 ++ * Copyright IBM Corp. 2009, 2016
2057 + */
2058 +
2059 + #ifndef ZFCP_REQLIST_H
2060 +@@ -180,4 +180,32 @@ static inline void zfcp_reqlist_move(struct zfcp_reqlist *rl,
2061 + spin_unlock_irqrestore(&rl->lock, flags);
2062 + }
2063 +
2064 ++/**
2065 ++ * zfcp_reqlist_apply_for_all() - apply a function to every request.
2066 ++ * @rl: the requestlist that contains the target requests.
2067 ++ * @f: the function to apply to each request; the first parameter of the
2068 ++ * function will be the target-request; the second parameter is the same
2069 ++ * pointer as given with the argument @data.
2070 ++ * @data: freely chosen argument; passed through to @f as second parameter.
2071 ++ *
2072 ++ * Uses :c:macro:`list_for_each_entry` to iterate over the lists in the hash-
2073 ++ * table (not a 'safe' variant, so don't modify the list).
2074 ++ *
2075 ++ * Holds @rl->lock over the entire request-iteration.
2076 ++ */
2077 ++static inline void
2078 ++zfcp_reqlist_apply_for_all(struct zfcp_reqlist *rl,
2079 ++ void (*f)(struct zfcp_fsf_req *, void *), void *data)
2080 ++{
2081 ++ struct zfcp_fsf_req *req;
2082 ++ unsigned long flags;
2083 ++ unsigned int i;
2084 ++
2085 ++ spin_lock_irqsave(&rl->lock, flags);
2086 ++ for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
2087 ++ list_for_each_entry(req, &rl->buckets[i], list)
2088 ++ f(req, data);
2089 ++ spin_unlock_irqrestore(&rl->lock, flags);
2090 ++}
2091 ++
2092 + #endif /* ZFCP_REQLIST_H */
2093 +diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
2094 +index 38ee0df633a3..66c37e77ac7c 100644
2095 +--- a/drivers/s390/scsi/zfcp_scsi.c
2096 ++++ b/drivers/s390/scsi/zfcp_scsi.c
2097 +@@ -3,7 +3,7 @@
2098 + *
2099 + * Interface to Linux SCSI midlayer.
2100 + *
2101 +- * Copyright IBM Corp. 2002, 2015
2102 ++ * Copyright IBM Corp. 2002, 2016
2103 + */
2104 +
2105 + #define KMSG_COMPONENT "zfcp"
2106 +@@ -109,9 +109,7 @@ int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt)
2107 + }
2108 +
2109 + if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) {
2110 +- /* This could be either
2111 +- * open LUN pending: this is temporary, will result in
2112 +- * open LUN or ERP_FAILED, so retry command
2113 ++ /* This could be
2114 + * call to rport_delete pending: mimic retry from
2115 + * fc_remote_port_chkready until rport is BLOCKED
2116 + */
2117 +@@ -230,6 +228,57 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
2118 + return retval;
2119 + }
2120 +
2121 ++struct zfcp_scsi_req_filter {
2122 ++ u8 tmf_scope;
2123 ++ u32 lun_handle;
2124 ++ u32 port_handle;
2125 ++};
2126 ++
2127 ++static void zfcp_scsi_forget_cmnd(struct zfcp_fsf_req *old_req, void *data)
2128 ++{
2129 ++ struct zfcp_scsi_req_filter *filter =
2130 ++ (struct zfcp_scsi_req_filter *)data;
2131 ++
2132 ++ /* already aborted - prevent side-effects - or not a SCSI command */
2133 ++ if (old_req->data == NULL || old_req->fsf_command != FSF_QTCB_FCP_CMND)
2134 ++ return;
2135 ++
2136 ++ /* (tmf_scope == FCP_TMF_TGT_RESET || tmf_scope == FCP_TMF_LUN_RESET) */
2137 ++ if (old_req->qtcb->header.port_handle != filter->port_handle)
2138 ++ return;
2139 ++
2140 ++ if (filter->tmf_scope == FCP_TMF_LUN_RESET &&
2141 ++ old_req->qtcb->header.lun_handle != filter->lun_handle)
2142 ++ return;
2143 ++
2144 ++ zfcp_dbf_scsi_nullcmnd((struct scsi_cmnd *)old_req->data, old_req);
2145 ++ old_req->data = NULL;
2146 ++}
2147 ++
2148 ++static void zfcp_scsi_forget_cmnds(struct zfcp_scsi_dev *zsdev, u8 tm_flags)
2149 ++{
2150 ++ struct zfcp_adapter *adapter = zsdev->port->adapter;
2151 ++ struct zfcp_scsi_req_filter filter = {
2152 ++ .tmf_scope = FCP_TMF_TGT_RESET,
2153 ++ .port_handle = zsdev->port->handle,
2154 ++ };
2155 ++ unsigned long flags;
2156 ++
2157 ++ if (tm_flags == FCP_TMF_LUN_RESET) {
2158 ++ filter.tmf_scope = FCP_TMF_LUN_RESET;
2159 ++ filter.lun_handle = zsdev->lun_handle;
2160 ++ }
2161 ++
2162 ++ /*
2163 ++ * abort_lock secures against other processings - in the abort-function
2164 ++ * and normal cmnd-handler - of (struct zfcp_fsf_req *)->data
2165 ++ */
2166 ++ write_lock_irqsave(&adapter->abort_lock, flags);
2167 ++ zfcp_reqlist_apply_for_all(adapter->req_list, zfcp_scsi_forget_cmnd,
2168 ++ &filter);
2169 ++ write_unlock_irqrestore(&adapter->abort_lock, flags);
2170 ++}
2171 ++
2172 + static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
2173 + {
2174 + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
2175 +@@ -262,8 +311,10 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
2176 + if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
2177 + zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
2178 + retval = FAILED;
2179 +- } else
2180 ++ } else {
2181 + zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
2182 ++ zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
2183 ++ }
2184 +
2185 + zfcp_fsf_req_free(fsf_req);
2186 + return retval;
2187 +diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
2188 +index 1e4479f3331a..55716c5184f7 100644
2189 +--- a/drivers/scsi/mvsas/mv_94xx.c
2190 ++++ b/drivers/scsi/mvsas/mv_94xx.c
2191 +@@ -621,7 +621,7 @@ static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
2192 + {
2193 + u32 tmp;
2194 + tmp = mvs_cr32(mvi, MVS_COMMAND_ACTIVE+(slot_idx >> 3));
2195 +- if (tmp && 1 << (slot_idx % 32)) {
2196 ++ if (tmp & 1 << (slot_idx % 32)) {
2197 + mv_printk("command active %08X, slot [%x].\n", tmp, slot_idx);
2198 + mvs_cw32(mvi, MVS_COMMAND_ACTIVE + (slot_idx >> 3),
2199 + 1 << (slot_idx % 32));
2200 +diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
2201 +index 36d62fd53511..ebc939e85b76 100644
2202 +--- a/drivers/scsi/qla2xxx/qla_os.c
2203 ++++ b/drivers/scsi/qla2xxx/qla_os.c
2204 +@@ -3384,7 +3384,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2205 + sizeof(struct ct6_dsd), 0,
2206 + SLAB_HWCACHE_ALIGN, NULL);
2207 + if (!ctx_cachep)
2208 +- goto fail_free_gid_list;
2209 ++ goto fail_free_srb_mempool;
2210 + }
2211 + ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
2212 + ctx_cachep);
2213 +@@ -3537,7 +3537,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2214 + ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long),
2215 + GFP_KERNEL);
2216 + if (!ha->loop_id_map)
2217 +- goto fail_async_pd;
2218 ++ goto fail_loop_id_map;
2219 + else {
2220 + qla2x00_set_reserved_loop_ids(ha);
2221 + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
2222 +@@ -3546,6 +3546,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2223 +
2224 + return 0;
2225 +
2226 ++fail_loop_id_map:
2227 ++ dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
2228 + fail_async_pd:
2229 + dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
2230 + fail_ex_init_cb:
2231 +@@ -3573,6 +3575,10 @@ fail_free_ms_iocb:
2232 + dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
2233 + ha->ms_iocb = NULL;
2234 + ha->ms_iocb_dma = 0;
2235 ++
2236 ++ if (ha->sns_cmd)
2237 ++ dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
2238 ++ ha->sns_cmd, ha->sns_cmd_dma);
2239 + fail_dma_pool:
2240 + if (IS_QLA82XX(ha) || ql2xenabledif) {
2241 + dma_pool_destroy(ha->fcp_cmnd_dma_pool);
2242 +@@ -3590,10 +3596,12 @@ fail_free_nvram:
2243 + kfree(ha->nvram);
2244 + ha->nvram = NULL;
2245 + fail_free_ctx_mempool:
2246 +- mempool_destroy(ha->ctx_mempool);
2247 ++ if (ha->ctx_mempool)
2248 ++ mempool_destroy(ha->ctx_mempool);
2249 + ha->ctx_mempool = NULL;
2250 + fail_free_srb_mempool:
2251 +- mempool_destroy(ha->srb_mempool);
2252 ++ if (ha->srb_mempool)
2253 ++ mempool_destroy(ha->srb_mempool);
2254 + ha->srb_mempool = NULL;
2255 + fail_free_gid_list:
2256 + dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
2257 +diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
2258 +index 14ad111b2851..970f655f8532 100644
2259 +--- a/drivers/scsi/scsi_sysfs.c
2260 ++++ b/drivers/scsi/scsi_sysfs.c
2261 +@@ -905,10 +905,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
2262 + struct request_queue *rq = sdev->request_queue;
2263 + struct scsi_target *starget = sdev->sdev_target;
2264 +
2265 +- error = scsi_device_set_state(sdev, SDEV_RUNNING);
2266 +- if (error)
2267 +- return error;
2268 +-
2269 + error = scsi_target_add(starget);
2270 + if (error)
2271 + return error;
2272 +diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
2273 +index 1f65e32db285..0b27d293dd83 100644
2274 +--- a/drivers/scsi/sg.c
2275 ++++ b/drivers/scsi/sg.c
2276 +@@ -568,6 +568,9 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
2277 + sg_io_hdr_t *hp;
2278 + unsigned char cmnd[MAX_COMMAND_SIZE];
2279 +
2280 ++ if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
2281 ++ return -EINVAL;
2282 ++
2283 + if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
2284 + return -ENXIO;
2285 + SCSI_LOG_TIMEOUT(3, printk("sg_write: %s, count=%d\n",
2286 +@@ -766,8 +769,14 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
2287 + return k; /* probably out of space --> ENOMEM */
2288 + }
2289 + if (sdp->detached) {
2290 +- if (srp->bio)
2291 ++ if (srp->bio) {
2292 ++ if (srp->rq->cmd != srp->rq->__cmd)
2293 ++ kfree(srp->rq->cmd);
2294 ++
2295 + blk_end_request_all(srp->rq, -EIO);
2296 ++ srp->rq = NULL;
2297 ++ }
2298 ++
2299 + sg_finish_rem_req(srp);
2300 + return -ENODEV;
2301 + }
2302 +diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
2303 +index a8dc95ebf2d6..7700cef5e177 100644
2304 +--- a/drivers/ssb/pci.c
2305 ++++ b/drivers/ssb/pci.c
2306 +@@ -846,6 +846,7 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
2307 + if (err) {
2308 + ssb_warn("WARNING: Using fallback SPROM failed (err %d)\n",
2309 + err);
2310 ++ goto out_free;
2311 + } else {
2312 + ssb_dbg("Using SPROM revision %d provided by platform\n",
2313 + sprom->revision);
2314 +diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c
2315 +index 72868ceda360..740a8eab262a 100644
2316 +--- a/drivers/staging/iio/adc/ad7606_core.c
2317 ++++ b/drivers/staging/iio/adc/ad7606_core.c
2318 +@@ -189,7 +189,7 @@ static ssize_t ad7606_store_oversampling_ratio(struct device *dev,
2319 + mutex_lock(&indio_dev->mlock);
2320 + gpio_set_value(st->pdata->gpio_os0, (ret >> 0) & 1);
2321 + gpio_set_value(st->pdata->gpio_os1, (ret >> 1) & 1);
2322 +- gpio_set_value(st->pdata->gpio_os1, (ret >> 2) & 1);
2323 ++ gpio_set_value(st->pdata->gpio_os2, (ret >> 2) & 1);
2324 + st->oversampling = lval;
2325 + mutex_unlock(&indio_dev->mlock);
2326 +
2327 +diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
2328 +index b713d63a86f7..ed4ea4ef1420 100644
2329 +--- a/drivers/target/iscsi/iscsi_target_tpg.c
2330 ++++ b/drivers/target/iscsi/iscsi_target_tpg.c
2331 +@@ -258,7 +258,6 @@ err_out:
2332 + iscsi_release_param_list(tpg->param_list);
2333 + tpg->param_list = NULL;
2334 + }
2335 +- kfree(tpg);
2336 + return -ENOMEM;
2337 + }
2338 +
2339 +diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
2340 +index 1967bee4f076..9035fbc5e98d 100644
2341 +--- a/drivers/thermal/thermal_hwmon.c
2342 ++++ b/drivers/thermal/thermal_hwmon.c
2343 +@@ -98,7 +98,7 @@ temp_crit_show(struct device *dev, struct device_attribute *attr, char *buf)
2344 + long temperature;
2345 + int ret;
2346 +
2347 +- ret = tz->ops->get_trip_temp(tz, 0, &temperature);
2348 ++ ret = tz->ops->get_crit_temp(tz, &temperature);
2349 + if (ret)
2350 + return ret;
2351 +
2352 +diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
2353 +index 3299168189cc..e93eaea14ccc 100644
2354 +--- a/drivers/tty/serial/8250/8250_pci.c
2355 ++++ b/drivers/tty/serial/8250/8250_pci.c
2356 +@@ -55,6 +55,7 @@ struct serial_private {
2357 + unsigned int nr;
2358 + void __iomem *remapped_bar[PCI_NUM_BAR_RESOURCES];
2359 + struct pci_serial_quirk *quirk;
2360 ++ const struct pciserial_board *board;
2361 + int line[0];
2362 + };
2363 +
2364 +@@ -3451,6 +3452,7 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board)
2365 + }
2366 + }
2367 + priv->nr = i;
2368 ++ priv->board = board;
2369 + return priv;
2370 +
2371 + err_deinit:
2372 +@@ -3461,7 +3463,7 @@ err_out:
2373 + }
2374 + EXPORT_SYMBOL_GPL(pciserial_init_ports);
2375 +
2376 +-void pciserial_remove_ports(struct serial_private *priv)
2377 ++void pciserial_detach_ports(struct serial_private *priv)
2378 + {
2379 + struct pci_serial_quirk *quirk;
2380 + int i;
2381 +@@ -3481,7 +3483,11 @@ void pciserial_remove_ports(struct serial_private *priv)
2382 + quirk = find_quirk(priv->dev);
2383 + if (quirk->exit)
2384 + quirk->exit(priv->dev);
2385 ++}
2386 +
2387 ++void pciserial_remove_ports(struct serial_private *priv)
2388 ++{
2389 ++ pciserial_detach_ports(priv);
2390 + kfree(priv);
2391 + }
2392 + EXPORT_SYMBOL_GPL(pciserial_remove_ports);
2393 +@@ -5039,7 +5045,7 @@ static pci_ers_result_t serial8250_io_error_detected(struct pci_dev *dev,
2394 + return PCI_ERS_RESULT_DISCONNECT;
2395 +
2396 + if (priv)
2397 +- pciserial_suspend_ports(priv);
2398 ++ pciserial_detach_ports(priv);
2399 +
2400 + pci_disable_device(dev);
2401 +
2402 +@@ -5064,9 +5070,18 @@ static pci_ers_result_t serial8250_io_slot_reset(struct pci_dev *dev)
2403 + static void serial8250_io_resume(struct pci_dev *dev)
2404 + {
2405 + struct serial_private *priv = pci_get_drvdata(dev);
2406 ++ const struct pciserial_board *board;
2407 +
2408 +- if (priv)
2409 +- pciserial_resume_ports(priv);
2410 ++ if (!priv)
2411 ++ return;
2412 ++
2413 ++ board = priv->board;
2414 ++ kfree(priv);
2415 ++ priv = pciserial_init_ports(dev, board);
2416 ++
2417 ++ if (!IS_ERR(priv)) {
2418 ++ pci_set_drvdata(dev, priv);
2419 ++ }
2420 + }
2421 +
2422 + static const struct pci_error_handlers serial8250_err_handler = {
2423 +diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
2424 +index 3b9b80856c1b..aefe343b4212 100644
2425 +--- a/drivers/tty/sysrq.c
2426 ++++ b/drivers/tty/sysrq.c
2427 +@@ -925,8 +925,8 @@ static const struct input_device_id sysrq_ids[] = {
2428 + {
2429 + .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
2430 + INPUT_DEVICE_ID_MATCH_KEYBIT,
2431 +- .evbit = { BIT_MASK(EV_KEY) },
2432 +- .keybit = { BIT_MASK(KEY_LEFTALT) },
2433 ++ .evbit = { [BIT_WORD(EV_KEY)] = BIT_MASK(EV_KEY) },
2434 ++ .keybit = { [BIT_WORD(KEY_LEFTALT)] = BIT_MASK(KEY_LEFTALT) },
2435 + },
2436 + { },
2437 + };
2438 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
2439 +index 2d269169d08b..c78c4f7efb40 100644
2440 +--- a/drivers/usb/class/cdc-acm.c
2441 ++++ b/drivers/usb/class/cdc-acm.c
2442 +@@ -1588,6 +1588,7 @@ static const struct usb_device_id acm_ids[] = {
2443 + .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
2444 + },
2445 + { USB_DEVICE(0x2184, 0x001c) }, /* GW Instek AFG-2225 */
2446 ++ { USB_DEVICE(0x2184, 0x0036) }, /* GW Instek AFG-125 */
2447 + { USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
2448 + },
2449 + /* Motorola H24 HSPA module: */
2450 +diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
2451 +index ce6225959f2c..15b39065f1dc 100644
2452 +--- a/drivers/usb/core/config.c
2453 ++++ b/drivers/usb/core/config.c
2454 +@@ -207,6 +207,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
2455 + if (ifp->desc.bNumEndpoints >= num_ep)
2456 + goto skip_to_next_endpoint_or_interface_descriptor;
2457 +
2458 ++ /* Check for duplicate endpoint addresses */
2459 ++ for (i = 0; i < ifp->desc.bNumEndpoints; ++i) {
2460 ++ if (ifp->endpoint[i].desc.bEndpointAddress ==
2461 ++ d->bEndpointAddress) {
2462 ++ dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
2463 ++ cfgno, inum, asnum, d->bEndpointAddress);
2464 ++ goto skip_to_next_endpoint_or_interface_descriptor;
2465 ++ }
2466 ++ }
2467 ++
2468 + endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
2469 + ++ifp->desc.bNumEndpoints;
2470 +
2471 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
2472 +index 5e788077675b..770cea7de0ec 100644
2473 +--- a/drivers/usb/core/hub.c
2474 ++++ b/drivers/usb/core/hub.c
2475 +@@ -115,6 +115,7 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
2476 +
2477 + static int usb_reset_and_verify_device(struct usb_device *udev);
2478 + static void hub_release(struct kref *kref);
2479 ++static int hub_port_disable(struct usb_hub *hub, int port1, int set_state);
2480 +
2481 + static inline char *portspeed(struct usb_hub *hub, int portstatus)
2482 + {
2483 +@@ -878,89 +879,6 @@ static int hub_set_port_link_state(struct usb_hub *hub, int port1,
2484 + }
2485 +
2486 + /*
2487 +- * If USB 3.0 ports are placed into the Disabled state, they will no longer
2488 +- * detect any device connects or disconnects. This is generally not what the
2489 +- * USB core wants, since it expects a disabled port to produce a port status
2490 +- * change event when a new device connects.
2491 +- *
2492 +- * Instead, set the link state to Disabled, wait for the link to settle into
2493 +- * that state, clear any change bits, and then put the port into the RxDetect
2494 +- * state.
2495 +- */
2496 +-static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
2497 +-{
2498 +- int ret;
2499 +- int total_time;
2500 +- u16 portchange, portstatus;
2501 +-
2502 +- if (!hub_is_superspeed(hub->hdev))
2503 +- return -EINVAL;
2504 +-
2505 +- ret = hub_port_status(hub, port1, &portstatus, &portchange);
2506 +- if (ret < 0)
2507 +- return ret;
2508 +-
2509 +- /*
2510 +- * USB controller Advanced Micro Devices, Inc. [AMD] FCH USB XHCI
2511 +- * Controller [1022:7814] will have spurious result making the following
2512 +- * usb 3.0 device hotplugging route to the 2.0 root hub and recognized
2513 +- * as high-speed device if we set the usb 3.0 port link state to
2514 +- * Disabled. Since it's already in USB_SS_PORT_LS_RX_DETECT state, we
2515 +- * check the state here to avoid the bug.
2516 +- */
2517 +- if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
2518 +- USB_SS_PORT_LS_RX_DETECT) {
2519 +- dev_dbg(&hub->ports[port1 - 1]->dev,
2520 +- "Not disabling port; link state is RxDetect\n");
2521 +- return ret;
2522 +- }
2523 +-
2524 +- ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
2525 +- if (ret)
2526 +- return ret;
2527 +-
2528 +- /* Wait for the link to enter the disabled state. */
2529 +- for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) {
2530 +- ret = hub_port_status(hub, port1, &portstatus, &portchange);
2531 +- if (ret < 0)
2532 +- return ret;
2533 +-
2534 +- if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
2535 +- USB_SS_PORT_LS_SS_DISABLED)
2536 +- break;
2537 +- if (total_time >= HUB_DEBOUNCE_TIMEOUT)
2538 +- break;
2539 +- msleep(HUB_DEBOUNCE_STEP);
2540 +- }
2541 +- if (total_time >= HUB_DEBOUNCE_TIMEOUT)
2542 +- dev_warn(hub->intfdev, "Could not disable port %d after %d ms\n",
2543 +- port1, total_time);
2544 +-
2545 +- return hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_RX_DETECT);
2546 +-}
2547 +-
2548 +-static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
2549 +-{
2550 +- struct usb_device *hdev = hub->hdev;
2551 +- int ret = 0;
2552 +-
2553 +- if (hub->ports[port1 - 1]->child && set_state)
2554 +- usb_set_device_state(hub->ports[port1 - 1]->child,
2555 +- USB_STATE_NOTATTACHED);
2556 +- if (!hub->error) {
2557 +- if (hub_is_superspeed(hub->hdev))
2558 +- ret = hub_usb3_port_disable(hub, port1);
2559 +- else
2560 +- ret = usb_clear_port_feature(hdev, port1,
2561 +- USB_PORT_FEAT_ENABLE);
2562 +- }
2563 +- if (ret && ret != -ENODEV)
2564 +- dev_err(hub->intfdev, "cannot disable port %d (err = %d)\n",
2565 +- port1, ret);
2566 +- return ret;
2567 +-}
2568 +-
2569 +-/*
2570 + * Disable a port and mark a logical connect-change event, so that some
2571 + * time later khubd will disconnect() any existing usb_device on the port
2572 + * and will re-enumerate if there actually is a device attached.
2573 +@@ -3885,6 +3803,26 @@ void usb_unlocked_enable_lpm(struct usb_device *udev)
2574 + }
2575 + EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm);
2576 +
2577 ++/* usb3 devices use U3 for disabled, make sure remote wakeup is disabled */
2578 ++static void hub_usb3_port_prepare_disable(struct usb_hub *hub,
2579 ++ struct usb_port *port_dev)
2580 ++{
2581 ++ struct usb_device *udev = port_dev->child;
2582 ++ int ret;
2583 ++
2584 ++ if (udev && udev->port_is_suspended && udev->do_remote_wakeup) {
2585 ++ ret = hub_set_port_link_state(hub, port_dev->portnum,
2586 ++ USB_SS_PORT_LS_U0);
2587 ++ if (!ret) {
2588 ++ msleep(USB_RESUME_TIMEOUT);
2589 ++ ret = usb_disable_remote_wakeup(udev);
2590 ++ }
2591 ++ if (ret)
2592 ++ dev_warn(&udev->dev,
2593 ++ "Port disable: can't disable remote wake\n");
2594 ++ udev->do_remote_wakeup = 0;
2595 ++ }
2596 ++}
2597 +
2598 + #else /* CONFIG_PM */
2599 +
2600 +@@ -3892,6 +3830,9 @@ EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm);
2601 + #define hub_resume NULL
2602 + #define hub_reset_resume NULL
2603 +
2604 ++static inline void hub_usb3_port_prepare_disable(struct usb_hub *hub,
2605 ++ struct usb_port *port_dev) { }
2606 ++
2607 + int usb_disable_lpm(struct usb_device *udev)
2608 + {
2609 + return 0;
2610 +@@ -3921,6 +3862,35 @@ EXPORT_SYMBOL_GPL(usb_enable_ltm);
2611 +
2612 + #endif /* CONFIG_PM */
2613 +
2614 ++/*
2615 ++ * USB-3 does not have a similar link state as USB-2 that will avoid negotiating
2616 ++ * a connection with a plugged-in cable but will signal the host when the cable
2617 ++ * is unplugged. Disable remote wake and set link state to U3 for USB-3 devices
2618 ++ */
2619 ++static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
2620 ++{
2621 ++ struct usb_port *port_dev = hub->ports[port1 - 1];
2622 ++ struct usb_device *hdev = hub->hdev;
2623 ++ int ret = 0;
2624 ++
2625 ++ if (!hub->error) {
2626 ++ if (hub_is_superspeed(hub->hdev)) {
2627 ++ hub_usb3_port_prepare_disable(hub, port_dev);
2628 ++ ret = hub_set_port_link_state(hub, port_dev->portnum,
2629 ++ USB_SS_PORT_LS_U3);
2630 ++ } else {
2631 ++ ret = usb_clear_port_feature(hdev, port1,
2632 ++ USB_PORT_FEAT_ENABLE);
2633 ++ }
2634 ++ }
2635 ++ if (port_dev->child && set_state)
2636 ++ usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED);
2637 ++ if (ret && ret != -ENODEV)
2638 ++ dev_err(hub->intfdev, "cannot disable port %d (err = %d)\n",
2639 ++ port1, ret);
2640 ++ return ret;
2641 ++}
2642 ++
2643 +
2644 + /* USB 2.0 spec, 7.1.7.3 / fig 7-29:
2645 + *
2646 +diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
2647 +index 2e252aae51ca..b4e123152533 100644
2648 +--- a/drivers/usb/dwc3/dwc3-pci.c
2649 ++++ b/drivers/usb/dwc3/dwc3-pci.c
2650 +@@ -30,6 +30,14 @@
2651 + #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd
2652 + #define PCI_DEVICE_ID_INTEL_BYT 0x0f37
2653 + #define PCI_DEVICE_ID_INTEL_MRFLD 0x119e
2654 ++#define PCI_DEVICE_ID_INTEL_BSW 0x22B7
2655 ++#define PCI_DEVICE_ID_INTEL_SPTLP 0x9d30
2656 ++#define PCI_DEVICE_ID_INTEL_SPTH 0xa130
2657 ++#define PCI_DEVICE_ID_INTEL_BXT 0x0aaa
2658 ++#define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa
2659 ++#define PCI_DEVICE_ID_INTEL_APL 0x5aaa
2660 ++#define PCI_DEVICE_ID_INTEL_KBP 0xa2b0
2661 ++#define PCI_DEVICE_ID_INTEL_GLK 0x31aa
2662 +
2663 + struct dwc3_pci {
2664 + struct device *dev;
2665 +@@ -189,8 +197,16 @@ static DEFINE_PCI_DEVICE_TABLE(dwc3_pci_id_table) = {
2666 + PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS,
2667 + PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3),
2668 + },
2669 ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BSW), },
2670 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), },
2671 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), },
2672 ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), },
2673 ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), },
2674 ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), },
2675 ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT_M), },
2676 ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), },
2677 ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBP), },
2678 ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GLK), },
2679 + { } /* Terminating Entry */
2680 + };
2681 + MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table);
2682 +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
2683 +index af03ea2c9c78..f4a36f4669bb 100644
2684 +--- a/drivers/usb/dwc3/gadget.c
2685 ++++ b/drivers/usb/dwc3/gadget.c
2686 +@@ -245,11 +245,11 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
2687 + if (req->request.status == -EINPROGRESS)
2688 + req->request.status = status;
2689 +
2690 +- if (dwc->ep0_bounced && dep->number == 0)
2691 ++ if (dwc->ep0_bounced && dep->number <= 1)
2692 + dwc->ep0_bounced = false;
2693 +- else
2694 +- usb_gadget_unmap_request(&dwc->gadget, &req->request,
2695 +- req->direction);
2696 ++
2697 ++ usb_gadget_unmap_request(&dwc->gadget, &req->request,
2698 ++ req->direction);
2699 +
2700 + dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
2701 + req, dep->name, req->request.actual,
2702 +diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
2703 +index a0b5a13b52b0..2c0f38811ee7 100644
2704 +--- a/drivers/usb/gadget/composite.c
2705 ++++ b/drivers/usb/gadget/composite.c
2706 +@@ -125,11 +125,16 @@ int config_ep_by_speed(struct usb_gadget *g,
2707 +
2708 + ep_found:
2709 + /* commit results */
2710 +- _ep->maxpacket = usb_endpoint_maxp(chosen_desc);
2711 ++ _ep->maxpacket = usb_endpoint_maxp(chosen_desc) & 0x7ff;
2712 + _ep->desc = chosen_desc;
2713 + _ep->comp_desc = NULL;
2714 + _ep->maxburst = 0;
2715 +- _ep->mult = 0;
2716 ++ _ep->mult = 1;
2717 ++
2718 ++ if (g->speed == USB_SPEED_HIGH && (usb_endpoint_xfer_isoc(_ep->desc) ||
2719 ++ usb_endpoint_xfer_int(_ep->desc)))
2720 ++ _ep->mult = ((usb_endpoint_maxp(_ep->desc) & 0x1800) >> 11) + 1;
2721 ++
2722 + if (!want_comp_desc)
2723 + return 0;
2724 +
2725 +@@ -146,7 +151,7 @@ ep_found:
2726 + switch (usb_endpoint_type(_ep->desc)) {
2727 + case USB_ENDPOINT_XFER_ISOC:
2728 + /* mult: bits 1:0 of bmAttributes */
2729 +- _ep->mult = comp_desc->bmAttributes & 0x3;
2730 ++ _ep->mult = (comp_desc->bmAttributes & 0x3) + 1;
2731 + case USB_ENDPOINT_XFER_BULK:
2732 + case USB_ENDPOINT_XFER_INT:
2733 + _ep->maxburst = comp_desc->bMaxBurst + 1;
2734 +@@ -1320,9 +1325,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
2735 + value = min(w_length, (u16) 1);
2736 + break;
2737 +
2738 +- /* function drivers must handle get/set altsetting; if there's
2739 +- * no get() method, we know only altsetting zero works.
2740 +- */
2741 ++ /* function drivers must handle get/set altsetting */
2742 + case USB_REQ_SET_INTERFACE:
2743 + if (ctrl->bRequestType != USB_RECIP_INTERFACE)
2744 + goto unknown;
2745 +@@ -1331,7 +1334,13 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
2746 + f = cdev->config->interface[intf];
2747 + if (!f)
2748 + break;
2749 +- if (w_value && !f->set_alt)
2750 ++
2751 ++ /*
2752 ++ * If there's no get_alt() method, we know only altsetting zero
2753 ++ * works. There is no need to check if set_alt() is not NULL
2754 ++ * as we check this in usb_add_function().
2755 ++ */
2756 ++ if (w_value && !f->get_alt)
2757 + break;
2758 + value = f->set_alt(f, w_index, w_value);
2759 + if (value == USB_GADGET_DELAYED_STATUS) {
2760 +diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
2761 +index b8a2376971a4..341976289d15 100644
2762 +--- a/drivers/usb/gadget/dummy_hcd.c
2763 ++++ b/drivers/usb/gadget/dummy_hcd.c
2764 +@@ -266,7 +266,7 @@ static void nuke(struct dummy *dum, struct dummy_ep *ep)
2765 + /* caller must hold lock */
2766 + static void stop_activity(struct dummy *dum)
2767 + {
2768 +- struct dummy_ep *ep;
2769 ++ int i;
2770 +
2771 + /* prevent any more requests */
2772 + dum->address = 0;
2773 +@@ -274,8 +274,8 @@ static void stop_activity(struct dummy *dum)
2774 + /* The timer is left running so that outstanding URBs can fail */
2775 +
2776 + /* nuke any pending requests first, so driver i/o is quiesced */
2777 +- list_for_each_entry(ep, &dum->gadget.ep_list, ep.ep_list)
2778 +- nuke(dum, ep);
2779 ++ for (i = 0; i < DUMMY_ENDPOINTS; ++i)
2780 ++ nuke(dum, &dum->ep[i]);
2781 +
2782 + /* driver now does any non-usb quiescing necessary */
2783 + }
2784 +diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
2785 +index 4ac9e9928d67..8fa7ba0f6beb 100644
2786 +--- a/drivers/usb/gadget/inode.c
2787 ++++ b/drivers/usb/gadget/inode.c
2788 +@@ -1199,7 +1199,7 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
2789 + /* data and/or status stage for control request */
2790 + } else if (dev->state == STATE_DEV_SETUP) {
2791 +
2792 +- /* IN DATA+STATUS caller makes len <= wLength */
2793 ++ len = min_t(size_t, len, dev->setup_wLength);
2794 + if (dev->setup_in) {
2795 + retval = setup_req (dev->gadget->ep0, dev->req, len);
2796 + if (retval == 0) {
2797 +@@ -1829,10 +1829,12 @@ static struct usb_gadget_driver probe_driver = {
2798 + * such as configuration notifications.
2799 + */
2800 +
2801 +-static int is_valid_config (struct usb_config_descriptor *config)
2802 ++static int is_valid_config(struct usb_config_descriptor *config,
2803 ++ unsigned int total)
2804 + {
2805 + return config->bDescriptorType == USB_DT_CONFIG
2806 + && config->bLength == USB_DT_CONFIG_SIZE
2807 ++ && total >= USB_DT_CONFIG_SIZE
2808 + && config->bConfigurationValue != 0
2809 + && (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0
2810 + && (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0;
2811 +@@ -1849,7 +1851,8 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
2812 + u32 tag;
2813 + char *kbuf;
2814 +
2815 +- if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4))
2816 ++ if ((len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) ||
2817 ++ (len > PAGE_SIZE * 4))
2818 + return -EINVAL;
2819 +
2820 + /* we might need to change message format someday */
2821 +@@ -1873,7 +1876,8 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
2822 + /* full or low speed config */
2823 + dev->config = (void *) kbuf;
2824 + total = le16_to_cpu(dev->config->wTotalLength);
2825 +- if (!is_valid_config (dev->config) || total >= length)
2826 ++ if (!is_valid_config(dev->config, total) ||
2827 ++ total > length - USB_DT_DEVICE_SIZE)
2828 + goto fail;
2829 + kbuf += total;
2830 + length -= total;
2831 +@@ -1882,10 +1886,13 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
2832 + if (kbuf [1] == USB_DT_CONFIG) {
2833 + dev->hs_config = (void *) kbuf;
2834 + total = le16_to_cpu(dev->hs_config->wTotalLength);
2835 +- if (!is_valid_config (dev->hs_config) || total >= length)
2836 ++ if (!is_valid_config(dev->hs_config, total) ||
2837 ++ total > length - USB_DT_DEVICE_SIZE)
2838 + goto fail;
2839 + kbuf += total;
2840 + length -= total;
2841 ++ } else {
2842 ++ dev->hs_config = NULL;
2843 + }
2844 +
2845 + /* could support multiple configs, using another encoding! */
2846 +diff --git a/drivers/usb/gadget/uvc_video.c b/drivers/usb/gadget/uvc_video.c
2847 +index 71e896d4c5ae..43e8c65fd9ed 100644
2848 +--- a/drivers/usb/gadget/uvc_video.c
2849 ++++ b/drivers/usb/gadget/uvc_video.c
2850 +@@ -240,7 +240,7 @@ uvc_video_alloc_requests(struct uvc_video *video)
2851 +
2852 + req_size = video->ep->maxpacket
2853 + * max_t(unsigned int, video->ep->maxburst, 1)
2854 +- * (video->ep->mult + 1);
2855 ++ * (video->ep->mult);
2856 +
2857 + for (i = 0; i < UVC_NUM_REQUESTS; ++i) {
2858 + video->req_buffer[i] = kmalloc(req_size, GFP_KERNEL);
2859 +diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c
2860 +index 0f228c46eeda..ad458ef4b7e9 100644
2861 +--- a/drivers/usb/host/uhci-pci.c
2862 ++++ b/drivers/usb/host/uhci-pci.c
2863 +@@ -129,6 +129,10 @@ static int uhci_pci_init(struct usb_hcd *hcd)
2864 + if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_HP)
2865 + uhci->wait_for_hp = 1;
2866 +
2867 ++ /* Intel controllers use non-PME wakeup signalling */
2868 ++ if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_INTEL)
2869 ++ device_set_run_wake(uhci_dev(uhci), 1);
2870 ++
2871 + /* Set up pointers to PCI-specific functions */
2872 + uhci->reset_hc = uhci_pci_reset_hc;
2873 + uhci->check_and_reset_hc = uhci_pci_check_and_reset_hc;
2874 +diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
2875 +index 8a79270ca44d..f97a382e3e76 100644
2876 +--- a/drivers/usb/host/xhci-hub.c
2877 ++++ b/drivers/usb/host/xhci-hub.c
2878 +@@ -1221,6 +1221,35 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
2879 + return 0;
2880 + }
2881 +
2882 ++/*
2883 ++ * Workaround for missing Cold Attach Status (CAS) if device re-plugged in S3.
2884 ++ * warm reset a USB3 device stuck in polling or compliance mode after resume.
2885 ++ * See Intel 100/c230 series PCH specification update Doc #332692-006 Errata #8
2886 ++ */
2887 ++static bool xhci_port_missing_cas_quirk(int port_index,
2888 ++ __le32 __iomem **port_array)
2889 ++{
2890 ++ u32 portsc;
2891 ++
2892 ++ portsc = readl(port_array[port_index]);
2893 ++
2894 ++ /* if any of these are set we are not stuck */
2895 ++ if (portsc & (PORT_CONNECT | PORT_CAS))
2896 ++ return false;
2897 ++
2898 ++ if (((portsc & PORT_PLS_MASK) != XDEV_POLLING) &&
2899 ++ ((portsc & PORT_PLS_MASK) != XDEV_COMP_MODE))
2900 ++ return false;
2901 ++
2902 ++ /* clear wakeup/change bits, and do a warm port reset */
2903 ++ portsc &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
2904 ++ portsc |= PORT_WR;
2905 ++ writel(portsc, port_array[port_index]);
2906 ++ /* flush write */
2907 ++ readl(port_array[port_index]);
2908 ++ return true;
2909 ++}
2910 ++
2911 + int xhci_bus_resume(struct usb_hcd *hcd)
2912 + {
2913 + struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2914 +@@ -1255,6 +1284,14 @@ int xhci_bus_resume(struct usb_hcd *hcd)
2915 + int slot_id;
2916 +
2917 + temp = xhci_readl(xhci, port_array[port_index]);
2918 ++
2919 ++ /* warm reset CAS limited ports stuck in polling/compliance */
2920 ++ if ((xhci->quirks & XHCI_MISSING_CAS) &&
2921 ++ (hcd->speed >= HCD_USB3) &&
2922 ++ xhci_port_missing_cas_quirk(port_index, port_array)) {
2923 ++ xhci_dbg(xhci, "reset stuck port %d\n", port_index);
2924 ++ continue;
2925 ++ }
2926 + if (DEV_SUPERSPEED(temp))
2927 + temp &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
2928 + else
2929 +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
2930 +index bc5307f9367f..34323aa444e3 100644
2931 +--- a/drivers/usb/host/xhci-mem.c
2932 ++++ b/drivers/usb/host/xhci-mem.c
2933 +@@ -865,6 +865,40 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
2934 + xhci->devs[slot_id] = NULL;
2935 + }
2936 +
2937 ++/*
2938 ++ * Free a virt_device structure.
2939 ++ * If the virt_device added a tt_info (a hub) and has children pointing to
2940 ++ * that tt_info, then free the child first. Recursive.
2941 ++ * We can't rely on udev at this point to find child-parent relationships.
2942 ++ */
2943 ++void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
2944 ++{
2945 ++ struct xhci_virt_device *vdev;
2946 ++ struct list_head *tt_list_head;
2947 ++ struct xhci_tt_bw_info *tt_info, *next;
2948 ++ int i;
2949 ++
2950 ++ vdev = xhci->devs[slot_id];
2951 ++ if (!vdev)
2952 ++ return;
2953 ++
2954 ++ tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
2955 ++ list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
2956 ++ /* is this a hub device that added a tt_info to the tts list */
2957 ++ if (tt_info->slot_id == slot_id) {
2958 ++ /* are any devices using this tt_info? */
2959 ++ for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
2960 ++ vdev = xhci->devs[i];
2961 ++ if (vdev && (vdev->tt_info == tt_info))
2962 ++ xhci_free_virt_devices_depth_first(
2963 ++ xhci, i);
2964 ++ }
2965 ++ }
2966 ++ }
2967 ++ /* we are now at a leaf device */
2968 ++ xhci_free_virt_device(xhci, slot_id);
2969 ++}
2970 ++
2971 + int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
2972 + struct usb_device *udev, gfp_t flags)
2973 + {
2974 +@@ -1735,8 +1769,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
2975 + }
2976 + }
2977 +
2978 +- for (i = 1; i < MAX_HC_SLOTS; ++i)
2979 +- xhci_free_virt_device(xhci, i);
2980 ++ for (i = HCS_MAX_SLOTS(xhci->hcs_params1); i > 0; i--)
2981 ++ xhci_free_virt_devices_depth_first(xhci, i);
2982 +
2983 + if (xhci->segment_pool)
2984 + dma_pool_destroy(xhci->segment_pool);
2985 +@@ -2270,7 +2304,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2986 + * "physically contiguous and 64-byte (cache line) aligned".
2987 + */
2988 + xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
2989 +- GFP_KERNEL);
2990 ++ flags);
2991 + if (!xhci->dcbaa)
2992 + goto fail;
2993 + memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
2994 +@@ -2365,7 +2399,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2995 +
2996 + xhci->erst.entries = dma_alloc_coherent(dev,
2997 + sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
2998 +- GFP_KERNEL);
2999 ++ flags);
3000 + if (!xhci->erst.entries)
3001 + goto fail;
3002 + xhci_dbg_trace(xhci, trace_xhci_dbg_init,
3003 +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
3004 +index 1ee8c97ae6be..6b11f6df76aa 100644
3005 +--- a/drivers/usb/host/xhci-pci.c
3006 ++++ b/drivers/usb/host/xhci-pci.c
3007 +@@ -41,6 +41,9 @@
3008 + #define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5
3009 + #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
3010 + #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
3011 ++#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8
3012 ++#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8
3013 ++#define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8
3014 +
3015 + static const char hcd_name[] = "xhci_hcd";
3016 +
3017 +@@ -138,9 +141,17 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
3018 + if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
3019 + (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
3020 + pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
3021 +- pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)) {
3022 ++ pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
3023 ++ pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
3024 ++ pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI ||
3025 ++ pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI)) {
3026 + xhci->quirks |= XHCI_PME_STUCK_QUIRK;
3027 + }
3028 ++ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
3029 ++ (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
3030 ++ pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI))
3031 ++ xhci->quirks |= XHCI_MISSING_CAS;
3032 ++
3033 + if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
3034 + pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
3035 + xhci->quirks |= XHCI_RESET_ON_RESUME;
3036 +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
3037 +index 4bcea54f60cd..8f1159612593 100644
3038 +--- a/drivers/usb/host/xhci-ring.c
3039 ++++ b/drivers/usb/host/xhci-ring.c
3040 +@@ -948,13 +948,6 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
3041 + spin_lock_irqsave(&xhci->lock, flags);
3042 +
3043 + ep->stop_cmds_pending--;
3044 +- if (xhci->xhc_state & XHCI_STATE_DYING) {
3045 +- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
3046 +- "Stop EP timer ran, but another timer marked "
3047 +- "xHCI as DYING, exiting.");
3048 +- spin_unlock_irqrestore(&xhci->lock, flags);
3049 +- return;
3050 +- }
3051 + if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
3052 + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
3053 + "Stop EP timer ran, but no command pending, "
3054 +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
3055 +index ea185eaeae28..04ba50b05075 100644
3056 +--- a/drivers/usb/host/xhci.c
3057 ++++ b/drivers/usb/host/xhci.c
3058 +@@ -1538,19 +1538,6 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
3059 + xhci_urb_free_priv(xhci, urb_priv);
3060 + return ret;
3061 + }
3062 +- if ((xhci->xhc_state & XHCI_STATE_DYING) ||
3063 +- (xhci->xhc_state & XHCI_STATE_HALTED)) {
3064 +- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
3065 +- "Ep 0x%x: URB %p to be canceled on "
3066 +- "non-responsive xHCI host.",
3067 +- urb->ep->desc.bEndpointAddress, urb);
3068 +- /* Let the stop endpoint command watchdog timer (which set this
3069 +- * state) finish cleaning up the endpoint TD lists. We must
3070 +- * have caught it in the middle of dropping a lock and giving
3071 +- * back an URB.
3072 +- */
3073 +- goto done;
3074 +- }
3075 +
3076 + ep_index = xhci_get_endpoint_index(&urb->ep->desc);
3077 + ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
3078 +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
3079 +index 0419137c4732..83bfb60d19c0 100644
3080 +--- a/drivers/usb/host/xhci.h
3081 ++++ b/drivers/usb/host/xhci.h
3082 +@@ -286,6 +286,8 @@ struct xhci_op_regs {
3083 + #define XDEV_U2 (0x2 << 5)
3084 + #define XDEV_U3 (0x3 << 5)
3085 + #define XDEV_INACTIVE (0x6 << 5)
3086 ++#define XDEV_POLLING (0x7 << 5)
3087 ++#define XDEV_COMP_MODE (0xa << 5)
3088 + #define XDEV_RESUME (0xf << 5)
3089 + /* true: port has power (see HCC_PPC) */
3090 + #define PORT_POWER (1 << 9)
3091 +@@ -1555,6 +1557,7 @@ struct xhci_hcd {
3092 + #define XHCI_SLOW_SUSPEND (1 << 17)
3093 + #define XHCI_SPURIOUS_WAKEUP (1 << 18)
3094 + #define XHCI_PME_STUCK_QUIRK (1 << 20)
3095 ++#define XHCI_MISSING_CAS (1 << 24)
3096 + unsigned int num_active_eps;
3097 + unsigned int limit_active_eps;
3098 + /* There are two roothubs to keep track of bus suspend info for */
3099 +diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h
3100 +index f7b13fd25257..a3dcbd55e436 100644
3101 +--- a/drivers/usb/musb/musbhsdma.h
3102 ++++ b/drivers/usb/musb/musbhsdma.h
3103 +@@ -157,5 +157,5 @@ struct musb_dma_controller {
3104 + void __iomem *base;
3105 + u8 channel_count;
3106 + u8 used_channels;
3107 +- u8 irq;
3108 ++ int irq;
3109 + };
3110 +diff --git a/drivers/usb/phy/phy-am335x-control.c b/drivers/usb/phy/phy-am335x-control.c
3111 +index 22cf07d62e4c..0b8efff8524c 100644
3112 +--- a/drivers/usb/phy/phy-am335x-control.c
3113 ++++ b/drivers/usb/phy/phy-am335x-control.c
3114 +@@ -85,7 +85,9 @@ struct phy_control *am335x_get_phy_control(struct device *dev)
3115 + return NULL;
3116 +
3117 + dev = bus_find_device(&platform_bus_type, NULL, node, match);
3118 ++ of_node_put(node);
3119 + ctrl_usb = dev_get_drvdata(dev);
3120 ++ put_device(dev);
3121 + if (!ctrl_usb)
3122 + return NULL;
3123 + return &ctrl_usb->phy_ctrl;
3124 +diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
3125 +index c2a4171ab9cb..a4e5be5aea46 100644
3126 +--- a/drivers/usb/serial/ch341.c
3127 ++++ b/drivers/usb/serial/ch341.c
3128 +@@ -97,6 +97,8 @@ static int ch341_control_out(struct usb_device *dev, u8 request,
3129 + r = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), request,
3130 + USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
3131 + value, index, NULL, 0, DEFAULT_TIMEOUT);
3132 ++ if (r < 0)
3133 ++ dev_err(&dev->dev, "failed to send control message: %d\n", r);
3134 +
3135 + return r;
3136 + }
3137 +@@ -114,7 +116,20 @@ static int ch341_control_in(struct usb_device *dev,
3138 + r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request,
3139 + USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
3140 + value, index, buf, bufsize, DEFAULT_TIMEOUT);
3141 +- return r;
3142 ++ if (r < bufsize) {
3143 ++ if (r >= 0) {
3144 ++ dev_err(&dev->dev,
3145 ++ "short control message received (%d < %u)\n",
3146 ++ r, bufsize);
3147 ++ r = -EIO;
3148 ++ }
3149 ++
3150 ++ dev_err(&dev->dev, "failed to receive control message: %d\n",
3151 ++ r);
3152 ++ return r;
3153 ++ }
3154 ++
3155 ++ return 0;
3156 + }
3157 +
3158 + static int ch341_set_baudrate(struct usb_device *dev,
3159 +@@ -156,9 +171,9 @@ static int ch341_set_handshake(struct usb_device *dev, u8 control)
3160 +
3161 + static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv)
3162 + {
3163 ++ const unsigned int size = 2;
3164 + char *buffer;
3165 + int r;
3166 +- const unsigned size = 8;
3167 + unsigned long flags;
3168 +
3169 + buffer = kmalloc(size, GFP_KERNEL);
3170 +@@ -169,15 +184,10 @@ static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv)
3171 + if (r < 0)
3172 + goto out;
3173 +
3174 +- /* setup the private status if available */
3175 +- if (r == 2) {
3176 +- r = 0;
3177 +- spin_lock_irqsave(&priv->lock, flags);
3178 +- priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT;
3179 +- priv->multi_status_change = 0;
3180 +- spin_unlock_irqrestore(&priv->lock, flags);
3181 +- } else
3182 +- r = -EPROTO;
3183 ++ spin_lock_irqsave(&priv->lock, flags);
3184 ++ priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT;
3185 ++ priv->multi_status_change = 0;
3186 ++ spin_unlock_irqrestore(&priv->lock, flags);
3187 +
3188 + out: kfree(buffer);
3189 + return r;
3190 +@@ -187,9 +197,9 @@ out: kfree(buffer);
3191 +
3192 + static int ch341_configure(struct usb_device *dev, struct ch341_private *priv)
3193 + {
3194 ++ const unsigned int size = 2;
3195 + char *buffer;
3196 + int r;
3197 +- const unsigned size = 8;
3198 +
3199 + buffer = kmalloc(size, GFP_KERNEL);
3200 + if (!buffer)
3201 +@@ -252,7 +262,6 @@ static int ch341_port_probe(struct usb_serial_port *port)
3202 +
3203 + spin_lock_init(&priv->lock);
3204 + priv->baud_rate = DEFAULT_BAUD_RATE;
3205 +- priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR;
3206 +
3207 + r = ch341_configure(port->serial->dev, priv);
3208 + if (r < 0)
3209 +@@ -316,15 +325,15 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port)
3210 +
3211 + r = ch341_configure(serial->dev, priv);
3212 + if (r)
3213 +- goto out;
3214 ++ return r;
3215 +
3216 + r = ch341_set_handshake(serial->dev, priv->line_control);
3217 + if (r)
3218 +- goto out;
3219 ++ return r;
3220 +
3221 + r = ch341_set_baudrate(serial->dev, priv);
3222 + if (r)
3223 +- goto out;
3224 ++ return r;
3225 +
3226 + dev_dbg(&port->dev, "%s - submitting interrupt urb", __func__);
3227 + r = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
3228 +@@ -332,12 +341,19 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port)
3229 + dev_err(&port->dev, "%s - failed submitting interrupt urb,"
3230 + " error %d\n", __func__, r);
3231 + ch341_close(port);
3232 +- goto out;
3233 ++ return r;
3234 + }
3235 +
3236 + r = usb_serial_generic_open(tty, port);
3237 ++ if (r)
3238 ++ goto err_kill_interrupt_urb;
3239 +
3240 +-out: return r;
3241 ++ return 0;
3242 ++
3243 ++err_kill_interrupt_urb:
3244 ++ usb_kill_urb(port->interrupt_in_urb);
3245 ++
3246 ++ return r;
3247 + }
3248 +
3249 + /* Old_termios contains the original termios settings and
3250 +@@ -352,26 +368,25 @@ static void ch341_set_termios(struct tty_struct *tty,
3251 +
3252 + baud_rate = tty_get_baud_rate(tty);
3253 +
3254 +- priv->baud_rate = baud_rate;
3255 +-
3256 + if (baud_rate) {
3257 +- spin_lock_irqsave(&priv->lock, flags);
3258 +- priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS);
3259 +- spin_unlock_irqrestore(&priv->lock, flags);
3260 ++ priv->baud_rate = baud_rate;
3261 + ch341_set_baudrate(port->serial->dev, priv);
3262 +- } else {
3263 +- spin_lock_irqsave(&priv->lock, flags);
3264 +- priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS);
3265 +- spin_unlock_irqrestore(&priv->lock, flags);
3266 + }
3267 +
3268 +- ch341_set_handshake(port->serial->dev, priv->line_control);
3269 +-
3270 + /* Unimplemented:
3271 + * (cflag & CSIZE) : data bits [5, 8]
3272 + * (cflag & PARENB) : parity {NONE, EVEN, ODD}
3273 + * (cflag & CSTOPB) : stop bits [1, 2]
3274 + */
3275 ++
3276 ++ spin_lock_irqsave(&priv->lock, flags);
3277 ++ if (C_BAUD(tty) == B0)
3278 ++ priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS);
3279 ++ else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
3280 ++ priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS);
3281 ++ spin_unlock_irqrestore(&priv->lock, flags);
3282 ++
3283 ++ ch341_set_handshake(port->serial->dev, priv->line_control);
3284 + }
3285 +
3286 + static void ch341_break_ctl(struct tty_struct *tty, int break_state)
3287 +@@ -570,14 +585,23 @@ static int ch341_tiocmget(struct tty_struct *tty)
3288 +
3289 + static int ch341_reset_resume(struct usb_serial *serial)
3290 + {
3291 +- struct ch341_private *priv;
3292 +-
3293 +- priv = usb_get_serial_port_data(serial->port[0]);
3294 ++ struct usb_serial_port *port = serial->port[0];
3295 ++ struct ch341_private *priv = usb_get_serial_port_data(port);
3296 ++ int ret;
3297 +
3298 + /* reconfigure ch341 serial port after bus-reset */
3299 + ch341_configure(serial->dev, priv);
3300 +
3301 +- return 0;
3302 ++ if (test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
3303 ++ ret = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
3304 ++ if (ret) {
3305 ++ dev_err(&port->dev, "failed to submit interrupt urb: %d\n",
3306 ++ ret);
3307 ++ return ret;
3308 ++ }
3309 ++ }
3310 ++
3311 ++ return usb_serial_generic_resume(serial);
3312 + }
3313 +
3314 + static struct usb_serial_driver ch341_device = {
3315 +diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
3316 +index 781426230d69..bb3c7f09f059 100644
3317 +--- a/drivers/usb/serial/cyberjack.c
3318 ++++ b/drivers/usb/serial/cyberjack.c
3319 +@@ -51,6 +51,7 @@
3320 + #define CYBERJACK_PRODUCT_ID 0x0100
3321 +
3322 + /* Function prototypes */
3323 ++static int cyberjack_attach(struct usb_serial *serial);
3324 + static int cyberjack_port_probe(struct usb_serial_port *port);
3325 + static int cyberjack_port_remove(struct usb_serial_port *port);
3326 + static int cyberjack_open(struct tty_struct *tty,
3327 +@@ -78,6 +79,7 @@ static struct usb_serial_driver cyberjack_device = {
3328 + .description = "Reiner SCT Cyberjack USB card reader",
3329 + .id_table = id_table,
3330 + .num_ports = 1,
3331 ++ .attach = cyberjack_attach,
3332 + .port_probe = cyberjack_port_probe,
3333 + .port_remove = cyberjack_port_remove,
3334 + .open = cyberjack_open,
3335 +@@ -101,6 +103,14 @@ struct cyberjack_private {
3336 + short wrsent; /* Data already sent */
3337 + };
3338 +
3339 ++static int cyberjack_attach(struct usb_serial *serial)
3340 ++{
3341 ++ if (serial->num_bulk_out < serial->num_ports)
3342 ++ return -ENODEV;
3343 ++
3344 ++ return 0;
3345 ++}
3346 ++
3347 + static int cyberjack_port_probe(struct usb_serial_port *port)
3348 + {
3349 + struct cyberjack_private *priv;
3350 +diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
3351 +index 04b5ed90ffb2..9f1381dfce8c 100644
3352 +--- a/drivers/usb/serial/garmin_gps.c
3353 ++++ b/drivers/usb/serial/garmin_gps.c
3354 +@@ -1049,6 +1049,7 @@ static int garmin_write_bulk(struct usb_serial_port *port,
3355 + "%s - usb_submit_urb(write bulk) failed with status = %d\n",
3356 + __func__, status);
3357 + count = status;
3358 ++ kfree(buffer);
3359 + }
3360 +
3361 + /* we are done with this urb, so let the host driver
3362 +diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
3363 +index 0d037cc40e51..75e5ed82d17e 100644
3364 +--- a/drivers/usb/serial/io_edgeport.c
3365 ++++ b/drivers/usb/serial/io_edgeport.c
3366 +@@ -2781,6 +2781,11 @@ static int edge_startup(struct usb_serial *serial)
3367 + EDGE_COMPATIBILITY_MASK1,
3368 + EDGE_COMPATIBILITY_MASK2 };
3369 +
3370 ++ if (serial->num_bulk_in < 1 || serial->num_interrupt_in < 1) {
3371 ++ dev_err(&serial->interface->dev, "missing endpoints\n");
3372 ++ return -ENODEV;
3373 ++ }
3374 ++
3375 + dev = serial->dev;
3376 +
3377 + /* create our private serial structure */
3378 +diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
3379 +index 0385bc4efefa..d569d773e1ce 100644
3380 +--- a/drivers/usb/serial/io_ti.c
3381 ++++ b/drivers/usb/serial/io_ti.c
3382 +@@ -1390,8 +1390,7 @@ static int download_fw(struct edgeport_serial *serial)
3383 +
3384 + dev_dbg(dev, "%s - Download successful -- Device rebooting...\n", __func__);
3385 +
3386 +- /* return an error on purpose */
3387 +- return -ENODEV;
3388 ++ return 1;
3389 + }
3390 +
3391 + stayinbootmode:
3392 +@@ -1399,7 +1398,7 @@ stayinbootmode:
3393 + dev_dbg(dev, "%s - STAYING IN BOOT MODE\n", __func__);
3394 + serial->product_info.TiMode = TI_MODE_BOOT;
3395 +
3396 +- return 0;
3397 ++ return 1;
3398 + }
3399 +
3400 +
3401 +@@ -2409,6 +2408,13 @@ static int edge_startup(struct usb_serial *serial)
3402 + struct edgeport_serial *edge_serial;
3403 + int status;
3404 +
3405 ++ /* Make sure we have the required endpoints when in download mode. */
3406 ++ if (serial->interface->cur_altsetting->desc.bNumEndpoints > 1) {
3407 ++ if (serial->num_bulk_in < serial->num_ports ||
3408 ++ serial->num_bulk_out < serial->num_ports)
3409 ++ return -ENODEV;
3410 ++ }
3411 ++
3412 + /* create our private serial structure */
3413 + edge_serial = kzalloc(sizeof(struct edgeport_serial), GFP_KERNEL);
3414 + if (edge_serial == NULL) {
3415 +@@ -2420,11 +2426,14 @@ static int edge_startup(struct usb_serial *serial)
3416 + usb_set_serial_data(serial, edge_serial);
3417 +
3418 + status = download_fw(edge_serial);
3419 +- if (status) {
3420 ++ if (status < 0) {
3421 + kfree(edge_serial);
3422 + return status;
3423 + }
3424 +
3425 ++ if (status > 0)
3426 ++ return 1; /* bind but do not register any ports */
3427 ++
3428 + return 0;
3429 + }
3430 +
3431 +diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
3432 +index 57c439a24b5a..66ca41f83ffc 100644
3433 +--- a/drivers/usb/serial/iuu_phoenix.c
3434 ++++ b/drivers/usb/serial/iuu_phoenix.c
3435 +@@ -69,6 +69,16 @@ struct iuu_private {
3436 + u32 clk;
3437 + };
3438 +
3439 ++static int iuu_attach(struct usb_serial *serial)
3440 ++{
3441 ++ unsigned char num_ports = serial->num_ports;
3442 ++
3443 ++ if (serial->num_bulk_in < num_ports || serial->num_bulk_out < num_ports)
3444 ++ return -ENODEV;
3445 ++
3446 ++ return 0;
3447 ++}
3448 ++
3449 + static int iuu_port_probe(struct usb_serial_port *port)
3450 + {
3451 + struct iuu_private *priv;
3452 +@@ -1197,6 +1207,7 @@ static struct usb_serial_driver iuu_device = {
3453 + .tiocmset = iuu_tiocmset,
3454 + .set_termios = iuu_set_termios,
3455 + .init_termios = iuu_init_termios,
3456 ++ .attach = iuu_attach,
3457 + .port_probe = iuu_port_probe,
3458 + .port_remove = iuu_port_remove,
3459 + };
3460 +diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
3461 +index 5f1d382e55cf..05c567bf5cfa 100644
3462 +--- a/drivers/usb/serial/keyspan_pda.c
3463 ++++ b/drivers/usb/serial/keyspan_pda.c
3464 +@@ -697,6 +697,19 @@ MODULE_FIRMWARE("keyspan_pda/keyspan_pda.fw");
3465 + MODULE_FIRMWARE("keyspan_pda/xircom_pgs.fw");
3466 + #endif
3467 +
3468 ++static int keyspan_pda_attach(struct usb_serial *serial)
3469 ++{
3470 ++ unsigned char num_ports = serial->num_ports;
3471 ++
3472 ++ if (serial->num_bulk_out < num_ports ||
3473 ++ serial->num_interrupt_in < num_ports) {
3474 ++ dev_err(&serial->interface->dev, "missing endpoints\n");
3475 ++ return -ENODEV;
3476 ++ }
3477 ++
3478 ++ return 0;
3479 ++}
3480 ++
3481 + static int keyspan_pda_port_probe(struct usb_serial_port *port)
3482 + {
3483 +
3484 +@@ -774,6 +787,7 @@ static struct usb_serial_driver keyspan_pda_device = {
3485 + .break_ctl = keyspan_pda_break_ctl,
3486 + .tiocmget = keyspan_pda_tiocmget,
3487 + .tiocmset = keyspan_pda_tiocmset,
3488 ++ .attach = keyspan_pda_attach,
3489 + .port_probe = keyspan_pda_port_probe,
3490 + .port_remove = keyspan_pda_port_remove,
3491 + };
3492 +diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
3493 +index 1b4054fe52a5..b6794baf0a3b 100644
3494 +--- a/drivers/usb/serial/kl5kusb105.c
3495 ++++ b/drivers/usb/serial/kl5kusb105.c
3496 +@@ -198,10 +198,11 @@ static int klsi_105_get_line_state(struct usb_serial_port *port,
3497 + status_buf, KLSI_STATUSBUF_LEN,
3498 + 10000
3499 + );
3500 +- if (rc < 0)
3501 +- dev_err(&port->dev, "Reading line status failed (error = %d)\n",
3502 +- rc);
3503 +- else {
3504 ++ if (rc != KLSI_STATUSBUF_LEN) {
3505 ++ dev_err(&port->dev, "reading line status failed: %d\n", rc);
3506 ++ if (rc >= 0)
3507 ++ rc = -EIO;
3508 ++ } else {
3509 + status = get_unaligned_le16(status_buf);
3510 +
3511 + dev_info(&port->serial->dev->dev, "read status %x %x",
3512 +@@ -304,7 +305,7 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
3513 + rc = usb_serial_generic_open(tty, port);
3514 + if (rc) {
3515 + retval = rc;
3516 +- goto exit;
3517 ++ goto err_free_cfg;
3518 + }
3519 +
3520 + rc = usb_control_msg(port->serial->dev,
3521 +@@ -319,21 +320,38 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
3522 + if (rc < 0) {
3523 + dev_err(&port->dev, "Enabling read failed (error = %d)\n", rc);
3524 + retval = rc;
3525 ++ goto err_generic_close;
3526 + } else
3527 + dev_dbg(&port->dev, "%s - enabled reading\n", __func__);
3528 +
3529 + rc = klsi_105_get_line_state(port, &line_state);
3530 +- if (rc >= 0) {
3531 +- spin_lock_irqsave(&priv->lock, flags);
3532 +- priv->line_state = line_state;
3533 +- spin_unlock_irqrestore(&priv->lock, flags);
3534 +- dev_dbg(&port->dev, "%s - read line state 0x%lx\n", __func__, line_state);
3535 +- retval = 0;
3536 +- } else
3537 ++ if (rc < 0) {
3538 + retval = rc;
3539 ++ goto err_disable_read;
3540 ++ }
3541 ++
3542 ++ spin_lock_irqsave(&priv->lock, flags);
3543 ++ priv->line_state = line_state;
3544 ++ spin_unlock_irqrestore(&priv->lock, flags);
3545 ++ dev_dbg(&port->dev, "%s - read line state 0x%lx\n", __func__,
3546 ++ line_state);
3547 ++
3548 ++ return 0;
3549 +
3550 +-exit:
3551 ++err_disable_read:
3552 ++ usb_control_msg(port->serial->dev,
3553 ++ usb_sndctrlpipe(port->serial->dev, 0),
3554 ++ KL5KUSB105A_SIO_CONFIGURE,
3555 ++ USB_TYPE_VENDOR | USB_DIR_OUT,
3556 ++ KL5KUSB105A_SIO_CONFIGURE_READ_OFF,
3557 ++ 0, /* index */
3558 ++ NULL, 0,
3559 ++ KLSI_TIMEOUT);
3560 ++err_generic_close:
3561 ++ usb_serial_generic_close(port);
3562 ++err_free_cfg:
3563 + kfree(cfg);
3564 ++
3565 + return retval;
3566 + }
3567 +
3568 +diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
3569 +index efa75b4e51f2..63fa400a822f 100644
3570 +--- a/drivers/usb/serial/kobil_sct.c
3571 ++++ b/drivers/usb/serial/kobil_sct.c
3572 +@@ -52,6 +52,7 @@
3573 +
3574 +
3575 + /* Function prototypes */
3576 ++static int kobil_attach(struct usb_serial *serial);
3577 + static int kobil_port_probe(struct usb_serial_port *probe);
3578 + static int kobil_port_remove(struct usb_serial_port *probe);
3579 + static int kobil_open(struct tty_struct *tty, struct usb_serial_port *port);
3580 +@@ -87,6 +88,7 @@ static struct usb_serial_driver kobil_device = {
3581 + .description = "KOBIL USB smart card terminal",
3582 + .id_table = id_table,
3583 + .num_ports = 1,
3584 ++ .attach = kobil_attach,
3585 + .port_probe = kobil_port_probe,
3586 + .port_remove = kobil_port_remove,
3587 + .ioctl = kobil_ioctl,
3588 +@@ -114,6 +116,16 @@ struct kobil_private {
3589 + };
3590 +
3591 +
3592 ++static int kobil_attach(struct usb_serial *serial)
3593 ++{
3594 ++ if (serial->num_interrupt_out < serial->num_ports) {
3595 ++ dev_err(&serial->interface->dev, "missing interrupt-out endpoint\n");
3596 ++ return -ENODEV;
3597 ++ }
3598 ++
3599 ++ return 0;
3600 ++}
3601 ++
3602 + static int kobil_port_probe(struct usb_serial_port *port)
3603 + {
3604 + struct usb_serial *serial = port->serial;
3605 +diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
3606 +index d40e1dccb998..c5274908ea92 100644
3607 +--- a/drivers/usb/serial/mos7720.c
3608 ++++ b/drivers/usb/serial/mos7720.c
3609 +@@ -66,8 +66,6 @@ struct moschip_port {
3610 + struct urb *write_urb_pool[NUM_URBS];
3611 + };
3612 +
3613 +-static struct usb_serial_driver moschip7720_2port_driver;
3614 +-
3615 + #define USB_VENDOR_ID_MOSCHIP 0x9710
3616 + #define MOSCHIP_DEVICE_ID_7720 0x7720
3617 + #define MOSCHIP_DEVICE_ID_7715 0x7715
3618 +@@ -966,25 +964,6 @@ static void mos7720_bulk_out_data_callback(struct urb *urb)
3619 + tty_port_tty_wakeup(&mos7720_port->port->port);
3620 + }
3621 +
3622 +-/*
3623 +- * mos77xx_probe
3624 +- * this function installs the appropriate read interrupt endpoint callback
3625 +- * depending on whether the device is a 7720 or 7715, thus avoiding costly
3626 +- * run-time checks in the high-frequency callback routine itself.
3627 +- */
3628 +-static int mos77xx_probe(struct usb_serial *serial,
3629 +- const struct usb_device_id *id)
3630 +-{
3631 +- if (id->idProduct == MOSCHIP_DEVICE_ID_7715)
3632 +- moschip7720_2port_driver.read_int_callback =
3633 +- mos7715_interrupt_callback;
3634 +- else
3635 +- moschip7720_2port_driver.read_int_callback =
3636 +- mos7720_interrupt_callback;
3637 +-
3638 +- return 0;
3639 +-}
3640 +-
3641 + static int mos77xx_calc_num_ports(struct usb_serial *serial)
3642 + {
3643 + u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
3644 +@@ -1916,6 +1895,11 @@ static int mos7720_startup(struct usb_serial *serial)
3645 + u16 product;
3646 + int ret_val;
3647 +
3648 ++ if (serial->num_bulk_in < 2 || serial->num_bulk_out < 2) {
3649 ++ dev_err(&serial->interface->dev, "missing bulk endpoints\n");
3650 ++ return -ENODEV;
3651 ++ }
3652 ++
3653 + product = le16_to_cpu(serial->dev->descriptor.idProduct);
3654 + dev = serial->dev;
3655 +
3656 +@@ -1940,19 +1924,18 @@ static int mos7720_startup(struct usb_serial *serial)
3657 + tmp->interrupt_in_endpointAddress;
3658 + serial->port[1]->interrupt_in_urb = NULL;
3659 + serial->port[1]->interrupt_in_buffer = NULL;
3660 ++
3661 ++ if (serial->port[0]->interrupt_in_urb) {
3662 ++ struct urb *urb = serial->port[0]->interrupt_in_urb;
3663 ++
3664 ++ urb->complete = mos7715_interrupt_callback;
3665 ++ }
3666 + }
3667 +
3668 + /* setting configuration feature to one */
3669 + usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
3670 + (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000);
3671 +
3672 +- /* start the interrupt urb */
3673 +- ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL);
3674 +- if (ret_val)
3675 +- dev_err(&dev->dev,
3676 +- "%s - Error %d submitting control urb\n",
3677 +- __func__, ret_val);
3678 +-
3679 + #ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
3680 + if (product == MOSCHIP_DEVICE_ID_7715) {
3681 + ret_val = mos7715_parport_init(serial);
3682 +@@ -1960,6 +1943,13 @@ static int mos7720_startup(struct usb_serial *serial)
3683 + return ret_val;
3684 + }
3685 + #endif
3686 ++ /* start the interrupt urb */
3687 ++ ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL);
3688 ++ if (ret_val) {
3689 ++ dev_err(&dev->dev, "failed to submit interrupt urb: %d\n",
3690 ++ ret_val);
3691 ++ }
3692 ++
3693 + /* LSR For Port 1 */
3694 + read_mos_reg(serial, 0, LSR, &data);
3695 + dev_dbg(&dev->dev, "LSR:%x\n", data);
3696 +@@ -1969,6 +1959,8 @@ static int mos7720_startup(struct usb_serial *serial)
3697 +
3698 + static void mos7720_release(struct usb_serial *serial)
3699 + {
3700 ++ usb_kill_urb(serial->port[0]->interrupt_in_urb);
3701 ++
3702 + #ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
3703 + /* close the parallel port */
3704 +
3705 +@@ -2051,7 +2043,6 @@ static struct usb_serial_driver moschip7720_2port_driver = {
3706 + .close = mos7720_close,
3707 + .throttle = mos7720_throttle,
3708 + .unthrottle = mos7720_unthrottle,
3709 +- .probe = mos77xx_probe,
3710 + .attach = mos7720_startup,
3711 + .release = mos7720_release,
3712 + .port_probe = mos7720_port_probe,
3713 +@@ -2065,7 +2056,7 @@ static struct usb_serial_driver moschip7720_2port_driver = {
3714 + .chars_in_buffer = mos7720_chars_in_buffer,
3715 + .break_ctl = mos7720_break,
3716 + .read_bulk_callback = mos7720_bulk_in_callback,
3717 +- .read_int_callback = NULL /* dynamically assigned in probe() */
3718 ++ .read_int_callback = mos7720_interrupt_callback,
3719 + };
3720 +
3721 + static struct usb_serial_driver * const serial_drivers[] = {
3722 +diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
3723 +index 29b33ecd048b..0b1659026d85 100644
3724 +--- a/drivers/usb/serial/mos7840.c
3725 ++++ b/drivers/usb/serial/mos7840.c
3726 +@@ -2192,6 +2192,17 @@ static int mos7840_calc_num_ports(struct usb_serial *serial)
3727 + return mos7840_num_ports;
3728 + }
3729 +
3730 ++static int mos7840_attach(struct usb_serial *serial)
3731 ++{
3732 ++ if (serial->num_bulk_in < serial->num_ports ||
3733 ++ serial->num_bulk_out < serial->num_ports) {
3734 ++ dev_err(&serial->interface->dev, "missing endpoints\n");
3735 ++ return -ENODEV;
3736 ++ }
3737 ++
3738 ++ return 0;
3739 ++}
3740 ++
3741 + static int mos7840_port_probe(struct usb_serial_port *port)
3742 + {
3743 + struct usb_serial *serial = port->serial;
3744 +@@ -2472,6 +2483,7 @@ static struct usb_serial_driver moschip7840_4port_device = {
3745 + .tiocmset = mos7840_tiocmset,
3746 + .tiocmiwait = usb_serial_generic_tiocmiwait,
3747 + .get_icount = usb_serial_generic_get_icount,
3748 ++ .attach = mos7840_attach,
3749 + .port_probe = mos7840_port_probe,
3750 + .port_remove = mos7840_port_remove,
3751 + .read_bulk_callback = mos7840_bulk_in_callback,
3752 +diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
3753 +index 5739bf6f7200..24720f656387 100644
3754 +--- a/drivers/usb/serial/omninet.c
3755 ++++ b/drivers/usb/serial/omninet.c
3756 +@@ -39,6 +39,7 @@ static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port,
3757 + const unsigned char *buf, int count);
3758 + static int omninet_write_room(struct tty_struct *tty);
3759 + static void omninet_disconnect(struct usb_serial *serial);
3760 ++static int omninet_attach(struct usb_serial *serial);
3761 + static int omninet_port_probe(struct usb_serial_port *port);
3762 + static int omninet_port_remove(struct usb_serial_port *port);
3763 +
3764 +@@ -57,6 +58,7 @@ static struct usb_serial_driver zyxel_omninet_device = {
3765 + .description = "ZyXEL - omni.net lcd plus usb",
3766 + .id_table = id_table,
3767 + .num_ports = 1,
3768 ++ .attach = omninet_attach,
3769 + .port_probe = omninet_port_probe,
3770 + .port_remove = omninet_port_remove,
3771 + .open = omninet_open,
3772 +@@ -105,6 +107,17 @@ struct omninet_data {
3773 + __u8 od_outseq; /* Sequence number for bulk_out URBs */
3774 + };
3775 +
3776 ++static int omninet_attach(struct usb_serial *serial)
3777 ++{
3778 ++ /* The second bulk-out endpoint is used for writing. */
3779 ++ if (serial->num_bulk_out < 2) {
3780 ++ dev_err(&serial->interface->dev, "missing endpoints\n");
3781 ++ return -ENODEV;
3782 ++ }
3783 ++
3784 ++ return 0;
3785 ++}
3786 ++
3787 + static int omninet_port_probe(struct usb_serial_port *port)
3788 + {
3789 + struct omninet_data *od;
3790 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
3791 +index 2bc169692965..99dff08b560b 100644
3792 +--- a/drivers/usb/serial/option.c
3793 ++++ b/drivers/usb/serial/option.c
3794 +@@ -269,6 +269,8 @@ static void option_instat_callback(struct urb *urb);
3795 + #define TELIT_PRODUCT_CC864_SINGLE 0x1006
3796 + #define TELIT_PRODUCT_DE910_DUAL 0x1010
3797 + #define TELIT_PRODUCT_UE910_V2 0x1012
3798 ++#define TELIT_PRODUCT_LE922_USBCFG1 0x1040
3799 ++#define TELIT_PRODUCT_LE922_USBCFG2 0x1041
3800 + #define TELIT_PRODUCT_LE922_USBCFG0 0x1042
3801 + #define TELIT_PRODUCT_LE922_USBCFG3 0x1043
3802 + #define TELIT_PRODUCT_LE922_USBCFG5 0x1045
3803 +@@ -1212,6 +1214,10 @@ static const struct usb_device_id option_ids[] = {
3804 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
3805 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
3806 + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
3807 ++ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1),
3808 ++ .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
3809 ++ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG2),
3810 ++ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
3811 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
3812 + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
3813 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
3814 +@@ -1856,6 +1862,7 @@ static const struct usb_device_id option_ids[] = {
3815 + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
3816 + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
3817 + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
3818 ++ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */
3819 + { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */
3820 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
3821 + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
3822 +diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
3823 +index a2080ac7b7e5..da6404c868e9 100644
3824 +--- a/drivers/usb/serial/oti6858.c
3825 ++++ b/drivers/usb/serial/oti6858.c
3826 +@@ -135,6 +135,7 @@ static int oti6858_tiocmget(struct tty_struct *tty);
3827 + static int oti6858_tiocmset(struct tty_struct *tty,
3828 + unsigned int set, unsigned int clear);
3829 + static int oti6858_tiocmiwait(struct tty_struct *tty, unsigned long arg);
3830 ++static int oti6858_attach(struct usb_serial *serial);
3831 + static int oti6858_port_probe(struct usb_serial_port *port);
3832 + static int oti6858_port_remove(struct usb_serial_port *port);
3833 +
3834 +@@ -159,6 +160,7 @@ static struct usb_serial_driver oti6858_device = {
3835 + .write_bulk_callback = oti6858_write_bulk_callback,
3836 + .write_room = oti6858_write_room,
3837 + .chars_in_buffer = oti6858_chars_in_buffer,
3838 ++ .attach = oti6858_attach,
3839 + .port_probe = oti6858_port_probe,
3840 + .port_remove = oti6858_port_remove,
3841 + };
3842 +@@ -328,6 +330,20 @@ static void send_data(struct work_struct *work)
3843 + usb_serial_port_softint(port);
3844 + }
3845 +
3846 ++static int oti6858_attach(struct usb_serial *serial)
3847 ++{
3848 ++ unsigned char num_ports = serial->num_ports;
3849 ++
3850 ++ if (serial->num_bulk_in < num_ports ||
3851 ++ serial->num_bulk_out < num_ports ||
3852 ++ serial->num_interrupt_in < num_ports) {
3853 ++ dev_err(&serial->interface->dev, "missing endpoints\n");
3854 ++ return -ENODEV;
3855 ++ }
3856 ++
3857 ++ return 0;
3858 ++}
3859 ++
3860 + static int oti6858_port_probe(struct usb_serial_port *port)
3861 + {
3862 + struct oti6858_private *priv;
3863 +diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
3864 +index e47f9c642404..23f11751e05a 100644
3865 +--- a/drivers/usb/serial/pl2303.c
3866 ++++ b/drivers/usb/serial/pl2303.c
3867 +@@ -176,9 +176,17 @@ static int pl2303_vendor_write(__u16 value, __u16 index,
3868 + static int pl2303_startup(struct usb_serial *serial)
3869 + {
3870 + struct pl2303_serial_private *spriv;
3871 ++ unsigned char num_ports = serial->num_ports;
3872 + enum pl2303_type type = type_0;
3873 + unsigned char *buf;
3874 +
3875 ++ if (serial->num_bulk_in < num_ports ||
3876 ++ serial->num_bulk_out < num_ports ||
3877 ++ serial->num_interrupt_in < num_ports) {
3878 ++ dev_err(&serial->interface->dev, "missing endpoints\n");
3879 ++ return -ENODEV;
3880 ++ }
3881 ++
3882 + spriv = kzalloc(sizeof(*spriv), GFP_KERNEL);
3883 + if (!spriv)
3884 + return -ENOMEM;
3885 +diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
3886 +index 58ab9e52a938..d0ee758dff0b 100644
3887 +--- a/drivers/usb/serial/quatech2.c
3888 ++++ b/drivers/usb/serial/quatech2.c
3889 +@@ -409,16 +409,12 @@ static void qt2_close(struct usb_serial_port *port)
3890 + {
3891 + struct usb_serial *serial;
3892 + struct qt2_port_private *port_priv;
3893 +- unsigned long flags;
3894 + int i;
3895 +
3896 + serial = port->serial;
3897 + port_priv = usb_get_serial_port_data(port);
3898 +
3899 +- spin_lock_irqsave(&port_priv->urb_lock, flags);
3900 + usb_kill_urb(port_priv->write_urb);
3901 +- port_priv->urb_in_use = false;
3902 +- spin_unlock_irqrestore(&port_priv->urb_lock, flags);
3903 +
3904 + /* flush the port transmit buffer */
3905 + i = usb_control_msg(serial->dev,
3906 +diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
3907 +index 5b793c352267..ab754d23244c 100644
3908 +--- a/drivers/usb/serial/spcp8x5.c
3909 ++++ b/drivers/usb/serial/spcp8x5.c
3910 +@@ -155,6 +155,19 @@ static int spcp8x5_probe(struct usb_serial *serial,
3911 + return 0;
3912 + }
3913 +
3914 ++static int spcp8x5_attach(struct usb_serial *serial)
3915 ++{
3916 ++ unsigned char num_ports = serial->num_ports;
3917 ++
3918 ++ if (serial->num_bulk_in < num_ports ||
3919 ++ serial->num_bulk_out < num_ports) {
3920 ++ dev_err(&serial->interface->dev, "missing endpoints\n");
3921 ++ return -ENODEV;
3922 ++ }
3923 ++
3924 ++ return 0;
3925 ++}
3926 ++
3927 + static int spcp8x5_port_probe(struct usb_serial_port *port)
3928 + {
3929 + const struct usb_device_id *id = usb_get_serial_data(port->serial);
3930 +@@ -479,6 +492,7 @@ static struct usb_serial_driver spcp8x5_device = {
3931 + .tiocmget = spcp8x5_tiocmget,
3932 + .tiocmset = spcp8x5_tiocmset,
3933 + .probe = spcp8x5_probe,
3934 ++ .attach = spcp8x5_attach,
3935 + .port_probe = spcp8x5_port_probe,
3936 + .port_remove = spcp8x5_port_remove,
3937 + };
3938 +diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
3939 +index 11b402935fbd..a7c3f0800de9 100644
3940 +--- a/drivers/usb/serial/ti_usb_3410_5052.c
3941 ++++ b/drivers/usb/serial/ti_usb_3410_5052.c
3942 +@@ -341,6 +341,13 @@ static int ti_startup(struct usb_serial *serial)
3943 + goto free_tdev;
3944 + }
3945 +
3946 ++ if (serial->num_bulk_in < serial->num_ports ||
3947 ++ serial->num_bulk_out < serial->num_ports) {
3948 ++ dev_err(&serial->interface->dev, "missing endpoints\n");
3949 ++ status = -ENODEV;
3950 ++ goto free_tdev;
3951 ++ }
3952 ++
3953 + return 0;
3954 +
3955 + free_tdev:
3956 +diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
3957 +index 275aa3fc4087..f636e2eb0dd8 100644
3958 +--- a/drivers/vfio/pci/vfio_pci.c
3959 ++++ b/drivers/vfio/pci/vfio_pci.c
3960 +@@ -468,8 +468,9 @@ static long vfio_pci_ioctl(void *device_data,
3961 +
3962 + } else if (cmd == VFIO_DEVICE_SET_IRQS) {
3963 + struct vfio_irq_set hdr;
3964 ++ size_t size;
3965 + u8 *data = NULL;
3966 +- int ret = 0;
3967 ++ int max, ret = 0;
3968 +
3969 + minsz = offsetofend(struct vfio_irq_set, count);
3970 +
3971 +@@ -477,23 +478,31 @@ static long vfio_pci_ioctl(void *device_data,
3972 + return -EFAULT;
3973 +
3974 + if (hdr.argsz < minsz || hdr.index >= VFIO_PCI_NUM_IRQS ||
3975 ++ hdr.count >= (U32_MAX - hdr.start) ||
3976 + hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
3977 + VFIO_IRQ_SET_ACTION_TYPE_MASK))
3978 + return -EINVAL;
3979 +
3980 +- if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
3981 +- size_t size;
3982 +- int max = vfio_pci_get_irq_count(vdev, hdr.index);
3983 ++ max = vfio_pci_get_irq_count(vdev, hdr.index);
3984 ++ if (hdr.start >= max || hdr.start + hdr.count > max)
3985 ++ return -EINVAL;
3986 +
3987 +- if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
3988 +- size = sizeof(uint8_t);
3989 +- else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD)
3990 +- size = sizeof(int32_t);
3991 +- else
3992 +- return -EINVAL;
3993 ++ switch (hdr.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
3994 ++ case VFIO_IRQ_SET_DATA_NONE:
3995 ++ size = 0;
3996 ++ break;
3997 ++ case VFIO_IRQ_SET_DATA_BOOL:
3998 ++ size = sizeof(uint8_t);
3999 ++ break;
4000 ++ case VFIO_IRQ_SET_DATA_EVENTFD:
4001 ++ size = sizeof(int32_t);
4002 ++ break;
4003 ++ default:
4004 ++ return -EINVAL;
4005 ++ }
4006 +
4007 +- if (hdr.argsz - minsz < hdr.count * size ||
4008 +- hdr.start >= max || hdr.start + hdr.count > max)
4009 ++ if (size) {
4010 ++ if (hdr.argsz - minsz < hdr.count * size)
4011 + return -EINVAL;
4012 +
4013 + data = memdup_user((void __user *)(arg + minsz),
4014 +diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
4015 +index 641bc87bdb96..05b0834e26e0 100644
4016 +--- a/drivers/vfio/pci/vfio_pci_intrs.c
4017 ++++ b/drivers/vfio/pci/vfio_pci_intrs.c
4018 +@@ -465,7 +465,7 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
4019 + if (!is_irq_none(vdev))
4020 + return -EINVAL;
4021 +
4022 +- vdev->ctx = kzalloc(nvec * sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
4023 ++ vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
4024 + if (!vdev->ctx)
4025 + return -ENOMEM;
4026 +
4027 +diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
4028 +index 1abbf80ffb19..9733b8a7fea7 100644
4029 +--- a/drivers/vme/bridges/vme_ca91cx42.c
4030 ++++ b/drivers/vme/bridges/vme_ca91cx42.c
4031 +@@ -468,7 +468,7 @@ static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
4032 + vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]);
4033 + pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]);
4034 +
4035 +- *pci_base = (dma_addr_t)vme_base + pci_offset;
4036 ++ *pci_base = (dma_addr_t)*vme_base + pci_offset;
4037 + *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
4038 +
4039 + *enabled = 0;
4040 +diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
4041 +index 27accc4cc999..c17116f63eb1 100644
4042 +--- a/drivers/xen/gntdev.c
4043 ++++ b/drivers/xen/gntdev.c
4044 +@@ -763,7 +763,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
4045 +
4046 + vma->vm_ops = &gntdev_vmops;
4047 +
4048 +- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
4049 ++ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP;
4050 +
4051 + if (use_ptemod)
4052 + vma->vm_flags |= VM_DONTCOPY;
4053 +diff --git a/fs/9p/acl.c b/fs/9p/acl.c
4054 +index 7af425f53bee..9686c1f17653 100644
4055 +--- a/fs/9p/acl.c
4056 ++++ b/fs/9p/acl.c
4057 +@@ -320,32 +320,26 @@ static int v9fs_xattr_set_acl(struct dentry *dentry, const char *name,
4058 + case ACL_TYPE_ACCESS:
4059 + name = POSIX_ACL_XATTR_ACCESS;
4060 + if (acl) {
4061 +- umode_t mode = inode->i_mode;
4062 +- retval = posix_acl_equiv_mode(acl, &mode);
4063 +- if (retval < 0)
4064 ++ struct iattr iattr;
4065 ++
4066 ++ retval = posix_acl_update_mode(inode, &iattr.ia_mode, &acl);
4067 ++ if (retval)
4068 + goto err_out;
4069 +- else {
4070 +- struct iattr iattr;
4071 +- if (retval == 0) {
4072 +- /*
4073 +- * ACL can be represented
4074 +- * by the mode bits. So don't
4075 +- * update ACL.
4076 +- */
4077 +- acl = NULL;
4078 +- value = NULL;
4079 +- size = 0;
4080 +- }
4081 +- /* Updte the mode bits */
4082 +- iattr.ia_mode = ((mode & S_IALLUGO) |
4083 +- (inode->i_mode & ~S_IALLUGO));
4084 +- iattr.ia_valid = ATTR_MODE;
4085 +- /* FIXME should we update ctime ?
4086 +- * What is the following setxattr update the
4087 +- * mode ?
4088 ++ if (!acl) {
4089 ++ /*
4090 ++ * ACL can be represented
4091 ++ * by the mode bits. So don't
4092 ++ * update ACL.
4093 + */
4094 +- v9fs_vfs_setattr_dotl(dentry, &iattr);
4095 ++ value = NULL;
4096 ++ size = 0;
4097 + }
4098 ++ iattr.ia_valid = ATTR_MODE;
4099 ++ /* FIXME should we update ctime ?
4100 ++ * What is the following setxattr update the
4101 ++ * mode ?
4102 ++ */
4103 ++ v9fs_vfs_setattr_dotl(dentry, &iattr);
4104 + }
4105 + break;
4106 + case ACL_TYPE_DEFAULT:
4107 +diff --git a/fs/block_dev.c b/fs/block_dev.c
4108 +index 1e86823a9cbd..e833c974409c 100644
4109 +--- a/fs/block_dev.c
4110 ++++ b/fs/block_dev.c
4111 +@@ -634,7 +634,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
4112 + return true; /* already a holder */
4113 + else if (bdev->bd_holder != NULL)
4114 + return false; /* held by someone else */
4115 +- else if (bdev->bd_contains == bdev)
4116 ++ else if (whole == bdev)
4117 + return true; /* is a whole device which isn't held */
4118 +
4119 + else if (whole->bd_holder == bd_may_claim)
4120 +@@ -1672,6 +1672,7 @@ void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
4121 + spin_lock(&inode_sb_list_lock);
4122 + list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
4123 + struct address_space *mapping = inode->i_mapping;
4124 ++ struct block_device *bdev;
4125 +
4126 + spin_lock(&inode->i_lock);
4127 + if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
4128 +@@ -1692,8 +1693,12 @@ void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
4129 + */
4130 + iput(old_inode);
4131 + old_inode = inode;
4132 ++ bdev = I_BDEV(inode);
4133 +
4134 +- func(I_BDEV(inode), arg);
4135 ++ mutex_lock(&bdev->bd_mutex);
4136 ++ if (bdev->bd_openers)
4137 ++ func(bdev, arg);
4138 ++ mutex_unlock(&bdev->bd_mutex);
4139 +
4140 + spin_lock(&inode_sb_list_lock);
4141 + }
4142 +diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
4143 +index 0890c83643e9..d6d53e5e7945 100644
4144 +--- a/fs/btrfs/acl.c
4145 ++++ b/fs/btrfs/acl.c
4146 +@@ -118,11 +118,9 @@ static int btrfs_set_acl(struct btrfs_trans_handle *trans,
4147 + case ACL_TYPE_ACCESS:
4148 + name = POSIX_ACL_XATTR_ACCESS;
4149 + if (acl) {
4150 +- ret = posix_acl_equiv_mode(acl, &inode->i_mode);
4151 +- if (ret < 0)
4152 ++ ret = posix_acl_update_mode(inode, &inode->i_mode, &acl);
4153 ++ if (ret)
4154 + return ret;
4155 +- if (ret == 0)
4156 +- acl = NULL;
4157 + }
4158 + ret = 0;
4159 + break;
4160 +diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
4161 +index 34f33e16b08f..269ac79ea25c 100644
4162 +--- a/fs/btrfs/delayed-inode.c
4163 ++++ b/fs/btrfs/delayed-inode.c
4164 +@@ -1805,14 +1805,6 @@ int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
4165 + struct btrfs_delayed_node *delayed_node;
4166 + int ret = 0;
4167 +
4168 +- /*
4169 +- * we don't do delayed inode updates during log recovery because it
4170 +- * leads to enospc problems. This means we also can't do
4171 +- * delayed inode refs
4172 +- */
4173 +- if (BTRFS_I(inode)->root->fs_info->log_root_recovering)
4174 +- return -EAGAIN;
4175 +-
4176 + delayed_node = btrfs_get_or_create_delayed_node(inode);
4177 + if (IS_ERR(delayed_node))
4178 + return PTR_ERR(delayed_node);
4179 +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
4180 +index 85bcb25384c0..854af9e95f4c 100644
4181 +--- a/fs/btrfs/extent_io.c
4182 ++++ b/fs/btrfs/extent_io.c
4183 +@@ -4865,11 +4865,20 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
4184 + lock_page(page);
4185 + }
4186 + locked_pages++;
4187 ++ }
4188 ++ /*
4189 ++ * We need to firstly lock all pages to make sure that
4190 ++ * the uptodate bit of our pages won't be affected by
4191 ++ * clear_extent_buffer_uptodate().
4192 ++ */
4193 ++ for (i = start_i; i < num_pages; i++) {
4194 ++ page = eb->pages[i];
4195 + if (!PageUptodate(page)) {
4196 + num_reads++;
4197 + all_uptodate = 0;
4198 + }
4199 + }
4200 ++
4201 + if (all_uptodate) {
4202 + if (start_i == 0)
4203 + set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4204 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
4205 +index be3bf0be13c7..4c56a5028786 100644
4206 +--- a/fs/btrfs/tree-log.c
4207 ++++ b/fs/btrfs/tree-log.c
4208 +@@ -1739,12 +1739,11 @@ static noinline int find_dir_range(struct btrfs_root *root,
4209 + next:
4210 + /* check the next slot in the tree to see if it is a valid item */
4211 + nritems = btrfs_header_nritems(path->nodes[0]);
4212 ++ path->slots[0]++;
4213 + if (path->slots[0] >= nritems) {
4214 + ret = btrfs_next_leaf(root, path);
4215 + if (ret)
4216 + goto out;
4217 +- } else {
4218 +- path->slots[0]++;
4219 + }
4220 +
4221 + btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
4222 +diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
4223 +index 37e4a72a7d1c..ae4e35bdc2cd 100644
4224 +--- a/fs/cifs/cifs_fs_sb.h
4225 ++++ b/fs/cifs/cifs_fs_sb.h
4226 +@@ -45,6 +45,9 @@
4227 + #define CIFS_MOUNT_POSIXACL 0x100000 /* mirror of MS_POSIXACL in mnt_cifs_flags */
4228 + #define CIFS_MOUNT_CIFS_BACKUPUID 0x200000 /* backup intent bit for a user */
4229 + #define CIFS_MOUNT_CIFS_BACKUPGID 0x400000 /* backup intent bit for a group */
4230 ++#define CIFS_MOUNT_USE_PREFIX_PATH 0x1000000 /* make subpath with unaccessible
4231 ++ * root mountable
4232 ++ */
4233 +
4234 + struct cifs_sb_info {
4235 + struct rb_root tlink_tree;
4236 +@@ -65,5 +68,6 @@ struct cifs_sb_info {
4237 + char *mountdata; /* options received at mount time or via DFS refs */
4238 + struct backing_dev_info bdi;
4239 + struct delayed_work prune_tlinks;
4240 ++ char *prepath;
4241 + };
4242 + #endif /* _CIFS_FS_SB_H */
4243 +diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
4244 +index 037b8f7e8a94..75aacb731c54 100644
4245 +--- a/fs/cifs/cifsfs.c
4246 ++++ b/fs/cifs/cifsfs.c
4247 +@@ -586,6 +586,9 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
4248 + char *s, *p;
4249 + char sep;
4250 +
4251 ++ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
4252 ++ return dget(sb->s_root);
4253 ++
4254 + full_path = cifs_build_path_to_root(vol, cifs_sb,
4255 + cifs_sb_master_tcon(cifs_sb));
4256 + if (full_path == NULL)
4257 +@@ -665,10 +668,14 @@ cifs_do_mount(struct file_system_type *fs_type,
4258 + cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
4259 + if (cifs_sb->mountdata == NULL) {
4260 + root = ERR_PTR(-ENOMEM);
4261 +- goto out_cifs_sb;
4262 ++ goto out_free;
4263 + }
4264 +
4265 +- cifs_setup_cifs_sb(volume_info, cifs_sb);
4266 ++ rc = cifs_setup_cifs_sb(volume_info, cifs_sb);
4267 ++ if (rc) {
4268 ++ root = ERR_PTR(rc);
4269 ++ goto out_free;
4270 ++ }
4271 +
4272 + rc = cifs_mount(cifs_sb, volume_info);
4273 + if (rc) {
4274 +@@ -676,7 +683,7 @@ cifs_do_mount(struct file_system_type *fs_type,
4275 + cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
4276 + rc);
4277 + root = ERR_PTR(rc);
4278 +- goto out_mountdata;
4279 ++ goto out_free;
4280 + }
4281 +
4282 + mnt_data.vol = volume_info;
4283 +@@ -719,9 +726,9 @@ out:
4284 + cifs_cleanup_volume_info(volume_info);
4285 + return root;
4286 +
4287 +-out_mountdata:
4288 ++out_free:
4289 ++ kfree(cifs_sb->prepath);
4290 + kfree(cifs_sb->mountdata);
4291 +-out_cifs_sb:
4292 + kfree(cifs_sb);
4293 + out_nls:
4294 + unload_nls(volume_info->local_nls);
4295 +diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
4296 +index fa30efe15ba2..4b87feaa507f 100644
4297 +--- a/fs/cifs/cifsglob.h
4298 ++++ b/fs/cifs/cifsglob.h
4299 +@@ -594,6 +594,8 @@ struct TCP_Server_Info {
4300 + #ifdef CONFIG_CIFS_SMB2
4301 + unsigned int max_read;
4302 + unsigned int max_write;
4303 ++ struct delayed_work reconnect; /* reconnect workqueue job */
4304 ++ struct mutex reconnect_mutex; /* prevent simultaneous reconnects */
4305 + #endif /* CONFIG_CIFS_SMB2 */
4306 + };
4307 +
4308 +@@ -760,6 +762,7 @@ cap_unix(struct cifs_ses *ses)
4309 + struct cifs_tcon {
4310 + struct list_head tcon_list;
4311 + int tc_count;
4312 ++ struct list_head rlist; /* reconnect list */
4313 + struct list_head openFileList;
4314 + spinlock_t open_file_lock; /* protects list above */
4315 + struct cifs_ses *ses; /* pointer to session associated with */
4316 +diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
4317 +index c6bfe5b368f9..44d825cdf85e 100644
4318 +--- a/fs/cifs/cifsproto.h
4319 ++++ b/fs/cifs/cifsproto.h
4320 +@@ -179,7 +179,7 @@ extern int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
4321 + extern int cifs_readv_from_socket(struct TCP_Server_Info *server,
4322 + struct kvec *iov_orig, unsigned int nr_segs,
4323 + unsigned int to_read);
4324 +-extern void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
4325 ++extern int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
4326 + struct cifs_sb_info *cifs_sb);
4327 + extern int cifs_match_super(struct super_block *, void *);
4328 + extern void cifs_cleanup_volume_info(struct smb_vol *pvolume_info);
4329 +@@ -199,6 +199,9 @@ extern void cifs_add_pending_open_locked(struct cifs_fid *fid,
4330 + struct tcon_link *tlink,
4331 + struct cifs_pending_open *open);
4332 + extern void cifs_del_pending_open(struct cifs_pending_open *open);
4333 ++extern void cifs_put_tcp_session(struct TCP_Server_Info *server,
4334 ++ int from_reconnect);
4335 ++extern void cifs_put_tcon(struct cifs_tcon *tcon);
4336 +
4337 + #if IS_ENABLED(CONFIG_CIFS_DFS_UPCALL)
4338 + extern void cifs_dfs_release_automount_timer(void);
4339 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
4340 +index 54f507bd2c09..bd54422a260d 100644
4341 +--- a/fs/cifs/connect.c
4342 ++++ b/fs/cifs/connect.c
4343 +@@ -52,6 +52,9 @@
4344 + #include "nterr.h"
4345 + #include "rfc1002pdu.h"
4346 + #include "fscache.h"
4347 ++#ifdef CONFIG_CIFS_SMB2
4348 ++#include "smb2proto.h"
4349 ++#endif
4350 +
4351 + #define CIFS_PORT 445
4352 + #define RFC1001_PORT 139
4353 +@@ -2060,8 +2063,8 @@ cifs_find_tcp_session(struct smb_vol *vol)
4354 + return NULL;
4355 + }
4356 +
4357 +-static void
4358 +-cifs_put_tcp_session(struct TCP_Server_Info *server)
4359 ++void
4360 ++cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
4361 + {
4362 + struct task_struct *task;
4363 +
4364 +@@ -2078,6 +2081,19 @@ cifs_put_tcp_session(struct TCP_Server_Info *server)
4365 +
4366 + cancel_delayed_work_sync(&server->echo);
4367 +
4368 ++#ifdef CONFIG_CIFS_SMB2
4369 ++ if (from_reconnect)
4370 ++ /*
4371 ++ * Avoid deadlock here: reconnect work calls
4372 ++ * cifs_put_tcp_session() at its end. Need to be sure
4373 ++ * that reconnect work does nothing with server pointer after
4374 ++ * that step.
4375 ++ */
4376 ++ cancel_delayed_work(&server->reconnect);
4377 ++ else
4378 ++ cancel_delayed_work_sync(&server->reconnect);
4379 ++#endif
4380 ++
4381 + spin_lock(&GlobalMid_Lock);
4382 + server->tcpStatus = CifsExiting;
4383 + spin_unlock(&GlobalMid_Lock);
4384 +@@ -2142,6 +2158,10 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
4385 + INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
4386 + INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
4387 + INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
4388 ++#ifdef CONFIG_CIFS_SMB2
4389 ++ INIT_DELAYED_WORK(&tcp_ses->reconnect, smb2_reconnect_server);
4390 ++ mutex_init(&tcp_ses->reconnect_mutex);
4391 ++#endif
4392 + memcpy(&tcp_ses->srcaddr, &volume_info->srcaddr,
4393 + sizeof(tcp_ses->srcaddr));
4394 + memcpy(&tcp_ses->dstaddr, &volume_info->dstaddr,
4395 +@@ -2294,7 +2314,7 @@ cifs_put_smb_ses(struct cifs_ses *ses)
4396 + spin_unlock(&cifs_tcp_ses_lock);
4397 +
4398 + sesInfoFree(ses);
4399 +- cifs_put_tcp_session(server);
4400 ++ cifs_put_tcp_session(server, 0);
4401 + }
4402 +
4403 + #ifdef CONFIG_KEYS
4404 +@@ -2467,7 +2487,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
4405 + mutex_unlock(&ses->session_mutex);
4406 +
4407 + /* existing SMB ses has a server reference already */
4408 +- cifs_put_tcp_session(server);
4409 ++ cifs_put_tcp_session(server, 0);
4410 + free_xid(xid);
4411 + return ses;
4412 + }
4413 +@@ -2557,7 +2577,7 @@ cifs_find_tcon(struct cifs_ses *ses, const char *unc)
4414 + return NULL;
4415 + }
4416 +
4417 +-static void
4418 ++void
4419 + cifs_put_tcon(struct cifs_tcon *tcon)
4420 + {
4421 + unsigned int xid;
4422 +@@ -2722,6 +2742,24 @@ compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
4423 + return 1;
4424 + }
4425 +
4426 ++static int
4427 ++match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data)
4428 ++{
4429 ++ struct cifs_sb_info *old = CIFS_SB(sb);
4430 ++ struct cifs_sb_info *new = mnt_data->cifs_sb;
4431 ++
4432 ++ if (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) {
4433 ++ if (!(new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH))
4434 ++ return 0;
4435 ++ /* The prepath should be null terminated strings */
4436 ++ if (strcmp(new->prepath, old->prepath))
4437 ++ return 0;
4438 ++
4439 ++ return 1;
4440 ++ }
4441 ++ return 0;
4442 ++}
4443 ++
4444 + int
4445 + cifs_match_super(struct super_block *sb, void *data)
4446 + {
4447 +@@ -2749,7 +2787,8 @@ cifs_match_super(struct super_block *sb, void *data)
4448 +
4449 + if (!match_server(tcp_srv, volume_info) ||
4450 + !match_session(ses, volume_info) ||
4451 +- !match_tcon(tcon, volume_info->UNC)) {
4452 ++ !match_tcon(tcon, volume_info->UNC) ||
4453 ++ !match_prepath(sb, mnt_data)) {
4454 + rc = 0;
4455 + goto out;
4456 + }
4457 +@@ -3165,7 +3204,7 @@ void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon,
4458 + }
4459 + }
4460 +
4461 +-void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
4462 ++int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
4463 + struct cifs_sb_info *cifs_sb)
4464 + {
4465 + INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks);
4466 +@@ -3247,6 +3286,15 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
4467 +
4468 + if ((pvolume_info->cifs_acl) && (pvolume_info->dynperm))
4469 + cifs_dbg(VFS, "mount option dynperm ignored if cifsacl mount option supported\n");
4470 ++
4471 ++
4472 ++ if (pvolume_info->prepath) {
4473 ++ cifs_sb->prepath = kstrdup(pvolume_info->prepath, GFP_KERNEL);
4474 ++ if (cifs_sb->prepath == NULL)
4475 ++ return -ENOMEM;
4476 ++ }
4477 ++
4478 ++ return 0;
4479 + }
4480 +
4481 + static void
4482 +@@ -3417,6 +3465,44 @@ cifs_get_volume_info(char *mount_data, const char *devname)
4483 + return volume_info;
4484 + }
4485 +
4486 ++static int
4487 ++cifs_are_all_path_components_accessible(struct TCP_Server_Info *server,
4488 ++ unsigned int xid,
4489 ++ struct cifs_tcon *tcon,
4490 ++ struct cifs_sb_info *cifs_sb,
4491 ++ char *full_path)
4492 ++{
4493 ++ int rc;
4494 ++ char *s;
4495 ++ char sep, tmp;
4496 ++
4497 ++ sep = CIFS_DIR_SEP(cifs_sb);
4498 ++ s = full_path;
4499 ++
4500 ++ rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, "");
4501 ++ while (rc == 0) {
4502 ++ /* skip separators */
4503 ++ while (*s == sep)
4504 ++ s++;
4505 ++ if (!*s)
4506 ++ break;
4507 ++ /* next separator */
4508 ++ while (*s && *s != sep)
4509 ++ s++;
4510 ++
4511 ++ /*
4512 ++ * temporarily null-terminate the path at the end of
4513 ++ * the current component
4514 ++ */
4515 ++ tmp = *s;
4516 ++ *s = 0;
4517 ++ rc = server->ops->is_path_accessible(xid, tcon, cifs_sb,
4518 ++ full_path);
4519 ++ *s = tmp;
4520 ++ }
4521 ++ return rc;
4522 ++}
4523 ++
4524 + int
4525 + cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
4526 + {
4527 +@@ -3543,6 +3629,17 @@ remote_path_check:
4528 + kfree(full_path);
4529 + goto mount_fail_check;
4530 + }
4531 ++ if (rc != -EREMOTE) {
4532 ++ rc = cifs_are_all_path_components_accessible(server,
4533 ++ xid, tcon, cifs_sb,
4534 ++ full_path);
4535 ++ if (rc != 0) {
4536 ++ cifs_dbg(VFS, "cannot query dirs between root and final path, "
4537 ++ "enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
4538 ++ cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
4539 ++ rc = 0;
4540 ++ }
4541 ++ }
4542 + kfree(full_path);
4543 + }
4544 +
4545 +@@ -3606,7 +3703,7 @@ mount_fail_check:
4546 + else if (ses)
4547 + cifs_put_smb_ses(ses);
4548 + else
4549 +- cifs_put_tcp_session(server);
4550 ++ cifs_put_tcp_session(server, 0);
4551 + bdi_destroy(&cifs_sb->bdi);
4552 + }
4553 +
4554 +@@ -3799,6 +3896,7 @@ cifs_umount(struct cifs_sb_info *cifs_sb)
4555 +
4556 + bdi_destroy(&cifs_sb->bdi);
4557 + kfree(cifs_sb->mountdata);
4558 ++ kfree(cifs_sb->prepath);
4559 + unload_nls(cifs_sb->local_nls);
4560 + kfree(cifs_sb);
4561 + }
4562 +@@ -3904,7 +4002,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
4563 + ses = cifs_get_smb_ses(master_tcon->ses->server, vol_info);
4564 + if (IS_ERR(ses)) {
4565 + tcon = (struct cifs_tcon *)ses;
4566 +- cifs_put_tcp_session(master_tcon->ses->server);
4567 ++ cifs_put_tcp_session(master_tcon->ses->server, 0);
4568 + goto out;
4569 + }
4570 +
4571 +diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
4572 +index 7347f1678fa7..39660990e4b0 100644
4573 +--- a/fs/cifs/dir.c
4574 ++++ b/fs/cifs/dir.c
4575 +@@ -84,6 +84,7 @@ build_path_from_dentry(struct dentry *direntry)
4576 + struct dentry *temp;
4577 + int namelen;
4578 + int dfsplen;
4579 ++ int pplen = 0;
4580 + char *full_path;
4581 + char dirsep;
4582 + struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
4583 +@@ -95,8 +96,12 @@ build_path_from_dentry(struct dentry *direntry)
4584 + dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1);
4585 + else
4586 + dfsplen = 0;
4587 ++
4588 ++ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
4589 ++ pplen = cifs_sb->prepath ? strlen(cifs_sb->prepath) + 1 : 0;
4590 ++
4591 + cifs_bp_rename_retry:
4592 +- namelen = dfsplen;
4593 ++ namelen = dfsplen + pplen;
4594 + seq = read_seqbegin(&rename_lock);
4595 + rcu_read_lock();
4596 + for (temp = direntry; !IS_ROOT(temp);) {
4597 +@@ -137,7 +142,7 @@ cifs_bp_rename_retry:
4598 + }
4599 + }
4600 + rcu_read_unlock();
4601 +- if (namelen != dfsplen || read_seqretry(&rename_lock, seq)) {
4602 ++ if (namelen != dfsplen + pplen || read_seqretry(&rename_lock, seq)) {
4603 + cifs_dbg(FYI, "did not end path lookup where expected. namelen=%ddfsplen=%d\n",
4604 + namelen, dfsplen);
4605 + /* presumably this is only possible if racing with a rename
4606 +@@ -153,6 +158,17 @@ cifs_bp_rename_retry:
4607 + those safely to '/' if any are found in the middle of the prepath */
4608 + /* BB test paths to Windows with '/' in the midst of prepath */
4609 +
4610 ++ if (pplen) {
4611 ++ int i;
4612 ++
4613 ++ cifs_dbg(FYI, "using cifs_sb prepath <%s>\n", cifs_sb->prepath);
4614 ++ memcpy(full_path+dfsplen+1, cifs_sb->prepath, pplen-1);
4615 ++ full_path[dfsplen] = '\\';
4616 ++ for (i = 0; i < pplen-1; i++)
4617 ++ if (full_path[dfsplen+1+i] == '/')
4618 ++ full_path[dfsplen+1+i] = CIFS_DIR_SEP(cifs_sb);
4619 ++ }
4620 ++
4621 + if (dfsplen) {
4622 + strncpy(full_path, tcon->treeName, dfsplen);
4623 + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) {
4624 +diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
4625 +index ab9f992ca479..518cf900682f 100644
4626 +--- a/fs/cifs/inode.c
4627 ++++ b/fs/cifs/inode.c
4628 +@@ -937,12 +937,29 @@ struct inode *cifs_root_iget(struct super_block *sb)
4629 + struct inode *inode = NULL;
4630 + long rc;
4631 + struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
4632 ++ char *path = NULL;
4633 ++ int len;
4634 ++
4635 ++ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
4636 ++ && cifs_sb->prepath) {
4637 ++ len = strlen(cifs_sb->prepath);
4638 ++ path = kzalloc(len + 2 /* leading sep + null */, GFP_KERNEL);
4639 ++ if (path == NULL)
4640 ++ return ERR_PTR(-ENOMEM);
4641 ++ path[0] = '/';
4642 ++ memcpy(path+1, cifs_sb->prepath, len);
4643 ++ } else {
4644 ++ path = kstrdup("", GFP_KERNEL);
4645 ++ if (path == NULL)
4646 ++ return ERR_PTR(-ENOMEM);
4647 ++ }
4648 +
4649 + xid = get_xid();
4650 ++ convert_delimiter(path, CIFS_DIR_SEP(cifs_sb));
4651 + if (tcon->unix_ext)
4652 +- rc = cifs_get_inode_info_unix(&inode, "", sb, xid);
4653 ++ rc = cifs_get_inode_info_unix(&inode, path, sb, xid);
4654 + else
4655 +- rc = cifs_get_inode_info(&inode, "", NULL, sb, xid, NULL);
4656 ++ rc = cifs_get_inode_info(&inode, path, NULL, sb, xid, NULL);
4657 +
4658 + if (!inode) {
4659 + inode = ERR_PTR(rc);
4660 +@@ -970,6 +987,7 @@ struct inode *cifs_root_iget(struct super_block *sb)
4661 + }
4662 +
4663 + out:
4664 ++ kfree(path);
4665 + /* can not call macro free_xid here since in a void func
4666 + * TODO: This is no longer true
4667 + */
4668 +diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
4669 +index 45992944e238..b87b07504947 100644
4670 +--- a/fs/cifs/smb2file.c
4671 ++++ b/fs/cifs/smb2file.c
4672 +@@ -241,7 +241,7 @@ smb2_push_mandatory_locks(struct cifsFileInfo *cfile)
4673 + * and check it for zero before using.
4674 + */
4675 + max_buf = tlink_tcon(cfile->tlink)->ses->server->maxBuf;
4676 +- if (!max_buf) {
4677 ++ if (max_buf < sizeof(struct smb2_lock_element)) {
4678 + free_xid(xid);
4679 + return -EINVAL;
4680 + }
4681 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
4682 +index 1a6dde4bce62..30d0751626e3 100644
4683 +--- a/fs/cifs/smb2pdu.c
4684 ++++ b/fs/cifs/smb2pdu.c
4685 +@@ -282,7 +282,7 @@ out:
4686 + case SMB2_CHANGE_NOTIFY:
4687 + case SMB2_QUERY_INFO:
4688 + case SMB2_SET_INFO:
4689 +- return -EAGAIN;
4690 ++ rc = -EAGAIN;
4691 + }
4692 + unload_nls(nls_codepage);
4693 + return rc;
4694 +@@ -1560,6 +1560,54 @@ smb2_echo_callback(struct mid_q_entry *mid)
4695 + add_credits(server, credits_received, CIFS_ECHO_OP);
4696 + }
4697 +
4698 ++void smb2_reconnect_server(struct work_struct *work)
4699 ++{
4700 ++ struct TCP_Server_Info *server = container_of(work,
4701 ++ struct TCP_Server_Info, reconnect.work);
4702 ++ struct cifs_ses *ses;
4703 ++ struct cifs_tcon *tcon, *tcon2;
4704 ++ struct list_head tmp_list;
4705 ++ int tcon_exist = false;
4706 ++
4707 ++ /* Prevent simultaneous reconnects that can corrupt tcon->rlist list */
4708 ++ mutex_lock(&server->reconnect_mutex);
4709 ++
4710 ++ INIT_LIST_HEAD(&tmp_list);
4711 ++ cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
4712 ++
4713 ++ spin_lock(&cifs_tcp_ses_lock);
4714 ++ list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
4715 ++ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
4716 ++ if (tcon->need_reconnect) {
4717 ++ tcon->tc_count++;
4718 ++ list_add_tail(&tcon->rlist, &tmp_list);
4719 ++ tcon_exist = true;
4720 ++ }
4721 ++ }
4722 ++ }
4723 ++ /*
4724 ++ * Get the reference to server struct to be sure that the last call of
4725 ++ * cifs_put_tcon() in the loop below won't release the server pointer.
4726 ++ */
4727 ++ if (tcon_exist)
4728 ++ server->srv_count++;
4729 ++
4730 ++ spin_unlock(&cifs_tcp_ses_lock);
4731 ++
4732 ++ list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
4733 ++ smb2_reconnect(SMB2_ECHO, tcon);
4734 ++ list_del_init(&tcon->rlist);
4735 ++ cifs_put_tcon(tcon);
4736 ++ }
4737 ++
4738 ++ cifs_dbg(FYI, "Reconnecting tcons finished\n");
4739 ++ mutex_unlock(&server->reconnect_mutex);
4740 ++
4741 ++ /* now we can safely release srv struct */
4742 ++ if (tcon_exist)
4743 ++ cifs_put_tcp_session(server, 1);
4744 ++}
4745 ++
4746 + int
4747 + SMB2_echo(struct TCP_Server_Info *server)
4748 + {
4749 +@@ -1572,32 +1620,11 @@ SMB2_echo(struct TCP_Server_Info *server)
4750 + cifs_dbg(FYI, "In echo request\n");
4751 +
4752 + if (server->tcpStatus == CifsNeedNegotiate) {
4753 +- struct list_head *tmp, *tmp2;
4754 +- struct cifs_ses *ses;
4755 +- struct cifs_tcon *tcon;
4756 +-
4757 +- cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
4758 +- spin_lock(&cifs_tcp_ses_lock);
4759 +- list_for_each(tmp, &server->smb_ses_list) {
4760 +- ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
4761 +- list_for_each(tmp2, &ses->tcon_list) {
4762 +- tcon = list_entry(tmp2, struct cifs_tcon,
4763 +- tcon_list);
4764 +- /* add check for persistent handle reconnect */
4765 +- if (tcon && tcon->need_reconnect) {
4766 +- spin_unlock(&cifs_tcp_ses_lock);
4767 +- rc = smb2_reconnect(SMB2_ECHO, tcon);
4768 +- spin_lock(&cifs_tcp_ses_lock);
4769 +- }
4770 +- }
4771 +- }
4772 +- spin_unlock(&cifs_tcp_ses_lock);
4773 ++ /* No need to send echo on newly established connections */
4774 ++ queue_delayed_work(cifsiod_wq, &server->reconnect, 0);
4775 ++ return rc;
4776 + }
4777 +
4778 +- /* if no session, renegotiate failed above */
4779 +- if (server->tcpStatus == CifsNeedNegotiate)
4780 +- return -EIO;
4781 +-
4782 + rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
4783 + if (rc)
4784 + return rc;
4785 +diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
4786 +index 5793f3e39a31..d45f772a35c9 100644
4787 +--- a/fs/cifs/smb2proto.h
4788 ++++ b/fs/cifs/smb2proto.h
4789 +@@ -89,6 +89,7 @@ extern int smb2_open_file(const unsigned int xid,
4790 + extern int smb2_unlock_range(struct cifsFileInfo *cfile,
4791 + struct file_lock *flock, const unsigned int xid);
4792 + extern int smb2_push_mandatory_locks(struct cifsFileInfo *cfile);
4793 ++extern void smb2_reconnect_server(struct work_struct *work);
4794 +
4795 + /*
4796 + * SMB2 Worker functions - most of protocol specific implementation details
4797 +diff --git a/fs/dcache.c b/fs/dcache.c
4798 +index 11ded5b0b853..9a5e9082feb1 100644
4799 +--- a/fs/dcache.c
4800 ++++ b/fs/dcache.c
4801 +@@ -2623,6 +2623,12 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
4802 + dentry->d_parent = dentry;
4803 + list_del_init(&dentry->d_child);
4804 + anon->d_parent = dparent;
4805 ++ if (likely(!d_unhashed(anon))) {
4806 ++ hlist_bl_lock(&anon->d_sb->s_anon);
4807 ++ __hlist_bl_del(&anon->d_hash);
4808 ++ anon->d_hash.pprev = NULL;
4809 ++ hlist_bl_unlock(&anon->d_sb->s_anon);
4810 ++ }
4811 + list_move(&anon->d_child, &dparent->d_subdirs);
4812 +
4813 + write_seqcount_end(&dentry->d_seq);
4814 +@@ -2677,7 +2683,6 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
4815 + * could splice into our tree? */
4816 + __d_materialise_dentry(dentry, alias);
4817 + write_sequnlock(&rename_lock);
4818 +- __d_drop(alias);
4819 + goto found;
4820 + } else {
4821 + /* Nope, but we must(!) avoid directory
4822 +diff --git a/fs/exec.c b/fs/exec.c
4823 +index d8b46a197172..f33c0fff702c 100644
4824 +--- a/fs/exec.c
4825 ++++ b/fs/exec.c
4826 +@@ -19,7 +19,7 @@
4827 + * current->executable is only used by the procfs. This allows a dispatch
4828 + * table to check for several different types of binary formats. We keep
4829 + * trying until we recognize the file or we run out of supported binary
4830 +- * formats.
4831 ++ * formats.
4832 + */
4833 +
4834 + #include <linux/slab.h>
4835 +@@ -1098,6 +1098,13 @@ int flush_old_exec(struct linux_binprm * bprm)
4836 + flush_thread();
4837 + current->personality &= ~bprm->per_clear;
4838 +
4839 ++ /*
4840 ++ * We have to apply CLOEXEC before we change whether the process is
4841 ++ * dumpable (in setup_new_exec) to avoid a race with a process in userspace
4842 ++ * trying to access the should-be-closed file descriptors of a process
4843 ++ * undergoing exec(2).
4844 ++ */
4845 ++ do_close_on_exec(current->files);
4846 + return 0;
4847 +
4848 + out:
4849 +@@ -1148,7 +1155,6 @@ void setup_new_exec(struct linux_binprm * bprm)
4850 + current->self_exec_id++;
4851 +
4852 + flush_signal_handlers(current, 0);
4853 +- do_close_on_exec(current->files);
4854 + }
4855 + EXPORT_SYMBOL(setup_new_exec);
4856 +
4857 +diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
4858 +index 110b6b371a4e..48c3c2d7d261 100644
4859 +--- a/fs/ext2/acl.c
4860 ++++ b/fs/ext2/acl.c
4861 +@@ -206,15 +206,11 @@ ext2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
4862 + case ACL_TYPE_ACCESS:
4863 + name_index = EXT2_XATTR_INDEX_POSIX_ACL_ACCESS;
4864 + if (acl) {
4865 +- error = posix_acl_equiv_mode(acl, &inode->i_mode);
4866 +- if (error < 0)
4867 ++ error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
4868 ++ if (error)
4869 + return error;
4870 +- else {
4871 +- inode->i_ctime = CURRENT_TIME_SEC;
4872 +- mark_inode_dirty(inode);
4873 +- if (error == 0)
4874 +- acl = NULL;
4875 +- }
4876 ++ inode->i_ctime = CURRENT_TIME_SEC;
4877 ++ mark_inode_dirty(inode);
4878 + }
4879 + break;
4880 +
4881 +diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c
4882 +index dbb5ad59a7fc..2f994bbf73a7 100644
4883 +--- a/fs/ext3/acl.c
4884 ++++ b/fs/ext3/acl.c
4885 +@@ -205,15 +205,11 @@ ext3_set_acl(handle_t *handle, struct inode *inode, int type,
4886 + case ACL_TYPE_ACCESS:
4887 + name_index = EXT3_XATTR_INDEX_POSIX_ACL_ACCESS;
4888 + if (acl) {
4889 +- error = posix_acl_equiv_mode(acl, &inode->i_mode);
4890 ++ error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
4891 + if (error < 0)
4892 + return error;
4893 +- else {
4894 +- inode->i_ctime = CURRENT_TIME_SEC;
4895 +- ext3_mark_inode_dirty(handle, inode);
4896 +- if (error == 0)
4897 +- acl = NULL;
4898 +- }
4899 ++ inode->i_ctime = CURRENT_TIME_SEC;
4900 ++ ext3_mark_inode_dirty(handle, inode);
4901 + }
4902 + break;
4903 +
4904 +diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
4905 +index 39a54a0e9fe4..c844f1bfb451 100644
4906 +--- a/fs/ext4/acl.c
4907 ++++ b/fs/ext4/acl.c
4908 +@@ -211,15 +211,11 @@ ext4_set_acl(handle_t *handle, struct inode *inode, int type,
4909 + case ACL_TYPE_ACCESS:
4910 + name_index = EXT4_XATTR_INDEX_POSIX_ACL_ACCESS;
4911 + if (acl) {
4912 +- error = posix_acl_equiv_mode(acl, &inode->i_mode);
4913 +- if (error < 0)
4914 ++ error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
4915 ++ if (error)
4916 + return error;
4917 +- else {
4918 +- inode->i_ctime = ext4_current_time(inode);
4919 +- ext4_mark_inode_dirty(handle, inode);
4920 +- if (error == 0)
4921 +- acl = NULL;
4922 +- }
4923 ++ inode->i_ctime = ext4_current_time(inode);
4924 ++ ext4_mark_inode_dirty(handle, inode);
4925 + }
4926 + break;
4927 +
4928 +diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
4929 +index b7e491056f9c..a4d6e9a953f9 100644
4930 +--- a/fs/ext4/inline.c
4931 ++++ b/fs/ext4/inline.c
4932 +@@ -339,8 +339,10 @@ static int ext4_update_inline_data(handle_t *handle, struct inode *inode,
4933 +
4934 + len -= EXT4_MIN_INLINE_DATA_SIZE;
4935 + value = kzalloc(len, GFP_NOFS);
4936 +- if (!value)
4937 ++ if (!value) {
4938 ++ error = -ENOMEM;
4939 + goto out;
4940 ++ }
4941 +
4942 + error = ext4_xattr_ibody_get(inode, i.name_index, i.name,
4943 + value, len);
4944 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
4945 +index 4a3735a795d0..50fc2d1da9a9 100644
4946 +--- a/fs/ext4/inode.c
4947 ++++ b/fs/ext4/inode.c
4948 +@@ -701,6 +701,20 @@ has_zeroout:
4949 + int ret = check_block_validity(inode, map);
4950 + if (ret != 0)
4951 + return ret;
4952 ++
4953 ++ /*
4954 ++ * Inodes with freshly allocated blocks where contents will be
4955 ++ * visible after transaction commit must be on transaction's
4956 ++ * ordered data list.
4957 ++ */
4958 ++ if (map->m_flags & EXT4_MAP_NEW &&
4959 ++ !(map->m_flags & EXT4_MAP_UNWRITTEN) &&
4960 ++ !IS_NOQUOTA(inode) &&
4961 ++ ext4_should_order_data(inode)) {
4962 ++ ret = ext4_jbd2_file_inode(handle, inode);
4963 ++ if (ret)
4964 ++ return ret;
4965 ++ }
4966 + }
4967 + return retval;
4968 + }
4969 +@@ -1065,15 +1079,6 @@ static int ext4_write_end(struct file *file,
4970 + int i_size_changed = 0;
4971 +
4972 + trace_ext4_write_end(inode, pos, len, copied);
4973 +- if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE)) {
4974 +- ret = ext4_jbd2_file_inode(handle, inode);
4975 +- if (ret) {
4976 +- unlock_page(page);
4977 +- page_cache_release(page);
4978 +- goto errout;
4979 +- }
4980 +- }
4981 +-
4982 + if (ext4_has_inline_data(inode)) {
4983 + ret = ext4_write_inline_data_end(inode, pos, len,
4984 + copied, page);
4985 +@@ -4098,6 +4103,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4986 + struct inode *inode;
4987 + journal_t *journal = EXT4_SB(sb)->s_journal;
4988 + long ret;
4989 ++ loff_t size;
4990 + int block;
4991 + uid_t i_uid;
4992 + gid_t i_gid;
4993 +@@ -4189,6 +4195,11 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4994 + ei->i_file_acl |=
4995 + ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
4996 + inode->i_size = ext4_isize(raw_inode);
4997 ++ if ((size = i_size_read(inode)) < 0) {
4998 ++ EXT4_ERROR_INODE(inode, "bad i_size value: %lld", size);
4999 ++ ret = -EIO;
5000 ++ goto bad_inode;
5001 ++ }
5002 + ei->i_disksize = inode->i_size;
5003 + #ifdef CONFIG_QUOTA
5004 + ei->i_reserved_quota = 0;
5005 +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
5006 +index 96f4c72fbbd2..2b4ed2bf9569 100644
5007 +--- a/fs/ext4/mballoc.c
5008 ++++ b/fs/ext4/mballoc.c
5009 +@@ -668,7 +668,7 @@ static void ext4_mb_mark_free_simple(struct super_block *sb,
5010 + ext4_grpblk_t min;
5011 + ext4_grpblk_t max;
5012 + ext4_grpblk_t chunk;
5013 +- unsigned short border;
5014 ++ unsigned int border;
5015 +
5016 + BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));
5017 +
5018 +@@ -2243,7 +2243,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
5019 + struct ext4_group_info *grinfo;
5020 + struct sg {
5021 + struct ext4_group_info info;
5022 +- ext4_grpblk_t counters[16];
5023 ++ ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2];
5024 + } sg;
5025 +
5026 + group--;
5027 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
5028 +index 483bc328643d..6362896f5875 100644
5029 +--- a/fs/ext4/super.c
5030 ++++ b/fs/ext4/super.c
5031 +@@ -3257,10 +3257,15 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp,
5032 + ext4_set_bit(s++, buf);
5033 + count++;
5034 + }
5035 +- for (j = ext4_bg_num_gdb(sb, grp); j > 0; j--) {
5036 +- ext4_set_bit(EXT4_B2C(sbi, s++), buf);
5037 +- count++;
5038 ++ j = ext4_bg_num_gdb(sb, grp);
5039 ++ if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
5040 ++ ext4_error(sb, "Invalid number of block group "
5041 ++ "descriptor blocks: %d", j);
5042 ++ j = EXT4_BLOCKS_PER_GROUP(sb) - s;
5043 + }
5044 ++ count += j;
5045 ++ for (; j > 0; j--)
5046 ++ ext4_set_bit(EXT4_B2C(sbi, s++), buf);
5047 + }
5048 + if (!count)
5049 + return 0;
5050 +@@ -3363,7 +3368,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
5051 + char *orig_data = kstrdup(data, GFP_KERNEL);
5052 + struct buffer_head *bh;
5053 + struct ext4_super_block *es = NULL;
5054 +- struct ext4_sb_info *sbi;
5055 ++ struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
5056 + ext4_fsblk_t block;
5057 + ext4_fsblk_t sb_block = get_sb_block(&data);
5058 + ext4_fsblk_t logical_sb_block;
5059 +@@ -3383,16 +3388,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
5060 + unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
5061 + ext4_group_t first_not_zeroed;
5062 +
5063 +- sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
5064 +- if (!sbi)
5065 +- goto out_free_orig;
5066 ++ if ((data && !orig_data) || !sbi)
5067 ++ goto out_free_base;
5068 +
5069 + sbi->s_blockgroup_lock =
5070 + kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
5071 +- if (!sbi->s_blockgroup_lock) {
5072 +- kfree(sbi);
5073 +- goto out_free_orig;
5074 +- }
5075 ++ if (!sbi->s_blockgroup_lock)
5076 ++ goto out_free_base;
5077 ++
5078 + sb->s_fs_info = sbi;
5079 + sbi->s_sb = sb;
5080 + sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
5081 +@@ -3538,11 +3541,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
5082 + */
5083 + sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
5084 +
5085 +- if (!parse_options((char *) sbi->s_es->s_mount_opts, sb,
5086 +- &journal_devnum, &journal_ioprio, 0)) {
5087 +- ext4_msg(sb, KERN_WARNING,
5088 +- "failed to parse options in superblock: %s",
5089 +- sbi->s_es->s_mount_opts);
5090 ++ if (sbi->s_es->s_mount_opts[0]) {
5091 ++ char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
5092 ++ sizeof(sbi->s_es->s_mount_opts),
5093 ++ GFP_KERNEL);
5094 ++ if (!s_mount_opts)
5095 ++ goto failed_mount;
5096 ++ if (!parse_options(s_mount_opts, sb, &journal_devnum,
5097 ++ &journal_ioprio, 0)) {
5098 ++ ext4_msg(sb, KERN_WARNING,
5099 ++ "failed to parse options in superblock: %s",
5100 ++ s_mount_opts);
5101 ++ }
5102 ++ kfree(s_mount_opts);
5103 + }
5104 + sbi->s_def_mount_opt = sbi->s_mount_opt;
5105 + if (!parse_options((char *) data, sb, &journal_devnum,
5106 +@@ -3689,12 +3700,16 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
5107 +
5108 + sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
5109 + sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
5110 +- if (EXT4_INODE_SIZE(sb) == 0 || EXT4_INODES_PER_GROUP(sb) == 0)
5111 +- goto cantfind_ext4;
5112 +
5113 + sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
5114 + if (sbi->s_inodes_per_block == 0)
5115 + goto cantfind_ext4;
5116 ++ if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
5117 ++ sbi->s_inodes_per_group > blocksize * 8) {
5118 ++ ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
5119 ++ sbi->s_blocks_per_group);
5120 ++ goto failed_mount;
5121 ++ }
5122 + sbi->s_itb_per_group = sbi->s_inodes_per_group /
5123 + sbi->s_inodes_per_block;
5124 + sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
5125 +@@ -3778,13 +3793,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
5126 + }
5127 + sbi->s_cluster_ratio = clustersize / blocksize;
5128 +
5129 +- if (sbi->s_inodes_per_group > blocksize * 8) {
5130 +- ext4_msg(sb, KERN_ERR,
5131 +- "#inodes per group too big: %lu",
5132 +- sbi->s_inodes_per_group);
5133 +- goto failed_mount;
5134 +- }
5135 +-
5136 + /* Do we have standard group size of clustersize * 8 blocks ? */
5137 + if (sbi->s_blocks_per_group == clustersize << 3)
5138 + set_opt2(sb, STD_GROUP_SIZE);
5139 +@@ -4173,7 +4181,9 @@ no_journal:
5140 + }
5141 +
5142 + ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
5143 +- "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
5144 ++ "Opts: %.*s%s%s", descr,
5145 ++ (int) sizeof(sbi->s_es->s_mount_opts),
5146 ++ sbi->s_es->s_mount_opts,
5147 + *sbi->s_es->s_mount_opts ? "; " : "", orig_data);
5148 +
5149 + if (es->s_error_count)
5150 +@@ -4242,8 +4252,8 @@ failed_mount:
5151 + out_fail:
5152 + sb->s_fs_info = NULL;
5153 + kfree(sbi->s_blockgroup_lock);
5154 ++out_free_base:
5155 + kfree(sbi);
5156 +-out_free_orig:
5157 + kfree(orig_data);
5158 + return err ? err : ret;
5159 + }
5160 +diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
5161 +index b7826ec1b470..f4fefc57ff56 100644
5162 +--- a/fs/f2fs/acl.c
5163 ++++ b/fs/f2fs/acl.c
5164 +@@ -223,12 +223,10 @@ static int f2fs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
5165 + case ACL_TYPE_ACCESS:
5166 + name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS;
5167 + if (acl) {
5168 +- error = posix_acl_equiv_mode(acl, &inode->i_mode);
5169 +- if (error < 0)
5170 ++ error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
5171 ++ if (error)
5172 + return error;
5173 + set_acl_inode(fi, inode->i_mode);
5174 +- if (error == 0)
5175 +- acl = NULL;
5176 + }
5177 + break;
5178 +
5179 +diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
5180 +index a84b0a8e6854..52355ba40c15 100644
5181 +--- a/fs/f2fs/debug.c
5182 ++++ b/fs/f2fs/debug.c
5183 +@@ -294,6 +294,7 @@ static int stat_open(struct inode *inode, struct file *file)
5184 + }
5185 +
5186 + static const struct file_operations stat_fops = {
5187 ++ .owner = THIS_MODULE,
5188 + .open = stat_open,
5189 + .read = seq_read,
5190 + .llseek = seq_lseek,
5191 +diff --git a/fs/fuse/file.c b/fs/fuse/file.c
5192 +index 8ef52e12cd57..f6314cd3e3b0 100644
5193 +--- a/fs/fuse/file.c
5194 ++++ b/fs/fuse/file.c
5195 +@@ -2393,6 +2393,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
5196 + loff_t i_size;
5197 + size_t count = iov_length(iov, nr_segs);
5198 + struct fuse_io_priv *io;
5199 ++ bool is_sync = is_sync_kiocb(iocb);
5200 +
5201 + pos = offset;
5202 + inode = file->f_mapping->host;
5203 +@@ -2428,7 +2429,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
5204 + * to wait on real async I/O requests, so we must submit this request
5205 + * synchronously.
5206 + */
5207 +- if (!is_sync_kiocb(iocb) && (offset + count > i_size) && rw == WRITE)
5208 ++ if (!is_sync && (offset + count > i_size) && rw == WRITE)
5209 + io->async = false;
5210 +
5211 + if (rw == WRITE)
5212 +@@ -2440,7 +2441,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
5213 + fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
5214 +
5215 + /* we have a non-extending, async request, so return */
5216 +- if (!is_sync_kiocb(iocb))
5217 ++ if (!is_sync)
5218 + return -EIOCBQUEUED;
5219 +
5220 + ret = wait_on_sync_kiocb(iocb);
5221 +diff --git a/fs/generic_acl.c b/fs/generic_acl.c
5222 +index b3f3676796d3..7855cfb938f6 100644
5223 +--- a/fs/generic_acl.c
5224 ++++ b/fs/generic_acl.c
5225 +@@ -82,19 +82,21 @@ generic_acl_set(struct dentry *dentry, const char *name, const void *value,
5226 + return PTR_ERR(acl);
5227 + }
5228 + if (acl) {
5229 ++ struct posix_acl *old_acl;
5230 ++
5231 + error = posix_acl_valid(acl);
5232 + if (error)
5233 + goto failed;
5234 + switch (type) {
5235 + case ACL_TYPE_ACCESS:
5236 +- error = posix_acl_equiv_mode(acl, &inode->i_mode);
5237 ++ old_acl = acl;
5238 ++ error = posix_acl_update_mode(inode, &inode->i_mode,
5239 ++ &acl);
5240 + if (error < 0)
5241 + goto failed;
5242 ++ if (!acl)
5243 ++ posix_acl_release(old_acl);
5244 + inode->i_ctime = CURRENT_TIME;
5245 +- if (error == 0) {
5246 +- posix_acl_release(acl);
5247 +- acl = NULL;
5248 +- }
5249 + break;
5250 + case ACL_TYPE_DEFAULT:
5251 + if (!S_ISDIR(inode->i_mode)) {
5252 +diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
5253 +index f69ac0af5496..a61b0c2b57ab 100644
5254 +--- a/fs/gfs2/acl.c
5255 ++++ b/fs/gfs2/acl.c
5256 +@@ -268,15 +268,13 @@ static int gfs2_xattr_system_set(struct dentry *dentry, const char *name,
5257 +
5258 + if (type == ACL_TYPE_ACCESS) {
5259 + umode_t mode = inode->i_mode;
5260 +- error = posix_acl_equiv_mode(acl, &mode);
5261 ++ struct posix_acl *old_acl = acl;
5262 +
5263 +- if (error <= 0) {
5264 +- posix_acl_release(acl);
5265 +- acl = NULL;
5266 +-
5267 +- if (error < 0)
5268 +- return error;
5269 +- }
5270 ++ error = posix_acl_update_mode(inode, &mode, &acl);
5271 ++ if (error < 0)
5272 ++ goto out_release;
5273 ++ if (!acl)
5274 ++ posix_acl_release(old_acl);
5275 +
5276 + error = gfs2_set_mode(inode, mode);
5277 + if (error)
5278 +diff --git a/fs/hfsplus/posix_acl.c b/fs/hfsplus/posix_acl.c
5279 +index b609cc14c72e..9f7cc491ffb1 100644
5280 +--- a/fs/hfsplus/posix_acl.c
5281 ++++ b/fs/hfsplus/posix_acl.c
5282 +@@ -72,8 +72,8 @@ static int hfsplus_set_posix_acl(struct inode *inode,
5283 + case ACL_TYPE_ACCESS:
5284 + xattr_name = POSIX_ACL_XATTR_ACCESS;
5285 + if (acl) {
5286 +- err = posix_acl_equiv_mode(acl, &inode->i_mode);
5287 +- if (err < 0)
5288 ++ err = posix_acl_update_mode(inode, &inode->i_mode, &acl);
5289 ++ if (err)
5290 + return err;
5291 + }
5292 + err = 0;
5293 +diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
5294 +index bd8471fb9a6a..889be3fef4bc 100644
5295 +--- a/fs/hfsplus/xattr.c
5296 ++++ b/fs/hfsplus/xattr.c
5297 +@@ -69,8 +69,9 @@ static int can_set_system_xattr(struct inode *inode, const char *name,
5298 + if (IS_ERR(acl))
5299 + return PTR_ERR(acl);
5300 + if (acl) {
5301 +- err = posix_acl_equiv_mode(acl, &inode->i_mode);
5302 +- posix_acl_release(acl);
5303 ++ struct posix_acl *old_acl = acl;
5304 ++ err = posix_acl_update_mode(inode, &inode->i_mode, &acl);
5305 ++ posix_acl_release(old_acl);
5306 + if (err < 0)
5307 + return err;
5308 + mark_inode_dirty(inode);
5309 +diff --git a/fs/ioprio.c b/fs/ioprio.c
5310 +index 31666c92b46a..563435684c3c 100644
5311 +--- a/fs/ioprio.c
5312 ++++ b/fs/ioprio.c
5313 +@@ -149,8 +149,10 @@ static int get_task_ioprio(struct task_struct *p)
5314 + if (ret)
5315 + goto out;
5316 + ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
5317 ++ task_lock(p);
5318 + if (p->io_context)
5319 + ret = p->io_context->ioprio;
5320 ++ task_unlock(p);
5321 + out:
5322 + return ret;
5323 + }
5324 +diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
5325 +index 223283c30111..9335b8d3cf52 100644
5326 +--- a/fs/jffs2/acl.c
5327 ++++ b/fs/jffs2/acl.c
5328 +@@ -243,9 +243,10 @@ static int jffs2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
5329 + case ACL_TYPE_ACCESS:
5330 + xprefix = JFFS2_XPREFIX_ACL_ACCESS;
5331 + if (acl) {
5332 +- umode_t mode = inode->i_mode;
5333 +- rc = posix_acl_equiv_mode(acl, &mode);
5334 +- if (rc < 0)
5335 ++ umode_t mode;
5336 ++
5337 ++ rc = posix_acl_update_mode(inode, &mode, &acl);
5338 ++ if (rc)
5339 + return rc;
5340 + if (inode->i_mode != mode) {
5341 + struct iattr attr;
5342 +@@ -257,8 +258,6 @@ static int jffs2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
5343 + if (rc < 0)
5344 + return rc;
5345 + }
5346 +- if (rc == 0)
5347 +- acl = NULL;
5348 + }
5349 + break;
5350 + case ACL_TYPE_DEFAULT:
5351 +diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
5352 +index d3472f4cd530..8c9b6a06dcbb 100644
5353 +--- a/fs/jfs/xattr.c
5354 ++++ b/fs/jfs/xattr.c
5355 +@@ -693,8 +693,9 @@ static int can_set_system_xattr(struct inode *inode, const char *name,
5356 + return rc;
5357 + }
5358 + if (acl) {
5359 +- rc = posix_acl_equiv_mode(acl, &inode->i_mode);
5360 +- posix_acl_release(acl);
5361 ++ struct posix_acl *old_acl = acl;
5362 ++ rc = posix_acl_update_mode(inode, &inode->i_mode, &acl);
5363 ++ posix_acl_release(old_acl);
5364 + if (rc < 0) {
5365 + printk(KERN_ERR
5366 + "posix_acl_equiv_mode returned %d\n",
5367 +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
5368 +index b9670301d7d3..24e6448b7c80 100644
5369 +--- a/fs/nfs/dir.c
5370 ++++ b/fs/nfs/dir.c
5371 +@@ -1487,6 +1487,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
5372 + switch (err) {
5373 + case -ENOENT:
5374 + d_add(dentry, NULL);
5375 ++ nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
5376 + break;
5377 + case -EISDIR:
5378 + case -ENOTDIR:
5379 +diff --git a/fs/nfs/file.c b/fs/nfs/file.c
5380 +index 1e6bfdbc1aff..0a0b5063e50e 100644
5381 +--- a/fs/nfs/file.c
5382 ++++ b/fs/nfs/file.c
5383 +@@ -425,7 +425,7 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
5384 + */
5385 + if (!PageUptodate(page)) {
5386 + unsigned pglen = nfs_page_length(page);
5387 +- unsigned end = offset + len;
5388 ++ unsigned end = offset + copied;
5389 +
5390 + if (pglen == 0) {
5391 + zero_user_segments(page, 0, offset,
5392 +diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
5393 +index efac602edb37..91de91430b31 100644
5394 +--- a/fs/nfs/nfs4filelayoutdev.c
5395 ++++ b/fs/nfs/nfs4filelayoutdev.c
5396 +@@ -827,7 +827,8 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
5397 + nfs4_wait_ds_connect(ds);
5398 + }
5399 + out_test_devid:
5400 +- if (filelayout_test_devid_unavailable(devid))
5401 ++ if (ret->ds_clp == NULL ||
5402 ++ filelayout_test_devid_unavailable(devid))
5403 + ret = NULL;
5404 + out:
5405 + return ret;
5406 +diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
5407 +index b4f788e0ca31..23095b017752 100644
5408 +--- a/fs/ocfs2/acl.c
5409 ++++ b/fs/ocfs2/acl.c
5410 +@@ -270,20 +270,14 @@ static int ocfs2_set_acl(handle_t *handle,
5411 + case ACL_TYPE_ACCESS:
5412 + name_index = OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS;
5413 + if (acl) {
5414 +- umode_t mode = inode->i_mode;
5415 +- ret = posix_acl_equiv_mode(acl, &mode);
5416 +- if (ret < 0)
5417 ++ umode_t mode;
5418 ++ ret = posix_acl_update_mode(inode, &mode, &acl);
5419 ++ if (ret)
5420 ++ return ret;
5421 ++ ret = ocfs2_acl_set_mode(inode, di_bh,
5422 ++ handle, mode);
5423 ++ if (ret)
5424 + return ret;
5425 +- else {
5426 +- if (ret == 0)
5427 +- acl = NULL;
5428 +-
5429 +- ret = ocfs2_acl_set_mode(inode, di_bh,
5430 +- handle, mode);
5431 +- if (ret)
5432 +- return ret;
5433 +-
5434 +- }
5435 + }
5436 + break;
5437 + case ACL_TYPE_DEFAULT:
5438 +diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
5439 +index 416a2ab68ac1..9c93df0f241d 100644
5440 +--- a/fs/ocfs2/dlmglue.c
5441 ++++ b/fs/ocfs2/dlmglue.c
5442 +@@ -3302,6 +3302,16 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
5443 + mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name,
5444 + lockres->l_level, new_level);
5445 +
5446 ++ /*
5447 ++ * On DLM_LKF_VALBLK, fsdlm behaves differently with o2cb. It always
5448 ++ * expects DLM_LKF_VALBLK being set if the LKB has LVB, so that
5449 ++ * we can recover correctly from node failure. Otherwise, we may get
5450 ++ * invalid LVB in LKB, but without DLM_SBF_VALNOTVALID being set.
5451 ++ */
5452 ++ if (!ocfs2_is_o2cb_active() &&
5453 ++ lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
5454 ++ lvb = 1;
5455 ++
5456 + if (lvb)
5457 + dlm_flags |= DLM_LKF_VALBLK;
5458 +
5459 +diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
5460 +index 54ba0afacf00..7201b56e8f2c 100644
5461 +--- a/fs/ocfs2/file.c
5462 ++++ b/fs/ocfs2/file.c
5463 +@@ -1100,6 +1100,7 @@ out:
5464 + int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
5465 + {
5466 + int status = 0, size_change;
5467 ++ int inode_locked = 0;
5468 + struct inode *inode = dentry->d_inode;
5469 + struct super_block *sb = inode->i_sb;
5470 + struct ocfs2_super *osb = OCFS2_SB(sb);
5471 +@@ -1145,6 +1146,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
5472 + mlog_errno(status);
5473 + goto bail_unlock_rw;
5474 + }
5475 ++ inode_locked = 1;
5476 +
5477 + if (size_change && attr->ia_size != i_size_read(inode)) {
5478 + status = inode_newsize_ok(inode, attr->ia_size);
5479 +@@ -1225,7 +1227,10 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
5480 + bail_commit:
5481 + ocfs2_commit_trans(osb, handle);
5482 + bail_unlock:
5483 +- ocfs2_inode_unlock(inode, 1);
5484 ++ if (status) {
5485 ++ ocfs2_inode_unlock(inode, 1);
5486 ++ inode_locked = 0;
5487 ++ }
5488 + bail_unlock_rw:
5489 + if (size_change)
5490 + ocfs2_rw_unlock(inode, 1);
5491 +@@ -1241,6 +1246,8 @@ bail:
5492 + if (status < 0)
5493 + mlog_errno(status);
5494 + }
5495 ++ if (inode_locked)
5496 ++ ocfs2_inode_unlock(inode, 1);
5497 +
5498 + return status;
5499 + }
5500 +diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
5501 +index 39abf89697ed..88610b3cbc04 100644
5502 +--- a/fs/ocfs2/stackglue.c
5503 ++++ b/fs/ocfs2/stackglue.c
5504 +@@ -48,6 +48,12 @@ static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl";
5505 + */
5506 + static struct ocfs2_stack_plugin *active_stack;
5507 +
5508 ++inline int ocfs2_is_o2cb_active(void)
5509 ++{
5510 ++ return !strcmp(active_stack->sp_name, OCFS2_STACK_PLUGIN_O2CB);
5511 ++}
5512 ++EXPORT_SYMBOL_GPL(ocfs2_is_o2cb_active);
5513 ++
5514 + static struct ocfs2_stack_plugin *ocfs2_stack_lookup(const char *name)
5515 + {
5516 + struct ocfs2_stack_plugin *p;
5517 +diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h
5518 +index 1ec56fdb8d0d..fa49d8a1dc7b 100644
5519 +--- a/fs/ocfs2/stackglue.h
5520 ++++ b/fs/ocfs2/stackglue.h
5521 +@@ -289,4 +289,7 @@ void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_p
5522 + int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin);
5523 + void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin);
5524 +
5525 ++/* In ocfs2_downconvert_lock(), we need to know which stack we are using */
5526 ++int ocfs2_is_o2cb_active(void);
5527 ++
5528 + #endif /* STACKGLUE_H */
5529 +diff --git a/fs/posix_acl.c b/fs/posix_acl.c
5530 +index 3542f1f814e2..1da000aabb08 100644
5531 +--- a/fs/posix_acl.c
5532 ++++ b/fs/posix_acl.c
5533 +@@ -407,6 +407,37 @@ posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
5534 + }
5535 + EXPORT_SYMBOL(posix_acl_create);
5536 +
5537 ++/**
5538 ++ * posix_acl_update_mode - update mode in set_acl
5539 ++ *
5540 ++ * Update the file mode when setting an ACL: compute the new file permission
5541 ++ * bits based on the ACL. In addition, if the ACL is equivalent to the new
5542 ++ * file mode, set *acl to NULL to indicate that no ACL should be set.
5543 ++ *
5544 ++ * As with chmod, clear the setgit bit if the caller is not in the owning group
5545 ++ * or capable of CAP_FSETID (see inode_change_ok).
5546 ++ *
5547 ++ * Called from set_acl inode operations.
5548 ++ */
5549 ++int posix_acl_update_mode(struct inode *inode, umode_t *mode_p,
5550 ++ struct posix_acl **acl)
5551 ++{
5552 ++ umode_t mode = inode->i_mode;
5553 ++ int error;
5554 ++
5555 ++ error = posix_acl_equiv_mode(*acl, &mode);
5556 ++ if (error < 0)
5557 ++ return error;
5558 ++ if (error == 0)
5559 ++ *acl = NULL;
5560 ++ if (!in_group_p(inode->i_gid) &&
5561 ++ !capable_wrt_inode_uidgid(inode, CAP_FSETID))
5562 ++ mode &= ~S_ISGID;
5563 ++ *mode_p = mode;
5564 ++ return 0;
5565 ++}
5566 ++EXPORT_SYMBOL(posix_acl_update_mode);
5567 ++
5568 + int
5569 + posix_acl_chmod(struct posix_acl **acl, gfp_t gfp, umode_t mode)
5570 + {
5571 +diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
5572 +index 71290463a1d3..c615a4592572 100644
5573 +--- a/fs/proc/proc_sysctl.c
5574 ++++ b/fs/proc/proc_sysctl.c
5575 +@@ -666,7 +666,7 @@ static int proc_sys_readdir(struct file *file, struct dir_context *ctx)
5576 + ctl_dir = container_of(head, struct ctl_dir, header);
5577 +
5578 + if (!dir_emit_dots(file, ctx))
5579 +- return 0;
5580 ++ goto out;
5581 +
5582 + pos = 2;
5583 +
5584 +@@ -676,6 +676,7 @@ static int proc_sys_readdir(struct file *file, struct dir_context *ctx)
5585 + break;
5586 + }
5587 + }
5588 ++out:
5589 + sysctl_head_finish(head);
5590 + return 0;
5591 + }
5592 +diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
5593 +index 06c04f73da65..a86ad7ec7957 100644
5594 +--- a/fs/reiserfs/xattr_acl.c
5595 ++++ b/fs/reiserfs/xattr_acl.c
5596 +@@ -288,13 +288,9 @@ reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode,
5597 + case ACL_TYPE_ACCESS:
5598 + name = POSIX_ACL_XATTR_ACCESS;
5599 + if (acl) {
5600 +- error = posix_acl_equiv_mode(acl, &inode->i_mode);
5601 +- if (error < 0)
5602 ++ error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
5603 ++ if (error)
5604 + return error;
5605 +- else {
5606 +- if (error == 0)
5607 +- acl = NULL;
5608 +- }
5609 + }
5610 + break;
5611 + case ACL_TYPE_DEFAULT:
5612 +diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
5613 +index 349f31a30f40..fdf2ca1dd771 100644
5614 +--- a/fs/ubifs/tnc.c
5615 ++++ b/fs/ubifs/tnc.c
5616 +@@ -34,6 +34,11 @@
5617 + #include <linux/slab.h>
5618 + #include "ubifs.h"
5619 +
5620 ++static int try_read_node(const struct ubifs_info *c, void *buf, int type,
5621 ++ int len, int lnum, int offs);
5622 ++static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key,
5623 ++ struct ubifs_zbranch *zbr, void *node);
5624 ++
5625 + /*
5626 + * Returned codes of 'matches_name()' and 'fallible_matches_name()' functions.
5627 + * @NAME_LESS: name corresponding to the first argument is less than second
5628 +@@ -419,7 +424,19 @@ static int tnc_read_node_nm(struct ubifs_info *c, struct ubifs_zbranch *zbr,
5629 + return 0;
5630 + }
5631 +
5632 +- err = ubifs_tnc_read_node(c, zbr, node);
5633 ++ if (c->replaying) {
5634 ++ err = fallible_read_node(c, &zbr->key, zbr, node);
5635 ++ /*
5636 ++ * When the node was not found, return -ENOENT, 0 otherwise.
5637 ++ * Negative return codes stay as-is.
5638 ++ */
5639 ++ if (err == 0)
5640 ++ err = -ENOENT;
5641 ++ else if (err == 1)
5642 ++ err = 0;
5643 ++ } else {
5644 ++ err = ubifs_tnc_read_node(c, zbr, node);
5645 ++ }
5646 + if (err)
5647 + return err;
5648 +
5649 +@@ -2783,7 +2800,11 @@ struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c,
5650 + if (nm->name) {
5651 + if (err) {
5652 + /* Handle collisions */
5653 +- err = resolve_collision(c, key, &znode, &n, nm);
5654 ++ if (c->replaying)
5655 ++ err = fallible_resolve_collision(c, key, &znode, &n,
5656 ++ nm, 0);
5657 ++ else
5658 ++ err = resolve_collision(c, key, &znode, &n, nm);
5659 + dbg_tnc("rc returned %d, znode %p, n %d",
5660 + err, znode, n);
5661 + if (unlikely(err < 0))
5662 +diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
5663 +index 0e2f37efedd0..9c7b5ce06f4f 100644
5664 +--- a/fs/xfs/xfs_acl.c
5665 ++++ b/fs/xfs/xfs_acl.c
5666 +@@ -402,16 +402,15 @@ xfs_xattr_acl_set(struct dentry *dentry, const char *name,
5667 + goto out_release;
5668 +
5669 + if (type == ACL_TYPE_ACCESS) {
5670 +- umode_t mode = inode->i_mode;
5671 +- error = posix_acl_equiv_mode(acl, &mode);
5672 ++ umode_t mode;
5673 ++ struct posix_acl *old_acl = acl;
5674 +
5675 +- if (error <= 0) {
5676 +- posix_acl_release(acl);
5677 +- acl = NULL;
5678 ++ error = posix_acl_update_mode(inode, &mode, &acl);
5679 +
5680 +- if (error < 0)
5681 +- return error;
5682 +- }
5683 ++ if (error)
5684 ++ goto out_release;
5685 ++ if (!acl)
5686 ++ posix_acl_release(old_acl);
5687 +
5688 + error = xfs_set_mode(inode, mode);
5689 + if (error)
5690 +diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
5691 +index 5b166a07d55e..48dcb167cce5 100644
5692 +--- a/fs/xfs/xfs_log_recover.c
5693 ++++ b/fs/xfs/xfs_log_recover.c
5694 +@@ -3923,6 +3923,7 @@ xlog_recover_clear_agi_bucket(
5695 + agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
5696 + offset = offsetof(xfs_agi_t, agi_unlinked) +
5697 + (sizeof(xfs_agino_t) * bucket);
5698 ++ xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF);
5699 + xfs_trans_log_buf(tp, agibp, offset,
5700 + (offset + sizeof(xfs_agino_t) - 1));
5701 +
5702 +diff --git a/include/linux/capability.h b/include/linux/capability.h
5703 +index aa93e5ef594c..c2eb39ff1a53 100644
5704 +--- a/include/linux/capability.h
5705 ++++ b/include/linux/capability.h
5706 +@@ -40,8 +40,6 @@ struct inode;
5707 + struct dentry;
5708 + struct user_namespace;
5709 +
5710 +-struct user_namespace *current_user_ns(void);
5711 +-
5712 + extern const kernel_cap_t __cap_empty_set;
5713 + extern const kernel_cap_t __cap_init_eff_set;
5714 +
5715 +diff --git a/include/linux/cpu.h b/include/linux/cpu.h
5716 +index 801ff9e73679..d1fcdcbc01e4 100644
5717 +--- a/include/linux/cpu.h
5718 ++++ b/include/linux/cpu.h
5719 +@@ -119,22 +119,16 @@ enum {
5720 + { .notifier_call = fn, .priority = pri }; \
5721 + register_cpu_notifier(&fn##_nb); \
5722 + }
5723 +-#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
5724 +-#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
5725 +-#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
5726 +-#ifdef CONFIG_HOTPLUG_CPU
5727 + extern int register_cpu_notifier(struct notifier_block *nb);
5728 + extern void unregister_cpu_notifier(struct notifier_block *nb);
5729 +-#else
5730 +
5731 +-#ifndef MODULE
5732 +-extern int register_cpu_notifier(struct notifier_block *nb);
5733 +-#else
5734 ++#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
5735 ++#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
5736 ++
5737 + static inline int register_cpu_notifier(struct notifier_block *nb)
5738 + {
5739 + return 0;
5740 + }
5741 +-#endif
5742 +
5743 + static inline void unregister_cpu_notifier(struct notifier_block *nb)
5744 + {
5745 +diff --git a/include/linux/cred.h b/include/linux/cred.h
5746 +index 6c58dd7cb9ac..cd3fb73dc421 100644
5747 +--- a/include/linux/cred.h
5748 ++++ b/include/linux/cred.h
5749 +@@ -345,7 +345,10 @@ extern struct user_namespace init_user_ns;
5750 + #ifdef CONFIG_USER_NS
5751 + #define current_user_ns() (current_cred_xxx(user_ns))
5752 + #else
5753 +-#define current_user_ns() (&init_user_ns)
5754 ++static inline struct user_namespace *current_user_ns(void)
5755 ++{
5756 ++ return &init_user_ns;
5757 ++}
5758 + #endif
5759 +
5760 +
5761 +diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h
5762 +index 113788389b3d..3f66ce8f0819 100644
5763 +--- a/include/linux/jump_label_ratelimit.h
5764 ++++ b/include/linux/jump_label_ratelimit.h
5765 +@@ -14,6 +14,7 @@ struct static_key_deferred {
5766 +
5767 + #ifdef HAVE_JUMP_LABEL
5768 + extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
5769 ++extern void static_key_deferred_flush(struct static_key_deferred *key);
5770 + extern void
5771 + jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
5772 +
5773 +@@ -25,6 +26,9 @@ static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
5774 + {
5775 + static_key_slow_dec(&key->key);
5776 + }
5777 ++static inline void static_key_deferred_flush(struct static_key_deferred *key)
5778 ++{
5779 ++}
5780 + static inline void
5781 + jump_label_rate_limit(struct static_key_deferred *key,
5782 + unsigned long rl)
5783 +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
5784 +index 41239f739d51..0a793dcd975f 100644
5785 +--- a/include/linux/netdevice.h
5786 ++++ b/include/linux/netdevice.h
5787 +@@ -1829,14 +1829,19 @@ static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
5788 + return NAPI_GRO_CB(skb)->frag0_len < hlen;
5789 + }
5790 +
5791 ++static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
5792 ++{
5793 ++ NAPI_GRO_CB(skb)->frag0 = NULL;
5794 ++ NAPI_GRO_CB(skb)->frag0_len = 0;
5795 ++}
5796 ++
5797 + static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
5798 + unsigned int offset)
5799 + {
5800 + if (!pskb_may_pull(skb, hlen))
5801 + return NULL;
5802 +
5803 +- NAPI_GRO_CB(skb)->frag0 = NULL;
5804 +- NAPI_GRO_CB(skb)->frag0_len = 0;
5805 ++ skb_gro_frag0_invalidate(skb);
5806 + return skb->data + offset;
5807 + }
5808 +
5809 +diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
5810 +index 7931efe71175..43cb8d59d0a7 100644
5811 +--- a/include/linux/posix_acl.h
5812 ++++ b/include/linux/posix_acl.h
5813 +@@ -89,6 +89,7 @@ extern int posix_acl_permission(struct inode *, const struct posix_acl *, int);
5814 + extern struct posix_acl *posix_acl_from_mode(umode_t, gfp_t);
5815 + extern int posix_acl_equiv_mode(const struct posix_acl *, umode_t *);
5816 + extern int posix_acl_create(struct posix_acl **, gfp_t, umode_t *);
5817 ++extern int posix_acl_update_mode(struct inode *, umode_t *, struct posix_acl **);
5818 + extern int posix_acl_chmod(struct posix_acl **, gfp_t, umode_t);
5819 +
5820 + extern struct posix_acl *get_posix_acl(struct inode *, int);
5821 +diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h
5822 +index e52958d7c2d1..3018528bd1bf 100644
5823 +--- a/include/uapi/linux/can.h
5824 ++++ b/include/uapi/linux/can.h
5825 +@@ -158,5 +158,6 @@ struct can_filter {
5826 + };
5827 +
5828 + #define CAN_INV_FILTER 0x20000000U /* to be set in can_filter.can_id */
5829 ++#define CAN_RAW_FILTER_MAX 512 /* maximum number of can_filter set via setsockopt() */
5830 +
5831 + #endif /* CAN_H */
5832 +diff --git a/kernel/cpu.c b/kernel/cpu.c
5833 +index 92599d897125..c1f258a0a10e 100644
5834 +--- a/kernel/cpu.c
5835 ++++ b/kernel/cpu.c
5836 +@@ -182,8 +182,6 @@ static int cpu_notify(unsigned long val, void *v)
5837 + return __cpu_notify(val, v, -1, NULL);
5838 + }
5839 +
5840 +-#ifdef CONFIG_HOTPLUG_CPU
5841 +-
5842 + static void cpu_notify_nofail(unsigned long val, void *v)
5843 + {
5844 + BUG_ON(cpu_notify(val, v));
5845 +@@ -198,6 +196,7 @@ void __ref unregister_cpu_notifier(struct notifier_block *nb)
5846 + }
5847 + EXPORT_SYMBOL(unregister_cpu_notifier);
5848 +
5849 ++#ifdef CONFIG_HOTPLUG_CPU
5850 + /**
5851 + * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
5852 + * @cpu: a CPU id
5853 +diff --git a/kernel/jump_label.c b/kernel/jump_label.c
5854 +index 297a9247a3b3..9ce813e99a56 100644
5855 +--- a/kernel/jump_label.c
5856 ++++ b/kernel/jump_label.c
5857 +@@ -113,6 +113,12 @@ void static_key_slow_dec_deferred(struct static_key_deferred *key)
5858 + }
5859 + EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
5860 +
5861 ++void static_key_deferred_flush(struct static_key_deferred *key)
5862 ++{
5863 ++ flush_delayed_work(&key->work);
5864 ++}
5865 ++EXPORT_SYMBOL_GPL(static_key_deferred_flush);
5866 ++
5867 + void jump_label_rate_limit(struct static_key_deferred *key,
5868 + unsigned long rl)
5869 + {
5870 +diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
5871 +index 51a83343df68..132c6a00e301 100644
5872 +--- a/kernel/rtmutex.c
5873 ++++ b/kernel/rtmutex.c
5874 +@@ -64,8 +64,72 @@ static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
5875 +
5876 + static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
5877 + {
5878 +- if (!rt_mutex_has_waiters(lock))
5879 +- clear_rt_mutex_waiters(lock);
5880 ++ unsigned long owner, *p = (unsigned long *) &lock->owner;
5881 ++
5882 ++ if (rt_mutex_has_waiters(lock))
5883 ++ return;
5884 ++
5885 ++ /*
5886 ++ * The rbtree has no waiters enqueued, now make sure that the
5887 ++ * lock->owner still has the waiters bit set, otherwise the
5888 ++ * following can happen:
5889 ++ *
5890 ++ * CPU 0 CPU 1 CPU2
5891 ++ * l->owner=T1
5892 ++ * rt_mutex_lock(l)
5893 ++ * lock(l->lock)
5894 ++ * l->owner = T1 | HAS_WAITERS;
5895 ++ * enqueue(T2)
5896 ++ * boost()
5897 ++ * unlock(l->lock)
5898 ++ * block()
5899 ++ *
5900 ++ * rt_mutex_lock(l)
5901 ++ * lock(l->lock)
5902 ++ * l->owner = T1 | HAS_WAITERS;
5903 ++ * enqueue(T3)
5904 ++ * boost()
5905 ++ * unlock(l->lock)
5906 ++ * block()
5907 ++ * signal(->T2) signal(->T3)
5908 ++ * lock(l->lock)
5909 ++ * dequeue(T2)
5910 ++ * deboost()
5911 ++ * unlock(l->lock)
5912 ++ * lock(l->lock)
5913 ++ * dequeue(T3)
5914 ++ * ==> wait list is empty
5915 ++ * deboost()
5916 ++ * unlock(l->lock)
5917 ++ * lock(l->lock)
5918 ++ * fixup_rt_mutex_waiters()
5919 ++ * if (wait_list_empty(l) {
5920 ++ * l->owner = owner
5921 ++ * owner = l->owner & ~HAS_WAITERS;
5922 ++ * ==> l->owner = T1
5923 ++ * }
5924 ++ * lock(l->lock)
5925 ++ * rt_mutex_unlock(l) fixup_rt_mutex_waiters()
5926 ++ * if (wait_list_empty(l) {
5927 ++ * owner = l->owner & ~HAS_WAITERS;
5928 ++ * cmpxchg(l->owner, T1, NULL)
5929 ++ * ===> Success (l->owner = NULL)
5930 ++ *
5931 ++ * l->owner = owner
5932 ++ * ==> l->owner = T1
5933 ++ * }
5934 ++ *
5935 ++ * With the check for the waiter bit in place T3 on CPU2 will not
5936 ++ * overwrite. All tasks fiddling with the waiters bit are
5937 ++ * serialized by l->lock, so nothing else can modify the waiters
5938 ++ * bit. If the bit is set then nothing can change l->owner either
5939 ++ * so the simple RMW is safe. The cmpxchg() will simply fail if it
5940 ++ * happens in the middle of the RMW because the waiters bit is
5941 ++ * still set.
5942 ++ */
5943 ++ owner = READ_ONCE(*p);
5944 ++ if (owner & RT_MUTEX_HAS_WAITERS)
5945 ++ WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
5946 + }
5947 +
5948 + /*
5949 +diff --git a/kernel/rtmutex_common.h b/kernel/rtmutex_common.h
5950 +index 53a66c85261b..1823c094fe96 100644
5951 +--- a/kernel/rtmutex_common.h
5952 ++++ b/kernel/rtmutex_common.h
5953 +@@ -96,8 +96,9 @@ task_top_pi_waiter(struct task_struct *p)
5954 +
5955 + static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
5956 + {
5957 +- return (struct task_struct *)
5958 +- ((unsigned long)lock->owner & ~RT_MUTEX_OWNER_MASKALL);
5959 ++ unsigned long owner = (unsigned long) READ_ONCE(lock->owner);
5960 ++
5961 ++ return (struct task_struct *) (owner & ~RT_MUTEX_OWNER_MASKALL);
5962 + }
5963 +
5964 + /*
5965 +diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
5966 +index 8a95408b1345..f27eb5db3260 100644
5967 +--- a/kernel/time/tick-broadcast.c
5968 ++++ b/kernel/time/tick-broadcast.c
5969 +@@ -778,6 +778,9 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
5970 + {
5971 + int cpu = smp_processor_id();
5972 +
5973 ++ if (!bc)
5974 ++ return;
5975 ++
5976 + /* Set it up only once ! */
5977 + if (bc->event_handler != tick_handle_oneshot_broadcast) {
5978 + int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;
5979 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
5980 +index 2aaf11bdfb17..24d50334d51c 100644
5981 +--- a/mm/hugetlb.c
5982 ++++ b/mm/hugetlb.c
5983 +@@ -1114,23 +1114,32 @@ free:
5984 + }
5985 +
5986 + /*
5987 +- * When releasing a hugetlb pool reservation, any surplus pages that were
5988 +- * allocated to satisfy the reservation must be explicitly freed if they were
5989 +- * never used.
5990 +- * Called with hugetlb_lock held.
5991 ++ * This routine has two main purposes:
5992 ++ * 1) Decrement the reservation count (resv_huge_pages) by the value passed
5993 ++ * in unused_resv_pages. This corresponds to the prior adjustments made
5994 ++ * to the associated reservation map.
5995 ++ * 2) Free any unused surplus pages that may have been allocated to satisfy
5996 ++ * the reservation. As many as unused_resv_pages may be freed.
5997 ++ *
5998 ++ * Called with hugetlb_lock held. However, the lock could be dropped (and
5999 ++ * reacquired) during calls to cond_resched_lock. Whenever dropping the lock,
6000 ++ * we must make sure nobody else can claim pages we are in the process of
6001 ++ * freeing. Do this by ensuring resv_huge_page always is greater than the
6002 ++ * number of huge pages we plan to free when dropping the lock.
6003 + */
6004 + static void return_unused_surplus_pages(struct hstate *h,
6005 + unsigned long unused_resv_pages)
6006 + {
6007 + unsigned long nr_pages;
6008 +
6009 +- /* Uncommit the reservation */
6010 +- h->resv_huge_pages -= unused_resv_pages;
6011 +-
6012 + /* Cannot return gigantic pages currently */
6013 + if (h->order >= MAX_ORDER)
6014 +- return;
6015 ++ goto out;
6016 +
6017 ++ /*
6018 ++ * Part (or even all) of the reservation could have been backed
6019 ++ * by pre-allocated pages. Only free surplus pages.
6020 ++ */
6021 + nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
6022 +
6023 + /*
6024 +@@ -1140,12 +1149,22 @@ static void return_unused_surplus_pages(struct hstate *h,
6025 + * when the nodes with surplus pages have no free pages.
6026 + * free_pool_huge_page() will balance the the freed pages across the
6027 + * on-line nodes with memory and will handle the hstate accounting.
6028 ++ *
6029 ++ * Note that we decrement resv_huge_pages as we free the pages. If
6030 ++ * we drop the lock, resv_huge_pages will still be sufficiently large
6031 ++ * to cover subsequent pages we may free.
6032 + */
6033 + while (nr_pages--) {
6034 ++ h->resv_huge_pages--;
6035 ++ unused_resv_pages--;
6036 + if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
6037 +- break;
6038 ++ goto out;
6039 + cond_resched_lock(&hugetlb_lock);
6040 + }
6041 ++
6042 ++out:
6043 ++ /* Fully uncommit the reservation */
6044 ++ h->resv_huge_pages -= unused_resv_pages;
6045 + }
6046 +
6047 + /*
6048 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
6049 +index 7abab3b7d140..8927c8d0ff4e 100644
6050 +--- a/mm/page_alloc.c
6051 ++++ b/mm/page_alloc.c
6052 +@@ -5279,15 +5279,18 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
6053 + sizeof(arch_zone_lowest_possible_pfn));
6054 + memset(arch_zone_highest_possible_pfn, 0,
6055 + sizeof(arch_zone_highest_possible_pfn));
6056 +- arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
6057 +- arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
6058 +- for (i = 1; i < MAX_NR_ZONES; i++) {
6059 ++
6060 ++ start_pfn = find_min_pfn_with_active_regions();
6061 ++
6062 ++ for (i = 0; i < MAX_NR_ZONES; i++) {
6063 + if (i == ZONE_MOVABLE)
6064 + continue;
6065 +- arch_zone_lowest_possible_pfn[i] =
6066 +- arch_zone_highest_possible_pfn[i-1];
6067 +- arch_zone_highest_possible_pfn[i] =
6068 +- max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
6069 ++
6070 ++ end_pfn = max(max_zone_pfn[i], start_pfn);
6071 ++ arch_zone_lowest_possible_pfn[i] = start_pfn;
6072 ++ arch_zone_highest_possible_pfn[i] = end_pfn;
6073 ++
6074 ++ start_pfn = end_pfn;
6075 + }
6076 + arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
6077 + arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
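The reworked loop above derives each zone's range by carrying the previous zone's end forward, so a zone whose configured limit falls below that point simply ends up empty because max() clamps its end to its start. A small standalone sketch of the same computation, with made-up pfn values rather than real zone data:

#include <stdio.h>

#define MAX_ZONES 3

int main(void)
{
    /* hypothetical per-zone upper limits, in page-frame numbers */
    unsigned long max_zone_pfn[MAX_ZONES] = { 4096, 1048576, 4194304 };
    unsigned long lowest[MAX_ZONES], highest[MAX_ZONES];
    unsigned long start_pfn = 16;   /* pretend first usable pfn */
    unsigned long end_pfn;
    int i;

    for (i = 0; i < MAX_ZONES; i++) {
        end_pfn = max_zone_pfn[i] > start_pfn ? max_zone_pfn[i] : start_pfn;
        lowest[i] = start_pfn;
        highest[i] = end_pfn;
        start_pfn = end_pfn;        /* next zone begins where this one ends */
    }

    for (i = 0; i < MAX_ZONES; i++)
        printf("zone %d: [%lu, %lu)\n", i, lowest[i], highest[i]);
    return 0;
}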
6078 +diff --git a/mm/vmscan.c b/mm/vmscan.c
6079 +index 6dc33d9dc2cf..dc23ad3ecf4c 100644
6080 +--- a/mm/vmscan.c
6081 ++++ b/mm/vmscan.c
6082 +@@ -231,6 +231,7 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
6083 + int nid = shrinkctl->nid;
6084 + long batch_size = shrinker->batch ? shrinker->batch
6085 + : SHRINK_BATCH;
6086 ++ long scanned = 0, next_deferred;
6087 +
6088 + freeable = shrinker->count_objects(shrinker, shrinkctl);
6089 + if (freeable == 0)
6090 +@@ -253,7 +254,9 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
6091 + "shrink_slab: %pF negative objects to delete nr=%ld\n",
6092 + shrinker->scan_objects, total_scan);
6093 + total_scan = freeable;
6094 +- }
6095 ++ next_deferred = nr;
6096 ++ } else
6097 ++ next_deferred = total_scan;
6098 +
6099 + /*
6100 + * We need to avoid excessive windup on filesystem shrinkers
6101 +@@ -310,17 +313,22 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker,
6102 +
6103 + count_vm_events(SLABS_SCANNED, nr_to_scan);
6104 + total_scan -= nr_to_scan;
6105 ++ scanned += nr_to_scan;
6106 +
6107 + cond_resched();
6108 + }
6109 +
6110 ++ if (next_deferred >= scanned)
6111 ++ next_deferred -= scanned;
6112 ++ else
6113 ++ next_deferred = 0;
6114 + /*
6115 + * move the unused scan count back into the shrinker in a
6116 + * manner that handles concurrent updates. If we exhausted the
6117 + * scan, there is no need to do an update.
6118 + */
6119 +- if (total_scan > 0)
6120 +- new_nr = atomic_long_add_return(total_scan,
6121 ++ if (next_deferred > 0)
6122 ++ new_nr = atomic_long_add_return(next_deferred,
6123 + &shrinker->nr_deferred[nid]);
6124 + else
6125 + new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
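The hunk above separates the amount actually scanned from the amount deferred and pushes back only the unscanned remainder, so a shrinker that keeps making little progress can no longer accumulate an unbounded backlog. A rough userspace sketch of that bookkeeping; the numbers and the batch loop are illustrative, not the kernel's heuristics:

#include <stdio.h>

int main(void)
{
    long nr_deferred = 900;     /* work carried over from earlier passes */
    long freeable = 400;
    long delta = 100;           /* new work for this pass */
    long total_scan = nr_deferred + delta;
    long next_deferred, scanned = 0, batch = 128;

    if (total_scan < 0) {
        /* defensive: nonsense from the shrinker, keep the old backlog */
        total_scan = freeable;
        next_deferred = nr_deferred;
    } else {
        next_deferred = total_scan;
    }

    /* scan in batches; pretend every batch succeeds */
    while (total_scan >= batch) {
        total_scan -= batch;
        scanned += batch;
    }

    /* only the work that was not actually scanned is carried forward */
    next_deferred = next_deferred >= scanned ? next_deferred - scanned : 0;
    printf("scanned=%ld, carried over=%ld\n", scanned, next_deferred);
    return 0;
}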
6126 +diff --git a/net/can/raw.c b/net/can/raw.c
6127 +index 641e1c895123..e10699cc72bd 100644
6128 +--- a/net/can/raw.c
6129 ++++ b/net/can/raw.c
6130 +@@ -470,6 +470,9 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
6131 + if (optlen % sizeof(struct can_filter) != 0)
6132 + return -EINVAL;
6133 +
6134 ++ if (optlen > CAN_RAW_FILTER_MAX * sizeof(struct can_filter))
6135 ++ return -EINVAL;
6136 ++
6137 + count = optlen / sizeof(struct can_filter);
6138 +
6139 + if (count > 1) {
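The added check rejects a setsockopt buffer that would describe more filters than the protocol allows, on top of the existing multiple-of-struct-size check. A standalone sketch of the validation follows; the struct layout and the limit of 512 are stand-ins, since CAN_RAW_FILTER_MAX itself is defined in a uapi header not shown in this hunk:

#include <errno.h>
#include <stdio.h>

#define FILTER_MAX 512               /* illustrative maximum filter count */

struct filter { unsigned int id, mask; };

static int check_filter_optlen(size_t optlen)
{
    if (optlen % sizeof(struct filter) != 0)
        return -EINVAL;              /* not a whole number of entries */
    if (optlen > FILTER_MAX * sizeof(struct filter))
        return -EINVAL;              /* more entries than permitted */
    return (int)(optlen / sizeof(struct filter));   /* filter count */
}

int main(void)
{
    printf("%d\n", check_filter_optlen(4 * sizeof(struct filter)));
    printf("%d\n", check_filter_optlen(513 * sizeof(struct filter)));
    return 0;
}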
6140 +diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
6141 +index 469f3138d0f6..ecdf164c80fe 100644
6142 +--- a/net/ceph/messenger.c
6143 ++++ b/net/ceph/messenger.c
6144 +@@ -1972,6 +1972,19 @@ static int process_connect(struct ceph_connection *con)
6145 +
6146 + dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
6147 +
6148 ++ if (con->auth_reply_buf) {
6149 ++ /*
6150 ++ * Any connection that defines ->get_authorizer()
6151 ++ * should also define ->verify_authorizer_reply().
6152 ++ * See get_connect_authorizer().
6153 ++ */
6154 ++ ret = con->ops->verify_authorizer_reply(con, 0);
6155 ++ if (ret < 0) {
6156 ++ con->error_msg = "bad authorize reply";
6157 ++ return ret;
6158 ++ }
6159 ++ }
6160 ++
6161 + switch (con->in_reply.tag) {
6162 + case CEPH_MSGR_TAG_FEATURES:
6163 + pr_err("%s%lld %s feature set mismatch,"
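The new block enforces the contract spelled out in its comment: a connection that supplied an authorizer must also be able to verify the server's reply, and the connect sequence is aborted when that verification fails. The sketch below illustrates the callback contract with an invented ops structure, not the real ceph_connection API:

#include <stdio.h>

struct conn;

struct conn_ops {
    int (*get_authorizer)(struct conn *c);
    int (*verify_authorizer_reply)(struct conn *c);
};

struct conn {
    const struct conn_ops *ops;
    void *auth_reply_buf;
    const char *error_msg;
};

static int process_connect(struct conn *c)
{
    if (c->auth_reply_buf) {
        /* a connection with an authorizer must be able to verify the reply */
        int ret = c->ops->verify_authorizer_reply(c);
        if (ret < 0) {
            c->error_msg = "bad authorize reply";
            return ret;
        }
    }
    return 0;   /* continue with the usual tag handling */
}

static int ok_reply(struct conn *c)  { (void)c; return 0; }
static int bad_reply(struct conn *c) { (void)c; return -1; }

int main(void)
{
    char buf[16];
    struct conn_ops good = { .verify_authorizer_reply = ok_reply };
    struct conn_ops bad  = { .verify_authorizer_reply = bad_reply };
    struct conn a = { .ops = &good, .auth_reply_buf = buf };
    struct conn b = { .ops = &bad,  .auth_reply_buf = buf };

    printf("%d %d\n", process_connect(&a), process_connect(&b));
    return 0;
}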
6164 +diff --git a/net/core/dev.c b/net/core/dev.c
6165 +index fa6d9a47f71f..6b0ddf661f92 100644
6166 +--- a/net/core/dev.c
6167 ++++ b/net/core/dev.c
6168 +@@ -3969,7 +3969,9 @@ static void skb_gro_reset_offset(struct sk_buff *skb)
6169 + pinfo->nr_frags &&
6170 + !PageHighMem(skb_frag_page(frag0))) {
6171 + NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
6172 +- NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
6173 ++ NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
6174 ++ skb_frag_size(frag0),
6175 ++ skb->end - skb->tail);
6176 + }
6177 + }
6178 +
6179 +diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
6180 +index f27d126239b1..5b40f7319504 100644
6181 +--- a/net/core/drop_monitor.c
6182 ++++ b/net/core/drop_monitor.c
6183 +@@ -80,6 +80,7 @@ static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
6184 + struct nlattr *nla;
6185 + struct sk_buff *skb;
6186 + unsigned long flags;
6187 ++ void *msg_header;
6188 +
6189 + al = sizeof(struct net_dm_alert_msg);
6190 + al += dm_hit_limit * sizeof(struct net_dm_drop_point);
6191 +@@ -87,21 +88,41 @@ static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
6192 +
6193 + skb = genlmsg_new(al, GFP_KERNEL);
6194 +
6195 +- if (skb) {
6196 +- genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
6197 +- 0, NET_DM_CMD_ALERT);
6198 +- nla = nla_reserve(skb, NLA_UNSPEC,
6199 +- sizeof(struct net_dm_alert_msg));
6200 +- msg = nla_data(nla);
6201 +- memset(msg, 0, al);
6202 +- } else {
6203 +- mod_timer(&data->send_timer, jiffies + HZ / 10);
6204 ++ if (!skb)
6205 ++ goto err;
6206 ++
6207 ++ msg_header = genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
6208 ++ 0, NET_DM_CMD_ALERT);
6209 ++ if (!msg_header) {
6210 ++ nlmsg_free(skb);
6211 ++ skb = NULL;
6212 ++ goto err;
6213 ++ }
6214 ++ nla = nla_reserve(skb, NLA_UNSPEC,
6215 ++ sizeof(struct net_dm_alert_msg));
6216 ++ if (!nla) {
6217 ++ nlmsg_free(skb);
6218 ++ skb = NULL;
6219 ++ goto err;
6220 + }
6221 ++ msg = nla_data(nla);
6222 ++ memset(msg, 0, al);
6223 ++ goto out;
6224 +
6225 ++err:
6226 ++ mod_timer(&data->send_timer, jiffies + HZ / 10);
6227 ++out:
6228 + spin_lock_irqsave(&data->lock, flags);
6229 + swap(data->skb, skb);
6230 + spin_unlock_irqrestore(&data->lock, flags);
6231 +
6232 ++ if (skb) {
6233 ++ struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
6234 ++ struct genlmsghdr *gnlh = (struct genlmsghdr *)nlmsg_data(nlh);
6235 ++
6236 ++ genlmsg_end(skb, genlmsg_data(gnlh));
6237 ++ }
6238 ++
6239 + return skb;
6240 + }
6241 +
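The rewritten function above replaces the nested if/else with explicit err/out labels: every construction step is checked, any failure frees the buffer and falls through to the retry path, and the message is finalized only once a fully built buffer has been swapped out. A generic userspace sketch of that goto-based error-path shape (all names are invented, and the swap and finalize steps are only hinted at in comments):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct alert { char payload[64]; };

static struct alert *build_alert(int fail_step)
{
    struct alert *a = malloc(sizeof(*a));

    if (!a)
        goto err;
    if (fail_step == 1) {           /* e.g. a header could not be added */
        free(a);
        a = NULL;
        goto err;
    }
    memset(a->payload, 0, sizeof(a->payload));
    goto out;

err:
    fprintf(stderr, "build failed, scheduling retry\n");
out:
    /* caller swaps 'a' into per-CPU state under its lock, then finalizes
     * the previous buffer if one was swapped out */
    return a;
}

int main(void)
{
    struct alert *ok = build_alert(0);
    struct alert *bad = build_alert(1);

    printf("ok=%p bad=%p\n", (void *)ok, (void *)bad);
    free(ok);
    free(bad);
    return 0;
}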
6242 +diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
6243 +index 931bc8d6d8ee..38ab073783e2 100644
6244 +--- a/net/ipv4/igmp.c
6245 ++++ b/net/ipv4/igmp.c
6246 +@@ -221,9 +221,14 @@ static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
6247 + static void igmp_gq_start_timer(struct in_device *in_dev)
6248 + {
6249 + int tv = net_random() % in_dev->mr_maxdelay;
6250 ++ unsigned long exp = jiffies + tv + 2;
6251 ++
6252 ++ if (in_dev->mr_gq_running &&
6253 ++ time_after_eq(exp, (in_dev->mr_gq_timer).expires))
6254 ++ return;
6255 +
6256 + in_dev->mr_gq_running = 1;
6257 +- if (!mod_timer(&in_dev->mr_gq_timer, jiffies+tv+2))
6258 ++ if (!mod_timer(&in_dev->mr_gq_timer, exp))
6259 + in_dev_hold(in_dev);
6260 + }
6261 +
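The new test keeps a pending general-query timer from being pushed further into the future: a fresh random delay is only applied when it would fire earlier than the expiry already armed. A small sketch of that rule, using plain integers instead of jiffies (so none of the wraparound handling that time_after_eq() provides):

#include <stdbool.h>
#include <stdio.h>

struct gq_timer {
    bool running;
    unsigned long expires;   /* absolute expiry time */
};

static void gq_start_timer(struct gq_timer *t, unsigned long now,
                           unsigned long delay)
{
    unsigned long exp = now + delay + 2;

    if (t->running && exp >= t->expires)
        return;              /* would only postpone the pending timer */

    t->running = true;
    t->expires = exp;
}

int main(void)
{
    struct gq_timer t = { .running = true, .expires = 110 };

    gq_start_timer(&t, 100, 50);  /* exp 152 >= 110: ignored */
    printf("expires=%lu\n", t.expires);
    gq_start_timer(&t, 100, 3);   /* exp 105 < 110: re-armed earlier */
    printf("expires=%lu\n", t.expires);
    return 0;
}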
6262 +diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
6263 +index d82de7228100..1a6ef4c8cd8b 100644
6264 +--- a/net/ipv6/ip6_offload.c
6265 ++++ b/net/ipv6/ip6_offload.c
6266 +@@ -177,6 +177,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
6267 + ops = rcu_dereference(inet6_offloads[proto]);
6268 + if (!ops || !ops->callbacks.gro_receive) {
6269 + __pskb_pull(skb, skb_gro_offset(skb));
6270 ++ skb_gro_frag0_invalidate(skb);
6271 + proto = ipv6_gso_pull_exthdrs(skb, proto);
6272 + skb_gro_pull(skb, -skb_transport_offset(skb));
6273 + skb_reset_transport_header(skb);
6274 +diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
6275 +index c4e69763c602..c2afb29dc1d7 100644
6276 +--- a/net/ipv6/raw.c
6277 ++++ b/net/ipv6/raw.c
6278 +@@ -585,8 +585,11 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
6279 + }
6280 +
6281 + offset += skb_transport_offset(skb);
6282 +- if (skb_copy_bits(skb, offset, &csum, 2))
6283 +- BUG();
6284 ++ err = skb_copy_bits(skb, offset, &csum, 2);
6285 ++ if (err < 0) {
6286 ++ ip6_flush_pending_frames(sk);
6287 ++ goto out;
6288 ++ }
6289 +
6290 + /* in case cksum was not initialized */
6291 + if (unlikely(csum))
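The change above turns a hard BUG() into graceful failure: if the checksum bytes cannot be read back from the queued data, the pending frames are flushed and the error is returned to the caller. A userspace sketch of the same shape, with invented helpers standing in for skb_copy_bits() and the flush:

#include <stdio.h>
#include <string.h>

static int copy_bits(const char *buf, size_t len, size_t off,
                     void *dst, size_t n)
{
    if (off + n > len)
        return -1;           /* offset runs past the buffered data */
    memcpy(dst, buf + off, n);
    return 0;
}

static int push_pending(const char *buf, size_t len, size_t csum_off)
{
    unsigned short csum;

    if (copy_bits(buf, len, csum_off, &csum, sizeof(csum)) < 0) {
        /* flush queued frames and report the failure instead of crashing */
        return -1;
    }
    return 0;                /* would go on to fold and store the checksum */
}

int main(void)
{
    char pkt[8] = { 0 };

    printf("%d %d\n", push_pending(pkt, sizeof(pkt), 2),
                      push_pending(pkt, sizeof(pkt), 7));
    return 0;
}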
6292 +diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
6293 +index 2ea40d1877a6..042e5d839623 100644
6294 +--- a/net/sched/cls_api.c
6295 ++++ b/net/sched/cls_api.c
6296 +@@ -136,12 +136,14 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n)
6297 + unsigned long cl;
6298 + unsigned long fh;
6299 + int err;
6300 +- int tp_created = 0;
6301 ++ int tp_created;
6302 +
6303 + if ((n->nlmsg_type != RTM_GETTFILTER) && !netlink_capable(skb, CAP_NET_ADMIN))
6304 + return -EPERM;
6305 +
6306 + replay:
6307 ++ tp_created = 0;
6308 ++
6309 + err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL);
6310 + if (err < 0)
6311 + return err;
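Moving the tp_created initialization below the replay label means per-attempt state is reset on every retry, so a later error path cannot tear down a classifier created by an earlier, already-completed pass. A toy illustration of the reset-after-label pattern:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
    int attempts = 0;
    bool tp_created;

replay:
    tp_created = false;          /* per-attempt state lives below the label */
    attempts++;

    if (attempts == 1) {
        tp_created = true;       /* first pass creates something ... */
        goto replay;             /* ... then has to restart */
    }

    /* on the retry, tp_created correctly reflects this attempt only */
    printf("attempt %d, tp_created=%d\n", attempts, tp_created);
    return 0;
}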
6312 +diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
6313 +index 9d7e6097ef5b..6d0531a2a5c9 100644
6314 +--- a/net/sunrpc/auth_gss/svcauth_gss.c
6315 ++++ b/net/sunrpc/auth_gss/svcauth_gss.c
6316 +@@ -1485,7 +1485,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
6317 + case RPC_GSS_PROC_DESTROY:
6318 + if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq))
6319 + goto auth_err;
6320 +- rsci->h.expiry_time = get_seconds();
6321 ++ rsci->h.expiry_time = seconds_since_boot();
6322 + set_bit(CACHE_NEGATIVE, &rsci->h.flags);
6323 + if (resv->iov_len + 4 > PAGE_SIZE)
6324 + goto drop;
6325 +diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c
6326 +index 8275f0e55106..4b2f44c20caf 100644
6327 +--- a/scripts/kconfig/nconf.gui.c
6328 ++++ b/scripts/kconfig/nconf.gui.c
6329 +@@ -364,12 +364,14 @@ int dialog_inputbox(WINDOW *main_window,
6330 + WINDOW *prompt_win;
6331 + WINDOW *form_win;
6332 + PANEL *panel;
6333 +- int i, x, y;
6334 ++ int i, x, y, lines, columns, win_lines, win_cols;
6335 + int res = -1;
6336 + int cursor_position = strlen(init);
6337 + int cursor_form_win;
6338 + char *result = *resultp;
6339 +
6340 ++ getmaxyx(stdscr, lines, columns);
6341 ++
6342 + if (strlen(init)+1 > *result_len) {
6343 + *result_len = strlen(init)+1;
6344 + *resultp = result = realloc(result, *result_len);
6345 +@@ -386,14 +388,19 @@ int dialog_inputbox(WINDOW *main_window,
6346 + if (title)
6347 + prompt_width = max(prompt_width, strlen(title));
6348 +
6349 ++ win_lines = min(prompt_lines+6, lines-2);
6350 ++ win_cols = min(prompt_width+7, columns-2);
6351 ++ prompt_lines = max(win_lines-6, 0);
6352 ++ prompt_width = max(win_cols-7, 0);
6353 ++
6354 + /* place dialog in middle of screen */
6355 +- y = (getmaxy(stdscr)-(prompt_lines+4))/2;
6356 +- x = (getmaxx(stdscr)-(prompt_width+4))/2;
6357 ++ y = (lines-win_lines)/2;
6358 ++ x = (columns-win_cols)/2;
6359 +
6360 + strncpy(result, init, *result_len);
6361 +
6362 + /* create the windows */
6363 +- win = newwin(prompt_lines+6, prompt_width+7, y, x);
6364 ++ win = newwin(win_lines, win_cols, y, x);
6365 + prompt_win = derwin(win, prompt_lines+1, prompt_width, 2, 2);
6366 + form_win = derwin(win, 1, prompt_width, prompt_lines+3, 2);
6367 + keypad(form_win, TRUE);
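The hunk above clamps the dialog to the terminal size, derives the prompt area back from the clamped window, and centers the window from those clamped values, so an oversized prompt can no longer produce a window larger than the screen. A standalone arithmetic sketch of that sizing (no curses calls; the terminal and prompt sizes are made up):

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
    int lines = 24, columns = 80;                /* pretend terminal size */
    int prompt_lines = 40, prompt_width = 120;   /* oversized prompt */
    int win_lines, win_cols, y, x;

    win_lines = MIN(prompt_lines + 6, lines - 2);
    win_cols  = MIN(prompt_width + 7, columns - 2);
    prompt_lines = MAX(win_lines - 6, 0);        /* shrink prompt to fit */
    prompt_width = MAX(win_cols - 7, 0);

    y = (lines - win_lines) / 2;                 /* center from clamped size */
    x = (columns - win_cols) / 2;

    printf("window %dx%d at (%d,%d), prompt %dx%d\n",
           win_cols, win_lines, x, y, prompt_width, prompt_lines);
    return 0;
}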
6368 +diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
6369 +index b30489856741..a798c75c7726 100644
6370 +--- a/security/apparmor/apparmorfs.c
6371 ++++ b/security/apparmor/apparmorfs.c
6372 +@@ -380,6 +380,8 @@ void __aa_fs_profile_migrate_dents(struct aa_profile *old,
6373 +
6374 + for (i = 0; i < AAFS_PROF_SIZEOF; i++) {
6375 + new->dents[i] = old->dents[i];
6376 ++ if (new->dents[i])
6377 ++ new->dents[i]->d_inode->i_mtime = CURRENT_TIME;
6378 + old->dents[i] = NULL;
6379 + }
6380 + }
6381 +diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c
6382 +index 031d2d9dd695..47d0f9ecd3bc 100644
6383 +--- a/security/apparmor/audit.c
6384 ++++ b/security/apparmor/audit.c
6385 +@@ -212,7 +212,8 @@ int aa_audit(int type, struct aa_profile *profile, gfp_t gfp,
6386 +
6387 + if (sa->aad->type == AUDIT_APPARMOR_KILL)
6388 + (void)send_sig_info(SIGKILL, NULL,
6389 +- sa->aad->tsk ? sa->aad->tsk : current);
6390 ++ sa->type == LSM_AUDIT_DATA_TASK && sa->aad->tsk ?
6391 ++ sa->aad->tsk : current);
6392 +
6393 + if (sa->aad->type == AUDIT_APPARMOR_ALLOWED)
6394 + return complain_error(sa->aad->error);
6395 +diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
6396 +index 0c23888b9816..1c7763766135 100644
6397 +--- a/security/apparmor/domain.c
6398 ++++ b/security/apparmor/domain.c
6399 +@@ -348,7 +348,7 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
6400 + file_inode(bprm->file)->i_uid,
6401 + file_inode(bprm->file)->i_mode
6402 + };
6403 +- const char *name = NULL, *target = NULL, *info = NULL;
6404 ++ const char *name = NULL, *info = NULL;
6405 + int error = cap_bprm_set_creds(bprm);
6406 + if (error)
6407 + return error;
6408 +@@ -403,6 +403,7 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
6409 + if (cxt->onexec) {
6410 + struct file_perms cp;
6411 + info = "change_profile onexec";
6412 ++ new_profile = aa_get_newest_profile(cxt->onexec);
6413 + if (!(perms.allow & AA_MAY_ONEXEC))
6414 + goto audit;
6415 +
6416 +@@ -417,7 +418,6 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
6417 +
6418 + if (!(cp.allow & AA_MAY_ONEXEC))
6419 + goto audit;
6420 +- new_profile = aa_get_newest_profile(cxt->onexec);
6421 + goto apply;
6422 + }
6423 +
6424 +@@ -437,7 +437,7 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
6425 + new_profile = aa_get_newest_profile(ns->unconfined);
6426 + info = "ux fallback";
6427 + } else {
6428 +- error = -ENOENT;
6429 ++ error = -EACCES;
6430 + info = "profile not found";
6431 + /* remove MAY_EXEC to audit as failure */
6432 + perms.allow &= ~MAY_EXEC;
6433 +@@ -449,10 +449,8 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
6434 + if (!new_profile) {
6435 + error = -ENOMEM;
6436 + info = "could not create null profile";
6437 +- } else {
6438 ++ } else
6439 + error = -EACCES;
6440 +- target = new_profile->base.hname;
6441 +- }
6442 + perms.xindex |= AA_X_UNSAFE;
6443 + } else
6444 + /* fail exec */
6445 +@@ -463,7 +461,6 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
6446 + * fail the exec.
6447 + */
6448 + if (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS) {
6449 +- aa_put_profile(new_profile);
6450 + error = -EPERM;
6451 + goto cleanup;
6452 + }
6453 +@@ -478,10 +475,8 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
6454 +
6455 + if (bprm->unsafe & (LSM_UNSAFE_PTRACE | LSM_UNSAFE_PTRACE_CAP)) {
6456 + error = may_change_ptraced_domain(current, new_profile);
6457 +- if (error) {
6458 +- aa_put_profile(new_profile);
6459 ++ if (error)
6460 + goto audit;
6461 +- }
6462 + }
6463 +
6464 + /* Determine if secure exec is needed.
6465 +@@ -502,7 +497,6 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
6466 + bprm->unsafe |= AA_SECURE_X_NEEDED;
6467 + }
6468 + apply:
6469 +- target = new_profile->base.hname;
6470 + /* when transitioning profiles clear unsafe personality bits */
6471 + bprm->per_clear |= PER_CLEAR_ON_SETID;
6472 +
6473 +@@ -510,15 +504,19 @@ x_clear:
6474 + aa_put_profile(cxt->profile);
6475 + /* transfer new profile reference will be released when cxt is freed */
6476 + cxt->profile = new_profile;
6477 ++ new_profile = NULL;
6478 +
6479 + /* clear out all temporary/transitional state from the context */
6480 + aa_clear_task_cxt_trans(cxt);
6481 +
6482 + audit:
6483 + error = aa_audit_file(profile, &perms, GFP_KERNEL, OP_EXEC, MAY_EXEC,
6484 +- name, target, cond.uid, info, error);
6485 ++ name,
6486 ++ new_profile ? new_profile->base.hname : NULL,
6487 ++ cond.uid, info, error);
6488 +
6489 + cleanup:
6490 ++ aa_put_profile(new_profile);
6491 + aa_put_profile(profile);
6492 + kfree(buffer);
6493 +
6494 +diff --git a/security/apparmor/file.c b/security/apparmor/file.c
6495 +index fdaa50cb1876..a4f7f1a5a798 100644
6496 +--- a/security/apparmor/file.c
6497 ++++ b/security/apparmor/file.c
6498 +@@ -110,7 +110,8 @@ int aa_audit_file(struct aa_profile *profile, struct file_perms *perms,
6499 + int type = AUDIT_APPARMOR_AUTO;
6500 + struct common_audit_data sa;
6501 + struct apparmor_audit_data aad = {0,};
6502 +- sa.type = LSM_AUDIT_DATA_NONE;
6503 ++ sa.type = LSM_AUDIT_DATA_TASK;
6504 ++ sa.u.tsk = NULL;
6505 + sa.aad = &aad;
6506 + aad.op = op,
6507 + aad.fs.request = request;
6508 +diff --git a/security/apparmor/include/match.h b/security/apparmor/include/match.h
6509 +index 001c43aa0406..a1c04fe86790 100644
6510 +--- a/security/apparmor/include/match.h
6511 ++++ b/security/apparmor/include/match.h
6512 +@@ -62,6 +62,7 @@ struct table_set_header {
6513 + #define YYTD_ID_ACCEPT2 6
6514 + #define YYTD_ID_NXT 7
6515 + #define YYTD_ID_TSIZE 8
6516 ++#define YYTD_ID_MAX 8
6517 +
6518 + #define YYTD_DATA8 1
6519 + #define YYTD_DATA16 2
6520 +diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h
6521 +index c28b0f20ab53..52275f040a5f 100644
6522 +--- a/security/apparmor/include/policy.h
6523 ++++ b/security/apparmor/include/policy.h
6524 +@@ -403,6 +403,8 @@ static inline int AUDIT_MODE(struct aa_profile *profile)
6525 + return profile->audit;
6526 + }
6527 +
6528 ++bool policy_view_capable(void);
6529 ++bool policy_admin_capable(void);
6530 + bool aa_may_manage_policy(int op);
6531 +
6532 + #endif /* __AA_POLICY_H */
6533 +diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
6534 +index fb99e18123b4..00a92de97c82 100644
6535 +--- a/security/apparmor/lsm.c
6536 ++++ b/security/apparmor/lsm.c
6537 +@@ -762,51 +762,49 @@ __setup("apparmor=", apparmor_enabled_setup);
6538 + /* set global flag turning off the ability to load policy */
6539 + static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp)
6540 + {
6541 +- if (!capable(CAP_MAC_ADMIN))
6542 ++ if (!policy_admin_capable())
6543 + return -EPERM;
6544 +- if (aa_g_lock_policy)
6545 +- return -EACCES;
6546 + return param_set_bool(val, kp);
6547 + }
6548 +
6549 + static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp)
6550 + {
6551 +- if (!capable(CAP_MAC_ADMIN))
6552 ++ if (!policy_view_capable())
6553 + return -EPERM;
6554 + return param_get_bool(buffer, kp);
6555 + }
6556 +
6557 + static int param_set_aabool(const char *val, const struct kernel_param *kp)
6558 + {
6559 +- if (!capable(CAP_MAC_ADMIN))
6560 ++ if (!policy_admin_capable())
6561 + return -EPERM;
6562 + return param_set_bool(val, kp);
6563 + }
6564 +
6565 + static int param_get_aabool(char *buffer, const struct kernel_param *kp)
6566 + {
6567 +- if (!capable(CAP_MAC_ADMIN))
6568 ++ if (!policy_view_capable())
6569 + return -EPERM;
6570 + return param_get_bool(buffer, kp);
6571 + }
6572 +
6573 + static int param_set_aauint(const char *val, const struct kernel_param *kp)
6574 + {
6575 +- if (!capable(CAP_MAC_ADMIN))
6576 ++ if (!policy_admin_capable())
6577 + return -EPERM;
6578 + return param_set_uint(val, kp);
6579 + }
6580 +
6581 + static int param_get_aauint(char *buffer, const struct kernel_param *kp)
6582 + {
6583 +- if (!capable(CAP_MAC_ADMIN))
6584 ++ if (!policy_view_capable())
6585 + return -EPERM;
6586 + return param_get_uint(buffer, kp);
6587 + }
6588 +
6589 + static int param_get_audit(char *buffer, struct kernel_param *kp)
6590 + {
6591 +- if (!capable(CAP_MAC_ADMIN))
6592 ++ if (!policy_view_capable())
6593 + return -EPERM;
6594 +
6595 + if (!apparmor_enabled)
6596 +@@ -818,7 +816,7 @@ static int param_get_audit(char *buffer, struct kernel_param *kp)
6597 + static int param_set_audit(const char *val, struct kernel_param *kp)
6598 + {
6599 + int i;
6600 +- if (!capable(CAP_MAC_ADMIN))
6601 ++ if (!policy_admin_capable())
6602 + return -EPERM;
6603 +
6604 + if (!apparmor_enabled)
6605 +@@ -839,7 +837,7 @@ static int param_set_audit(const char *val, struct kernel_param *kp)
6606 +
6607 + static int param_get_mode(char *buffer, struct kernel_param *kp)
6608 + {
6609 +- if (!capable(CAP_MAC_ADMIN))
6610 ++ if (!policy_admin_capable())
6611 + return -EPERM;
6612 +
6613 + if (!apparmor_enabled)
6614 +@@ -851,7 +849,7 @@ static int param_get_mode(char *buffer, struct kernel_param *kp)
6615 + static int param_set_mode(const char *val, struct kernel_param *kp)
6616 + {
6617 + int i;
6618 +- if (!capable(CAP_MAC_ADMIN))
6619 ++ if (!policy_admin_capable())
6620 + return -EPERM;
6621 +
6622 + if (!apparmor_enabled)
6623 +diff --git a/security/apparmor/match.c b/security/apparmor/match.c
6624 +index 727eb4200d5c..3f900fcca8fb 100644
6625 +--- a/security/apparmor/match.c
6626 ++++ b/security/apparmor/match.c
6627 +@@ -47,6 +47,8 @@ static struct table_header *unpack_table(char *blob, size_t bsize)
6628 + * it every time we use td_id as an index
6629 + */
6630 + th.td_id = be16_to_cpu(*(u16 *) (blob)) - 1;
6631 ++ if (th.td_id > YYTD_ID_MAX)
6632 ++ goto out;
6633 + th.td_flags = be16_to_cpu(*(u16 *) (blob + 2));
6634 + th.td_lolen = be32_to_cpu(*(u32 *) (blob + 8));
6635 + blob += sizeof(struct table_header);
6636 +@@ -61,7 +63,9 @@ static struct table_header *unpack_table(char *blob, size_t bsize)
6637 +
6638 + table = kvzalloc(tsize);
6639 + if (table) {
6640 +- *table = th;
6641 ++ table->td_id = th.td_id;
6642 ++ table->td_flags = th.td_flags;
6643 ++ table->td_lolen = th.td_lolen;
6644 + if (th.td_flags == YYTD_DATA8)
6645 + UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
6646 + u8, byte_to_byte);
6647 +@@ -73,14 +77,14 @@ static struct table_header *unpack_table(char *blob, size_t bsize)
6648 + u32, be32_to_cpu);
6649 + else
6650 + goto fail;
6651 ++ /* if table was vmalloced make sure the page tables are synced
6652 ++ * before it is used, as it goes live to all cpus.
6653 ++ */
6654 ++ if (is_vmalloc_addr(table))
6655 ++ vm_unmap_aliases();
6656 + }
6657 +
6658 + out:
6659 +- /* if table was vmalloced make sure the page tables are synced
6660 +- * before it is used, as it goes live to all cpus.
6661 +- */
6662 +- if (is_vmalloc_addr(table))
6663 +- vm_unmap_aliases();
6664 + return table;
6665 + fail:
6666 + kvfree(table);
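The unpack_table() changes bound-check the table id read from the untrusted policy blob before it can ever be used as an index, copy only the validated header fields into the allocation, and move the vmalloc alias flush so it runs only on success. A simplified sketch of the check-then-copy part; the header layout, ID_MAX value and little-endian assumption here are illustrative, not AppArmor's real table format:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ID_MAX 8

struct table_header { uint16_t id; uint16_t flags; uint32_t len; };

static struct table_header *unpack_table(const uint8_t *blob, size_t bsize)
{
    struct table_header th, *table;

    if (bsize < sizeof(th))
        return NULL;
    memcpy(&th, blob, sizeof(th));
    if (th.id > ID_MAX)              /* reject before the id indexes anything */
        return NULL;

    table = calloc(1, sizeof(*table));
    if (!table)
        return NULL;
    table->id = th.id;               /* copy the validated fields individually */
    table->flags = th.flags;
    table->len = th.len;
    return table;
}

int main(void)
{
    uint8_t good[8] = { 3 };         /* id = 3 on a little-endian host */
    uint8_t bad[8]  = { 42 };        /* id = 42: out of range, rejected */
    struct table_header *t = unpack_table(good, sizeof(good));

    printf("good: %s, bad: %s\n", t ? "accepted" : "rejected",
           unpack_table(bad, sizeof(bad)) ? "accepted" : "rejected");
    free(t);
    return 0;
}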
6667 +diff --git a/security/apparmor/path.c b/security/apparmor/path.c
6668 +index 35b394a75d76..5505e0563bc8 100644
6669 +--- a/security/apparmor/path.c
6670 ++++ b/security/apparmor/path.c
6671 +@@ -25,7 +25,6 @@
6672 + #include "include/path.h"
6673 + #include "include/policy.h"
6674 +
6675 +-
6676 + /* modified from dcache.c */
6677 + static int prepend(char **buffer, int buflen, const char *str, int namelen)
6678 + {
6679 +@@ -39,6 +38,38 @@ static int prepend(char **buffer, int buflen, const char *str, int namelen)
6680 +
6681 + #define CHROOT_NSCONNECT (PATH_CHROOT_REL | PATH_CHROOT_NSCONNECT)
6682 +
6683 ++/* If the path is not connected to the expected root,
6684 ++ * check if it is a sysctl and handle specially else remove any
6685 ++ * leading / that __d_path may have returned.
6686 ++ * Unless
6687 ++ * specifically directed to connect the path,
6688 ++ * OR
6689 ++ * if in a chroot and doing chroot relative paths and the path
6690 ++ * resolves to the namespace root (would be connected outside
6691 ++ * of chroot) and specifically directed to connect paths to
6692 ++ * namespace root.
6693 ++ */
6694 ++static int disconnect(const struct path *path, char *buf, char **name,
6695 ++ int flags)
6696 ++{
6697 ++ int error = 0;
6698 ++
6699 ++ if (!(flags & PATH_CONNECT_PATH) &&
6700 ++ !(((flags & CHROOT_NSCONNECT) == CHROOT_NSCONNECT) &&
6701 ++ our_mnt(path->mnt))) {
6702 ++ /* disconnected path, don't return pathname starting
6703 ++ * with '/'
6704 ++ */
6705 ++ error = -EACCES;
6706 ++ if (**name == '/')
6707 ++ *name = *name + 1;
6708 ++ } else if (**name != '/')
6709 ++ /* CONNECT_PATH with missing root */
6710 ++ error = prepend(name, *name - buf, "/", 1);
6711 ++
6712 ++ return error;
6713 ++}
6714 ++
6715 + /**
6716 + * d_namespace_path - lookup a name associated with a given path
6717 + * @path: path to lookup (NOT NULL)
6718 +@@ -74,7 +105,8 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
6719 + * control instead of hard coded /proc
6720 + */
6721 + return prepend(name, *name - buf, "/proc", 5);
6722 +- }
6723 ++ } else
6724 ++ return disconnect(path, buf, name, flags);
6725 + return 0;
6726 + }
6727 +
6728 +@@ -120,29 +152,8 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
6729 + goto out;
6730 + }
6731 +
6732 +- /* If the path is not connected to the expected root,
6733 +- * check if it is a sysctl and handle specially else remove any
6734 +- * leading / that __d_path may have returned.
6735 +- * Unless
6736 +- * specifically directed to connect the path,
6737 +- * OR
6738 +- * if in a chroot and doing chroot relative paths and the path
6739 +- * resolves to the namespace root (would be connected outside
6740 +- * of chroot) and specifically directed to connect paths to
6741 +- * namespace root.
6742 +- */
6743 +- if (!connected) {
6744 +- if (!(flags & PATH_CONNECT_PATH) &&
6745 +- !(((flags & CHROOT_NSCONNECT) == CHROOT_NSCONNECT) &&
6746 +- our_mnt(path->mnt))) {
6747 +- /* disconnected path, don't return pathname starting
6748 +- * with '/'
6749 +- */
6750 +- error = -EACCES;
6751 +- if (*res == '/')
6752 +- *name = res + 1;
6753 +- }
6754 +- }
6755 ++ if (!connected)
6756 ++ error = disconnect(path, buf, name, flags);
6757 +
6758 + out:
6759 + return error;
6760 +diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
6761 +index 705c2879d3a9..179e68d7dc5f 100644
6762 +--- a/security/apparmor/policy.c
6763 ++++ b/security/apparmor/policy.c
6764 +@@ -766,7 +766,9 @@ struct aa_profile *aa_find_child(struct aa_profile *parent, const char *name)
6765 + struct aa_profile *profile;
6766 +
6767 + rcu_read_lock();
6768 +- profile = aa_get_profile(__find_child(&parent->base.profiles, name));
6769 ++ do {
6770 ++ profile = __find_child(&parent->base.profiles, name);
6771 ++ } while (profile && !aa_get_profile_not0(profile));
6772 + rcu_read_unlock();
6773 +
6774 + /* refcount released by caller */
6775 +@@ -916,6 +918,22 @@ static int audit_policy(int op, gfp_t gfp, const char *name, const char *info,
6776 + &sa, NULL);
6777 + }
6778 +
6779 ++bool policy_view_capable(void)
6780 ++{
6781 ++ struct user_namespace *user_ns = current_user_ns();
6782 ++ bool response = false;
6783 ++
6784 ++ if (ns_capable(user_ns, CAP_MAC_ADMIN))
6785 ++ response = true;
6786 ++
6787 ++ return response;
6788 ++}
6789 ++
6790 ++bool policy_admin_capable(void)
6791 ++{
6792 ++ return policy_view_capable() && !aa_g_lock_policy;
6793 ++}
6794 ++
6795 + /**
6796 + * aa_may_manage_policy - can the current task manage policy
6797 + * @op: the policy manipulation operation being done
6798 +@@ -930,7 +948,7 @@ bool aa_may_manage_policy(int op)
6799 + return 0;
6800 + }
6801 +
6802 +- if (!capable(CAP_MAC_ADMIN)) {
6803 ++ if (!policy_admin_capable()) {
6804 + audit_policy(op, GFP_KERNEL, NULL, "not policy admin", -EACCES);
6805 + return 0;
6806 + }
6807 +@@ -1067,7 +1085,7 @@ static int __lookup_replace(struct aa_namespace *ns, const char *hname,
6808 + */
6809 + ssize_t aa_replace_profiles(void *udata, size_t size, bool noreplace)
6810 + {
6811 +- const char *ns_name, *name = NULL, *info = NULL;
6812 ++ const char *ns_name, *info = NULL;
6813 + struct aa_namespace *ns = NULL;
6814 + struct aa_load_ent *ent, *tmp;
6815 + int op = OP_PROF_REPL;
6816 +@@ -1082,18 +1100,15 @@ ssize_t aa_replace_profiles(void *udata, size_t size, bool noreplace)
6817 + /* released below */
6818 + ns = aa_prepare_namespace(ns_name);
6819 + if (!ns) {
6820 +- info = "failed to prepare namespace";
6821 +- error = -ENOMEM;
6822 +- name = ns_name;
6823 +- goto fail;
6824 ++ error = audit_policy(op, GFP_KERNEL, ns_name,
6825 ++ "failed to prepare namespace", -ENOMEM);
6826 ++ goto free;
6827 + }
6828 +
6829 + mutex_lock(&ns->lock);
6830 + /* setup parent and ns info */
6831 + list_for_each_entry(ent, &lh, list) {
6832 + struct aa_policy *policy;
6833 +-
6834 +- name = ent->new->base.hname;
6835 + error = __lookup_replace(ns, ent->new->base.hname, noreplace,
6836 + &ent->old, &info);
6837 + if (error)
6838 +@@ -1121,7 +1136,6 @@ ssize_t aa_replace_profiles(void *udata, size_t size, bool noreplace)
6839 + if (!p) {
6840 + error = -ENOENT;
6841 + info = "parent does not exist";
6842 +- name = ent->new->base.hname;
6843 + goto fail_lock;
6844 + }
6845 + rcu_assign_pointer(ent->new->parent, aa_get_profile(p));
6846 +@@ -1163,7 +1177,7 @@ ssize_t aa_replace_profiles(void *udata, size_t size, bool noreplace)
6847 + list_del_init(&ent->list);
6848 + op = (!ent->old && !ent->rename) ? OP_PROF_LOAD : OP_PROF_REPL;
6849 +
6850 +- audit_policy(op, GFP_ATOMIC, ent->new->base.name, NULL, error);
6851 ++ audit_policy(op, GFP_ATOMIC, ent->new->base.hname, NULL, error);
6852 +
6853 + if (ent->old) {
6854 + __replace_profile(ent->old, ent->new, 1);
6855 +@@ -1187,14 +1201,14 @@ ssize_t aa_replace_profiles(void *udata, size_t size, bool noreplace)
6856 + /* parent replaced in this atomic set? */
6857 + if (newest != parent) {
6858 + aa_get_profile(newest);
6859 +- aa_put_profile(parent);
6860 + rcu_assign_pointer(ent->new->parent, newest);
6861 +- } else
6862 +- aa_put_profile(newest);
6863 ++ aa_put_profile(parent);
6864 ++ }
6865 + /* aafs interface uses replacedby */
6866 + rcu_assign_pointer(ent->new->replacedby->profile,
6867 + aa_get_profile(ent->new));
6868 +- __list_add_profile(&parent->base.profiles, ent->new);
6869 ++ __list_add_profile(&newest->base.profiles, ent->new);
6870 ++ aa_put_profile(newest);
6871 + } else {
6872 + /* aafs interface uses replacedby */
6873 + rcu_assign_pointer(ent->new->replacedby->profile,
6874 +@@ -1214,9 +1228,22 @@ out:
6875 +
6876 + fail_lock:
6877 + mutex_unlock(&ns->lock);
6878 +-fail:
6879 +- error = audit_policy(op, GFP_KERNEL, name, info, error);
6880 +
6881 ++ /* audit cause of failure */
6882 ++ op = (!ent->old) ? OP_PROF_LOAD : OP_PROF_REPL;
6883 ++ audit_policy(op, GFP_KERNEL, ent->new->base.hname, info, error);
6884 ++ /* audit status that rest of profiles in the atomic set failed too */
6885 ++ info = "valid profile in failed atomic policy load";
6886 ++ list_for_each_entry(tmp, &lh, list) {
6887 ++ if (tmp == ent) {
6888 ++ info = "unchecked profile in failed atomic policy load";
6889 ++ /* skip entry that caused failure */
6890 ++ continue;
6891 ++ }
6892 ++ op = (!ent->old) ? OP_PROF_LOAD : OP_PROF_REPL;
6893 ++ audit_policy(op, GFP_KERNEL, tmp->new->base.hname, info, error);
6894 ++ }
6895 ++free:
6896 + list_for_each_entry_safe(ent, tmp, &lh, list) {
6897 + list_del_init(&ent->list);
6898 + aa_load_ent_free(ent);
6899 +diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
6900 +index a689f10930b5..dac2121bc873 100644
6901 +--- a/security/apparmor/policy_unpack.c
6902 ++++ b/security/apparmor/policy_unpack.c
6903 +@@ -583,6 +583,9 @@ static struct aa_profile *unpack_profile(struct aa_ext *e)
6904 + error = PTR_ERR(profile->policy.dfa);
6905 + profile->policy.dfa = NULL;
6906 + goto fail;
6907 ++ } else if (!profile->policy.dfa) {
6908 ++ error = -EPROTO;
6909 ++ goto fail;
6910 + }
6911 + if (!unpack_u32(e, &profile->policy.start[0], "start"))
6912 + /* default start state */
6913 +@@ -676,7 +679,7 @@ static bool verify_xindex(int xindex, int table_size)
6914 + int index, xtype;
6915 + xtype = xindex & AA_X_TYPE_MASK;
6916 + index = xindex & AA_X_INDEX_MASK;
6917 +- if (xtype == AA_X_TABLE && index > table_size)
6918 ++ if (xtype == AA_X_TABLE && index >= table_size)
6919 + return 0;
6920 + return 1;
6921 + }
6922 +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
6923 +index c036e60c34fe..63a335dfd629 100644
6924 +--- a/sound/pci/hda/patch_conexant.c
6925 ++++ b/sound/pci/hda/patch_conexant.c
6926 +@@ -3234,6 +3234,7 @@ enum {
6927 + CXT_FIXUP_HEADPHONE_MIC,
6928 + CXT_FIXUP_GPIO1,
6929 + CXT_FIXUP_ASPIRE_DMIC,
6930 ++ CXT_FIXUP_HP_GATE_MIC,
6931 + };
6932 +
6933 + static void cxt_fixup_stereo_dmic(struct hda_codec *codec,
6934 +@@ -3310,6 +3311,17 @@ static void cxt_fixup_headphone_mic(struct hda_codec *codec,
6935 + }
6936 +
6937 +
6938 ++static void cxt_fixup_hp_gate_mic_jack(struct hda_codec *codec,
6939 ++ const struct hda_fixup *fix,
6940 ++ int action)
6941 ++{
6942 ++ /* the mic pin (0x19) doesn't give an unsolicited event;
6943 ++ * probe the mic pin together with the headphone pin (0x16)
6944 ++ */
6945 ++ if (action == HDA_FIXUP_ACT_PROBE)
6946 ++ snd_hda_jack_set_gating_jack(codec, 0x19, 0x16);
6947 ++}
6948 ++
6949 + /* ThinkPad X200 & co with cxt5051 */
6950 + static const struct hda_pintbl cxt_pincfg_lenovo_x200[] = {
6951 + { 0x16, 0x042140ff }, /* HP (seq# overridden) */
6952 +@@ -3403,6 +3415,10 @@ static const struct hda_fixup cxt_fixups[] = {
6953 + .chained = true,
6954 + .chain_id = CXT_FIXUP_GPIO1,
6955 + },
6956 ++ [CXT_FIXUP_HP_GATE_MIC] = {
6957 ++ .type = HDA_FIXUP_FUNC,
6958 ++ .v.func = cxt_fixup_hp_gate_mic_jack,
6959 ++ },
6960 + };
6961 +
6962 + static const struct snd_pci_quirk cxt5051_fixups[] = {
6963 +@@ -3414,6 +3430,7 @@ static const struct snd_pci_quirk cxt5051_fixups[] = {
6964 + static const struct snd_pci_quirk cxt5066_fixups[] = {
6965 + SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC),
6966 + SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
6967 ++ SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
6968 + SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
6969 + SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
6970 + SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T410", CXT_PINCFG_LENOVO_TP410),
6971 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
6972 +index 06e80327567c..8b816bf65405 100644
6973 +--- a/sound/pci/hda/patch_realtek.c
6974 ++++ b/sound/pci/hda/patch_realtek.c
6975 +@@ -2194,6 +2194,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
6976 + SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_FIXUP_ASUS_W2JC),
6977 + SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
6978 + SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS),
6979 ++ SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
6980 + SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
6981 + SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
6982 + SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
6983 +@@ -4982,6 +4983,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
6984 + SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
6985 + SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A_CHMAP),
6986 + SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_CHMAP),
6987 ++ SND_PCI_QUIRK(0x1043, 0x1963, "ASUS X71SL", ALC662_FIXUP_ASUS_MODE8),
6988 + SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_BASS_CHMAP),
6989 + SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
6990 + SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_FIXUP_ASUS_MODE2),
6991 +diff --git a/sound/usb/card.c b/sound/usb/card.c
6992 +index 96a09226be7d..96a429945e3a 100644
6993 +--- a/sound/usb/card.c
6994 ++++ b/sound/usb/card.c
6995 +@@ -205,7 +205,6 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int
6996 + if (! snd_usb_parse_audio_interface(chip, interface)) {
6997 + usb_set_interface(dev, interface, 0); /* reset the current interface */
6998 + usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L);
6999 +- return -EINVAL;
7000 + }
7001 +
7002 + return 0;
7003 +diff --git a/sound/usb/hiface/pcm.c b/sound/usb/hiface/pcm.c
7004 +index c21a3df9a0df..d4d036fca6cb 100644
7005 +--- a/sound/usb/hiface/pcm.c
7006 ++++ b/sound/usb/hiface/pcm.c
7007 +@@ -445,6 +445,8 @@ static int hiface_pcm_prepare(struct snd_pcm_substream *alsa_sub)
7008 +
7009 + mutex_lock(&rt->stream_mutex);
7010 +
7011 ++ hiface_pcm_stream_stop(rt);
7012 ++
7013 + sub->dma_off = 0;
7014 + sub->period_off = 0;
7015 +
7016 +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
7017 +index 86f46b46f214..afcaafce643c 100644
7018 +--- a/sound/usb/mixer.c
7019 ++++ b/sound/usb/mixer.c
7020 +@@ -893,9 +893,10 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
7021 + case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */
7022 + case USB_ID(0x046d, 0x08ca): /* Logitech Quickcam Fusion */
7023 + case USB_ID(0x046d, 0x0991):
7024 ++ case USB_ID(0x046d, 0x09a2): /* QuickCam Communicate Deluxe/S7500 */
7025 + /* Most audio usb devices lie about volume resolution.
7026 + * Most Logitech webcams have res = 384.
7027 +- * Proboly there is some logitech magic behind this number --fishor
7028 ++ * Probably there is some logitech magic behind this number --fishor
7029 + */
7030 + if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
7031 + snd_printk(KERN_INFO
7032 +diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c
7033 +index 95199e4eea97..f928bfc4852f 100644
7034 +--- a/tools/perf/util/trace-event-scripting.c
7035 ++++ b/tools/perf/util/trace-event-scripting.c
7036 +@@ -91,7 +91,8 @@ static void register_python_scripting(struct scripting_ops *scripting_ops)
7037 + if (err)
7038 + die("error registering py script extension");
7039 +
7040 +- scripting_context = malloc(sizeof(struct scripting_context));
7041 ++ if (scripting_context == NULL)
7042 ++ scripting_context = malloc(sizeof(*scripting_context));
7043 + }
7044 +
7045 + #ifdef NO_LIBPYTHON
7046 +@@ -154,7 +155,8 @@ static void register_perl_scripting(struct scripting_ops *scripting_ops)
7047 + if (err)
7048 + die("error registering pl script extension");
7049 +
7050 +- scripting_context = malloc(sizeof(struct scripting_context));
7051 ++ if (scripting_context == NULL)
7052 ++ scripting_context = malloc(sizeof(*scripting_context));
7053 + }
7054 +
7055 + #ifdef NO_LIBPERL
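Both registration paths now allocate scripting_context only when it is still NULL, so registering the second backend no longer leaks the first allocation. A tiny sketch of that lazily-allocated shared context (the variable name mirrors the diff, but the struct contents and surrounding code are invented):

#include <stdio.h>
#include <stdlib.h>

struct scripting_context { void *event_data; };

static struct scripting_context *scripting_context;

static void register_backend(const char *name)
{
    if (scripting_context == NULL)
        scripting_context = malloc(sizeof(*scripting_context));
    printf("%s registered, context=%p\n", name, (void *)scripting_context);
}

int main(void)
{
    register_backend("python");
    register_backend("perl");     /* reuses the first allocation */
    free(scripting_context);
    return 0;
}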
7056 +diff --git a/tools/testing/selftests/net/run_netsocktests b/tools/testing/selftests/net/run_netsocktests
7057 +index c09a682df56a..16058bbea7a8 100644
7058 +--- a/tools/testing/selftests/net/run_netsocktests
7059 ++++ b/tools/testing/selftests/net/run_netsocktests
7060 +@@ -1,4 +1,4 @@
7061 +-#!/bin/bash
7062 ++#!/bin/sh
7063 +
7064 + echo "--------------------"
7065 + echo "running socket test"