Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:3.12 commit in: /
Date: Tue, 03 Nov 2015 18:38:37
Message-Id: 1446575905.ba218a025ac9caddccdbd233481a4ffffaebc4d3.mpagano@gentoo
commit: ba218a025ac9caddccdbd233481a4ffffaebc4d3
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Nov 3 18:38:25 2015 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Nov 3 18:38:25 2015 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ba218a02

Linux patch 3.12.50

 0000_README | 4 +
 1049_linux-3.12.50.patch | 4795 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4799 insertions(+)

diff --git a/0000_README b/0000_README
index e565511..ce73ef2 100644
--- a/0000_README
+++ b/0000_README
@@ -238,6 +238,10 @@ Patch: 1048_linux-3.12.49.patch
 From: http://www.kernel.org
 Desc: Linux 3.12.49

+Patch: 1049_linux-3.12.50.patch
+From: http://www.kernel.org
+Desc: Linux 3.12.50
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1049_linux-3.12.50.patch b/1049_linux-3.12.50.patch
new file mode 100644
index 0000000..c49a145
--- /dev/null
+++ b/1049_linux-3.12.50.patch
@@ -0,0 +1,4795 @@
+diff --git a/Makefile b/Makefile
+index b2985713121c..cbb29f4a4c43 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 49
++SUBLEVEL = 50
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+
+diff --git a/arch/alpha/include/asm/barrier.h b/arch/alpha/include/asm/barrier.h
+index ce8860a0b32d..3832bdb794fe 100644
+--- a/arch/alpha/include/asm/barrier.h
++++ b/arch/alpha/include/asm/barrier.h
+@@ -3,33 +3,18 @@
+
+ #include <asm/compiler.h>
+
+-#define mb() \
+-__asm__ __volatile__("mb": : :"memory")
++#define mb() __asm__ __volatile__("mb": : :"memory")
++#define rmb() __asm__ __volatile__("mb": : :"memory")
++#define wmb() __asm__ __volatile__("wmb": : :"memory")
+
+-#define rmb() \
+-__asm__ __volatile__("mb": : :"memory")
+-
+-#define wmb() \
+-__asm__ __volatile__("wmb": : :"memory")
+-
+-#define read_barrier_depends() \
+-__asm__ __volatile__("mb": : :"memory")
++#define read_barrier_depends() __asm__ __volatile__("mb": : :"memory")
+
+ #ifdef CONFIG_SMP
+ #define __ASM_SMP_MB "\tmb\n"
+-#define smp_mb() mb()
+-#define smp_rmb() rmb()
+-#define smp_wmb() wmb()
+-#define smp_read_barrier_depends() read_barrier_depends()
+ #else
+ #define __ASM_SMP_MB
+-#define smp_mb() barrier()
+-#define smp_rmb() barrier()
+-#define smp_wmb() barrier()
+-#define smp_read_barrier_depends() do { } while (0)
+ #endif
+
+-#define set_mb(var, value) \
+-do { var = value; mb(); } while (0)
++#include <asm-generic/barrier.h>
+
+ #endif /* __BARRIER_H */
+diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
+index d8dd660898b9..5c359cf55934 100644
+--- a/arch/arc/include/asm/Kbuild
++++ b/arch/arc/include/asm/Kbuild
+@@ -46,3 +46,4 @@ generic-y += ucontext.h
+ generic-y += user.h
+ generic-y += vga.h
+ generic-y += xor.h
++generic-y += barrier.h
+diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
+index 83f03ca6caf6..03e494f695d1 100644
+--- a/arch/arc/include/asm/atomic.h
++++ b/arch/arc/include/asm/atomic.h
+@@ -190,6 +190,11 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+
+ #endif /* !CONFIG_ARC_HAS_LLSC */
+
++#define smp_mb__before_atomic_dec() barrier()
++#define smp_mb__after_atomic_dec() barrier()
++#define smp_mb__before_atomic_inc() barrier()
++#define smp_mb__after_atomic_inc() barrier()
++
+ /**
+ * __atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+diff --git a/arch/arc/include/asm/barrier.h b/arch/arc/include/asm/barrier.h
+deleted file mode 100644
+index f6cb7c4ffb35..000000000000
+--- a/arch/arc/include/asm/barrier.h
++++ /dev/null
+@@ -1,42 +0,0 @@
+-/*
+- * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- */
+-
+-#ifndef __ASM_BARRIER_H
+-#define __ASM_BARRIER_H
+-
+-#ifndef __ASSEMBLY__
+-
+-/* TODO-vineetg: Need to see what this does, don't we need sync anywhere */
+-#define mb() __asm__ __volatile__ ("" : : : "memory")
+-#define rmb() mb()
+-#define wmb() mb()
+-#define set_mb(var, value) do { var = value; mb(); } while (0)
+-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+-#define read_barrier_depends() mb()
+-
+-/* TODO-vineetg verify the correctness of macros here */
+-#ifdef CONFIG_SMP
+-#define smp_mb() mb()
+-#define smp_rmb() rmb()
+-#define smp_wmb() wmb()
+-#else
+-#define smp_mb() barrier()
+-#define smp_rmb() barrier()
+-#define smp_wmb() barrier()
+-#endif
+-
+-#define smp_mb__before_atomic_dec() barrier()
+-#define smp_mb__after_atomic_dec() barrier()
+-#define smp_mb__before_atomic_inc() barrier()
+-#define smp_mb__after_atomic_inc() barrier()
+-
+-#define smp_read_barrier_depends() do { } while (0)
+-
+-#endif
+-
+-#endif
+diff --git a/arch/arm/Makefile b/arch/arm/Makefile
+index db50b626be98..a4254e8ab36c 100644
+--- a/arch/arm/Makefile
++++ b/arch/arm/Makefile
+@@ -55,6 +55,14 @@ endif
+
+ comma = ,
+
++#
++# The Scalar Replacement of Aggregates (SRA) optimization pass in GCC 4.9 and
++# later may result in code being generated that handles signed short and signed
++# char struct members incorrectly. So disable it.
++# (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65932)
++#
++KBUILD_CFLAGS += $(call cc-option,-fno-ipa-sra)
++
+ # This selects which instruction set is used.
+ # Note that GCC does not numerically define an architecture version
+ # macro, but instead defines a whole series of macros which makes
+diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts
+index 65d7b601651c..542e21da2425 100644
+--- a/arch/arm/boot/dts/omap5-uevm.dts
++++ b/arch/arm/boot/dts/omap5-uevm.dts
+@@ -143,8 +143,8 @@
+
+ i2c5_pins: pinmux_i2c5_pins {
+ pinctrl-single,pins = <
+- 0x184 (PIN_INPUT | MUX_MODE0) /* i2c5_scl */
+- 0x186 (PIN_INPUT | MUX_MODE0) /* i2c5_sda */
++ 0x186 (PIN_INPUT | MUX_MODE0) /* i2c5_scl */
++ 0x188 (PIN_INPUT | MUX_MODE0) /* i2c5_sda */
+ >;
+ };
+
+diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
+index ab3304225272..ab5b238ba59a 100644
+--- a/arch/arm/kernel/signal.c
++++ b/arch/arm/kernel/signal.c
+@@ -375,12 +375,23 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
+ */
+ thumb = handler & 1;
+
++#if __LINUX_ARM_ARCH__ >= 6
++ /*
++ * Clear the If-Then Thumb-2 execution state. ARM spec
++ * requires this to be all 000s in ARM mode. Snapdragon
++ * S4/Krait misbehaves on a Thumb=>ARM signal transition
++ * without this.
++ *
++ * We must do this whenever we are running on a Thumb-2
++ * capable CPU, which includes ARMv6T2. However, we elect
++ * to do this whenever we're on an ARMv6 or later CPU for
++ * simplicity.
++ */
++ cpsr &= ~PSR_IT_MASK;
++#endif
++
+ if (thumb) {
+ cpsr |= PSR_T_BIT;
+-#if __LINUX_ARM_ARCH__ >= 7
+- /* clear the If-Then Thumb-2 execution state */
+- cpsr &= ~PSR_IT_MASK;
+-#endif
+ } else
+ cpsr &= ~PSR_T_BIT;
+ }
+diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
+index 4148c05df99a..e06f99f5e37a 100644
+--- a/arch/arm64/Makefile
++++ b/arch/arm64/Makefile
+@@ -29,7 +29,7 @@ comma = ,
+ CHECKFLAGS += -D__aarch64__
+
+ ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
+-CFLAGS_MODULE += -mcmodel=large
++KBUILD_CFLAGS_MODULE += -mcmodel=large
+ endif
+
+ # Default value
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index c23751b06120..cc083b6e4ce7 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -278,6 +278,7 @@ retry:
+ * starvation.
+ */
+ mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
++ mm_flags |= FAULT_FLAG_TRIED;
+ goto retry;
+ }
+ }
+diff --git a/arch/avr32/include/asm/barrier.h b/arch/avr32/include/asm/barrier.h
+index 0961275373db..715100790fd0 100644
+--- a/arch/avr32/include/asm/barrier.h
++++ b/arch/avr32/include/asm/barrier.h
+@@ -8,22 +8,15 @@
+ #ifndef __ASM_AVR32_BARRIER_H
+ #define __ASM_AVR32_BARRIER_H
+
+-#define nop() asm volatile("nop")
+-
+-#define mb() asm volatile("" : : : "memory")
+-#define rmb() mb()
+-#define wmb() asm volatile("sync 0" : : : "memory")
+-#define read_barrier_depends() do { } while(0)
+-#define set_mb(var, value) do { var = value; mb(); } while(0)
++/*
++ * Weirdest thing ever.. no full barrier, but it has a write barrier!
++ */
++#define wmb() asm volatile("sync 0" : : : "memory")
+
+ #ifdef CONFIG_SMP
+ # error "The AVR32 port does not support SMP"
+-#else
+-# define smp_mb() barrier()
+-# define smp_rmb() barrier()
+-# define smp_wmb() barrier()
+-# define smp_read_barrier_depends() do { } while(0)
+ #endif
+
++#include <asm-generic/barrier.h>
+
+ #endif /* __ASM_AVR32_BARRIER_H */
+diff --git a/arch/blackfin/include/asm/barrier.h b/arch/blackfin/include/asm/barrier.h
+index ebb189507dd7..19283a16ac08 100644
+--- a/arch/blackfin/include/asm/barrier.h
++++ b/arch/blackfin/include/asm/barrier.h
+@@ -23,26 +23,10 @@
+ # define rmb() do { barrier(); smp_check_barrier(); } while (0)
+ # define wmb() do { barrier(); smp_mark_barrier(); } while (0)
+ # define read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0)
+-#else
+-# define mb() barrier()
+-# define rmb() barrier()
+-# define wmb() barrier()
+-# define read_barrier_depends() do { } while (0)
+ #endif
+
+-#else /* !CONFIG_SMP */
+-
+-#define mb() barrier()
+-#define rmb() barrier()
+-#define wmb() barrier()
+-#define read_barrier_depends() do { } while (0)
+-
+ #endif /* !CONFIG_SMP */
+
+-#define smp_mb() mb()
+-#define smp_rmb() rmb()
+-#define smp_wmb() wmb()
+-#define set_mb(var, value) do { var = value; mb(); } while (0)
+-#define smp_read_barrier_depends() read_barrier_depends()
++#include <asm-generic/barrier.h>
+
+ #endif /* _BLACKFIN_BARRIER_H */
+diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
+index c8325455520e..497776e4777d 100644
+--- a/arch/cris/include/asm/Kbuild
++++ b/arch/cris/include/asm/Kbuild
+@@ -11,3 +11,4 @@ generic-y += module.h
+ generic-y += trace_clock.h
+ generic-y += vga.h
+ generic-y += xor.h
++generic-y += barrier.h
+diff --git a/arch/cris/include/asm/barrier.h b/arch/cris/include/asm/barrier.h
+deleted file mode 100644
+index 198ad7fa6b25..000000000000
+--- a/arch/cris/include/asm/barrier.h
++++ /dev/null
+@@ -1,25 +0,0 @@
+-#ifndef __ASM_CRIS_BARRIER_H
+-#define __ASM_CRIS_BARRIER_H
+-
+-#define nop() __asm__ __volatile__ ("nop");
+-
+-#define barrier() __asm__ __volatile__("": : :"memory")
+-#define mb() barrier()
+-#define rmb() mb()
+-#define wmb() mb()
+-#define read_barrier_depends() do { } while(0)
+-#define set_mb(var, value) do { var = value; mb(); } while (0)
+-
+-#ifdef CONFIG_SMP
+-#define smp_mb() mb()
+-#define smp_rmb() rmb()
+-#define smp_wmb() wmb()
+-#define smp_read_barrier_depends() read_barrier_depends()
+-#else
+-#define smp_mb() barrier()
+-#define smp_rmb() barrier()
+-#define smp_wmb() barrier()
+-#define smp_read_barrier_depends() do { } while(0)
+-#endif
+-
+-#endif /* __ASM_CRIS_BARRIER_H */
+diff --git a/arch/frv/include/asm/barrier.h b/arch/frv/include/asm/barrier.h
+index 06776ad9f5e9..abbef470154c 100644
+--- a/arch/frv/include/asm/barrier.h
++++ b/arch/frv/include/asm/barrier.h
+@@ -17,13 +17,7 @@
+ #define mb() asm volatile ("membar" : : :"memory")
+ #define rmb() asm volatile ("membar" : : :"memory")
+ #define wmb() asm volatile ("membar" : : :"memory")
+-#define read_barrier_depends() do { } while (0)
+
+-#define smp_mb() barrier()
+-#define smp_rmb() barrier()
+-#define smp_wmb() barrier()
+-#define smp_read_barrier_depends() do {} while(0)
+-#define set_mb(var, value) \
+- do { var = (value); barrier(); } while (0)
++#include <asm-generic/barrier.h>
+
+ #endif /* _ASM_BARRIER_H */
+diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
+index 1da17caac23c..a214fa4502f0 100644
+--- a/arch/hexagon/include/asm/Kbuild
++++ b/arch/hexagon/include/asm/Kbuild
+@@ -53,3 +53,4 @@ generic-y += types.h
+ generic-y += ucontext.h
+ generic-y += unaligned.h
+ generic-y += xor.h
++generic-y += barrier.h
+diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
+index 8a64ff2337f6..7aae4cb2a29a 100644
+--- a/arch/hexagon/include/asm/atomic.h
++++ b/arch/hexagon/include/asm/atomic.h
+@@ -160,8 +160,12 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ #define atomic_sub_and_test(i, v) (atomic_sub_return(i, (v)) == 0)
+ #define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0)
+
+-
+ #define atomic_inc_return(v) (atomic_add_return(1, v))
+ #define atomic_dec_return(v) (atomic_sub_return(1, v))
+
++#define smp_mb__before_atomic_dec() barrier()
++#define smp_mb__after_atomic_dec() barrier()
++#define smp_mb__before_atomic_inc() barrier()
++#define smp_mb__after_atomic_inc() barrier()
++
+ #endif
+diff --git a/arch/hexagon/include/asm/barrier.h b/arch/hexagon/include/asm/barrier.h
+deleted file mode 100644
+index 1041a8e70ce8..000000000000
+--- a/arch/hexagon/include/asm/barrier.h
++++ /dev/null
+@@ -1,41 +0,0 @@
+-/*
+- * Memory barrier definitions for the Hexagon architecture
+- *
+- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 and
+- * only version 2 as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+- * 02110-1301, USA.
+- */
+-
+-#ifndef _ASM_BARRIER_H
+-#define _ASM_BARRIER_H
+-
+-#define rmb() barrier()
+-#define read_barrier_depends() barrier()
+-#define wmb() barrier()
+-#define mb() barrier()
+-#define smp_rmb() barrier()
+-#define smp_read_barrier_depends() barrier()
+-#define smp_wmb() barrier()
+-#define smp_mb() barrier()
+-#define smp_mb__before_atomic_dec() barrier()
+-#define smp_mb__after_atomic_dec() barrier()
+-#define smp_mb__before_atomic_inc() barrier()
+-#define smp_mb__after_atomic_inc() barrier()
+-
+-/* Set a value and use a memory barrier. Used by the scheduler somewhere. */
+-#define set_mb(var, value) \
+- do { var = value; mb(); } while (0)
+-
+-#endif /* _ASM_BARRIER_H */
+diff --git a/arch/m32r/include/asm/barrier.h b/arch/m32r/include/asm/barrier.h
+index 6976621efd3f..1a40265e8d88 100644
+--- a/arch/m32r/include/asm/barrier.h
++++ b/arch/m32r/include/asm/barrier.h
+@@ -11,84 +11,6 @@
+
+ #define nop() __asm__ __volatile__ ("nop" : : )
+
+-/*
+- * Memory barrier.
+- *
+- * mb() prevents loads and stores being reordered across this point.
+- * rmb() prevents loads being reordered across this point.
+- * wmb() prevents stores being reordered across this point.
+- */
+-#define mb() barrier()
+-#define rmb() mb()
+-#define wmb() mb()
+-
+-/**
+- * read_barrier_depends - Flush all pending reads that subsequents reads
+- * depend on.
+- *
+- * No data-dependent reads from memory-like regions are ever reordered
+- * over this barrier. All reads preceding this primitive are guaranteed
+- * to access memory (but not necessarily other CPUs' caches) before any
+- * reads following this primitive that depend on the data return by
+- * any of the preceding reads. This primitive is much lighter weight than
+- * rmb() on most CPUs, and is never heavier weight than is
+- * rmb().
+- *
+- * These ordering constraints are respected by both the local CPU
+- * and the compiler.
+- *
+- * Ordering is not guaranteed by anything other than these primitives,
+- * not even by data dependencies. See the documentation for
+- * memory_barrier() for examples and URLs to more information.
+- *
+- * For example, the following code would force ordering (the initial
+- * value of "a" is zero, "b" is one, and "p" is "&a"):
+- *
+- * <programlisting>
+- * CPU 0 CPU 1
+- *
+- * b = 2;
+- * memory_barrier();
+- * p = &b; q = p;
+- * read_barrier_depends();
+- * d = *q;
+- * </programlisting>
+- *
+- *
+- * because the read of "*q" depends on the read of "p" and these
+- * two reads are separated by a read_barrier_depends(). However,
+- * the following code, with the same initial values for "a" and "b":
+- *
+- * <programlisting>
+- * CPU 0 CPU 1
+- *
+- * a = 2;
+- * memory_barrier();
+- * b = 3; y = b;
+- * read_barrier_depends();
+- * x = a;
+- * </programlisting>
+- *
+- * does not enforce ordering, since there is no data dependency between
+- * the read of "a" and the read of "b". Therefore, on some CPUs, such
+- * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
+- * in cases like this where there are no data dependencies.
+- **/
+-
+-#define read_barrier_depends() do { } while (0)
+-
+-#ifdef CONFIG_SMP
+-#define smp_mb() mb()
+-#define smp_rmb() rmb()
+-#define smp_wmb() wmb()
+-#define smp_read_barrier_depends() read_barrier_depends()
+-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
+-#else
+-#define smp_mb() barrier()
+-#define smp_rmb() barrier()
+-#define smp_wmb() barrier()
+-#define smp_read_barrier_depends() do { } while (0)
+-#define set_mb(var, value) do { var = value; barrier(); } while (0)
+-#endif
++#include <asm-generic/barrier.h>
+
+ #endif /* _ASM_M32R_BARRIER_H */
+diff --git a/arch/m68k/include/asm/barrier.h b/arch/m68k/include/asm/barrier.h
+index 445ce22c23cb..15c5f77c1614 100644
+--- a/arch/m68k/include/asm/barrier.h
++++ b/arch/m68k/include/asm/barrier.h
+@@ -1,20 +1,8 @@
+ #ifndef _M68K_BARRIER_H
+ #define _M68K_BARRIER_H
+
+-/*
+- * Force strict CPU ordering.
+- * Not really required on m68k...
+- */
+ #define nop() do { asm volatile ("nop"); barrier(); } while (0)
+-#define mb() barrier()
+-#define rmb() barrier()
+-#define wmb() barrier()
+-#define read_barrier_depends() ((void)0)
+-#define set_mb(var, value) ({ (var) = (value); wmb(); })
+
+-#define smp_mb() barrier()
+-#define smp_rmb() barrier()
+-#define smp_wmb() barrier()
+-#define smp_read_barrier_depends() ((void)0)
++#include <asm-generic/barrier.h>
+
+ #endif /* _M68K_BARRIER_H */
+diff --git a/arch/m68k/include/asm/linkage.h b/arch/m68k/include/asm/linkage.h
+index 5a822bb790f7..066e74f666ae 100644
+--- a/arch/m68k/include/asm/linkage.h
++++ b/arch/m68k/include/asm/linkage.h
+@@ -4,4 +4,34 @@
+ #define __ALIGN .align 4
+ #define __ALIGN_STR ".align 4"
+
++/*
++ * Make sure the compiler doesn't do anything stupid with the
++ * arguments on the stack - they are owned by the *caller*, not
++ * the callee. This just fools gcc into not spilling into them,
++ * and keeps it from doing tailcall recursion and/or using the
++ * stack slots for temporaries, since they are live and "used"
++ * all the way to the end of the function.
++ */
++#define asmlinkage_protect(n, ret, args...) \
++ __asmlinkage_protect##n(ret, ##args)
++#define __asmlinkage_protect_n(ret, args...) \
++ __asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args)
++#define __asmlinkage_protect0(ret) \
++ __asmlinkage_protect_n(ret)
++#define __asmlinkage_protect1(ret, arg1) \
++ __asmlinkage_protect_n(ret, "m" (arg1))
++#define __asmlinkage_protect2(ret, arg1, arg2) \
++ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2))
++#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \
++ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3))
++#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \
++ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
++ "m" (arg4))
++#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \
++ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
++ "m" (arg4), "m" (arg5))
++#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \
++ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
++ "m" (arg4), "m" (arg5), "m" (arg6))
++
+ #endif
+diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
+index d3c51a6a601d..9197b379d005 100644
+--- a/arch/microblaze/include/asm/Kbuild
++++ b/arch/microblaze/include/asm/Kbuild
+@@ -3,3 +3,4 @@ generic-y += clkdev.h
+ generic-y += exec.h
+ generic-y += trace_clock.h
+ generic-y += syscalls.h
++generic-y += barrier.h
+diff --git a/arch/microblaze/include/asm/barrier.h b/arch/microblaze/include/asm/barrier.h
+deleted file mode 100644
+index df5be3e87044..000000000000
+--- a/arch/microblaze/include/asm/barrier.h
++++ /dev/null
+@@ -1,27 +0,0 @@
+-/*
+- * Copyright (C) 2006 Atmark Techno, Inc.
+- *
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License. See the file "COPYING" in the main directory of this archive
+- * for more details.
+- */
+-
+-#ifndef _ASM_MICROBLAZE_BARRIER_H
+-#define _ASM_MICROBLAZE_BARRIER_H
+-
+-#define nop() asm volatile ("nop")
+-
+-#define smp_read_barrier_depends() do {} while (0)
+-#define read_barrier_depends() do {} while (0)
+-
+-#define mb() barrier()
+-#define rmb() mb()
+-#define wmb() mb()
+-#define set_mb(var, value) do { var = value; mb(); } while (0)
+-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+-
+-#define smp_mb() mb()
+-#define smp_rmb() rmb()
+-#define smp_wmb() wmb()
+-
+-#endif /* _ASM_MICROBLAZE_BARRIER_H */
+diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
+index 5f8b95512580..7dd78fc991bf 100644
+--- a/arch/mips/mm/dma-default.c
++++ b/arch/mips/mm/dma-default.c
+@@ -92,7 +92,7 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
+ else
+ #endif
+ #if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
+- if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
++ if (dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
+ dma_flag = __GFP_DMA;
+ else
+ #endif
+diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild
+index c5d767028306..a530bca92014 100644
+--- a/arch/mn10300/include/asm/Kbuild
++++ b/arch/mn10300/include/asm/Kbuild
+@@ -2,3 +2,4 @@
+ generic-y += clkdev.h
+ generic-y += exec.h
+ generic-y += trace_clock.h
++generic-y += barrier.h
+diff --git a/arch/mn10300/include/asm/barrier.h b/arch/mn10300/include/asm/barrier.h
+deleted file mode 100644
+index 2bd97a5c8af7..000000000000
+--- a/arch/mn10300/include/asm/barrier.h
++++ /dev/null
+@@ -1,37 +0,0 @@
+-/* MN10300 memory barrier definitions
+- *
+- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+- * Written by David Howells (dhowells@××××××.com)
+- *
+- * This program is free software; you can redistribute it and/or
+- * modify it under the terms of the GNU General Public Licence
+- * as published by the Free Software Foundation; either version
+- * 2 of the Licence, or (at your option) any later version.
+- */
+-#ifndef _ASM_BARRIER_H
+-#define _ASM_BARRIER_H
+-
+-#define nop() asm volatile ("nop")
+-
+-#define mb() asm volatile ("": : :"memory")
+-#define rmb() mb()
+-#define wmb() asm volatile ("": : :"memory")
+-
+-#ifdef CONFIG_SMP
+-#define smp_mb() mb()
+-#define smp_rmb() rmb()
+-#define smp_wmb() wmb()
+-#define set_mb(var, value) do { xchg(&var, value); } while (0)
+-#else /* CONFIG_SMP */
+-#define smp_mb() barrier()
+-#define smp_rmb() barrier()
+-#define smp_wmb() barrier()
+-#define set_mb(var, value) do { var = value; mb(); } while (0)
+-#endif /* CONFIG_SMP */
+-
+-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+-
+-#define read_barrier_depends() do {} while (0)
+-#define smp_read_barrier_depends() do {} while (0)
+-
+-#endif /* _ASM_BARRIER_H */
+diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
+index ff4c9faed546..827a8465a536 100644
+--- a/arch/parisc/include/asm/Kbuild
++++ b/arch/parisc/include/asm/Kbuild
+@@ -4,3 +4,4 @@ generic-y += word-at-a-time.h auxvec.h user.h cputime.h emergency-restart.h \
+ div64.h irq_regs.h kdebug.h kvm_para.h local64.h local.h param.h \
+ poll.h xor.h clkdev.h exec.h
+ generic-y += trace_clock.h
++generic-y += barrier.h
+diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h
+deleted file mode 100644
+index e77d834aa803..000000000000
+--- a/arch/parisc/include/asm/barrier.h
++++ /dev/null
+@@ -1,35 +0,0 @@
+-#ifndef __PARISC_BARRIER_H
+-#define __PARISC_BARRIER_H
+-
+-/*
+-** This is simply the barrier() macro from linux/kernel.h but when serial.c
+-** uses tqueue.h uses smp_mb() defined using barrier(), linux/kernel.h
+-** hasn't yet been included yet so it fails, thus repeating the macro here.
+-**
+-** PA-RISC architecture allows for weakly ordered memory accesses although
+-** none of the processors use it. There is a strong ordered bit that is
+-** set in the O-bit of the page directory entry. Operating systems that
+-** can not tolerate out of order accesses should set this bit when mapping
+-** pages. The O-bit of the PSW should also be set to 1 (I don't believe any
+-** of the processor implemented the PSW O-bit). The PCX-W ERS states that
+-** the TLB O-bit is not implemented so the page directory does not need to
+-** have the O-bit set when mapping pages (section 3.1). This section also
+-** states that the PSW Y, Z, G, and O bits are not implemented.
+-** So it looks like nothing needs to be done for parisc-linux (yet).
+-** (thanks to chada for the above comment -ggg)
+-**
+-** The __asm__ op below simple prevents gcc/ld from reordering
+-** instructions across the mb() "call".
+-*/
+-#define mb() __asm__ __volatile__("":::"memory") /* barrier() */
+-#define rmb() mb()
+-#define wmb() mb()
+-#define smp_mb() mb()
+-#define smp_rmb() mb()
+-#define smp_wmb() mb()
+-#define smp_read_barrier_depends() do { } while(0)
+-#define read_barrier_depends() do { } while(0)
+-
+-#define set_mb(var, value) do { var = value; mb(); } while (0)
+-
+-#endif /* __PARISC_BARRIER_H */
+diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild
+index e1c7bb999b06..825c7184fced 100644
+--- a/arch/score/include/asm/Kbuild
++++ b/arch/score/include/asm/Kbuild
+@@ -4,3 +4,4 @@ header-y +=
+ generic-y += clkdev.h
+ generic-y += trace_clock.h
+ generic-y += xor.h
++generic-y += barrier.h
+diff --git a/arch/score/include/asm/barrier.h b/arch/score/include/asm/barrier.h
+deleted file mode 100644
+index 0eacb6471e6d..000000000000
+--- a/arch/score/include/asm/barrier.h
++++ /dev/null
+@@ -1,16 +0,0 @@
+-#ifndef _ASM_SCORE_BARRIER_H
+-#define _ASM_SCORE_BARRIER_H
+-
+-#define mb() barrier()
+-#define rmb() barrier()
+-#define wmb() barrier()
+-#define smp_mb() barrier()
+-#define smp_rmb() barrier()
+-#define smp_wmb() barrier()
+-
+-#define read_barrier_depends() do {} while (0)
+-#define smp_read_barrier_depends() do {} while (0)
+-
+-#define set_mb(var, value) do {var = value; wmb(); } while (0)
+-
+-#endif /* _ASM_SCORE_BARRIER_H */
+diff --git a/arch/sh/include/asm/barrier.h b/arch/sh/include/asm/barrier.h
+index 72c103dae300..43715308b068 100644
+--- a/arch/sh/include/asm/barrier.h
++++ b/arch/sh/include/asm/barrier.h
+@@ -26,29 +26,14 @@
+ #if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
+ #define mb() __asm__ __volatile__ ("synco": : :"memory")
+ #define rmb() mb()
+-#define wmb() __asm__ __volatile__ ("synco": : :"memory")
++#define wmb() mb()
+ #define ctrl_barrier() __icbi(PAGE_OFFSET)
+-#define read_barrier_depends() do { } while(0)
+ #else
+-#define mb() __asm__ __volatile__ ("": : :"memory")
+-#define rmb() mb()
+-#define wmb() __asm__ __volatile__ ("": : :"memory")
+ #define ctrl_barrier() __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
+-#define read_barrier_depends() do { } while(0)
+-#endif
+-
+-#ifdef CONFIG_SMP
+-#define smp_mb() mb()
+-#define smp_rmb() rmb()
+-#define smp_wmb() wmb()
+-#define smp_read_barrier_depends() read_barrier_depends()
+-#else
+-#define smp_mb() barrier()
+-#define smp_rmb() barrier()
+-#define smp_wmb() barrier()
+-#define smp_read_barrier_depends() do { } while(0)
+ #endif
+
+ #define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+
++#include <asm-generic/barrier.h>
++
+ #endif /* __ASM_SH_BARRIER_H */
+diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c
+index ded4cee35318..dc78cdd43e0a 100644
+--- a/arch/sparc/crypto/aes_glue.c
++++ b/arch/sparc/crypto/aes_glue.c
+@@ -433,6 +433,7 @@ static struct crypto_alg algs[] = { {
+ .blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
++ .ivsize = AES_BLOCK_SIZE,
+ .setkey = aes_set_key,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+@@ -452,6 +453,7 @@ static struct crypto_alg algs[] = { {
+ .blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
++ .ivsize = AES_BLOCK_SIZE,
+ .setkey = aes_set_key,
+ .encrypt = ctr_crypt,
+ .decrypt = ctr_crypt,
+diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c
+index 641f55cb61c3..eb87d6dd86b1 100644
+--- a/arch/sparc/crypto/camellia_glue.c
++++ b/arch/sparc/crypto/camellia_glue.c
+@@ -274,6 +274,7 @@ static struct crypto_alg algs[] = { {
+ .blkcipher = {
+ .min_keysize = CAMELLIA_MIN_KEY_SIZE,
+ .max_keysize = CAMELLIA_MAX_KEY_SIZE,
++ .ivsize = CAMELLIA_BLOCK_SIZE,
+ .setkey = camellia_set_key,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
+index d11500972994..1359bfc544e4 100644
+--- a/arch/sparc/crypto/des_glue.c
++++ b/arch/sparc/crypto/des_glue.c
+@@ -429,6 +429,7 @@ static struct crypto_alg algs[] = { {
+ .blkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
++ .ivsize = DES_BLOCK_SIZE,
+ .setkey = des_set_key,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+@@ -485,6 +486,7 @@ static struct crypto_alg algs[] = { {
+ .blkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
++ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = des3_ede_set_key,
+ .encrypt = cbc3_encrypt,
+ .decrypt = cbc3_decrypt,
+diff --git a/arch/sparc/include/asm/barrier_32.h b/arch/sparc/include/asm/barrier_32.h
+index c1b76654ee76..ae69eda288f4 100644
+--- a/arch/sparc/include/asm/barrier_32.h
++++ b/arch/sparc/include/asm/barrier_32.h
+@@ -1,15 +1,7 @@
+ #ifndef __SPARC_BARRIER_H
+ #define __SPARC_BARRIER_H
+
+-/* XXX Change this if we ever use a PSO mode kernel. */
+-#define mb() __asm__ __volatile__ ("" : : : "memory")
+-#define rmb() mb()
+-#define wmb() mb()
+-#define read_barrier_depends() do { } while(0)
+-#define set_mb(__var, __value) do { __var = __value; mb(); } while(0)
+-#define smp_mb() __asm__ __volatile__("":::"memory")
+-#define smp_rmb() __asm__ __volatile__("":::"memory")
+-#define smp_wmb() __asm__ __volatile__("":::"memory")
+-#define smp_read_barrier_depends() do { } while(0)
++#include <asm/processor.h> /* for nop() */
++#include <asm-generic/barrier.h>
+
+ #endif /* !(__SPARC_BARRIER_H) */
+diff --git a/arch/tile/include/asm/barrier.h b/arch/tile/include/asm/barrier.h
+index a9a73da5865d..b5a05d050a8f 100644
+--- a/arch/tile/include/asm/barrier.h
++++ b/arch/tile/include/asm/barrier.h
+@@ -22,59 +22,6 @@
+ #include <arch/spr_def.h>
+ #include <asm/timex.h>
+
+-/*
+- * read_barrier_depends - Flush all pending reads that subsequents reads
+- * depend on.
+- *
+- * No data-dependent reads from memory-like regions are ever reordered
+- * over this barrier. All reads preceding this primitive are guaranteed
+- * to access memory (but not necessarily other CPUs' caches) before any
+- * reads following this primitive that depend on the data return by
+- * any of the preceding reads. This primitive is much lighter weight than
+- * rmb() on most CPUs, and is never heavier weight than is
+- * rmb().
+- *
+- * These ordering constraints are respected by both the local CPU
+- * and the compiler.
+- *
+- * Ordering is not guaranteed by anything other than these primitives,
+- * not even by data dependencies. See the documentation for
+- * memory_barrier() for examples and URLs to more information.
+- *
+- * For example, the following code would force ordering (the initial
+- * value of "a" is zero, "b" is one, and "p" is "&a"):
+- *
+- * <programlisting>
+- * CPU 0 CPU 1
+- *
+- * b = 2;
+- * memory_barrier();
+- * p = &b; q = p;
+- * read_barrier_depends();
+- * d = *q;
+- * </programlisting>
+- *
+- * because the read of "*q" depends on the read of "p" and these
+- * two reads are separated by a read_barrier_depends(). However,
+- * the following code, with the same initial values for "a" and "b":
+- *
+- * <programlisting>
+- * CPU 0 CPU 1
+- *
+- * a = 2;
+- * memory_barrier();
+- * b = 3; y = b;
+- * read_barrier_depends();
+- * x = a;
+- * </programlisting>
+- *
+- * does not enforce ordering, since there is no data dependency between
+- * the read of "a" and the read of "b". Therefore, on some CPUs, such
+- * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
+- * in cases like this where there are no data dependencies.
+- */
+-#define read_barrier_depends() do { } while (0)
+-
+ #define __sync() __insn_mf()
+
+ #include <hv/syscall_public.h>
+@@ -125,20 +72,7 @@ mb_incoherent(void)
+ #define mb() fast_mb()
+ #define iob() fast_iob()
+
+-#ifdef CONFIG_SMP
+-#define smp_mb() mb()
+-#define smp_rmb() rmb()
+-#define smp_wmb() wmb()
+-#define smp_read_barrier_depends() read_barrier_depends()
+-#else
+-#define smp_mb() barrier()
+-#define smp_rmb() barrier()
+-#define smp_wmb() barrier()
+-#define smp_read_barrier_depends() do { } while (0)
+-#endif
+-
+-#define set_mb(var, value) \
+- do { var = value; mb(); } while (0)
++#include <asm-generic/barrier.h>
+
+ #endif /* !__ASSEMBLY__ */
+ #endif /* _ASM_TILE_BARRIER_H */
+diff --git a/arch/unicore32/include/asm/barrier.h b/arch/unicore32/include/asm/barrier.h
+index a6620e5336b6..83d6a520f4bd 100644
+--- a/arch/unicore32/include/asm/barrier.h
++++ b/arch/unicore32/include/asm/barrier.h
+@@ -14,15 +14,6 @@
+ #define dsb() __asm__ __volatile__ ("" : : : "memory")
+ #define dmb() __asm__ __volatile__ ("" : : : "memory")
+
+-#define mb() barrier()
+-#define rmb() barrier()
+-#define wmb() barrier()
+-#define smp_mb() barrier()
+-#define smp_rmb() barrier()
+-#define smp_wmb() barrier()
+-#define read_barrier_depends() do { } while (0)
+-#define smp_read_barrier_depends() do { } while (0)
+-
+-#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
++#include <asm-generic/barrier.h>
+
+ #endif /* __UNICORE_BARRIER_H__ */
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 7170f1738793..5c2742b75be1 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -351,6 +351,13 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
+ apic_write(APIC_LVTT, lvtt_value);
+
+ if (lvtt_value & APIC_LVT_TIMER_TSCDEADLINE) {
++ /*
++ * See Intel SDM: TSC-Deadline Mode chapter. In xAPIC mode,
++ * writing to the APIC LVTT and TSC_DEADLINE MSR isn't serialized.
++ * According to Intel, MFENCE can do the serialization here.
++ */
++ asm volatile("mfence" : : : "memory");
++
+ printk_once(KERN_DEBUG "TSC deadline timer enabled\n");
+ return;
+ }
+diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
+index 7ed99df028ca..ead3e7c9672e 100644
+--- a/arch/x86/kernel/entry_64.S
++++ b/arch/x86/kernel/entry_64.S
+@@ -1675,7 +1675,18 @@ END(error_exit)
+ /* runs on exception stack */
+ ENTRY(nmi)
+ INTR_FRAME
++ /*
++ * Fix up the exception frame if we're on Xen.
++ * PARAVIRT_ADJUST_EXCEPTION_FRAME is guaranteed to push at most
++ * one value to the stack on native, so it may clobber the rdx
++ * scratch slot, but it won't clobber any of the important
++ * slots past it.
++ *
++ * Xen is a different story, because the Xen frame itself overlaps
++ * the "NMI executing" variable.
++ */
+ PARAVIRT_ADJUST_EXCEPTION_FRAME
++
+ /*
+ * We allow breakpoints in NMIs. If a breakpoint occurs, then
+ * the iretq it performs will take us out of NMI context.
+@@ -1727,9 +1738,12 @@ ENTRY(nmi)
+ * we don't want to enable interrupts, because then we'll end
+ * up in an awkward situation in which IRQs are on but NMIs
+ * are off.
++ *
++ * We also must not push anything to the stack before switching
++ * stacks lest we corrupt the "NMI executing" variable.
+ */
+
+- SWAPGS
++ SWAPGS_UNSAFE_STACK
+ cld
+ movq %rsp, %rdx
+ movq PER_CPU_VAR(kernel_stack), %rsp
+diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
+index 1b10af835c31..45c2045692bd 100644
+--- a/arch/x86/kernel/paravirt.c
++++ b/arch/x86/kernel/paravirt.c
+@@ -40,10 +40,18 @@
+ #include <asm/timer.h>
+ #include <asm/special_insns.h>
+
+-/* nop stub */
+-void _paravirt_nop(void)
+-{
+-}
++/*
++ * nop stub, which must not clobber anything *including the stack* to
++ * avoid confusing the entry prologues.
++ */
++extern void _paravirt_nop(void);
++asm (".pushsection .entry.text, \"ax\"\n"
++ ".global _paravirt_nop\n"
++ "_paravirt_nop:\n\t"
++ "ret\n\t"
++ ".size _paravirt_nop, . - _paravirt_nop\n\t"
++ ".type _paravirt_nop, @function\n\t"
++ ".popsection");
+
+ /* identity function, which can be inlined */
+ u32 _paravirt_ident_32(u32 x)
+diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
+index cefe57ce4ebd..b40765803d05 100644
+--- a/arch/x86/kernel/tsc.c
++++ b/arch/x86/kernel/tsc.c
+@@ -20,6 +20,7 @@
+ #include <asm/hypervisor.h>
+ #include <asm/nmi.h>
+ #include <asm/x86_init.h>
++#include <asm/geode.h>
+
+ unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */
+ EXPORT_SYMBOL(cpu_khz);
+@@ -812,15 +813,17 @@ EXPORT_SYMBOL_GPL(mark_tsc_unstable);
+
+ static void __init check_system_tsc_reliable(void)
+ {
+-#ifdef CONFIG_MGEODE_LX
+- /* RTSC counts during suspend */
++#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
++ if (is_geode_lx()) {
++ /* RTSC counts during suspend */
+ #define RTSC_SUSP 0x100
+- unsigned long res_low, res_high;
++ unsigned long res_low, res_high;
+
+- rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
+- /* Geode_LX - the OLPC CPU has a very reliable TSC */
+- if (res_low & RTSC_SUSP)
+- tsc_clocksource_reliable = 1;
++ rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
++ /* Geode_LX - the OLPC CPU has a very reliable TSC */
++ if (res_low & RTSC_SUSP)
++ tsc_clocksource_reliable = 1;
++ }
+ #endif
+ if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
+ tsc_clocksource_reliable = 1;
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 2996635196d3..d1a065ec683f 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -496,7 +496,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+ if (svm->vmcb->control.next_rip != 0) {
+- WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS));
++ WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
+ svm->next_rip = svm->vmcb->control.next_rip;
+ }
+
+diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
+index b599241aea81..a93e32722ab1 100644
+--- a/arch/x86/mm/init_64.c
++++ b/arch/x86/mm/init_64.c
+@@ -1131,7 +1131,7 @@ void mark_rodata_ro(void)
+ * has been zapped already via cleanup_highmem().
+ */
+ all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
+- set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT);
++ set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);
+
+ rodata_test();
+
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index 2cbc2f2cf43e..b2de632861c2 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -33,6 +33,10 @@
+ #include <linux/memblock.h>
+ #include <linux/edd.h>
+
++#ifdef CONFIG_KEXEC
++#include <linux/kexec.h>
++#endif
++
+ #include <xen/xen.h>
+ #include <xen/events.h>
+ #include <xen/interface/xen.h>
+@@ -1746,6 +1750,21 @@ static struct notifier_block xen_hvm_cpu_notifier = {
+ .notifier_call = xen_hvm_cpu_notify,
+ };
+
++#ifdef CONFIG_KEXEC
++static void xen_hvm_shutdown(void)
++{
++ native_machine_shutdown();
++ if (kexec_in_progress)
++ xen_reboot(SHUTDOWN_soft_reset);
++}
++
++static void xen_hvm_crash_shutdown(struct pt_regs *regs)
++{
++ native_machine_crash_shutdown(regs);
++ xen_reboot(SHUTDOWN_soft_reset);
++}
++#endif
++
+ static void __init xen_hvm_guest_init(void)
+ {
+ init_hvm_pv_info();
+@@ -1762,6 +1781,10 @@ static void __init xen_hvm_guest_init(void)
+ x86_init.irqs.intr_init = xen_init_IRQ;
+ xen_hvm_init_time_ops();
+ xen_hvm_init_mmu_ops();
++#ifdef CONFIG_KEXEC
++ machine_ops.shutdown = xen_hvm_shutdown;
++ machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
++#endif
+ }
+
+ static uint32_t __init xen_hvm_platform(void)
+diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h
+index ef021677d536..e1ee6b51dfc5 100644
+--- a/arch/xtensa/include/asm/barrier.h
++++ b/arch/xtensa/include/asm/barrier.h
+@@ -9,21 +9,14 @@
+ #ifndef _XTENSA_SYSTEM_H
+ #define _XTENSA_SYSTEM_H
+
+-#define smp_read_barrier_depends() do { } while(0)
+-#define read_barrier_depends() do { } while(0)
+-
+ #define mb() ({ __asm__ __volatile__("memw" : : : "memory"); })
+ #define rmb() barrier()
+ #define wmb() mb()
+
+ #ifdef CONFIG_SMP
+ #error smp_* not defined
+-#else
+-#define smp_mb() barrier()
+-#define smp_rmb() barrier()
+-#define smp_wmb() barrier()
+ #endif
+
+-#define set_mb(var, value) do { var = value; mb(); } while (0)
++#include <asm-generic/barrier.h>
+
+ #endif /* _XTENSA_SYSTEM_H */
+diff --git a/crypto/ahash.c b/crypto/ahash.c
+index 793a27f2493e..857ae2b2a2a2 100644
+--- a/crypto/ahash.c
++++ b/crypto/ahash.c
+@@ -462,7 +462,8 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
+ struct crypto_alg *base = &alg->halg.base;
+
+ if (alg->halg.digestsize > PAGE_SIZE / 8 ||
+- alg->halg.statesize > PAGE_SIZE / 8)
++ alg->halg.statesize > PAGE_SIZE / 8 ||
++ alg->halg.statesize == 0)
+ return -EINVAL;
+
+ base->cra_type = &crypto_ahash_type;
+diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
+index b18c7da77067..8135feff72a2 100644
+--- a/drivers/base/regmap/regmap-debugfs.c
++++ b/drivers/base/regmap/regmap-debugfs.c
+@@ -23,8 +23,7 @@ static struct dentry *regmap_debugfs_root;
+ /* Calculate the length of a fixed format */
+ static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size)
+ {
+- snprintf(buf, buf_size, "%x", max_val);
+- return strlen(buf);
++ return snprintf(NULL, 0, "%x", max_val);
+ }
+
+ static ssize_t regmap_name_read_file(struct file *file,
+@@ -423,7 +422,7 @@ static ssize_t regmap_access_read_file(struct file *file,
+ /* If we're in the region the user is trying to read */
+ if (p >= *ppos) {
+ /* ...but not beyond it */
+- if (buf_pos >= count - 1 - tot_len)
++ if (buf_pos + tot_len + 1 >= count)
+ break;
+
+ /* Format the register */
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index 63ff17fc23df..66f632730969 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -4868,7 +4868,6 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
+ out_err:
+ if (parent) {
+ rbd_dev_unparent(rbd_dev);
+- kfree(rbd_dev->header_name);
+ rbd_dev_destroy(parent);
+ } else {
+ rbd_put_client(rbdc);
+diff --git a/drivers/cpuidle/cpuidle-ux500.c b/drivers/cpuidle/cpuidle-ux500.c
+index e0564652af35..5e35804b1a95 100644
+--- a/drivers/cpuidle/cpuidle-ux500.c
++++ b/drivers/cpuidle/cpuidle-ux500.c
+@@ -111,7 +111,7 @@ static struct cpuidle_driver ux500_idle_driver = {
+ .state_count = 2,
+ };
+
+-static int __init dbx500_cpuidle_probe(struct platform_device *pdev)
++static int dbx500_cpuidle_probe(struct platform_device *pdev)
+ {
+ /* Configure wake up reasons */
+ prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
+diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
+index a8884b8aaa9e..c128aab076ab 100644
+--- a/drivers/dma/dw/core.c
++++ b/drivers/dma/dw/core.c
+@@ -1585,7 +1585,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
+ INIT_LIST_HEAD(&dw->dma.channels);
+ for (i = 0; i < nr_channels; i++) {
+ struct dw_dma_chan *dwc = &dw->chan[i];
+- int r = nr_channels - i - 1;
+
+ dwc->chan.device = &dw->dma;
+ dma_cookie_init(&dwc->chan);
+@@ -1597,7 +1596,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
+
+ /* 7 is highest priority & 0 is lowest. */
+ if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
+- dwc->priority = r;
++ dwc->priority = nr_channels - i - 1;
+ else
+ dwc->priority = i;
+
+@@ -1617,6 +1616,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
+ /* Hardware configuration */
+ if (autocfg) {
+ unsigned int dwc_params;
++ unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
+ void __iomem *addr = chip->regs + r * sizeof(u32);
+
+ dwc_params = dma_read_byaddr(addr, DWC_PARAMS);
+diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
+index d752c96d6090..bdceb60998d3 100644
+--- a/drivers/gpu/drm/drm_lock.c
++++ b/drivers/gpu/drm/drm_lock.c
+@@ -58,6 +58,9 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
+ struct drm_master *master = file_priv->master;
+ int ret = 0;
+
++ if (drm_core_check_feature(dev, DRIVER_MODESET))
++ return -EINVAL;
++
+ ++file_priv->lock_count;
+
+ if (lock->context == DRM_KERNEL_CONTEXT) {
+@@ -151,6 +154,9 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
+ struct drm_lock *lock = data;
+ struct drm_master *master = file_priv->master;
+
++ if (drm_core_check_feature(dev, DRIVER_MODESET))
++ return -EINVAL;
++
+ if (lock->context == DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Process %d using kernel context %d\n",
+ task_pid_nr(current), lock->context);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+index a86ecf65c164..2268dd52f3c6 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
++++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+@@ -183,8 +183,30 @@ nouveau_fbcon_sync(struct fb_info *info)
+ return 0;
+ }
+
++static int
++nouveau_fbcon_open(struct fb_info *info, int user)
++{
++ struct nouveau_fbdev *fbcon = info->par;
++ struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
++ int ret = pm_runtime_get_sync(drm->dev->dev);
++ if (ret < 0 && ret != -EACCES)
++ return ret;
++ return 0;
++}
++
++static int
++nouveau_fbcon_release(struct fb_info *info, int user)
++{
++ struct nouveau_fbdev *fbcon = info->par;
++ struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
++ pm_runtime_put(drm->dev->dev);
++ return 0;
++}
++
+ static struct fb_ops nouveau_fbcon_ops = {
+ .owner = THIS_MODULE,
++ .fb_open = nouveau_fbcon_open,
++ .fb_release = nouveau_fbcon_release,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = nouveau_fbcon_fillrect,
+@@ -200,6 +222,8 @@ static struct fb_ops nouveau_fbcon_ops = {
+
+ static struct fb_ops nouveau_fbcon_sw_ops = {
+ .owner = THIS_MODULE,
++ .fb_open = nouveau_fbcon_open,
++ .fb_release = nouveau_fbcon_release,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = cfb_fillrect,
+diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
+index ea0904875c74..98976f054597 100644
+--- a/drivers/gpu/drm/qxl/qxl_display.c
++++ b/drivers/gpu/drm/qxl/qxl_display.c
+@@ -537,7 +537,7 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
+ adjusted_mode->hdisplay,
+ adjusted_mode->vdisplay);
+
+- if (qcrtc->index == 0)
++ if (bo->is_primary == false)
+ recreate_primary = true;
+
+ if (bo->surf.stride * bo->surf.height > qdev->vram_size) {
+@@ -799,13 +799,15 @@ static enum drm_connector_status qxl_conn_detect(
+ drm_connector_to_qxl_output(connector);
+ struct drm_device *ddev = connector->dev;
+ struct qxl_device *qdev = ddev->dev_private;
+- int connected;
++ bool connected = false;
+
+ /* The first monitor is always connected */
+- connected = (output->index == 0) ||
+- (qdev->client_monitors_config &&
+- qdev->client_monitors_config->count > output->index &&
+- qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]));
++ if (!qdev->client_monitors_config) {
++ if (output->index == 0)
++ connected = true;
++ } else
++ connected = qdev->client_monitors_config->count > output->index &&
++ qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]);
+
+ DRM_DEBUG("\n");
+ return connected ? connector_status_connected
+diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
+index 6d9649471f28..68fd96a50fc7 100644
+--- a/drivers/hid/hid-apple.c
++++ b/drivers/hid/hid-apple.c
+@@ -546,6 +546,12 @@ static const struct hid_device_id apple_devices[] = {
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS),
+ .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI),
++ .driver_data = APPLE_HAS_FN },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO),
++ .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS),
++ .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
+ .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index eb23021390cb..85b0da8c33f4 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1695,6 +1695,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
+@@ -2370,6 +2373,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
1464 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
1465 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
1466 + { }
1467 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
1468 +index 6da09931a987..50b25fad982d 100644
1469 +--- a/drivers/hid/hid-ids.h
1470 ++++ b/drivers/hid/hid-ids.h
1471 +@@ -139,6 +139,9 @@
1472 + #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
1473 + #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
1474 + #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
1475 ++#define USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI 0x0272
1476 ++#define USB_DEVICE_ID_APPLE_WELLSPRING9_ISO 0x0273
1477 ++#define USB_DEVICE_ID_APPLE_WELLSPRING9_JIS 0x0274
1478 + #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
1479 + #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
1480 + #define USB_DEVICE_ID_APPLE_IRCONTROL 0x8240
1481 +@@ -878,7 +881,8 @@
1482 + #define USB_DEVICE_ID_TOUCHPACK_RTS 0x1688
1483 +
1484 + #define USB_VENDOR_ID_TPV 0x25aa
1485 +-#define USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN 0x8883
1486 ++#define USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882 0x8882
1487 ++#define USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8883 0x8883
1488 +
1489 + #define USB_VENDOR_ID_TURBOX 0x062a
1490 + #define USB_DEVICE_ID_TURBOX_KEYBOARD 0x0201
1491 +diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
1492 +index 7bc98db768eb..7166d7fb43de 100644
1493 +--- a/drivers/hid/usbhid/hid-quirks.c
1494 ++++ b/drivers/hid/usbhid/hid-quirks.c
1495 +@@ -106,7 +106,8 @@ static const struct hid_blacklist {
1496 + { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
1497 + { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1, HID_QUIRK_NOGET },
1498 + { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2, HID_QUIRK_NOGET },
1499 +- { USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN, HID_QUIRK_NOGET },
1500 ++ { USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882, HID_QUIRK_NOGET },
1501 ++ { USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8883, HID_QUIRK_NOGET },
1502 + { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
1503 + { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT },
1504 + { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT },
1505 +diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
1506 +index b6d28439f1b9..96dc7e7a58e3 100644
1507 +--- a/drivers/hwmon/nct6775.c
1508 ++++ b/drivers/hwmon/nct6775.c
1509 +@@ -348,6 +348,10 @@ static const u16 NCT6775_REG_TEMP_CRIT[ARRAY_SIZE(nct6775_temp_label) - 1]
1510 +
1511 + /* NCT6776 specific data */
1512 +
1513 ++/* STEP_UP_TIME and STEP_DOWN_TIME regs are swapped for all chips but NCT6775 */
1514 ++#define NCT6776_REG_FAN_STEP_UP_TIME NCT6775_REG_FAN_STEP_DOWN_TIME
1515 ++#define NCT6776_REG_FAN_STEP_DOWN_TIME NCT6775_REG_FAN_STEP_UP_TIME
1516 ++
1517 + static const s8 NCT6776_ALARM_BITS[] = {
1518 + 0, 1, 2, 3, 8, 21, 20, 16, /* in0.. in7 */
1519 + 17, -1, -1, -1, -1, -1, -1, /* in8..in14 */
1520 +@@ -3492,8 +3496,8 @@ static int nct6775_probe(struct platform_device *pdev)
1521 + data->REG_FAN_PULSES = NCT6776_REG_FAN_PULSES;
1522 + data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
1523 + data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
1524 +- data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
1525 +- data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
1526 ++ data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
1527 ++ data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
1528 + data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
1529 + data->REG_PWM[0] = NCT6775_REG_PWM;
1530 + data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
1531 +@@ -3562,8 +3566,8 @@ static int nct6775_probe(struct platform_device *pdev)
1532 + data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES;
1533 + data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
1534 + data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
1535 +- data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
1536 +- data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
1537 ++ data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
1538 ++ data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
1539 + data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
1540 + data->REG_PWM[0] = NCT6775_REG_PWM;
1541 + data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
1542 +@@ -3636,8 +3640,8 @@ static int nct6775_probe(struct platform_device *pdev)
1543 + data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES;
1544 + data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
1545 + data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
1546 +- data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
1547 +- data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
1548 ++ data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
1549 ++ data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
1550 + data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
1551 + data->REG_PWM[0] = NCT6775_REG_PWM;
1552 + data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
1553 +diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
1554 +index d0bdac0498ce..f7439c556413 100644
1555 +--- a/drivers/i2c/busses/i2c-designware-platdrv.c
1556 ++++ b/drivers/i2c/busses/i2c-designware-platdrv.c
1557 +@@ -28,6 +28,7 @@
1558 + #include <linux/kernel.h>
1559 + #include <linux/module.h>
1560 + #include <linux/delay.h>
1561 ++#include <linux/dmi.h>
1562 + #include <linux/i2c.h>
1563 + #include <linux/clk.h>
1564 + #include <linux/errno.h>
1565 +@@ -53,6 +54,22 @@ static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
1566 + }
1567 +
1568 + #ifdef CONFIG_ACPI
1569 ++/*
1570 ++ * The HCNT/LCNT information coming from ACPI should be the most accurate
1571 ++ * for a given platform. However, some systems get it wrong. On such systems
1572 ++ * we get better results by calculating those based on the input clock.
1573 ++ */
1574 ++static const struct dmi_system_id dw_i2c_no_acpi_params[] = {
1575 ++ {
1576 ++ .ident = "Dell Inspiron 7348",
1577 ++ .matches = {
1578 ++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1579 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7348"),
1580 ++ },
1581 ++ },
1582 ++ { }
1583 ++};
1584 ++
1585 + static void dw_i2c_acpi_params(struct platform_device *pdev, char method[],
1586 + u16 *hcnt, u16 *lcnt, u32 *sda_hold)
1587 + {
1588 +@@ -60,6 +77,9 @@ static void dw_i2c_acpi_params(struct platform_device *pdev, char method[],
1589 + acpi_handle handle = ACPI_HANDLE(&pdev->dev);
1590 + union acpi_object *obj;
1591 +
1592 ++ if (dmi_check_system(dw_i2c_no_acpi_params))
1593 ++ return;
1594 ++
1595 + if (ACPI_FAILURE(acpi_evaluate_object(handle, method, NULL, &buf)))
1596 + return;
1597 +
1598 +diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
1599 +index c8a42602205b..622b6fce149b 100644
1600 +--- a/drivers/i2c/busses/i2c-rcar.c
1601 ++++ b/drivers/i2c/busses/i2c-rcar.c
1602 +@@ -690,15 +690,16 @@ static int rcar_i2c_probe(struct platform_device *pdev)
1603 + return ret;
1604 + }
1605 +
1606 ++ pm_runtime_enable(dev);
1607 ++ platform_set_drvdata(pdev, priv);
1608 ++
1609 + ret = i2c_add_numbered_adapter(adap);
1610 + if (ret < 0) {
1611 + dev_err(dev, "reg adap failed: %d\n", ret);
1612 ++ pm_runtime_disable(dev);
1613 + return ret;
1614 + }
1615 +
1616 +- pm_runtime_enable(dev);
1617 +- platform_set_drvdata(pdev, priv);
1618 +-
1619 + dev_info(dev, "probed\n");
1620 +
1621 + return 0;
1622 +diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
1623 +index ce09bf932831..8983e7fa0fb4 100644
1624 +--- a/drivers/i2c/busses/i2c-s3c2410.c
1625 ++++ b/drivers/i2c/busses/i2c-s3c2410.c
1626 +@@ -1151,17 +1151,19 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
1627 + i2c->adap.nr = i2c->pdata->bus_num;
1628 + i2c->adap.dev.of_node = pdev->dev.of_node;
1629 +
1630 ++ platform_set_drvdata(pdev, i2c);
1631 ++
1632 ++ pm_runtime_enable(&pdev->dev);
1633 ++
1634 + ret = i2c_add_numbered_adapter(&i2c->adap);
1635 + if (ret < 0) {
1636 + dev_err(&pdev->dev, "failed to add bus to i2c core\n");
1637 ++ pm_runtime_disable(&pdev->dev);
1638 + s3c24xx_i2c_deregister_cpufreq(i2c);
1639 + clk_unprepare(i2c->clk);
1640 + return ret;
1641 + }
1642 +
1643 +- platform_set_drvdata(pdev, i2c);
1644 +-
1645 +- pm_runtime_enable(&pdev->dev);
1646 + pm_runtime_enable(&i2c->adap.dev);
1647 +
1648 + dev_info(&pdev->dev, "%s: S3C I2C adapter\n", dev_name(&i2c->adap.dev));
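
Both i2c hunks above (i2c-rcar and i2c-s3c2410) apply the same ordering rule: any state the adapter may need — drvdata, runtime PM — must be set up before i2c_add_numbered_adapter(), because registration can trigger transfers immediately, and a failed registration must unwind what was enabled. A minimal sketch of the pattern, using a hypothetical foo_i2c driver and the standard platform/runtime-PM APIs (illustrative, not from this patch):

#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

struct foo_i2c {			/* hypothetical driver state */
	struct i2c_adapter adap;
};

static int foo_i2c_probe(struct platform_device *pdev)
{
	struct foo_i2c *priv;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Make state reachable before the adapter goes live:
	 * i2c_add_numbered_adapter() can trigger transfers at once. */
	platform_set_drvdata(pdev, priv);
	pm_runtime_enable(&pdev->dev);

	ret = i2c_add_numbered_adapter(&priv->adap);
	if (ret < 0) {
		pm_runtime_disable(&pdev->dev);	/* unwind on failure */
		return ret;
	}
	return 0;
}
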
1649 +diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
1650 +index 9a51eb2242a0..2e04d5253130 100644
1651 +--- a/drivers/infiniband/ulp/isert/ib_isert.c
1652 ++++ b/drivers/infiniband/ulp/isert/ib_isert.c
1653 +@@ -2541,9 +2541,16 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
1654 + static int
1655 + isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
1656 + {
1657 +- int ret;
1658 ++ struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1659 ++ int ret = 0;
1660 +
1661 + switch (state) {
1662 ++ case ISTATE_REMOVE:
1663 ++ spin_lock_bh(&conn->cmd_lock);
1664 ++ list_del_init(&cmd->i_conn_node);
1665 ++ spin_unlock_bh(&conn->cmd_lock);
1666 ++ isert_put_cmd(isert_cmd, true);
1667 ++ break;
1668 + case ISTATE_SEND_NOPIN_WANT_RESPONSE:
1669 + ret = isert_put_nopin(cmd, conn, false);
1670 + break;
1671 +diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig
1672 +index 56eb471b5576..4215b5382092 100644
1673 +--- a/drivers/input/joystick/Kconfig
1674 ++++ b/drivers/input/joystick/Kconfig
1675 +@@ -196,6 +196,7 @@ config JOYSTICK_TWIDJOY
1676 + config JOYSTICK_ZHENHUA
1677 + tristate "5-byte Zhenhua RC transmitter"
1678 + select SERIO
1679 ++ select BITREVERSE
1680 + help
1681 + Say Y here if you have a Zhen Hua PPM-4CH transmitter which is
1682 + supplied with a ready to fly micro electric indoor helicopters
1683 +diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
1684 +index 30acfd49fa6c..1ba3490b9ffe 100644
1685 +--- a/drivers/input/keyboard/omap4-keypad.c
1686 ++++ b/drivers/input/keyboard/omap4-keypad.c
1687 +@@ -284,7 +284,7 @@ static int omap4_keypad_probe(struct platform_device *pdev)
1688 + } else {
1689 + error = omap4_keypad_parse_dt(&pdev->dev, keypad_data);
1690 + if (error)
1691 +- return error;
1692 ++ goto err_free_keypad;
1693 + }
1694 +
1695 + res = request_mem_region(res->start, resource_size(res), pdev->name);
1696 +diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
1697 +index cff065f6261c..de3d92077c77 100644
1698 +--- a/drivers/input/mouse/psmouse-base.c
1699 ++++ b/drivers/input/mouse/psmouse-base.c
1700 +@@ -1441,6 +1441,10 @@ static int psmouse_connect(struct serio *serio, struct serio_driver *drv)
1701 + if (error)
1702 + goto err_clear_drvdata;
1703 +
1704 ++ /* give PT device some time to settle down before probing */
1705 ++ if (serio->id.type == SERIO_PS_PSTHRU)
1706 ++ usleep_range(10000, 15000);
1707 ++
1708 + if (psmouse_probe(psmouse) < 0) {
1709 + error = -ENODEV;
1710 + goto err_close_serio;
1711 +diff --git a/drivers/input/serio/parkbd.c b/drivers/input/serio/parkbd.c
1712 +index 26b45936f9fd..1e8cd6f1fe9e 100644
1713 +--- a/drivers/input/serio/parkbd.c
1714 ++++ b/drivers/input/serio/parkbd.c
1715 +@@ -194,6 +194,7 @@ static int __init parkbd_init(void)
1716 + parkbd_port = parkbd_allocate_serio();
1717 + if (!parkbd_port) {
1718 + parport_release(parkbd_dev);
1719 ++ parport_unregister_device(parkbd_dev);
1720 + return -ENOMEM;
1721 + }
1722 +
1723 +diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
1724 +index b853bb47fc7d..d22b4af761f5 100644
1725 +--- a/drivers/iommu/amd_iommu.c
1726 ++++ b/drivers/iommu/amd_iommu.c
1727 +@@ -1750,14 +1750,16 @@ static unsigned long dma_ops_area_alloc(struct device *dev,
1728 + unsigned long next_bit = dom->next_address % APERTURE_RANGE_SIZE;
1729 + int max_index = dom->aperture_size >> APERTURE_RANGE_SHIFT;
1730 + int i = start >> APERTURE_RANGE_SHIFT;
1731 +- unsigned long boundary_size;
1732 ++ unsigned long boundary_size, mask;
1733 + unsigned long address = -1;
1734 + unsigned long limit;
1735 +
1736 + next_bit >>= PAGE_SHIFT;
1737 +
1738 +- boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
1739 +- PAGE_SIZE) >> PAGE_SHIFT;
1740 ++ mask = dma_get_seg_boundary(dev);
1741 ++
1742 ++ boundary_size = mask + 1 ? ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT :
1743 ++ 1UL << (BITS_PER_LONG - PAGE_SHIFT);
1744 +
1745 + for (;i < max_index; ++i) {
1746 + unsigned long offset = dom->aperture[i]->offset >> PAGE_SHIFT;
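
The boundary_size change above is an overflow guard: dma_get_seg_boundary() returns an inclusive mask, and for the common "no limit" value ~0UL the old ALIGN(mask + 1, PAGE_SIZE) wrapped to ALIGN(0, ...) == 0, yielding a zero boundary. A worked illustration of the two cases (illustrative values; 64-bit, 4 KiB pages):

unsigned long mask, boundary_size;

mask = 0xffffffffUL;		/* 4 GiB segment boundary */
boundary_size = ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT;
				/* = 0x100000000 >> 12 = 0x100000 pages */

mask = ~0UL;			/* no boundary: mask + 1 wraps to 0 */
boundary_size = mask + 1 ? ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT
			 : 1UL << (BITS_PER_LONG - PAGE_SHIFT);
				/* falls back to 2^52 pages instead of 0 */
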
1747 +diff --git a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c
1748 +index 3ee198b65843..cc7ece1712b5 100644
1749 +--- a/drivers/macintosh/windfarm_core.c
1750 ++++ b/drivers/macintosh/windfarm_core.c
1751 +@@ -435,7 +435,7 @@ int wf_unregister_client(struct notifier_block *nb)
1752 + {
1753 + mutex_lock(&wf_lock);
1754 + blocking_notifier_chain_unregister(&wf_client_list, nb);
1755 +- wf_client_count++;
1756 ++ wf_client_count--;
1757 + if (wf_client_count == 0)
1758 + wf_stop_thread();
1759 + mutex_unlock(&wf_lock);
1760 +diff --git a/drivers/md/dm-cache-policy-cleaner.c b/drivers/md/dm-cache-policy-cleaner.c
1761 +index b04d1f904d07..2eca9084defe 100644
1762 +--- a/drivers/md/dm-cache-policy-cleaner.c
1763 ++++ b/drivers/md/dm-cache-policy-cleaner.c
1764 +@@ -434,7 +434,7 @@ static struct dm_cache_policy *wb_create(dm_cblock_t cache_size,
1765 + static struct dm_cache_policy_type wb_policy_type = {
1766 + .name = "cleaner",
1767 + .version = {1, 0, 0},
1768 +- .hint_size = 0,
1769 ++ .hint_size = 4,
1770 + .owner = THIS_MODULE,
1771 + .create = wb_create
1772 + };
1773 +diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
1774 +index 59715389b3cf..19cfd7affebe 100644
1775 +--- a/drivers/md/dm-raid.c
1776 ++++ b/drivers/md/dm-raid.c
1777 +@@ -325,8 +325,7 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
1778 + */
1779 + if (min_region_size > (1 << 13)) {
1780 + /* If not a power of 2, make it the next power of 2 */
1781 +- if (min_region_size & (min_region_size - 1))
1782 +- region_size = 1 << fls(region_size);
1783 ++ region_size = roundup_pow_of_two(min_region_size);
1784 + DMINFO("Choosing default region size of %lu sectors",
1785 + region_size);
1786 + } else {
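
The dm-raid hunk above fixes two problems at once: the old branch rounded the wrong variable (region_size instead of min_region_size), and it only assigned a value when min_region_size was not a power of two, leaving region_size unset otherwise. roundup_pow_of_two() from <linux/log2.h> covers both cases; a quick worked check with illustrative values:

unsigned long n;

n = roundup_pow_of_two(8192);	/* 8192: 2^13 is already a power of two */
n = roundup_pow_of_two(8193);	/* 16384: rounded up to the next power */
/* old code: for 8192 the power-of-two test skipped the assignment
 * entirely, so region_size was never set in that case */
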
1787 +diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
1788 +index d2b3563129c2..5ff934102f30 100644
1789 +--- a/drivers/md/dm-thin.c
1790 ++++ b/drivers/md/dm-thin.c
1791 +@@ -2153,7 +2153,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
1792 + metadata_low_callback,
1793 + pool);
1794 + if (r)
1795 +- goto out_free_pt;
1796 ++ goto out_flags_changed;
1797 +
1798 + pt->callbacks.congested_fn = pool_is_congested;
1799 + dm_table_add_target_callbacks(ti->table, &pt->callbacks);
1800 +diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
1801 +index bf2b80d5c470..8731b6ea026b 100644
1802 +--- a/drivers/md/persistent-data/dm-btree-internal.h
1803 ++++ b/drivers/md/persistent-data/dm-btree-internal.h
1804 +@@ -138,4 +138,10 @@ int lower_bound(struct btree_node *n, uint64_t key);
1805 +
1806 + extern struct dm_block_validator btree_node_validator;
1807 +
1808 ++/*
1809 ++ * Value type for upper levels of multi-level btrees.
1810 ++ */
1811 ++extern void init_le64_type(struct dm_transaction_manager *tm,
1812 ++ struct dm_btree_value_type *vt);
1813 ++
1814 + #endif /* DM_BTREE_INTERNAL_H */
1815 +diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
1816 +index a03178e91a79..7c0d75547ccf 100644
1817 +--- a/drivers/md/persistent-data/dm-btree-remove.c
1818 ++++ b/drivers/md/persistent-data/dm-btree-remove.c
1819 +@@ -544,14 +544,6 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
1820 + return r;
1821 + }
1822 +
1823 +-static struct dm_btree_value_type le64_type = {
1824 +- .context = NULL,
1825 +- .size = sizeof(__le64),
1826 +- .inc = NULL,
1827 +- .dec = NULL,
1828 +- .equal = NULL
1829 +-};
1830 +-
1831 + int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
1832 + uint64_t *keys, dm_block_t *new_root)
1833 + {
1834 +@@ -559,12 +551,14 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
1835 + int index = 0, r = 0;
1836 + struct shadow_spine spine;
1837 + struct btree_node *n;
1838 ++ struct dm_btree_value_type le64_vt;
1839 +
1840 ++ init_le64_type(info->tm, &le64_vt);
1841 + init_shadow_spine(&spine, info);
1842 + for (level = 0; level < info->levels; level++) {
1843 + r = remove_raw(&spine, info,
1844 + (level == last_level ?
1845 +- &info->value_type : &le64_type),
1846 ++ &info->value_type : &le64_vt),
1847 + root, keys[level], (unsigned *)&index);
1848 + if (r < 0)
1849 + break;
1850 +diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
1851 +index 1b5e13ec7f96..0dee514ba4c5 100644
1852 +--- a/drivers/md/persistent-data/dm-btree-spine.c
1853 ++++ b/drivers/md/persistent-data/dm-btree-spine.c
1854 +@@ -249,3 +249,40 @@ int shadow_root(struct shadow_spine *s)
1855 + {
1856 + return s->root;
1857 + }
1858 ++
1859 ++static void le64_inc(void *context, const void *value_le)
1860 ++{
1861 ++ struct dm_transaction_manager *tm = context;
1862 ++ __le64 v_le;
1863 ++
1864 ++ memcpy(&v_le, value_le, sizeof(v_le));
1865 ++ dm_tm_inc(tm, le64_to_cpu(v_le));
1866 ++}
1867 ++
1868 ++static void le64_dec(void *context, const void *value_le)
1869 ++{
1870 ++ struct dm_transaction_manager *tm = context;
1871 ++ __le64 v_le;
1872 ++
1873 ++ memcpy(&v_le, value_le, sizeof(v_le));
1874 ++ dm_tm_dec(tm, le64_to_cpu(v_le));
1875 ++}
1876 ++
1877 ++static int le64_equal(void *context, const void *value1_le, const void *value2_le)
1878 ++{
1879 ++ __le64 v1_le, v2_le;
1880 ++
1881 ++ memcpy(&v1_le, value1_le, sizeof(v1_le));
1882 ++ memcpy(&v2_le, value2_le, sizeof(v2_le));
1883 ++ return v1_le == v2_le;
1884 ++}
1885 ++
1886 ++void init_le64_type(struct dm_transaction_manager *tm,
1887 ++ struct dm_btree_value_type *vt)
1888 ++{
1889 ++ vt->context = tm;
1890 ++ vt->size = sizeof(__le64);
1891 ++ vt->inc = le64_inc;
1892 ++ vt->dec = le64_dec;
1893 ++ vt->equal = le64_equal;
1894 ++}
1895 +diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
1896 +index 8dad9849649e..50cf11119af9 100644
1897 +--- a/drivers/md/persistent-data/dm-btree.c
1898 ++++ b/drivers/md/persistent-data/dm-btree.c
1899 +@@ -667,12 +667,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root,
1900 + struct btree_node *n;
1901 + struct dm_btree_value_type le64_type;
1902 +
1903 +- le64_type.context = NULL;
1904 +- le64_type.size = sizeof(__le64);
1905 +- le64_type.inc = NULL;
1906 +- le64_type.dec = NULL;
1907 +- le64_type.equal = NULL;
1908 +-
1909 ++ init_le64_type(info->tm, &le64_type);
1910 + init_shadow_spine(&spine, info);
1911 +
1912 + for (level = 0; level < (info->levels - 1); level++) {
1913 +diff --git a/drivers/media/platform/vsp1/vsp1_regs.h b/drivers/media/platform/vsp1/vsp1_regs.h
1914 +index 1d3304f1365b..72faf593427e 100644
1915 +--- a/drivers/media/platform/vsp1/vsp1_regs.h
1916 ++++ b/drivers/media/platform/vsp1/vsp1_regs.h
1917 +@@ -238,7 +238,7 @@
1918 + #define VI6_WPF_SZCLIP_EN (1 << 28)
1919 + #define VI6_WPF_SZCLIP_OFST_MASK (0xff << 16)
1920 + #define VI6_WPF_SZCLIP_OFST_SHIFT 16
1921 +-#define VI6_WPF_SZCLIP_SIZE_MASK (0x1fff << 0)
1922 ++#define VI6_WPF_SZCLIP_SIZE_MASK (0xfff << 0)
1923 + #define VI6_WPF_SZCLIP_SIZE_SHIFT 0
1924 +
1925 + #define VI6_WPF_OUTFMT 0x100c
1926 +@@ -304,9 +304,9 @@
1927 + #define VI6_DPR_HST_ROUTE 0x2044
1928 + #define VI6_DPR_HSI_ROUTE 0x2048
1929 + #define VI6_DPR_BRU_ROUTE 0x204c
1930 +-#define VI6_DPR_ROUTE_FXA_MASK (0xff << 8)
1931 ++#define VI6_DPR_ROUTE_FXA_MASK (0xff << 16)
1932 + #define VI6_DPR_ROUTE_FXA_SHIFT 16
1933 +-#define VI6_DPR_ROUTE_FP_MASK (0xff << 8)
1934 ++#define VI6_DPR_ROUTE_FP_MASK (0x3f << 8)
1935 + #define VI6_DPR_ROUTE_FP_SHIFT 8
1936 + #define VI6_DPR_ROUTE_RT_MASK (0x3f << 0)
1937 + #define VI6_DPR_ROUTE_RT_SHIFT 0
1938 +diff --git a/drivers/media/usb/gspca/m5602/m5602_s5k83a.c b/drivers/media/usb/gspca/m5602/m5602_s5k83a.c
1939 +index 7cbc3a00bda8..bf6b215438e3 100644
1940 +--- a/drivers/media/usb/gspca/m5602/m5602_s5k83a.c
1941 ++++ b/drivers/media/usb/gspca/m5602/m5602_s5k83a.c
1942 +@@ -177,7 +177,7 @@ static int rotation_thread_function(void *data)
1943 + __s32 vflip, hflip;
1944 +
1945 + set_current_state(TASK_INTERRUPTIBLE);
1946 +- while (!schedule_timeout(100)) {
1947 ++ while (!schedule_timeout(msecs_to_jiffies(100))) {
1948 + if (mutex_lock_interruptible(&sd->gspca_dev.usb_lock))
1949 + break;
1950 +
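
The one-token change above matters because schedule_timeout() takes jiffies, not milliseconds: a literal 100 means 100 ms only when HZ=1000, but a full second at HZ=100. Converting explicitly makes the delay HZ-independent; a minimal sketch:

set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(100);			/* 100 jiffies: 1 s at HZ=100 */

set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(msecs_to_jiffies(100));	/* ~100 ms regardless of HZ */
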
1951 +diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
1952 +index 661f7f2a9e8b..ea5ec8ed67a7 100644
1953 +--- a/drivers/media/usb/usbvision/usbvision-video.c
1954 ++++ b/drivers/media/usb/usbvision/usbvision-video.c
1955 +@@ -435,6 +435,7 @@ static int usbvision_v4l2_close(struct file *file)
1956 + usbvision_scratch_free(usbvision);
1957 +
1958 + usbvision->user--;
1959 ++ mutex_unlock(&usbvision->v4l2_lock);
1960 +
1961 + if (power_on_at_open) {
1962 + /* power off in a little while
1963 +@@ -448,7 +449,6 @@ static int usbvision_v4l2_close(struct file *file)
1964 + usbvision_release(usbvision);
1965 + return 0;
1966 + }
1967 +- mutex_unlock(&usbvision->v4l2_lock);
1968 +
1969 + PDEBUG(DBG_IO, "success");
1970 + return 0;
1971 +diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
1972 +index bf79def40126..8822e880833b 100644
1973 +--- a/drivers/mtd/ubi/io.c
1974 ++++ b/drivers/mtd/ubi/io.c
1975 +@@ -931,6 +931,11 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
1976 + goto bad;
1977 + }
1978 +
1979 ++ if (data_size > ubi->leb_size) {
1980 ++ ubi_err("bad data_size");
1981 ++ goto bad;
1982 ++ }
1983 ++
1984 + if (vol_type == UBI_VID_STATIC) {
1985 + /*
1986 + * Although from high-level point of view static volumes may
1987 +diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
1988 +index d77b1c1d7c72..bebf49e0dbe9 100644
1989 +--- a/drivers/mtd/ubi/vtbl.c
1990 ++++ b/drivers/mtd/ubi/vtbl.c
1991 +@@ -651,6 +651,7 @@ static int init_volumes(struct ubi_device *ubi,
1992 + if (ubi->corr_peb_count)
1993 + ubi_err("%d PEBs are corrupted and not used",
1994 + ubi->corr_peb_count);
1995 ++ return -ENOSPC;
1996 + }
1997 + ubi->rsvd_pebs += reserved_pebs;
1998 + ubi->avail_pebs -= reserved_pebs;
1999 +diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
2000 +index c08254016fe8..3375bfb1b246 100644
2001 +--- a/drivers/mtd/ubi/wl.c
2002 ++++ b/drivers/mtd/ubi/wl.c
2003 +@@ -1978,6 +1978,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
2004 + if (ubi->corr_peb_count)
2005 + ubi_err("%d PEBs are corrupted and not used",
2006 + ubi->corr_peb_count);
2007 ++ err = -ENOSPC;
2008 + goto out_free;
2009 + }
2010 + ubi->avail_pebs -= reserved_pebs;
2011 +diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
2012 +index addd23246eb6..d66cf214e95e 100644
2013 +--- a/drivers/net/ppp/pppoe.c
2014 ++++ b/drivers/net/ppp/pppoe.c
2015 +@@ -313,7 +313,6 @@ static void pppoe_flush_dev(struct net_device *dev)
2016 + if (po->pppoe_dev == dev &&
2017 + sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
2018 + pppox_unbind_sock(sk);
2019 +- sk->sk_state = PPPOX_ZOMBIE;
2020 + sk->sk_state_change(sk);
2021 + po->pppoe_dev = NULL;
2022 + dev_put(dev);
2023 +diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
2024 +index 20643833f0e6..31e607afb1d0 100644
2025 +--- a/drivers/net/usb/asix_devices.c
2026 ++++ b/drivers/net/usb/asix_devices.c
2027 +@@ -466,19 +466,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
2028 + return ret;
2029 + }
2030 +
2031 +- ret = asix_sw_reset(dev, AX_SWRESET_IPPD | AX_SWRESET_PRL);
2032 +- if (ret < 0)
2033 +- return ret;
2034 +-
2035 +- msleep(150);
2036 +-
2037 +- ret = asix_sw_reset(dev, AX_SWRESET_CLEAR);
2038 +- if (ret < 0)
2039 +- return ret;
2040 +-
2041 +- msleep(150);
2042 +-
2043 +- ret = asix_sw_reset(dev, embd_phy ? AX_SWRESET_IPRL : AX_SWRESET_PRTE);
2044 ++ ax88772_reset(dev);
2045 +
2046 + /* Read PHYID register *AFTER* the PHY was reset properly */
2047 + phyid = asix_get_phyid(dev);
2048 +@@ -891,7 +879,7 @@ static const struct driver_info ax88772_info = {
2049 + .unbind = ax88772_unbind,
2050 + .status = asix_status,
2051 + .link_reset = ax88772_link_reset,
2052 +- .reset = ax88772_reset,
2053 ++ .reset = ax88772_link_reset,
2054 + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET,
2055 + .rx_fixup = asix_rx_fixup_common,
2056 + .tx_fixup = asix_tx_fixup,
2057 +diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
2058 +index c9887cb60650..f900dfd551e8 100644
2059 +--- a/drivers/net/wireless/ath/ath9k/init.c
2060 ++++ b/drivers/net/wireless/ath/ath9k/init.c
2061 +@@ -893,6 +893,7 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
2062 + hw->max_rate_tries = 10;
2063 + hw->sta_data_size = sizeof(struct ath_node);
2064 + hw->vif_data_size = sizeof(struct ath_vif);
2065 ++ hw->extra_tx_headroom = 4;
2066 +
2067 + hw->wiphy->available_antennas_rx = BIT(ah->caps.max_rxchains) - 1;
2068 + hw->wiphy->available_antennas_tx = BIT(ah->caps.max_txchains) - 1;
2069 +diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
2070 +index bb6b0df50b33..efb6e13dc788 100644
2071 +--- a/drivers/s390/char/con3270.c
2072 ++++ b/drivers/s390/char/con3270.c
2073 +@@ -407,6 +407,10 @@ con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb)
2074 + else
2075 + /* Normal end. Copy residual count. */
2076 + rq->rescnt = irb->scsw.cmd.count;
2077 ++ } else if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
2078 ++ /* Interrupt without an outstanding request -> update all */
2079 ++ cp->update_flags = CON_UPDATE_ALL;
2080 ++ con3270_set_timer(cp, 1);
2081 + }
2082 + return RAW3270_IO_DONE;
2083 + }
2084 +diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
2085 +index 34629ea913d4..49f034facf77 100644
2086 +--- a/drivers/s390/char/tty3270.c
2087 ++++ b/drivers/s390/char/tty3270.c
2088 +@@ -662,6 +662,10 @@ tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb)
2089 + else
2090 + /* Normal end. Copy residual count. */
2091 + rq->rescnt = irb->scsw.cmd.count;
2092 ++ } else if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
2093 ++ /* Interrupt without an outstanding request -> update all */
2094 ++ tp->update_flags = TTY_UPDATE_ALL;
2095 ++ tty3270_set_timer(tp, 1);
2096 + }
2097 + return RAW3270_IO_DONE;
2098 + }
2099 +diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
2100 +index 5f57e3d35e26..6adf9abdf955 100644
2101 +--- a/drivers/scsi/3w-9xxx.c
2102 ++++ b/drivers/scsi/3w-9xxx.c
2103 +@@ -225,6 +225,17 @@ static const struct file_operations twa_fops = {
2104 + .llseek = noop_llseek,
2105 + };
2106 +
2107 ++/*
2108 ++ * The controllers use an inline buffer instead of a mapped SGL for small,
2109 ++ * single-entry buffers. Note that we treat a zero-length transfer like
2110 ++ * a mapped SGL.
2111 ++ */
2112 ++static bool twa_command_mapped(struct scsi_cmnd *cmd)
2113 ++{
2114 ++ return scsi_sg_count(cmd) != 1 ||
2115 ++ scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH;
2116 ++}
2117 ++
2118 + /* This function will complete an aen request from the isr */
2119 + static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
2120 + {
2121 +@@ -1351,7 +1362,8 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
2122 + }
2123 +
2124 + /* Now complete the io */
2125 +- scsi_dma_unmap(cmd);
2126 ++ if (twa_command_mapped(cmd))
2127 ++ scsi_dma_unmap(cmd);
2128 + cmd->scsi_done(cmd);
2129 + tw_dev->state[request_id] = TW_S_COMPLETED;
2130 + twa_free_request_id(tw_dev, request_id);
2131 +@@ -1594,7 +1606,8 @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
2132 + struct scsi_cmnd *cmd = tw_dev->srb[i];
2133 +
2134 + cmd->result = (DID_RESET << 16);
2135 +- scsi_dma_unmap(cmd);
2136 ++ if (twa_command_mapped(cmd))
2137 ++ scsi_dma_unmap(cmd);
2138 + cmd->scsi_done(cmd);
2139 + }
2140 + }
2141 +@@ -1777,12 +1790,14 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
2142 + retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
2143 + switch (retval) {
2144 + case SCSI_MLQUEUE_HOST_BUSY:
2145 +- scsi_dma_unmap(SCpnt);
2146 ++ if (twa_command_mapped(SCpnt))
2147 ++ scsi_dma_unmap(SCpnt);
2148 + twa_free_request_id(tw_dev, request_id);
2149 + break;
2150 + case 1:
2151 + SCpnt->result = (DID_ERROR << 16);
2152 +- scsi_dma_unmap(SCpnt);
2153 ++ if (twa_command_mapped(SCpnt))
2154 ++ scsi_dma_unmap(SCpnt);
2155 + done(SCpnt);
2156 + tw_dev->state[request_id] = TW_S_COMPLETED;
2157 + twa_free_request_id(tw_dev, request_id);
2158 +@@ -1843,8 +1858,7 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
2159 + /* Map sglist from scsi layer to cmd packet */
2160 +
2161 + if (scsi_sg_count(srb)) {
2162 +- if ((scsi_sg_count(srb) == 1) &&
2163 +- (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
2164 ++ if (!twa_command_mapped(srb)) {
2165 + if (srb->sc_data_direction == DMA_TO_DEVICE ||
2166 + srb->sc_data_direction == DMA_BIDIRECTIONAL)
2167 + scsi_sg_copy_to_buffer(srb,
2168 +@@ -1917,7 +1931,7 @@ static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int re
2169 + {
2170 + struct scsi_cmnd *cmd = tw_dev->srb[request_id];
2171 +
2172 +- if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH &&
2173 ++ if (!twa_command_mapped(cmd) &&
2174 + (cmd->sc_data_direction == DMA_FROM_DEVICE ||
2175 + cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
2176 + if (scsi_sg_count(cmd) == 1) {
2177 +diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
2178 +index 066e3198838d..ff2689d01209 100644
2179 +--- a/drivers/scsi/scsi_error.c
2180 ++++ b/drivers/scsi/scsi_error.c
2181 +@@ -1906,8 +1906,17 @@ int scsi_error_handler(void *data)
2182 + * We never actually get interrupted because kthread_run
2183 + * disables signal delivery for the created thread.
2184 + */
2185 +- while (!kthread_should_stop()) {
2186 ++ while (true) {
2187 ++ /*
2188 ++ * The sequence in kthread_stop() sets the stop flag first
2189 ++	 * The sequence in kthread_stop() sets the stop flag first,
2190 ++	 * then wakes the process. To avoid missed wakeups, the task
2191 ++	 * should always be in a non-running state before the stop
2192 ++	 * flag is checked.
2193 + set_current_state(TASK_INTERRUPTIBLE);
2194 ++ if (kthread_should_stop())
2195 ++ break;
2196 ++
2197 + if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
2198 + shost->host_failed != shost->host_busy) {
2199 + SCSI_LOG_ERROR_RECOVERY(1,
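
The restructured loop above is the canonical kthread shutdown idiom: mark the task non-running before testing the stop flag, so a kthread_stop() that sets the flag and then issues the wakeup cannot fall between the check and the sleep. A minimal self-contained sketch of the idiom (a generic worker, not the SCSI error handler itself):

#include <linux/kthread.h>
#include <linux/sched.h>

static int worker_fn(void *data)
{
	while (true) {
		/* mark ourselves sleeping BEFORE testing the flag ... */
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;	/* ... so kthread_stop()'s wakeup can't be lost */
		schedule();	/* sleep until woken (work or kthread_stop()) */

		/* woken up running: handle one unit of work here */
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
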
2200 +diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
2201 +index d01ae4d353d4..bb4a919d2fdf 100644
2202 +--- a/drivers/spi/spi-pxa2xx.c
2203 ++++ b/drivers/spi/spi-pxa2xx.c
2204 +@@ -562,6 +562,10 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
2205 + if (!(sccr1_reg & SSCR1_TIE))
2206 + mask &= ~SSSR_TFS;
2207 +
2208 ++ /* Ignore RX timeout interrupt if it is disabled */
2209 ++ if (!(sccr1_reg & SSCR1_TINTE))
2210 ++ mask &= ~SSSR_TINT;
2211 ++
2212 + if (!(status & mask))
2213 + return IRQ_NONE;
2214 +
2215 +diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
2216 +index d254477372b9..5ddda10472c6 100644
2217 +--- a/drivers/spi/spi.c
2218 ++++ b/drivers/spi/spi.c
2219 +@@ -1087,8 +1087,7 @@ static struct class spi_master_class = {
2220 + *
2221 + * The caller is responsible for assigning the bus number and initializing
2222 + * the master's methods before calling spi_register_master(); and (after errors
2223 +- * adding the device) calling spi_master_put() and kfree() to prevent a memory
2224 +- * leak.
2225 ++ * adding the device) calling spi_master_put() to prevent a memory leak.
2226 + */
2227 + struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
2228 + {
2229 +diff --git a/drivers/staging/speakup/fakekey.c b/drivers/staging/speakup/fakekey.c
2230 +index 4299cf45f947..5e1f16c36b49 100644
2231 +--- a/drivers/staging/speakup/fakekey.c
2232 ++++ b/drivers/staging/speakup/fakekey.c
2233 +@@ -81,6 +81,7 @@ void speakup_fake_down_arrow(void)
2234 + __this_cpu_write(reporting_keystroke, true);
2235 + input_report_key(virt_keyboard, KEY_DOWN, PRESSED);
2236 + input_report_key(virt_keyboard, KEY_DOWN, RELEASED);
2237 ++ input_sync(virt_keyboard);
2238 + __this_cpu_write(reporting_keystroke, false);
2239 +
2240 + /* reenable preemption */
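
The added input_sync() is what actually ends the event packet: input_report_key() only queues events, and handlers such as evdev may hold them until a SYN_REPORT arrives. The usual sequence when injecting a keystroke, assuming a valid struct input_dev:

input_report_key(dev, KEY_DOWN, 1);	/* queue the press */
input_report_key(dev, KEY_DOWN, 0);	/* queue the release */
input_sync(dev);			/* emit SYN_REPORT: flush the batch to handlers */
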
2241 +diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
2242 +index 92f0cc442d46..eac6a3212de2 100644
2243 +--- a/drivers/usb/chipidea/debug.c
2244 ++++ b/drivers/usb/chipidea/debug.c
2245 +@@ -62,9 +62,11 @@ static int ci_port_test_show(struct seq_file *s, void *data)
2246 + unsigned long flags;
2247 + unsigned mode;
2248 +
2249 ++ pm_runtime_get_sync(ci->dev);
2250 + spin_lock_irqsave(&ci->lock, flags);
2251 + mode = hw_port_test_get(ci);
2252 + spin_unlock_irqrestore(&ci->lock, flags);
2253 ++ pm_runtime_put_sync(ci->dev);
2254 +
2255 + seq_printf(s, "mode = %u\n", mode);
2256 +
2257 +@@ -94,9 +96,11 @@ static ssize_t ci_port_test_write(struct file *file, const char __user *ubuf,
2258 + if (sscanf(buf, "%u", &mode) != 1)
2259 + return -EINVAL;
2260 +
2261 ++ pm_runtime_get_sync(ci->dev);
2262 + spin_lock_irqsave(&ci->lock, flags);
2263 + ret = hw_port_test_set(ci, mode);
2264 + spin_unlock_irqrestore(&ci->lock, flags);
2265 ++ pm_runtime_put_sync(ci->dev);
2266 +
2267 + return ret ? ret : count;
2268 + }
2269 +@@ -238,8 +242,10 @@ static ssize_t ci_role_write(struct file *file, const char __user *ubuf,
2270 + if (role == CI_ROLE_END || role == ci->role)
2271 + return -EINVAL;
2272 +
2273 ++ pm_runtime_get_sync(ci->dev);
2274 + ci_role_stop(ci);
2275 + ret = ci_role_start(ci, role);
2276 ++ pm_runtime_put_sync(ci->dev);
2277 +
2278 + return ret ? ret : count;
2279 + }
2280 +diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
2281 +index 09de131ee0cb..c997ee9122bc 100644
2282 +--- a/drivers/usb/class/usbtmc.c
2283 ++++ b/drivers/usb/class/usbtmc.c
2284 +@@ -110,6 +110,7 @@ struct usbtmc_ID_rigol_quirk {
2285 +
2286 + static const struct usbtmc_ID_rigol_quirk usbtmc_id_quirk[] = {
2287 + { 0x1ab1, 0x0588 },
2288 ++ { 0x1ab1, 0x04b0 },
2289 + { 0, 0 }
2290 + };
2291 +
2292 +diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
2293 +index 98cb09617b20..b9560f485d21 100644
2294 +--- a/drivers/usb/core/config.c
2295 ++++ b/drivers/usb/core/config.c
2296 +@@ -114,7 +114,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
2297 + cfgno, inum, asnum, ep->desc.bEndpointAddress);
2298 + ep->ss_ep_comp.bmAttributes = 16;
2299 + } else if (usb_endpoint_xfer_isoc(&ep->desc) &&
2300 +- desc->bmAttributes > 2) {
2301 ++ USB_SS_MULT(desc->bmAttributes) > 3) {
2302 + dev_warn(ddev, "Isoc endpoint has Mult of %d in "
2303 + "config %d interface %d altsetting %d ep %d: "
2304 + "setting to 3\n", desc->bmAttributes + 1,
2305 +@@ -123,7 +123,8 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
2306 + }
2307 +
2308 + if (usb_endpoint_xfer_isoc(&ep->desc))
2309 +- max_tx = (desc->bMaxBurst + 1) * (desc->bmAttributes + 1) *
2310 ++ max_tx = (desc->bMaxBurst + 1) *
2311 ++ (USB_SS_MULT(desc->bmAttributes)) *
2312 + usb_endpoint_maxp(&ep->desc);
2313 + else if (usb_endpoint_xfer_int(&ep->desc))
2314 + max_tx = usb_endpoint_maxp(&ep->desc) *
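
The config.c hunks above switch from the raw bmAttributes byte to the USB_SS_MULT() macro, which decodes only bits 1:0 (mult minus one); the upper bits are reserved and some devices set them. A worked example, assuming the ch9.h definition USB_SS_MULT(p) == (1 + ((p) & 0x3)):

/* isoc endpoint: maxp = 1024, bMaxBurst = 1, bmAttributes = 0x02 */
unsigned int max_tx = (1 + 1) * USB_SS_MULT(0x02) * 1024;	/* 2 * 3 * 1024 = 6144 */

/* bmAttributes = 0x82: a reserved high bit is set. The old code
 * computed (0x82 + 1) == 131 as the multiplier and tripped the
 * "> 2" sanity check; the masked macro still yields 3. */
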
2315 +diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
2316 +index 5e1a1790c2f6..04b21577e8ed 100644
2317 +--- a/drivers/usb/core/hcd-pci.c
2318 ++++ b/drivers/usb/core/hcd-pci.c
2319 +@@ -215,6 +215,9 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
2320 + goto disable_pci;
2321 + }
2322 +
2323 ++ hcd->amd_resume_bug = (usb_hcd_amd_remote_wakeup_quirk(dev) &&
2324 ++ driver->flags & (HCD_USB11 | HCD_USB3)) ? 1 : 0;
2325 ++
2326 + if (driver->flags & HCD_MEMORY) {
2327 + /* EHCI, OHCI */
2328 + hcd->rsrc_start = pci_resource_start(dev, 0);
2329 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
2330 +index 78141993dfd0..f9af3bf33e1b 100644
2331 +--- a/drivers/usb/core/hub.c
2332 ++++ b/drivers/usb/core/hub.c
2333 +@@ -2539,9 +2539,6 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
2334 + #define HUB_LONG_RESET_TIME 200
2335 + #define HUB_RESET_TIMEOUT 800
2336 +
2337 +-static int hub_port_reset(struct usb_hub *hub, int port1,
2338 +- struct usb_device *udev, unsigned int delay, bool warm);
2339 +-
2340 +	/* Is a USB 3.0 port in the Inactive or Compliance Mode state?
2341 +	 * Port warm reset is required to recover.
2342 + */
2343 +@@ -2622,44 +2619,6 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
2344 + return 0;
2345 + }
2346 +
2347 +-static void hub_port_finish_reset(struct usb_hub *hub, int port1,
2348 +- struct usb_device *udev, int *status)
2349 +-{
2350 +- switch (*status) {
2351 +- case 0:
2352 +- /* TRSTRCY = 10 ms; plus some extra */
2353 +- msleep(10 + 40);
2354 +- if (udev) {
2355 +- struct usb_hcd *hcd = bus_to_hcd(udev->bus);
2356 +-
2357 +- update_devnum(udev, 0);
2358 +- /* The xHC may think the device is already reset,
2359 +- * so ignore the status.
2360 +- */
2361 +- if (hcd->driver->reset_device)
2362 +- hcd->driver->reset_device(hcd, udev);
2363 +- }
2364 +- /* FALL THROUGH */
2365 +- case -ENOTCONN:
2366 +- case -ENODEV:
2367 +- usb_clear_port_feature(hub->hdev,
2368 +- port1, USB_PORT_FEAT_C_RESET);
2369 +- if (hub_is_superspeed(hub->hdev)) {
2370 +- usb_clear_port_feature(hub->hdev, port1,
2371 +- USB_PORT_FEAT_C_BH_PORT_RESET);
2372 +- usb_clear_port_feature(hub->hdev, port1,
2373 +- USB_PORT_FEAT_C_PORT_LINK_STATE);
2374 +- usb_clear_port_feature(hub->hdev, port1,
2375 +- USB_PORT_FEAT_C_CONNECTION);
2376 +- }
2377 +- if (udev)
2378 +- usb_set_device_state(udev, *status
2379 +- ? USB_STATE_NOTATTACHED
2380 +- : USB_STATE_DEFAULT);
2381 +- break;
2382 +- }
2383 +-}
2384 +-
2385 + /* Handle port reset and port warm(BH) reset (for USB3 protocol ports) */
2386 + static int hub_port_reset(struct usb_hub *hub, int port1,
2387 + struct usb_device *udev, unsigned int delay, bool warm)
2388 +@@ -2682,13 +2641,10 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
2389 + * If the caller hasn't explicitly requested a warm reset,
2390 + * double check and see if one is needed.
2391 + */
2392 +- status = hub_port_status(hub, port1,
2393 +- &portstatus, &portchange);
2394 +- if (status < 0)
2395 +- goto done;
2396 +-
2397 +- if (hub_port_warm_reset_required(hub, portstatus))
2398 +- warm = true;
2399 ++ if (hub_port_status(hub, port1, &portstatus, &portchange) == 0)
2400 ++ if (hub_port_warm_reset_required(hub,
2401 ++ portstatus))
2402 ++ warm = true;
2403 + }
2404 +
2405 + /* Reset the port */
2406 +@@ -2713,11 +2669,19 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
2407 +
2408 + /* Check for disconnect or reset */
2409 + if (status == 0 || status == -ENOTCONN || status == -ENODEV) {
2410 +- hub_port_finish_reset(hub, port1, udev, &status);
2411 ++ usb_clear_port_feature(hub->hdev, port1,
2412 ++ USB_PORT_FEAT_C_RESET);
2413 +
2414 + if (!hub_is_superspeed(hub->hdev))
2415 + goto done;
2416 +
2417 ++ usb_clear_port_feature(hub->hdev, port1,
2418 ++ USB_PORT_FEAT_C_BH_PORT_RESET);
2419 ++ usb_clear_port_feature(hub->hdev, port1,
2420 ++ USB_PORT_FEAT_C_PORT_LINK_STATE);
2421 ++ usb_clear_port_feature(hub->hdev, port1,
2422 ++ USB_PORT_FEAT_C_CONNECTION);
2423 ++
2424 + /*
2425 + * If a USB 3.0 device migrates from reset to an error
2426 + * state, re-issue the warm reset.
2427 +@@ -2751,6 +2715,26 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
2428 + port1);
2429 +
2430 + done:
2431 ++ if (status == 0) {
2432 ++ /* TRSTRCY = 10 ms; plus some extra */
2433 ++ msleep(10 + 40);
2434 ++ if (udev) {
2435 ++ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
2436 ++
2437 ++ update_devnum(udev, 0);
2438 ++ /* The xHC may think the device is already reset,
2439 ++ * so ignore the status.
2440 ++ */
2441 ++ if (hcd->driver->reset_device)
2442 ++ hcd->driver->reset_device(hcd, udev);
2443 ++
2444 ++ usb_set_device_state(udev, USB_STATE_DEFAULT);
2445 ++ }
2446 ++ } else {
2447 ++ if (udev)
2448 ++ usb_set_device_state(udev, USB_STATE_NOTATTACHED);
2449 ++ }
2450 ++
2451 + if (!hub_is_superspeed(hub->hdev))
2452 + up_read(&ehci_cf_port_reset_rwsem);
2453 +
2454 +diff --git a/drivers/usb/core/otg_whitelist.h b/drivers/usb/core/otg_whitelist.h
2455 +index e8cdce571bb1..2753cec61aaf 100644
2456 +--- a/drivers/usb/core/otg_whitelist.h
2457 ++++ b/drivers/usb/core/otg_whitelist.h
2458 +@@ -59,6 +59,11 @@ static int is_targeted(struct usb_device *dev)
2459 + le16_to_cpu(dev->descriptor.idProduct) == 0xbadd))
2460 + return 0;
2461 +
2462 ++ /* OTG PET device is always targeted (see OTG 2.0 ECN 6.4.2) */
2463 ++ if ((le16_to_cpu(dev->descriptor.idVendor) == 0x1a0a &&
2464 ++ le16_to_cpu(dev->descriptor.idProduct) == 0x0200))
2465 ++ return 1;
2466 ++
2467 + /* NOTE: can't use usb_match_id() since interface caches
2468 + * aren't set up yet. this is cut/paste from that code.
2469 + */
2470 +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
2471 +index 5014a4282352..08f321904fb7 100644
2472 +--- a/drivers/usb/core/quirks.c
2473 ++++ b/drivers/usb/core/quirks.c
2474 +@@ -13,6 +13,7 @@
2475 +
2476 + #include <linux/usb.h>
2477 + #include <linux/usb/quirks.h>
2478 ++#include <linux/usb/hcd.h>
2479 + #include "usb.h"
2480 +
2481 + /* Lists of quirky USB devices, split in device quirks and interface quirks.
2482 +@@ -53,6 +54,13 @@ static const struct usb_device_id usb_quirk_list[] = {
2483 + { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
2484 + { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
2485 +
2486 ++ /* Logitech ConferenceCam CC3000e */
2487 ++ { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
2488 ++ { USB_DEVICE(0x046d, 0x0848), .driver_info = USB_QUIRK_DELAY_INIT },
2489 ++
2490 ++ /* Logitech PTZ Pro Camera */
2491 ++ { USB_DEVICE(0x046d, 0x0853), .driver_info = USB_QUIRK_DELAY_INIT },
2492 ++
2493 + /* Logitech Quickcam Fusion */
2494 + { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
2495 +
2496 +@@ -77,6 +85,12 @@ static const struct usb_device_id usb_quirk_list[] = {
2497 + /* Philips PSC805 audio device */
2498 + { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME },
2499 +
2500 ++ /* Plantronic Audio 655 DSP */
2501 ++ { USB_DEVICE(0x047f, 0xc008), .driver_info = USB_QUIRK_RESET_RESUME },
2502 ++
2503 ++ /* Plantronic Audio 648 USB */
2504 ++ { USB_DEVICE(0x047f, 0xc013), .driver_info = USB_QUIRK_RESET_RESUME },
2505 ++
2506 + /* Artisman Watchdog Dongle */
2507 + { USB_DEVICE(0x04b4, 0x0526), .driver_info =
2508 + USB_QUIRK_CONFIG_INTF_STRINGS },
2509 +@@ -120,9 +134,6 @@ static const struct usb_device_id usb_quirk_list[] = {
2510 + /* Alcor Micro Corp. Hub */
2511 + { USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME },
2512 +
2513 +- /* MicroTouch Systems touchscreen */
2514 +- { USB_DEVICE(0x0596, 0x051e), .driver_info = USB_QUIRK_RESET_RESUME },
2515 +-
2516 + /* appletouch */
2517 + { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
2518 +
2519 +@@ -184,6 +195,10 @@ static const struct usb_device_id usb_quirk_list[] = {
2520 + { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
2521 + USB_QUIRK_IGNORE_REMOTE_WAKEUP },
2522 +
2523 ++ /* Protocol and OTG Electrical Test Device */
2524 ++ { USB_DEVICE(0x1a0a, 0x0200), .driver_info =
2525 ++ USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
2526 ++
2527 + { } /* terminating entry must be last */
2528 + };
2529 +
2530 +@@ -192,9 +207,20 @@ static const struct usb_device_id usb_interface_quirk_list[] = {
2531 + { USB_VENDOR_AND_INTERFACE_INFO(0x046d, USB_CLASS_VIDEO, 1, 0),
2532 + .driver_info = USB_QUIRK_RESET_RESUME },
2533 +
2534 +- /* ASUS Base Station(T100) */
2535 +- { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
2536 +- USB_QUIRK_IGNORE_REMOTE_WAKEUP },
2537 ++ { } /* terminating entry must be last */
2538 ++};
2539 ++
2540 ++static const struct usb_device_id usb_amd_resume_quirk_list[] = {
2541 ++ /* Lenovo Mouse with Pixart controller */
2542 ++ { USB_DEVICE(0x17ef, 0x602e), .driver_info = USB_QUIRK_RESET_RESUME },
2543 ++
2544 ++ /* Pixart Mouse */
2545 ++ { USB_DEVICE(0x093a, 0x2500), .driver_info = USB_QUIRK_RESET_RESUME },
2546 ++ { USB_DEVICE(0x093a, 0x2510), .driver_info = USB_QUIRK_RESET_RESUME },
2547 ++ { USB_DEVICE(0x093a, 0x2521), .driver_info = USB_QUIRK_RESET_RESUME },
2548 ++
2549 ++ /* Logitech Optical Mouse M90/M100 */
2550 ++ { USB_DEVICE(0x046d, 0xc05a), .driver_info = USB_QUIRK_RESET_RESUME },
2551 +
2552 + { } /* terminating entry must be last */
2553 + };
2554 +@@ -225,6 +251,18 @@ static bool usb_match_any_interface(struct usb_device *udev,
2555 + return false;
2556 + }
2557 +
2558 ++int usb_amd_resume_quirk(struct usb_device *udev)
2559 ++{
2560 ++ struct usb_hcd *hcd;
2561 ++
2562 ++ hcd = bus_to_hcd(udev->bus);
2563 ++ /* The device should be attached directly to root hub */
2564 ++ if (udev->level == 1 && hcd->amd_resume_bug == 1)
2565 ++ return 1;
2566 ++
2567 ++ return 0;
2568 ++}
2569 ++
2570 + static u32 __usb_detect_quirks(struct usb_device *udev,
2571 + const struct usb_device_id *id)
2572 + {
2573 +@@ -250,6 +288,15 @@ static u32 __usb_detect_quirks(struct usb_device *udev,
2574 + void usb_detect_quirks(struct usb_device *udev)
2575 + {
2576 + udev->quirks = __usb_detect_quirks(udev, usb_quirk_list);
2577 ++
2578 ++ /*
2579 ++	 * Pixart-based mice trigger a remote wakeup issue on the AMD
2580 ++	 * Yangtze chipset, so mark them with the RESET_RESUME quirk.
2581 ++ */
2582 ++ if (usb_amd_resume_quirk(udev))
2583 ++ udev->quirks |= __usb_detect_quirks(udev,
2584 ++ usb_amd_resume_quirk_list);
2585 ++
2586 + if (udev->quirks)
2587 + dev_dbg(&udev->dev, "USB quirks for this device: %x\n",
2588 + udev->quirks);
2589 +diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
2590 +index 5a45437da097..a47ff42e620a 100644
2591 +--- a/drivers/usb/host/pci-quirks.c
2592 ++++ b/drivers/usb/host/pci-quirks.c
2593 +@@ -250,6 +250,18 @@ commit:
2594 + }
2595 + EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);
2596 +
2597 ++int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev)
2598 ++{
2599 ++ /* Make sure amd chipset type has already been initialized */
2600 ++ usb_amd_find_chipset_info();
2601 ++ if (amd_chipset.sb_type.gen != AMD_CHIPSET_YANGTZE)
2602 ++ return 0;
2603 ++
2604 ++ dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
2605 ++ return 1;
2606 ++}
2607 ++EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk);
2608 ++
2609 + bool usb_amd_hang_symptom_quirk(void)
2610 + {
2611 + u8 rev;
2612 +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
2613 +index 9af524c1f48f..9552d2080d12 100644
2614 +--- a/drivers/usb/host/xhci-mem.c
2615 ++++ b/drivers/usb/host/xhci-mem.c
2616 +@@ -1402,10 +1402,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
2617 + * use Event Data TRBs, and we don't chain in a link TRB on short
2618 + * transfers, we're basically dividing by 1.
2619 + *
2620 +- * xHCI 1.0 specification indicates that the Average TRB Length should
2621 +- * be set to 8 for control endpoints.
2622 ++ * xHCI 1.0 and 1.1 specification indicates that the Average TRB Length
2623 ++ * should be set to 8 for control endpoints.
2624 + */
2625 +- if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100)
2626 ++ if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
2627 + ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8));
2628 + else
2629 + ep_ctx->tx_info |=
2630 +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
2631 +index 4ddceb7e05c3..68b8bc2e82d9 100644
2632 +--- a/drivers/usb/host/xhci-pci.c
2633 ++++ b/drivers/usb/host/xhci-pci.c
2634 +@@ -37,6 +37,9 @@
2635 +
2636 + #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31
2637 + #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
2638 ++#define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5
2639 ++#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
2640 ++#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
2641 +
2642 + static const char hcd_name[] = "xhci_hcd";
2643 +
2644 +@@ -129,6 +132,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
2645 + pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
2646 + xhci->quirks |= XHCI_SPURIOUS_REBOOT;
2647 + }
2648 ++ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
2649 ++ (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
2650 ++ pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
2651 ++ pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)) {
2652 ++ xhci->quirks |= XHCI_PME_STUCK_QUIRK;
2653 ++ }
2654 + if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
2655 + pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
2656 + xhci->quirks |= XHCI_RESET_ON_RESUME;
2657 +@@ -143,6 +152,21 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
2658 + xhci->quirks |= XHCI_RESET_ON_RESUME;
2659 + }
2660 +
2661 ++/*
2662 ++ * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
2663 ++ * the Internal PME flag bit in the vendor-specific PMCTRL register at offset 0x80a4
2664 ++ */
2665 ++static void xhci_pme_quirk(struct xhci_hcd *xhci)
2666 ++{
2667 ++ u32 val;
2668 ++ void __iomem *reg;
2669 ++
2670 ++ reg = (void __iomem *) xhci->cap_regs + 0x80a4;
2671 ++ val = readl(reg);
2672 ++ writel(val | BIT(28), reg);
2673 ++ readl(reg);
2674 ++}
2675 ++
2676 + /* called during probe() after chip reset completes */
2677 + static int xhci_pci_setup(struct usb_hcd *hcd)
2678 + {
2679 +@@ -269,6 +293,9 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
2680 + if (xhci_compliance_mode_recovery_timer_quirk_check())
2681 + pdev->no_d3cold = true;
2682 +
2683 ++ if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
2684 ++ xhci_pme_quirk(xhci);
2685 ++
2686 + return xhci_suspend(xhci, do_wakeup);
2687 + }
2688 +
2689 +@@ -299,6 +326,9 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
2690 + if (pdev->vendor == PCI_VENDOR_ID_INTEL)
2691 + usb_enable_intel_xhci_ports(pdev);
2692 +
2693 ++ if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
2694 ++ xhci_pme_quirk(xhci);
2695 ++
2696 + retval = xhci_resume(xhci, hibernated);
2697 + return retval;
2698 + }
2699 +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
2700 +index 66deb0af258e..ad381c22e5ac 100644
2701 +--- a/drivers/usb/host/xhci-ring.c
2702 ++++ b/drivers/usb/host/xhci-ring.c
2703 +@@ -554,9 +554,12 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
2704 + struct xhci_virt_device *dev = xhci->devs[slot_id];
2705 + struct xhci_virt_ep *ep = &dev->eps[ep_index];
2706 + struct xhci_ring *ep_ring;
2707 +- struct xhci_generic_trb *trb;
2708 ++ struct xhci_segment *new_seg;
2709 ++ union xhci_trb *new_deq;
2710 + dma_addr_t addr;
2711 + u64 hw_dequeue;
2712 ++ bool cycle_found = false;
2713 ++ bool td_last_trb_found = false;
2714 +
2715 + ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
2716 + ep_index, stream_id);
2717 +@@ -581,45 +584,45 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
2718 + hw_dequeue = le64_to_cpu(ep_ctx->deq);
2719 + }
2720 +
2721 +- /* Find virtual address and segment of hardware dequeue pointer */
2722 +- state->new_deq_seg = ep_ring->deq_seg;
2723 +- state->new_deq_ptr = ep_ring->dequeue;
2724 +- while (xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr)
2725 +- != (dma_addr_t)(hw_dequeue & ~0xf)) {
2726 +- next_trb(xhci, ep_ring, &state->new_deq_seg,
2727 +- &state->new_deq_ptr);
2728 +- if (state->new_deq_ptr == ep_ring->dequeue) {
2729 +- WARN_ON(1);
2730 +- return;
2731 +- }
2732 +- }
2733 ++ new_seg = ep_ring->deq_seg;
2734 ++ new_deq = ep_ring->dequeue;
2735 ++ state->new_cycle_state = hw_dequeue & 0x1;
2736 ++
2737 + /*
2738 +- * Find cycle state for last_trb, starting at old cycle state of
2739 +- * hw_dequeue. If there is only one segment ring, find_trb_seg() will
2740 +- * return immediately and cannot toggle the cycle state if this search
2741 +- * wraps around, so add one more toggle manually in that case.
2742 ++ * We want to find the pointer, segment and cycle state of the new trb
2743 ++ * (the one after current TD's last_trb). We know the cycle state at
2744 ++ * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
2745 ++ * found.
2746 + */
2747 +- state->new_cycle_state = hw_dequeue & 0x1;
2748 +- if (ep_ring->first_seg == ep_ring->first_seg->next &&
2749 +- cur_td->last_trb < state->new_deq_ptr)
2750 +- state->new_cycle_state ^= 0x1;
2751 ++ do {
2752 ++ if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
2753 ++ == (dma_addr_t)(hw_dequeue & ~0xf)) {
2754 ++ cycle_found = true;
2755 ++ if (td_last_trb_found)
2756 ++ break;
2757 ++ }
2758 ++ if (new_deq == cur_td->last_trb)
2759 ++ td_last_trb_found = true;
2760 +
2761 +- state->new_deq_ptr = cur_td->last_trb;
2762 +- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
2763 +- "Finding segment containing last TRB in TD.");
2764 +- state->new_deq_seg = find_trb_seg(state->new_deq_seg,
2765 +- state->new_deq_ptr, &state->new_cycle_state);
2766 +- if (!state->new_deq_seg) {
2767 +- WARN_ON(1);
2768 +- return;
2769 +- }
2770 ++ if (cycle_found &&
2771 ++ TRB_TYPE_LINK_LE32(new_deq->generic.field[3]) &&
2772 ++ new_deq->generic.field[3] & cpu_to_le32(LINK_TOGGLE))
2773 ++ state->new_cycle_state ^= 0x1;
2774 +
2775 +- /* Increment to find next TRB after last_trb. Cycle if appropriate. */
2776 +- trb = &state->new_deq_ptr->generic;
2777 +- if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
2778 +- (trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
2779 +- state->new_cycle_state ^= 0x1;
2780 +- next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
2781 ++ next_trb(xhci, ep_ring, &new_seg, &new_deq);
2782 ++
2783 ++ /* Search wrapped around, bail out */
2784 ++ if (new_deq == ep->ring->dequeue) {
2785 ++ xhci_err(xhci, "Error: Failed finding new dequeue state\n");
2786 ++ state->new_deq_seg = NULL;
2787 ++ state->new_deq_ptr = NULL;
2788 ++ return;
2789 ++ }
2790 ++
2791 ++ } while (!cycle_found || !td_last_trb_found);
2792 ++
2793 ++ state->new_deq_seg = new_seg;
2794 ++ state->new_deq_ptr = new_deq;
2795 +
2796 + /* Don't update the ring cycle state for the producer (us). */
2797 + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
2798 +@@ -3190,9 +3193,11 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2799 + struct xhci_td *td;
2800 + struct scatterlist *sg;
2801 + int num_sgs;
2802 +- int trb_buff_len, this_sg_len, running_total;
2803 ++ int trb_buff_len, this_sg_len, running_total, ret;
2804 + unsigned int total_packet_count;
2805 ++ bool zero_length_needed;
2806 + bool first_trb;
2807 ++ int last_trb_num;
2808 + u64 addr;
2809 + bool more_trbs_coming;
2810 +
2811 +@@ -3208,13 +3213,27 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2812 + total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
2813 + usb_endpoint_maxp(&urb->ep->desc));
2814 +
2815 +- trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
2816 ++ ret = prepare_transfer(xhci, xhci->devs[slot_id],
2817 + ep_index, urb->stream_id,
2818 + num_trbs, urb, 0, mem_flags);
2819 +- if (trb_buff_len < 0)
2820 +- return trb_buff_len;
2821 ++ if (ret < 0)
2822 ++ return ret;
2823 +
2824 + urb_priv = urb->hcpriv;
2825 ++
2826 ++ /* Deal with URB_ZERO_PACKET - need one more td/trb */
2827 ++ zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
2828 ++ urb_priv->length == 2;
2829 ++ if (zero_length_needed) {
2830 ++ num_trbs++;
2831 ++ xhci_dbg(xhci, "Creating zero length td.\n");
2832 ++ ret = prepare_transfer(xhci, xhci->devs[slot_id],
2833 ++ ep_index, urb->stream_id,
2834 ++ 1, urb, 1, mem_flags);
2835 ++ if (ret < 0)
2836 ++ return ret;
2837 ++ }
2838 ++
2839 + td = urb_priv->td[0];
2840 +
2841 + /*
2842 +@@ -3244,6 +3263,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2843 + trb_buff_len = urb->transfer_buffer_length;
2844 +
2845 + first_trb = true;
2846 ++ last_trb_num = zero_length_needed ? 2 : 1;
2847 + /* Queue the first TRB, even if it's zero-length */
2848 + do {
2849 + u32 field = 0;
2850 +@@ -3261,12 +3281,15 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2851 + /* Chain all the TRBs together; clear the chain bit in the last
2852 + * TRB to indicate it's the last TRB in the chain.
2853 + */
2854 +- if (num_trbs > 1) {
2855 ++ if (num_trbs > last_trb_num) {
2856 + field |= TRB_CHAIN;
2857 +- } else {
2858 +- /* FIXME - add check for ZERO_PACKET flag before this */
2859 ++ } else if (num_trbs == last_trb_num) {
2860 + td->last_trb = ep_ring->enqueue;
2861 + field |= TRB_IOC;
2862 ++ } else if (zero_length_needed && num_trbs == 1) {
2863 ++ trb_buff_len = 0;
2864 ++ urb_priv->td[1]->last_trb = ep_ring->enqueue;
2865 ++ field |= TRB_IOC;
2866 + }
2867 +
2868 + /* Only set interrupt on short packet for IN endpoints */
2869 +@@ -3328,7 +3351,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2870 + if (running_total + trb_buff_len > urb->transfer_buffer_length)
2871 + trb_buff_len =
2872 + urb->transfer_buffer_length - running_total;
2873 +- } while (running_total < urb->transfer_buffer_length);
2874 ++ } while (num_trbs > 0);
2875 +
2876 + check_trb_math(urb, num_trbs, running_total);
2877 + giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
2878 +@@ -3346,7 +3369,9 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2879 + int num_trbs;
2880 + struct xhci_generic_trb *start_trb;
2881 + bool first_trb;
2882 ++ int last_trb_num;
2883 + bool more_trbs_coming;
2884 ++ bool zero_length_needed;
2885 + int start_cycle;
2886 + u32 field, length_field;
2887 +
2888 +@@ -3377,7 +3402,6 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2889 + num_trbs++;
2890 + running_total += TRB_MAX_BUFF_SIZE;
2891 + }
2892 +- /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
2893 +
2894 + ret = prepare_transfer(xhci, xhci->devs[slot_id],
2895 + ep_index, urb->stream_id,
2896 +@@ -3386,6 +3410,20 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2897 + return ret;
2898 +
2899 + urb_priv = urb->hcpriv;
2900 ++
2901 ++ /* Deal with URB_ZERO_PACKET - need one more td/trb */
2902 ++ zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
2903 ++ urb_priv->length == 2;
2904 ++ if (zero_length_needed) {
2905 ++ num_trbs++;
2906 ++ xhci_dbg(xhci, "Creating zero length td.\n");
2907 ++ ret = prepare_transfer(xhci, xhci->devs[slot_id],
2908 ++ ep_index, urb->stream_id,
2909 ++ 1, urb, 1, mem_flags);
2910 ++ if (ret < 0)
2911 ++ return ret;
2912 ++ }
2913 ++
2914 + td = urb_priv->td[0];
2915 +
2916 + /*
2917 +@@ -3407,7 +3445,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2918 + trb_buff_len = urb->transfer_buffer_length;
2919 +
2920 + first_trb = true;
2921 +-
2922 ++ last_trb_num = zero_length_needed ? 2 : 1;
2923 + /* Queue the first TRB, even if it's zero-length */
2924 + do {
2925 + u32 remainder = 0;
2926 +@@ -3424,12 +3462,15 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2927 + /* Chain all the TRBs together; clear the chain bit in the last
2928 + * TRB to indicate it's the last TRB in the chain.
2929 + */
2930 +- if (num_trbs > 1) {
2931 ++ if (num_trbs > last_trb_num) {
2932 + field |= TRB_CHAIN;
2933 +- } else {
2934 +- /* FIXME - add check for ZERO_PACKET flag before this */
2935 ++ } else if (num_trbs == last_trb_num) {
2936 + td->last_trb = ep_ring->enqueue;
2937 + field |= TRB_IOC;
2938 ++ } else if (zero_length_needed && num_trbs == 1) {
2939 ++ trb_buff_len = 0;
2940 ++ urb_priv->td[1]->last_trb = ep_ring->enqueue;
2941 ++ field |= TRB_IOC;
2942 + }
2943 +
2944 + /* Only set interrupt on short packet for IN endpoints */
2945 +@@ -3467,7 +3508,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2946 + trb_buff_len = urb->transfer_buffer_length - running_total;
2947 + if (trb_buff_len > TRB_MAX_BUFF_SIZE)
2948 + trb_buff_len = TRB_MAX_BUFF_SIZE;
2949 +- } while (running_total < urb->transfer_buffer_length);
2950 ++ } while (num_trbs > 0);
2951 +
2952 + check_trb_math(urb, num_trbs, running_total);
2953 + giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
2954 +@@ -3534,8 +3575,8 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2955 + if (start_cycle == 0)
2956 + field |= 0x1;
2957 +
2958 +- /* xHCI 1.0 6.4.1.2.1: Transfer Type field */
2959 +- if (xhci->hci_version == 0x100) {
2960 ++ /* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
2961 ++ if (xhci->hci_version >= 0x100) {
2962 + if (urb->transfer_buffer_length > 0) {
2963 + if (setup->bRequestType & USB_DIR_IN)
2964 + field |= TRB_TX_TYPE(TRB_DATA_IN);
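
The xhci hunks above all serve one change: when a bulk OUT URB has URB_ZERO_PACKET set and its length is an exact multiple of the endpoint's max packet size, the host must send an explicit zero-length packet, so the enqueue path reserves a second TD and moves TRB_IOC onto it. A minimal userspace sketch of just the size test added to xhci_urb_enqueue(); "maxp" stands in for usb_endpoint_maxp() and the flag value is illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    #define URB_ZERO_PACKET 0x0040    /* illustrative flag value */

    /* Mirrors the "size = 2" condition: a zero-length TD is needed only
     * when there is data and it ends exactly on a packet boundary. */
    static bool needs_zero_length_td(unsigned int len, unsigned int maxp,
                                     unsigned int flags)
    {
        return len > 0 && (flags & URB_ZERO_PACKET) && (len % maxp) == 0;
    }

    int main(void)
    {
        printf("%d\n", needs_zero_length_td(1024, 512, URB_ZERO_PACKET)); /* 1 */
        printf("%d\n", needs_zero_length_td(1000, 512, URB_ZERO_PACKET)); /* 0 */
        return 0;
    }
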
2965 +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
2966 +index 00686a8c4fa0..3d98a3a82c79 100644
2967 +--- a/drivers/usb/host/xhci.c
2968 ++++ b/drivers/usb/host/xhci.c
2969 +@@ -143,7 +143,8 @@ static int xhci_start(struct xhci_hcd *xhci)
2970 + "waited %u microseconds.\n",
2971 + XHCI_MAX_HALT_USEC);
2972 + if (!ret)
2973 +- xhci->xhc_state &= ~XHCI_STATE_HALTED;
2974 ++ xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
2975 ++
2976 + return ret;
2977 + }
2978 +
2979 +@@ -1318,6 +1319,11 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
2980 +
2981 + if (usb_endpoint_xfer_isoc(&urb->ep->desc))
2982 + size = urb->number_of_packets;
2983 ++ else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
2984 ++ urb->transfer_buffer_length > 0 &&
2985 ++ urb->transfer_flags & URB_ZERO_PACKET &&
2986 ++ !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
2987 ++ size = 2;
2988 + else
2989 + size = 1;
2990 +
2991 +@@ -2902,6 +2908,9 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
2992 + ep_index, ep->stopped_stream, ep->stopped_td,
2993 + &deq_state);
2994 +
2995 ++ if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
2996 ++ return;
2997 ++
2998 + /* HW with the reset endpoint quirk will use the saved dequeue state to
2999 + * issue a configure endpoint command later.
3000 + */
3001 +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
3002 +index 8686a06d83d4..0419137c4732 100644
3003 +--- a/drivers/usb/host/xhci.h
3004 ++++ b/drivers/usb/host/xhci.h
3005 +@@ -1554,6 +1554,7 @@ struct xhci_hcd {
3006 + #define XHCI_PLAT (1 << 16)
3007 + #define XHCI_SLOW_SUSPEND (1 << 17)
3008 + #define XHCI_SPURIOUS_WAKEUP (1 << 18)
3009 ++#define XHCI_PME_STUCK_QUIRK (1 << 20)
3010 + unsigned int num_active_eps;
3011 + unsigned int limit_active_eps;
3012 + /* There are two roothubs to keep track of bus suspend info for */
3013 +diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
3014 +index 77b475a43dad..2ed1695ff5ad 100644
3015 +--- a/drivers/usb/musb/musb_cppi41.c
3016 ++++ b/drivers/usb/musb/musb_cppi41.c
3017 +@@ -507,10 +507,18 @@ static int cppi41_dma_channel_abort(struct dma_channel *channel)
3018 + csr &= ~MUSB_TXCSR_DMAENAB;
3019 + musb_writew(epio, MUSB_TXCSR, csr);
3020 + } else {
3021 ++ cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
3022 ++
3023 ++ /* delay to drain the cppi dma pipeline for isoch */
3024 ++ udelay(250);
3025 ++
3026 + csr = musb_readw(epio, MUSB_RXCSR);
3027 + csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
3028 + musb_writew(epio, MUSB_RXCSR, csr);
3029 +
3030 ++ /* wait to drain the cppi dma pipeline */
3031 ++ udelay(50);
3032 ++
3033 + csr = musb_readw(epio, MUSB_RXCSR);
3034 + if (csr & MUSB_RXCSR_RXPKTRDY) {
3035 + csr |= MUSB_RXCSR_FLUSHFIFO;
3036 +@@ -524,13 +532,14 @@ static int cppi41_dma_channel_abort(struct dma_channel *channel)
3037 + tdbit <<= 16;
3038 +
3039 + do {
3040 +- musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
3041 ++ if (is_tx)
3042 ++ musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
3043 + ret = dmaengine_terminate_all(cppi41_channel->dc);
3044 + } while (ret == -EAGAIN);
3045 +
3046 +- musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
3047 +-
3048 + if (is_tx) {
3049 ++ musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
3050 ++
3051 + csr = musb_readw(epio, MUSB_TXCSR);
3052 + if (csr & MUSB_TXCSR_TXPKTRDY) {
3053 + csr |= MUSB_TXCSR_FLUSHFIFO;
3054 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
3055 +index 096438e4fb0c..c918075e5eae 100644
3056 +--- a/drivers/usb/serial/option.c
3057 ++++ b/drivers/usb/serial/option.c
3058 +@@ -276,6 +276,10 @@ static void option_instat_callback(struct urb *urb);
3059 + #define ZTE_PRODUCT_MF622 0x0001
3060 + #define ZTE_PRODUCT_MF628 0x0015
3061 + #define ZTE_PRODUCT_MF626 0x0031
3062 ++#define ZTE_PRODUCT_ZM8620_X 0x0396
3063 ++#define ZTE_PRODUCT_ME3620_MBIM 0x0426
3064 ++#define ZTE_PRODUCT_ME3620_X 0x1432
3065 ++#define ZTE_PRODUCT_ME3620_L 0x1433
3066 + #define ZTE_PRODUCT_AC2726 0xfff1
3067 + #define ZTE_PRODUCT_CDMA_TECH 0xfffe
3068 + #define ZTE_PRODUCT_AC8710T 0xffff
3069 +@@ -549,6 +553,18 @@ static const struct option_blacklist_info zte_mc2716_z_blacklist = {
3070 + .sendsetup = BIT(1) | BIT(2) | BIT(3),
3071 + };
3072 +
3073 ++static const struct option_blacklist_info zte_me3620_mbim_blacklist = {
3074 ++ .reserved = BIT(2) | BIT(3) | BIT(4),
3075 ++};
3076 ++
3077 ++static const struct option_blacklist_info zte_me3620_xl_blacklist = {
3078 ++ .reserved = BIT(3) | BIT(4) | BIT(5),
3079 ++};
3080 ++
3081 ++static const struct option_blacklist_info zte_zm8620_x_blacklist = {
3082 ++ .reserved = BIT(3) | BIT(4) | BIT(5),
3083 ++};
3084 ++
3085 + static const struct option_blacklist_info huawei_cdc12_blacklist = {
3086 + .reserved = BIT(1) | BIT(2),
3087 + };
3088 +@@ -1579,6 +1595,14 @@ static const struct usb_device_id option_ids[] = {
3089 + .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
3090 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
3091 + .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
3092 ++ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_L),
3093 ++ .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
3094 ++ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_MBIM),
3095 ++ .driver_info = (kernel_ulong_t)&zte_me3620_mbim_blacklist },
3096 ++ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_X),
3097 ++ .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
3098 ++ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ZM8620_X),
3099 ++ .driver_info = (kernel_ulong_t)&zte_zm8620_x_blacklist },
3100 + { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
3101 + { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
3102 + { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
3103 +diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
3104 +index 1e2d86d4f539..aa6d2bea856b 100644
3105 +--- a/drivers/usb/serial/symbolserial.c
3106 ++++ b/drivers/usb/serial/symbolserial.c
3107 +@@ -61,17 +61,15 @@ static void symbol_int_callback(struct urb *urb)
3108 +
3109 + usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data);
3110 +
3111 ++ /*
3112 ++ * Data from the device comes with a 1 byte header:
3113 ++ *
3114 ++ * <size of data> <data>...
3115 ++ */
3116 + if (urb->actual_length > 1) {
3117 +- data_length = urb->actual_length - 1;
3118 +-
3119 +- /*
3120 +- * Data from the device comes with a 1 byte header:
3121 +- *
3122 +- * <size of data>data...
3123 +- * This is real data to be sent to the tty layer
3124 +- * we pretty much just ignore the size and send everything
3125 +- * else to the tty layer.
3126 +- */
3127 ++ data_length = data[0];
3128 ++ if (data_length > (urb->actual_length - 1))
3129 ++ data_length = urb->actual_length - 1;
3130 + tty_insert_flip_string(&port->port, &data[1], data_length);
3131 + tty_flip_buffer_push(&port->port);
3132 + } else {
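
The symbolserial change stops ignoring the device's length byte and instead uses it, clamped to the number of bytes the URB actually delivered, so a corrupt or hostile header can never push more data to the tty layer than was received. A standalone sketch of that clamping rule (frame layout as in the comment above; the function name is invented):

    #include <stddef.h>
    #include <stdio.h>

    /* Parse a "<size> <data>..." frame: trust the length byte only up to
     * what actually arrived on the wire. */
    static size_t frame_payload_len(const unsigned char *data, size_t actual_length)
    {
        size_t data_length;

        if (actual_length <= 1)
            return 0;                        /* header only */
        data_length = data[0];
        if (data_length > actual_length - 1)
            data_length = actual_length - 1; /* clamp a lying header */
        return data_length;
    }

    int main(void)
    {
        unsigned char frame[] = { 200, 'h', 'i' };   /* header claims 200 bytes */
        printf("%zu\n", frame_payload_len(frame, sizeof(frame)));   /* 2 */
        return 0;
    }
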
3133 +diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
3134 +index cc5a430dc357..69fec1a99b3e 100644
3135 +--- a/drivers/usb/serial/whiteheat.c
3136 ++++ b/drivers/usb/serial/whiteheat.c
3137 +@@ -81,6 +81,8 @@ static int whiteheat_firmware_download(struct usb_serial *serial,
3138 + static int whiteheat_firmware_attach(struct usb_serial *serial);
3139 +
3140 + /* function prototypes for the Connect Tech WhiteHEAT serial converter */
3141 ++static int whiteheat_probe(struct usb_serial *serial,
3142 ++ const struct usb_device_id *id);
3143 + static int whiteheat_attach(struct usb_serial *serial);
3144 + static void whiteheat_release(struct usb_serial *serial);
3145 + static int whiteheat_port_probe(struct usb_serial_port *port);
3146 +@@ -117,6 +119,7 @@ static struct usb_serial_driver whiteheat_device = {
3147 + .description = "Connect Tech - WhiteHEAT",
3148 + .id_table = id_table_std,
3149 + .num_ports = 4,
3150 ++ .probe = whiteheat_probe,
3151 + .attach = whiteheat_attach,
3152 + .release = whiteheat_release,
3153 + .port_probe = whiteheat_port_probe,
3154 +@@ -218,6 +221,34 @@ static int whiteheat_firmware_attach(struct usb_serial *serial)
3155 + /*****************************************************************************
3156 + * Connect Tech's White Heat serial driver functions
3157 + *****************************************************************************/
3158 ++
3159 ++static int whiteheat_probe(struct usb_serial *serial,
3160 ++ const struct usb_device_id *id)
3161 ++{
3162 ++ struct usb_host_interface *iface_desc;
3163 ++ struct usb_endpoint_descriptor *endpoint;
3164 ++ size_t num_bulk_in = 0;
3165 ++ size_t num_bulk_out = 0;
3166 ++ size_t min_num_bulk;
3167 ++ unsigned int i;
3168 ++
3169 ++ iface_desc = serial->interface->cur_altsetting;
3170 ++
3171 ++ for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
3172 ++ endpoint = &iface_desc->endpoint[i].desc;
3173 ++ if (usb_endpoint_is_bulk_in(endpoint))
3174 ++ ++num_bulk_in;
3175 ++ if (usb_endpoint_is_bulk_out(endpoint))
3176 ++ ++num_bulk_out;
3177 ++ }
3178 ++
3179 ++ min_num_bulk = COMMAND_PORT + 1;
3180 ++ if (num_bulk_in < min_num_bulk || num_bulk_out < min_num_bulk)
3181 ++ return -ENODEV;
3182 ++
3183 ++ return 0;
3184 ++}
3185 ++
3186 + static int whiteheat_attach(struct usb_serial *serial)
3187 + {
3188 + struct usb_serial_port *command_port;
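
whiteheat_probe() rejects devices that advertise fewer bulk endpoints than the driver will later index through COMMAND_PORT, the usual hardening against a malicious or malformed descriptor. A userspace sketch of the counting check, with an invented descriptor type standing in for struct usb_host_interface:

    #include <stdbool.h>
    #include <stdio.h>

    struct ep { bool bulk; bool in; };   /* stand-in for an endpoint descriptor */

    static int probe_ok(const struct ep *eps, unsigned int n,
                        unsigned int min_num_bulk)
    {
        unsigned int num_bulk_in = 0, num_bulk_out = 0, i;

        for (i = 0; i < n; i++) {
            if (!eps[i].bulk)
                continue;
            if (eps[i].in)
                num_bulk_in++;
            else
                num_bulk_out++;
        }
        if (num_bulk_in < min_num_bulk || num_bulk_out < min_num_bulk)
            return -1;   /* -ENODEV in the driver */
        return 0;
    }

    int main(void)
    {
        struct ep eps[] = { { true, true }, { true, false } };
        printf("%d\n", probe_ok(eps, 2, 5));   /* -1: device rejected */
        return 0;
    }
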
3189 +diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
3190 +index 53039de1495d..db6818878462 100644
3191 +--- a/fs/btrfs/backref.c
3192 ++++ b/fs/btrfs/backref.c
3193 +@@ -1668,7 +1668,6 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
3194 + int found = 0;
3195 + struct extent_buffer *eb;
3196 + struct btrfs_inode_extref *extref;
3197 +- struct extent_buffer *leaf;
3198 + u32 item_size;
3199 + u32 cur_offset;
3200 + unsigned long ptr;
3201 +@@ -1693,9 +1692,8 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
3202 + btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
3203 + btrfs_release_path(path);
3204 +
3205 +- leaf = path->nodes[0];
3206 +- item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3207 +- ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3208 ++ item_size = btrfs_item_size_nr(eb, path->slots[0]);
3209 ++ ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
3210 + cur_offset = 0;
3211 +
3212 + while (cur_offset < item_size) {
3213 +@@ -1709,7 +1707,7 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
3214 + if (ret)
3215 + break;
3216 +
3217 +- cur_offset += btrfs_inode_extref_name_len(leaf, extref);
3218 ++ cur_offset += btrfs_inode_extref_name_len(eb, extref);
3219 + cur_offset += sizeof(*extref);
3220 + }
3221 + btrfs_tree_read_unlock_blocking(eb);
3222 +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
3223 +index 855f6668cb8e..85bcb25384c0 100644
3224 +--- a/fs/btrfs/extent_io.c
3225 ++++ b/fs/btrfs/extent_io.c
3226 +@@ -2642,7 +2642,8 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
3227 + bio_end_io_t end_io_func,
3228 + int mirror_num,
3229 + unsigned long prev_bio_flags,
3230 +- unsigned long bio_flags)
3231 ++ unsigned long bio_flags,
3232 ++ bool force_bio_submit)
3233 + {
3234 + int ret = 0;
3235 + struct bio *bio;
3236 +@@ -2660,6 +2661,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
3237 + contig = bio_end_sector(bio) == sector;
3238 +
3239 + if (prev_bio_flags != bio_flags || !contig ||
3240 ++ force_bio_submit ||
3241 + merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
3242 + bio_add_page(bio, page, page_size, offset) < page_size) {
3243 + ret = submit_one_bio(rw, bio, mirror_num,
3244 +@@ -2751,7 +2753,8 @@ static int __do_readpage(struct extent_io_tree *tree,
3245 + get_extent_t *get_extent,
3246 + struct extent_map **em_cached,
3247 + struct bio **bio, int mirror_num,
3248 +- unsigned long *bio_flags, int rw)
3249 ++ unsigned long *bio_flags, int rw,
3250 ++ u64 *prev_em_start)
3251 + {
3252 + struct inode *inode = page->mapping->host;
3253 + u64 start = page_offset(page);
3254 +@@ -2799,6 +2802,7 @@ static int __do_readpage(struct extent_io_tree *tree,
3255 + }
3256 + while (cur <= end) {
3257 + unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
3258 ++ bool force_bio_submit = false;
3259 +
3260 + if (cur >= last_byte) {
3261 + char *userpage;
3262 +@@ -2849,6 +2853,49 @@ static int __do_readpage(struct extent_io_tree *tree,
3263 + block_start = em->block_start;
3264 + if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
3265 + block_start = EXTENT_MAP_HOLE;
3266 ++
3267 ++ /*
3268 ++ * If we have a file range that points to a compressed extent
3269 ++ * and it's followed by a consecutive file range that points to
2770 ++ * the same compressed extent (possibly with a different
3271 ++ * offset and/or length, so it either points to the whole extent
3272 ++ * or only part of it), we must make sure we do not submit a
3273 ++ * single bio to populate the pages for the 2 ranges because
3274 ++ * this makes the compressed extent read zero out the pages
3275 ++ * belonging to the 2nd range. Imagine the following scenario:
3276 ++ *
3277 ++ * File layout
3278 ++ * [0 - 8K] [8K - 24K]
3279 ++ * | |
3280 ++ * | |
3281 ++ * points to extent X, points to extent X,
3282 ++ * offset 4K, length of 8K offset 0, length 16K
3283 ++ *
3284 ++ * [extent X, compressed length = 4K uncompressed length = 16K]
3285 ++ *
3286 ++ * If the bio to read the compressed extent covers both ranges,
3287 ++ * it will decompress extent X into the pages belonging to the
3288 ++ * first range and then it will stop, zeroing out the remaining
3289 ++ * pages that belong to the other range that points to extent X.
3290 ++ * So here we make sure we submit 2 bios, one for the first
2791 ++ * range and another one for the second range. Both will target
3292 ++ * the same physical extent from disk, but we can't currently
3293 ++ * make the compressed bio endio callback populate the pages
3294 ++ * for both ranges because each compressed bio is tightly
3295 ++ * coupled with a single extent map, and each range can have
3296 ++ * an extent map with a different offset value relative to the
3297 ++ * uncompressed data of our extent and different lengths. This
3298 ++ * is a corner case so we prioritize correctness over
3299 ++ * non-optimal behavior (submitting 2 bios for the same extent).
3300 ++ */
3301 ++ if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
3302 ++ prev_em_start && *prev_em_start != (u64)-1 &&
3303 ++ *prev_em_start != em->orig_start)
3304 ++ force_bio_submit = true;
3305 ++
3306 ++ if (prev_em_start)
3307 ++ *prev_em_start = em->orig_start;
3308 ++
3309 + free_extent_map(em);
3310 + em = NULL;
3311 +
3312 +@@ -2898,7 +2945,8 @@ static int __do_readpage(struct extent_io_tree *tree,
3313 + bdev, bio, pnr,
3314 + end_bio_extent_readpage, mirror_num,
3315 + *bio_flags,
3316 +- this_bio_flag);
3317 ++ this_bio_flag,
3318 ++ force_bio_submit);
3319 + if (!ret) {
3320 + nr++;
3321 + *bio_flags = this_bio_flag;
3322 +@@ -2925,7 +2973,8 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
3323 + get_extent_t *get_extent,
3324 + struct extent_map **em_cached,
3325 + struct bio **bio, int mirror_num,
3326 +- unsigned long *bio_flags, int rw)
3327 ++ unsigned long *bio_flags, int rw,
3328 ++ u64 *prev_em_start)
3329 + {
3330 + struct inode *inode;
3331 + struct btrfs_ordered_extent *ordered;
3332 +@@ -2945,7 +2994,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
3333 +
3334 + for (index = 0; index < nr_pages; index++) {
3335 + __do_readpage(tree, pages[index], get_extent, em_cached, bio,
3336 +- mirror_num, bio_flags, rw);
3337 ++ mirror_num, bio_flags, rw, prev_em_start);
3338 + page_cache_release(pages[index]);
3339 + }
3340 + }
3341 +@@ -2955,7 +3004,8 @@ static void __extent_readpages(struct extent_io_tree *tree,
3342 + int nr_pages, get_extent_t *get_extent,
3343 + struct extent_map **em_cached,
3344 + struct bio **bio, int mirror_num,
3345 +- unsigned long *bio_flags, int rw)
3346 ++ unsigned long *bio_flags, int rw,
3347 ++ u64 *prev_em_start)
3348 + {
3349 + u64 start = 0;
3350 + u64 end = 0;
3351 +@@ -2976,7 +3026,7 @@ static void __extent_readpages(struct extent_io_tree *tree,
3352 + index - first_index, start,
3353 + end, get_extent, em_cached,
3354 + bio, mirror_num, bio_flags,
3355 +- rw);
3356 ++ rw, prev_em_start);
3357 + start = page_start;
3358 + end = start + PAGE_CACHE_SIZE - 1;
3359 + first_index = index;
3360 +@@ -2987,7 +3037,8 @@ static void __extent_readpages(struct extent_io_tree *tree,
3361 + __do_contiguous_readpages(tree, &pages[first_index],
3362 + index - first_index, start,
3363 + end, get_extent, em_cached, bio,
3364 +- mirror_num, bio_flags, rw);
3365 ++ mirror_num, bio_flags, rw,
3366 ++ prev_em_start);
3367 + }
3368 +
3369 + static int __extent_read_full_page(struct extent_io_tree *tree,
3370 +@@ -3013,7 +3064,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
3371 + }
3372 +
3373 + ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
3374 +- bio_flags, rw);
3375 ++ bio_flags, rw, NULL);
3376 + return ret;
3377 + }
3378 +
3379 +@@ -3039,7 +3090,7 @@ int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page,
3380 + int ret;
3381 +
3382 + ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num,
3383 +- &bio_flags, READ);
3384 ++ &bio_flags, READ, NULL);
3385 + if (bio)
3386 + ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
3387 + return ret;
3388 +@@ -3308,7 +3359,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
3389 + sector, iosize, pg_offset,
3390 + bdev, &epd->bio, max_nr,
3391 + end_bio_extent_writepage,
3392 +- 0, 0, 0);
3393 ++ 0, 0, 0, false);
3394 + if (ret)
3395 + SetPageError(page);
3396 + }
3397 +@@ -3479,7 +3530,7 @@ static int write_one_eb(struct extent_buffer *eb,
3398 + ret = submit_extent_page(rw, eb->tree, p, offset >> 9,
3399 + PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
3400 + -1, end_bio_extent_buffer_writepage,
3401 +- 0, epd->bio_flags, bio_flags);
3402 ++ 0, epd->bio_flags, bio_flags, false);
3403 + epd->bio_flags = bio_flags;
3404 + if (ret) {
3405 + set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3406 +@@ -3882,6 +3933,7 @@ int extent_readpages(struct extent_io_tree *tree,
3407 + struct page *page;
3408 + struct extent_map *em_cached = NULL;
3409 + int nr = 0;
3410 ++ u64 prev_em_start = (u64)-1;
3411 +
3412 + for (page_idx = 0; page_idx < nr_pages; page_idx++) {
3413 + page = list_entry(pages->prev, struct page, lru);
3414 +@@ -3898,12 +3950,12 @@ int extent_readpages(struct extent_io_tree *tree,
3415 + if (nr < ARRAY_SIZE(pagepool))
3416 + continue;
3417 + __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
3418 +- &bio, 0, &bio_flags, READ);
3419 ++ &bio, 0, &bio_flags, READ, &prev_em_start);
3420 + nr = 0;
3421 + }
3422 + if (nr)
3423 + __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
3424 +- &bio, 0, &bio_flags, READ);
3425 ++ &bio, 0, &bio_flags, READ, &prev_em_start);
3426 +
3427 + if (em_cached)
3428 + free_extent_map(em_cached);
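
The extent_io.c changes thread a prev_em_start cursor through the readahead path so that, per the long comment above, two file ranges backed by the same compressed extent never share a bio when their extent maps disagree on orig_start. A toy model of just that trigger condition (the range table is invented):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        struct { int compressed; uint64_t orig_start; } ranges[] = {
            { 1, 4096 }, { 1, 0 }, { 0, 0 },
        };
        uint64_t prev_em_start = (uint64_t)-1;   /* "no previous range" */
        unsigned int i;

        for (i = 0; i < 3; i++) {
            int force_bio_submit = ranges[i].compressed &&
                                   prev_em_start != (uint64_t)-1 &&
                                   prev_em_start != ranges[i].orig_start;
            printf("range %u: force_bio_submit=%d\n", i, force_bio_submit);
            prev_em_start = ranges[i].orig_start;
        }
        return 0;
    }
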
3429 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
3430 +index 904ed6d7e4bb..50f08d5f9cbb 100644
3431 +--- a/fs/btrfs/inode.c
3432 ++++ b/fs/btrfs/inode.c
3433 +@@ -4516,7 +4516,8 @@ void btrfs_evict_inode(struct inode *inode)
3434 + goto no_delete;
3435 + }
3436 + /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
3437 +- btrfs_wait_ordered_range(inode, 0, (u64)-1);
3438 ++ if (!special_file(inode->i_mode))
3439 ++ btrfs_wait_ordered_range(inode, 0, (u64)-1);
3440 +
3441 + if (root->fs_info->log_root_recovering) {
3442 + BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3443 +diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
3444 +index fc6f4f3a1a9d..134ed52f616f 100644
3445 +--- a/fs/cifs/cifsencrypt.c
3446 ++++ b/fs/cifs/cifsencrypt.c
3447 +@@ -441,6 +441,48 @@ find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp)
3448 + return 0;
3449 + }
3450 +
3451 ++/* Server has provided av pairs/target info in the type 2 challenge
3452 ++ * packet and we have plucked it and stored it within the smb session.
3453 ++ * We parse that blob here to find the server-given timestamp
3454 ++ * as part of ntlmv2 authentication (or local current time as
3455 ++ * default in case of failure)
3456 ++ */
3457 ++static __le64
3458 ++find_timestamp(struct cifs_ses *ses)
3459 ++{
3460 ++ unsigned int attrsize;
3461 ++ unsigned int type;
3462 ++ unsigned int onesize = sizeof(struct ntlmssp2_name);
3463 ++ unsigned char *blobptr;
3464 ++ unsigned char *blobend;
3465 ++ struct ntlmssp2_name *attrptr;
3466 ++
3467 ++ if (!ses->auth_key.len || !ses->auth_key.response)
3468 ++ return 0;
3469 ++
3470 ++ blobptr = ses->auth_key.response;
3471 ++ blobend = blobptr + ses->auth_key.len;
3472 ++
3473 ++ while (blobptr + onesize < blobend) {
3474 ++ attrptr = (struct ntlmssp2_name *) blobptr;
3475 ++ type = le16_to_cpu(attrptr->type);
3476 ++ if (type == NTLMSSP_AV_EOL)
3477 ++ break;
3478 ++ blobptr += 2; /* advance attr type */
3479 ++ attrsize = le16_to_cpu(attrptr->length);
3480 ++ blobptr += 2; /* advance attr size */
3481 ++ if (blobptr + attrsize > blobend)
3482 ++ break;
3483 ++ if (type == NTLMSSP_AV_TIMESTAMP) {
3484 ++ if (attrsize == sizeof(u64))
3485 ++ return *((__le64 *)blobptr);
3486 ++ }
3487 ++ blobptr += attrsize; /* advance attr value */
3488 ++ }
3489 ++
3490 ++ return cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
3491 ++}
3492 ++
3493 + static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
3494 + const struct nls_table *nls_cp)
3495 + {
3496 +@@ -630,6 +672,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
3497 + struct ntlmv2_resp *buf;
3498 + char ntlmv2_hash[16];
3499 + unsigned char *tiblob = NULL; /* target info blob */
3500 ++ __le64 rsp_timestamp;
3501 +
3502 + if (ses->server->negflavor == CIFS_NEGFLAVOR_EXTENDED) {
3503 + if (!ses->domainName) {
3504 +@@ -648,6 +691,12 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
3505 + }
3506 + }
3507 +
3508 ++ /* Must be within 5 minutes of the server (or within +/-2h
3509 ++ * in case of Mac OS X), so simply carry over server timestamp
3510 ++ * (as Windows 7 does)
3511 ++ */
3512 ++ rsp_timestamp = find_timestamp(ses);
3513 ++
3514 + baselen = CIFS_SESS_KEY_SIZE + sizeof(struct ntlmv2_resp);
3515 + tilen = ses->auth_key.len;
3516 + tiblob = ses->auth_key.response;
3517 +@@ -664,7 +713,8 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
3518 + (ses->auth_key.response + CIFS_SESS_KEY_SIZE);
3519 + buf->blob_signature = cpu_to_le32(0x00000101);
3520 + buf->reserved = 0;
3521 +- buf->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
3522 ++ buf->time = rsp_timestamp;
3523 ++
3524 + get_random_bytes(&buf->client_chal, sizeof(buf->client_chal));
3525 + buf->reserved2 = 0;
3526 +
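
find_timestamp() walks the NTLMSSP target-info blob, a sequence of (type, length, value) attributes, and only falls back to the local clock when no timestamp attribute is found. A self-contained sketch of a bounds-checked TLV walk of the same shape; the type codes and byte handling are simplified stand-ins for the ntlmssp2_name parsing:

    #include <stdint.h>
    #include <stdio.h>

    #define AV_EOL       0   /* illustrative type codes */
    #define AV_TIMESTAMP 7

    static int find_av(const uint8_t *blob, size_t len, uint16_t want,
                       const uint8_t **val, uint16_t *vlen)
    {
        const uint8_t *p = blob, *end = blob + len;

        while (p + 4 <= end) {               /* room for type + length */
            uint16_t type = p[0] | (p[1] << 8);
            uint16_t alen = p[2] | (p[3] << 8);

            if (type == AV_EOL)
                break;
            p += 4;
            if (alen > end - p)              /* attribute overruns blob */
                break;
            if (type == want) {
                *val = p;
                *vlen = alen;
                return 0;
            }
            p += alen;                       /* advance past the value */
        }
        return -1;
    }

    int main(void)
    {
        uint8_t blob[] = { 7,0, 8,0, 1,2,3,4,5,6,7,8, 0,0, 0,0 };
        const uint8_t *v; uint16_t vl;
        printf("%d\n", find_av(blob, sizeof(blob), AV_TIMESTAMP, &v, &vl)); /* 0 */
        return 0;
    }
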
3527 +diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
3528 +index 5f1f3285479e..ea938a8bf240 100644
3529 +--- a/fs/cifs/cifssmb.c
3530 ++++ b/fs/cifs/cifssmb.c
3531 +@@ -629,9 +629,8 @@ CIFSSMBNegotiate(const unsigned int xid, struct cifs_ses *ses)
3532 + server->negflavor = CIFS_NEGFLAVOR_UNENCAP;
3533 + memcpy(ses->server->cryptkey, pSMBr->u.EncryptionKey,
3534 + CIFS_CRYPTO_KEY_SIZE);
3535 +- } else if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC ||
3536 +- server->capabilities & CAP_EXTENDED_SECURITY) &&
3537 +- (pSMBr->EncryptionKeyLength == 0)) {
3538 ++ } else if (pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC ||
3539 ++ server->capabilities & CAP_EXTENDED_SECURITY) {
3540 + server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
3541 + rc = decode_ext_sec_blob(ses, pSMBr);
3542 + } else if (server->sec_mode & SECMODE_PW_ENCRYPT) {
3543 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
3544 +index 6f79cd867a2e..57519567b2ac 100644
3545 +--- a/fs/cifs/smb2ops.c
3546 ++++ b/fs/cifs/smb2ops.c
3547 +@@ -49,9 +49,13 @@ change_conf(struct TCP_Server_Info *server)
3548 + break;
3549 + default:
3550 + server->echoes = true;
3551 +- server->oplocks = true;
3552 ++ if (enable_oplocks) {
3553 ++ server->oplocks = true;
3554 ++ server->oplock_credits = 1;
3555 ++ } else
3556 ++ server->oplocks = false;
3557 ++
3558 + server->echo_credits = 1;
3559 +- server->oplock_credits = 1;
3560 + }
3561 + server->credits -= server->echo_credits + server->oplock_credits;
3562 + return 0;
3563 +diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
3564 +index b892355f1944..d4c7e470dec8 100644
3565 +--- a/fs/jbd2/checkpoint.c
3566 ++++ b/fs/jbd2/checkpoint.c
3567 +@@ -475,14 +475,15 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
3568 + * journal_clean_one_cp_list
3569 + *
3570 + * Find all the written-back checkpoint buffers in the given list and
3571 +- * release them.
3572 ++ * release them. If 'destroy' is set, clean all buffers unconditionally.
3573 + *
3574 + * Called with the journal locked.
3575 + * Called with j_list_lock held.
3576 + * Returns number of buffers reaped (for debug)
3577 + */
3578 +
3579 +-static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
3580 ++static int journal_clean_one_cp_list(struct journal_head *jh, bool destroy,
3581 ++ int *released)
3582 + {
3583 + struct journal_head *last_jh;
3584 + struct journal_head *next_jh = jh;
3585 +@@ -496,7 +497,10 @@ static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
3586 + do {
3587 + jh = next_jh;
3588 + next_jh = jh->b_cpnext;
3589 +- ret = __try_to_free_cp_buf(jh);
3590 ++ if (!destroy)
3591 ++ ret = __try_to_free_cp_buf(jh);
3592 ++ else
3593 ++ ret = __jbd2_journal_remove_checkpoint(jh) + 1;
3594 + if (ret) {
3595 + freed++;
3596 + if (ret == 2) {
3597 +@@ -521,13 +525,14 @@ static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
3598 + * journal_clean_checkpoint_list
3599 + *
3600 + * Find all the written-back checkpoint buffers in the journal and release them.
3601 ++ * If 'destroy' is set, release all buffers unconditionally.
3602 + *
3603 + * Called with the journal locked.
3604 + * Called with j_list_lock held.
3605 + * Returns number of buffers reaped (for debug)
3606 + */
3607 +
3608 +-int __jbd2_journal_clean_checkpoint_list(journal_t *journal)
3609 ++int __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy)
3610 + {
3611 + transaction_t *transaction, *last_transaction, *next_transaction;
3612 + int ret = 0;
3613 +@@ -543,7 +548,7 @@ int __jbd2_journal_clean_checkpoint_list(journal_t *journal)
3614 + transaction = next_transaction;
3615 + next_transaction = transaction->t_cpnext;
3616 + ret += journal_clean_one_cp_list(transaction->
3617 +- t_checkpoint_list, &released);
3618 ++ t_checkpoint_list, destroy, &released);
3619 + /*
3620 + * This function only frees up some memory if possible so we
3621 + * dont have an obligation to finish processing. Bail out if
3622 +@@ -559,7 +564,7 @@ int __jbd2_journal_clean_checkpoint_list(journal_t *journal)
3623 + * we can possibly see not yet submitted buffers on io_list
3624 + */
3625 + ret += journal_clean_one_cp_list(transaction->
3626 +- t_checkpoint_io_list, &released);
3627 ++ t_checkpoint_io_list, destroy, &released);
3628 + if (need_resched())
3629 + goto out;
3630 + } while (transaction != last_transaction);
3631 +@@ -568,6 +573,28 @@ out:
3632 + }
3633 +
3634 + /*
3635 ++ * Remove buffers from all checkpoint lists as the journal is aborted and we just
3636 ++ * need to free memory
3637 ++ */
3638 ++void jbd2_journal_destroy_checkpoint(journal_t *journal)
3639 ++{
3640 ++ /*
3641 ++ * We loop because __jbd2_journal_clean_checkpoint_list() may abort
3642 ++ * early due to the need to reschedule.
3643 ++ */
3644 ++ while (1) {
3645 ++ spin_lock(&journal->j_list_lock);
3646 ++ if (!journal->j_checkpoint_transactions) {
3647 ++ spin_unlock(&journal->j_list_lock);
3648 ++ break;
3649 ++ }
3650 ++ __jbd2_journal_clean_checkpoint_list(journal, true);
3651 ++ spin_unlock(&journal->j_list_lock);
3652 ++ cond_resched();
3653 ++ }
3654 ++}
3655 ++
3656 ++/*
3657 + * journal_remove_checkpoint: called after a buffer has been committed
3658 + * to disk (either by being write-back flushed to disk, or being
3659 + * committed to the log).
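
jbd2_journal_destroy_checkpoint() loops because the cleaner may return early to allow rescheduling; the shape is lock, test for completion, process a chunk, unlock, yield. A userspace sketch of that loop, with a pthread mutex and sched_yield() standing in for j_list_lock and cond_resched():

    #include <pthread.h>
    #include <sched.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int remaining = 10;   /* stand-in for checkpoint list entries */

    static void destroy_all(void)
    {
        for (;;) {
            pthread_mutex_lock(&lock);
            if (remaining == 0) {
                pthread_mutex_unlock(&lock);
                break;
            }
            remaining = remaining > 4 ? remaining - 4 : 0;   /* one chunk */
            pthread_mutex_unlock(&lock);
            sched_yield();   /* userspace stand-in for cond_resched() */
        }
    }

    int main(void)
    {
        destroy_all();
        printf("remaining=%d\n", remaining);   /* 0 */
        return 0;
    }
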
3660 +diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
3661 +index 9181c2b22b3c..4207cf2caa87 100644
3662 +--- a/fs/jbd2/commit.c
3663 ++++ b/fs/jbd2/commit.c
3664 +@@ -510,7 +510,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
3665 + * frees some memory
3666 + */
3667 + spin_lock(&journal->j_list_lock);
3668 +- __jbd2_journal_clean_checkpoint_list(journal);
3669 ++ __jbd2_journal_clean_checkpoint_list(journal, false);
3670 + spin_unlock(&journal->j_list_lock);
3671 +
3672 + jbd_debug(3, "JBD2: commit phase 1\n");
3673 +diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
3674 +index 614ecbf8a48c..2ebb7aadb381 100644
3675 +--- a/fs/jbd2/journal.c
3676 ++++ b/fs/jbd2/journal.c
3677 +@@ -1710,8 +1710,17 @@ int jbd2_journal_destroy(journal_t *journal)
3678 + while (journal->j_checkpoint_transactions != NULL) {
3679 + spin_unlock(&journal->j_list_lock);
3680 + mutex_lock(&journal->j_checkpoint_mutex);
3681 +- jbd2_log_do_checkpoint(journal);
3682 ++ err = jbd2_log_do_checkpoint(journal);
3683 + mutex_unlock(&journal->j_checkpoint_mutex);
3684 ++ /*
3685 ++ * If checkpointing failed, just free the buffers to avoid
3686 ++ * looping forever
3687 ++ */
3688 ++ if (err) {
3689 ++ jbd2_journal_destroy_checkpoint(journal);
3690 ++ spin_lock(&journal->j_list_lock);
3691 ++ break;
3692 ++ }
3693 + spin_lock(&journal->j_list_lock);
3694 + }
3695 +
3696 +diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
3697 +index 01613b382b0e..6f692f8ac664 100644
3698 +--- a/include/asm-generic/barrier.h
3699 ++++ b/include/asm-generic/barrier.h
3700 +@@ -1,4 +1,5 @@
3701 +-/* Generic barrier definitions, based on MN10300 definitions.
3702 ++/*
3703 ++ * Generic barrier definitions, originally based on MN10300 definitions.
3704 + *
3705 + * It should be possible to use these on really simple architectures,
3706 + * but it serves more as a starting point for new ports.
3707 +@@ -16,35 +17,50 @@
3708 +
3709 + #ifndef __ASSEMBLY__
3710 +
3711 +-#define nop() asm volatile ("nop")
3712 ++#include <linux/compiler.h>
3713 ++
3714 ++#ifndef nop
3715 ++#define nop() asm volatile ("nop")
3716 ++#endif
3717 +
3718 + /*
3719 +- * Force strict CPU ordering.
3720 +- * And yes, this is required on UP too when we're talking
3721 +- * to devices.
3722 ++ * Force strict CPU ordering. And yes, this is required on UP too when we're
3723 ++ * talking to devices.
3724 + *
3725 +- * This implementation only contains a compiler barrier.
3726 ++ * Fall back to compiler barriers if nothing better is provided.
3727 + */
3728 +
3729 +-#define mb() asm volatile ("": : :"memory")
3730 ++#ifndef mb
3731 ++#define mb() barrier()
3732 ++#endif
3733 ++
3734 ++#ifndef rmb
3735 + #define rmb() mb()
3736 +-#define wmb() asm volatile ("": : :"memory")
3737 ++#endif
3738 ++
3739 ++#ifndef wmb
3740 ++#define wmb() mb()
3741 ++#endif
3742 ++
3743 ++#ifndef read_barrier_depends
3744 ++#define read_barrier_depends() do { } while (0)
3745 ++#endif
3746 +
3747 + #ifdef CONFIG_SMP
3748 + #define smp_mb() mb()
3749 + #define smp_rmb() rmb()
3750 + #define smp_wmb() wmb()
3751 ++#define smp_read_barrier_depends() read_barrier_depends()
3752 + #else
3753 + #define smp_mb() barrier()
3754 + #define smp_rmb() barrier()
3755 + #define smp_wmb() barrier()
3756 ++#define smp_read_barrier_depends() do { } while (0)
3757 + #endif
3758 +
3759 +-#define set_mb(var, value) do { var = value; mb(); } while (0)
3760 +-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
3761 +-
3762 +-#define read_barrier_depends() do {} while (0)
3763 +-#define smp_read_barrier_depends() do {} while (0)
3764 ++#ifndef set_mb
3765 ++#define set_mb(var, value) do { (var) = (value); mb(); } while (0)
3766 ++#endif
3767 +
3768 + #define smp_store_release(p, v) \
3769 + do { \
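
The reworked asm-generic/barrier.h turns each primitive into an #ifndef-guarded default, so an architecture header defines only what it implements and the generic file fills in the rest with safe fallbacks. A compressed illustration of the override pattern, with puts() in place of real barrier instructions:

    #include <stdio.h>

    /* --- pretend arch header: only mb() is provided --- */
    #define mb() puts("arch mb")

    /* --- generic header: everything else falls back --- */
    #ifndef mb
    #define mb() puts("generic mb")
    #endif
    #ifndef rmb
    #define rmb() mb()   /* a full barrier is always a safe default */
    #endif
    #ifndef wmb
    #define wmb() mb()
    #endif

    int main(void)
    {
        mb();    /* "arch mb": the arch override wins */
        rmb();   /* "arch mb": the fallback routes to the arch mb() */
        wmb();
        return 0;
    }
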
3770 +diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
3771 +index e1fb0f613a99..385593d748f6 100644
3772 +--- a/include/linux/jbd2.h
3773 ++++ b/include/linux/jbd2.h
3774 +@@ -1042,8 +1042,9 @@ void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
3775 + extern void jbd2_journal_commit_transaction(journal_t *);
3776 +
3777 + /* Checkpoint list management */
3778 +-int __jbd2_journal_clean_checkpoint_list(journal_t *journal);
3779 ++int __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy);
3780 + int __jbd2_journal_remove_checkpoint(struct journal_head *);
3781 ++void jbd2_journal_destroy_checkpoint(journal_t *journal);
3782 + void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
3783 +
3784 +
3785 +diff --git a/include/linux/security.h b/include/linux/security.h
3786 +index 9d37e2b9d3ec..dd7c1a16ab5e 100644
3787 +--- a/include/linux/security.h
3788 ++++ b/include/linux/security.h
3789 +@@ -2441,7 +2441,7 @@ static inline int security_task_prctl(int option, unsigned long arg2,
3790 + unsigned long arg4,
3791 + unsigned long arg5)
3792 + {
3793 +- return cap_task_prctl(option, arg2, arg3, arg3, arg5);
3794 ++ return cap_task_prctl(option, arg2, arg3, arg4, arg5);
3795 + }
3796 +
3797 + static inline void security_task_to_inode(struct task_struct *p, struct inode *inode)
3798 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
3799 +index 79147dc9630d..16e753a9922a 100644
3800 +--- a/include/linux/skbuff.h
3801 ++++ b/include/linux/skbuff.h
3802 +@@ -2264,6 +2264,9 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb,
3803 + {
3804 + if (skb->ip_summed == CHECKSUM_COMPLETE)
3805 + skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
3806 ++ else if (skb->ip_summed == CHECKSUM_PARTIAL &&
3807 ++ skb_checksum_start_offset(skb) < 0)
3808 ++ skb->ip_summed = CHECKSUM_NONE;
3809 + }
3810 +
3811 + unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
3812 +@@ -2350,7 +2353,8 @@ extern int skb_copy_datagram_iovec(const struct sk_buff *from,
3813 + int size);
3814 + extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
3815 + int hlen,
3816 +- struct iovec *iov);
3817 ++ struct iovec *iov,
3818 ++ int len);
3819 + extern int skb_copy_datagram_from_iovec(struct sk_buff *skb,
3820 + int offset,
3821 + const struct iovec *from,
3822 +diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
3823 +index d8ee9fd7ca4e..914ce51fa056 100644
3824 +--- a/include/linux/usb/hcd.h
3825 ++++ b/include/linux/usb/hcd.h
3826 +@@ -140,6 +140,7 @@ struct usb_hcd {
3827 + unsigned wireless:1; /* Wireless USB HCD */
3828 + unsigned authorized_default:1;
3829 + unsigned has_tt:1; /* Integrated TT in root hub */
3830 ++ unsigned amd_resume_bug:1; /* AMD remote wakeup quirk */
3831 +
3832 + unsigned int irq; /* irq allocated */
3833 + void __iomem *regs; /* device memory/io */
3834 +@@ -428,6 +429,8 @@ extern int usb_hcd_pci_probe(struct pci_dev *dev,
3835 + extern void usb_hcd_pci_remove(struct pci_dev *dev);
3836 + extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
3837 +
3838 ++extern int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *dev);
3839 ++
3840 + #ifdef CONFIG_PM
3841 + extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
3842 + #endif
3843 +diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
3844 +index 3fb428883460..a4abaeb3fb00 100644
3845 +--- a/include/linux/usb/quirks.h
3846 ++++ b/include/linux/usb/quirks.h
3847 +@@ -41,13 +41,10 @@
3848 + */
3849 + #define USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL 0x00000080
3850 +
3851 +-/* device generates spurious wakeup, ignore remote wakeup capability */
3852 +-#define USB_QUIRK_IGNORE_REMOTE_WAKEUP 0x00000200
3853 ++/* device can't handle device_qualifier descriptor requests */
3854 ++#define USB_QUIRK_DEVICE_QUALIFIER 0x00000100
3855 +
3856 + /* device generates spurious wakeup, ignore remote wakeup capability */
3857 + #define USB_QUIRK_IGNORE_REMOTE_WAKEUP 0x00000200
3858 +
3859 +-/* device can't handle device_qualifier descriptor requests */
3860 +-#define USB_QUIRK_DEVICE_QUALIFIER 0x00000100
3861 +-
3862 + #endif /* __LINUX_USB_QUIRKS_H */
3863 +diff --git a/include/net/af_unix.h b/include/net/af_unix.h
3864 +index a175ba4a7adb..dfe4ddfbb43c 100644
3865 +--- a/include/net/af_unix.h
3866 ++++ b/include/net/af_unix.h
3867 +@@ -64,7 +64,11 @@ struct unix_sock {
3868 + #define UNIX_GC_MAYBE_CYCLE 1
3869 + struct socket_wq peer_wq;
3870 + };
3871 +-#define unix_sk(__sk) ((struct unix_sock *)__sk)
3872 ++
3873 ++static inline struct unix_sock *unix_sk(struct sock *sk)
3874 ++{
3875 ++ return (struct unix_sock *)sk;
3876 ++}
3877 +
3878 + #define peer_wait peer_wq.wait
3879 +
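
Turning unix_sk() from a macro into a static inline changes nothing at runtime; it adds type checking, since the macro's cast silently accepts any pointer while the inline requires a struct sock *. A small sketch of the difference (struct layouts are stand-ins for the kernel's):

    #include <stdio.h>

    struct sock { int dummy; };
    struct unix_sock { struct sock sk; int extra; };   /* sock must come first */

    /* old style: no argument type checking at all */
    #define unix_sk_macro(__sk) ((struct unix_sock *)__sk)

    /* new style: the argument must really be a struct sock * */
    static inline struct unix_sock *unix_sk(struct sock *sk)
    {
        return (struct unix_sock *)sk;
    }

    int main(void)
    {
        struct unix_sock u = { { 0 }, 42 };

        /* unix_sk(&u.extra) would now draw a compiler diagnostic;
         * unix_sk_macro(&u.extra) compiles silently. */
        printf("%d\n", unix_sk(&u.sk)->extra);   /* 42 */
        return 0;
    }
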
3880 +diff --git a/include/net/sock.h b/include/net/sock.h
3881 +index d157f4f56f01..4f355e69e5d2 100644
3882 +--- a/include/net/sock.h
3883 ++++ b/include/net/sock.h
3884 +@@ -788,6 +788,14 @@ static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *s
3885 + if (sk_rcvqueues_full(sk, skb, limit))
3886 + return -ENOBUFS;
3887 +
3888 ++ /*
3889 ++ * If the skb was allocated from pfmemalloc reserves, only
3890 ++ * allow SOCK_MEMALLOC sockets to use it as this socket is
3891 ++ * helping free memory
3892 ++ */
3893 ++ if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
3894 ++ return -ENOMEM;
3895 ++
3896 + __sk_add_backlog(sk, skb);
3897 + sk->sk_backlog.len += skb->truesize;
3898 + return 0;
3899 +diff --git a/include/xen/interface/sched.h b/include/xen/interface/sched.h
3900 +index 9ce083960a25..f18490985fc8 100644
3901 +--- a/include/xen/interface/sched.h
3902 ++++ b/include/xen/interface/sched.h
3903 +@@ -107,5 +107,13 @@ struct sched_watchdog {
3904 + #define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */
3905 + #define SHUTDOWN_crash 3 /* Tell controller we've crashed. */
3906 + #define SHUTDOWN_watchdog 4 /* Restart because watchdog time expired. */
3907 ++/*
3908 ++ * The domain asked to perform a 'soft reset'. The expected behavior is to
3909 ++ * reset internal Xen state for the domain, returning it to the point where it
3910 ++ * was created but leaving the domain's memory contents and vCPU contexts
3911 ++ * intact. This will allow the domain to start over and set up all Xen specific
3912 ++ * interfaces again.
3913 ++ */
3914 ++#define SHUTDOWN_soft_reset 5
3915 +
3916 + #endif /* __XEN_PUBLIC_SCHED_H__ */
3917 +diff --git a/ipc/msg.c b/ipc/msg.c
3918 +index 52770bfde2a5..32aaaab15c5c 100644
3919 +--- a/ipc/msg.c
3920 ++++ b/ipc/msg.c
3921 +@@ -202,13 +202,6 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
3922 + return retval;
3923 + }
3924 +
3925 +- /* ipc_addid() locks msq upon success. */
3926 +- id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
3927 +- if (id < 0) {
3928 +- ipc_rcu_putref(msq, msg_rcu_free);
3929 +- return id;
3930 +- }
3931 +-
3932 + msq->q_stime = msq->q_rtime = 0;
3933 + msq->q_ctime = get_seconds();
3934 + msq->q_cbytes = msq->q_qnum = 0;
3935 +@@ -218,6 +211,13 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
3936 + INIT_LIST_HEAD(&msq->q_receivers);
3937 + INIT_LIST_HEAD(&msq->q_senders);
3938 +
3939 ++ /* ipc_addid() locks msq upon success. */
3940 ++ id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
3941 ++ if (id < 0) {
3942 ++ ipc_rcu_putref(msq, msg_rcu_free);
3943 ++ return id;
3944 ++ }
3945 ++
3946 + ipc_unlock_object(&msq->q_perm);
3947 + rcu_read_unlock();
3948 +
3949 +diff --git a/ipc/shm.c b/ipc/shm.c
3950 +index 623bc3877118..02f7125c8a0f 100644
3951 +--- a/ipc/shm.c
3952 ++++ b/ipc/shm.c
3953 +@@ -545,12 +545,6 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
3954 + if (IS_ERR(file))
3955 + goto no_file;
3956 +
3957 +- id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
3958 +- if (id < 0) {
3959 +- error = id;
3960 +- goto no_id;
3961 +- }
3962 +-
3963 + shp->shm_cprid = task_tgid_vnr(current);
3964 + shp->shm_lprid = 0;
3965 + shp->shm_atim = shp->shm_dtim = 0;
3966 +@@ -560,6 +554,12 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
3967 + shp->shm_file = file;
3968 + shp->shm_creator = current;
3969 +
3970 ++ id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
3971 ++ if (id < 0) {
3972 ++ error = id;
3973 ++ goto no_id;
3974 ++ }
3975 ++
3976 + /*
3977 + * shmid gets reported as "inode#" in /proc/pid/maps.
3978 + * proc-ps tools use this. Changing this will break them.
3979 +diff --git a/ipc/util.c b/ipc/util.c
3980 +index 7684f41bce76..735342570a87 100644
3981 +--- a/ipc/util.c
3982 ++++ b/ipc/util.c
3983 +@@ -292,6 +292,10 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
3984 + rcu_read_lock();
3985 + spin_lock(&new->lock);
3986 +
3987 ++ current_euid_egid(&euid, &egid);
3988 ++ new->cuid = new->uid = euid;
3989 ++ new->gid = new->cgid = egid;
3990 ++
3991 + id = idr_alloc(&ids->ipcs_idr, new,
3992 + (next_id < 0) ? 0 : ipcid_to_idx(next_id), 0,
3993 + GFP_NOWAIT);
3994 +@@ -304,10 +308,6 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
3995 +
3996 + ids->in_use++;
3997 +
3998 +- current_euid_egid(&euid, &egid);
3999 +- new->cuid = new->uid = euid;
4000 +- new->gid = new->cgid = egid;
4001 +-
4002 + if (next_id < 0) {
4003 + new->seq = ids->seq++;
4004 + if (ids->seq > ids->seq_max)
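
The msg.c, shm.c and util.c hunks all enforce one rule: finish initializing an object before ipc_addid()/idr_alloc() publishes it, because the id allocation makes it reachable by concurrent lookups that would otherwise observe half-initialized uid/gid fields. A C11 sketch of the publish/lookup pairing, with a single-slot registry standing in for the idr:

    #include <stdatomic.h>
    #include <stdio.h>

    struct obj { int uid; int gid; };

    static _Atomic(struct obj *) registry;   /* what "idr_alloc" stores into */

    static void publish(struct obj *o)
    {
        o->uid = 1000;                       /* init BEFORE publication ... */
        o->gid = 1000;
        atomic_store_explicit(&registry, o, memory_order_release);
    }

    static void lookup(void)
    {
        struct obj *o = atomic_load_explicit(&registry, memory_order_acquire);

        if (o)                               /* acquire pairs with release */
            printf("uid=%d gid=%d\n", o->uid, o->gid);
    }

    int main(void)
    {
        static struct obj o;
        publish(&o);
        lookup();
        return 0;
    }
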
4005 +diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
4006 +index 095cd7230aef..56d7272199ff 100644
4007 +--- a/kernel/irq/proc.c
4008 ++++ b/kernel/irq/proc.c
4009 +@@ -12,6 +12,7 @@
4010 + #include <linux/seq_file.h>
4011 + #include <linux/interrupt.h>
4012 + #include <linux/kernel_stat.h>
4013 ++#include <linux/mutex.h>
4014 +
4015 + #include "internals.h"
4016 +
4017 +@@ -326,18 +327,29 @@ void register_handler_proc(unsigned int irq, struct irqaction *action)
4018 +
4019 + void register_irq_proc(unsigned int irq, struct irq_desc *desc)
4020 + {
4021 ++ static DEFINE_MUTEX(register_lock);
4022 + char name [MAX_NAMELEN];
4023 +
4024 +- if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir)
4025 ++ if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
4026 + return;
4027 +
4028 ++ /*
4029 ++ * irq directories are registered only when a handler is
4030 ++ * added, not when the descriptor is created, so multiple
4031 ++ * tasks might try to register at the same time.
4032 ++ */
4033 ++ mutex_lock(&register_lock);
4034 ++
4035 ++ if (desc->dir)
4036 ++ goto out_unlock;
4037 ++
4038 + memset(name, 0, MAX_NAMELEN);
4039 + sprintf(name, "%d", irq);
4040 +
4041 + /* create /proc/irq/1234 */
4042 + desc->dir = proc_mkdir(name, root_irq_dir);
4043 + if (!desc->dir)
4044 +- return;
4045 ++ goto out_unlock;
4046 +
4047 + #ifdef CONFIG_SMP
4048 + /* create /proc/irq/<irq>/smp_affinity */
4049 +@@ -358,6 +370,9 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
4050 +
4051 + proc_create_data("spurious", 0444, desc->dir,
4052 + &irq_spurious_proc_fops, (void *)(long)irq);
4053 ++
4054 ++out_unlock:
4055 ++ mutex_unlock(&register_lock);
4056 + }
4057 +
4058 + void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
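
The register_irq_proc() fix moves the "already registered?" test under a mutex: /proc/irq directories are created on first handler registration, so two tasks can reach the function concurrently and both see desc->dir == NULL. The shape of the fix, sketched with pthreads:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t register_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool registered;   /* stand-in for desc->dir */

    static void register_once(void)
    {
        pthread_mutex_lock(&register_lock);
        if (registered)
            goto out_unlock;      /* lost the race: someone else did it */
        registered = true;
        puts("registered");
    out_unlock:
        pthread_mutex_unlock(&register_lock);
    }

    int main(void)
    {
        register_once();
        register_once();          /* second call is a no-op */
        return 0;
    }
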
4059 +diff --git a/kernel/rcutree.c b/kernel/rcutree.c
4060 +index e27526232b5f..a92bd6bd2bf1 100644
4061 +--- a/kernel/rcutree.c
4062 ++++ b/kernel/rcutree.c
4063 +@@ -802,8 +802,11 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
4064 +
4065 + static void record_gp_stall_check_time(struct rcu_state *rsp)
4066 + {
4067 +- rsp->gp_start = jiffies;
4068 +- rsp->jiffies_stall = jiffies + rcu_jiffies_till_stall_check();
4069 ++ unsigned long j = ACCESS_ONCE(jiffies);
4070 ++
4071 ++ rsp->gp_start = j;
4072 ++ smp_wmb(); /* Record start time before stall time. */
4073 ++ rsp->jiffies_stall = j + rcu_jiffies_till_stall_check();
4074 + }
4075 +
4076 + /*
4077 +@@ -932,17 +935,48 @@ static void print_cpu_stall(struct rcu_state *rsp)
4078 +
4079 + static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
4080 + {
4081 ++ unsigned long completed;
4082 ++ unsigned long gpnum;
4083 ++ unsigned long gps;
4084 + unsigned long j;
4085 + unsigned long js;
4086 + struct rcu_node *rnp;
4087 +
4088 +- if (rcu_cpu_stall_suppress)
4089 ++ if (rcu_cpu_stall_suppress || !rcu_gp_in_progress(rsp))
4090 + return;
4091 + j = ACCESS_ONCE(jiffies);
4092 ++
4093 ++ /*
4094 ++ * Lots of memory barriers to reject false positives.
4095 ++ *
4096 ++ * The idea is to pick up rsp->gpnum, then rsp->jiffies_stall,
4097 ++ * then rsp->gp_start, and finally rsp->completed. These values
4098 ++ * are updated in the opposite order with memory barriers (or
4099 ++ * equivalent) during grace-period initialization and cleanup.
4100 ++ * Now, a false positive can occur if we get a new value of
4101 ++ * rsp->gp_start and an old value of rsp->jiffies_stall. But given
4102 ++ * the memory barriers, the only way that this can happen is if one
4103 ++ * grace period ends and another starts between these two fetches.
4104 ++ * Detect this by comparing rsp->completed with the previous fetch
4105 ++ * from rsp->gpnum.
4106 ++ *
4107 ++ * Given this check, comparisons of jiffies, rsp->jiffies_stall,
4108 ++ * and rsp->gp_start suffice to forestall false positives.
4109 ++ */
4110 ++ gpnum = ACCESS_ONCE(rsp->gpnum);
4111 ++ smp_rmb(); /* Pick up ->gpnum first... */
4112 + js = ACCESS_ONCE(rsp->jiffies_stall);
4113 ++ smp_rmb(); /* ...then ->jiffies_stall before the rest... */
4114 ++ gps = ACCESS_ONCE(rsp->gp_start);
4115 ++ smp_rmb(); /* ...and finally ->gp_start before ->completed. */
4116 ++ completed = ACCESS_ONCE(rsp->completed);
4117 ++ if (ULONG_CMP_GE(completed, gpnum) ||
4118 ++ ULONG_CMP_LT(j, js) ||
4119 ++ ULONG_CMP_GE(gps, js))
4120 ++ return; /* No stall or GP completed since entering function. */
4121 + rnp = rdp->mynode;
4122 + if (rcu_gp_in_progress(rsp) &&
4123 +- (ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && ULONG_CMP_GE(j, js)) {
4124 ++ (ACCESS_ONCE(rnp->qsmask) & rdp->grpmask)) {
4125 +
4126 + /* We haven't checked in, so go dump stack. */
4127 + print_cpu_stall(rsp);
4128 +@@ -1331,9 +1365,10 @@ static int rcu_gp_init(struct rcu_state *rsp)
4129 + }
4130 +
4131 + /* Advance to a new grace period and initialize state. */
4132 ++ record_gp_stall_check_time(rsp);
4133 ++ smp_wmb(); /* Record GP times before starting GP. */
4134 + rsp->gpnum++;
4135 + trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
4136 +- record_gp_stall_check_time(rsp);
4137 + raw_spin_unlock_irq(&rnp->lock);
4138 +
4139 + /* Exclude any concurrent CPU-hotplug operations. */
4140 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
4141 +index 0030db473c99..0bcdceaca6e2 100644
4142 +--- a/kernel/sched/core.c
4143 ++++ b/kernel/sched/core.c
4144 +@@ -1873,11 +1873,11 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
4145 + * If a task dies, then it sets TASK_DEAD in tsk->state and calls
4146 + * schedule one last time. The schedule call will never return, and
4147 + * the scheduled task must drop that reference.
4148 +- * The test for TASK_DEAD must occur while the runqueue locks are
4149 +- * still held, otherwise prev could be scheduled on another cpu, die
4150 +- * there before we look at prev->state, and then the reference would
4151 +- * be dropped twice.
4152 +- * Manfred Spraul <manfred@××××××××××××.com>
4153 ++ *
4154 ++ * We must observe prev->state before clearing prev->on_cpu (in
4155 ++ * finish_lock_switch), otherwise a concurrent wakeup can get prev
4156 ++ * running on another CPU and we could race with its RUNNING -> DEAD
4157 ++ * transition, resulting in a double drop.
4158 + */
4159 + prev_state = prev->state;
4160 + vtime_task_switch(prev);
4161 +@@ -4729,6 +4729,14 @@ static int sched_cpu_active(struct notifier_block *nfb,
4162 + unsigned long action, void *hcpu)
4163 + {
4164 + switch (action & ~CPU_TASKS_FROZEN) {
4165 ++ case CPU_ONLINE:
4166 ++ /*
4167 ++ * At this point a starting CPU has marked itself as online via
4168 ++ * set_cpu_online(). But it might not yet have marked itself
4169 ++ * as active, which is essential from here on.
4170 ++ *
4171 ++ * Thus, fall-through and help the starting CPU along.
4172 ++ */
4173 + case CPU_DOWN_FAILED:
4174 + set_cpu_active((long)hcpu, true);
4175 + return NOTIFY_OK;
4176 +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
4177 +index 4f310592b1ba..1a1cdc3783ed 100644
4178 +--- a/kernel/sched/sched.h
4179 ++++ b/kernel/sched/sched.h
4180 +@@ -845,9 +845,10 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
4181 + * After ->on_cpu is cleared, the task can be moved to a different CPU.
4182 + * We must ensure this doesn't happen until the switch is completely
4183 + * finished.
4184 ++ *
4185 ++ * Pairs with the control dependency and rmb in try_to_wake_up().
4186 + */
4187 +- smp_wmb();
4188 +- prev->on_cpu = 0;
4189 ++ smp_store_release(&prev->on_cpu, 0);
4190 + #endif
4191 + #ifdef CONFIG_DEBUG_SPINLOCK
4192 + /* this is a valid case when another task releases the spinlock */
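
Replacing smp_wmb() plus a plain store with smp_store_release() makes the on_cpu handoff a release operation that pairs with acquire-side checks in try_to_wake_up(), so prev->state is guaranteed visible before prev can be scheduled elsewhere. A C11-atomics analogue of the pairing; the field names mirror the kernel's, the types do not:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct task { int state; atomic_bool on_cpu; };

    static void finish_switch(struct task *prev)
    {
        prev->state = 1;   /* every prior write to *prev ... */
        /* ... is ordered before the store that lets another CPU take prev */
        atomic_store_explicit(&prev->on_cpu, false, memory_order_release);
    }

    static bool can_wake(struct task *p)
    {
        /* acquire pairs with the release above */
        return !atomic_load_explicit(&p->on_cpu, memory_order_acquire);
    }

    int main(void)
    {
        struct task t = { .state = 0 };

        atomic_store(&t.on_cpu, true);
        finish_switch(&t);
        return can_wake(&t) ? 0 : 1;
    }
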
4193 +diff --git a/kernel/workqueue.c b/kernel/workqueue.c
4194 +index bb5f920268d7..bba4e426ccbc 100644
4195 +--- a/kernel/workqueue.c
4196 ++++ b/kernel/workqueue.c
4197 +@@ -1468,13 +1468,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
4198 + timer_stats_timer_set_start_info(&dwork->timer);
4199 +
4200 + dwork->wq = wq;
4201 ++ /* the timer isn't guaranteed to run on this cpu, so record it earlier */
4202 ++ if (cpu == WORK_CPU_UNBOUND)
4203 ++ cpu = raw_smp_processor_id();
4204 + dwork->cpu = cpu;
4205 + timer->expires = jiffies + delay;
4206 +
4207 +- if (unlikely(cpu != WORK_CPU_UNBOUND))
4208 +- add_timer_on(timer, cpu);
4209 +- else
4210 +- add_timer(timer);
4211 ++ add_timer_on(timer, cpu);
4212 + }
4213 +
4214 + /**
4215 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
4216 +index c91c347bb3ea..a3a9676c65cf 100644
4217 +--- a/mm/hugetlb.c
4218 ++++ b/mm/hugetlb.c
4219 +@@ -2605,6 +2605,14 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
4220 + continue;
4221 +
4222 + /*
4223 ++ * Shared VMAs have their own reserves and do not affect
4224 ++ * MAP_PRIVATE accounting but it is possible that a shared
4225 ++ * VMA is using the same page so check and skip such VMAs.
4226 ++ */
4227 ++ if (iter_vma->vm_flags & VM_MAYSHARE)
4228 ++ continue;
4229 ++
4230 ++ /*
4231 + * Unmap the page from other VMAs without their own reserves.
4232 + * They get marked to be SIGKILLed if they fault in these
4233 + * areas. This is because a future no-page fault on this VMA
4234 +diff --git a/mm/slab.c b/mm/slab.c
4235 +index c180fbb8460b..e160d9c39796 100644
4236 +--- a/mm/slab.c
4237 ++++ b/mm/slab.c
4238 +@@ -2304,9 +2304,16 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
4239 + size += BYTES_PER_WORD;
4240 + }
4241 + #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
4242 +- if (size >= kmalloc_size(INDEX_NODE + 1)
4243 +- && cachep->object_size > cache_line_size()
4244 +- && ALIGN(size, cachep->align) < PAGE_SIZE) {
4245 ++ /*
4246 ++ * To activate debug pagealloc, off-slab management is a necessary
4247 ++ * requirement. In the early phase of initialization, a small sized slab
4248 ++ * doesn't get initialized, so it would not be possible. So, we need
4249 ++ * to check size >= 256. It guarantees that all necessary small
4250 ++ * sized slabs are initialized in the current slab initialization sequence.
4251 ++ */
4252 ++ if (!slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
4253 ++ size >= 256 && cachep->object_size > cache_line_size() &&
4254 ++ ALIGN(size, cachep->align) < PAGE_SIZE) {
4255 + cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
4256 + size = PAGE_SIZE;
4257 + }
4258 +diff --git a/net/core/datagram.c b/net/core/datagram.c
4259 +index 98e3d61e7476..f22f120771ef 100644
4260 +--- a/net/core/datagram.c
4261 ++++ b/net/core/datagram.c
4262 +@@ -796,6 +796,7 @@ EXPORT_SYMBOL(__skb_checksum_complete);
4263 + * @skb: skbuff
4264 + * @hlen: hardware length
4265 + * @iov: io vector
4266 ++ * @len: amount of data to copy from skb to iov
4267 + *
4268 + * Caller _must_ check that skb will fit to this iovec.
4269 + *
4270 +@@ -805,11 +806,14 @@ EXPORT_SYMBOL(__skb_checksum_complete);
4271 + * can be modified!
4272 + */
4273 + int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
4274 +- int hlen, struct iovec *iov)
4275 ++ int hlen, struct iovec *iov, int len)
4276 + {
4277 + __wsum csum;
4278 + int chunk = skb->len - hlen;
4279 +
4280 ++ if (chunk > len)
4281 ++ chunk = len;
4282 ++
4283 + if (!chunk)
4284 + return 0;
4285 +
4286 +diff --git a/net/core/ethtool.c b/net/core/ethtool.c
4287 +index 78e9d9223e40..944c60ce15d8 100644
4288 +--- a/net/core/ethtool.c
4289 ++++ b/net/core/ethtool.c
4290 +@@ -1077,7 +1077,7 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
4291 +
4292 + gstrings.len = ret;
4293 +
4294 +- data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
4295 ++ data = kcalloc(gstrings.len, ETH_GSTRING_LEN, GFP_USER);
4296 + if (!data)
4297 + return -ENOMEM;
4298 +
4299 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
4300 +index b01dd5f421da..de76393a9916 100644
4301 +--- a/net/core/skbuff.c
4302 ++++ b/net/core/skbuff.c
4303 +@@ -2726,11 +2726,12 @@ EXPORT_SYMBOL(skb_append_datato_frags);
4304 + */
4305 + unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
4306 + {
4307 ++ unsigned char *data = skb->data;
4308 ++
4309 + BUG_ON(len > skb->len);
4310 +- skb->len -= len;
4311 +- BUG_ON(skb->len < skb->data_len);
4312 +- skb_postpull_rcsum(skb, skb->data, len);
4313 +- return skb->data += len;
4314 ++ __skb_pull(skb, len);
4315 ++ skb_postpull_rcsum(skb, data, len);
4316 ++ return skb->data;
4317 + }
4318 + EXPORT_SYMBOL_GPL(skb_pull_rcsum);
4319 +
4320 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
4321 +index 49c87a39948f..4829750aa424 100644
4322 +--- a/net/ipv4/tcp_input.c
4323 ++++ b/net/ipv4/tcp_input.c
4324 +@@ -4892,7 +4892,7 @@ static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
4325 + err = skb_copy_datagram_iovec(skb, hlen, tp->ucopy.iov, chunk);
4326 + else
4327 + err = skb_copy_and_csum_datagram_iovec(skb, hlen,
4328 +- tp->ucopy.iov);
4329 ++ tp->ucopy.iov, chunk);
4330 +
4331 + if (!err) {
4332 + tp->ucopy.len -= chunk;
4333 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
4334 +index 268ed25f2d65..4908eaa1cdec 100644
4335 +--- a/net/ipv4/udp.c
4336 ++++ b/net/ipv4/udp.c
4337 +@@ -1245,7 +1245,7 @@ try_again:
4338 + else {
4339 + err = skb_copy_and_csum_datagram_iovec(skb,
4340 + sizeof(struct udphdr),
4341 +- msg->msg_iov);
4342 ++ msg->msg_iov, copied);
4343 +
4344 + if (err == -EINVAL)
4345 + goto csum_copy_err;
4346 +diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
4347 +index 430067cb9210..0d51ebc176a7 100644
4348 +--- a/net/ipv6/raw.c
4349 ++++ b/net/ipv6/raw.c
4350 +@@ -489,7 +489,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
4351 + goto csum_copy_err;
4352 + err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
4353 + } else {
4354 +- err = skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov);
4355 ++ err = skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov, copied);
4356 + if (err == -EINVAL)
4357 + goto csum_copy_err;
4358 + }
4359 +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
4360 +index e09ca285e8f5..946ee8efe74b 100644
4361 +--- a/net/ipv6/udp.c
4362 ++++ b/net/ipv6/udp.c
4363 +@@ -410,7 +410,8 @@ try_again:
4364 + err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
4365 + msg->msg_iov, copied);
4366 + else {
4367 +- err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
4368 ++ err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr),
4369 ++ msg->msg_iov, copied);
4370 + if (err == -EINVAL)
4371 + goto csum_copy_err;
4372 + }
4373 +diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
4374 +index b076e8309bc2..6639bc27edb9 100644
4375 +--- a/net/l2tp/l2tp_core.c
4376 ++++ b/net/l2tp/l2tp_core.c
4377 +@@ -1438,7 +1438,7 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
4378 + tunnel = container_of(work, struct l2tp_tunnel, del_work);
4379 + sk = l2tp_tunnel_sock_lookup(tunnel);
4380 + if (!sk)
4381 +- return;
4382 ++ goto out;
4383 +
4384 + sock = sk->sk_socket;
4385 +
4386 +@@ -1459,6 +1459,8 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
4387 + }
4388 +
4389 + l2tp_tunnel_sock_put(sk);
4390 ++out:
4391 ++ l2tp_tunnel_dec_refcount(tunnel);
4392 + }
4393 +
4394 + /* Create a socket for the tunnel, if one isn't set up by
4395 +@@ -1788,8 +1790,13 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
4396 + */
4397 + int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
4398 + {
4399 ++ l2tp_tunnel_inc_refcount(tunnel);
4400 + l2tp_tunnel_closeall(tunnel);
4401 +- return (false == queue_work(l2tp_wq, &tunnel->del_work));
4402 ++ if (false == queue_work(l2tp_wq, &tunnel->del_work)) {
4403 ++ l2tp_tunnel_dec_refcount(tunnel);
4404 ++ return 1;
4405 ++ }
4406 ++ return 0;
4407 + }
4408 + EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
4409 +
4410 +diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
4411 +index 6d91d760a896..3e3e4f4f594a 100644
4412 +--- a/net/netfilter/ipvs/ip_vs_sync.c
4413 ++++ b/net/netfilter/ipvs/ip_vs_sync.c
4414 +@@ -612,7 +612,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
4415 + pkts = atomic_add_return(1, &cp->in_pkts);
4416 + else
4417 + pkts = sysctl_sync_threshold(ipvs);
4418 +- ip_vs_sync_conn(net, cp->control, pkts);
4419 ++ ip_vs_sync_conn(net, cp, pkts);
4420 + }
4421 + }
4422 +
4423 +diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
4424 +index 1692e7534759..c3d204973dbc 100644
4425 +--- a/net/netfilter/ipvs/ip_vs_xmit.c
4426 ++++ b/net/netfilter/ipvs/ip_vs_xmit.c
4427 +@@ -129,7 +129,6 @@ static struct rtable *do_output_route4(struct net *net, __be32 daddr,
4428 +
4429 + memset(&fl4, 0, sizeof(fl4));
4430 + fl4.daddr = daddr;
4431 +- fl4.saddr = (rt_mode & IP_VS_RT_MODE_CONNECT) ? *saddr : 0;
4432 + fl4.flowi4_flags = (rt_mode & IP_VS_RT_MODE_KNOWN_NH) ?
4433 + FLOWI_FLAG_KNOWN_NH : 0;
4434 +
4435 +diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
4436 +index 4fd1ca94fd4a..71c46f463969 100644
4437 +--- a/net/netfilter/nf_conntrack_expect.c
4438 ++++ b/net/netfilter/nf_conntrack_expect.c
4439 +@@ -202,7 +202,8 @@ static inline int expect_clash(const struct nf_conntrack_expect *a,
4440 + a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
4441 + }
4442 +
4443 +- return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
4444 ++ return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
4445 ++ nf_ct_zone(a->master) == nf_ct_zone(b->master);
4446 + }
4447 +
4448 + static inline int expect_matches(const struct nf_conntrack_expect *a,
4449 +diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
4450 +index eea936b70d15..db744dd68707 100644
4451 +--- a/net/netfilter/nf_conntrack_netlink.c
4452 ++++ b/net/netfilter/nf_conntrack_netlink.c
4453 +@@ -2925,11 +2925,6 @@ ctnetlink_create_expect(struct net *net, u16 zone,
4454 + }
4455 +
4456 + err = nf_ct_expect_related_report(exp, portid, report);
4457 +- if (err < 0)
4458 +- goto err_exp;
4459 +-
4460 +- return 0;
4461 +-err_exp:
4462 + nf_ct_expect_put(exp);
4463 + err_ct:
4464 + nf_ct_put(ct);
4465 +diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
4466 +index 5cc2da5d295d..c67f5d3f6e61 100644
4467 +--- a/net/rxrpc/ar-recvmsg.c
4468 ++++ b/net/rxrpc/ar-recvmsg.c
4469 +@@ -185,7 +185,8 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
4470 + msg->msg_iov, copy);
4471 + } else {
4472 + ret = skb_copy_and_csum_datagram_iovec(skb, offset,
4473 +- msg->msg_iov);
4474 ++ msg->msg_iov,
4475 ++ copy);
4476 + if (ret == -EINVAL)
4477 + goto csum_copy_error;
4478 + }
4479 +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
4480 +index 9afa362d8a31..157b3595ef62 100644
4481 +--- a/net/unix/af_unix.c
4482 ++++ b/net/unix/af_unix.c
4483 +@@ -1954,6 +1954,11 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
4484 + goto out;
4485 + }
4486 +
4487 ++ if (flags & MSG_PEEK)
4488 ++ skip = sk_peek_offset(sk, flags);
4489 ++ else
4490 ++ skip = 0;
4491 ++
4492 + do {
4493 + int chunk;
4494 + struct sk_buff *skb, *last;
4495 +@@ -2000,7 +2005,6 @@ again:
4496 + break;
4497 + }
4498 +
4499 +- skip = sk_peek_offset(sk, flags);
4500 + while (skip >= unix_skb_len(skb)) {
4501 + skip -= unix_skb_len(skb);
4502 + last = skb;
4503 +@@ -2064,6 +2068,16 @@ again:
4504 +
4505 + sk_peek_offset_fwd(sk, chunk);
4506 +
4507 ++ if (UNIXCB(skb).fp)
4508 ++ break;
4509 ++
4510 ++ skip = 0;
4511 ++ last = skb;
4512 ++ unix_state_lock(sk);
4513 ++ skb = skb_peek_next(skb, &sk->sk_receive_queue);
4514 ++ if (skb)
4515 ++ goto again;
4516 ++ unix_state_unlock(sk);
4517 + break;
4518 + }
4519 + } while (size);
4520 +diff --git a/sound/arm/Kconfig b/sound/arm/Kconfig
4521 +index 885683a3b0bd..e0406211716b 100644
4522 +--- a/sound/arm/Kconfig
4523 ++++ b/sound/arm/Kconfig
4524 +@@ -9,6 +9,14 @@ menuconfig SND_ARM
4525 + Drivers that are implemented on ASoC can be found in
4526 + "ALSA for SoC audio support" section.
4527 +
4528 ++config SND_PXA2XX_LIB
4529 ++ tristate
4530 ++ select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97
4531 ++ select SND_DMAENGINE_PCM
4532 ++
4533 ++config SND_PXA2XX_LIB_AC97
4534 ++ bool
4535 ++
4536 + if SND_ARM
4537 +
4538 + config SND_ARMAACI
4539 +@@ -21,13 +29,6 @@ config SND_PXA2XX_PCM
4540 + tristate
4541 + select SND_PCM
4542 +
4543 +-config SND_PXA2XX_LIB
4544 +- tristate
4545 +- select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97
4546 +-
4547 +-config SND_PXA2XX_LIB_AC97
4548 +- bool
4549 +-
4550 + config SND_PXA2XX_AC97
4551 + tristate "AC97 driver for the Intel PXA2xx chip"
4552 + depends on ARCH_PXA
4553 +diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
4554 +index d54d218fe810..3c90743fa50b 100644
4555 +--- a/sound/pci/hda/patch_cirrus.c
4556 ++++ b/sound/pci/hda/patch_cirrus.c
4557 +@@ -47,6 +47,10 @@ struct cs_spec {
4558 + unsigned int spdif_present:1;
4559 + unsigned int sense_b:1;
4560 + hda_nid_t vendor_nid;
4561 ++
4562 ++ /* for MBP SPDIF control */
4563 ++ int (*spdif_sw_put)(struct snd_kcontrol *kcontrol,
4564 ++ struct snd_ctl_elem_value *ucontrol);
4565 + };
4566 +
4567 + /* available models with CS420x */
4568 +@@ -331,10 +335,21 @@ static int cs_init(struct hda_codec *codec)
4569 + return 0;
4570 + }
4571 +
4572 ++static int cs_build_controls(struct hda_codec *codec)
4573 ++{
4574 ++ int err;
4575 ++
4576 ++ err = snd_hda_gen_build_controls(codec);
4577 ++ if (err < 0)
4578 ++ return err;
4579 ++ snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_BUILD);
4580 ++ return 0;
4581 ++}
4582 ++
4583 + #define cs_free snd_hda_gen_free
4584 +
4585 + static const struct hda_codec_ops cs_patch_ops = {
4586 +- .build_controls = snd_hda_gen_build_controls,
4587 ++ .build_controls = cs_build_controls,
4588 + .build_pcms = snd_hda_gen_build_pcms,
4589 + .init = cs_init,
4590 + .free = cs_free,
4591 +@@ -601,12 +616,14 @@ static int patch_cs420x(struct hda_codec *codec)
4592 + enum {
4593 + CS4208_MAC_AUTO,
4594 + CS4208_MBA6,
4595 ++ CS4208_MBP11,
4596 + CS4208_GPIO0,
4597 + };
4598 +
4599 + static const struct hda_model_fixup cs4208_models[] = {
4600 + { .id = CS4208_GPIO0, .name = "gpio0" },
4601 + { .id = CS4208_MBA6, .name = "mba6" },
4602 ++ { .id = CS4208_MBP11, .name = "mbp11" },
4603 + {}
4604 + };
4605 +
4606 +@@ -617,8 +634,10 @@ static const struct snd_pci_quirk cs4208_fixup_tbl[] = {
4607 +
4608 + /* codec SSID matching */
4609 + static const struct snd_pci_quirk cs4208_mac_fixup_tbl[] = {
4610 ++ SND_PCI_QUIRK(0x106b, 0x5e00, "MacBookPro 11,2", CS4208_MBP11),
4611 + SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6),
4612 + SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6),
4613 ++ SND_PCI_QUIRK(0x106b, 0x7b00, "MacBookPro 12,1", CS4208_MBP11),
4614 + {} /* terminator */
4615 + };
4616 +
4617 +@@ -648,6 +667,36 @@ static void cs4208_fixup_mac(struct hda_codec *codec,
4618 + snd_hda_apply_fixup(codec, action);
4619 + }
4620 +
4621 ++static int cs4208_spdif_sw_put(struct snd_kcontrol *kcontrol,
4622 ++ struct snd_ctl_elem_value *ucontrol)
4623 ++{
4624 ++ struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
4625 ++ struct cs_spec *spec = codec->spec;
4626 ++ hda_nid_t pin = spec->gen.autocfg.dig_out_pins[0];
4627 ++ int pinctl = ucontrol->value.integer.value[0] ? PIN_OUT : 0;
4628 ++
4629 ++ snd_hda_set_pin_ctl_cache(codec, pin, pinctl);
4630 ++ return spec->spdif_sw_put(kcontrol, ucontrol);
4631 ++}
4632 ++
4633 ++/* hook the SPDIF switch */
4634 ++static void cs4208_fixup_spdif_switch(struct hda_codec *codec,
4635 ++ const struct hda_fixup *fix, int action)
4636 ++{
4637 ++ if (action == HDA_FIXUP_ACT_BUILD) {
4638 ++ struct cs_spec *spec = codec->spec;
4639 ++ struct snd_kcontrol *kctl;
4640 ++
4641 ++ if (!spec->gen.autocfg.dig_out_pins[0])
4642 ++ return;
4643 ++ kctl = snd_hda_find_mixer_ctl(codec, "IEC958 Playback Switch");
4644 ++ if (!kctl)
4645 ++ return;
4646 ++ spec->spdif_sw_put = kctl->put;
4647 ++ kctl->put = cs4208_spdif_sw_put;
4648 ++ }
4649 ++}
4650 ++
4651 + static const struct hda_fixup cs4208_fixups[] = {
4652 + [CS4208_MBA6] = {
4653 + .type = HDA_FIXUP_PINS,
4654 +@@ -655,6 +704,12 @@ static const struct hda_fixup cs4208_fixups[] = {
4655 + .chained = true,
4656 + .chain_id = CS4208_GPIO0,
4657 + },
4658 ++ [CS4208_MBP11] = {
4659 ++ .type = HDA_FIXUP_FUNC,
4660 ++ .v.func = cs4208_fixup_spdif_switch,
4661 ++ .chained = true,
4662 ++ .chain_id = CS4208_GPIO0,
4663 ++ },
4664 + [CS4208_GPIO0] = {
4665 + .type = HDA_FIXUP_FUNC,
4666 + .v.func = cs4208_fixup_gpio0,
4667 +diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c
4668 +index 2f6357578616..1b6cbbc95456 100644
4669 +--- a/sound/soc/dwc/designware_i2s.c
4670 ++++ b/sound/soc/dwc/designware_i2s.c
4671 +@@ -100,10 +100,10 @@ static inline void i2s_clear_irqs(struct dw_i2s_dev *dev, u32 stream)
4672 +
4673 + if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
4674 + for (i = 0; i < 4; i++)
4675 +- i2s_write_reg(dev->i2s_base, TOR(i), 0);
4676 ++ i2s_read_reg(dev->i2s_base, TOR(i));
4677 + } else {
4678 + for (i = 0; i < 4; i++)
4679 +- i2s_write_reg(dev->i2s_base, ROR(i), 0);
4680 ++ i2s_read_reg(dev->i2s_base, ROR(i));
4681 + }
4682 + }
4683 +
4684 +diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
4685 +index 4db74a083db1..dbaba4f4fa53 100644
4686 +--- a/sound/soc/pxa/Kconfig
4687 ++++ b/sound/soc/pxa/Kconfig
4688 +@@ -1,7 +1,6 @@
4689 + config SND_PXA2XX_SOC
4690 + tristate "SoC Audio for the Intel PXA2xx chip"
4691 + depends on ARCH_PXA
4692 +- select SND_ARM
4693 + select SND_PXA2XX_LIB
4694 + help
4695 + Say Y or M if you want to add support for codecs attached to
4696 +@@ -24,7 +23,6 @@ config SND_PXA2XX_AC97
4697 + config SND_PXA2XX_SOC_AC97
4698 + tristate
4699 + select AC97_BUS
4700 +- select SND_ARM
4701 + select SND_PXA2XX_LIB_AC97
4702 + select SND_SOC_AC97_BUS
4703 +
4704 +diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c
4705 +index f1059d999de6..ae939cf22ebd 100644
4706 +--- a/sound/soc/pxa/pxa2xx-ac97.c
4707 ++++ b/sound/soc/pxa/pxa2xx-ac97.c
4708 +@@ -49,7 +49,7 @@ static struct snd_ac97_bus_ops pxa2xx_ac97_ops = {
4709 + .reset = pxa2xx_ac97_cold_reset,
4710 + };
4711 +
4712 +-static unsigned long pxa2xx_ac97_pcm_stereo_in_req = 12;
4713 ++static unsigned long pxa2xx_ac97_pcm_stereo_in_req = 11;
4714 + static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_in = {
4715 + .addr = __PREG(PCDR),
4716 + .addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
4717 +@@ -57,7 +57,7 @@ static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_in = {
4718 + .filter_data = &pxa2xx_ac97_pcm_stereo_in_req,
4719 + };
4720 +
4721 +-static unsigned long pxa2xx_ac97_pcm_stereo_out_req = 11;
4722 ++static unsigned long pxa2xx_ac97_pcm_stereo_out_req = 12;
4723 + static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_out = {
4724 + .addr = __PREG(PCDR),
4725 + .addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
4726 +diff --git a/sound/synth/emux/emux_oss.c b/sound/synth/emux/emux_oss.c
4727 +index daf61abc3670..646b66703bd8 100644
4728 +--- a/sound/synth/emux/emux_oss.c
4729 ++++ b/sound/synth/emux/emux_oss.c
4730 +@@ -69,7 +69,8 @@ snd_emux_init_seq_oss(struct snd_emux *emu)
4731 + struct snd_seq_oss_reg *arg;
4732 + struct snd_seq_device *dev;
4733 +
4734 +- if (snd_seq_device_new(emu->card, 0, SNDRV_SEQ_DEV_ID_OSS,
4735 ++ /* using device#1 here for avoiding conflicts with OPL3 */
4736 ++ if (snd_seq_device_new(emu->card, 1, SNDRV_SEQ_DEV_ID_OSS,
4737 + sizeof(struct snd_seq_oss_reg), &dev) < 0)
4738 + return;
4739 +
4740 +diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
4741 +index 5098f144b92d..df4784df26b3 100644
4742 +--- a/tools/perf/builtin-stat.c
4743 ++++ b/tools/perf/builtin-stat.c
4744 +@@ -945,7 +945,7 @@ static void abs_printout(int cpu, int nr, struct perf_evsel *evsel, double avg)
4745 + static void print_aggr(char *prefix)
4746 + {
4747 + struct perf_evsel *counter;
4748 +- int cpu, cpu2, s, s2, id, nr;
4749 ++ int cpu, s, s2, id, nr;
4750 + u64 ena, run, val;
4751 +
4752 + if (!(aggr_map || aggr_get_id))
4753 +@@ -957,8 +957,7 @@ static void print_aggr(char *prefix)
4754 + val = ena = run = 0;
4755 + nr = 0;
4756 + for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
4757 +- cpu2 = perf_evsel__cpus(counter)->map[cpu];
4758 +- s2 = aggr_get_id(evsel_list->cpus, cpu2);
4759 ++ s2 = aggr_get_id(perf_evsel__cpus(counter), cpu);
4760 + if (s2 != id)
4761 + continue;
4762 + val += counter->counts->cpu[cpu].val;
4763 +diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
4764 +index c3e5a3b817ab..3f82a2f65a65 100644
4765 +--- a/tools/perf/util/header.c
4766 ++++ b/tools/perf/util/header.c
4767 +@@ -1718,7 +1718,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused,
4768 + if (ph->needs_swap)
4769 + nr = bswap_32(nr);
4770 +
4771 +- ph->env.nr_cpus_online = nr;
4772 ++ ph->env.nr_cpus_avail = nr;
4773 +
4774 + ret = readn(fd, &nr, sizeof(nr));
4775 + if (ret != sizeof(nr))
4776 +@@ -1727,7 +1727,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused,
4777 + if (ph->needs_swap)
4778 + nr = bswap_32(nr);
4779 +
4780 +- ph->env.nr_cpus_avail = nr;
4781 ++ ph->env.nr_cpus_online = nr;
4782 + return 0;
4783 + }
4784 +
4785 +diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
4786 +index 9ff6cf3e9a99..b1c914413c5f 100644
4787 +--- a/tools/perf/util/hist.c
4788 ++++ b/tools/perf/util/hist.c
4789 +@@ -160,6 +160,9 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
4790 + hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
4791 + hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
4792 + hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
4793 ++
4794 ++ if (h->srcline)
4795 ++ hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));
4796 + }
4797 +
4798 + void hists__output_recalc_col_len(struct hists *hists, int max_rows)
4799 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
4800 +index a3510441f7d7..235b3f0cc97e 100644
4801 +--- a/virt/kvm/kvm_main.c
4802 ++++ b/virt/kvm/kvm_main.c
4803 +@@ -2813,10 +2813,25 @@ static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
4804 + static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
4805 + const struct kvm_io_range *r2)
4806 + {
4807 +- if (r1->addr < r2->addr)
4808 ++ gpa_t addr1 = r1->addr;
4809 ++ gpa_t addr2 = r2->addr;
4810 ++
4811 ++ if (addr1 < addr2)
4812 + return -1;
4813 +- if (r1->addr + r1->len > r2->addr + r2->len)
4814 ++
4815 ++ /* If r2->len == 0, match the exact address. If r2->len != 0,
4816 ++ * accept any overlapping write. Any order is acceptable for
4817 ++ * overlapping ranges, because kvm_io_bus_get_first_dev ensures
4818 ++ * we process all of them.
4819 ++ */
4820 ++ if (r2->len) {
4821 ++ addr1 += r1->len;
4822 ++ addr2 += r2->len;
4823 ++ }
4824 ++
4825 ++ if (addr1 > addr2)
4826 + return 1;
4827 ++
4828 + return 0;
4829 + }
4830 +