From: Mike Pagano <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:3.18 commit in: /
Date: Fri, 30 Jan 2015 11:01:45
Message-Id: 1422615708.c006e3c68a07765a9f1aa4d9a1a6b3df0e0a417c.mpagano@gentoo
1 commit: c006e3c68a07765a9f1aa4d9a1a6b3df0e0a417c
2 Author: Mike Pagano <mpagano@gentoo.org>
3 AuthorDate: Fri Jan 30 11:01:48 2015 +0000
4 Commit: Mike Pagano <mpagano@gentoo.org>
5 CommitDate: Fri Jan 30 11:01:48 2015 +0000
6 URL: http://sources.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=c006e3c6
7
8 Linux patch 3.18.5
9
10 ---
11 0000_README | 4 +
12 1004_linux-3.18.5.patch | 3915 +++++++++++++++++++++++++++++++++++++++++++++++
13 2 files changed, 3919 insertions(+)
14
15 diff --git a/0000_README b/0000_README
16 index 52cb186..f8b4dcb 100644
17 --- a/0000_README
18 +++ b/0000_README
19 @@ -59,6 +59,10 @@ Patch: 1003_linux-3.18.4.patch
20 From: http://www.kernel.org
21 Desc: Linux 3.18.4
22
23 +Patch: 1004_linux-3.18.5.patch
24 +From: http://www.kernel.org
25 +Desc: Linux 3.18.5
26 +
27 Patch: 1500_XATTR_USER_PREFIX.patch
28 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
29 Desc: Support for namespace user.pax.* on tmpfs.
30
31 diff --git a/1004_linux-3.18.5.patch b/1004_linux-3.18.5.patch
32 new file mode 100644
33 index 0000000..2a470c7
34 --- /dev/null
35 +++ b/1004_linux-3.18.5.patch
36 @@ -0,0 +1,3915 @@
37 +diff --git a/Makefile b/Makefile
38 +index 4e9328491c1e..6276fcaabf21 100644
39 +--- a/Makefile
40 ++++ b/Makefile
41 +@@ -1,6 +1,6 @@
42 + VERSION = 3
43 + PATCHLEVEL = 18
44 +-SUBLEVEL = 4
45 ++SUBLEVEL = 5
46 + EXTRAVERSION =
47 + NAME = Diseased Newt
48 +
49 +diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
50 +index d238676a9107..e4d3aecc4ed2 100644
51 +--- a/arch/arm/boot/dts/imx25.dtsi
52 ++++ b/arch/arm/boot/dts/imx25.dtsi
53 +@@ -369,7 +369,7 @@
54 + compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
55 + #pwm-cells = <2>;
56 + reg = <0x53fa0000 0x4000>;
57 +- clocks = <&clks 106>, <&clks 36>;
58 ++ clocks = <&clks 106>, <&clks 52>;
59 + clock-names = "ipg", "per";
60 + interrupts = <36>;
61 + };
62 +@@ -388,7 +388,7 @@
63 + compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
64 + #pwm-cells = <2>;
65 + reg = <0x53fa8000 0x4000>;
66 +- clocks = <&clks 107>, <&clks 36>;
67 ++ clocks = <&clks 107>, <&clks 52>;
68 + clock-names = "ipg", "per";
69 + interrupts = <41>;
70 + };
71 +@@ -429,7 +429,7 @@
72 + pwm4: pwm@53fc8000 {
73 + compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
74 + reg = <0x53fc8000 0x4000>;
75 +- clocks = <&clks 108>, <&clks 36>;
76 ++ clocks = <&clks 108>, <&clks 52>;
77 + clock-names = "ipg", "per";
78 + interrupts = <42>;
79 + };
80 +@@ -476,7 +476,7 @@
81 + compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
82 + #pwm-cells = <2>;
83 + reg = <0x53fe0000 0x4000>;
84 +- clocks = <&clks 105>, <&clks 36>;
85 ++ clocks = <&clks 105>, <&clks 52>;
86 + clock-names = "ipg", "per";
87 + interrupts = <26>;
88 + };
89 +diff --git a/arch/arm/crypto/aes_glue.c b/arch/arm/crypto/aes_glue.c
90 +index 3003fa1f6fb4..0409b8f89782 100644
91 +--- a/arch/arm/crypto/aes_glue.c
92 ++++ b/arch/arm/crypto/aes_glue.c
93 +@@ -93,6 +93,6 @@ module_exit(aes_fini);
94 +
95 + MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm (ASM)");
96 + MODULE_LICENSE("GPL");
97 +-MODULE_ALIAS("aes");
98 +-MODULE_ALIAS("aes-asm");
99 ++MODULE_ALIAS_CRYPTO("aes");
100 ++MODULE_ALIAS_CRYPTO("aes-asm");
101 + MODULE_AUTHOR("David McCullough <ucdevel@×××××.com>");
102 +diff --git a/arch/arm/crypto/sha1_glue.c b/arch/arm/crypto/sha1_glue.c
103 +index 84f2a756588b..e31b0440c613 100644
104 +--- a/arch/arm/crypto/sha1_glue.c
105 ++++ b/arch/arm/crypto/sha1_glue.c
106 +@@ -171,5 +171,5 @@ module_exit(sha1_mod_fini);
107 +
108 + MODULE_LICENSE("GPL");
109 + MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm (ARM)");
110 +-MODULE_ALIAS("sha1");
111 ++MODULE_ALIAS_CRYPTO("sha1");
112 + MODULE_AUTHOR("David McCullough <ucdevel@×××××.com>");
113 +diff --git a/arch/arm/crypto/sha1_neon_glue.c b/arch/arm/crypto/sha1_neon_glue.c
114 +index 6f1b411b1d55..0b0083757d47 100644
115 +--- a/arch/arm/crypto/sha1_neon_glue.c
116 ++++ b/arch/arm/crypto/sha1_neon_glue.c
117 +@@ -194,4 +194,4 @@ module_exit(sha1_neon_mod_fini);
118 +
119 + MODULE_LICENSE("GPL");
120 + MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, NEON accelerated");
121 +-MODULE_ALIAS("sha1");
122 ++MODULE_ALIAS_CRYPTO("sha1");
123 +diff --git a/arch/arm/crypto/sha512_neon_glue.c b/arch/arm/crypto/sha512_neon_glue.c
124 +index 0d2758ff5e12..f3452c66059d 100644
125 +--- a/arch/arm/crypto/sha512_neon_glue.c
126 ++++ b/arch/arm/crypto/sha512_neon_glue.c
127 +@@ -301,5 +301,5 @@ module_exit(sha512_neon_mod_fini);
128 + MODULE_LICENSE("GPL");
129 + MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, NEON accelerated");
130 +
131 +-MODULE_ALIAS("sha512");
132 +-MODULE_ALIAS("sha384");
133 ++MODULE_ALIAS_CRYPTO("sha512");
134 ++MODULE_ALIAS_CRYPTO("sha384");
135 +diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
136 +index c31f4c00b1fc..1163a3e9accd 100644
137 +--- a/arch/arm/mach-mvebu/coherency.c
138 ++++ b/arch/arm/mach-mvebu/coherency.c
139 +@@ -398,9 +398,14 @@ static int coherency_type(void)
140 + return type;
141 + }
142 +
143 ++/*
144 ++ * As a precaution, we currently completely disable hardware I/O
145 ++ * coherency, until enough testing is done with automatic I/O
146 ++ * synchronization barriers to validate that it is a proper solution.
147 ++ */
148 + int coherency_available(void)
149 + {
150 +- return coherency_type() != COHERENCY_FABRIC_TYPE_NONE;
151 ++ return false;
152 + }
153 +
154 + int __init coherency_init(void)
155 +diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
156 +index 9e6cdde9b43d..0156a268e163 100644
157 +--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
158 ++++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
159 +@@ -294,4 +294,4 @@ module_exit(aes_mod_exit);
160 + MODULE_DESCRIPTION("Synchronous AES in CCM mode using ARMv8 Crypto Extensions");
161 + MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@××××××.org>");
162 + MODULE_LICENSE("GPL v2");
163 +-MODULE_ALIAS("ccm(aes)");
164 ++MODULE_ALIAS_CRYPTO("ccm(aes)");
165 +diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
166 +index 79cd911ef88c..5f63a791b2fb 100644
167 +--- a/arch/arm64/crypto/aes-glue.c
168 ++++ b/arch/arm64/crypto/aes-glue.c
169 +@@ -38,10 +38,10 @@ MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
170 + #define aes_xts_encrypt neon_aes_xts_encrypt
171 + #define aes_xts_decrypt neon_aes_xts_decrypt
172 + MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 NEON");
173 +-MODULE_ALIAS("ecb(aes)");
174 +-MODULE_ALIAS("cbc(aes)");
175 +-MODULE_ALIAS("ctr(aes)");
176 +-MODULE_ALIAS("xts(aes)");
177 ++MODULE_ALIAS_CRYPTO("ecb(aes)");
178 ++MODULE_ALIAS_CRYPTO("cbc(aes)");
179 ++MODULE_ALIAS_CRYPTO("ctr(aes)");
180 ++MODULE_ALIAS_CRYPTO("xts(aes)");
181 + #endif
182 +
183 + MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@××××××.org>");
184 +diff --git a/arch/powerpc/crypto/sha1.c b/arch/powerpc/crypto/sha1.c
185 +index f9e8b9491efc..b51da9132744 100644
186 +--- a/arch/powerpc/crypto/sha1.c
187 ++++ b/arch/powerpc/crypto/sha1.c
188 +@@ -154,4 +154,5 @@ module_exit(sha1_powerpc_mod_fini);
189 + MODULE_LICENSE("GPL");
190 + MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
191 +
192 +-MODULE_ALIAS("sha1-powerpc");
193 ++MODULE_ALIAS_CRYPTO("sha1");
194 ++MODULE_ALIAS_CRYPTO("sha1-powerpc");
195 +diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
196 +index 23223cd63e54..1f272b24fc0b 100644
197 +--- a/arch/s390/crypto/aes_s390.c
198 ++++ b/arch/s390/crypto/aes_s390.c
199 +@@ -979,7 +979,7 @@ static void __exit aes_s390_fini(void)
200 + module_init(aes_s390_init);
201 + module_exit(aes_s390_fini);
202 +
203 +-MODULE_ALIAS("aes-all");
204 ++MODULE_ALIAS_CRYPTO("aes-all");
205 +
206 + MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
207 + MODULE_LICENSE("GPL");
208 +diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
209 +index 7acb77f7ef1a..9e05cc453a40 100644
210 +--- a/arch/s390/crypto/des_s390.c
211 ++++ b/arch/s390/crypto/des_s390.c
212 +@@ -619,8 +619,8 @@ static void __exit des_s390_exit(void)
213 + module_init(des_s390_init);
214 + module_exit(des_s390_exit);
215 +
216 +-MODULE_ALIAS("des");
217 +-MODULE_ALIAS("des3_ede");
218 ++MODULE_ALIAS_CRYPTO("des");
219 ++MODULE_ALIAS_CRYPTO("des3_ede");
220 +
221 + MODULE_LICENSE("GPL");
222 + MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms");
223 +diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
224 +index d43485d142e9..7940dc90e80b 100644
225 +--- a/arch/s390/crypto/ghash_s390.c
226 ++++ b/arch/s390/crypto/ghash_s390.c
227 +@@ -160,7 +160,7 @@ static void __exit ghash_mod_exit(void)
228 + module_init(ghash_mod_init);
229 + module_exit(ghash_mod_exit);
230 +
231 +-MODULE_ALIAS("ghash");
232 ++MODULE_ALIAS_CRYPTO("ghash");
233 +
234 + MODULE_LICENSE("GPL");
235 + MODULE_DESCRIPTION("GHASH Message Digest Algorithm, s390 implementation");
236 +diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
237 +index a1b3a9dc9d8a..5b2bee323694 100644
238 +--- a/arch/s390/crypto/sha1_s390.c
239 ++++ b/arch/s390/crypto/sha1_s390.c
240 +@@ -103,6 +103,6 @@ static void __exit sha1_s390_fini(void)
241 + module_init(sha1_s390_init);
242 + module_exit(sha1_s390_fini);
243 +
244 +-MODULE_ALIAS("sha1");
245 ++MODULE_ALIAS_CRYPTO("sha1");
246 + MODULE_LICENSE("GPL");
247 + MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
248 +diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
249 +index 9b853809a492..b74ff158108c 100644
250 +--- a/arch/s390/crypto/sha256_s390.c
251 ++++ b/arch/s390/crypto/sha256_s390.c
252 +@@ -143,7 +143,7 @@ static void __exit sha256_s390_fini(void)
253 + module_init(sha256_s390_init);
254 + module_exit(sha256_s390_fini);
255 +
256 +-MODULE_ALIAS("sha256");
257 +-MODULE_ALIAS("sha224");
258 ++MODULE_ALIAS_CRYPTO("sha256");
259 ++MODULE_ALIAS_CRYPTO("sha224");
260 + MODULE_LICENSE("GPL");
261 + MODULE_DESCRIPTION("SHA256 and SHA224 Secure Hash Algorithm");
262 +diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c
263 +index 32a81383b69c..0c36989ba182 100644
264 +--- a/arch/s390/crypto/sha512_s390.c
265 ++++ b/arch/s390/crypto/sha512_s390.c
266 +@@ -86,7 +86,7 @@ static struct shash_alg sha512_alg = {
267 + }
268 + };
269 +
270 +-MODULE_ALIAS("sha512");
271 ++MODULE_ALIAS_CRYPTO("sha512");
272 +
273 + static int sha384_init(struct shash_desc *desc)
274 + {
275 +@@ -126,7 +126,7 @@ static struct shash_alg sha384_alg = {
276 + }
277 + };
278 +
279 +-MODULE_ALIAS("sha384");
280 ++MODULE_ALIAS_CRYPTO("sha384");
281 +
282 + static int __init init(void)
283 + {
284 +diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c
285 +index df922f52d76d..705408766ab0 100644
286 +--- a/arch/sparc/crypto/aes_glue.c
287 ++++ b/arch/sparc/crypto/aes_glue.c
288 +@@ -499,6 +499,6 @@ module_exit(aes_sparc64_mod_fini);
289 + MODULE_LICENSE("GPL");
290 + MODULE_DESCRIPTION("AES Secure Hash Algorithm, sparc64 aes opcode accelerated");
291 +
292 +-MODULE_ALIAS("aes");
293 ++MODULE_ALIAS_CRYPTO("aes");
294 +
295 + #include "crop_devid.c"
296 +diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c
297 +index 888f6260b4ec..641f55cb61c3 100644
298 +--- a/arch/sparc/crypto/camellia_glue.c
299 ++++ b/arch/sparc/crypto/camellia_glue.c
300 +@@ -322,6 +322,6 @@ module_exit(camellia_sparc64_mod_fini);
301 + MODULE_LICENSE("GPL");
302 + MODULE_DESCRIPTION("Camellia Cipher Algorithm, sparc64 camellia opcode accelerated");
303 +
304 +-MODULE_ALIAS("aes");
305 ++MODULE_ALIAS_CRYPTO("aes");
306 +
307 + #include "crop_devid.c"
308 +diff --git a/arch/sparc/crypto/crc32c_glue.c b/arch/sparc/crypto/crc32c_glue.c
309 +index 5162fad912ce..d1064e46efe8 100644
310 +--- a/arch/sparc/crypto/crc32c_glue.c
311 ++++ b/arch/sparc/crypto/crc32c_glue.c
312 +@@ -176,6 +176,6 @@ module_exit(crc32c_sparc64_mod_fini);
313 + MODULE_LICENSE("GPL");
314 + MODULE_DESCRIPTION("CRC32c (Castagnoli), sparc64 crc32c opcode accelerated");
315 +
316 +-MODULE_ALIAS("crc32c");
317 ++MODULE_ALIAS_CRYPTO("crc32c");
318 +
319 + #include "crop_devid.c"
320 +diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
321 +index 3065bc61f9d3..d11500972994 100644
322 +--- a/arch/sparc/crypto/des_glue.c
323 ++++ b/arch/sparc/crypto/des_glue.c
324 +@@ -532,6 +532,6 @@ module_exit(des_sparc64_mod_fini);
325 + MODULE_LICENSE("GPL");
326 + MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms, sparc64 des opcode accelerated");
327 +
328 +-MODULE_ALIAS("des");
329 ++MODULE_ALIAS_CRYPTO("des");
330 +
331 + #include "crop_devid.c"
332 +diff --git a/arch/sparc/crypto/md5_glue.c b/arch/sparc/crypto/md5_glue.c
333 +index 09a9ea1dfb69..64c7ff5f72a9 100644
334 +--- a/arch/sparc/crypto/md5_glue.c
335 ++++ b/arch/sparc/crypto/md5_glue.c
336 +@@ -185,6 +185,6 @@ module_exit(md5_sparc64_mod_fini);
337 + MODULE_LICENSE("GPL");
338 + MODULE_DESCRIPTION("MD5 Secure Hash Algorithm, sparc64 md5 opcode accelerated");
339 +
340 +-MODULE_ALIAS("md5");
341 ++MODULE_ALIAS_CRYPTO("md5");
342 +
343 + #include "crop_devid.c"
344 +diff --git a/arch/sparc/crypto/sha1_glue.c b/arch/sparc/crypto/sha1_glue.c
345 +index 6cd5f29e1e0d..1b3e47accc74 100644
346 +--- a/arch/sparc/crypto/sha1_glue.c
347 ++++ b/arch/sparc/crypto/sha1_glue.c
348 +@@ -180,6 +180,6 @@ module_exit(sha1_sparc64_mod_fini);
349 + MODULE_LICENSE("GPL");
350 + MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, sparc64 sha1 opcode accelerated");
351 +
352 +-MODULE_ALIAS("sha1");
353 ++MODULE_ALIAS_CRYPTO("sha1");
354 +
355 + #include "crop_devid.c"
356 +diff --git a/arch/sparc/crypto/sha256_glue.c b/arch/sparc/crypto/sha256_glue.c
357 +index 04f555ab2680..41f27cca2a22 100644
358 +--- a/arch/sparc/crypto/sha256_glue.c
359 ++++ b/arch/sparc/crypto/sha256_glue.c
360 +@@ -237,7 +237,7 @@ module_exit(sha256_sparc64_mod_fini);
361 + MODULE_LICENSE("GPL");
362 + MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm, sparc64 sha256 opcode accelerated");
363 +
364 +-MODULE_ALIAS("sha224");
365 +-MODULE_ALIAS("sha256");
366 ++MODULE_ALIAS_CRYPTO("sha224");
367 ++MODULE_ALIAS_CRYPTO("sha256");
368 +
369 + #include "crop_devid.c"
370 +diff --git a/arch/sparc/crypto/sha512_glue.c b/arch/sparc/crypto/sha512_glue.c
371 +index f04d1994d19a..9fff88541b8c 100644
372 +--- a/arch/sparc/crypto/sha512_glue.c
373 ++++ b/arch/sparc/crypto/sha512_glue.c
374 +@@ -222,7 +222,7 @@ module_exit(sha512_sparc64_mod_fini);
375 + MODULE_LICENSE("GPL");
376 + MODULE_DESCRIPTION("SHA-384 and SHA-512 Secure Hash Algorithm, sparc64 sha512 opcode accelerated");
377 +
378 +-MODULE_ALIAS("sha384");
379 +-MODULE_ALIAS("sha512");
380 ++MODULE_ALIAS_CRYPTO("sha384");
381 ++MODULE_ALIAS_CRYPTO("sha512");
382 +
383 + #include "crop_devid.c"
384 +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
385 +index 41a503c15862..3635fff7b32d 100644
386 +--- a/arch/x86/Kconfig
387 ++++ b/arch/x86/Kconfig
388 +@@ -856,7 +856,7 @@ source "kernel/Kconfig.preempt"
389 +
390 + config X86_UP_APIC
391 + bool "Local APIC support on uniprocessors"
392 +- depends on X86_32 && !SMP && !X86_32_NON_STANDARD && !PCI_MSI
393 ++ depends on X86_32 && !SMP && !X86_32_NON_STANDARD
394 + ---help---
395 + A local APIC (Advanced Programmable Interrupt Controller) is an
396 + integrated interrupt controller in the CPU. If you have a single-CPU
397 +@@ -867,6 +867,10 @@ config X86_UP_APIC
398 + performance counters), and the NMI watchdog which detects hard
399 + lockups.
400 +
401 ++config X86_UP_APIC_MSI
402 ++ def_bool y
403 ++ select X86_UP_APIC if X86_32 && !SMP && !X86_32_NON_STANDARD && PCI_MSI
404 ++
405 + config X86_UP_IOAPIC
406 + bool "IO-APIC support on uniprocessors"
407 + depends on X86_UP_APIC
408 +diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
409 +index 30dd59a9f0b4..0c33a7c67ea5 100644
410 +--- a/arch/x86/boot/compressed/misc.c
411 ++++ b/arch/x86/boot/compressed/misc.c
412 +@@ -361,6 +361,8 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
413 + unsigned long output_len,
414 + unsigned long run_size)
415 + {
416 ++ unsigned char *output_orig = output;
417 ++
418 + real_mode = rmode;
419 +
420 + sanitize_boot_params(real_mode);
421 +@@ -409,7 +411,12 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
422 + debug_putstr("\nDecompressing Linux... ");
423 + decompress(input_data, input_len, NULL, NULL, output, NULL, error);
424 + parse_elf(output);
425 +- handle_relocations(output, output_len);
426 ++ /*
427 ++ * 32-bit always performs relocations. 64-bit relocations are only
428 ++ * needed if kASLR has chosen a different load address.
429 ++ */
430 ++ if (!IS_ENABLED(CONFIG_X86_64) || output != output_orig)
431 ++ handle_relocations(output, output_len);
432 + debug_putstr("done.\nBooting the kernel.\n");
433 + return output;
434 + }
435 +diff --git a/arch/x86/crypto/aes_glue.c b/arch/x86/crypto/aes_glue.c
436 +index aafe8ce0d65d..e26984f7ab8d 100644
437 +--- a/arch/x86/crypto/aes_glue.c
438 ++++ b/arch/x86/crypto/aes_glue.c
439 +@@ -66,5 +66,5 @@ module_exit(aes_fini);
440 +
441 + MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, asm optimized");
442 + MODULE_LICENSE("GPL");
443 +-MODULE_ALIAS("aes");
444 +-MODULE_ALIAS("aes-asm");
445 ++MODULE_ALIAS_CRYPTO("aes");
446 ++MODULE_ALIAS_CRYPTO("aes-asm");
447 +diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
448 +index 888950f29fd9..70fece226d17 100644
449 +--- a/arch/x86/crypto/aesni-intel_glue.c
450 ++++ b/arch/x86/crypto/aesni-intel_glue.c
451 +@@ -1550,4 +1550,4 @@ module_exit(aesni_exit);
452 +
453 + MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
454 + MODULE_LICENSE("GPL");
455 +-MODULE_ALIAS("aes");
456 ++MODULE_ALIAS_CRYPTO("aes");
457 +diff --git a/arch/x86/crypto/blowfish_glue.c b/arch/x86/crypto/blowfish_glue.c
458 +index 8af519ed73d1..17c05531dfd1 100644
459 +--- a/arch/x86/crypto/blowfish_glue.c
460 ++++ b/arch/x86/crypto/blowfish_glue.c
461 +@@ -478,5 +478,5 @@ module_exit(fini);
462 +
463 + MODULE_LICENSE("GPL");
464 + MODULE_DESCRIPTION("Blowfish Cipher Algorithm, asm optimized");
465 +-MODULE_ALIAS("blowfish");
466 +-MODULE_ALIAS("blowfish-asm");
467 ++MODULE_ALIAS_CRYPTO("blowfish");
468 ++MODULE_ALIAS_CRYPTO("blowfish-asm");
469 +diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c
470 +index 4209a76fcdaa..9a07fafe3831 100644
471 +--- a/arch/x86/crypto/camellia_aesni_avx2_glue.c
472 ++++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c
473 +@@ -582,5 +582,5 @@ module_exit(camellia_aesni_fini);
474 +
475 + MODULE_LICENSE("GPL");
476 + MODULE_DESCRIPTION("Camellia Cipher Algorithm, AES-NI/AVX2 optimized");
477 +-MODULE_ALIAS("camellia");
478 +-MODULE_ALIAS("camellia-asm");
479 ++MODULE_ALIAS_CRYPTO("camellia");
480 ++MODULE_ALIAS_CRYPTO("camellia-asm");
481 +diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c
482 +index 87a041a10f4a..ed38d959add6 100644
483 +--- a/arch/x86/crypto/camellia_aesni_avx_glue.c
484 ++++ b/arch/x86/crypto/camellia_aesni_avx_glue.c
485 +@@ -574,5 +574,5 @@ module_exit(camellia_aesni_fini);
486 +
487 + MODULE_LICENSE("GPL");
488 + MODULE_DESCRIPTION("Camellia Cipher Algorithm, AES-NI/AVX optimized");
489 +-MODULE_ALIAS("camellia");
490 +-MODULE_ALIAS("camellia-asm");
491 ++MODULE_ALIAS_CRYPTO("camellia");
492 ++MODULE_ALIAS_CRYPTO("camellia-asm");
493 +diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c
494 +index c171dcbf192d..5c8b6266a394 100644
495 +--- a/arch/x86/crypto/camellia_glue.c
496 ++++ b/arch/x86/crypto/camellia_glue.c
497 +@@ -1725,5 +1725,5 @@ module_exit(fini);
498 +
499 + MODULE_LICENSE("GPL");
500 + MODULE_DESCRIPTION("Camellia Cipher Algorithm, asm optimized");
501 +-MODULE_ALIAS("camellia");
502 +-MODULE_ALIAS("camellia-asm");
503 ++MODULE_ALIAS_CRYPTO("camellia");
504 ++MODULE_ALIAS_CRYPTO("camellia-asm");
505 +diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
506 +index e57e20ab5e0b..60ada677a928 100644
507 +--- a/arch/x86/crypto/cast5_avx_glue.c
508 ++++ b/arch/x86/crypto/cast5_avx_glue.c
509 +@@ -491,4 +491,4 @@ module_exit(cast5_exit);
510 +
511 + MODULE_DESCRIPTION("Cast5 Cipher Algorithm, AVX optimized");
512 + MODULE_LICENSE("GPL");
513 +-MODULE_ALIAS("cast5");
514 ++MODULE_ALIAS_CRYPTO("cast5");
515 +diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c
516 +index 09f3677393e4..0160f68a57ff 100644
517 +--- a/arch/x86/crypto/cast6_avx_glue.c
518 ++++ b/arch/x86/crypto/cast6_avx_glue.c
519 +@@ -611,4 +611,4 @@ module_exit(cast6_exit);
520 +
521 + MODULE_DESCRIPTION("Cast6 Cipher Algorithm, AVX optimized");
522 + MODULE_LICENSE("GPL");
523 +-MODULE_ALIAS("cast6");
524 ++MODULE_ALIAS_CRYPTO("cast6");
525 +diff --git a/arch/x86/crypto/crc32-pclmul_glue.c b/arch/x86/crypto/crc32-pclmul_glue.c
526 +index 9d014a74ef96..1937fc1d8763 100644
527 +--- a/arch/x86/crypto/crc32-pclmul_glue.c
528 ++++ b/arch/x86/crypto/crc32-pclmul_glue.c
529 +@@ -197,5 +197,5 @@ module_exit(crc32_pclmul_mod_fini);
530 + MODULE_AUTHOR("Alexander Boyko <alexander_boyko@×××××××.com>");
531 + MODULE_LICENSE("GPL");
532 +
533 +-MODULE_ALIAS("crc32");
534 +-MODULE_ALIAS("crc32-pclmul");
535 ++MODULE_ALIAS_CRYPTO("crc32");
536 ++MODULE_ALIAS_CRYPTO("crc32-pclmul");
537 +diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
538 +index 6812ad98355c..28640c3d6af7 100644
539 +--- a/arch/x86/crypto/crc32c-intel_glue.c
540 ++++ b/arch/x86/crypto/crc32c-intel_glue.c
541 +@@ -280,5 +280,5 @@ MODULE_AUTHOR("Austin Zhang <austin.zhang@×××××.com>, Kent Liu <kent.liu@intel.c
542 + MODULE_DESCRIPTION("CRC32c (Castagnoli) optimization using Intel Hardware.");
543 + MODULE_LICENSE("GPL");
544 +
545 +-MODULE_ALIAS("crc32c");
546 +-MODULE_ALIAS("crc32c-intel");
547 ++MODULE_ALIAS_CRYPTO("crc32c");
548 ++MODULE_ALIAS_CRYPTO("crc32c-intel");
549 +diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c
550 +index 7845d7fd54c0..b6c67bf30fdf 100644
551 +--- a/arch/x86/crypto/crct10dif-pclmul_glue.c
552 ++++ b/arch/x86/crypto/crct10dif-pclmul_glue.c
553 +@@ -147,5 +147,5 @@ MODULE_AUTHOR("Tim Chen <tim.c.chen@×××××××××××.com>");
554 + MODULE_DESCRIPTION("T10 DIF CRC calculation accelerated with PCLMULQDQ.");
555 + MODULE_LICENSE("GPL");
556 +
557 +-MODULE_ALIAS("crct10dif");
558 +-MODULE_ALIAS("crct10dif-pclmul");
559 ++MODULE_ALIAS_CRYPTO("crct10dif");
560 ++MODULE_ALIAS_CRYPTO("crct10dif-pclmul");
561 +diff --git a/arch/x86/crypto/des3_ede_glue.c b/arch/x86/crypto/des3_ede_glue.c
562 +index 0e9c0668fe4e..38a14f818ef1 100644
563 +--- a/arch/x86/crypto/des3_ede_glue.c
564 ++++ b/arch/x86/crypto/des3_ede_glue.c
565 +@@ -502,8 +502,8 @@ module_exit(des3_ede_x86_fini);
566 +
567 + MODULE_LICENSE("GPL");
568 + MODULE_DESCRIPTION("Triple DES EDE Cipher Algorithm, asm optimized");
569 +-MODULE_ALIAS("des3_ede");
570 +-MODULE_ALIAS("des3_ede-asm");
571 +-MODULE_ALIAS("des");
572 +-MODULE_ALIAS("des-asm");
573 ++MODULE_ALIAS_CRYPTO("des3_ede");
574 ++MODULE_ALIAS_CRYPTO("des3_ede-asm");
575 ++MODULE_ALIAS_CRYPTO("des");
576 ++MODULE_ALIAS_CRYPTO("des-asm");
577 + MODULE_AUTHOR("Jussi Kivilinna <jussi.kivilinna@×××.fi>");
578 +diff --git a/arch/x86/crypto/fpu.c b/arch/x86/crypto/fpu.c
579 +index 98d7a188f46b..f368ba261739 100644
580 +--- a/arch/x86/crypto/fpu.c
581 ++++ b/arch/x86/crypto/fpu.c
582 +@@ -17,6 +17,7 @@
583 + #include <linux/kernel.h>
584 + #include <linux/module.h>
585 + #include <linux/slab.h>
586 ++#include <linux/crypto.h>
587 + #include <asm/i387.h>
588 +
589 + struct crypto_fpu_ctx {
590 +@@ -159,3 +160,5 @@ void __exit crypto_fpu_exit(void)
591 + {
592 + crypto_unregister_template(&crypto_fpu_tmpl);
593 + }
594 ++
595 ++MODULE_ALIAS_CRYPTO("fpu");
596 +diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
597 +index 88bb7ba8b175..8253d85aa165 100644
598 +--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
599 ++++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
600 +@@ -341,4 +341,4 @@ module_exit(ghash_pclmulqdqni_mod_exit);
601 + MODULE_LICENSE("GPL");
602 + MODULE_DESCRIPTION("GHASH Message Digest Algorithm, "
603 + "acclerated by PCLMULQDQ-NI");
604 +-MODULE_ALIAS("ghash");
605 ++MODULE_ALIAS_CRYPTO("ghash");
606 +diff --git a/arch/x86/crypto/salsa20_glue.c b/arch/x86/crypto/salsa20_glue.c
607 +index 5e8e67739bb5..399a29d067d6 100644
608 +--- a/arch/x86/crypto/salsa20_glue.c
609 ++++ b/arch/x86/crypto/salsa20_glue.c
610 +@@ -119,5 +119,5 @@ module_exit(fini);
611 +
612 + MODULE_LICENSE("GPL");
613 + MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm (optimized assembly version)");
614 +-MODULE_ALIAS("salsa20");
615 +-MODULE_ALIAS("salsa20-asm");
616 ++MODULE_ALIAS_CRYPTO("salsa20");
617 ++MODULE_ALIAS_CRYPTO("salsa20-asm");
618 +diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c
619 +index 2fae489b1524..437e47a4d302 100644
620 +--- a/arch/x86/crypto/serpent_avx2_glue.c
621 ++++ b/arch/x86/crypto/serpent_avx2_glue.c
622 +@@ -558,5 +558,5 @@ module_exit(fini);
623 +
624 + MODULE_LICENSE("GPL");
625 + MODULE_DESCRIPTION("Serpent Cipher Algorithm, AVX2 optimized");
626 +-MODULE_ALIAS("serpent");
627 +-MODULE_ALIAS("serpent-asm");
628 ++MODULE_ALIAS_CRYPTO("serpent");
629 ++MODULE_ALIAS_CRYPTO("serpent-asm");
630 +diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c
631 +index ff4870870972..7e217398b4eb 100644
632 +--- a/arch/x86/crypto/serpent_avx_glue.c
633 ++++ b/arch/x86/crypto/serpent_avx_glue.c
634 +@@ -617,4 +617,4 @@ module_exit(serpent_exit);
635 +
636 + MODULE_DESCRIPTION("Serpent Cipher Algorithm, AVX optimized");
637 + MODULE_LICENSE("GPL");
638 +-MODULE_ALIAS("serpent");
639 ++MODULE_ALIAS_CRYPTO("serpent");
640 +diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c
641 +index 8c95f8637306..bf025adaea01 100644
642 +--- a/arch/x86/crypto/serpent_sse2_glue.c
643 ++++ b/arch/x86/crypto/serpent_sse2_glue.c
644 +@@ -618,4 +618,4 @@ module_exit(serpent_sse2_exit);
645 +
646 + MODULE_DESCRIPTION("Serpent Cipher Algorithm, SSE2 optimized");
647 + MODULE_LICENSE("GPL");
648 +-MODULE_ALIAS("serpent");
649 ++MODULE_ALIAS_CRYPTO("serpent");
650 +diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha-mb/sha1_mb.c
651 +index 99eefd812958..d42c9b7fadcf 100644
652 +--- a/arch/x86/crypto/sha-mb/sha1_mb.c
653 ++++ b/arch/x86/crypto/sha-mb/sha1_mb.c
654 +@@ -932,4 +932,4 @@ module_exit(sha1_mb_mod_fini);
655 + MODULE_LICENSE("GPL");
656 + MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, multi buffer accelerated");
657 +
658 +-MODULE_ALIAS("sha1");
659 ++MODULE_ALIAS_CRYPTO("sha1");
660 +diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
661 +index 74d16ef707c7..6c20fe04a738 100644
662 +--- a/arch/x86/crypto/sha1_ssse3_glue.c
663 ++++ b/arch/x86/crypto/sha1_ssse3_glue.c
664 +@@ -278,4 +278,4 @@ module_exit(sha1_ssse3_mod_fini);
665 + MODULE_LICENSE("GPL");
666 + MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, Supplemental SSE3 accelerated");
667 +
668 +-MODULE_ALIAS("sha1");
669 ++MODULE_ALIAS_CRYPTO("sha1");
670 +diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
671 +index f248546da1ca..4dc100d82902 100644
672 +--- a/arch/x86/crypto/sha256_ssse3_glue.c
673 ++++ b/arch/x86/crypto/sha256_ssse3_glue.c
674 +@@ -318,5 +318,5 @@ module_exit(sha256_ssse3_mod_fini);
675 + MODULE_LICENSE("GPL");
676 + MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, Supplemental SSE3 accelerated");
677 +
678 +-MODULE_ALIAS("sha256");
679 +-MODULE_ALIAS("sha224");
680 ++MODULE_ALIAS_CRYPTO("sha256");
681 ++MODULE_ALIAS_CRYPTO("sha224");
682 +diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
683 +index 8626b03e83b7..26a5898a6f26 100644
684 +--- a/arch/x86/crypto/sha512_ssse3_glue.c
685 ++++ b/arch/x86/crypto/sha512_ssse3_glue.c
686 +@@ -326,5 +326,5 @@ module_exit(sha512_ssse3_mod_fini);
687 + MODULE_LICENSE("GPL");
688 + MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, Supplemental SSE3 accelerated");
689 +
690 +-MODULE_ALIAS("sha512");
691 +-MODULE_ALIAS("sha384");
692 ++MODULE_ALIAS_CRYPTO("sha512");
693 ++MODULE_ALIAS_CRYPTO("sha384");
694 +diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c
695 +index 4e3c665be129..1ac531ea9bcc 100644
696 +--- a/arch/x86/crypto/twofish_avx_glue.c
697 ++++ b/arch/x86/crypto/twofish_avx_glue.c
698 +@@ -579,4 +579,4 @@ module_exit(twofish_exit);
699 +
700 + MODULE_DESCRIPTION("Twofish Cipher Algorithm, AVX optimized");
701 + MODULE_LICENSE("GPL");
702 +-MODULE_ALIAS("twofish");
703 ++MODULE_ALIAS_CRYPTO("twofish");
704 +diff --git a/arch/x86/crypto/twofish_glue.c b/arch/x86/crypto/twofish_glue.c
705 +index 0a5202303501..77e06c2da83d 100644
706 +--- a/arch/x86/crypto/twofish_glue.c
707 ++++ b/arch/x86/crypto/twofish_glue.c
708 +@@ -96,5 +96,5 @@ module_exit(fini);
709 +
710 + MODULE_LICENSE("GPL");
711 + MODULE_DESCRIPTION ("Twofish Cipher Algorithm, asm optimized");
712 +-MODULE_ALIAS("twofish");
713 +-MODULE_ALIAS("twofish-asm");
714 ++MODULE_ALIAS_CRYPTO("twofish");
715 ++MODULE_ALIAS_CRYPTO("twofish-asm");
716 +diff --git a/arch/x86/crypto/twofish_glue_3way.c b/arch/x86/crypto/twofish_glue_3way.c
717 +index 13e63b3e1dfb..56d8a08ee479 100644
718 +--- a/arch/x86/crypto/twofish_glue_3way.c
719 ++++ b/arch/x86/crypto/twofish_glue_3way.c
720 +@@ -495,5 +495,5 @@ module_exit(fini);
721 +
722 + MODULE_LICENSE("GPL");
723 + MODULE_DESCRIPTION("Twofish Cipher Algorithm, 3-way parallel asm optimized");
724 +-MODULE_ALIAS("twofish");
725 +-MODULE_ALIAS("twofish-asm");
726 ++MODULE_ALIAS_CRYPTO("twofish");
727 ++MODULE_ALIAS_CRYPTO("twofish-asm");
728 +diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
729 +index 50d033a8947d..a94b82e8f156 100644
730 +--- a/arch/x86/include/asm/desc.h
731 ++++ b/arch/x86/include/asm/desc.h
732 +@@ -251,7 +251,8 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
733 + gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
734 + }
735 +
736 +-#define _LDT_empty(info) \
737 ++/* This intentionally ignores lm, since 32-bit apps don't have that field. */
738 ++#define LDT_empty(info) \
739 + ((info)->base_addr == 0 && \
740 + (info)->limit == 0 && \
741 + (info)->contents == 0 && \
742 +@@ -261,11 +262,18 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
743 + (info)->seg_not_present == 1 && \
744 + (info)->useable == 0)
745 +
746 +-#ifdef CONFIG_X86_64
747 +-#define LDT_empty(info) (_LDT_empty(info) && ((info)->lm == 0))
748 +-#else
749 +-#define LDT_empty(info) (_LDT_empty(info))
750 +-#endif
751 ++/* Lots of programs expect an all-zero user_desc to mean "no segment at all". */
752 ++static inline bool LDT_zero(const struct user_desc *info)
753 ++{
754 ++ return (info->base_addr == 0 &&
755 ++ info->limit == 0 &&
756 ++ info->contents == 0 &&
757 ++ info->read_exec_only == 0 &&
758 ++ info->seg_32bit == 0 &&
759 ++ info->limit_in_pages == 0 &&
760 ++ info->seg_not_present == 0 &&
761 ++ info->useable == 0);
762 ++}
763 +
764 + static inline void clear_LDT(void)
765 + {
766 +diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
767 +index a450373e8e91..939155ffdece 100644
768 +--- a/arch/x86/kernel/cpu/mshyperv.c
769 ++++ b/arch/x86/kernel/cpu/mshyperv.c
770 +@@ -107,6 +107,7 @@ static struct clocksource hyperv_cs = {
771 + .rating = 400, /* use this when running on Hyperv*/
772 + .read = read_hv_clock,
773 + .mask = CLOCKSOURCE_MASK(64),
774 ++ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
775 + };
776 +
777 + static void __init ms_hyperv_init_platform(void)
778 +diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
779 +index 922d28581024..37907756fc41 100644
780 +--- a/arch/x86/kernel/irq.c
781 ++++ b/arch/x86/kernel/irq.c
782 +@@ -127,7 +127,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
783 + seq_printf(p, " Machine check polls\n");
784 + #endif
785 + #if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
786 +- seq_printf(p, "%*s: ", prec, "THR");
787 ++ seq_printf(p, "%*s: ", prec, "HYP");
788 + for_each_online_cpu(j)
789 + seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
790 + seq_printf(p, " Hypervisor callback interrupts\n");
791 +diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
792 +index 4e942f31b1a7..7fc5e843f247 100644
793 +--- a/arch/x86/kernel/tls.c
794 ++++ b/arch/x86/kernel/tls.c
795 +@@ -29,7 +29,28 @@ static int get_free_idx(void)
796 +
797 + static bool tls_desc_okay(const struct user_desc *info)
798 + {
799 +- if (LDT_empty(info))
800 ++ /*
801 ++ * For historical reasons (i.e. no one ever documented how any
802 ++ * of the segmentation APIs work), user programs can and do
803 ++ * assume that a struct user_desc that's all zeros except for
804 ++ * entry_number means "no segment at all". This never actually
805 ++ * worked. In fact, up to Linux 3.19, a struct user_desc like
806 ++ * this would create a 16-bit read-write segment with base and
807 ++ * limit both equal to zero.
808 ++ *
809 ++ * That was close enough to "no segment at all" until we
810 ++ * hardened this function to disallow 16-bit TLS segments. Fix
811 ++ * it up by interpreting these zeroed segments the way that they
812 ++ * were almost certainly intended to be interpreted.
813 ++ *
814 ++ * The correct way to ask for "no segment at all" is to specify
815 ++ * a user_desc that satisfies LDT_empty. To keep everything
816 ++ * working, we accept both.
817 ++ *
818 ++ * Note that there's a similar kludge in modify_ldt -- look at
819 ++ * the distinction between modes 1 and 0x11.
820 ++ */
821 ++ if (LDT_empty(info) || LDT_zero(info))
822 + return true;
823 +
824 + /*
825 +@@ -71,7 +92,7 @@ static void set_tls_desc(struct task_struct *p, int idx,
826 + cpu = get_cpu();
827 +
828 + while (n-- > 0) {
829 +- if (LDT_empty(info))
830 ++ if (LDT_empty(info) || LDT_zero(info))
831 + desc->a = desc->b = 0;
832 + else
833 + fill_ldt(desc, info);
834 +diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
835 +index b7e50bba3bbb..505449700e0c 100644
836 +--- a/arch/x86/kernel/tsc.c
837 ++++ b/arch/x86/kernel/tsc.c
838 +@@ -617,7 +617,7 @@ static unsigned long quick_pit_calibrate(void)
839 + goto success;
840 + }
841 + }
842 +- pr_err("Fast TSC calibration failed\n");
843 ++ pr_info("Fast TSC calibration failed\n");
844 + return 0;
845 +
846 + success:
847 +diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
848 +index 22e7ed9e6d8e..c7327a7761ca 100644
849 +--- a/arch/x86/kvm/emulate.c
850 ++++ b/arch/x86/kvm/emulate.c
851 +@@ -2345,7 +2345,7 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
852 + * Not recognized on AMD in compat mode (but is recognized in legacy
853 + * mode).
854 + */
855 +- if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
856 ++ if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
857 + && !vendor_intel(ctxt))
858 + return emulate_ud(ctxt);
859 +
860 +@@ -2358,25 +2358,13 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
861 + setup_syscalls_segments(ctxt, &cs, &ss);
862 +
863 + ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
864 +- switch (ctxt->mode) {
865 +- case X86EMUL_MODE_PROT32:
866 +- if ((msr_data & 0xfffc) == 0x0)
867 +- return emulate_gp(ctxt, 0);
868 +- break;
869 +- case X86EMUL_MODE_PROT64:
870 +- if (msr_data == 0x0)
871 +- return emulate_gp(ctxt, 0);
872 +- break;
873 +- default:
874 +- break;
875 +- }
876 ++ if ((msr_data & 0xfffc) == 0x0)
877 ++ return emulate_gp(ctxt, 0);
878 +
879 + ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
880 +- cs_sel = (u16)msr_data;
881 +- cs_sel &= ~SELECTOR_RPL_MASK;
882 ++ cs_sel = (u16)msr_data & ~SELECTOR_RPL_MASK;
883 + ss_sel = cs_sel + 8;
884 +- ss_sel &= ~SELECTOR_RPL_MASK;
885 +- if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
886 ++ if (efer & EFER_LMA) {
887 + cs.d = 0;
888 + cs.l = 1;
889 + }
890 +@@ -2385,10 +2373,11 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
891 + ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
892 +
893 + ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
894 +- ctxt->_eip = msr_data;
895 ++ ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
896 +
897 + ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
898 +- *reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
899 ++ *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
900 ++ (u32)msr_data;
901 +
902 + return X86EMUL_CONTINUE;
903 + }
904 +@@ -3788,8 +3777,8 @@ static const struct opcode group5[] = {
905 + };
906 +
907 + static const struct opcode group6[] = {
908 +- DI(Prot, sldt),
909 +- DI(Prot, str),
910 ++ DI(Prot | DstMem, sldt),
911 ++ DI(Prot | DstMem, str),
912 + II(Prot | Priv | SrcMem16, em_lldt, lldt),
913 + II(Prot | Priv | SrcMem16, em_ltr, ltr),
914 + N, N, N, N,
915 +diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
916 +index 37c1435889ce..d0583eb61a5d 100644
917 +--- a/arch/x86/pci/i386.c
918 ++++ b/arch/x86/pci/i386.c
919 +@@ -216,7 +216,7 @@ static void pcibios_allocate_bridge_resources(struct pci_dev *dev)
920 + continue;
921 + if (r->parent) /* Already allocated */
922 + continue;
923 +- if (!r->start || pci_claim_resource(dev, idx) < 0) {
924 ++ if (!r->start || pci_claim_bridge_resource(dev, idx) < 0) {
925 + /*
926 + * Something is wrong with the region.
927 + * Invalidate the resource to prevent
928 +diff --git a/crypto/842.c b/crypto/842.c
929 +index 65c7a89cfa09..b48f4f108c47 100644
930 +--- a/crypto/842.c
931 ++++ b/crypto/842.c
932 +@@ -180,3 +180,4 @@ module_exit(nx842_mod_exit);
933 +
934 + MODULE_LICENSE("GPL");
935 + MODULE_DESCRIPTION("842 Compression Algorithm");
936 ++MODULE_ALIAS_CRYPTO("842");
937 +diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
938 +index fd0d6b454975..3dd101144a58 100644
939 +--- a/crypto/aes_generic.c
940 ++++ b/crypto/aes_generic.c
941 +@@ -1474,4 +1474,5 @@ module_exit(aes_fini);
942 +
943 + MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
944 + MODULE_LICENSE("Dual BSD/GPL");
945 +-MODULE_ALIAS("aes");
946 ++MODULE_ALIAS_CRYPTO("aes");
947 ++MODULE_ALIAS_CRYPTO("aes-generic");
948 +diff --git a/crypto/algapi.c b/crypto/algapi.c
949 +index e8d3a7dca8c4..71a8143e23b1 100644
950 +--- a/crypto/algapi.c
951 ++++ b/crypto/algapi.c
952 +@@ -509,8 +509,8 @@ static struct crypto_template *__crypto_lookup_template(const char *name)
953 +
954 + struct crypto_template *crypto_lookup_template(const char *name)
955 + {
956 +- return try_then_request_module(__crypto_lookup_template(name), "%s",
957 +- name);
958 ++ return try_then_request_module(__crypto_lookup_template(name),
959 ++ "crypto-%s", name);
960 + }
961 + EXPORT_SYMBOL_GPL(crypto_lookup_template);
962 +
963 +diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
964 +index 666f1962a160..6f5bebc9bf01 100644
965 +--- a/crypto/ansi_cprng.c
966 ++++ b/crypto/ansi_cprng.c
967 +@@ -476,4 +476,5 @@ module_param(dbg, int, 0);
968 + MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)");
969 + module_init(prng_mod_init);
970 + module_exit(prng_mod_fini);
971 +-MODULE_ALIAS("stdrng");
972 ++MODULE_ALIAS_CRYPTO("stdrng");
973 ++MODULE_ALIAS_CRYPTO("ansi_cprng");
974 +diff --git a/crypto/anubis.c b/crypto/anubis.c
975 +index 008c8a4fb67c..4bb187c2a902 100644
976 +--- a/crypto/anubis.c
977 ++++ b/crypto/anubis.c
978 +@@ -704,3 +704,4 @@ module_exit(anubis_mod_fini);
979 +
980 + MODULE_LICENSE("GPL");
981 + MODULE_DESCRIPTION("Anubis Cryptographic Algorithm");
982 ++MODULE_ALIAS_CRYPTO("anubis");
983 +diff --git a/crypto/api.c b/crypto/api.c
984 +index a2b39c5f3649..2a81e98a0021 100644
985 +--- a/crypto/api.c
986 ++++ b/crypto/api.c
987 +@@ -216,11 +216,11 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
988 +
989 + alg = crypto_alg_lookup(name, type, mask);
990 + if (!alg) {
991 +- request_module("%s", name);
992 ++ request_module("crypto-%s", name);
993 +
994 + if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask &
995 + CRYPTO_ALG_NEED_FALLBACK))
996 +- request_module("%s-all", name);
997 ++ request_module("crypto-%s-all", name);
998 +
999 + alg = crypto_alg_lookup(name, type, mask);
1000 + }
1001 +diff --git a/crypto/arc4.c b/crypto/arc4.c
1002 +index 5a772c3657d5..f1a81925558f 100644
1003 +--- a/crypto/arc4.c
1004 ++++ b/crypto/arc4.c
1005 +@@ -166,3 +166,4 @@ module_exit(arc4_exit);
1006 + MODULE_LICENSE("GPL");
1007 + MODULE_DESCRIPTION("ARC4 Cipher Algorithm");
1008 + MODULE_AUTHOR("Jon Oberheide <jon@×××××××××.org>");
1009 ++MODULE_ALIAS_CRYPTO("arc4");
1010 +diff --git a/crypto/authenc.c b/crypto/authenc.c
1011 +index e1223559d5df..78fb16cab13f 100644
1012 +--- a/crypto/authenc.c
1013 ++++ b/crypto/authenc.c
1014 +@@ -721,3 +721,4 @@ module_exit(crypto_authenc_module_exit);
1015 +
1016 + MODULE_LICENSE("GPL");
1017 + MODULE_DESCRIPTION("Simple AEAD wrapper for IPsec");
1018 ++MODULE_ALIAS_CRYPTO("authenc");
1019 +diff --git a/crypto/authencesn.c b/crypto/authencesn.c
1020 +index 4be0dd4373a9..024bff2344fc 100644
1021 +--- a/crypto/authencesn.c
1022 ++++ b/crypto/authencesn.c
1023 +@@ -814,3 +814,4 @@ module_exit(crypto_authenc_esn_module_exit);
1024 + MODULE_LICENSE("GPL");
1025 + MODULE_AUTHOR("Steffen Klassert <steffen.klassert@×××××××.com>");
1026 + MODULE_DESCRIPTION("AEAD wrapper for IPsec with extended sequence numbers");
1027 ++MODULE_ALIAS_CRYPTO("authencesn");
1028 +diff --git a/crypto/blowfish_generic.c b/crypto/blowfish_generic.c
1029 +index 8baf5447d35b..87b392a77a93 100644
1030 +--- a/crypto/blowfish_generic.c
1031 ++++ b/crypto/blowfish_generic.c
1032 +@@ -138,4 +138,5 @@ module_exit(blowfish_mod_fini);
1033 +
1034 + MODULE_LICENSE("GPL");
1035 + MODULE_DESCRIPTION("Blowfish Cipher Algorithm");
1036 +-MODULE_ALIAS("blowfish");
1037 ++MODULE_ALIAS_CRYPTO("blowfish");
1038 ++MODULE_ALIAS_CRYPTO("blowfish-generic");
1039 +diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c
1040 +index 26bcd7a2d6b4..a02286bf319e 100644
1041 +--- a/crypto/camellia_generic.c
1042 ++++ b/crypto/camellia_generic.c
1043 +@@ -1098,4 +1098,5 @@ module_exit(camellia_fini);
1044 +
1045 + MODULE_DESCRIPTION("Camellia Cipher Algorithm");
1046 + MODULE_LICENSE("GPL");
1047 +-MODULE_ALIAS("camellia");
1048 ++MODULE_ALIAS_CRYPTO("camellia");
1049 ++MODULE_ALIAS_CRYPTO("camellia-generic");
1050 +diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c
1051 +index 5558f630a0eb..df5c72629383 100644
1052 +--- a/crypto/cast5_generic.c
1053 ++++ b/crypto/cast5_generic.c
1054 +@@ -549,4 +549,5 @@ module_exit(cast5_mod_fini);
1055 +
1056 + MODULE_LICENSE("GPL");
1057 + MODULE_DESCRIPTION("Cast5 Cipher Algorithm");
1058 +-MODULE_ALIAS("cast5");
1059 ++MODULE_ALIAS_CRYPTO("cast5");
1060 ++MODULE_ALIAS_CRYPTO("cast5-generic");
1061 +diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c
1062 +index de732528a430..058c8d755d03 100644
1063 +--- a/crypto/cast6_generic.c
1064 ++++ b/crypto/cast6_generic.c
1065 +@@ -291,4 +291,5 @@ module_exit(cast6_mod_fini);
1066 +
1067 + MODULE_LICENSE("GPL");
1068 + MODULE_DESCRIPTION("Cast6 Cipher Algorithm");
1069 +-MODULE_ALIAS("cast6");
1070 ++MODULE_ALIAS_CRYPTO("cast6");
1071 ++MODULE_ALIAS_CRYPTO("cast6-generic");
1072 +diff --git a/crypto/cbc.c b/crypto/cbc.c
1073 +index 61ac42e1e32b..780ee27b2d43 100644
1074 +--- a/crypto/cbc.c
1075 ++++ b/crypto/cbc.c
1076 +@@ -289,3 +289,4 @@ module_exit(crypto_cbc_module_exit);
1077 +
1078 + MODULE_LICENSE("GPL");
1079 + MODULE_DESCRIPTION("CBC block cipher algorithm");
1080 ++MODULE_ALIAS_CRYPTO("cbc");
1081 +diff --git a/crypto/ccm.c b/crypto/ccm.c
1082 +index 1df84217f7c9..003bbbd21a2b 100644
1083 +--- a/crypto/ccm.c
1084 ++++ b/crypto/ccm.c
1085 +@@ -879,5 +879,6 @@ module_exit(crypto_ccm_module_exit);
1086 +
1087 + MODULE_LICENSE("GPL");
1088 + MODULE_DESCRIPTION("Counter with CBC MAC");
1089 +-MODULE_ALIAS("ccm_base");
1090 +-MODULE_ALIAS("rfc4309");
1091 ++MODULE_ALIAS_CRYPTO("ccm_base");
1092 ++MODULE_ALIAS_CRYPTO("rfc4309");
1093 ++MODULE_ALIAS_CRYPTO("ccm");
1094 +diff --git a/crypto/chainiv.c b/crypto/chainiv.c
1095 +index 9c294c8f9a07..63c17d5992f7 100644
1096 +--- a/crypto/chainiv.c
1097 ++++ b/crypto/chainiv.c
1098 +@@ -359,3 +359,4 @@ module_exit(chainiv_module_exit);
1099 +
1100 + MODULE_LICENSE("GPL");
1101 + MODULE_DESCRIPTION("Chain IV Generator");
1102 ++MODULE_ALIAS_CRYPTO("chainiv");
1103 +diff --git a/crypto/cmac.c b/crypto/cmac.c
1104 +index 50880cf17fad..7a8bfbd548f6 100644
1105 +--- a/crypto/cmac.c
1106 ++++ b/crypto/cmac.c
1107 +@@ -313,3 +313,4 @@ module_exit(crypto_cmac_module_exit);
1108 +
1109 + MODULE_LICENSE("GPL");
1110 + MODULE_DESCRIPTION("CMAC keyed hash algorithm");
1111 ++MODULE_ALIAS_CRYPTO("cmac");
1112 +diff --git a/crypto/crc32.c b/crypto/crc32.c
1113 +index 9d1c41569898..187ded28cb0b 100644
1114 +--- a/crypto/crc32.c
1115 ++++ b/crypto/crc32.c
1116 +@@ -156,3 +156,4 @@ module_exit(crc32_mod_fini);
1117 + MODULE_AUTHOR("Alexander Boyko <alexander_boyko@×××××××.com>");
1118 + MODULE_DESCRIPTION("CRC32 calculations wrapper for lib/crc32");
1119 + MODULE_LICENSE("GPL");
1120 ++MODULE_ALIAS_CRYPTO("crc32");
1121 +diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c
1122 +index d9c7beba8e50..06f1b60f02b2 100644
1123 +--- a/crypto/crc32c_generic.c
1124 ++++ b/crypto/crc32c_generic.c
1125 +@@ -170,5 +170,6 @@ module_exit(crc32c_mod_fini);
1126 + MODULE_AUTHOR("Clay Haapala <chaapala@×××××.com>");
1127 + MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations wrapper for lib/crc32c");
1128 + MODULE_LICENSE("GPL");
1129 +-MODULE_ALIAS("crc32c");
1130 ++MODULE_ALIAS_CRYPTO("crc32c");
1131 ++MODULE_ALIAS_CRYPTO("crc32c-generic");
1132 + MODULE_SOFTDEP("pre: crc32c");
1133 +diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c
1134 +index 877e7114ec5c..c1229614c7e3 100644
1135 +--- a/crypto/crct10dif_generic.c
1136 ++++ b/crypto/crct10dif_generic.c
1137 +@@ -124,4 +124,5 @@ module_exit(crct10dif_mod_fini);
1138 + MODULE_AUTHOR("Tim Chen <tim.c.chen@×××××××××××.com>");
1139 + MODULE_DESCRIPTION("T10 DIF CRC calculation.");
1140 + MODULE_LICENSE("GPL");
1141 +-MODULE_ALIAS("crct10dif");
1142 ++MODULE_ALIAS_CRYPTO("crct10dif");
1143 ++MODULE_ALIAS_CRYPTO("crct10dif-generic");
1144 +diff --git a/crypto/cryptd.c b/crypto/cryptd.c
1145 +index e592c90abebb..650afac10fd7 100644
1146 +--- a/crypto/cryptd.c
1147 ++++ b/crypto/cryptd.c
1148 +@@ -955,3 +955,4 @@ module_exit(cryptd_exit);
1149 +
1150 + MODULE_LICENSE("GPL");
1151 + MODULE_DESCRIPTION("Software async crypto daemon");
1152 ++MODULE_ALIAS_CRYPTO("cryptd");
1153 +diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
1154 +index 1dc54bb95a87..a20319132e33 100644
1155 +--- a/crypto/crypto_null.c
1156 ++++ b/crypto/crypto_null.c
1157 +@@ -145,9 +145,9 @@ static struct crypto_alg null_algs[3] = { {
1158 + .coa_decompress = null_compress } }
1159 + } };
1160 +
1161 +-MODULE_ALIAS("compress_null");
1162 +-MODULE_ALIAS("digest_null");
1163 +-MODULE_ALIAS("cipher_null");
1164 ++MODULE_ALIAS_CRYPTO("compress_null");
1165 ++MODULE_ALIAS_CRYPTO("digest_null");
1166 ++MODULE_ALIAS_CRYPTO("cipher_null");
1167 +
1168 + static int __init crypto_null_mod_init(void)
1169 + {
1170 +diff --git a/crypto/ctr.c b/crypto/ctr.c
1171 +index f2b94f27bb2c..2386f7313952 100644
1172 +--- a/crypto/ctr.c
1173 ++++ b/crypto/ctr.c
1174 +@@ -466,4 +466,5 @@ module_exit(crypto_ctr_module_exit);
1175 +
1176 + MODULE_LICENSE("GPL");
1177 + MODULE_DESCRIPTION("CTR Counter block mode");
1178 +-MODULE_ALIAS("rfc3686");
1179 ++MODULE_ALIAS_CRYPTO("rfc3686");
1180 ++MODULE_ALIAS_CRYPTO("ctr");
1181 +diff --git a/crypto/cts.c b/crypto/cts.c
1182 +index 133f0874c95e..bd9405820e8a 100644
1183 +--- a/crypto/cts.c
1184 ++++ b/crypto/cts.c
1185 +@@ -351,3 +351,4 @@ module_exit(crypto_cts_module_exit);
1186 +
1187 + MODULE_LICENSE("Dual BSD/GPL");
1188 + MODULE_DESCRIPTION("CTS-CBC CipherText Stealing for CBC");
1189 ++MODULE_ALIAS_CRYPTO("cts");
1190 +diff --git a/crypto/deflate.c b/crypto/deflate.c
1191 +index b57d70eb156b..95d8d37c5021 100644
1192 +--- a/crypto/deflate.c
1193 ++++ b/crypto/deflate.c
1194 +@@ -222,4 +222,4 @@ module_exit(deflate_mod_fini);
1195 + MODULE_LICENSE("GPL");
1196 + MODULE_DESCRIPTION("Deflate Compression Algorithm for IPCOMP");
1197 + MODULE_AUTHOR("James Morris <jmorris@×××××××××××××.au>");
1198 +-
1199 ++MODULE_ALIAS_CRYPTO("deflate");
1200 +diff --git a/crypto/des_generic.c b/crypto/des_generic.c
1201 +index 298d464ab7d2..a71720544d11 100644
1202 +--- a/crypto/des_generic.c
1203 ++++ b/crypto/des_generic.c
1204 +@@ -983,8 +983,6 @@ static struct crypto_alg des_algs[2] = { {
1205 + .cia_decrypt = des3_ede_decrypt } }
1206 + } };
1207 +
1208 +-MODULE_ALIAS("des3_ede");
1209 +-
1210 + static int __init des_generic_mod_init(void)
1211 + {
1212 + return crypto_register_algs(des_algs, ARRAY_SIZE(des_algs));
1213 +@@ -1001,4 +999,7 @@ module_exit(des_generic_mod_fini);
1214 + MODULE_LICENSE("GPL");
1215 + MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms");
1216 + MODULE_AUTHOR("Dag Arne Osvik <da@×××××.no>");
1217 +-MODULE_ALIAS("des");
1218 ++MODULE_ALIAS_CRYPTO("des");
1219 ++MODULE_ALIAS_CRYPTO("des-generic");
1220 ++MODULE_ALIAS_CRYPTO("des3_ede");
1221 ++MODULE_ALIAS_CRYPTO("des3_ede-generic");
1222 +diff --git a/crypto/ecb.c b/crypto/ecb.c
1223 +index 935cfef4aa84..12011aff0971 100644
1224 +--- a/crypto/ecb.c
1225 ++++ b/crypto/ecb.c
1226 +@@ -185,3 +185,4 @@ module_exit(crypto_ecb_module_exit);
1227 +
1228 + MODULE_LICENSE("GPL");
1229 + MODULE_DESCRIPTION("ECB block cipher algorithm");
1230 ++MODULE_ALIAS_CRYPTO("ecb");
1231 +diff --git a/crypto/eseqiv.c b/crypto/eseqiv.c
1232 +index bf7ab4a89493..f116fae766f8 100644
1233 +--- a/crypto/eseqiv.c
1234 ++++ b/crypto/eseqiv.c
1235 +@@ -267,3 +267,4 @@ module_exit(eseqiv_module_exit);
1236 +
1237 + MODULE_LICENSE("GPL");
1238 + MODULE_DESCRIPTION("Encrypted Sequence Number IV Generator");
1239 ++MODULE_ALIAS_CRYPTO("eseqiv");
1240 +diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c
1241 +index 021d7fec6bc8..77286ea28865 100644
1242 +--- a/crypto/fcrypt.c
1243 ++++ b/crypto/fcrypt.c
1244 +@@ -420,3 +420,4 @@ module_exit(fcrypt_mod_fini);
1245 + MODULE_LICENSE("Dual BSD/GPL");
1246 + MODULE_DESCRIPTION("FCrypt Cipher Algorithm");
1247 + MODULE_AUTHOR("David Howells <dhowells@××××××.com>");
1248 ++MODULE_ALIAS_CRYPTO("fcrypt");
1249 +diff --git a/crypto/gcm.c b/crypto/gcm.c
1250 +index 276cdac567b6..2e403f6138c1 100644
1251 +--- a/crypto/gcm.c
1252 ++++ b/crypto/gcm.c
1253 +@@ -1441,6 +1441,7 @@ module_exit(crypto_gcm_module_exit);
1254 + MODULE_LICENSE("GPL");
1255 + MODULE_DESCRIPTION("Galois/Counter Mode");
1256 + MODULE_AUTHOR("Mikko Herranen <mh1@×××.fi>");
1257 +-MODULE_ALIAS("gcm_base");
1258 +-MODULE_ALIAS("rfc4106");
1259 +-MODULE_ALIAS("rfc4543");
1260 ++MODULE_ALIAS_CRYPTO("gcm_base");
1261 ++MODULE_ALIAS_CRYPTO("rfc4106");
1262 ++MODULE_ALIAS_CRYPTO("rfc4543");
1263 ++MODULE_ALIAS_CRYPTO("gcm");
1264 +diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
1265 +index 9d3f0c69a86f..bac70995e064 100644
1266 +--- a/crypto/ghash-generic.c
1267 ++++ b/crypto/ghash-generic.c
1268 +@@ -172,4 +172,5 @@ module_exit(ghash_mod_exit);
1269 +
1270 + MODULE_LICENSE("GPL");
1271 + MODULE_DESCRIPTION("GHASH Message Digest Algorithm");
1272 +-MODULE_ALIAS("ghash");
1273 ++MODULE_ALIAS_CRYPTO("ghash");
1274 ++MODULE_ALIAS_CRYPTO("ghash-generic");
1275 +diff --git a/crypto/hmac.c b/crypto/hmac.c
1276 +index e392219ddc61..72e38c098bb3 100644
1277 +--- a/crypto/hmac.c
1278 ++++ b/crypto/hmac.c
1279 +@@ -268,3 +268,4 @@ module_exit(hmac_module_exit);
1280 +
1281 + MODULE_LICENSE("GPL");
1282 + MODULE_DESCRIPTION("HMAC hash algorithm");
1283 ++MODULE_ALIAS_CRYPTO("hmac");
1284 +diff --git a/crypto/khazad.c b/crypto/khazad.c
1285 +index 60e7cd66facc..873eb5ded6d7 100644
1286 +--- a/crypto/khazad.c
1287 ++++ b/crypto/khazad.c
1288 +@@ -880,3 +880,4 @@ module_exit(khazad_mod_fini);
1289 +
1290 + MODULE_LICENSE("GPL");
1291 + MODULE_DESCRIPTION("Khazad Cryptographic Algorithm");
1292 ++MODULE_ALIAS_CRYPTO("khazad");
1293 +diff --git a/crypto/krng.c b/crypto/krng.c
1294 +index a2d2b72fc135..0224841b6579 100644
1295 +--- a/crypto/krng.c
1296 ++++ b/crypto/krng.c
1297 +@@ -62,4 +62,5 @@ module_exit(krng_mod_fini);
1298 +
1299 + MODULE_LICENSE("GPL");
1300 + MODULE_DESCRIPTION("Kernel Random Number Generator");
1301 +-MODULE_ALIAS("stdrng");
1302 ++MODULE_ALIAS_CRYPTO("stdrng");
1303 ++MODULE_ALIAS_CRYPTO("krng");
1304 +diff --git a/crypto/lrw.c b/crypto/lrw.c
1305 +index ba42acc4deba..6f9908a7ebcb 100644
1306 +--- a/crypto/lrw.c
1307 ++++ b/crypto/lrw.c
1308 +@@ -400,3 +400,4 @@ module_exit(crypto_module_exit);
1309 +
1310 + MODULE_LICENSE("GPL");
1311 + MODULE_DESCRIPTION("LRW block cipher mode");
1312 ++MODULE_ALIAS_CRYPTO("lrw");
1313 +diff --git a/crypto/lz4.c b/crypto/lz4.c
1314 +index 34d072b72a73..aefbceaf3104 100644
1315 +--- a/crypto/lz4.c
1316 ++++ b/crypto/lz4.c
1317 +@@ -104,3 +104,4 @@ module_exit(lz4_mod_fini);
1318 +
1319 + MODULE_LICENSE("GPL");
1320 + MODULE_DESCRIPTION("LZ4 Compression Algorithm");
1321 ++MODULE_ALIAS_CRYPTO("lz4");
1322 +diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c
1323 +index 9218b3fed5e3..a1d3b5bd3d85 100644
1324 +--- a/crypto/lz4hc.c
1325 ++++ b/crypto/lz4hc.c
1326 +@@ -104,3 +104,4 @@ module_exit(lz4hc_mod_fini);
1327 +
1328 + MODULE_LICENSE("GPL");
1329 + MODULE_DESCRIPTION("LZ4HC Compression Algorithm");
1330 ++MODULE_ALIAS_CRYPTO("lz4hc");
1331 +diff --git a/crypto/lzo.c b/crypto/lzo.c
1332 +index a8ff2f778dc4..4b3e92525dac 100644
1333 +--- a/crypto/lzo.c
1334 ++++ b/crypto/lzo.c
1335 +@@ -107,3 +107,4 @@ module_exit(lzo_mod_fini);
1336 +
1337 + MODULE_LICENSE("GPL");
1338 + MODULE_DESCRIPTION("LZO Compression Algorithm");
1339 ++MODULE_ALIAS_CRYPTO("lzo");
1340 +diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
1341 +index b39fbd530102..a8e870444ea9 100644
1342 +--- a/crypto/mcryptd.c
1343 ++++ b/crypto/mcryptd.c
1344 +@@ -703,3 +703,4 @@ module_exit(mcryptd_exit);
1345 +
1346 + MODULE_LICENSE("GPL");
1347 + MODULE_DESCRIPTION("Software async multibuffer crypto daemon");
1348 ++MODULE_ALIAS_CRYPTO("mcryptd");
1349 +diff --git a/crypto/md4.c b/crypto/md4.c
1350 +index 0477a6a01d58..3515af425cc9 100644
1351 +--- a/crypto/md4.c
1352 ++++ b/crypto/md4.c
1353 +@@ -255,4 +255,4 @@ module_exit(md4_mod_fini);
1354 +
1355 + MODULE_LICENSE("GPL");
1356 + MODULE_DESCRIPTION("MD4 Message Digest Algorithm");
1357 +-
1358 ++MODULE_ALIAS_CRYPTO("md4");
1359 +diff --git a/crypto/md5.c b/crypto/md5.c
1360 +index 7febeaab923b..36f5e5b103f3 100644
1361 +--- a/crypto/md5.c
1362 ++++ b/crypto/md5.c
1363 +@@ -168,3 +168,4 @@ module_exit(md5_mod_fini);
1364 +
1365 + MODULE_LICENSE("GPL");
1366 + MODULE_DESCRIPTION("MD5 Message Digest Algorithm");
1367 ++MODULE_ALIAS_CRYPTO("md5");
1368 +diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c
1369 +index 079b761bc70d..46195e0d0f4d 100644
1370 +--- a/crypto/michael_mic.c
1371 ++++ b/crypto/michael_mic.c
1372 +@@ -184,3 +184,4 @@ module_exit(michael_mic_exit);
1373 + MODULE_LICENSE("GPL v2");
1374 + MODULE_DESCRIPTION("Michael MIC");
1375 + MODULE_AUTHOR("Jouni Malinen <j@××.fi>");
1376 ++MODULE_ALIAS_CRYPTO("michael_mic");
1377 +diff --git a/crypto/pcbc.c b/crypto/pcbc.c
1378 +index d1b8bdfb5855..f654965f0933 100644
1379 +--- a/crypto/pcbc.c
1380 ++++ b/crypto/pcbc.c
1381 +@@ -295,3 +295,4 @@ module_exit(crypto_pcbc_module_exit);
1382 +
1383 + MODULE_LICENSE("GPL");
1384 + MODULE_DESCRIPTION("PCBC block cipher algorithm");
1385 ++MODULE_ALIAS_CRYPTO("pcbc");
1386 +diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
1387 +index 309d345ead95..c305d4112735 100644
1388 +--- a/crypto/pcrypt.c
1389 ++++ b/crypto/pcrypt.c
1390 +@@ -565,3 +565,4 @@ module_exit(pcrypt_exit);
1391 + MODULE_LICENSE("GPL");
1392 + MODULE_AUTHOR("Steffen Klassert <steffen.klassert@×××××××.com>");
1393 + MODULE_DESCRIPTION("Parallel crypto wrapper");
1394 ++MODULE_ALIAS_CRYPTO("pcrypt");
1395 +diff --git a/crypto/rmd128.c b/crypto/rmd128.c
1396 +index 8a0f68b7f257..049486ede938 100644
1397 +--- a/crypto/rmd128.c
1398 ++++ b/crypto/rmd128.c
1399 +@@ -327,3 +327,4 @@ module_exit(rmd128_mod_fini);
1400 + MODULE_LICENSE("GPL");
1401 + MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@××××××××.ch>");
1402 + MODULE_DESCRIPTION("RIPEMD-128 Message Digest");
1403 ++MODULE_ALIAS_CRYPTO("rmd128");
1404 +diff --git a/crypto/rmd160.c b/crypto/rmd160.c
1405 +index 525d7bb752cf..de585e51d455 100644
1406 +--- a/crypto/rmd160.c
1407 ++++ b/crypto/rmd160.c
1408 +@@ -371,3 +371,4 @@ module_exit(rmd160_mod_fini);
1409 + MODULE_LICENSE("GPL");
1410 + MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@××××××××.ch>");
1411 + MODULE_DESCRIPTION("RIPEMD-160 Message Digest");
1412 ++MODULE_ALIAS_CRYPTO("rmd160");
1413 +diff --git a/crypto/rmd256.c b/crypto/rmd256.c
1414 +index 69293d9b56e0..4ec02a754e09 100644
1415 +--- a/crypto/rmd256.c
1416 ++++ b/crypto/rmd256.c
1417 +@@ -346,3 +346,4 @@ module_exit(rmd256_mod_fini);
1418 + MODULE_LICENSE("GPL");
1419 + MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@××××××××.ch>");
1420 + MODULE_DESCRIPTION("RIPEMD-256 Message Digest");
1421 ++MODULE_ALIAS_CRYPTO("rmd256");
1422 +diff --git a/crypto/rmd320.c b/crypto/rmd320.c
1423 +index 09f97dfdfbba..770f2cb369f8 100644
1424 +--- a/crypto/rmd320.c
1425 ++++ b/crypto/rmd320.c
1426 +@@ -395,3 +395,4 @@ module_exit(rmd320_mod_fini);
1427 + MODULE_LICENSE("GPL");
1428 + MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@××××××××.ch>");
1429 + MODULE_DESCRIPTION("RIPEMD-320 Message Digest");
1430 ++MODULE_ALIAS_CRYPTO("rmd320");
1431 +diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
1432 +index 9a4770c02284..f550b5d94630 100644
1433 +--- a/crypto/salsa20_generic.c
1434 ++++ b/crypto/salsa20_generic.c
1435 +@@ -248,4 +248,5 @@ module_exit(salsa20_generic_mod_fini);
1436 +
1437 + MODULE_LICENSE("GPL");
1438 + MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm");
1439 +-MODULE_ALIAS("salsa20");
1440 ++MODULE_ALIAS_CRYPTO("salsa20");
1441 ++MODULE_ALIAS_CRYPTO("salsa20-generic");
1442 +diff --git a/crypto/seed.c b/crypto/seed.c
1443 +index 9c904d6d2151..c6ba8438be43 100644
1444 +--- a/crypto/seed.c
1445 ++++ b/crypto/seed.c
1446 +@@ -476,3 +476,4 @@ module_exit(seed_fini);
1447 + MODULE_DESCRIPTION("SEED Cipher Algorithm");
1448 + MODULE_LICENSE("GPL");
1449 + MODULE_AUTHOR("Hye-Shik Chang <perky@×××××××.org>, Kim Hyun <hkim@×××××××.kr>");
1450 ++MODULE_ALIAS_CRYPTO("seed");
1451 +diff --git a/crypto/seqiv.c b/crypto/seqiv.c
1452 +index ee190fcedcd2..9daa854cc485 100644
1453 +--- a/crypto/seqiv.c
1454 ++++ b/crypto/seqiv.c
1455 +@@ -362,3 +362,4 @@ module_exit(seqiv_module_exit);
1456 +
1457 + MODULE_LICENSE("GPL");
1458 + MODULE_DESCRIPTION("Sequence Number IV Generator");
1459 ++MODULE_ALIAS_CRYPTO("seqiv");
1460 +diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c
1461 +index 7ddbd7e88859..94970a794975 100644
1462 +--- a/crypto/serpent_generic.c
1463 ++++ b/crypto/serpent_generic.c
1464 +@@ -665,5 +665,6 @@ module_exit(serpent_mod_fini);
1465 + MODULE_LICENSE("GPL");
1466 + MODULE_DESCRIPTION("Serpent and tnepres (kerneli compatible serpent reversed) Cipher Algorithm");
1467 + MODULE_AUTHOR("Dag Arne Osvik <osvik@××××××.no>");
1468 +-MODULE_ALIAS("tnepres");
1469 +-MODULE_ALIAS("serpent");
1470 ++MODULE_ALIAS_CRYPTO("tnepres");
1471 ++MODULE_ALIAS_CRYPTO("serpent");
1472 ++MODULE_ALIAS_CRYPTO("serpent-generic");
1473 +diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
1474 +index 7bb047432782..a3e50c37eb6f 100644
1475 +--- a/crypto/sha1_generic.c
1476 ++++ b/crypto/sha1_generic.c
1477 +@@ -153,4 +153,5 @@ module_exit(sha1_generic_mod_fini);
1478 + MODULE_LICENSE("GPL");
1479 + MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
1480 +
1481 +-MODULE_ALIAS("sha1");
1482 ++MODULE_ALIAS_CRYPTO("sha1");
1483 ++MODULE_ALIAS_CRYPTO("sha1-generic");
1484 +diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
1485 +index 65e7b76b057f..b001ff5c2efc 100644
1486 +--- a/crypto/sha256_generic.c
1487 ++++ b/crypto/sha256_generic.c
1488 +@@ -384,5 +384,7 @@ module_exit(sha256_generic_mod_fini);
1489 + MODULE_LICENSE("GPL");
1490 + MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm");
1491 +
1492 +-MODULE_ALIAS("sha224");
1493 +-MODULE_ALIAS("sha256");
1494 ++MODULE_ALIAS_CRYPTO("sha224");
1495 ++MODULE_ALIAS_CRYPTO("sha224-generic");
1496 ++MODULE_ALIAS_CRYPTO("sha256");
1497 ++MODULE_ALIAS_CRYPTO("sha256-generic");
1498 +diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
1499 +index 95db67197cd9..1c3c3767e079 100644
1500 +--- a/crypto/sha512_generic.c
1501 ++++ b/crypto/sha512_generic.c
1502 +@@ -288,5 +288,7 @@ module_exit(sha512_generic_mod_fini);
1503 + MODULE_LICENSE("GPL");
1504 + MODULE_DESCRIPTION("SHA-512 and SHA-384 Secure Hash Algorithms");
1505 +
1506 +-MODULE_ALIAS("sha384");
1507 +-MODULE_ALIAS("sha512");
1508 ++MODULE_ALIAS_CRYPTO("sha384");
1509 ++MODULE_ALIAS_CRYPTO("sha384-generic");
1510 ++MODULE_ALIAS_CRYPTO("sha512");
1511 ++MODULE_ALIAS_CRYPTO("sha512-generic");
1512 +diff --git a/crypto/tea.c b/crypto/tea.c
1513 +index 0a572323ee4a..b70b441c7d1e 100644
1514 +--- a/crypto/tea.c
1515 ++++ b/crypto/tea.c
1516 +@@ -270,8 +270,9 @@ static void __exit tea_mod_fini(void)
1517 + crypto_unregister_algs(tea_algs, ARRAY_SIZE(tea_algs));
1518 + }
1519 +
1520 +-MODULE_ALIAS("xtea");
1521 +-MODULE_ALIAS("xeta");
1522 ++MODULE_ALIAS_CRYPTO("tea");
1523 ++MODULE_ALIAS_CRYPTO("xtea");
1524 ++MODULE_ALIAS_CRYPTO("xeta");
1525 +
1526 + module_init(tea_mod_init);
1527 + module_exit(tea_mod_fini);
1528 +diff --git a/crypto/tgr192.c b/crypto/tgr192.c
1529 +index 3c7af0d1ff7a..321bc6ff2a9d 100644
1530 +--- a/crypto/tgr192.c
1531 ++++ b/crypto/tgr192.c
1532 +@@ -676,8 +676,9 @@ static void __exit tgr192_mod_fini(void)
1533 + crypto_unregister_shashes(tgr_algs, ARRAY_SIZE(tgr_algs));
1534 + }
1535 +
1536 +-MODULE_ALIAS("tgr160");
1537 +-MODULE_ALIAS("tgr128");
1538 ++MODULE_ALIAS_CRYPTO("tgr192");
1539 ++MODULE_ALIAS_CRYPTO("tgr160");
1540 ++MODULE_ALIAS_CRYPTO("tgr128");
1541 +
1542 + module_init(tgr192_mod_init);
1543 + module_exit(tgr192_mod_fini);
1544 +diff --git a/crypto/twofish_generic.c b/crypto/twofish_generic.c
1545 +index 2d5000552d0f..ebf7a3efb572 100644
1546 +--- a/crypto/twofish_generic.c
1547 ++++ b/crypto/twofish_generic.c
1548 +@@ -211,4 +211,5 @@ module_exit(twofish_mod_fini);
1549 +
1550 + MODULE_LICENSE("GPL");
1551 + MODULE_DESCRIPTION ("Twofish Cipher Algorithm");
1552 +-MODULE_ALIAS("twofish");
1553 ++MODULE_ALIAS_CRYPTO("twofish");
1554 ++MODULE_ALIAS_CRYPTO("twofish-generic");
1555 +diff --git a/crypto/vmac.c b/crypto/vmac.c
1556 +index d84c24bd7ff7..df76a816cfb2 100644
1557 +--- a/crypto/vmac.c
1558 ++++ b/crypto/vmac.c
1559 +@@ -713,3 +713,4 @@ module_exit(vmac_module_exit);
1560 +
1561 + MODULE_LICENSE("GPL");
1562 + MODULE_DESCRIPTION("VMAC hash algorithm");
1563 ++MODULE_ALIAS_CRYPTO("vmac");
1564 +diff --git a/crypto/wp512.c b/crypto/wp512.c
1565 +index ec64e7762fbb..7ee5a043a988 100644
1566 +--- a/crypto/wp512.c
1567 ++++ b/crypto/wp512.c
1568 +@@ -1167,8 +1167,9 @@ static void __exit wp512_mod_fini(void)
1569 + crypto_unregister_shashes(wp_algs, ARRAY_SIZE(wp_algs));
1570 + }
1571 +
1572 +-MODULE_ALIAS("wp384");
1573 +-MODULE_ALIAS("wp256");
1574 ++MODULE_ALIAS_CRYPTO("wp512");
1575 ++MODULE_ALIAS_CRYPTO("wp384");
1576 ++MODULE_ALIAS_CRYPTO("wp256");
1577 +
1578 + module_init(wp512_mod_init);
1579 + module_exit(wp512_mod_fini);
1580 +diff --git a/crypto/xcbc.c b/crypto/xcbc.c
1581 +index a5fbdf3738cf..df90b332554c 100644
1582 +--- a/crypto/xcbc.c
1583 ++++ b/crypto/xcbc.c
1584 +@@ -286,3 +286,4 @@ module_exit(crypto_xcbc_module_exit);
1585 +
1586 + MODULE_LICENSE("GPL");
1587 + MODULE_DESCRIPTION("XCBC keyed hash algorithm");
1588 ++MODULE_ALIAS_CRYPTO("xcbc");
1589 +diff --git a/crypto/xts.c b/crypto/xts.c
1590 +index ca1608f44cb5..f6fd43f100c8 100644
1591 +--- a/crypto/xts.c
1592 ++++ b/crypto/xts.c
1593 +@@ -362,3 +362,4 @@ module_exit(crypto_module_exit);
1594 +
1595 + MODULE_LICENSE("GPL");
1596 + MODULE_DESCRIPTION("XTS block cipher mode");
1597 ++MODULE_ALIAS_CRYPTO("xts");
1598 +diff --git a/crypto/zlib.c b/crypto/zlib.c
1599 +index c9ee681d57fd..0eefa9d237ac 100644
1600 +--- a/crypto/zlib.c
1601 ++++ b/crypto/zlib.c
1602 +@@ -378,3 +378,4 @@ module_exit(zlib_mod_fini);
1603 + MODULE_LICENSE("GPL");
1604 + MODULE_DESCRIPTION("Zlib Compression Algorithm");
1605 + MODULE_AUTHOR("Sony Corporation");
1606 ++MODULE_ALIAS_CRYPTO("zlib");
1607 +diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
1608 +index 93b71420a046..6341e668f362 100644
1609 +--- a/drivers/acpi/device_pm.c
1610 ++++ b/drivers/acpi/device_pm.c
1611 +@@ -680,13 +680,21 @@ static int acpi_device_wakeup(struct acpi_device *adev, u32 target_state,
1612 + if (error)
1613 + return error;
1614 +
1615 ++ if (adev->wakeup.flags.enabled)
1616 ++ return 0;
1617 ++
1618 + res = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number);
1619 +- if (ACPI_FAILURE(res)) {
1620 ++ if (ACPI_SUCCESS(res)) {
1621 ++ adev->wakeup.flags.enabled = 1;
1622 ++ } else {
1623 + acpi_disable_wakeup_device_power(adev);
1624 + return -EIO;
1625 + }
1626 + } else {
1627 +- acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number);
1628 ++ if (adev->wakeup.flags.enabled) {
1629 ++ acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number);
1630 ++ adev->wakeup.flags.enabled = 0;
1631 ++ }
1632 + acpi_disable_wakeup_device_power(adev);
1633 + }
1634 + return 0;
1635 +diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
1636 +index 0f8538f238b6..0ffd3c930bed 100644
1637 +--- a/drivers/ata/ahci_xgene.c
1638 ++++ b/drivers/ata/ahci_xgene.c
1639 +@@ -188,7 +188,7 @@ static unsigned int xgene_ahci_read_id(struct ata_device *dev,
1640 + *
1641 + * Clear reserved bit 8 (DEVSLP bit) as we don't support DEVSLP
1642 + */
1643 +- id[ATA_ID_FEATURE_SUPP] &= ~(1 << 8);
1644 ++ id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8));
1645 +
1646 + return 0;
1647 + }
1648 +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
1649 +index c5ba15af87d3..485f7eab0d4b 100644
1650 +--- a/drivers/ata/libata-core.c
1651 ++++ b/drivers/ata/libata-core.c
1652 +@@ -4740,7 +4740,10 @@ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
1653 + return NULL;
1654 +
1655 + for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) {
1656 +- tag = tag < max_queue ? tag : 0;
1657 ++ if (ap->flags & ATA_FLAG_LOWTAG)
1658 ++ tag = i;
1659 ++ else
1660 ++ tag = tag < max_queue ? tag : 0;
1661 +
1662 + /* the last tag is reserved for internal command. */
1663 + if (tag == ATA_TAG_INTERNAL)
1664 +diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
1665 +index db90aa35cb71..2e86e3b85266 100644
1666 +--- a/drivers/ata/libata-sff.c
1667 ++++ b/drivers/ata/libata-sff.c
1668 +@@ -1333,7 +1333,19 @@ void ata_sff_flush_pio_task(struct ata_port *ap)
1669 + DPRINTK("ENTER\n");
1670 +
1671 + cancel_delayed_work_sync(&ap->sff_pio_task);
1672 ++
1673 ++ /*
1674 ++ * We wanna reset the HSM state to IDLE. If we do so without
1675 ++ * grabbing the port lock, critical sections protected by it which
1676 ++ * expect the HSM state to stay stable may get surprised. For
1677 ++ * example, we may set IDLE in between the time
1678 ++ * __ata_sff_port_intr() checks for HSM_ST_IDLE and before it calls
1679 ++ * ata_sff_hsm_move() causing ata_sff_hsm_move() to BUG().
1680 ++ */
1681 ++ spin_lock_irq(ap->lock);
1682 + ap->hsm_task_state = HSM_ST_IDLE;
1683 ++ spin_unlock_irq(ap->lock);
1684 ++
1685 + ap->sff_pio_task_link = NULL;
1686 +
1687 + if (ata_msg_ctl(ap))
1688 +diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
1689 +index 0bb2cabd2197..4ebaa1e7b2d3 100644
1690 +--- a/drivers/ata/sata_dwc_460ex.c
1691 ++++ b/drivers/ata/sata_dwc_460ex.c
1692 +@@ -797,7 +797,7 @@ static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq)
1693 + if (err) {
1694 + dev_err(host_pvt.dwc_dev, "%s: dma_request_interrupts returns"
1695 + " %d\n", __func__, err);
1696 +- goto error_out;
1697 ++ return err;
1698 + }
1699 +
1700 + /* Enabe DMA */
1701 +@@ -808,11 +808,6 @@ static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq)
1702 + sata_dma_regs);
1703 +
1704 + return 0;
1705 +-
1706 +-error_out:
1707 +- dma_dwc_exit(hsdev);
1708 +-
1709 +- return err;
1710 + }
1711 +
1712 + static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
1713 +@@ -1662,7 +1657,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
1714 + char *ver = (char *)&versionr;
1715 + u8 *base = NULL;
1716 + int err = 0;
1717 +- int irq, rc;
1718 ++ int irq;
1719 + struct ata_host *host;
1720 + struct ata_port_info pi = sata_dwc_port_info[0];
1721 + const struct ata_port_info *ppi[] = { &pi, NULL };
1722 +@@ -1725,7 +1720,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
1723 + if (irq == NO_IRQ) {
1724 + dev_err(&ofdev->dev, "no SATA DMA irq\n");
1725 + err = -ENODEV;
1726 +- goto error_out;
1727 ++ goto error_iomap;
1728 + }
1729 +
1730 + /* Get physical SATA DMA register base address */
1731 +@@ -1734,14 +1729,16 @@ static int sata_dwc_probe(struct platform_device *ofdev)
1732 + dev_err(&ofdev->dev, "ioremap failed for AHBDMA register"
1733 + " address\n");
1734 + err = -ENODEV;
1735 +- goto error_out;
1736 ++ goto error_iomap;
1737 + }
1738 +
1739 + /* Save dev for later use in dev_xxx() routines */
1740 + host_pvt.dwc_dev = &ofdev->dev;
1741 +
1742 + /* Initialize AHB DMAC */
1743 +- dma_dwc_init(hsdev, irq);
1744 ++ err = dma_dwc_init(hsdev, irq);
1745 ++ if (err)
1746 ++ goto error_dma_iomap;
1747 +
1748 + /* Enable SATA Interrupts */
1749 + sata_dwc_enable_interrupts(hsdev);
1750 +@@ -1759,9 +1756,8 @@ static int sata_dwc_probe(struct platform_device *ofdev)
1751 + * device discovery process, invoking our port_start() handler &
1752 + * error_handler() to execute a dummy Softreset EH session
1753 + */
1754 +- rc = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
1755 +-
1756 +- if (rc != 0)
1757 ++ err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
1758 ++ if (err)
1759 + dev_err(&ofdev->dev, "failed to activate host");
1760 +
1761 + dev_set_drvdata(&ofdev->dev, host);
1762 +@@ -1770,7 +1766,8 @@ static int sata_dwc_probe(struct platform_device *ofdev)
1763 + error_out:
1764 + /* Free SATA DMA resources */
1765 + dma_dwc_exit(hsdev);
1766 +-
1767 ++error_dma_iomap:
1768 ++ iounmap((void __iomem *)host_pvt.sata_dma_regs);
1769 + error_iomap:
1770 + iounmap(base);
1771 + error_kmalloc:
1772 +@@ -1791,6 +1788,7 @@ static int sata_dwc_remove(struct platform_device *ofdev)
1773 + /* Free SATA DMA resources */
1774 + dma_dwc_exit(hsdev);
1775 +
1776 ++ iounmap((void __iomem *)host_pvt.sata_dma_regs);
1777 + iounmap(hsdev->reg_base);
1778 + kfree(hsdev);
1779 + kfree(host);
1780 +diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
1781 +index d81b20ddb527..ea655949023f 100644
1782 +--- a/drivers/ata/sata_sil24.c
1783 ++++ b/drivers/ata/sata_sil24.c
1784 +@@ -246,7 +246,7 @@ enum {
1785 + /* host flags */
1786 + SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
1787 + ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA |
1788 +- ATA_FLAG_AN | ATA_FLAG_PMP,
1789 ++ ATA_FLAG_AN | ATA_FLAG_PMP | ATA_FLAG_LOWTAG,
1790 + SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */
1791 +
1792 + IRQ_STAT_4PORTS = 0xf,
1793 +diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
1794 +index 26c3779d871d..d29f5ffdb0f4 100644
1795 +--- a/drivers/bus/mvebu-mbus.c
1796 ++++ b/drivers/bus/mvebu-mbus.c
1797 +@@ -182,12 +182,25 @@ static void mvebu_mbus_disable_window(struct mvebu_mbus_state *mbus,
1798 + }
1799 +
1800 + /* Checks whether the given window number is available */
1801 ++
1802 ++/* On Armada XP, 375 and 38x the MBus window 13 has the remap
1803 ++ * capability, like windows 0 to 7. However, the mvebu-mbus driver
1804 ++ * isn't currently taking into account this special case, which means
1805 ++ * that when window 13 is actually used, the remap registers are left
1806 ++ * to 0, making the device using this MBus window unavailable. The
1807 ++ * quick fix for stable is to not use window 13. A follow up patch
1808 ++ * will correctly handle this window.
1809 ++*/
1810 + static int mvebu_mbus_window_is_free(struct mvebu_mbus_state *mbus,
1811 + const int win)
1812 + {
1813 + void __iomem *addr = mbus->mbuswins_base +
1814 + mbus->soc->win_cfg_offset(win);
1815 + u32 ctrl = readl(addr + WIN_CTRL_OFF);
1816 ++
1817 ++ if (win == 13)
1818 ++ return false;
1819 ++
1820 + return !(ctrl & WIN_CTRL_ENABLE);
1821 + }
1822 +
1823 +diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
1824 +index 9403061a2acc..83564c9cfdbe 100644
1825 +--- a/drivers/clocksource/exynos_mct.c
1826 ++++ b/drivers/clocksource/exynos_mct.c
1827 +@@ -97,8 +97,8 @@ static void exynos4_mct_write(unsigned int value, unsigned long offset)
1828 + writel_relaxed(value, reg_base + offset);
1829 +
1830 + if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) {
1831 +- stat_addr = (offset & ~EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
1832 +- switch (offset & EXYNOS4_MCT_L_MASK) {
1833 ++ stat_addr = (offset & EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
1834 ++ switch (offset & ~EXYNOS4_MCT_L_MASK) {
1835 + case MCT_L_TCON_OFFSET:
1836 + mask = 1 << 3; /* L_TCON write status */
1837 + break;
1838 +diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
1839 +index 633ba945e153..c178ed8c3908 100644
1840 +--- a/drivers/crypto/padlock-aes.c
1841 ++++ b/drivers/crypto/padlock-aes.c
1842 +@@ -563,4 +563,4 @@ MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
1843 + MODULE_LICENSE("GPL");
1844 + MODULE_AUTHOR("Michal Ludvig");
1845 +
1846 +-MODULE_ALIAS("aes");
1847 ++MODULE_ALIAS_CRYPTO("aes");
1848 +diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
1849 +index bace885634f2..95f7d27ce491 100644
1850 +--- a/drivers/crypto/padlock-sha.c
1851 ++++ b/drivers/crypto/padlock-sha.c
1852 +@@ -593,7 +593,7 @@ MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
1853 + MODULE_LICENSE("GPL");
1854 + MODULE_AUTHOR("Michal Ludvig");
1855 +
1856 +-MODULE_ALIAS("sha1-all");
1857 +-MODULE_ALIAS("sha256-all");
1858 +-MODULE_ALIAS("sha1-padlock");
1859 +-MODULE_ALIAS("sha256-padlock");
1860 ++MODULE_ALIAS_CRYPTO("sha1-all");
1861 ++MODULE_ALIAS_CRYPTO("sha256-all");
1862 ++MODULE_ALIAS_CRYPTO("sha1-padlock");
1863 ++MODULE_ALIAS_CRYPTO("sha256-padlock");
1864 +diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
1865 +index 244d73378f0e..7ee93f881db6 100644
1866 +--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
1867 ++++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
1868 +@@ -52,6 +52,7 @@
1869 + #include <linux/pci.h>
1870 + #include <linux/cdev.h>
1871 + #include <linux/uaccess.h>
1872 ++#include <linux/crypto.h>
1873 +
1874 + #include "adf_accel_devices.h"
1875 + #include "adf_common_drv.h"
1876 +@@ -487,4 +488,4 @@ module_exit(adf_unregister_ctl_device_driver);
1877 + MODULE_LICENSE("Dual BSD/GPL");
1878 + MODULE_AUTHOR("Intel");
1879 + MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
1880 +-MODULE_ALIAS("intel_qat");
1881 ++MODULE_ALIAS_CRYPTO("intel_qat");
1882 +diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
1883 +index 92105f3dc8e0..e4cea7c45142 100644
1884 +--- a/drivers/crypto/ux500/cryp/cryp_core.c
1885 ++++ b/drivers/crypto/ux500/cryp/cryp_core.c
1886 +@@ -1810,7 +1810,7 @@ module_exit(ux500_cryp_mod_fini);
1887 + module_param(cryp_mode, int, 0);
1888 +
1889 + MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 CRYP crypto engine.");
1890 +-MODULE_ALIAS("aes-all");
1891 +-MODULE_ALIAS("des-all");
1892 ++MODULE_ALIAS_CRYPTO("aes-all");
1893 ++MODULE_ALIAS_CRYPTO("des-all");
1894 +
1895 + MODULE_LICENSE("GPL");
1896 +diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
1897 +index 1c73f4fbc252..8e5e0187506f 100644
1898 +--- a/drivers/crypto/ux500/hash/hash_core.c
1899 ++++ b/drivers/crypto/ux500/hash/hash_core.c
1900 +@@ -1995,7 +1995,7 @@ module_exit(ux500_hash_mod_fini);
1901 + MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
1902 + MODULE_LICENSE("GPL");
1903 +
1904 +-MODULE_ALIAS("sha1-all");
1905 +-MODULE_ALIAS("sha256-all");
1906 +-MODULE_ALIAS("hmac-sha1-all");
1907 +-MODULE_ALIAS("hmac-sha256-all");
1908 ++MODULE_ALIAS_CRYPTO("sha1-all");
1909 ++MODULE_ALIAS_CRYPTO("sha256-all");
1910 ++MODULE_ALIAS_CRYPTO("hmac-sha1-all");
1911 ++MODULE_ALIAS_CRYPTO("hmac-sha256-all");
1912 +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
1913 +index 2de5f5f4ba45..fd76933eed04 100644
1914 +--- a/drivers/gpu/drm/i915/i915_gem.c
1915 ++++ b/drivers/gpu/drm/i915/i915_gem.c
1916 +@@ -5144,7 +5144,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
1917 + if (!mutex_is_locked(mutex))
1918 + return false;
1919 +
1920 +-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
1921 ++#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES)
1922 + return mutex->owner == task;
1923 + #else
1924 + /* Since UP may be pre-empted, we cannot assume that we own the lock */
1925 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
1926 +index cadc3bcf1de2..31b96643b59c 100644
1927 +--- a/drivers/gpu/drm/i915/intel_display.c
1928 ++++ b/drivers/gpu/drm/i915/intel_display.c
1929 +@@ -10019,7 +10019,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
1930 + if (obj->tiling_mode != work->old_fb_obj->tiling_mode)
1931 + /* vlv: DISPLAY_FLIP fails to change tiling */
1932 + ring = NULL;
1933 +- } else if (IS_IVYBRIDGE(dev)) {
1934 ++ } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
1935 + ring = &dev_priv->ring[BCS];
1936 + } else if (INTEL_INFO(dev)->gen >= 7) {
1937 + ring = obj->ring;
1938 +diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
1939 +index 850de57069be..121aff6a3b41 100644
1940 +--- a/drivers/gpu/drm/radeon/radeon_asic.c
1941 ++++ b/drivers/gpu/drm/radeon/radeon_asic.c
1942 +@@ -333,6 +333,20 @@ static struct radeon_asic_ring r300_gfx_ring = {
1943 + .set_wptr = &r100_gfx_set_wptr,
1944 + };
1945 +
1946 ++static struct radeon_asic_ring rv515_gfx_ring = {
1947 ++ .ib_execute = &r100_ring_ib_execute,
1948 ++ .emit_fence = &r300_fence_ring_emit,
1949 ++ .emit_semaphore = &r100_semaphore_ring_emit,
1950 ++ .cs_parse = &r300_cs_parse,
1951 ++ .ring_start = &rv515_ring_start,
1952 ++ .ring_test = &r100_ring_test,
1953 ++ .ib_test = &r100_ib_test,
1954 ++ .is_lockup = &r100_gpu_is_lockup,
1955 ++ .get_rptr = &r100_gfx_get_rptr,
1956 ++ .get_wptr = &r100_gfx_get_wptr,
1957 ++ .set_wptr = &r100_gfx_set_wptr,
1958 ++};
1959 ++
1960 + static struct radeon_asic r300_asic = {
1961 + .init = &r300_init,
1962 + .fini = &r300_fini,
1963 +@@ -748,7 +762,7 @@ static struct radeon_asic rv515_asic = {
1964 + .set_page = &rv370_pcie_gart_set_page,
1965 + },
1966 + .ring = {
1967 +- [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
1968 ++ [RADEON_RING_TYPE_GFX_INDEX] = &rv515_gfx_ring
1969 + },
1970 + .irq = {
1971 + .set = &rs600_irq_set,
1972 +@@ -814,7 +828,7 @@ static struct radeon_asic r520_asic = {
1973 + .set_page = &rv370_pcie_gart_set_page,
1974 + },
1975 + .ring = {
1976 +- [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
1977 ++ [RADEON_RING_TYPE_GFX_INDEX] = &rv515_gfx_ring
1978 + },
1979 + .irq = {
1980 + .set = &rs600_irq_set,
1981 +diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
1982 +index 32522cc940a1..f7da8fe96a66 100644
1983 +--- a/drivers/gpu/drm/radeon/radeon_pm.c
1984 ++++ b/drivers/gpu/drm/radeon/radeon_pm.c
1985 +@@ -1287,8 +1287,39 @@ dpm_failed:
1986 + return ret;
1987 + }
1988 +
1989 ++struct radeon_dpm_quirk {
1990 ++ u32 chip_vendor;
1991 ++ u32 chip_device;
1992 ++ u32 subsys_vendor;
1993 ++ u32 subsys_device;
1994 ++};
1995 ++
1996 ++/* cards with dpm stability problems */
1997 ++static struct radeon_dpm_quirk radeon_dpm_quirk_list[] = {
1998 ++ /* TURKS - https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1386534 */
1999 ++ { PCI_VENDOR_ID_ATI, 0x6759, 0x1682, 0x3195 },
2000 ++ /* TURKS - https://bugzilla.kernel.org/show_bug.cgi?id=83731 */
2001 ++ { PCI_VENDOR_ID_ATI, 0x6840, 0x1179, 0xfb81 },
2002 ++ { 0, 0, 0, 0 },
2003 ++};
2004 ++
2005 + int radeon_pm_init(struct radeon_device *rdev)
2006 + {
2007 ++ struct radeon_dpm_quirk *p = radeon_dpm_quirk_list;
2008 ++ bool disable_dpm = false;
2009 ++
2010 ++ /* Apply dpm quirks */
2011 ++ while (p && p->chip_device != 0) {
2012 ++ if (rdev->pdev->vendor == p->chip_vendor &&
2013 ++ rdev->pdev->device == p->chip_device &&
2014 ++ rdev->pdev->subsystem_vendor == p->subsys_vendor &&
2015 ++ rdev->pdev->subsystem_device == p->subsys_device) {
2016 ++ disable_dpm = true;
2017 ++ break;
2018 ++ }
2019 ++ ++p;
2020 ++ }
2021 ++
2022 + /* enable dpm on rv6xx+ */
2023 + switch (rdev->family) {
2024 + case CHIP_RV610:
2025 +@@ -1344,6 +1375,8 @@ int radeon_pm_init(struct radeon_device *rdev)
2026 + (!(rdev->flags & RADEON_IS_IGP)) &&
2027 + (!rdev->smc_fw))
2028 + rdev->pm.pm_method = PM_METHOD_PROFILE;
2029 ++ else if (disable_dpm && (radeon_dpm == -1))
2030 ++ rdev->pm.pm_method = PM_METHOD_PROFILE;
2031 + else if (radeon_dpm == 0)
2032 + rdev->pm.pm_method = PM_METHOD_PROFILE;
2033 + else
2034 +diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
2035 +index 676e6c2ba90a..2b70d3eca8fd 100644
2036 +--- a/drivers/gpu/drm/radeon/si_dpm.c
2037 ++++ b/drivers/gpu/drm/radeon/si_dpm.c
2038 +@@ -2908,6 +2908,22 @@ static int si_init_smc_spll_table(struct radeon_device *rdev)
2039 + return ret;
2040 + }
2041 +
2042 ++struct si_dpm_quirk {
2043 ++ u32 chip_vendor;
2044 ++ u32 chip_device;
2045 ++ u32 subsys_vendor;
2046 ++ u32 subsys_device;
2047 ++ u32 max_sclk;
2048 ++ u32 max_mclk;
2049 ++};
2050 ++
2051 ++/* cards with dpm stability problems */
2052 ++static struct si_dpm_quirk si_dpm_quirk_list[] = {
2053 ++ /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
2054 ++ { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
2055 ++ { 0, 0, 0, 0 },
2056 ++};
2057 ++
2058 + static void si_apply_state_adjust_rules(struct radeon_device *rdev,
2059 + struct radeon_ps *rps)
2060 + {
2061 +@@ -2918,7 +2934,22 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
2062 + u32 mclk, sclk;
2063 + u16 vddc, vddci;
2064 + u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
2065 ++ u32 max_sclk = 0, max_mclk = 0;
2066 + int i;
2067 ++ struct si_dpm_quirk *p = si_dpm_quirk_list;
2068 ++
2069 ++ /* Apply dpm quirks */
2070 ++ while (p && p->chip_device != 0) {
2071 ++ if (rdev->pdev->vendor == p->chip_vendor &&
2072 ++ rdev->pdev->device == p->chip_device &&
2073 ++ rdev->pdev->subsystem_vendor == p->subsys_vendor &&
2074 ++ rdev->pdev->subsystem_device == p->subsys_device) {
2075 ++ max_sclk = p->max_sclk;
2076 ++ max_mclk = p->max_mclk;
2077 ++ break;
2078 ++ }
2079 ++ ++p;
2080 ++ }
2081 +
2082 + if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
2083 + ni_dpm_vblank_too_short(rdev))
2084 +@@ -2972,6 +3003,14 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
2085 + if (ps->performance_levels[i].mclk > max_mclk_vddc)
2086 + ps->performance_levels[i].mclk = max_mclk_vddc;
2087 + }
2088 ++ if (max_mclk) {
2089 ++ if (ps->performance_levels[i].mclk > max_mclk)
2090 ++ ps->performance_levels[i].mclk = max_mclk;
2091 ++ }
2092 ++ if (max_sclk) {
2093 ++ if (ps->performance_levels[i].sclk > max_sclk)
2094 ++ ps->performance_levels[i].sclk = max_sclk;
2095 ++ }
2096 + }
2097 +
2098 + /* XXX validate the min clocks required for display */
2099 +diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
2100 +index cc4f9d80122e..fa22e5ba3d33 100644
2101 +--- a/drivers/irqchip/irq-atmel-aic-common.c
2102 ++++ b/drivers/irqchip/irq-atmel-aic-common.c
2103 +@@ -28,7 +28,7 @@
2104 + #define AT91_AIC_IRQ_MIN_PRIORITY 0
2105 + #define AT91_AIC_IRQ_MAX_PRIORITY 7
2106 +
2107 +-#define AT91_AIC_SRCTYPE GENMASK(7, 6)
2108 ++#define AT91_AIC_SRCTYPE GENMASK(6, 5)
2109 + #define AT91_AIC_SRCTYPE_LOW (0 << 5)
2110 + #define AT91_AIC_SRCTYPE_FALLING (1 << 5)
2111 + #define AT91_AIC_SRCTYPE_HIGH (2 << 5)
2112 +@@ -74,7 +74,7 @@ int aic_common_set_type(struct irq_data *d, unsigned type, unsigned *val)
2113 + return -EINVAL;
2114 + }
2115 +
2116 +- *val &= AT91_AIC_SRCTYPE;
2117 ++ *val &= ~AT91_AIC_SRCTYPE;
2118 + *val |= aic_type;
2119 +
2120 + return 0;
2121 +diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c
2122 +index 28718d3e8281..c03f140acbae 100644
2123 +--- a/drivers/irqchip/irq-omap-intc.c
2124 ++++ b/drivers/irqchip/irq-omap-intc.c
2125 +@@ -263,7 +263,7 @@ static int __init omap_init_irq_of(struct device_node *node)
2126 + return ret;
2127 + }
2128 +
2129 +-static int __init omap_init_irq_legacy(u32 base)
2130 ++static int __init omap_init_irq_legacy(u32 base, struct device_node *node)
2131 + {
2132 + int j, irq_base;
2133 +
2134 +@@ -277,7 +277,7 @@ static int __init omap_init_irq_legacy(u32 base)
2135 + irq_base = 0;
2136 + }
2137 +
2138 +- domain = irq_domain_add_legacy(NULL, omap_nr_irqs, irq_base, 0,
2139 ++ domain = irq_domain_add_legacy(node, omap_nr_irqs, irq_base, 0,
2140 + &irq_domain_simple_ops, NULL);
2141 +
2142 + omap_irq_soft_reset();
2143 +@@ -301,10 +301,26 @@ static int __init omap_init_irq(u32 base, struct device_node *node)
2144 + {
2145 + int ret;
2146 +
2147 +- if (node)
2148 ++ /*
2149 ++ * FIXME legacy OMAP DMA driver sitting under arch/arm/plat-omap/dma.c
2150 ++ * depends is still not ready for linear IRQ domains; because of that
2151 ++ * we need to temporarily "blacklist" OMAP2 and OMAP3 devices from using
2152 ++ * linear IRQ Domain until that driver is finally fixed.
2153 ++ */
2154 ++ if (of_device_is_compatible(node, "ti,omap2-intc") ||
2155 ++ of_device_is_compatible(node, "ti,omap3-intc")) {
2156 ++ struct resource res;
2157 ++
2158 ++ if (of_address_to_resource(node, 0, &res))
2159 ++ return -ENOMEM;
2160 ++
2161 ++ base = res.start;
2162 ++ ret = omap_init_irq_legacy(base, node);
2163 ++ } else if (node) {
2164 + ret = omap_init_irq_of(node);
2165 +- else
2166 +- ret = omap_init_irq_legacy(base);
2167 ++ } else {
2168 ++ ret = omap_init_irq_legacy(base, NULL);
2169 ++ }
2170 +
2171 + if (ret == 0)
2172 + omap_irq_enable_protection();
2173 +diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
2174 +index 06709257adde..97e3a6c07e31 100644
2175 +--- a/drivers/md/dm-cache-metadata.c
2176 ++++ b/drivers/md/dm-cache-metadata.c
2177 +@@ -94,6 +94,9 @@ struct cache_disk_superblock {
2178 + } __packed;
2179 +
2180 + struct dm_cache_metadata {
2181 ++ atomic_t ref_count;
2182 ++ struct list_head list;
2183 ++
2184 + struct block_device *bdev;
2185 + struct dm_block_manager *bm;
2186 + struct dm_space_map *metadata_sm;
2187 +@@ -669,10 +672,10 @@ static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned *flags)
2188 +
2189 + /*----------------------------------------------------------------*/
2190 +
2191 +-struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
2192 +- sector_t data_block_size,
2193 +- bool may_format_device,
2194 +- size_t policy_hint_size)
2195 ++static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
2196 ++ sector_t data_block_size,
2197 ++ bool may_format_device,
2198 ++ size_t policy_hint_size)
2199 + {
2200 + int r;
2201 + struct dm_cache_metadata *cmd;
2202 +@@ -683,6 +686,7 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
2203 + return NULL;
2204 + }
2205 +
2206 ++ atomic_set(&cmd->ref_count, 1);
2207 + init_rwsem(&cmd->root_lock);
2208 + cmd->bdev = bdev;
2209 + cmd->data_block_size = data_block_size;
2210 +@@ -705,10 +709,95 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
2211 + return cmd;
2212 + }
2213 +
2214 ++/*
2215 ++ * We keep a little list of ref counted metadata objects to prevent two
2216 ++ * different target instances creating separate bufio instances. This is
2217 ++ * an issue if a table is reloaded before the suspend.
2218 ++ */
2219 ++static DEFINE_MUTEX(table_lock);
2220 ++static LIST_HEAD(table);
2221 ++
2222 ++static struct dm_cache_metadata *lookup(struct block_device *bdev)
2223 ++{
2224 ++ struct dm_cache_metadata *cmd;
2225 ++
2226 ++ list_for_each_entry(cmd, &table, list)
2227 ++ if (cmd->bdev == bdev) {
2228 ++ atomic_inc(&cmd->ref_count);
2229 ++ return cmd;
2230 ++ }
2231 ++
2232 ++ return NULL;
2233 ++}
2234 ++
2235 ++static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
2236 ++ sector_t data_block_size,
2237 ++ bool may_format_device,
2238 ++ size_t policy_hint_size)
2239 ++{
2240 ++ struct dm_cache_metadata *cmd, *cmd2;
2241 ++
2242 ++ mutex_lock(&table_lock);
2243 ++ cmd = lookup(bdev);
2244 ++ mutex_unlock(&table_lock);
2245 ++
2246 ++ if (cmd)
2247 ++ return cmd;
2248 ++
2249 ++ cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size);
2250 ++ if (cmd) {
2251 ++ mutex_lock(&table_lock);
2252 ++ cmd2 = lookup(bdev);
2253 ++ if (cmd2) {
2254 ++ mutex_unlock(&table_lock);
2255 ++ __destroy_persistent_data_objects(cmd);
2256 ++ kfree(cmd);
2257 ++ return cmd2;
2258 ++ }
2259 ++ list_add(&cmd->list, &table);
2260 ++ mutex_unlock(&table_lock);
2261 ++ }
2262 ++
2263 ++ return cmd;
2264 ++}
2265 ++
2266 ++static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size)
2267 ++{
2268 ++ if (cmd->data_block_size != data_block_size) {
2269 ++ DMERR("data_block_size (%llu) different from that in metadata (%llu)\n",
2270 ++ (unsigned long long) data_block_size,
2271 ++ (unsigned long long) cmd->data_block_size);
2272 ++ return false;
2273 ++ }
2274 ++
2275 ++ return true;
2276 ++}
2277 ++
2278 ++struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
2279 ++ sector_t data_block_size,
2280 ++ bool may_format_device,
2281 ++ size_t policy_hint_size)
2282 ++{
2283 ++ struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size,
2284 ++ may_format_device, policy_hint_size);
2285 ++ if (cmd && !same_params(cmd, data_block_size)) {
2286 ++ dm_cache_metadata_close(cmd);
2287 ++ return NULL;
2288 ++ }
2289 ++
2290 ++ return cmd;
2291 ++}
2292 ++
2293 + void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
2294 + {
2295 +- __destroy_persistent_data_objects(cmd);
2296 +- kfree(cmd);
2297 ++ if (atomic_dec_and_test(&cmd->ref_count)) {
2298 ++ mutex_lock(&table_lock);
2299 ++ list_del(&cmd->list);
2300 ++ mutex_unlock(&table_lock);
2301 ++
2302 ++ __destroy_persistent_data_objects(cmd);
2303 ++ kfree(cmd);
2304 ++ }
2305 + }
2306 +
2307 + /*
2308 +diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
2309 +index da496cfb458d..3baed67cf26f 100644
2310 +--- a/drivers/md/dm-cache-target.c
2311 ++++ b/drivers/md/dm-cache-target.c
2312 +@@ -222,7 +222,13 @@ struct cache {
2313 + struct list_head need_commit_migrations;
2314 + sector_t migration_threshold;
2315 + wait_queue_head_t migration_wait;
2316 +- atomic_t nr_migrations;
2317 ++ atomic_t nr_allocated_migrations;
2318 ++
2319 ++ /*
2320 ++ * The number of in flight migrations that are performing
2321 ++ * background io. eg, promotion, writeback.
2322 ++ */
2323 ++ atomic_t nr_io_migrations;
2324 +
2325 + wait_queue_head_t quiescing_wait;
2326 + atomic_t quiescing;
2327 +@@ -258,7 +264,6 @@ struct cache {
2328 + struct dm_deferred_set *all_io_ds;
2329 +
2330 + mempool_t *migration_pool;
2331 +- struct dm_cache_migration *next_migration;
2332 +
2333 + struct dm_cache_policy *policy;
2334 + unsigned policy_nr_args;
2335 +@@ -349,10 +354,31 @@ static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cel
2336 + dm_bio_prison_free_cell(cache->prison, cell);
2337 + }
2338 +
2339 ++static struct dm_cache_migration *alloc_migration(struct cache *cache)
2340 ++{
2341 ++ struct dm_cache_migration *mg;
2342 ++
2343 ++ mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
2344 ++ if (mg) {
2345 ++ mg->cache = cache;
2346 ++ atomic_inc(&mg->cache->nr_allocated_migrations);
2347 ++ }
2348 ++
2349 ++ return mg;
2350 ++}
2351 ++
2352 ++static void free_migration(struct dm_cache_migration *mg)
2353 ++{
2354 ++ if (atomic_dec_and_test(&mg->cache->nr_allocated_migrations))
2355 ++ wake_up(&mg->cache->migration_wait);
2356 ++
2357 ++ mempool_free(mg, mg->cache->migration_pool);
2358 ++}
2359 ++
2360 + static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
2361 + {
2362 + if (!p->mg) {
2363 +- p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
2364 ++ p->mg = alloc_migration(cache);
2365 + if (!p->mg)
2366 + return -ENOMEM;
2367 + }
2368 +@@ -381,7 +407,7 @@ static void prealloc_free_structs(struct cache *cache, struct prealloc *p)
2369 + free_prison_cell(cache, p->cell1);
2370 +
2371 + if (p->mg)
2372 +- mempool_free(p->mg, cache->migration_pool);
2373 ++ free_migration(p->mg);
2374 + }
2375 +
2376 + static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
2377 +@@ -817,24 +843,14 @@ static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
2378 + * Migration covers moving data from the origin device to the cache, or
2379 + * vice versa.
2380 + *--------------------------------------------------------------*/
2381 +-static void free_migration(struct dm_cache_migration *mg)
2382 +-{
2383 +- mempool_free(mg, mg->cache->migration_pool);
2384 +-}
2385 +-
2386 +-static void inc_nr_migrations(struct cache *cache)
2387 ++static void inc_io_migrations(struct cache *cache)
2388 + {
2389 +- atomic_inc(&cache->nr_migrations);
2390 ++ atomic_inc(&cache->nr_io_migrations);
2391 + }
2392 +
2393 +-static void dec_nr_migrations(struct cache *cache)
2394 ++static void dec_io_migrations(struct cache *cache)
2395 + {
2396 +- atomic_dec(&cache->nr_migrations);
2397 +-
2398 +- /*
2399 +- * Wake the worker in case we're suspending the target.
2400 +- */
2401 +- wake_up(&cache->migration_wait);
2402 ++ atomic_dec(&cache->nr_io_migrations);
2403 + }
2404 +
2405 + static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
2406 +@@ -857,11 +873,10 @@ static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
2407 + wake_worker(cache);
2408 + }
2409 +
2410 +-static void cleanup_migration(struct dm_cache_migration *mg)
2411 ++static void free_io_migration(struct dm_cache_migration *mg)
2412 + {
2413 +- struct cache *cache = mg->cache;
2414 ++ dec_io_migrations(mg->cache);
2415 + free_migration(mg);
2416 +- dec_nr_migrations(cache);
2417 + }
2418 +
2419 + static void migration_failure(struct dm_cache_migration *mg)
2420 +@@ -886,7 +901,7 @@ static void migration_failure(struct dm_cache_migration *mg)
2421 + cell_defer(cache, mg->new_ocell, true);
2422 + }
2423 +
2424 +- cleanup_migration(mg);
2425 ++ free_io_migration(mg);
2426 + }
2427 +
2428 + static void migration_success_pre_commit(struct dm_cache_migration *mg)
2429 +@@ -897,7 +912,7 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
2430 + if (mg->writeback) {
2431 + clear_dirty(cache, mg->old_oblock, mg->cblock);
2432 + cell_defer(cache, mg->old_ocell, false);
2433 +- cleanup_migration(mg);
2434 ++ free_io_migration(mg);
2435 + return;
2436 +
2437 + } else if (mg->demote) {
2438 +@@ -907,14 +922,14 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
2439 + mg->old_oblock);
2440 + if (mg->promote)
2441 + cell_defer(cache, mg->new_ocell, true);
2442 +- cleanup_migration(mg);
2443 ++ free_io_migration(mg);
2444 + return;
2445 + }
2446 + } else {
2447 + if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) {
2448 + DMWARN_LIMIT("promotion failed; couldn't update on disk metadata");
2449 + policy_remove_mapping(cache->policy, mg->new_oblock);
2450 +- cleanup_migration(mg);
2451 ++ free_io_migration(mg);
2452 + return;
2453 + }
2454 + }
2455 +@@ -947,7 +962,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
2456 + } else {
2457 + if (mg->invalidate)
2458 + policy_remove_mapping(cache->policy, mg->old_oblock);
2459 +- cleanup_migration(mg);
2460 ++ free_io_migration(mg);
2461 + }
2462 +
2463 + } else {
2464 +@@ -962,7 +977,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
2465 + bio_endio(mg->new_ocell->holder, 0);
2466 + cell_defer(cache, mg->new_ocell, false);
2467 + }
2468 +- cleanup_migration(mg);
2469 ++ free_io_migration(mg);
2470 + }
2471 + }
2472 +
2473 +@@ -1178,7 +1193,7 @@ static void promote(struct cache *cache, struct prealloc *structs,
2474 + mg->new_ocell = cell;
2475 + mg->start_jiffies = jiffies;
2476 +
2477 +- inc_nr_migrations(cache);
2478 ++ inc_io_migrations(cache);
2479 + quiesce_migration(mg);
2480 + }
2481 +
2482 +@@ -1201,7 +1216,7 @@ static void writeback(struct cache *cache, struct prealloc *structs,
2483 + mg->new_ocell = NULL;
2484 + mg->start_jiffies = jiffies;
2485 +
2486 +- inc_nr_migrations(cache);
2487 ++ inc_io_migrations(cache);
2488 + quiesce_migration(mg);
2489 + }
2490 +
2491 +@@ -1227,7 +1242,7 @@ static void demote_then_promote(struct cache *cache, struct prealloc *structs,
2492 + mg->new_ocell = new_ocell;
2493 + mg->start_jiffies = jiffies;
2494 +
2495 +- inc_nr_migrations(cache);
2496 ++ inc_io_migrations(cache);
2497 + quiesce_migration(mg);
2498 + }
2499 +
2500 +@@ -1254,7 +1269,7 @@ static void invalidate(struct cache *cache, struct prealloc *structs,
2501 + mg->new_ocell = NULL;
2502 + mg->start_jiffies = jiffies;
2503 +
2504 +- inc_nr_migrations(cache);
2505 ++ inc_io_migrations(cache);
2506 + quiesce_migration(mg);
2507 + }
2508 +
2509 +@@ -1320,7 +1335,7 @@ static void process_discard_bio(struct cache *cache, struct bio *bio)
2510 +
2511 + static bool spare_migration_bandwidth(struct cache *cache)
2512 + {
2513 +- sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) *
2514 ++ sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
2515 + cache->sectors_per_block;
2516 + return current_volume < cache->migration_threshold;
2517 + }
2518 +@@ -1670,7 +1685,7 @@ static void stop_quiescing(struct cache *cache)
2519 +
2520 + static void wait_for_migrations(struct cache *cache)
2521 + {
2522 +- wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations));
2523 ++ wait_event(cache->migration_wait, !atomic_read(&cache->nr_allocated_migrations));
2524 + }
2525 +
2526 + static void stop_worker(struct cache *cache)
2527 +@@ -1782,9 +1797,6 @@ static void destroy(struct cache *cache)
2528 + {
2529 + unsigned i;
2530 +
2531 +- if (cache->next_migration)
2532 +- mempool_free(cache->next_migration, cache->migration_pool);
2533 +-
2534 + if (cache->migration_pool)
2535 + mempool_destroy(cache->migration_pool);
2536 +
2537 +@@ -2292,7 +2304,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
2538 + INIT_LIST_HEAD(&cache->quiesced_migrations);
2539 + INIT_LIST_HEAD(&cache->completed_migrations);
2540 + INIT_LIST_HEAD(&cache->need_commit_migrations);
2541 +- atomic_set(&cache->nr_migrations, 0);
2542 ++ atomic_set(&cache->nr_allocated_migrations, 0);
2543 ++ atomic_set(&cache->nr_io_migrations, 0);
2544 + init_waitqueue_head(&cache->migration_wait);
2545 +
2546 + init_waitqueue_head(&cache->quiescing_wait);
2547 +@@ -2351,8 +2364,6 @@ static int cache_create(struct cache_args *ca, struct cache **result)
2548 + goto bad;
2549 + }
2550 +
2551 +- cache->next_migration = NULL;
2552 +-
2553 + cache->need_tick_bio = true;
2554 + cache->sized = false;
2555 + cache->invalidate = false;
2556 +diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
2557 +index 88c257d1161b..377818887ed2 100644
2558 +--- a/drivers/media/pci/cx23885/cx23885-cards.c
2559 ++++ b/drivers/media/pci/cx23885/cx23885-cards.c
2560 +@@ -614,7 +614,7 @@ struct cx23885_board cx23885_boards[] = {
2561 + .portb = CX23885_MPEG_DVB,
2562 + },
2563 + [CX23885_BOARD_HAUPPAUGE_HVR4400] = {
2564 +- .name = "Hauppauge WinTV-HVR4400",
2565 ++ .name = "Hauppauge WinTV-HVR4400/HVR5500",
2566 + .porta = CX23885_ANALOG_VIDEO,
2567 + .portb = CX23885_MPEG_DVB,
2568 + .portc = CX23885_MPEG_DVB,
2569 +@@ -622,6 +622,10 @@ struct cx23885_board cx23885_boards[] = {
2570 + .tuner_addr = 0x60, /* 0xc0 >> 1 */
2571 + .tuner_bus = 1,
2572 + },
2573 ++ [CX23885_BOARD_HAUPPAUGE_STARBURST] = {
2574 ++ .name = "Hauppauge WinTV Starburst",
2575 ++ .portb = CX23885_MPEG_DVB,
2576 ++ },
2577 + [CX23885_BOARD_AVERMEDIA_HC81R] = {
2578 + .name = "AVerTV Hybrid Express Slim HC81R",
2579 + .tuner_type = TUNER_XC2028,
2580 +@@ -910,19 +914,19 @@ struct cx23885_subid cx23885_subids[] = {
2581 + }, {
2582 + .subvendor = 0x0070,
2583 + .subdevice = 0xc108,
2584 +- .card = CX23885_BOARD_HAUPPAUGE_HVR4400,
2585 ++ .card = CX23885_BOARD_HAUPPAUGE_HVR4400, /* Hauppauge WinTV HVR-4400 (Model 121xxx, Hybrid DVB-T/S2, IR) */
2586 + }, {
2587 + .subvendor = 0x0070,
2588 + .subdevice = 0xc138,
2589 +- .card = CX23885_BOARD_HAUPPAUGE_HVR4400,
2590 ++ .card = CX23885_BOARD_HAUPPAUGE_HVR4400, /* Hauppauge WinTV HVR-5500 (Model 121xxx, Hybrid DVB-T/C/S2, IR) */
2591 + }, {
2592 + .subvendor = 0x0070,
2593 + .subdevice = 0xc12a,
2594 +- .card = CX23885_BOARD_HAUPPAUGE_HVR4400,
2595 ++ .card = CX23885_BOARD_HAUPPAUGE_STARBURST, /* Hauppauge WinTV Starburst (Model 121x00, DVB-S2, IR) */
2596 + }, {
2597 + .subvendor = 0x0070,
2598 + .subdevice = 0xc1f8,
2599 +- .card = CX23885_BOARD_HAUPPAUGE_HVR4400,
2600 ++ .card = CX23885_BOARD_HAUPPAUGE_HVR4400, /* Hauppauge WinTV HVR-5500 (Model 121xxx, Hybrid DVB-T/C/S2, IR) */
2601 + }, {
2602 + .subvendor = 0x1461,
2603 + .subdevice = 0xd939,
2604 +@@ -1495,8 +1499,9 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
2605 + cx_write(GPIO_ISM, 0x00000000);/* INTERRUPTS active low*/
2606 + break;
2607 + case CX23885_BOARD_HAUPPAUGE_HVR4400:
2608 ++ case CX23885_BOARD_HAUPPAUGE_STARBURST:
2609 + /* GPIO-8 tda10071 demod reset */
2610 +- /* GPIO-9 si2165 demod reset */
2611 ++ /* GPIO-9 si2165 demod reset (only HVR4400/HVR5500)*/
2612 +
2613 + /* Put the parts into reset and back */
2614 + cx23885_gpio_enable(dev, GPIO_8 | GPIO_9, 1);
2615 +@@ -1760,6 +1765,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
2616 + case CX23885_BOARD_HAUPPAUGE_HVR1850:
2617 + case CX23885_BOARD_HAUPPAUGE_HVR1290:
2618 + case CX23885_BOARD_HAUPPAUGE_HVR4400:
2619 ++ case CX23885_BOARD_HAUPPAUGE_STARBURST:
2620 + case CX23885_BOARD_HAUPPAUGE_IMPACTVCBE:
2621 + if (dev->i2c_bus[0].i2c_rc == 0)
2622 + hauppauge_eeprom(dev, eeprom+0xc0);
2623 +@@ -1864,6 +1870,11 @@ void cx23885_card_setup(struct cx23885_dev *dev)
2624 + ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
2625 + ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
2626 + break;
2627 ++ case CX23885_BOARD_HAUPPAUGE_STARBURST:
2628 ++ ts1->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
2629 ++ ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
2630 ++ ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
2631 ++ break;
2632 + case CX23885_BOARD_DVBSKY_T9580:
2633 + ts1->gen_ctrl_val = 0x5; /* Parallel */
2634 + ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
2635 +diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
2636 +index 4cb90317ff45..a8d207929295 100644
2637 +--- a/drivers/media/pci/cx23885/cx23885-dvb.c
2638 ++++ b/drivers/media/pci/cx23885/cx23885-dvb.c
2639 +@@ -1586,6 +1586,17 @@ static int dvb_register(struct cx23885_tsport *port)
2640 + break;
2641 + }
2642 + break;
2643 ++ case CX23885_BOARD_HAUPPAUGE_STARBURST:
2644 ++ i2c_bus = &dev->i2c_bus[0];
2645 ++ fe0->dvb.frontend = dvb_attach(tda10071_attach,
2646 ++ &hauppauge_tda10071_config,
2647 ++ &i2c_bus->i2c_adap);
2648 ++ if (fe0->dvb.frontend != NULL) {
2649 ++ dvb_attach(a8293_attach, fe0->dvb.frontend,
2650 ++ &i2c_bus->i2c_adap,
2651 ++ &hauppauge_a8293_config);
2652 ++ }
2653 ++ break;
2654 + case CX23885_BOARD_DVBSKY_T9580:
2655 + i2c_bus = &dev->i2c_bus[0];
2656 + i2c_bus2 = &dev->i2c_bus[1];
2657 +diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
2658 +index 6c35e6115969..a33bead82ecc 100644
2659 +--- a/drivers/media/pci/cx23885/cx23885.h
2660 ++++ b/drivers/media/pci/cx23885/cx23885.h
2661 +@@ -93,6 +93,7 @@
2662 + #define CX23885_BOARD_HAUPPAUGE_IMPACTVCBE 43
2663 + #define CX23885_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL_EXP2 44
2664 + #define CX23885_BOARD_DVBSKY_T9580 45
2665 ++#define CX23885_BOARD_HAUPPAUGE_STARBURST 52
2666 +
2667 + #define GPIO_0 0x00000001
2668 + #define GPIO_1 0x00000002
2669 +diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
2670 +index f2e43de3dd87..ea36447c74f9 100644
2671 +--- a/drivers/media/v4l2-core/videobuf2-core.c
2672 ++++ b/drivers/media/v4l2-core/videobuf2-core.c
2673 +@@ -3142,27 +3142,26 @@ static int vb2_thread(void *data)
2674 + prequeue--;
2675 + } else {
2676 + call_void_qop(q, wait_finish, q);
2677 +- ret = vb2_internal_dqbuf(q, &fileio->b, 0);
2678 ++ if (!threadio->stop)
2679 ++ ret = vb2_internal_dqbuf(q, &fileio->b, 0);
2680 + call_void_qop(q, wait_prepare, q);
2681 + dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
2682 + }
2683 +- if (threadio->stop)
2684 +- break;
2685 +- if (ret)
2686 ++ if (ret || threadio->stop)
2687 + break;
2688 + try_to_freeze();
2689 +
2690 + vb = q->bufs[fileio->b.index];
2691 + if (!(fileio->b.flags & V4L2_BUF_FLAG_ERROR))
2692 +- ret = threadio->fnc(vb, threadio->priv);
2693 +- if (ret)
2694 +- break;
2695 ++ if (threadio->fnc(vb, threadio->priv))
2696 ++ break;
2697 + call_void_qop(q, wait_finish, q);
2698 + if (set_timestamp)
2699 + v4l2_get_timestamp(&fileio->b.timestamp);
2700 +- ret = vb2_internal_qbuf(q, &fileio->b);
2701 ++ if (!threadio->stop)
2702 ++ ret = vb2_internal_qbuf(q, &fileio->b);
2703 + call_void_qop(q, wait_prepare, q);
2704 +- if (ret)
2705 ++ if (ret || threadio->stop)
2706 + break;
2707 + }
2708 +
2709 +@@ -3231,11 +3230,11 @@ int vb2_thread_stop(struct vb2_queue *q)
2710 + threadio->stop = true;
2711 + vb2_internal_streamoff(q, q->type);
2712 + call_void_qop(q, wait_prepare, q);
2713 ++ err = kthread_stop(threadio->thread);
2714 + q->fileio = NULL;
2715 + fileio->req.count = 0;
2716 + vb2_reqbufs(q, &fileio->req);
2717 + kfree(fileio);
2718 +- err = kthread_stop(threadio->thread);
2719 + threadio->thread = NULL;
2720 + kfree(threadio);
2721 + q->fileio = NULL;
2722 +diff --git a/drivers/mfd/rtsx_usb.c b/drivers/mfd/rtsx_usb.c
2723 +index 9cf98d142d9a..c2474cf6bfef 100644
2724 +--- a/drivers/mfd/rtsx_usb.c
2725 ++++ b/drivers/mfd/rtsx_usb.c
2726 +@@ -681,21 +681,9 @@ static void rtsx_usb_disconnect(struct usb_interface *intf)
2727 + #ifdef CONFIG_PM
2728 + static int rtsx_usb_suspend(struct usb_interface *intf, pm_message_t message)
2729 + {
2730 +- struct rtsx_ucr *ucr =
2731 +- (struct rtsx_ucr *)usb_get_intfdata(intf);
2732 +-
2733 + dev_dbg(&intf->dev, "%s called with pm message 0x%04x\n",
2734 + __func__, message.event);
2735 +
2736 +- /*
2737 +- * Call to make sure LED is off during suspend to save more power.
2738 +- * It is NOT a permanent state and could be turned on anytime later.
2739 +- * Thus no need to call turn_on when resunming.
2740 +- */
2741 +- mutex_lock(&ucr->dev_mutex);
2742 +- rtsx_usb_turn_off_led(ucr);
2743 +- mutex_unlock(&ucr->dev_mutex);
2744 +-
2745 + return 0;
2746 + }
2747 +
2748 +diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
2749 +index 0d256cb002eb..d6b764349f9d 100644
2750 +--- a/drivers/mfd/tps65218.c
2751 ++++ b/drivers/mfd/tps65218.c
2752 +@@ -125,10 +125,21 @@ int tps65218_clear_bits(struct tps65218 *tps, unsigned int reg,
2753 + }
2754 + EXPORT_SYMBOL_GPL(tps65218_clear_bits);
2755 +
2756 ++static const struct regmap_range tps65218_yes_ranges[] = {
2757 ++ regmap_reg_range(TPS65218_REG_INT1, TPS65218_REG_INT2),
2758 ++ regmap_reg_range(TPS65218_REG_STATUS, TPS65218_REG_STATUS),
2759 ++};
2760 ++
2761 ++static const struct regmap_access_table tps65218_volatile_table = {
2762 ++ .yes_ranges = tps65218_yes_ranges,
2763 ++ .n_yes_ranges = ARRAY_SIZE(tps65218_yes_ranges),
2764 ++};
2765 ++
2766 + static struct regmap_config tps65218_regmap_config = {
2767 + .reg_bits = 8,
2768 + .val_bits = 8,
2769 + .cache_type = REGCACHE_RBTREE,
2770 ++ .volatile_table = &tps65218_volatile_table,
2771 + };
2772 +
2773 + static const struct regmap_irq tps65218_irqs[] = {
2774 +@@ -193,6 +204,7 @@ static struct regmap_irq_chip tps65218_irq_chip = {
2775 +
2776 + .num_regs = 2,
2777 + .mask_base = TPS65218_REG_INT_MASK1,
2778 ++ .status_base = TPS65218_REG_INT1,
2779 + };
2780 +
2781 + static const struct of_device_id of_tps65218_match_table[] = {
2782 +diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
2783 +index 2cfe5012e4e5..4b008c9c738d 100644
2784 +--- a/drivers/net/can/dev.c
2785 ++++ b/drivers/net/can/dev.c
2786 +@@ -729,10 +729,14 @@ static int can_changelink(struct net_device *dev,
2787 + if (dev->flags & IFF_UP)
2788 + return -EBUSY;
2789 + cm = nla_data(data[IFLA_CAN_CTRLMODE]);
2790 +- if (cm->flags & ~priv->ctrlmode_supported)
2791 ++
2792 ++ /* check whether changed bits are allowed to be modified */
2793 ++ if (cm->mask & ~priv->ctrlmode_supported)
2794 + return -EOPNOTSUPP;
2795 ++
2796 ++ /* clear bits to be modified and copy the flag values */
2797 + priv->ctrlmode &= ~cm->mask;
2798 +- priv->ctrlmode |= cm->flags;
2799 ++ priv->ctrlmode |= (cm->flags & cm->mask);
2800 +
2801 + /* CAN_CTRLMODE_FD can only be set when driver supports FD */
2802 + if (priv->ctrlmode & CAN_CTRLMODE_FD)
2803 +diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
2804 +index d7bc462aafdc..244529881be9 100644
2805 +--- a/drivers/net/can/m_can/m_can.c
2806 ++++ b/drivers/net/can/m_can/m_can.c
2807 +@@ -955,6 +955,11 @@ static struct net_device *alloc_m_can_dev(void)
2808 + priv->can.data_bittiming_const = &m_can_data_bittiming_const;
2809 + priv->can.do_set_mode = m_can_set_mode;
2810 + priv->can.do_get_berr_counter = m_can_get_berr_counter;
2811 ++
2812 ++ /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.1 */
2813 ++ priv->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO;
2814 ++
2815 ++ /* CAN_CTRLMODE_FD_NON_ISO can not be changed with M_CAN IP v3.0.1 */
2816 + priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
2817 + CAN_CTRLMODE_LISTENONLY |
2818 + CAN_CTRLMODE_BERR_REPORTING |
2819 +diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
2820 +index 1354c68f6468..be0527734170 100644
2821 +--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
2822 ++++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
2823 +@@ -672,6 +672,7 @@ struct iwl_scan_channel_opt {
2824 + * @IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED: all passive scans will be fragmented
2825 + * @IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED: insert WFA vendor-specific TPC report
2826 + * and DS parameter set IEs into probe requests.
2827 ++ * @IWL_MVM_LMAC_SCAN_FLAG_MATCH: Send match found notification on matches
2828 + */
2829 + enum iwl_mvm_lmac_scan_flags {
2830 + IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL = BIT(0),
2831 +@@ -681,6 +682,7 @@ enum iwl_mvm_lmac_scan_flags {
2832 + IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS = BIT(4),
2833 + IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED = BIT(5),
2834 + IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED = BIT(6),
2835 ++ IWL_MVM_LMAC_SCAN_FLAG_MATCH = BIT(9),
2836 + };
2837 +
2838 + enum iwl_scan_priority {
2839 +diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
2840 +index 7554f7053830..886b64710443 100644
2841 +--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
2842 ++++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
2843 +@@ -1334,6 +1334,7 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
2844 + IWL_DEBUG_SCAN(mvm,
2845 + "Sending scheduled scan with filtering, n_match_sets %d\n",
2846 + req->n_match_sets);
2847 ++ flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH;
2848 + } else {
2849 + IWL_DEBUG_SCAN(mvm,
2850 + "Sending Scheduled scan without filtering\n");
2851 +diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
2852 +index 73aef51a28f0..8fb16188cd82 100644
2853 +--- a/drivers/pci/bus.c
2854 ++++ b/drivers/pci/bus.c
2855 +@@ -228,6 +228,49 @@ int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
2856 + }
2857 + EXPORT_SYMBOL(pci_bus_alloc_resource);
2858 +
2859 ++/*
2860 ++ * The @idx resource of @dev should be a PCI-PCI bridge window. If this
2861 ++ * resource fits inside a window of an upstream bridge, do nothing. If it
2862 ++ * overlaps an upstream window but extends outside it, clip the resource so
2863 ++ * it fits completely inside.
2864 ++ */
2865 ++bool pci_bus_clip_resource(struct pci_dev *dev, int idx)
2866 ++{
2867 ++ struct pci_bus *bus = dev->bus;
2868 ++ struct resource *res = &dev->resource[idx];
2869 ++ struct resource orig_res = *res;
2870 ++ struct resource *r;
2871 ++ int i;
2872 ++
2873 ++ pci_bus_for_each_resource(bus, r, i) {
2874 ++ resource_size_t start, end;
2875 ++
2876 ++ if (!r)
2877 ++ continue;
2878 ++
2879 ++ if (resource_type(res) != resource_type(r))
2880 ++ continue;
2881 ++
2882 ++ start = max(r->start, res->start);
2883 ++ end = min(r->end, res->end);
2884 ++
2885 ++ if (start > end)
2886 ++ continue; /* no overlap */
2887 ++
2888 ++ if (res->start == start && res->end == end)
2889 ++ return false; /* no change */
2890 ++
2891 ++ res->start = start;
2892 ++ res->end = end;
2893 ++ dev_printk(KERN_DEBUG, &dev->dev, "%pR clipped to %pR\n",
2894 ++ &orig_res, res);
2895 ++
2896 ++ return true;
2897 ++ }
2898 ++
2899 ++ return false;
2900 ++}
2901 ++
2902 + void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { }
2903 +
2904 + /**
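
The clipping helper above reduces a bridge window to its intersection with
whatever upstream window it overlaps, and reports whether anything changed.
The arithmetic is a plain interval intersection; a standalone sketch of the
same calculation (ordinary C with a hypothetical range type, not the
kernel's struct resource):

    #include <stdbool.h>
    #include <stdio.h>

    struct range { unsigned long start, end; };  /* inclusive bounds */

    /* Shrink *res so it fits inside *win; return true if it changed. */
    static bool clip_range(struct range *res, const struct range *win)
    {
            unsigned long start = res->start > win->start ? res->start : win->start;
            unsigned long end   = res->end   < win->end   ? res->end   : win->end;

            if (start > end)
                    return false;           /* no overlap at all */
            if (start == res->start && end == res->end)
                    return false;           /* already contained, nothing to do */
            res->start = start;
            res->end = end;
            return true;
    }

    int main(void)
    {
            struct range win = { 0x1000, 0x1fff };
            struct range res = { 0x1800, 0x2fff };

            if (clip_range(&res, &win))
                    printf("clipped to [%#lx-%#lx]\n", res.start, res.end);
            return 0;
    }
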
2905 +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
2906 +index 625a4ace10b4..0190d1ee36b7 100644
2907 +--- a/drivers/pci/pci.c
2908 ++++ b/drivers/pci/pci.c
2909 +@@ -3280,7 +3280,8 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
2910 + {
2911 + struct pci_dev *pdev;
2912 +
2913 +- if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
2914 ++ if (pci_is_root_bus(dev->bus) || dev->subordinate ||
2915 ++ !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
2916 + return -ENOTTY;
2917 +
2918 + list_for_each_entry(pdev, &dev->bus->devices, bus_list)
2919 +@@ -3314,7 +3315,8 @@ static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
2920 + {
2921 + struct pci_dev *pdev;
2922 +
2923 +- if (dev->subordinate || !dev->slot)
2924 ++ if (dev->subordinate || !dev->slot ||
2925 ++ dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
2926 + return -ENOTTY;
2927 +
2928 + list_for_each_entry(pdev, &dev->bus->devices, bus_list)
2929 +@@ -3566,6 +3568,20 @@ int pci_try_reset_function(struct pci_dev *dev)
2930 + }
2931 + EXPORT_SYMBOL_GPL(pci_try_reset_function);
2932 +
2933 ++/* Do any devices on or below this bus prevent a bus reset? */
2934 ++static bool pci_bus_resetable(struct pci_bus *bus)
2935 ++{
2936 ++ struct pci_dev *dev;
2937 ++
2938 ++ list_for_each_entry(dev, &bus->devices, bus_list) {
2939 ++ if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
2940 ++ (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
2941 ++ return false;
2942 ++ }
2943 ++
2944 ++ return true;
2945 ++}
2946 ++
2947 + /* Lock devices from the top of the tree down */
2948 + static void pci_bus_lock(struct pci_bus *bus)
2949 + {
2950 +@@ -3616,6 +3632,22 @@ unlock:
2951 + return 0;
2952 + }
2953 +
2954 ++/* Do any devices on or below this slot prevent a bus reset? */
2955 ++static bool pci_slot_resetable(struct pci_slot *slot)
2956 ++{
2957 ++ struct pci_dev *dev;
2958 ++
2959 ++ list_for_each_entry(dev, &slot->bus->devices, bus_list) {
2960 ++ if (!dev->slot || dev->slot != slot)
2961 ++ continue;
2962 ++ if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
2963 ++ (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
2964 ++ return false;
2965 ++ }
2966 ++
2967 ++ return true;
2968 ++}
2969 ++
2970 + /* Lock devices from the top of the tree down */
2971 + static void pci_slot_lock(struct pci_slot *slot)
2972 + {
2973 +@@ -3737,7 +3769,7 @@ static int pci_slot_reset(struct pci_slot *slot, int probe)
2974 + {
2975 + int rc;
2976 +
2977 +- if (!slot)
2978 ++ if (!slot || !pci_slot_resetable(slot))
2979 + return -ENOTTY;
2980 +
2981 + if (!probe)
2982 +@@ -3829,7 +3861,7 @@ EXPORT_SYMBOL_GPL(pci_try_reset_slot);
2983 +
2984 + static int pci_bus_reset(struct pci_bus *bus, int probe)
2985 + {
2986 +- if (!bus->self)
2987 ++ if (!bus->self || !pci_bus_resetable(bus))
2988 + return -ENOTTY;
2989 +
2990 + if (probe)
2991 +diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
2992 +index 4a3902d8e6fe..b5defca86795 100644
2993 +--- a/drivers/pci/pci.h
2994 ++++ b/drivers/pci/pci.h
2995 +@@ -208,6 +208,7 @@ void __pci_bus_size_bridges(struct pci_bus *bus,
2996 + void __pci_bus_assign_resources(const struct pci_bus *bus,
2997 + struct list_head *realloc_head,
2998 + struct list_head *fail_head);
2999 ++bool pci_bus_clip_resource(struct pci_dev *dev, int idx);
3000 +
3001 + /**
3002 + * pci_ari_enabled - query ARI forwarding status
3003 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
3004 +index 90acb32c85b1..b72e2cdfd59a 100644
3005 +--- a/drivers/pci/quirks.c
3006 ++++ b/drivers/pci/quirks.c
3007 +@@ -3008,6 +3008,20 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169,
3008 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID,
3009 + quirk_broken_intx_masking);
3010 +
3011 ++static void quirk_no_bus_reset(struct pci_dev *dev)
3012 ++{
3013 ++ dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
3014 ++}
3015 ++
3016 ++/*
3017 ++ * Atheros AR93xx chips do not behave after a bus reset. The device will
3018 ++ * throw a Link Down error on AER-capable systems and regardless of AER,
3019 ++ * config space of the device is never accessible again and typically
3020 ++ * causes the system to hang or reset when access is attempted.
3021 ++ * http://www.spinics.net/lists/linux-pci/msg34797.html
3022 ++ */
3023 ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
3024 ++
3025 + #ifdef CONFIG_ACPI
3026 + /*
3027 + * Apple: Shutdown Cactus Ridge Thunderbolt controller.
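
Other devices with the same limitation can be tagged the same way; the flag
is then honoured by the reset paths shown above (pci_parent_bus_reset(),
pci_dev_reset_slot_function(), pci_bus_resetable()). A hypothetical example
with placeholder IDs, not a real quirk:

    #include <linux/pci.h>

    /* Hypothetical: mark some other device that cannot survive a
     * secondary bus reset.  The IDs are placeholders for illustration. */
    static void quirk_no_bus_reset_example(struct pci_dev *dev)
    {
            dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
    }
    DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, quirk_no_bus_reset_example);
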
3028 +diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
3029 +index 0482235eee92..e3e17f3c0f0f 100644
3030 +--- a/drivers/pci/setup-bus.c
3031 ++++ b/drivers/pci/setup-bus.c
3032 +@@ -530,9 +530,8 @@ EXPORT_SYMBOL(pci_setup_cardbus);
3033 + config space writes, so it's quite possible that an I/O window of
3034 + the bridge will have some undesirable address (e.g. 0) after the
3035 + first write. Ditto 64-bit prefetchable MMIO. */
3036 +-static void pci_setup_bridge_io(struct pci_bus *bus)
3037 ++static void pci_setup_bridge_io(struct pci_dev *bridge)
3038 + {
3039 +- struct pci_dev *bridge = bus->self;
3040 + struct resource *res;
3041 + struct pci_bus_region region;
3042 + unsigned long io_mask;
3043 +@@ -545,7 +544,7 @@ static void pci_setup_bridge_io(struct pci_bus *bus)
3044 + io_mask = PCI_IO_1K_RANGE_MASK;
3045 +
3046 + /* Set up the top and bottom of the PCI I/O segment for this bus. */
3047 +- res = bus->resource[0];
3048 ++ res = &bridge->resource[PCI_BRIDGE_RESOURCES + 0];
3049 + pcibios_resource_to_bus(bridge->bus, &region, res);
3050 + if (res->flags & IORESOURCE_IO) {
3051 + pci_read_config_word(bridge, PCI_IO_BASE, &l);
3052 +@@ -568,15 +567,14 @@ static void pci_setup_bridge_io(struct pci_bus *bus)
3053 + pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16);
3054 + }
3055 +
3056 +-static void pci_setup_bridge_mmio(struct pci_bus *bus)
3057 ++static void pci_setup_bridge_mmio(struct pci_dev *bridge)
3058 + {
3059 +- struct pci_dev *bridge = bus->self;
3060 + struct resource *res;
3061 + struct pci_bus_region region;
3062 + u32 l;
3063 +
3064 + /* Set up the top and bottom of the PCI Memory segment for this bus. */
3065 +- res = bus->resource[1];
3066 ++ res = &bridge->resource[PCI_BRIDGE_RESOURCES + 1];
3067 + pcibios_resource_to_bus(bridge->bus, &region, res);
3068 + if (res->flags & IORESOURCE_MEM) {
3069 + l = (region.start >> 16) & 0xfff0;
3070 +@@ -588,9 +586,8 @@ static void pci_setup_bridge_mmio(struct pci_bus *bus)
3071 + pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
3072 + }
3073 +
3074 +-static void pci_setup_bridge_mmio_pref(struct pci_bus *bus)
3075 ++static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
3076 + {
3077 +- struct pci_dev *bridge = bus->self;
3078 + struct resource *res;
3079 + struct pci_bus_region region;
3080 + u32 l, bu, lu;
3081 +@@ -602,7 +599,7 @@ static void pci_setup_bridge_mmio_pref(struct pci_bus *bus)
3082 +
3083 + /* Set up PREF base/limit. */
3084 + bu = lu = 0;
3085 +- res = bus->resource[2];
3086 ++ res = &bridge->resource[PCI_BRIDGE_RESOURCES + 2];
3087 + pcibios_resource_to_bus(bridge->bus, &region, res);
3088 + if (res->flags & IORESOURCE_PREFETCH) {
3089 + l = (region.start >> 16) & 0xfff0;
3090 +@@ -630,13 +627,13 @@ static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
3091 + &bus->busn_res);
3092 +
3093 + if (type & IORESOURCE_IO)
3094 +- pci_setup_bridge_io(bus);
3095 ++ pci_setup_bridge_io(bridge);
3096 +
3097 + if (type & IORESOURCE_MEM)
3098 +- pci_setup_bridge_mmio(bus);
3099 ++ pci_setup_bridge_mmio(bridge);
3100 +
3101 + if (type & IORESOURCE_PREFETCH)
3102 +- pci_setup_bridge_mmio_pref(bus);
3103 ++ pci_setup_bridge_mmio_pref(bridge);
3104 +
3105 + pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
3106 + }
3107 +@@ -649,6 +646,41 @@ void pci_setup_bridge(struct pci_bus *bus)
3108 + __pci_setup_bridge(bus, type);
3109 + }
3110 +
3111 ++
3112 ++int pci_claim_bridge_resource(struct pci_dev *bridge, int i)
3113 ++{
3114 ++ if (i < PCI_BRIDGE_RESOURCES || i > PCI_BRIDGE_RESOURCE_END)
3115 ++ return 0;
3116 ++
3117 ++ if (pci_claim_resource(bridge, i) == 0)
3118 ++ return 0; /* claimed the window */
3119 ++
3120 ++ if ((bridge->class >> 8) != PCI_CLASS_BRIDGE_PCI)
3121 ++ return 0;
3122 ++
3123 ++ if (!pci_bus_clip_resource(bridge, i))
3124 ++ return -EINVAL; /* clipping didn't change anything */
3125 ++
3126 ++ switch (i - PCI_BRIDGE_RESOURCES) {
3127 ++ case 0:
3128 ++ pci_setup_bridge_io(bridge);
3129 ++ break;
3130 ++ case 1:
3131 ++ pci_setup_bridge_mmio(bridge);
3132 ++ break;
3133 ++ case 2:
3134 ++ pci_setup_bridge_mmio_pref(bridge);
3135 ++ break;
3136 ++ default:
3137 ++ return -EINVAL;
3138 ++ }
3139 ++
3140 ++ if (pci_claim_resource(bridge, i) == 0)
3141 ++ return 0; /* claimed a smaller window */
3142 ++
3143 ++ return -EINVAL;
3144 ++}
3145 ++
3146 + /* Check whether the bridge supports optional I/O and
3147 + prefetchable memory ranges. If not, the respective
3148 + base/limit registers must be read-only and read as 0. */
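
pci_claim_bridge_resource() gives arch code a fallback when a
firmware-programmed bridge window does not fit its parent bus: claim the
window as-is, otherwise clip it with pci_bus_clip_resource(), re-program the
bridge registers for that window type, and try the claim once more. A
minimal sketch of a caller walking a bridge's windows (hypothetical function
name, standard PCI core types assumed):

    #include <linux/pci.h>

    /* Hypothetical: claim every window of a PCI-PCI bridge, clipping
     * windows that spill outside the parent bus resources. */
    static void example_claim_bridge_windows(struct pci_dev *bridge)
    {
            int i;

            for (i = PCI_BRIDGE_RESOURCES; i <= PCI_BRIDGE_RESOURCE_END; i++) {
                    struct resource *res = &bridge->resource[i];

                    if (!res->flags)
                            continue;
                    if (pci_claim_bridge_resource(bridge, i))
                            dev_warn(&bridge->dev,
                                     "could not claim window %pR\n", res);
            }
    }
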
3149 +diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
3150 +index e4f65510c87e..89dca77ca038 100644
3151 +--- a/drivers/pinctrl/core.c
3152 ++++ b/drivers/pinctrl/core.c
3153 +@@ -1801,14 +1801,15 @@ void pinctrl_unregister(struct pinctrl_dev *pctldev)
3154 + if (pctldev == NULL)
3155 + return;
3156 +
3157 +- mutex_lock(&pinctrldev_list_mutex);
3158 + mutex_lock(&pctldev->mutex);
3159 +-
3160 + pinctrl_remove_device_debugfs(pctldev);
3161 ++ mutex_unlock(&pctldev->mutex);
3162 +
3163 + if (!IS_ERR(pctldev->p))
3164 + pinctrl_put(pctldev->p);
3165 +
3166 ++ mutex_lock(&pinctrldev_list_mutex);
3167 ++ mutex_lock(&pctldev->mutex);
3168 + /* TODO: check that no pinmuxes are still active? */
3169 + list_del(&pctldev->node);
3170 + /* Destroy descriptor tree */
3171 +diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
3172 +index e730935fa457..ed7017df065d 100644
3173 +--- a/drivers/pinctrl/qcom/pinctrl-msm.c
3174 ++++ b/drivers/pinctrl/qcom/pinctrl-msm.c
3175 +@@ -865,10 +865,10 @@ static int msm_ps_hold_restart(struct notifier_block *nb, unsigned long action,
3176 +
3177 + static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl)
3178 + {
3179 +- int i = 0;
3180 ++ int i;
3181 + const struct msm_function *func = pctrl->soc->functions;
3182 +
3183 +- for (; i <= pctrl->soc->nfunctions; i++)
3184 ++ for (i = 0; i < pctrl->soc->nfunctions; i++)
3185 + if (!strcmp(func[i].name, "ps_hold")) {
3186 + pctrl->restart_nb.notifier_call = msm_ps_hold_restart;
3187 + pctrl->restart_nb.priority = 128;
3188 +diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
3189 +index 99485415dcc2..91e97ec01418 100644
3190 +--- a/drivers/s390/crypto/ap_bus.c
3191 ++++ b/drivers/s390/crypto/ap_bus.c
3192 +@@ -44,6 +44,7 @@
3193 + #include <linux/hrtimer.h>
3194 + #include <linux/ktime.h>
3195 + #include <asm/facility.h>
3196 ++#include <linux/crypto.h>
3197 +
3198 + #include "ap_bus.h"
3199 +
3200 +@@ -71,7 +72,7 @@ MODULE_AUTHOR("IBM Corporation");
3201 + MODULE_DESCRIPTION("Adjunct Processor Bus driver, " \
3202 + "Copyright IBM Corp. 2006, 2012");
3203 + MODULE_LICENSE("GPL");
3204 +-MODULE_ALIAS("z90crypt");
3205 ++MODULE_ALIAS_CRYPTO("z90crypt");
3206 +
3207 + /*
3208 + * Module parameter
3209 +diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
3210 +index 2a9578c116b7..c3bdca7bf1e9 100644
3211 +--- a/drivers/scsi/ipr.c
3212 ++++ b/drivers/scsi/ipr.c
3213 +@@ -683,6 +683,7 @@ static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
3214 + ipr_reinit_ipr_cmnd(ipr_cmd);
3215 + ipr_cmd->u.scratch = 0;
3216 + ipr_cmd->sibling = NULL;
3217 ++ ipr_cmd->eh_comp = NULL;
3218 + ipr_cmd->fast_done = fast_done;
3219 + init_timer(&ipr_cmd->timer);
3220 + }
3221 +@@ -848,6 +849,8 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
3222 +
3223 + scsi_dma_unmap(ipr_cmd->scsi_cmd);
3224 + scsi_cmd->scsi_done(scsi_cmd);
3225 ++ if (ipr_cmd->eh_comp)
3226 ++ complete(ipr_cmd->eh_comp);
3227 + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
3228 + }
3229 +
3230 +@@ -4853,6 +4856,84 @@ static int ipr_slave_alloc(struct scsi_device *sdev)
3231 + return rc;
3232 + }
3233 +
3234 ++/**
3235 ++ * ipr_match_lun - Match function for specified LUN
3236 ++ * @ipr_cmd: ipr command struct
3237 ++ * @device: device to match (sdev)
3238 ++ *
3239 ++ * Returns:
3240 ++ * 1 if command matches sdev / 0 if command does not match sdev
3241 ++ **/
3242 ++static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
3243 ++{
3244 ++ if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
3245 ++ return 1;
3246 ++ return 0;
3247 ++}
3248 ++
3249 ++/**
3250 ++ * ipr_wait_for_ops - Wait for matching commands to complete
3251 ++ * @ipr_cmd: ipr command struct
3252 ++ * @device: device to match (sdev)
3253 ++ * @match: match function to use
3254 ++ *
3255 ++ * Returns:
3256 ++ * SUCCESS / FAILED
3257 ++ **/
3258 ++static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
3259 ++ int (*match)(struct ipr_cmnd *, void *))
3260 ++{
3261 ++ struct ipr_cmnd *ipr_cmd;
3262 ++ int wait;
3263 ++ unsigned long flags;
3264 ++ struct ipr_hrr_queue *hrrq;
3265 ++ signed long timeout = IPR_ABORT_TASK_TIMEOUT;
3266 ++ DECLARE_COMPLETION_ONSTACK(comp);
3267 ++
3268 ++ ENTER;
3269 ++ do {
3270 ++ wait = 0;
3271 ++
3272 ++ for_each_hrrq(hrrq, ioa_cfg) {
3273 ++ spin_lock_irqsave(hrrq->lock, flags);
3274 ++ list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
3275 ++ if (match(ipr_cmd, device)) {
3276 ++ ipr_cmd->eh_comp = &comp;
3277 ++ wait++;
3278 ++ }
3279 ++ }
3280 ++ spin_unlock_irqrestore(hrrq->lock, flags);
3281 ++ }
3282 ++
3283 ++ if (wait) {
3284 ++ timeout = wait_for_completion_timeout(&comp, timeout);
3285 ++
3286 ++ if (!timeout) {
3287 ++ wait = 0;
3288 ++
3289 ++ for_each_hrrq(hrrq, ioa_cfg) {
3290 ++ spin_lock_irqsave(hrrq->lock, flags);
3291 ++ list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
3292 ++ if (match(ipr_cmd, device)) {
3293 ++ ipr_cmd->eh_comp = NULL;
3294 ++ wait++;
3295 ++ }
3296 ++ }
3297 ++ spin_unlock_irqrestore(hrrq->lock, flags);
3298 ++ }
3299 ++
3300 ++ if (wait)
3301 ++ dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
3302 ++ LEAVE;
3303 ++ return wait ? FAILED : SUCCESS;
3304 ++ }
3305 ++ }
3306 ++ } while (wait);
3307 ++
3308 ++ LEAVE;
3309 ++ return SUCCESS;
3310 ++}
3311 ++
3312 + static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
3313 + {
3314 + struct ipr_ioa_cfg *ioa_cfg;
3315 +@@ -5072,11 +5153,17 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
3316 + static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
3317 + {
3318 + int rc;
3319 ++ struct ipr_ioa_cfg *ioa_cfg;
3320 ++
3321 ++ ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
3322 +
3323 + spin_lock_irq(cmd->device->host->host_lock);
3324 + rc = __ipr_eh_dev_reset(cmd);
3325 + spin_unlock_irq(cmd->device->host->host_lock);
3326 +
3327 ++ if (rc == SUCCESS)
3328 ++ rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
3329 ++
3330 + return rc;
3331 + }
3332 +
3333 +@@ -5254,13 +5341,18 @@ static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
3334 + {
3335 + unsigned long flags;
3336 + int rc;
3337 ++ struct ipr_ioa_cfg *ioa_cfg;
3338 +
3339 + ENTER;
3340 +
3341 ++ ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3342 ++
3343 + spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
3344 + rc = ipr_cancel_op(scsi_cmd);
3345 + spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
3346 +
3347 ++ if (rc == SUCCESS)
3348 ++ rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
3349 + LEAVE;
3350 + return rc;
3351 + }
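
The eh_comp plumbing added above is the usual on-stack completion handshake:
the error handler tags each outstanding command with a completion and
sleeps, and the normal done path signals it. Stripped of the ipr-specific
queue walking, the pattern looks roughly like this (the example_cmd type is
an assumption for illustration; the completion calls are the stock
<linux/completion.h> API):

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    struct example_cmd {
            struct completion *eh_comp;     /* set only while someone waits */
            /* ... driver-specific fields ... */
    };

    /* Normal completion path: wake a waiting error handler, if any. */
    static void example_cmd_done(struct example_cmd *cmd)
    {
            if (cmd->eh_comp)
                    complete(cmd->eh_comp);
    }

    /* Error-handler side: wait (with a timeout) for the command to finish.
     * Real code must serialise eh_comp against the done path with a lock,
     * as the ipr changes do under hrrq->lock. */
    static int example_wait_for_cmd(struct example_cmd *cmd)
    {
            DECLARE_COMPLETION_ONSTACK(comp);

            cmd->eh_comp = &comp;
            if (!wait_for_completion_timeout(&comp, msecs_to_jiffies(30000))) {
                    cmd->eh_comp = NULL;    /* timed out; detach and give up */
                    return -ETIMEDOUT;
            }
            return 0;
    }
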
3352 +diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
3353 +index d0201ceb4aac..fa82c003bc32 100644
3354 +--- a/drivers/scsi/ipr.h
3355 ++++ b/drivers/scsi/ipr.h
3356 +@@ -1608,6 +1608,7 @@ struct ipr_cmnd {
3357 + struct scsi_device *sdev;
3358 + } u;
3359 +
3360 ++ struct completion *eh_comp;
3361 + struct ipr_hrr_queue *hrrq;
3362 + struct ipr_ioa_cfg *ioa_cfg;
3363 + };
3364 +diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
3365 +index 9df5d6ec7eec..f3a9d831d0f9 100644
3366 +--- a/drivers/xen/swiotlb-xen.c
3367 ++++ b/drivers/xen/swiotlb-xen.c
3368 +@@ -449,7 +449,7 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
3369 +
3370 + /* NOTE: We use dev_addr here, not paddr! */
3371 + if (is_xen_swiotlb_buffer(dev_addr)) {
3372 +- swiotlb_tbl_unmap_single(hwdev, dev_addr, size, dir);
3373 ++ swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
3374 + return;
3375 + }
3376 +
3377 +diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
3378 +index 45cb59bcc791..8b7898b7670f 100644
3379 +--- a/fs/cifs/ioctl.c
3380 ++++ b/fs/cifs/ioctl.c
3381 +@@ -86,21 +86,16 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
3382 + }
3383 +
3384 + src_inode = file_inode(src_file.file);
3385 ++ rc = -EINVAL;
3386 ++ if (S_ISDIR(src_inode->i_mode))
3387 ++ goto out_fput;
3388 +
3389 + /*
3390 + * Note: cifs case is easier than btrfs since server responsible for
3391 + * checks for proper open modes and file type and if it wants
3392 + * server could even support copy of range where source = target
3393 + */
3394 +-
3395 +- /* so we do not deadlock racing two ioctls on same files */
3396 +- if (target_inode < src_inode) {
3397 +- mutex_lock_nested(&target_inode->i_mutex, I_MUTEX_PARENT);
3398 +- mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_CHILD);
3399 +- } else {
3400 +- mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_PARENT);
3401 +- mutex_lock_nested(&target_inode->i_mutex, I_MUTEX_CHILD);
3402 +- }
3403 ++ lock_two_nondirectories(target_inode, src_inode);
3404 +
3405 + /* determine range to clone */
3406 + rc = -EINVAL;
3407 +@@ -124,13 +119,7 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
3408 + out_unlock:
3409 + /* although unlocking in the reverse order from locking is not
3410 + strictly necessary here it is a little cleaner to be consistent */
3411 +- if (target_inode < src_inode) {
3412 +- mutex_unlock(&src_inode->i_mutex);
3413 +- mutex_unlock(&target_inode->i_mutex);
3414 +- } else {
3415 +- mutex_unlock(&target_inode->i_mutex);
3416 +- mutex_unlock(&src_inode->i_mutex);
3417 +- }
3418 ++ unlock_two_nondirectories(src_inode, target_inode);
3419 + out_fput:
3420 + fdput(src_file);
3421 + out_drop_write:
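
lock_two_nondirectories() packages the deadlock-avoidance rule the removed
branches spelled out by hand: when two tasks need the same pair of inode
locks, both must acquire them in one globally consistent order (here, by
comparing the inode pointers), so neither can end up holding one lock while
waiting for the other. The rule in isolation, as a generic C sketch rather
than the VFS helper itself:

    #include <pthread.h>
    #include <stdint.h>

    /* Lock two mutexes in a consistent (address) order so concurrent
     * callers working on the same pair cannot deadlock. */
    static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
    {
            if (a == b) {                   /* same object: lock it once */
                    pthread_mutex_lock(a);
                    return;
            }
            if ((uintptr_t)a > (uintptr_t)b) {
                    pthread_mutex_t *tmp = a;
                    a = b;
                    b = tmp;
            }
            pthread_mutex_lock(a);          /* always lower address first */
            pthread_mutex_lock(b);
    }
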
3422 +diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
3423 +index f34a0835aa4f..8de31d472fad 100644
3424 +--- a/include/acpi/acpi_bus.h
3425 ++++ b/include/acpi/acpi_bus.h
3426 +@@ -312,6 +312,7 @@ struct acpi_device_wakeup_flags {
3427 + u8 valid:1; /* Can successfully enable wakeup? */
3428 + u8 run_wake:1; /* Run-Wake GPE devices */
3429 + u8 notifier_present:1; /* Wake-up notify handler has been installed */
3430 ++ u8 enabled:1; /* Enabled for wakeup */
3431 + };
3432 +
3433 + struct acpi_device_wakeup_context {
3434 +diff --git a/include/linux/crypto.h b/include/linux/crypto.h
3435 +index d45e949699ea..dc34dfc766b5 100644
3436 +--- a/include/linux/crypto.h
3437 ++++ b/include/linux/crypto.h
3438 +@@ -26,6 +26,19 @@
3439 + #include <linux/uaccess.h>
3440 +
3441 + /*
3442 ++ * Autoloaded crypto modules should only use a prefixed name to avoid allowing
3443 ++ * arbitrary modules to be loaded. Loading from userspace may still need the
3444 ++ * unprefixed names, so retains those aliases as well.
3445 ++ * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
3446 ++ * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
3447 ++ * expands twice on the same line. Instead, use a separate base name for the
3448 ++ * alias.
3449 ++ */
3450 ++#define MODULE_ALIAS_CRYPTO(name) \
3451 ++ __MODULE_INFO(alias, alias_userspace, name); \
3452 ++ __MODULE_INFO(alias, alias_crypto, "crypto-" name)
3453 ++
3454 ++/*
3455 + * Algorithm masks and types.
3456 + */
3457 + #define CRYPTO_ALG_TYPE_MASK 0x0000000f
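
A driver converted to the new macro declares one name and gets two module
aliases: the legacy unprefixed one for userspace and the "crypto-" prefixed
one that the autoloader is restricted to requesting (the z90crypt change
above is one such conversion). A minimal sketch for a hypothetical cipher
module:

    #include <linux/module.h>
    #include <linux/crypto.h>

    /* ... register the algorithm in module_init() as usual ... */

    MODULE_LICENSE("GPL");
    MODULE_DESCRIPTION("Example cipher (sketch)");
    /* Emits both "alias: examplecipher" and "alias: crypto-examplecipher"
     * in the module info, so the kernel can autoload it by the prefixed
     * name without honouring arbitrary unprefixed module requests. */
    MODULE_ALIAS_CRYPTO("examplecipher");
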
3458 +diff --git a/include/linux/libata.h b/include/linux/libata.h
3459 +index bd5fefeaf548..fe0bf8dc83bb 100644
3460 +--- a/include/linux/libata.h
3461 ++++ b/include/linux/libata.h
3462 +@@ -230,6 +230,7 @@ enum {
3463 + ATA_FLAG_SW_ACTIVITY = (1 << 22), /* driver supports sw activity
3464 + * led */
3465 + ATA_FLAG_NO_DIPM = (1 << 23), /* host not happy with DIPM */
3466 ++ ATA_FLAG_LOWTAG = (1 << 24), /* host wants lowest available tag */
3467 +
3468 + /* bits 24:31 of ap->flags are reserved for LLD specific flags */
3469 +
3470 +diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
3471 +index 7ea069cd3257..4b3736f7065c 100644
3472 +--- a/include/linux/pagemap.h
3473 ++++ b/include/linux/pagemap.h
3474 +@@ -251,7 +251,7 @@ pgoff_t page_cache_prev_hole(struct address_space *mapping,
3475 + #define FGP_NOWAIT 0x00000020
3476 +
3477 + struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
3478 +- int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask);
3479 ++ int fgp_flags, gfp_t cache_gfp_mask);
3480 +
3481 + /**
3482 + * find_get_page - find and get a page reference
3483 +@@ -266,13 +266,13 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
3484 + static inline struct page *find_get_page(struct address_space *mapping,
3485 + pgoff_t offset)
3486 + {
3487 +- return pagecache_get_page(mapping, offset, 0, 0, 0);
3488 ++ return pagecache_get_page(mapping, offset, 0, 0);
3489 + }
3490 +
3491 + static inline struct page *find_get_page_flags(struct address_space *mapping,
3492 + pgoff_t offset, int fgp_flags)
3493 + {
3494 +- return pagecache_get_page(mapping, offset, fgp_flags, 0, 0);
3495 ++ return pagecache_get_page(mapping, offset, fgp_flags, 0);
3496 + }
3497 +
3498 + /**
3499 +@@ -292,7 +292,7 @@ static inline struct page *find_get_page_flags(struct address_space *mapping,
3500 + static inline struct page *find_lock_page(struct address_space *mapping,
3501 + pgoff_t offset)
3502 + {
3503 +- return pagecache_get_page(mapping, offset, FGP_LOCK, 0, 0);
3504 ++ return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
3505 + }
3506 +
3507 + /**
3508 +@@ -319,7 +319,7 @@ static inline struct page *find_or_create_page(struct address_space *mapping,
3509 + {
3510 + return pagecache_get_page(mapping, offset,
3511 + FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
3512 +- gfp_mask, gfp_mask & GFP_RECLAIM_MASK);
3513 ++ gfp_mask);
3514 + }
3515 +
3516 + /**
3517 +@@ -340,8 +340,7 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
3518 + {
3519 + return pagecache_get_page(mapping, index,
3520 + FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
3521 +- mapping_gfp_mask(mapping),
3522 +- GFP_NOFS);
3523 ++ mapping_gfp_mask(mapping));
3524 + }
3525 +
3526 + struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
3527 +diff --git a/include/linux/pci.h b/include/linux/pci.h
3528 +index 4c8ac5fcc224..2882c13c6391 100644
3529 +--- a/include/linux/pci.h
3530 ++++ b/include/linux/pci.h
3531 +@@ -175,6 +175,8 @@ enum pci_dev_flags {
3532 + PCI_DEV_FLAGS_DMA_ALIAS_DEVFN = (__force pci_dev_flags_t) (1 << 4),
3533 + /* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */
3534 + PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
3535 ++ /* Do not use bus resets for device */
3536 ++ PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
3537 + };
3538 +
3539 + enum pci_irq_reroute_variant {
3540 +@@ -1062,6 +1064,7 @@ resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
3541 + void pci_bus_assign_resources(const struct pci_bus *bus);
3542 + void pci_bus_size_bridges(struct pci_bus *bus);
3543 + int pci_claim_resource(struct pci_dev *, int);
3544 ++int pci_claim_bridge_resource(struct pci_dev *bridge, int i);
3545 + void pci_assign_unassigned_resources(void);
3546 + void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
3547 + void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
3548 +diff --git a/include/linux/time.h b/include/linux/time.h
3549 +index 8c42cf8d2444..5989b0ead1ec 100644
3550 +--- a/include/linux/time.h
3551 ++++ b/include/linux/time.h
3552 +@@ -99,6 +99,19 @@ static inline bool timespec_valid_strict(const struct timespec *ts)
3553 + return true;
3554 + }
3555 +
3556 ++static inline bool timeval_valid(const struct timeval *tv)
3557 ++{
3558 ++ /* Dates before 1970 are bogus */
3559 ++ if (tv->tv_sec < 0)
3560 ++ return false;
3561 ++
3562 ++	/* Can't have more microseconds than a second */
3563 ++ if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
3564 ++ return false;
3565 ++
3566 ++ return true;
3567 ++}
3568 ++
3569 + extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
3570 +
3571 + #define CURRENT_TIME (current_kernel_time())
3572 +diff --git a/include/uapi/linux/can/netlink.h b/include/uapi/linux/can/netlink.h
3573 +index 3e4323a3918d..94ffe0c83ce7 100644
3574 +--- a/include/uapi/linux/can/netlink.h
3575 ++++ b/include/uapi/linux/can/netlink.h
3576 +@@ -98,6 +98,7 @@ struct can_ctrlmode {
3577 + #define CAN_CTRLMODE_BERR_REPORTING 0x10 /* Bus-error reporting */
3578 + #define CAN_CTRLMODE_FD 0x20 /* CAN FD mode */
3579 + #define CAN_CTRLMODE_PRESUME_ACK 0x40 /* Ignore missing CAN ACKs */
3580 ++#define CAN_CTRLMODE_FD_NON_ISO 0x80 /* CAN FD in non-ISO mode */
3581 +
3582 + /*
3583 + * CAN device statistics
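
Control-mode bits are switched through struct can_ctrlmode, where mask
selects which bits a request touches and flags carries their new values. A
sketch of how userspace would ask for non-ISO CAN FD with the new bit (the
netlink plumbing that carries this as an IFLA_CAN_CTRLMODE attribute in an
RTM_NEWLINK request is omitted):

    #include <linux/can/netlink.h>

    static const struct can_ctrlmode cm = {
            .mask  = CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO,
            .flags = CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO,
    };
    /* Drivers whose hardware cannot switch the framing, like the m_can
     * change earlier in this patch, instead fix the bit in
     * priv->can.ctrlmode so it cannot be toggled from userspace. */
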
3584 +diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
3585 +index 87a346fd6d61..28bf91c60a0b 100644
3586 +--- a/kernel/time/ntp.c
3587 ++++ b/kernel/time/ntp.c
3588 +@@ -633,6 +633,13 @@ int ntp_validate_timex(struct timex *txc)
3589 + if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME)))
3590 + return -EPERM;
3591 +
3592 ++ if (txc->modes & ADJ_FREQUENCY) {
3593 ++ if (LONG_MIN / PPM_SCALE > txc->freq)
3594 ++ return -EINVAL;
3595 ++ if (LONG_MAX / PPM_SCALE < txc->freq)
3596 ++ return -EINVAL;
3597 ++ }
3598 ++
3599 + return 0;
3600 + }
3601 +
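
The new ADJ_FREQUENCY test is deliberately written with divisions: computing
txc->freq * PPM_SCALE first could itself overflow, so the bounds
LONG_MIN / PPM_SCALE and LONG_MAX / PPM_SCALE are compared against freq
instead. The same overflow-safe range check in isolation (plain C, assuming
a positive scale factor):

    #include <limits.h>
    #include <stdbool.h>

    /* True if value * scale fits in a long, decided without ever
     * performing the (possibly overflowing) multiplication.
     * Assumes scale > 0. */
    static bool scaled_value_fits(long value, long scale)
    {
            return value >= LONG_MIN / scale && value <= LONG_MAX / scale;
    }
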
3602 +diff --git a/kernel/time/time.c b/kernel/time/time.c
3603 +index a9ae20fb0b11..22d5d3b73970 100644
3604 +--- a/kernel/time/time.c
3605 ++++ b/kernel/time/time.c
3606 +@@ -196,6 +196,10 @@ SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv,
3607 + if (tv) {
3608 + if (copy_from_user(&user_tv, tv, sizeof(*tv)))
3609 + return -EFAULT;
3610 ++
3611 ++ if (!timeval_valid(&user_tv))
3612 ++ return -EINVAL;
3613 ++
3614 + new_ts.tv_sec = user_tv.tv_sec;
3615 + new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
3616 + }
3617 +diff --git a/kernel/workqueue.c b/kernel/workqueue.c
3618 +index 09b685daee3d..66940a53d128 100644
3619 +--- a/kernel/workqueue.c
3620 ++++ b/kernel/workqueue.c
3621 +@@ -1841,17 +1841,11 @@ static void pool_mayday_timeout(unsigned long __pool)
3622 + * spin_lock_irq(pool->lock) which may be released and regrabbed
3623 + * multiple times. Does GFP_KERNEL allocations. Called only from
3624 + * manager.
3625 +- *
3626 +- * Return:
3627 +- * %false if no action was taken and pool->lock stayed locked, %true
3628 +- * otherwise.
3629 + */
3630 +-static bool maybe_create_worker(struct worker_pool *pool)
3631 ++static void maybe_create_worker(struct worker_pool *pool)
3632 + __releases(&pool->lock)
3633 + __acquires(&pool->lock)
3634 + {
3635 +- if (!need_to_create_worker(pool))
3636 +- return false;
3637 + restart:
3638 + spin_unlock_irq(&pool->lock);
3639 +
3640 +@@ -1877,7 +1871,6 @@ restart:
3641 + */
3642 + if (need_to_create_worker(pool))
3643 + goto restart;
3644 +- return true;
3645 + }
3646 +
3647 + /**
3648 +@@ -1897,16 +1890,14 @@ restart:
3649 + * multiple times. Does GFP_KERNEL allocations.
3650 + *
3651 + * Return:
3652 +- * %false if the pool don't need management and the caller can safely start
3653 +- * processing works, %true indicates that the function released pool->lock
3654 +- * and reacquired it to perform some management function and that the
3655 +- * conditions that the caller verified while holding the lock before
3656 +- * calling the function might no longer be true.
3657 ++ * %false if the pool doesn't need management and the caller can safely
3658 ++ * start processing works, %true if management function was performed and
3659 ++ * the conditions that the caller verified before calling the function may
3660 ++ * no longer be true.
3661 + */
3662 + static bool manage_workers(struct worker *worker)
3663 + {
3664 + struct worker_pool *pool = worker->pool;
3665 +- bool ret = false;
3666 +
3667 + /*
3668 + * Anyone who successfully grabs manager_arb wins the arbitration
3669 +@@ -1919,12 +1910,12 @@ static bool manage_workers(struct worker *worker)
3670 + * actual management, the pool may stall indefinitely.
3671 + */
3672 + if (!mutex_trylock(&pool->manager_arb))
3673 +- return ret;
3674 ++ return false;
3675 +
3676 +- ret |= maybe_create_worker(pool);
3677 ++ maybe_create_worker(pool);
3678 +
3679 + mutex_unlock(&pool->manager_arb);
3680 +- return ret;
3681 ++ return true;
3682 + }
3683 +
3684 + /**
3685 +diff --git a/mm/filemap.c b/mm/filemap.c
3686 +index 14b4642279f1..37beab98b416 100644
3687 +--- a/mm/filemap.c
3688 ++++ b/mm/filemap.c
3689 +@@ -1046,8 +1046,7 @@ EXPORT_SYMBOL(find_lock_entry);
3690 + * @mapping: the address_space to search
3691 + * @offset: the page index
3692 + * @fgp_flags: PCG flags
3693 +- * @cache_gfp_mask: gfp mask to use for the page cache data page allocation
3694 +- * @radix_gfp_mask: gfp mask to use for radix tree node allocation
3695 ++ * @gfp_mask: gfp mask to use for the page cache data page allocation
3696 + *
3697 + * Looks up the page cache slot at @mapping & @offset.
3698 + *
3699 +@@ -1056,11 +1055,9 @@ EXPORT_SYMBOL(find_lock_entry);
3700 + * FGP_ACCESSED: the page will be marked accessed
3701 + * FGP_LOCK: Page is return locked
3702 + * FGP_CREAT: If page is not present then a new page is allocated using
3703 +- * @cache_gfp_mask and added to the page cache and the VM's LRU
3704 +- * list. If radix tree nodes are allocated during page cache
3705 +- * insertion then @radix_gfp_mask is used. The page is returned
3706 +- * locked and with an increased refcount. Otherwise, %NULL is
3707 +- * returned.
3708 ++ * @gfp_mask and added to the page cache and the VM's LRU
3709 ++ * list. The page is returned locked and with an increased
3710 ++ * refcount. Otherwise, %NULL is returned.
3711 + *
3712 + * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
3713 + * if the GFP flags specified for FGP_CREAT are atomic.
3714 +@@ -1068,7 +1065,7 @@ EXPORT_SYMBOL(find_lock_entry);
3715 + * If there is a page cache page, it is returned with an increased refcount.
3716 + */
3717 + struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
3718 +- int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask)
3719 ++ int fgp_flags, gfp_t gfp_mask)
3720 + {
3721 + struct page *page;
3722 +
3723 +@@ -1105,13 +1102,11 @@ no_page:
3724 + if (!page && (fgp_flags & FGP_CREAT)) {
3725 + int err;
3726 + if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
3727 +- cache_gfp_mask |= __GFP_WRITE;
3728 +- if (fgp_flags & FGP_NOFS) {
3729 +- cache_gfp_mask &= ~__GFP_FS;
3730 +- radix_gfp_mask &= ~__GFP_FS;
3731 +- }
3732 ++ gfp_mask |= __GFP_WRITE;
3733 ++ if (fgp_flags & FGP_NOFS)
3734 ++ gfp_mask &= ~__GFP_FS;
3735 +
3736 +- page = __page_cache_alloc(cache_gfp_mask);
3737 ++ page = __page_cache_alloc(gfp_mask);
3738 + if (!page)
3739 + return NULL;
3740 +
3741 +@@ -1122,7 +1117,8 @@ no_page:
3742 + if (fgp_flags & FGP_ACCESSED)
3743 + __SetPageReferenced(page);
3744 +
3745 +- err = add_to_page_cache_lru(page, mapping, offset, radix_gfp_mask);
3746 ++ err = add_to_page_cache_lru(page, mapping, offset,
3747 ++ gfp_mask & GFP_RECLAIM_MASK);
3748 + if (unlikely(err)) {
3749 + page_cache_release(page);
3750 + page = NULL;
3751 +@@ -2443,8 +2439,7 @@ struct page *grab_cache_page_write_begin(struct address_space *mapping,
3752 + fgp_flags |= FGP_NOFS;
3753 +
3754 + page = pagecache_get_page(mapping, index, fgp_flags,
3755 +- mapping_gfp_mask(mapping),
3756 +- GFP_KERNEL);
3757 ++ mapping_gfp_mask(mapping));
3758 + if (page)
3759 + wait_for_stable_page(page);
3760 +
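
With the two gfp arguments folded into one, callers pass a single mask; the
page allocation uses it directly and the radix-tree insertion derives its
mask internally via GFP_RECLAIM_MASK, as the hunk above shows. A minimal
sketch of a filesystem-style caller against the post-patch signature
(hypothetical helper name):

    #include <linux/pagemap.h>

    /* Find or create the page at 'index', returned locked, without
     * re-entering the filesystem from reclaim (FGP_NOFS). */
    static struct page *example_grab_page(struct address_space *mapping,
                                          pgoff_t index)
    {
            return pagecache_get_page(mapping, index,
                                      FGP_LOCK | FGP_ACCESSED | FGP_CREAT | FGP_NOFS,
                                      mapping_gfp_mask(mapping));
    }
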
3761 +diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
3762 +index 1d5341f3761d..5d3daae98bf0 100644
3763 +--- a/net/netfilter/ipvs/ip_vs_ftp.c
3764 ++++ b/net/netfilter/ipvs/ip_vs_ftp.c
3765 +@@ -183,6 +183,8 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
3766 + struct nf_conn *ct;
3767 + struct net *net;
3768 +
3769 ++ *diff = 0;
3770 ++
3771 + #ifdef CONFIG_IP_VS_IPV6
3772 + /* This application helper doesn't work with IPv6 yet,
3773 + * so turn this into a no-op for IPv6 packets
3774 +@@ -191,8 +193,6 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
3775 + return 1;
3776 + #endif
3777 +
3778 +- *diff = 0;
3779 +-
3780 + /* Only useful for established sessions */
3781 + if (cp->state != IP_VS_TCP_S_ESTABLISHED)
3782 + return 1;
3783 +@@ -322,6 +322,9 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
3784 + struct ip_vs_conn *n_cp;
3785 + struct net *net;
3786 +
3787 ++ /* no diff required for incoming packets */
3788 ++ *diff = 0;
3789 ++
3790 + #ifdef CONFIG_IP_VS_IPV6
3791 + /* This application helper doesn't work with IPv6 yet,
3792 + * so turn this into a no-op for IPv6 packets
3793 +@@ -330,9 +333,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
3794 + return 1;
3795 + #endif
3796 +
3797 +- /* no diff required for incoming packets */
3798 +- *diff = 0;
3799 +-
3800 + /* Only useful for established sessions */
3801 + if (cp->state != IP_VS_TCP_S_ESTABLISHED)
3802 + return 1;
3803 +diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
3804 +index 5016a6929085..c5880124ec0d 100644
3805 +--- a/net/netfilter/nf_conntrack_core.c
3806 ++++ b/net/netfilter/nf_conntrack_core.c
3807 +@@ -611,16 +611,15 @@ __nf_conntrack_confirm(struct sk_buff *skb)
3808 + */
3809 + NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
3810 + pr_debug("Confirming conntrack %p\n", ct);
3811 +- /* We have to check the DYING flag inside the lock to prevent
3812 +- a race against nf_ct_get_next_corpse() possibly called from
3813 +- user context, else we insert an already 'dead' hash, blocking
3814 +- further use of that particular connection -JM */
3815 ++ /* We have to check the DYING flag after unlink to prevent
3816 ++ * a race against nf_ct_get_next_corpse() possibly called from
3817 ++ * user context, else we insert an already 'dead' hash, blocking
3818 ++ * further use of that particular connection -JM.
3819 ++ */
3820 ++ nf_ct_del_from_dying_or_unconfirmed_list(ct);
3821 +
3822 +- if (unlikely(nf_ct_is_dying(ct))) {
3823 +- nf_conntrack_double_unlock(hash, reply_hash);
3824 +- local_bh_enable();
3825 +- return NF_ACCEPT;
3826 +- }
3827 ++ if (unlikely(nf_ct_is_dying(ct)))
3828 ++ goto out;
3829 +
3830 + /* See if there's one in the list already, including reverse:
3831 + NAT could have grabbed it without realizing, since we're
3832 +@@ -636,8 +635,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
3833 + zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
3834 + goto out;
3835 +
3836 +- nf_ct_del_from_dying_or_unconfirmed_list(ct);
3837 +-
3838 + /* Timer relative to confirmation time, not original
3839 + setting time, otherwise we'd get timer wrap in
3840 + weird delay cases. */
3841 +@@ -673,6 +670,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
3842 + return NF_ACCEPT;
3843 +
3844 + out:
3845 ++ nf_ct_add_to_dying_list(ct);
3846 + nf_conntrack_double_unlock(hash, reply_hash);
3847 + NF_CT_STAT_INC(net, insert_failed);
3848 + local_bh_enable();
3849 +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
3850 +index 66e8425dbfe7..71b574c7bde9 100644
3851 +--- a/net/netfilter/nf_tables_api.c
3852 ++++ b/net/netfilter/nf_tables_api.c
3853 +@@ -713,16 +713,12 @@ static int nft_flush_table(struct nft_ctx *ctx)
3854 + struct nft_chain *chain, *nc;
3855 + struct nft_set *set, *ns;
3856 +
3857 +- list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) {
3858 ++ list_for_each_entry(chain, &ctx->table->chains, list) {
3859 + ctx->chain = chain;
3860 +
3861 + err = nft_delrule_by_chain(ctx);
3862 + if (err < 0)
3863 + goto out;
3864 +-
3865 +- err = nft_delchain(ctx);
3866 +- if (err < 0)
3867 +- goto out;
3868 + }
3869 +
3870 + list_for_each_entry_safe(set, ns, &ctx->table->sets, list) {
3871 +@@ -735,6 +731,14 @@ static int nft_flush_table(struct nft_ctx *ctx)
3872 + goto out;
3873 + }
3874 +
3875 ++ list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) {
3876 ++ ctx->chain = chain;
3877 ++
3878 ++ err = nft_delchain(ctx);
3879 ++ if (err < 0)
3880 ++ goto out;
3881 ++ }
3882 ++
3883 + err = nft_deltable(ctx);
3884 + out:
3885 + return err;
3886 +diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
3887 +index 13c2e17bbe27..1aa7049c93f5 100644
3888 +--- a/net/netfilter/nfnetlink.c
3889 ++++ b/net/netfilter/nfnetlink.c
3890 +@@ -321,7 +321,8 @@ replay:
3891 + nlh = nlmsg_hdr(skb);
3892 + err = 0;
3893 +
3894 +- if (nlh->nlmsg_len < NLMSG_HDRLEN) {
3895 ++ if (nlmsg_len(nlh) < sizeof(struct nfgenmsg) ||
3896 ++ skb->len < nlh->nlmsg_len) {
3897 + err = -EINVAL;
3898 + goto ack;
3899 + }
3900 +@@ -469,7 +470,7 @@ static int nfnetlink_bind(int group)
3901 + int type;
3902 +
3903 + if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX)
3904 +- return -EINVAL;
3905 ++ return 0;
3906 +
3907 + type = nfnl_group2type[group];
3908 +
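
The strengthened check matters because nlmsg_len() returns the payload size
after the netlink header, so comparing the raw nlmsg_len field against
NLMSG_HDRLEN said nothing about whether a struct nfgenmsg was actually
present, and nothing at all about whether the claimed length ran past the
data received in the skb. The same validation written out as a standalone
helper (hypothetical function name, real netlink accessors):

    #include <linux/netfilter/nfnetlink.h>
    #include <linux/skbuff.h>
    #include <net/netlink.h>

    /* A message is only usable if it carries at least an nfgenmsg payload
     * and its claimed total length fits inside the received data. */
    static bool example_nfnl_msg_ok(const struct sk_buff *skb,
                                    const struct nlmsghdr *nlh)
    {
            if (nlmsg_len(nlh) < sizeof(struct nfgenmsg))
                    return false;
            if (skb->len < nlh->nlmsg_len)
                    return false;
            return true;
    }
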
3909 +diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
3910 +index d4b665610d67..1f9f08ae60c1 100755
3911 +--- a/scripts/recordmcount.pl
3912 ++++ b/scripts/recordmcount.pl
3913 +@@ -255,7 +255,6 @@ if ($arch eq "x86_64") {
3914 + # force flags for this arch
3915 + $ld .= " -m shlelf_linux";
3916 + $objcopy .= " -O elf32-sh-linux";
3917 +- $cc .= " -m32";
3918 +
3919 + } elsif ($arch eq "powerpc") {
3920 + $local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\.?\\S+)";
3921 +diff --git a/security/keys/gc.c b/security/keys/gc.c
3922 +index 9609a7f0faea..c7952375ac53 100644
3923 +--- a/security/keys/gc.c
3924 ++++ b/security/keys/gc.c
3925 +@@ -148,12 +148,12 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
3926 + if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
3927 + atomic_dec(&key->user->nikeys);
3928 +
3929 +- key_user_put(key->user);
3930 +-
3931 + /* now throw away the key memory */
3932 + if (key->type->destroy)
3933 + key->type->destroy(key);
3934 +
3935 ++ key_user_put(key->user);
3936 ++
3937 + kfree(key->description);
3938 +
3939 + #ifdef KEY_DEBUGGING
3940 +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
3941 +index 6e354d326858..a712d754431c 100644
3942 +--- a/sound/usb/mixer.c
3943 ++++ b/sound/usb/mixer.c
3944 +@@ -909,6 +909,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
3945 + case USB_ID(0x046d, 0x0807): /* Logitech Webcam C500 */
3946 + case USB_ID(0x046d, 0x0808):
3947 + case USB_ID(0x046d, 0x0809):
3948 ++ case USB_ID(0x046d, 0x0819): /* Logitech Webcam C210 */
3949 + case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
3950 + case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
3951 + case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */