From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:3.10 commit in: /
Date: Fri, 30 Jan 2015 12:51:32
Message-Id: 1422621600.30b7838f41cfebf7103845bb41ef499afe0c5e1e.mpagano@gentoo
1 commit: 30b7838f41cfebf7103845bb41ef499afe0c5e1e
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Fri Jan 30 12:40:00 2015 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Fri Jan 30 12:40:00 2015 +0000
6 URL: http://sources.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=30b7838f
7
8 Linux patch 3.10.67
9
10 ---
11 0000_README | 4 +
12 1066_linux-3.10.67.patch | 2417 ++++++++++++++++++++++++++++++++++++++++++++++
13 2 files changed, 2421 insertions(+)
14
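Most of the hunks below belong to a single hardening series: crypto modules trade their bare MODULE_ALIAS() declarations for MODULE_ALIAS_CRYPTO(), and crypto/api.c plus crypto/algapi.c now autoload with request_module("crypto-%s", name), so only modules that opt in through the new macro can be pulled in via crypto API algorithm names. The remaining hunks are unrelated stable fixes (imx25 PWM clocks, x86 TLS/LDT handling, Hyper-V clocksource, libata, sata_dwc_460ex, DRBD, mvebu-mbus, exynos_mct). A minimal sketch of the declaration pattern the series converges on follows; the module and algorithm names in it are placeholders, not taken from the patch:

/*
 * Illustrative only: how a generic cipher module declares its aliases
 * after this series.  MODULE_ALIAS_CRYPTO() places the name in the
 * "crypto-" alias namespace that the request_module("crypto-%s", ...)
 * calls added in crypto/api.c and crypto/algapi.c look up.
 */
#include <linux/module.h>
#include <linux/crypto.h>

MODULE_ALIAS_CRYPTO("sample");          /* algorithm .cra_name (placeholder) */
MODULE_ALIAS_CRYPTO("sample-generic");  /* .cra_driver_name (placeholder) */

static int __init sample_init(void)
{
	return 0;	/* registration of the actual algorithm omitted */
}

static void __exit sample_exit(void)
{
}

module_init(sample_init);
module_exit(sample_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Sample cipher, alias declarations only");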
15 diff --git a/0000_README b/0000_README
16 index 1f0923b..f5f4229 100644
17 --- a/0000_README
18 +++ b/0000_README
19 @@ -306,6 +306,10 @@ Patch: 1065_linux-3.10.66.patch
20 From: http://www.kernel.org
21 Desc: Linux 3.10.66
22
23 +Patch: 1066_linux-3.10.67.patch
24 +From: http://www.kernel.org
25 +Desc: Linux 3.10.67
26 +
27 Patch: 1500_XATTR_USER_PREFIX.patch
28 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
29 Desc: Support for namespace user.pax.* on tmpfs.
30
31 diff --git a/1066_linux-3.10.67.patch b/1066_linux-3.10.67.patch
32 new file mode 100644
33 index 0000000..43fdd4f
34 --- /dev/null
35 +++ b/1066_linux-3.10.67.patch
36 @@ -0,0 +1,2417 @@
37 +diff --git a/Makefile b/Makefile
38 +index 12ae1ef5437a..7c6711fa3c3f 100644
39 +--- a/Makefile
40 ++++ b/Makefile
41 +@@ -1,6 +1,6 @@
42 + VERSION = 3
43 + PATCHLEVEL = 10
44 +-SUBLEVEL = 66
45 ++SUBLEVEL = 67
46 + EXTRAVERSION =
47 + NAME = TOSSUG Baby Fish
48 +
49 +diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
50 +index 82897e2d8d5a..97d1a550eb98 100644
51 +--- a/arch/arm/boot/dts/imx25.dtsi
52 ++++ b/arch/arm/boot/dts/imx25.dtsi
53 +@@ -335,7 +335,7 @@
54 + compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
55 + #pwm-cells = <2>;
56 + reg = <0x53fa0000 0x4000>;
57 +- clocks = <&clks 106>, <&clks 36>;
58 ++ clocks = <&clks 106>, <&clks 52>;
59 + clock-names = "ipg", "per";
60 + interrupts = <36>;
61 + };
62 +@@ -354,7 +354,7 @@
63 + compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
64 + #pwm-cells = <2>;
65 + reg = <0x53fa8000 0x4000>;
66 +- clocks = <&clks 107>, <&clks 36>;
67 ++ clocks = <&clks 107>, <&clks 52>;
68 + clock-names = "ipg", "per";
69 + interrupts = <41>;
70 + };
71 +@@ -394,7 +394,7 @@
72 + pwm4: pwm@53fc8000 {
73 + compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
74 + reg = <0x53fc8000 0x4000>;
75 +- clocks = <&clks 108>, <&clks 36>;
76 ++ clocks = <&clks 108>, <&clks 52>;
77 + clock-names = "ipg", "per";
78 + interrupts = <42>;
79 + };
80 +@@ -439,7 +439,7 @@
81 + compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
82 + #pwm-cells = <2>;
83 + reg = <0x53fe0000 0x4000>;
84 +- clocks = <&clks 105>, <&clks 36>;
85 ++ clocks = <&clks 105>, <&clks 52>;
86 + clock-names = "ipg", "per";
87 + interrupts = <26>;
88 + };
89 +diff --git a/arch/arm/crypto/aes_glue.c b/arch/arm/crypto/aes_glue.c
90 +index 59f7877ead6a..e73ec2ab1316 100644
91 +--- a/arch/arm/crypto/aes_glue.c
92 ++++ b/arch/arm/crypto/aes_glue.c
93 +@@ -103,6 +103,6 @@ module_exit(aes_fini);
94 +
95 + MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm (ASM)");
96 + MODULE_LICENSE("GPL");
97 +-MODULE_ALIAS("aes");
98 +-MODULE_ALIAS("aes-asm");
99 ++MODULE_ALIAS_CRYPTO("aes");
100 ++MODULE_ALIAS_CRYPTO("aes-asm");
101 + MODULE_AUTHOR("David McCullough <ucdevel@×××××.com>");
102 +diff --git a/arch/arm/crypto/sha1_glue.c b/arch/arm/crypto/sha1_glue.c
103 +index 76cd976230bc..ace4cd67464c 100644
104 +--- a/arch/arm/crypto/sha1_glue.c
105 ++++ b/arch/arm/crypto/sha1_glue.c
106 +@@ -175,5 +175,5 @@ module_exit(sha1_mod_fini);
107 +
108 + MODULE_LICENSE("GPL");
109 + MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm (ARM)");
110 +-MODULE_ALIAS("sha1");
111 ++MODULE_ALIAS_CRYPTO("sha1");
112 + MODULE_AUTHOR("David McCullough <ucdevel@×××××.com>");
113 +diff --git a/arch/powerpc/crypto/sha1.c b/arch/powerpc/crypto/sha1.c
114 +index f9e8b9491efc..b51da9132744 100644
115 +--- a/arch/powerpc/crypto/sha1.c
116 ++++ b/arch/powerpc/crypto/sha1.c
117 +@@ -154,4 +154,5 @@ module_exit(sha1_powerpc_mod_fini);
118 + MODULE_LICENSE("GPL");
119 + MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
120 +
121 +-MODULE_ALIAS("sha1-powerpc");
122 ++MODULE_ALIAS_CRYPTO("sha1");
123 ++MODULE_ALIAS_CRYPTO("sha1-powerpc");
124 +diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
125 +index fd104db9cea1..92eb4d6ad39d 100644
126 +--- a/arch/s390/crypto/aes_s390.c
127 ++++ b/arch/s390/crypto/aes_s390.c
128 +@@ -970,7 +970,7 @@ static void __exit aes_s390_fini(void)
129 + module_init(aes_s390_init);
130 + module_exit(aes_s390_fini);
131 +
132 +-MODULE_ALIAS("aes-all");
133 ++MODULE_ALIAS_CRYPTO("aes-all");
134 +
135 + MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
136 + MODULE_LICENSE("GPL");
137 +diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
138 +index f2d6cccddcf8..a89feffb22b5 100644
139 +--- a/arch/s390/crypto/des_s390.c
140 ++++ b/arch/s390/crypto/des_s390.c
141 +@@ -619,8 +619,8 @@ static void __exit des_s390_exit(void)
142 + module_init(des_s390_init);
143 + module_exit(des_s390_exit);
144 +
145 +-MODULE_ALIAS("des");
146 +-MODULE_ALIAS("des3_ede");
147 ++MODULE_ALIAS_CRYPTO("des");
148 ++MODULE_ALIAS_CRYPTO("des3_ede");
149 +
150 + MODULE_LICENSE("GPL");
151 + MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms");
152 +diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
153 +index d43485d142e9..7940dc90e80b 100644
154 +--- a/arch/s390/crypto/ghash_s390.c
155 ++++ b/arch/s390/crypto/ghash_s390.c
156 +@@ -160,7 +160,7 @@ static void __exit ghash_mod_exit(void)
157 + module_init(ghash_mod_init);
158 + module_exit(ghash_mod_exit);
159 +
160 +-MODULE_ALIAS("ghash");
161 ++MODULE_ALIAS_CRYPTO("ghash");
162 +
163 + MODULE_LICENSE("GPL");
164 + MODULE_DESCRIPTION("GHASH Message Digest Algorithm, s390 implementation");
165 +diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
166 +index a1b3a9dc9d8a..5b2bee323694 100644
167 +--- a/arch/s390/crypto/sha1_s390.c
168 ++++ b/arch/s390/crypto/sha1_s390.c
169 +@@ -103,6 +103,6 @@ static void __exit sha1_s390_fini(void)
170 + module_init(sha1_s390_init);
171 + module_exit(sha1_s390_fini);
172 +
173 +-MODULE_ALIAS("sha1");
174 ++MODULE_ALIAS_CRYPTO("sha1");
175 + MODULE_LICENSE("GPL");
176 + MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
177 +diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
178 +index 9b853809a492..b74ff158108c 100644
179 +--- a/arch/s390/crypto/sha256_s390.c
180 ++++ b/arch/s390/crypto/sha256_s390.c
181 +@@ -143,7 +143,7 @@ static void __exit sha256_s390_fini(void)
182 + module_init(sha256_s390_init);
183 + module_exit(sha256_s390_fini);
184 +
185 +-MODULE_ALIAS("sha256");
186 +-MODULE_ALIAS("sha224");
187 ++MODULE_ALIAS_CRYPTO("sha256");
188 ++MODULE_ALIAS_CRYPTO("sha224");
189 + MODULE_LICENSE("GPL");
190 + MODULE_DESCRIPTION("SHA256 and SHA224 Secure Hash Algorithm");
191 +diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c
192 +index 32a81383b69c..0c36989ba182 100644
193 +--- a/arch/s390/crypto/sha512_s390.c
194 ++++ b/arch/s390/crypto/sha512_s390.c
195 +@@ -86,7 +86,7 @@ static struct shash_alg sha512_alg = {
196 + }
197 + };
198 +
199 +-MODULE_ALIAS("sha512");
200 ++MODULE_ALIAS_CRYPTO("sha512");
201 +
202 + static int sha384_init(struct shash_desc *desc)
203 + {
204 +@@ -126,7 +126,7 @@ static struct shash_alg sha384_alg = {
205 + }
206 + };
207 +
208 +-MODULE_ALIAS("sha384");
209 ++MODULE_ALIAS_CRYPTO("sha384");
210 +
211 + static int __init init(void)
212 + {
213 +diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c
214 +index 503e6d96ad4e..ded4cee35318 100644
215 +--- a/arch/sparc/crypto/aes_glue.c
216 ++++ b/arch/sparc/crypto/aes_glue.c
217 +@@ -499,6 +499,6 @@ module_exit(aes_sparc64_mod_fini);
218 + MODULE_LICENSE("GPL");
219 + MODULE_DESCRIPTION("AES Secure Hash Algorithm, sparc64 aes opcode accelerated");
220 +
221 +-MODULE_ALIAS("aes");
222 ++MODULE_ALIAS_CRYPTO("aes");
223 +
224 + #include "crop_devid.c"
225 +diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c
226 +index 888f6260b4ec..641f55cb61c3 100644
227 +--- a/arch/sparc/crypto/camellia_glue.c
228 ++++ b/arch/sparc/crypto/camellia_glue.c
229 +@@ -322,6 +322,6 @@ module_exit(camellia_sparc64_mod_fini);
230 + MODULE_LICENSE("GPL");
231 + MODULE_DESCRIPTION("Camellia Cipher Algorithm, sparc64 camellia opcode accelerated");
232 +
233 +-MODULE_ALIAS("aes");
234 ++MODULE_ALIAS_CRYPTO("aes");
235 +
236 + #include "crop_devid.c"
237 +diff --git a/arch/sparc/crypto/crc32c_glue.c b/arch/sparc/crypto/crc32c_glue.c
238 +index 5162fad912ce..d1064e46efe8 100644
239 +--- a/arch/sparc/crypto/crc32c_glue.c
240 ++++ b/arch/sparc/crypto/crc32c_glue.c
241 +@@ -176,6 +176,6 @@ module_exit(crc32c_sparc64_mod_fini);
242 + MODULE_LICENSE("GPL");
243 + MODULE_DESCRIPTION("CRC32c (Castagnoli), sparc64 crc32c opcode accelerated");
244 +
245 +-MODULE_ALIAS("crc32c");
246 ++MODULE_ALIAS_CRYPTO("crc32c");
247 +
248 + #include "crop_devid.c"
249 +diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
250 +index 3065bc61f9d3..d11500972994 100644
251 +--- a/arch/sparc/crypto/des_glue.c
252 ++++ b/arch/sparc/crypto/des_glue.c
253 +@@ -532,6 +532,6 @@ module_exit(des_sparc64_mod_fini);
254 + MODULE_LICENSE("GPL");
255 + MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms, sparc64 des opcode accelerated");
256 +
257 +-MODULE_ALIAS("des");
258 ++MODULE_ALIAS_CRYPTO("des");
259 +
260 + #include "crop_devid.c"
261 +diff --git a/arch/sparc/crypto/md5_glue.c b/arch/sparc/crypto/md5_glue.c
262 +index 09a9ea1dfb69..64c7ff5f72a9 100644
263 +--- a/arch/sparc/crypto/md5_glue.c
264 ++++ b/arch/sparc/crypto/md5_glue.c
265 +@@ -185,6 +185,6 @@ module_exit(md5_sparc64_mod_fini);
266 + MODULE_LICENSE("GPL");
267 + MODULE_DESCRIPTION("MD5 Secure Hash Algorithm, sparc64 md5 opcode accelerated");
268 +
269 +-MODULE_ALIAS("md5");
270 ++MODULE_ALIAS_CRYPTO("md5");
271 +
272 + #include "crop_devid.c"
273 +diff --git a/arch/sparc/crypto/sha1_glue.c b/arch/sparc/crypto/sha1_glue.c
274 +index 6cd5f29e1e0d..1b3e47accc74 100644
275 +--- a/arch/sparc/crypto/sha1_glue.c
276 ++++ b/arch/sparc/crypto/sha1_glue.c
277 +@@ -180,6 +180,6 @@ module_exit(sha1_sparc64_mod_fini);
278 + MODULE_LICENSE("GPL");
279 + MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, sparc64 sha1 opcode accelerated");
280 +
281 +-MODULE_ALIAS("sha1");
282 ++MODULE_ALIAS_CRYPTO("sha1");
283 +
284 + #include "crop_devid.c"
285 +diff --git a/arch/sparc/crypto/sha256_glue.c b/arch/sparc/crypto/sha256_glue.c
286 +index 04f555ab2680..41f27cca2a22 100644
287 +--- a/arch/sparc/crypto/sha256_glue.c
288 ++++ b/arch/sparc/crypto/sha256_glue.c
289 +@@ -237,7 +237,7 @@ module_exit(sha256_sparc64_mod_fini);
290 + MODULE_LICENSE("GPL");
291 + MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm, sparc64 sha256 opcode accelerated");
292 +
293 +-MODULE_ALIAS("sha224");
294 +-MODULE_ALIAS("sha256");
295 ++MODULE_ALIAS_CRYPTO("sha224");
296 ++MODULE_ALIAS_CRYPTO("sha256");
297 +
298 + #include "crop_devid.c"
299 +diff --git a/arch/sparc/crypto/sha512_glue.c b/arch/sparc/crypto/sha512_glue.c
300 +index f04d1994d19a..9fff88541b8c 100644
301 +--- a/arch/sparc/crypto/sha512_glue.c
302 ++++ b/arch/sparc/crypto/sha512_glue.c
303 +@@ -222,7 +222,7 @@ module_exit(sha512_sparc64_mod_fini);
304 + MODULE_LICENSE("GPL");
305 + MODULE_DESCRIPTION("SHA-384 and SHA-512 Secure Hash Algorithm, sparc64 sha512 opcode accelerated");
306 +
307 +-MODULE_ALIAS("sha384");
308 +-MODULE_ALIAS("sha512");
309 ++MODULE_ALIAS_CRYPTO("sha384");
310 ++MODULE_ALIAS_CRYPTO("sha512");
311 +
312 + #include "crop_devid.c"
313 +diff --git a/arch/x86/crypto/aes_glue.c b/arch/x86/crypto/aes_glue.c
314 +index aafe8ce0d65d..e26984f7ab8d 100644
315 +--- a/arch/x86/crypto/aes_glue.c
316 ++++ b/arch/x86/crypto/aes_glue.c
317 +@@ -66,5 +66,5 @@ module_exit(aes_fini);
318 +
319 + MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, asm optimized");
320 + MODULE_LICENSE("GPL");
321 +-MODULE_ALIAS("aes");
322 +-MODULE_ALIAS("aes-asm");
323 ++MODULE_ALIAS_CRYPTO("aes");
324 ++MODULE_ALIAS_CRYPTO("aes-asm");
325 +diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
326 +index f80e668785c0..f89e7490d303 100644
327 +--- a/arch/x86/crypto/aesni-intel_glue.c
328 ++++ b/arch/x86/crypto/aesni-intel_glue.c
329 +@@ -1373,4 +1373,4 @@ module_exit(aesni_exit);
330 +
331 + MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
332 + MODULE_LICENSE("GPL");
333 +-MODULE_ALIAS("aes");
334 ++MODULE_ALIAS_CRYPTO("aes");
335 +diff --git a/arch/x86/crypto/blowfish_avx2_glue.c b/arch/x86/crypto/blowfish_avx2_glue.c
336 +index 4417e9aea78d..183395bfc724 100644
337 +--- a/arch/x86/crypto/blowfish_avx2_glue.c
338 ++++ b/arch/x86/crypto/blowfish_avx2_glue.c
339 +@@ -581,5 +581,5 @@ module_exit(fini);
340 +
341 + MODULE_LICENSE("GPL");
342 + MODULE_DESCRIPTION("Blowfish Cipher Algorithm, AVX2 optimized");
343 +-MODULE_ALIAS("blowfish");
344 +-MODULE_ALIAS("blowfish-asm");
345 ++MODULE_ALIAS_CRYPTO("blowfish");
346 ++MODULE_ALIAS_CRYPTO("blowfish-asm");
347 +diff --git a/arch/x86/crypto/blowfish_glue.c b/arch/x86/crypto/blowfish_glue.c
348 +index 3548d76dbaa9..9f7cc6bde5c8 100644
349 +--- a/arch/x86/crypto/blowfish_glue.c
350 ++++ b/arch/x86/crypto/blowfish_glue.c
351 +@@ -465,5 +465,5 @@ module_exit(fini);
352 +
353 + MODULE_LICENSE("GPL");
354 + MODULE_DESCRIPTION("Blowfish Cipher Algorithm, asm optimized");
355 +-MODULE_ALIAS("blowfish");
356 +-MODULE_ALIAS("blowfish-asm");
357 ++MODULE_ALIAS_CRYPTO("blowfish");
358 ++MODULE_ALIAS_CRYPTO("blowfish-asm");
359 +diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c
360 +index 414fe5d7946b..da710fcf8631 100644
361 +--- a/arch/x86/crypto/camellia_aesni_avx2_glue.c
362 ++++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c
363 +@@ -582,5 +582,5 @@ module_exit(camellia_aesni_fini);
364 +
365 + MODULE_LICENSE("GPL");
366 + MODULE_DESCRIPTION("Camellia Cipher Algorithm, AES-NI/AVX2 optimized");
367 +-MODULE_ALIAS("camellia");
368 +-MODULE_ALIAS("camellia-asm");
369 ++MODULE_ALIAS_CRYPTO("camellia");
370 ++MODULE_ALIAS_CRYPTO("camellia-asm");
371 +diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c
372 +index 37fd0c0a81ea..883e1af10dc5 100644
373 +--- a/arch/x86/crypto/camellia_aesni_avx_glue.c
374 ++++ b/arch/x86/crypto/camellia_aesni_avx_glue.c
375 +@@ -574,5 +574,5 @@ module_exit(camellia_aesni_fini);
376 +
377 + MODULE_LICENSE("GPL");
378 + MODULE_DESCRIPTION("Camellia Cipher Algorithm, AES-NI/AVX optimized");
379 +-MODULE_ALIAS("camellia");
380 +-MODULE_ALIAS("camellia-asm");
381 ++MODULE_ALIAS_CRYPTO("camellia");
382 ++MODULE_ALIAS_CRYPTO("camellia-asm");
383 +diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c
384 +index 5cb86ccd4acb..16d65b0d28d1 100644
385 +--- a/arch/x86/crypto/camellia_glue.c
386 ++++ b/arch/x86/crypto/camellia_glue.c
387 +@@ -1725,5 +1725,5 @@ module_exit(fini);
388 +
389 + MODULE_LICENSE("GPL");
390 + MODULE_DESCRIPTION("Camellia Cipher Algorithm, asm optimized");
391 +-MODULE_ALIAS("camellia");
392 +-MODULE_ALIAS("camellia-asm");
393 ++MODULE_ALIAS_CRYPTO("camellia");
394 ++MODULE_ALIAS_CRYPTO("camellia-asm");
395 +diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
396 +index c6631813dc11..d416069e3184 100644
397 +--- a/arch/x86/crypto/cast5_avx_glue.c
398 ++++ b/arch/x86/crypto/cast5_avx_glue.c
399 +@@ -494,4 +494,4 @@ module_exit(cast5_exit);
400 +
401 + MODULE_DESCRIPTION("Cast5 Cipher Algorithm, AVX optimized");
402 + MODULE_LICENSE("GPL");
403 +-MODULE_ALIAS("cast5");
404 ++MODULE_ALIAS_CRYPTO("cast5");
405 +diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c
406 +index 8d0dfb86a559..c19756265d4e 100644
407 +--- a/arch/x86/crypto/cast6_avx_glue.c
408 ++++ b/arch/x86/crypto/cast6_avx_glue.c
409 +@@ -611,4 +611,4 @@ module_exit(cast6_exit);
410 +
411 + MODULE_DESCRIPTION("Cast6 Cipher Algorithm, AVX optimized");
412 + MODULE_LICENSE("GPL");
413 +-MODULE_ALIAS("cast6");
414 ++MODULE_ALIAS_CRYPTO("cast6");
415 +diff --git a/arch/x86/crypto/crc32-pclmul_glue.c b/arch/x86/crypto/crc32-pclmul_glue.c
416 +index 9d014a74ef96..1937fc1d8763 100644
417 +--- a/arch/x86/crypto/crc32-pclmul_glue.c
418 ++++ b/arch/x86/crypto/crc32-pclmul_glue.c
419 +@@ -197,5 +197,5 @@ module_exit(crc32_pclmul_mod_fini);
420 + MODULE_AUTHOR("Alexander Boyko <alexander_boyko@×××××××.com>");
421 + MODULE_LICENSE("GPL");
422 +
423 +-MODULE_ALIAS("crc32");
424 +-MODULE_ALIAS("crc32-pclmul");
425 ++MODULE_ALIAS_CRYPTO("crc32");
426 ++MODULE_ALIAS_CRYPTO("crc32-pclmul");
427 +diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
428 +index 6812ad98355c..28640c3d6af7 100644
429 +--- a/arch/x86/crypto/crc32c-intel_glue.c
430 ++++ b/arch/x86/crypto/crc32c-intel_glue.c
431 +@@ -280,5 +280,5 @@ MODULE_AUTHOR("Austin Zhang <austin.zhang@×××××.com>, Kent Liu <kent.liu@intel.c
432 + MODULE_DESCRIPTION("CRC32c (Castagnoli) optimization using Intel Hardware.");
433 + MODULE_LICENSE("GPL");
434 +
435 +-MODULE_ALIAS("crc32c");
436 +-MODULE_ALIAS("crc32c-intel");
437 ++MODULE_ALIAS_CRYPTO("crc32c");
438 ++MODULE_ALIAS_CRYPTO("crc32c-intel");
439 +diff --git a/arch/x86/crypto/fpu.c b/arch/x86/crypto/fpu.c
440 +index 98d7a188f46b..f368ba261739 100644
441 +--- a/arch/x86/crypto/fpu.c
442 ++++ b/arch/x86/crypto/fpu.c
443 +@@ -17,6 +17,7 @@
444 + #include <linux/kernel.h>
445 + #include <linux/module.h>
446 + #include <linux/slab.h>
447 ++#include <linux/crypto.h>
448 + #include <asm/i387.h>
449 +
450 + struct crypto_fpu_ctx {
451 +@@ -159,3 +160,5 @@ void __exit crypto_fpu_exit(void)
452 + {
453 + crypto_unregister_template(&crypto_fpu_tmpl);
454 + }
455 ++
456 ++MODULE_ALIAS_CRYPTO("fpu");
457 +diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
458 +index d785cf2c529c..a8d6f69f92a3 100644
459 +--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
460 ++++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
461 +@@ -341,4 +341,4 @@ module_exit(ghash_pclmulqdqni_mod_exit);
462 + MODULE_LICENSE("GPL");
463 + MODULE_DESCRIPTION("GHASH Message Digest Algorithm, "
464 + "acclerated by PCLMULQDQ-NI");
465 +-MODULE_ALIAS("ghash");
466 ++MODULE_ALIAS_CRYPTO("ghash");
467 +diff --git a/arch/x86/crypto/salsa20_glue.c b/arch/x86/crypto/salsa20_glue.c
468 +index 5e8e67739bb5..399a29d067d6 100644
469 +--- a/arch/x86/crypto/salsa20_glue.c
470 ++++ b/arch/x86/crypto/salsa20_glue.c
471 +@@ -119,5 +119,5 @@ module_exit(fini);
472 +
473 + MODULE_LICENSE("GPL");
474 + MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm (optimized assembly version)");
475 +-MODULE_ALIAS("salsa20");
476 +-MODULE_ALIAS("salsa20-asm");
477 ++MODULE_ALIAS_CRYPTO("salsa20");
478 ++MODULE_ALIAS_CRYPTO("salsa20-asm");
479 +diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c
480 +index 23aabc6c20a5..cb57caf13ef7 100644
481 +--- a/arch/x86/crypto/serpent_avx2_glue.c
482 ++++ b/arch/x86/crypto/serpent_avx2_glue.c
483 +@@ -558,5 +558,5 @@ module_exit(fini);
484 +
485 + MODULE_LICENSE("GPL");
486 + MODULE_DESCRIPTION("Serpent Cipher Algorithm, AVX2 optimized");
487 +-MODULE_ALIAS("serpent");
488 +-MODULE_ALIAS("serpent-asm");
489 ++MODULE_ALIAS_CRYPTO("serpent");
490 ++MODULE_ALIAS_CRYPTO("serpent-asm");
491 +diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c
492 +index 9ae83cf8d21e..0a86e8b65e60 100644
493 +--- a/arch/x86/crypto/serpent_avx_glue.c
494 ++++ b/arch/x86/crypto/serpent_avx_glue.c
495 +@@ -617,4 +617,4 @@ module_exit(serpent_exit);
496 +
497 + MODULE_DESCRIPTION("Serpent Cipher Algorithm, AVX optimized");
498 + MODULE_LICENSE("GPL");
499 +-MODULE_ALIAS("serpent");
500 ++MODULE_ALIAS_CRYPTO("serpent");
501 +diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c
502 +index 97a356ece24d..279f3899c779 100644
503 +--- a/arch/x86/crypto/serpent_sse2_glue.c
504 ++++ b/arch/x86/crypto/serpent_sse2_glue.c
505 +@@ -618,4 +618,4 @@ module_exit(serpent_sse2_exit);
506 +
507 + MODULE_DESCRIPTION("Serpent Cipher Algorithm, SSE2 optimized");
508 + MODULE_LICENSE("GPL");
509 +-MODULE_ALIAS("serpent");
510 ++MODULE_ALIAS_CRYPTO("serpent");
511 +diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
512 +index 4a11a9d72451..29e1060e9001 100644
513 +--- a/arch/x86/crypto/sha1_ssse3_glue.c
514 ++++ b/arch/x86/crypto/sha1_ssse3_glue.c
515 +@@ -237,4 +237,4 @@ module_exit(sha1_ssse3_mod_fini);
516 + MODULE_LICENSE("GPL");
517 + MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, Supplemental SSE3 accelerated");
518 +
519 +-MODULE_ALIAS("sha1");
520 ++MODULE_ALIAS_CRYPTO("sha1");
521 +diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
522 +index 597d4da69656..ceafb01885ed 100644
523 +--- a/arch/x86/crypto/sha256_ssse3_glue.c
524 ++++ b/arch/x86/crypto/sha256_ssse3_glue.c
525 +@@ -272,4 +272,4 @@ module_exit(sha256_ssse3_mod_fini);
526 + MODULE_LICENSE("GPL");
527 + MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, Supplemental SSE3 accelerated");
528 +
529 +-MODULE_ALIAS("sha256");
530 ++MODULE_ALIAS_CRYPTO("sha256");
531 +diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
532 +index 9f5e71f06671..d1ee9f638d1c 100644
533 +--- a/arch/x86/crypto/sha512_ssse3_glue.c
534 ++++ b/arch/x86/crypto/sha512_ssse3_glue.c
535 +@@ -279,4 +279,4 @@ module_exit(sha512_ssse3_mod_fini);
536 + MODULE_LICENSE("GPL");
537 + MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, Supplemental SSE3 accelerated");
538 +
539 +-MODULE_ALIAS("sha512");
540 ++MODULE_ALIAS_CRYPTO("sha512");
541 +diff --git a/arch/x86/crypto/twofish_avx2_glue.c b/arch/x86/crypto/twofish_avx2_glue.c
542 +index ce33b5be64ee..bb1f0a194d97 100644
543 +--- a/arch/x86/crypto/twofish_avx2_glue.c
544 ++++ b/arch/x86/crypto/twofish_avx2_glue.c
545 +@@ -580,5 +580,5 @@ module_exit(fini);
546 +
547 + MODULE_LICENSE("GPL");
548 + MODULE_DESCRIPTION("Twofish Cipher Algorithm, AVX2 optimized");
549 +-MODULE_ALIAS("twofish");
550 +-MODULE_ALIAS("twofish-asm");
551 ++MODULE_ALIAS_CRYPTO("twofish");
552 ++MODULE_ALIAS_CRYPTO("twofish-asm");
553 +diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c
554 +index 2047a562f6b3..4a1f94422fbb 100644
555 +--- a/arch/x86/crypto/twofish_avx_glue.c
556 ++++ b/arch/x86/crypto/twofish_avx_glue.c
557 +@@ -589,4 +589,4 @@ module_exit(twofish_exit);
558 +
559 + MODULE_DESCRIPTION("Twofish Cipher Algorithm, AVX optimized");
560 + MODULE_LICENSE("GPL");
561 +-MODULE_ALIAS("twofish");
562 ++MODULE_ALIAS_CRYPTO("twofish");
563 +diff --git a/arch/x86/crypto/twofish_glue.c b/arch/x86/crypto/twofish_glue.c
564 +index 0a5202303501..77e06c2da83d 100644
565 +--- a/arch/x86/crypto/twofish_glue.c
566 ++++ b/arch/x86/crypto/twofish_glue.c
567 +@@ -96,5 +96,5 @@ module_exit(fini);
568 +
569 + MODULE_LICENSE("GPL");
570 + MODULE_DESCRIPTION ("Twofish Cipher Algorithm, asm optimized");
571 +-MODULE_ALIAS("twofish");
572 +-MODULE_ALIAS("twofish-asm");
573 ++MODULE_ALIAS_CRYPTO("twofish");
574 ++MODULE_ALIAS_CRYPTO("twofish-asm");
575 +diff --git a/arch/x86/crypto/twofish_glue_3way.c b/arch/x86/crypto/twofish_glue_3way.c
576 +index 13e63b3e1dfb..56d8a08ee479 100644
577 +--- a/arch/x86/crypto/twofish_glue_3way.c
578 ++++ b/arch/x86/crypto/twofish_glue_3way.c
579 +@@ -495,5 +495,5 @@ module_exit(fini);
580 +
581 + MODULE_LICENSE("GPL");
582 + MODULE_DESCRIPTION("Twofish Cipher Algorithm, 3-way parallel asm optimized");
583 +-MODULE_ALIAS("twofish");
584 +-MODULE_ALIAS("twofish-asm");
585 ++MODULE_ALIAS_CRYPTO("twofish");
586 ++MODULE_ALIAS_CRYPTO("twofish-asm");
587 +diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
588 +index 8bf1c06070d5..23fb67e6f845 100644
589 +--- a/arch/x86/include/asm/desc.h
590 ++++ b/arch/x86/include/asm/desc.h
591 +@@ -251,7 +251,8 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
592 + gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
593 + }
594 +
595 +-#define _LDT_empty(info) \
596 ++/* This intentionally ignores lm, since 32-bit apps don't have that field. */
597 ++#define LDT_empty(info) \
598 + ((info)->base_addr == 0 && \
599 + (info)->limit == 0 && \
600 + (info)->contents == 0 && \
601 +@@ -261,11 +262,18 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
602 + (info)->seg_not_present == 1 && \
603 + (info)->useable == 0)
604 +
605 +-#ifdef CONFIG_X86_64
606 +-#define LDT_empty(info) (_LDT_empty(info) && ((info)->lm == 0))
607 +-#else
608 +-#define LDT_empty(info) (_LDT_empty(info))
609 +-#endif
610 ++/* Lots of programs expect an all-zero user_desc to mean "no segment at all". */
611 ++static inline bool LDT_zero(const struct user_desc *info)
612 ++{
613 ++ return (info->base_addr == 0 &&
614 ++ info->limit == 0 &&
615 ++ info->contents == 0 &&
616 ++ info->read_exec_only == 0 &&
617 ++ info->seg_32bit == 0 &&
618 ++ info->limit_in_pages == 0 &&
619 ++ info->seg_not_present == 0 &&
620 ++ info->useable == 0);
621 ++}
622 +
623 + static inline void clear_LDT(void)
624 + {
625 +diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
626 +index 8f4be53ea04b..1853659820e0 100644
627 +--- a/arch/x86/kernel/cpu/mshyperv.c
628 ++++ b/arch/x86/kernel/cpu/mshyperv.c
629 +@@ -60,6 +60,7 @@ static struct clocksource hyperv_cs = {
630 + .rating = 400, /* use this when running on Hyperv*/
631 + .read = read_hv_clock,
632 + .mask = CLOCKSOURCE_MASK(64),
633 ++ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
634 + };
635 +
636 + static void __init ms_hyperv_init_platform(void)
637 +diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
638 +index 4e942f31b1a7..7fc5e843f247 100644
639 +--- a/arch/x86/kernel/tls.c
640 ++++ b/arch/x86/kernel/tls.c
641 +@@ -29,7 +29,28 @@ static int get_free_idx(void)
642 +
643 + static bool tls_desc_okay(const struct user_desc *info)
644 + {
645 +- if (LDT_empty(info))
646 ++ /*
647 ++ * For historical reasons (i.e. no one ever documented how any
648 ++ * of the segmentation APIs work), user programs can and do
649 ++ * assume that a struct user_desc that's all zeros except for
650 ++ * entry_number means "no segment at all". This never actually
651 ++ * worked. In fact, up to Linux 3.19, a struct user_desc like
652 ++ * this would create a 16-bit read-write segment with base and
653 ++ * limit both equal to zero.
654 ++ *
655 ++ * That was close enough to "no segment at all" until we
656 ++ * hardened this function to disallow 16-bit TLS segments. Fix
657 ++ * it up by interpreting these zeroed segments the way that they
658 ++ * were almost certainly intended to be interpreted.
659 ++ *
660 ++ * The correct way to ask for "no segment at all" is to specify
661 ++ * a user_desc that satisfies LDT_empty. To keep everything
662 ++ * working, we accept both.
663 ++ *
664 ++ * Note that there's a similar kludge in modify_ldt -- look at
665 ++ * the distinction between modes 1 and 0x11.
666 ++ */
667 ++ if (LDT_empty(info) || LDT_zero(info))
668 + return true;
669 +
670 + /*
671 +@@ -71,7 +92,7 @@ static void set_tls_desc(struct task_struct *p, int idx,
672 + cpu = get_cpu();
673 +
674 + while (n-- > 0) {
675 +- if (LDT_empty(info))
676 ++ if (LDT_empty(info) || LDT_zero(info))
677 + desc->a = desc->b = 0;
678 + else
679 + fill_ldt(desc, info);
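For context on the tls_desc_okay() change above: the "all zeros except entry_number" descriptor it now accepts again is what userspace passes to drop a TLS slot. A minimal userspace sketch follows; the helper name is made up, not from the patch:

/*
 * x86 only.  Clears a GDT TLS slot the way the comment in tls.c above
 * describes: a struct user_desc that is zero except for entry_number.
 * With the earlier 16-bit TLS hardening applied, such a call started
 * failing with -EINVAL; with LDT_zero() it is treated as "no segment
 * at all" again.
 */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>

static int clear_tls_slot(int entry_number)
{
	struct user_desc desc;

	memset(&desc, 0, sizeof(desc));
	desc.entry_number = entry_number;	/* every other field stays 0 */
	return syscall(SYS_set_thread_area, &desc);
}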
680 +diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
681 +index 332cafe909eb..0010ed7c3ec2 100644
682 +--- a/arch/x86/kernel/traps.c
683 ++++ b/arch/x86/kernel/traps.c
684 +@@ -362,7 +362,7 @@ exit:
685 + * for scheduling or signal handling. The actual stack switch is done in
686 + * entry.S
687 + */
688 +-asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
689 ++asmlinkage notrace __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
690 + {
691 + struct pt_regs *regs = eregs;
692 + /* Did already sync */
693 +@@ -387,7 +387,7 @@ struct bad_iret_stack {
694 + struct pt_regs regs;
695 + };
696 +
697 +-asmlinkage __visible
698 ++asmlinkage __visible notrace __kprobes
699 + struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
700 + {
701 + /*
702 +diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
703 +index 4e27ba53c40c..27e3a14fc917 100644
704 +--- a/arch/x86/kernel/tsc.c
705 ++++ b/arch/x86/kernel/tsc.c
706 +@@ -380,7 +380,7 @@ static unsigned long quick_pit_calibrate(void)
707 + goto success;
708 + }
709 + }
710 +- pr_err("Fast TSC calibration failed\n");
711 ++ pr_info("Fast TSC calibration failed\n");
712 + return 0;
713 +
714 + success:
715 +diff --git a/crypto/842.c b/crypto/842.c
716 +index 65c7a89cfa09..b48f4f108c47 100644
717 +--- a/crypto/842.c
718 ++++ b/crypto/842.c
719 +@@ -180,3 +180,4 @@ module_exit(nx842_mod_exit);
720 +
721 + MODULE_LICENSE("GPL");
722 + MODULE_DESCRIPTION("842 Compression Algorithm");
723 ++MODULE_ALIAS_CRYPTO("842");
724 +diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
725 +index 47f2e5c71759..e138ad85bd83 100644
726 +--- a/crypto/aes_generic.c
727 ++++ b/crypto/aes_generic.c
728 +@@ -1474,4 +1474,5 @@ module_exit(aes_fini);
729 +
730 + MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
731 + MODULE_LICENSE("Dual BSD/GPL");
732 +-MODULE_ALIAS("aes");
733 ++MODULE_ALIAS_CRYPTO("aes");
734 ++MODULE_ALIAS_CRYPTO("aes-generic");
735 +diff --git a/crypto/algapi.c b/crypto/algapi.c
736 +index 7a1ae87f1683..00d8d939733b 100644
737 +--- a/crypto/algapi.c
738 ++++ b/crypto/algapi.c
739 +@@ -495,8 +495,8 @@ static struct crypto_template *__crypto_lookup_template(const char *name)
740 +
741 + struct crypto_template *crypto_lookup_template(const char *name)
742 + {
743 +- return try_then_request_module(__crypto_lookup_template(name), "%s",
744 +- name);
745 ++ return try_then_request_module(__crypto_lookup_template(name),
746 ++ "crypto-%s", name);
747 + }
748 + EXPORT_SYMBOL_GPL(crypto_lookup_template);
749 +
750 +diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
751 +index 666f1962a160..6f5bebc9bf01 100644
752 +--- a/crypto/ansi_cprng.c
753 ++++ b/crypto/ansi_cprng.c
754 +@@ -476,4 +476,5 @@ module_param(dbg, int, 0);
755 + MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)");
756 + module_init(prng_mod_init);
757 + module_exit(prng_mod_fini);
758 +-MODULE_ALIAS("stdrng");
759 ++MODULE_ALIAS_CRYPTO("stdrng");
760 ++MODULE_ALIAS_CRYPTO("ansi_cprng");
761 +diff --git a/crypto/anubis.c b/crypto/anubis.c
762 +index 008c8a4fb67c..4bb187c2a902 100644
763 +--- a/crypto/anubis.c
764 ++++ b/crypto/anubis.c
765 +@@ -704,3 +704,4 @@ module_exit(anubis_mod_fini);
766 +
767 + MODULE_LICENSE("GPL");
768 + MODULE_DESCRIPTION("Anubis Cryptographic Algorithm");
769 ++MODULE_ALIAS_CRYPTO("anubis");
770 +diff --git a/crypto/api.c b/crypto/api.c
771 +index 37c4c7213de0..335abea14f19 100644
772 +--- a/crypto/api.c
773 ++++ b/crypto/api.c
774 +@@ -216,11 +216,11 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
775 +
776 + alg = crypto_alg_lookup(name, type, mask);
777 + if (!alg) {
778 +- request_module("%s", name);
779 ++ request_module("crypto-%s", name);
780 +
781 + if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask &
782 + CRYPTO_ALG_NEED_FALLBACK))
783 +- request_module("%s-all", name);
784 ++ request_module("crypto-%s-all", name);
785 +
786 + alg = crypto_alg_lookup(name, type, mask);
787 + }
788 +diff --git a/crypto/arc4.c b/crypto/arc4.c
789 +index 5a772c3657d5..f1a81925558f 100644
790 +--- a/crypto/arc4.c
791 ++++ b/crypto/arc4.c
792 +@@ -166,3 +166,4 @@ module_exit(arc4_exit);
793 + MODULE_LICENSE("GPL");
794 + MODULE_DESCRIPTION("ARC4 Cipher Algorithm");
795 + MODULE_AUTHOR("Jon Oberheide <jon@×××××××××.org>");
796 ++MODULE_ALIAS_CRYPTO("arc4");
797 +diff --git a/crypto/authenc.c b/crypto/authenc.c
798 +index 528b00bc4769..a2cfae251dd5 100644
799 +--- a/crypto/authenc.c
800 ++++ b/crypto/authenc.c
801 +@@ -709,3 +709,4 @@ module_exit(crypto_authenc_module_exit);
802 +
803 + MODULE_LICENSE("GPL");
804 + MODULE_DESCRIPTION("Simple AEAD wrapper for IPsec");
805 ++MODULE_ALIAS_CRYPTO("authenc");
806 +diff --git a/crypto/authencesn.c b/crypto/authencesn.c
807 +index ab53762fc309..16c225cb28c2 100644
808 +--- a/crypto/authencesn.c
809 ++++ b/crypto/authencesn.c
810 +@@ -832,3 +832,4 @@ module_exit(crypto_authenc_esn_module_exit);
811 + MODULE_LICENSE("GPL");
812 + MODULE_AUTHOR("Steffen Klassert <steffen.klassert@×××××××.com>");
813 + MODULE_DESCRIPTION("AEAD wrapper for IPsec with extended sequence numbers");
814 ++MODULE_ALIAS_CRYPTO("authencesn");
815 +diff --git a/crypto/blowfish_generic.c b/crypto/blowfish_generic.c
816 +index 8baf5447d35b..87b392a77a93 100644
817 +--- a/crypto/blowfish_generic.c
818 ++++ b/crypto/blowfish_generic.c
819 +@@ -138,4 +138,5 @@ module_exit(blowfish_mod_fini);
820 +
821 + MODULE_LICENSE("GPL");
822 + MODULE_DESCRIPTION("Blowfish Cipher Algorithm");
823 +-MODULE_ALIAS("blowfish");
824 ++MODULE_ALIAS_CRYPTO("blowfish");
825 ++MODULE_ALIAS_CRYPTO("blowfish-generic");
826 +diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c
827 +index 75efa2052305..029587f808f4 100644
828 +--- a/crypto/camellia_generic.c
829 ++++ b/crypto/camellia_generic.c
830 +@@ -1098,4 +1098,5 @@ module_exit(camellia_fini);
831 +
832 + MODULE_DESCRIPTION("Camellia Cipher Algorithm");
833 + MODULE_LICENSE("GPL");
834 +-MODULE_ALIAS("camellia");
835 ++MODULE_ALIAS_CRYPTO("camellia");
836 ++MODULE_ALIAS_CRYPTO("camellia-generic");
837 +diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c
838 +index 5558f630a0eb..df5c72629383 100644
839 +--- a/crypto/cast5_generic.c
840 ++++ b/crypto/cast5_generic.c
841 +@@ -549,4 +549,5 @@ module_exit(cast5_mod_fini);
842 +
843 + MODULE_LICENSE("GPL");
844 + MODULE_DESCRIPTION("Cast5 Cipher Algorithm");
845 +-MODULE_ALIAS("cast5");
846 ++MODULE_ALIAS_CRYPTO("cast5");
847 ++MODULE_ALIAS_CRYPTO("cast5-generic");
848 +diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c
849 +index de732528a430..058c8d755d03 100644
850 +--- a/crypto/cast6_generic.c
851 ++++ b/crypto/cast6_generic.c
852 +@@ -291,4 +291,5 @@ module_exit(cast6_mod_fini);
853 +
854 + MODULE_LICENSE("GPL");
855 + MODULE_DESCRIPTION("Cast6 Cipher Algorithm");
856 +-MODULE_ALIAS("cast6");
857 ++MODULE_ALIAS_CRYPTO("cast6");
858 ++MODULE_ALIAS_CRYPTO("cast6-generic");
859 +diff --git a/crypto/cbc.c b/crypto/cbc.c
860 +index 61ac42e1e32b..780ee27b2d43 100644
861 +--- a/crypto/cbc.c
862 ++++ b/crypto/cbc.c
863 +@@ -289,3 +289,4 @@ module_exit(crypto_cbc_module_exit);
864 +
865 + MODULE_LICENSE("GPL");
866 + MODULE_DESCRIPTION("CBC block cipher algorithm");
867 ++MODULE_ALIAS_CRYPTO("cbc");
868 +diff --git a/crypto/ccm.c b/crypto/ccm.c
869 +index ed009b77e67d..c569c9c6afe3 100644
870 +--- a/crypto/ccm.c
871 ++++ b/crypto/ccm.c
872 +@@ -879,5 +879,6 @@ module_exit(crypto_ccm_module_exit);
873 +
874 + MODULE_LICENSE("GPL");
875 + MODULE_DESCRIPTION("Counter with CBC MAC");
876 +-MODULE_ALIAS("ccm_base");
877 +-MODULE_ALIAS("rfc4309");
878 ++MODULE_ALIAS_CRYPTO("ccm_base");
879 ++MODULE_ALIAS_CRYPTO("rfc4309");
880 ++MODULE_ALIAS_CRYPTO("ccm");
881 +diff --git a/crypto/chainiv.c b/crypto/chainiv.c
882 +index 834d8dd3d4fc..22b7e55b0e1b 100644
883 +--- a/crypto/chainiv.c
884 ++++ b/crypto/chainiv.c
885 +@@ -359,3 +359,4 @@ module_exit(chainiv_module_exit);
886 +
887 + MODULE_LICENSE("GPL");
888 + MODULE_DESCRIPTION("Chain IV Generator");
889 ++MODULE_ALIAS_CRYPTO("chainiv");
890 +diff --git a/crypto/cmac.c b/crypto/cmac.c
891 +index 50880cf17fad..7a8bfbd548f6 100644
892 +--- a/crypto/cmac.c
893 ++++ b/crypto/cmac.c
894 +@@ -313,3 +313,4 @@ module_exit(crypto_cmac_module_exit);
895 +
896 + MODULE_LICENSE("GPL");
897 + MODULE_DESCRIPTION("CMAC keyed hash algorithm");
898 ++MODULE_ALIAS_CRYPTO("cmac");
899 +diff --git a/crypto/crc32.c b/crypto/crc32.c
900 +index 9d1c41569898..187ded28cb0b 100644
901 +--- a/crypto/crc32.c
902 ++++ b/crypto/crc32.c
903 +@@ -156,3 +156,4 @@ module_exit(crc32_mod_fini);
904 + MODULE_AUTHOR("Alexander Boyko <alexander_boyko@×××××××.com>");
905 + MODULE_DESCRIPTION("CRC32 calculations wrapper for lib/crc32");
906 + MODULE_LICENSE("GPL");
907 ++MODULE_ALIAS_CRYPTO("crc32");
908 +diff --git a/crypto/cryptd.c b/crypto/cryptd.c
909 +index 7bdd61b867c8..75c415d37086 100644
910 +--- a/crypto/cryptd.c
911 ++++ b/crypto/cryptd.c
912 +@@ -955,3 +955,4 @@ module_exit(cryptd_exit);
913 +
914 + MODULE_LICENSE("GPL");
915 + MODULE_DESCRIPTION("Software async crypto daemon");
916 ++MODULE_ALIAS_CRYPTO("cryptd");
917 +diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
918 +index fee7265cd35d..7b39fa3deac2 100644
919 +--- a/crypto/crypto_null.c
920 ++++ b/crypto/crypto_null.c
921 +@@ -149,9 +149,9 @@ static struct crypto_alg null_algs[3] = { {
922 + .coa_decompress = null_compress } }
923 + } };
924 +
925 +-MODULE_ALIAS("compress_null");
926 +-MODULE_ALIAS("digest_null");
927 +-MODULE_ALIAS("cipher_null");
928 ++MODULE_ALIAS_CRYPTO("compress_null");
929 ++MODULE_ALIAS_CRYPTO("digest_null");
930 ++MODULE_ALIAS_CRYPTO("cipher_null");
931 +
932 + static int __init crypto_null_mod_init(void)
933 + {
934 +diff --git a/crypto/ctr.c b/crypto/ctr.c
935 +index f2b94f27bb2c..2386f7313952 100644
936 +--- a/crypto/ctr.c
937 ++++ b/crypto/ctr.c
938 +@@ -466,4 +466,5 @@ module_exit(crypto_ctr_module_exit);
939 +
940 + MODULE_LICENSE("GPL");
941 + MODULE_DESCRIPTION("CTR Counter block mode");
942 +-MODULE_ALIAS("rfc3686");
943 ++MODULE_ALIAS_CRYPTO("rfc3686");
944 ++MODULE_ALIAS_CRYPTO("ctr");
945 +diff --git a/crypto/cts.c b/crypto/cts.c
946 +index 042223f8e733..60b9da3fa7c1 100644
947 +--- a/crypto/cts.c
948 ++++ b/crypto/cts.c
949 +@@ -350,3 +350,4 @@ module_exit(crypto_cts_module_exit);
950 +
951 + MODULE_LICENSE("Dual BSD/GPL");
952 + MODULE_DESCRIPTION("CTS-CBC CipherText Stealing for CBC");
953 ++MODULE_ALIAS_CRYPTO("cts");
954 +diff --git a/crypto/deflate.c b/crypto/deflate.c
955 +index b57d70eb156b..95d8d37c5021 100644
956 +--- a/crypto/deflate.c
957 ++++ b/crypto/deflate.c
958 +@@ -222,4 +222,4 @@ module_exit(deflate_mod_fini);
959 + MODULE_LICENSE("GPL");
960 + MODULE_DESCRIPTION("Deflate Compression Algorithm for IPCOMP");
961 + MODULE_AUTHOR("James Morris <jmorris@×××××××××××××.au>");
962 +-
963 ++MODULE_ALIAS_CRYPTO("deflate");
964 +diff --git a/crypto/des_generic.c b/crypto/des_generic.c
965 +index f6cf63f88468..3ec6071309d9 100644
966 +--- a/crypto/des_generic.c
967 ++++ b/crypto/des_generic.c
968 +@@ -971,8 +971,6 @@ static struct crypto_alg des_algs[2] = { {
969 + .cia_decrypt = des3_ede_decrypt } }
970 + } };
971 +
972 +-MODULE_ALIAS("des3_ede");
973 +-
974 + static int __init des_generic_mod_init(void)
975 + {
976 + return crypto_register_algs(des_algs, ARRAY_SIZE(des_algs));
977 +@@ -989,4 +987,7 @@ module_exit(des_generic_mod_fini);
978 + MODULE_LICENSE("GPL");
979 + MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms");
980 + MODULE_AUTHOR("Dag Arne Osvik <da@×××××.no>");
981 +-MODULE_ALIAS("des");
982 ++MODULE_ALIAS_CRYPTO("des");
983 ++MODULE_ALIAS_CRYPTO("des-generic");
984 ++MODULE_ALIAS_CRYPTO("des3_ede");
985 ++MODULE_ALIAS_CRYPTO("des3_ede-generic");
986 +diff --git a/crypto/ecb.c b/crypto/ecb.c
987 +index 935cfef4aa84..12011aff0971 100644
988 +--- a/crypto/ecb.c
989 ++++ b/crypto/ecb.c
990 +@@ -185,3 +185,4 @@ module_exit(crypto_ecb_module_exit);
991 +
992 + MODULE_LICENSE("GPL");
993 + MODULE_DESCRIPTION("ECB block cipher algorithm");
994 ++MODULE_ALIAS_CRYPTO("ecb");
995 +diff --git a/crypto/eseqiv.c b/crypto/eseqiv.c
996 +index 42ce9f570aec..388f582ab0b9 100644
997 +--- a/crypto/eseqiv.c
998 ++++ b/crypto/eseqiv.c
999 +@@ -267,3 +267,4 @@ module_exit(eseqiv_module_exit);
1000 +
1001 + MODULE_LICENSE("GPL");
1002 + MODULE_DESCRIPTION("Encrypted Sequence Number IV Generator");
1003 ++MODULE_ALIAS_CRYPTO("eseqiv");
1004 +diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c
1005 +index 3b2cf569c684..300f5b80a074 100644
1006 +--- a/crypto/fcrypt.c
1007 ++++ b/crypto/fcrypt.c
1008 +@@ -420,3 +420,4 @@ module_exit(fcrypt_mod_fini);
1009 + MODULE_LICENSE("Dual BSD/GPL");
1010 + MODULE_DESCRIPTION("FCrypt Cipher Algorithm");
1011 + MODULE_AUTHOR("David Howells <dhowells@××××××.com>");
1012 ++MODULE_ALIAS_CRYPTO("fcrypt");
1013 +diff --git a/crypto/gcm.c b/crypto/gcm.c
1014 +index 43e1fb05ea54..b4c252066f7b 100644
1015 +--- a/crypto/gcm.c
1016 ++++ b/crypto/gcm.c
1017 +@@ -1441,6 +1441,7 @@ module_exit(crypto_gcm_module_exit);
1018 + MODULE_LICENSE("GPL");
1019 + MODULE_DESCRIPTION("Galois/Counter Mode");
1020 + MODULE_AUTHOR("Mikko Herranen <mh1@×××.fi>");
1021 +-MODULE_ALIAS("gcm_base");
1022 +-MODULE_ALIAS("rfc4106");
1023 +-MODULE_ALIAS("rfc4543");
1024 ++MODULE_ALIAS_CRYPTO("gcm_base");
1025 ++MODULE_ALIAS_CRYPTO("rfc4106");
1026 ++MODULE_ALIAS_CRYPTO("rfc4543");
1027 ++MODULE_ALIAS_CRYPTO("gcm");
1028 +diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
1029 +index 9d3f0c69a86f..bac70995e064 100644
1030 +--- a/crypto/ghash-generic.c
1031 ++++ b/crypto/ghash-generic.c
1032 +@@ -172,4 +172,5 @@ module_exit(ghash_mod_exit);
1033 +
1034 + MODULE_LICENSE("GPL");
1035 + MODULE_DESCRIPTION("GHASH Message Digest Algorithm");
1036 +-MODULE_ALIAS("ghash");
1037 ++MODULE_ALIAS_CRYPTO("ghash");
1038 ++MODULE_ALIAS_CRYPTO("ghash-generic");
1039 +diff --git a/crypto/hmac.c b/crypto/hmac.c
1040 +index 8d9544cf8169..ade790b454e9 100644
1041 +--- a/crypto/hmac.c
1042 ++++ b/crypto/hmac.c
1043 +@@ -271,3 +271,4 @@ module_exit(hmac_module_exit);
1044 +
1045 + MODULE_LICENSE("GPL");
1046 + MODULE_DESCRIPTION("HMAC hash algorithm");
1047 ++MODULE_ALIAS_CRYPTO("hmac");
1048 +diff --git a/crypto/khazad.c b/crypto/khazad.c
1049 +index 60e7cd66facc..873eb5ded6d7 100644
1050 +--- a/crypto/khazad.c
1051 ++++ b/crypto/khazad.c
1052 +@@ -880,3 +880,4 @@ module_exit(khazad_mod_fini);
1053 +
1054 + MODULE_LICENSE("GPL");
1055 + MODULE_DESCRIPTION("Khazad Cryptographic Algorithm");
1056 ++MODULE_ALIAS_CRYPTO("khazad");
1057 +diff --git a/crypto/krng.c b/crypto/krng.c
1058 +index a2d2b72fc135..0224841b6579 100644
1059 +--- a/crypto/krng.c
1060 ++++ b/crypto/krng.c
1061 +@@ -62,4 +62,5 @@ module_exit(krng_mod_fini);
1062 +
1063 + MODULE_LICENSE("GPL");
1064 + MODULE_DESCRIPTION("Kernel Random Number Generator");
1065 +-MODULE_ALIAS("stdrng");
1066 ++MODULE_ALIAS_CRYPTO("stdrng");
1067 ++MODULE_ALIAS_CRYPTO("krng");
1068 +diff --git a/crypto/lrw.c b/crypto/lrw.c
1069 +index ba42acc4deba..6f9908a7ebcb 100644
1070 +--- a/crypto/lrw.c
1071 ++++ b/crypto/lrw.c
1072 +@@ -400,3 +400,4 @@ module_exit(crypto_module_exit);
1073 +
1074 + MODULE_LICENSE("GPL");
1075 + MODULE_DESCRIPTION("LRW block cipher mode");
1076 ++MODULE_ALIAS_CRYPTO("lrw");
1077 +diff --git a/crypto/lzo.c b/crypto/lzo.c
1078 +index 1c2aa69c54b8..d1ff69404353 100644
1079 +--- a/crypto/lzo.c
1080 ++++ b/crypto/lzo.c
1081 +@@ -103,3 +103,4 @@ module_exit(lzo_mod_fini);
1082 +
1083 + MODULE_LICENSE("GPL");
1084 + MODULE_DESCRIPTION("LZO Compression Algorithm");
1085 ++MODULE_ALIAS_CRYPTO("lzo");
1086 +diff --git a/crypto/md4.c b/crypto/md4.c
1087 +index 0477a6a01d58..3515af425cc9 100644
1088 +--- a/crypto/md4.c
1089 ++++ b/crypto/md4.c
1090 +@@ -255,4 +255,4 @@ module_exit(md4_mod_fini);
1091 +
1092 + MODULE_LICENSE("GPL");
1093 + MODULE_DESCRIPTION("MD4 Message Digest Algorithm");
1094 +-
1095 ++MODULE_ALIAS_CRYPTO("md4");
1096 +diff --git a/crypto/md5.c b/crypto/md5.c
1097 +index 7febeaab923b..36f5e5b103f3 100644
1098 +--- a/crypto/md5.c
1099 ++++ b/crypto/md5.c
1100 +@@ -168,3 +168,4 @@ module_exit(md5_mod_fini);
1101 +
1102 + MODULE_LICENSE("GPL");
1103 + MODULE_DESCRIPTION("MD5 Message Digest Algorithm");
1104 ++MODULE_ALIAS_CRYPTO("md5");
1105 +diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c
1106 +index 079b761bc70d..46195e0d0f4d 100644
1107 +--- a/crypto/michael_mic.c
1108 ++++ b/crypto/michael_mic.c
1109 +@@ -184,3 +184,4 @@ module_exit(michael_mic_exit);
1110 + MODULE_LICENSE("GPL v2");
1111 + MODULE_DESCRIPTION("Michael MIC");
1112 + MODULE_AUTHOR("Jouni Malinen <j@××.fi>");
1113 ++MODULE_ALIAS_CRYPTO("michael_mic");
1114 +diff --git a/crypto/pcbc.c b/crypto/pcbc.c
1115 +index d1b8bdfb5855..f654965f0933 100644
1116 +--- a/crypto/pcbc.c
1117 ++++ b/crypto/pcbc.c
1118 +@@ -295,3 +295,4 @@ module_exit(crypto_pcbc_module_exit);
1119 +
1120 + MODULE_LICENSE("GPL");
1121 + MODULE_DESCRIPTION("PCBC block cipher algorithm");
1122 ++MODULE_ALIAS_CRYPTO("pcbc");
1123 +diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
1124 +index b2c99dc1c5e2..61ff946db748 100644
1125 +--- a/crypto/pcrypt.c
1126 ++++ b/crypto/pcrypt.c
1127 +@@ -565,3 +565,4 @@ module_exit(pcrypt_exit);
1128 + MODULE_LICENSE("GPL");
1129 + MODULE_AUTHOR("Steffen Klassert <steffen.klassert@×××××××.com>");
1130 + MODULE_DESCRIPTION("Parallel crypto wrapper");
1131 ++MODULE_ALIAS_CRYPTO("pcrypt");
1132 +diff --git a/crypto/rmd128.c b/crypto/rmd128.c
1133 +index 8a0f68b7f257..049486ede938 100644
1134 +--- a/crypto/rmd128.c
1135 ++++ b/crypto/rmd128.c
1136 +@@ -327,3 +327,4 @@ module_exit(rmd128_mod_fini);
1137 + MODULE_LICENSE("GPL");
1138 + MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@××××××××.ch>");
1139 + MODULE_DESCRIPTION("RIPEMD-128 Message Digest");
1140 ++MODULE_ALIAS_CRYPTO("rmd128");
1141 +diff --git a/crypto/rmd160.c b/crypto/rmd160.c
1142 +index 525d7bb752cf..de585e51d455 100644
1143 +--- a/crypto/rmd160.c
1144 ++++ b/crypto/rmd160.c
1145 +@@ -371,3 +371,4 @@ module_exit(rmd160_mod_fini);
1146 + MODULE_LICENSE("GPL");
1147 + MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@××××××××.ch>");
1148 + MODULE_DESCRIPTION("RIPEMD-160 Message Digest");
1149 ++MODULE_ALIAS_CRYPTO("rmd160");
1150 +diff --git a/crypto/rmd256.c b/crypto/rmd256.c
1151 +index 69293d9b56e0..4ec02a754e09 100644
1152 +--- a/crypto/rmd256.c
1153 ++++ b/crypto/rmd256.c
1154 +@@ -346,3 +346,4 @@ module_exit(rmd256_mod_fini);
1155 + MODULE_LICENSE("GPL");
1156 + MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@××××××××.ch>");
1157 + MODULE_DESCRIPTION("RIPEMD-256 Message Digest");
1158 ++MODULE_ALIAS_CRYPTO("rmd256");
1159 +diff --git a/crypto/rmd320.c b/crypto/rmd320.c
1160 +index 09f97dfdfbba..770f2cb369f8 100644
1161 +--- a/crypto/rmd320.c
1162 ++++ b/crypto/rmd320.c
1163 +@@ -395,3 +395,4 @@ module_exit(rmd320_mod_fini);
1164 + MODULE_LICENSE("GPL");
1165 + MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@××××××××.ch>");
1166 + MODULE_DESCRIPTION("RIPEMD-320 Message Digest");
1167 ++MODULE_ALIAS_CRYPTO("rmd320");
1168 +diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
1169 +index 9a4770c02284..f550b5d94630 100644
1170 +--- a/crypto/salsa20_generic.c
1171 ++++ b/crypto/salsa20_generic.c
1172 +@@ -248,4 +248,5 @@ module_exit(salsa20_generic_mod_fini);
1173 +
1174 + MODULE_LICENSE("GPL");
1175 + MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm");
1176 +-MODULE_ALIAS("salsa20");
1177 ++MODULE_ALIAS_CRYPTO("salsa20");
1178 ++MODULE_ALIAS_CRYPTO("salsa20-generic");
1179 +diff --git a/crypto/seed.c b/crypto/seed.c
1180 +index 9c904d6d2151..c6ba8438be43 100644
1181 +--- a/crypto/seed.c
1182 ++++ b/crypto/seed.c
1183 +@@ -476,3 +476,4 @@ module_exit(seed_fini);
1184 + MODULE_DESCRIPTION("SEED Cipher Algorithm");
1185 + MODULE_LICENSE("GPL");
1186 + MODULE_AUTHOR("Hye-Shik Chang <perky@×××××××.org>, Kim Hyun <hkim@×××××××.kr>");
1187 ++MODULE_ALIAS_CRYPTO("seed");
1188 +diff --git a/crypto/seqiv.c b/crypto/seqiv.c
1189 +index f2cba4ed6f25..49a4069ff453 100644
1190 +--- a/crypto/seqiv.c
1191 ++++ b/crypto/seqiv.c
1192 +@@ -362,3 +362,4 @@ module_exit(seqiv_module_exit);
1193 +
1194 + MODULE_LICENSE("GPL");
1195 + MODULE_DESCRIPTION("Sequence Number IV Generator");
1196 ++MODULE_ALIAS_CRYPTO("seqiv");
1197 +diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c
1198 +index 7ddbd7e88859..94970a794975 100644
1199 +--- a/crypto/serpent_generic.c
1200 ++++ b/crypto/serpent_generic.c
1201 +@@ -665,5 +665,6 @@ module_exit(serpent_mod_fini);
1202 + MODULE_LICENSE("GPL");
1203 + MODULE_DESCRIPTION("Serpent and tnepres (kerneli compatible serpent reversed) Cipher Algorithm");
1204 + MODULE_AUTHOR("Dag Arne Osvik <osvik@××××××.no>");
1205 +-MODULE_ALIAS("tnepres");
1206 +-MODULE_ALIAS("serpent");
1207 ++MODULE_ALIAS_CRYPTO("tnepres");
1208 ++MODULE_ALIAS_CRYPTO("serpent");
1209 ++MODULE_ALIAS_CRYPTO("serpent-generic");
1210 +diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
1211 +index 42794803c480..fdf7c00de4b0 100644
1212 +--- a/crypto/sha1_generic.c
1213 ++++ b/crypto/sha1_generic.c
1214 +@@ -153,4 +153,5 @@ module_exit(sha1_generic_mod_fini);
1215 + MODULE_LICENSE("GPL");
1216 + MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
1217 +
1218 +-MODULE_ALIAS("sha1");
1219 ++MODULE_ALIAS_CRYPTO("sha1");
1220 ++MODULE_ALIAS_CRYPTO("sha1-generic");
1221 +diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
1222 +index 543366779524..136381bdd48d 100644
1223 +--- a/crypto/sha256_generic.c
1224 ++++ b/crypto/sha256_generic.c
1225 +@@ -384,5 +384,7 @@ module_exit(sha256_generic_mod_fini);
1226 + MODULE_LICENSE("GPL");
1227 + MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm");
1228 +
1229 +-MODULE_ALIAS("sha224");
1230 +-MODULE_ALIAS("sha256");
1231 ++MODULE_ALIAS_CRYPTO("sha224");
1232 ++MODULE_ALIAS_CRYPTO("sha224-generic");
1233 ++MODULE_ALIAS_CRYPTO("sha256");
1234 ++MODULE_ALIAS_CRYPTO("sha256-generic");
1235 +diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
1236 +index 4c5862095679..fb2d7b8f163f 100644
1237 +--- a/crypto/sha512_generic.c
1238 ++++ b/crypto/sha512_generic.c
1239 +@@ -285,5 +285,7 @@ module_exit(sha512_generic_mod_fini);
1240 + MODULE_LICENSE("GPL");
1241 + MODULE_DESCRIPTION("SHA-512 and SHA-384 Secure Hash Algorithms");
1242 +
1243 +-MODULE_ALIAS("sha384");
1244 +-MODULE_ALIAS("sha512");
1245 ++MODULE_ALIAS_CRYPTO("sha384");
1246 ++MODULE_ALIAS_CRYPTO("sha384-generic");
1247 ++MODULE_ALIAS_CRYPTO("sha512");
1248 ++MODULE_ALIAS_CRYPTO("sha512-generic");
1249 +diff --git a/crypto/tea.c b/crypto/tea.c
1250 +index 0a572323ee4a..b70b441c7d1e 100644
1251 +--- a/crypto/tea.c
1252 ++++ b/crypto/tea.c
1253 +@@ -270,8 +270,9 @@ static void __exit tea_mod_fini(void)
1254 + crypto_unregister_algs(tea_algs, ARRAY_SIZE(tea_algs));
1255 + }
1256 +
1257 +-MODULE_ALIAS("xtea");
1258 +-MODULE_ALIAS("xeta");
1259 ++MODULE_ALIAS_CRYPTO("tea");
1260 ++MODULE_ALIAS_CRYPTO("xtea");
1261 ++MODULE_ALIAS_CRYPTO("xeta");
1262 +
1263 + module_init(tea_mod_init);
1264 + module_exit(tea_mod_fini);
1265 +diff --git a/crypto/tgr192.c b/crypto/tgr192.c
1266 +index 87403556fd0b..f7ed2fba396c 100644
1267 +--- a/crypto/tgr192.c
1268 ++++ b/crypto/tgr192.c
1269 +@@ -676,8 +676,9 @@ static void __exit tgr192_mod_fini(void)
1270 + crypto_unregister_shashes(tgr_algs, ARRAY_SIZE(tgr_algs));
1271 + }
1272 +
1273 +-MODULE_ALIAS("tgr160");
1274 +-MODULE_ALIAS("tgr128");
1275 ++MODULE_ALIAS_CRYPTO("tgr192");
1276 ++MODULE_ALIAS_CRYPTO("tgr160");
1277 ++MODULE_ALIAS_CRYPTO("tgr128");
1278 +
1279 + module_init(tgr192_mod_init);
1280 + module_exit(tgr192_mod_fini);
1281 +diff --git a/crypto/twofish_generic.c b/crypto/twofish_generic.c
1282 +index 2d5000552d0f..ebf7a3efb572 100644
1283 +--- a/crypto/twofish_generic.c
1284 ++++ b/crypto/twofish_generic.c
1285 +@@ -211,4 +211,5 @@ module_exit(twofish_mod_fini);
1286 +
1287 + MODULE_LICENSE("GPL");
1288 + MODULE_DESCRIPTION ("Twofish Cipher Algorithm");
1289 +-MODULE_ALIAS("twofish");
1290 ++MODULE_ALIAS_CRYPTO("twofish");
1291 ++MODULE_ALIAS_CRYPTO("twofish-generic");
1292 +diff --git a/crypto/vmac.c b/crypto/vmac.c
1293 +index 2eb11a30c29c..bf2d3a89845f 100644
1294 +--- a/crypto/vmac.c
1295 ++++ b/crypto/vmac.c
1296 +@@ -713,3 +713,4 @@ module_exit(vmac_module_exit);
1297 +
1298 + MODULE_LICENSE("GPL");
1299 + MODULE_DESCRIPTION("VMAC hash algorithm");
1300 ++MODULE_ALIAS_CRYPTO("vmac");
1301 +diff --git a/crypto/wp512.c b/crypto/wp512.c
1302 +index 180f1d6e03f4..253db94b5479 100644
1303 +--- a/crypto/wp512.c
1304 ++++ b/crypto/wp512.c
1305 +@@ -1167,8 +1167,9 @@ static void __exit wp512_mod_fini(void)
1306 + crypto_unregister_shashes(wp_algs, ARRAY_SIZE(wp_algs));
1307 + }
1308 +
1309 +-MODULE_ALIAS("wp384");
1310 +-MODULE_ALIAS("wp256");
1311 ++MODULE_ALIAS_CRYPTO("wp512");
1312 ++MODULE_ALIAS_CRYPTO("wp384");
1313 ++MODULE_ALIAS_CRYPTO("wp256");
1314 +
1315 + module_init(wp512_mod_init);
1316 + module_exit(wp512_mod_fini);
1317 +diff --git a/crypto/xcbc.c b/crypto/xcbc.c
1318 +index a5fbdf3738cf..df90b332554c 100644
1319 +--- a/crypto/xcbc.c
1320 ++++ b/crypto/xcbc.c
1321 +@@ -286,3 +286,4 @@ module_exit(crypto_xcbc_module_exit);
1322 +
1323 + MODULE_LICENSE("GPL");
1324 + MODULE_DESCRIPTION("XCBC keyed hash algorithm");
1325 ++MODULE_ALIAS_CRYPTO("xcbc");
1326 +diff --git a/crypto/xts.c b/crypto/xts.c
1327 +index ca1608f44cb5..f6fd43f100c8 100644
1328 +--- a/crypto/xts.c
1329 ++++ b/crypto/xts.c
1330 +@@ -362,3 +362,4 @@ module_exit(crypto_module_exit);
1331 +
1332 + MODULE_LICENSE("GPL");
1333 + MODULE_DESCRIPTION("XTS block cipher mode");
1334 ++MODULE_ALIAS_CRYPTO("xts");
1335 +diff --git a/crypto/zlib.c b/crypto/zlib.c
1336 +index 06b62e5cdcc7..d98078835281 100644
1337 +--- a/crypto/zlib.c
1338 ++++ b/crypto/zlib.c
1339 +@@ -378,3 +378,4 @@ module_exit(zlib_mod_fini);
1340 + MODULE_LICENSE("GPL");
1341 + MODULE_DESCRIPTION("Zlib Compression Algorithm");
1342 + MODULE_AUTHOR("Sony Corporation");
1343 ++MODULE_ALIAS_CRYPTO("zlib");
1344 +diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
1345 +index 37acda6fa7e4..136803c47cdb 100644
1346 +--- a/drivers/ata/libata-sff.c
1347 ++++ b/drivers/ata/libata-sff.c
1348 +@@ -1333,7 +1333,19 @@ void ata_sff_flush_pio_task(struct ata_port *ap)
1349 + DPRINTK("ENTER\n");
1350 +
1351 + cancel_delayed_work_sync(&ap->sff_pio_task);
1352 ++
1353 ++ /*
1354 ++ * We wanna reset the HSM state to IDLE. If we do so without
1355 ++ * grabbing the port lock, critical sections protected by it which
1356 ++ * expect the HSM state to stay stable may get surprised. For
1357 ++ * example, we may set IDLE in between the time
1358 ++ * __ata_sff_port_intr() checks for HSM_ST_IDLE and before it calls
1359 ++ * ata_sff_hsm_move() causing ata_sff_hsm_move() to BUG().
1360 ++ */
1361 ++ spin_lock_irq(ap->lock);
1362 + ap->hsm_task_state = HSM_ST_IDLE;
1363 ++ spin_unlock_irq(ap->lock);
1364 ++
1365 + ap->sff_pio_task_link = NULL;
1366 +
1367 + if (ata_msg_ctl(ap))
1368 +diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
1369 +index 2e391730e8be..776b59fbe861 100644
1370 +--- a/drivers/ata/sata_dwc_460ex.c
1371 ++++ b/drivers/ata/sata_dwc_460ex.c
1372 +@@ -797,7 +797,7 @@ static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq)
1373 + if (err) {
1374 + dev_err(host_pvt.dwc_dev, "%s: dma_request_interrupts returns"
1375 + " %d\n", __func__, err);
1376 +- goto error_out;
1377 ++ return err;
1378 + }
1379 +
1380 + /* Enabe DMA */
1381 +@@ -808,11 +808,6 @@ static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq)
1382 + sata_dma_regs);
1383 +
1384 + return 0;
1385 +-
1386 +-error_out:
1387 +- dma_dwc_exit(hsdev);
1388 +-
1389 +- return err;
1390 + }
1391 +
1392 + static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
1393 +@@ -1662,7 +1657,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
1394 + char *ver = (char *)&versionr;
1395 + u8 *base = NULL;
1396 + int err = 0;
1397 +- int irq, rc;
1398 ++ int irq;
1399 + struct ata_host *host;
1400 + struct ata_port_info pi = sata_dwc_port_info[0];
1401 + const struct ata_port_info *ppi[] = { &pi, NULL };
1402 +@@ -1725,7 +1720,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
1403 + if (irq == NO_IRQ) {
1404 + dev_err(&ofdev->dev, "no SATA DMA irq\n");
1405 + err = -ENODEV;
1406 +- goto error_out;
1407 ++ goto error_iomap;
1408 + }
1409 +
1410 + /* Get physical SATA DMA register base address */
1411 +@@ -1734,14 +1729,16 @@ static int sata_dwc_probe(struct platform_device *ofdev)
1412 + dev_err(&ofdev->dev, "ioremap failed for AHBDMA register"
1413 + " address\n");
1414 + err = -ENODEV;
1415 +- goto error_out;
1416 ++ goto error_iomap;
1417 + }
1418 +
1419 + /* Save dev for later use in dev_xxx() routines */
1420 + host_pvt.dwc_dev = &ofdev->dev;
1421 +
1422 + /* Initialize AHB DMAC */
1423 +- dma_dwc_init(hsdev, irq);
1424 ++ err = dma_dwc_init(hsdev, irq);
1425 ++ if (err)
1426 ++ goto error_dma_iomap;
1427 +
1428 + /* Enable SATA Interrupts */
1429 + sata_dwc_enable_interrupts(hsdev);
1430 +@@ -1759,9 +1756,8 @@ static int sata_dwc_probe(struct platform_device *ofdev)
1431 + * device discovery process, invoking our port_start() handler &
1432 + * error_handler() to execute a dummy Softreset EH session
1433 + */
1434 +- rc = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
1435 +-
1436 +- if (rc != 0)
1437 ++ err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
1438 ++ if (err)
1439 + dev_err(&ofdev->dev, "failed to activate host");
1440 +
1441 + dev_set_drvdata(&ofdev->dev, host);
1442 +@@ -1770,7 +1766,8 @@ static int sata_dwc_probe(struct platform_device *ofdev)
1443 + error_out:
1444 + /* Free SATA DMA resources */
1445 + dma_dwc_exit(hsdev);
1446 +-
1447 ++error_dma_iomap:
1448 ++ iounmap((void __iomem *)host_pvt.sata_dma_regs);
1449 + error_iomap:
1450 + iounmap(base);
1451 + error_kmalloc:
1452 +@@ -1791,6 +1788,7 @@ static int sata_dwc_remove(struct platform_device *ofdev)
1453 + /* Free SATA DMA resources */
1454 + dma_dwc_exit(hsdev);
1455 +
1456 ++ iounmap((void __iomem *)host_pvt.sata_dma_regs);
1457 + iounmap(hsdev->reg_base);
1458 + kfree(hsdev);
1459 + kfree(host);
1460 +diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
1461 +index c24379ffd4e3..b2ae184a637c 100644
1462 +--- a/drivers/block/drbd/drbd_req.c
1463 ++++ b/drivers/block/drbd/drbd_req.c
1464 +@@ -1309,6 +1309,7 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct
1465 + struct request_queue * const b =
1466 + mdev->ldev->backing_bdev->bd_disk->queue;
1467 + if (b->merge_bvec_fn) {
1468 ++ bvm->bi_bdev = mdev->ldev->backing_bdev;
1469 + backing_limit = b->merge_bvec_fn(b, bvm, bvec);
1470 + limit = min(limit, backing_limit);
1471 + }
1472 +diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
1473 +index 5dcc8305abd1..711dcf4a0313 100644
1474 +--- a/drivers/bus/mvebu-mbus.c
1475 ++++ b/drivers/bus/mvebu-mbus.c
1476 +@@ -209,12 +209,25 @@ static void mvebu_mbus_disable_window(struct mvebu_mbus_state *mbus,
1477 + }
1478 +
1479 + /* Checks whether the given window number is available */
1480 ++
1481 ++/* On Armada XP, 375 and 38x the MBus window 13 has the remap
1482 ++ * capability, like windows 0 to 7. However, the mvebu-mbus driver
1483 ++ * isn't currently taking into account this special case, which means
1484 ++ * that when window 13 is actually used, the remap registers are left
1485 ++ * to 0, making the device using this MBus window unavailable. The
1486 ++ * quick fix for stable is to not use window 13. A follow up patch
1487 ++ * will correctly handle this window.
1488 ++*/
1489 + static int mvebu_mbus_window_is_free(struct mvebu_mbus_state *mbus,
1490 + const int win)
1491 + {
1492 + void __iomem *addr = mbus->mbuswins_base +
1493 + mbus->soc->win_cfg_offset(win);
1494 + u32 ctrl = readl(addr + WIN_CTRL_OFF);
1495 ++
1496 ++ if (win == 13)
1497 ++ return false;
1498 ++
1499 + return !(ctrl & WIN_CTRL_ENABLE);
1500 + }
1501 +
1502 +diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
1503 +index b7960185919d..3dfa3e5e3705 100644
1504 +--- a/drivers/clocksource/exynos_mct.c
1505 ++++ b/drivers/clocksource/exynos_mct.c
1506 +@@ -94,8 +94,8 @@ static void exynos4_mct_write(unsigned int value, unsigned long offset)
1507 + __raw_writel(value, reg_base + offset);
1508 +
1509 + if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) {
1510 +- stat_addr = (offset & ~EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
1511 +- switch (offset & EXYNOS4_MCT_L_MASK) {
1512 ++ stat_addr = (offset & EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
1513 ++ switch (offset & ~EXYNOS4_MCT_L_MASK) {
1514 + case MCT_L_TCON_OFFSET:
1515 + mask = 1 << 3; /* L_TCON write status */
1516 + break;
1517 +diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
1518 +index 633ba945e153..c178ed8c3908 100644
1519 +--- a/drivers/crypto/padlock-aes.c
1520 ++++ b/drivers/crypto/padlock-aes.c
1521 +@@ -563,4 +563,4 @@ MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
1522 + MODULE_LICENSE("GPL");
1523 + MODULE_AUTHOR("Michal Ludvig");
1524 +
1525 +-MODULE_ALIAS("aes");
1526 ++MODULE_ALIAS_CRYPTO("aes");
1527 +diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
1528 +index 9266c0e25492..93d7753ab38a 100644
1529 +--- a/drivers/crypto/padlock-sha.c
1530 ++++ b/drivers/crypto/padlock-sha.c
1531 +@@ -593,7 +593,7 @@ MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
1532 + MODULE_LICENSE("GPL");
1533 + MODULE_AUTHOR("Michal Ludvig");
1534 +
1535 +-MODULE_ALIAS("sha1-all");
1536 +-MODULE_ALIAS("sha256-all");
1537 +-MODULE_ALIAS("sha1-padlock");
1538 +-MODULE_ALIAS("sha256-padlock");
1539 ++MODULE_ALIAS_CRYPTO("sha1-all");
1540 ++MODULE_ALIAS_CRYPTO("sha256-all");
1541 ++MODULE_ALIAS_CRYPTO("sha1-padlock");
1542 ++MODULE_ALIAS_CRYPTO("sha256-padlock");
1543 +diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
1544 +index 3833bd71cc5d..e08275de37ef 100644
1545 +--- a/drivers/crypto/ux500/cryp/cryp_core.c
1546 ++++ b/drivers/crypto/ux500/cryp/cryp_core.c
1547 +@@ -1775,7 +1775,7 @@ module_exit(ux500_cryp_mod_fini);
1548 + module_param(cryp_mode, int, 0);
1549 +
1550 + MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 CRYP crypto engine.");
1551 +-MODULE_ALIAS("aes-all");
1552 +-MODULE_ALIAS("des-all");
1553 ++MODULE_ALIAS_CRYPTO("aes-all");
1554 ++MODULE_ALIAS_CRYPTO("des-all");
1555 +
1556 + MODULE_LICENSE("GPL");
1557 +diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
1558 +index cf5508967539..6789c1653913 100644
1559 +--- a/drivers/crypto/ux500/hash/hash_core.c
1560 ++++ b/drivers/crypto/ux500/hash/hash_core.c
1561 +@@ -1998,7 +1998,7 @@ module_exit(ux500_hash_mod_fini);
1562 + MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
1563 + MODULE_LICENSE("GPL");
1564 +
1565 +-MODULE_ALIAS("sha1-all");
1566 +-MODULE_ALIAS("sha256-all");
1567 +-MODULE_ALIAS("hmac-sha1-all");
1568 +-MODULE_ALIAS("hmac-sha256-all");
1569 ++MODULE_ALIAS_CRYPTO("sha1-all");
1570 ++MODULE_ALIAS_CRYPTO("sha256-all");
1571 ++MODULE_ALIAS_CRYPTO("hmac-sha1-all");
1572 ++MODULE_ALIAS_CRYPTO("hmac-sha256-all");
1573 +diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
1574 +index c2534d62911c..1d74a80e031e 100644
1575 +--- a/drivers/gpio/gpiolib.c
1576 ++++ b/drivers/gpio/gpiolib.c
1577 +@@ -362,7 +362,7 @@ static ssize_t gpio_value_store(struct device *dev,
1578 + return status;
1579 + }
1580 +
1581 +-static const DEVICE_ATTR(value, 0644,
1582 ++static DEVICE_ATTR(value, 0644,
1583 + gpio_value_show, gpio_value_store);
1584 +
1585 + static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
1586 +@@ -580,17 +580,17 @@ static ssize_t gpio_active_low_store(struct device *dev,
1587 + return status ? : size;
1588 + }
1589 +
1590 +-static const DEVICE_ATTR(active_low, 0644,
1591 ++static DEVICE_ATTR(active_low, 0644,
1592 + gpio_active_low_show, gpio_active_low_store);
1593 +
1594 +-static const struct attribute *gpio_attrs[] = {
1595 ++static struct attribute *gpio_attrs[] = {
1596 + &dev_attr_value.attr,
1597 + &dev_attr_active_low.attr,
1598 + NULL,
1599 + };
1600 +
1601 + static const struct attribute_group gpio_attr_group = {
1602 +- .attrs = (struct attribute **) gpio_attrs,
1603 ++ .attrs = gpio_attrs,
1604 + };
1605 +
1606 + /*
1607 +@@ -627,7 +627,7 @@ static ssize_t chip_ngpio_show(struct device *dev,
1608 + }
1609 + static DEVICE_ATTR(ngpio, 0444, chip_ngpio_show, NULL);
1610 +
1611 +-static const struct attribute *gpiochip_attrs[] = {
1612 ++static struct attribute *gpiochip_attrs[] = {
1613 + &dev_attr_base.attr,
1614 + &dev_attr_label.attr,
1615 + &dev_attr_ngpio.attr,
1616 +@@ -635,7 +635,7 @@ static const struct attribute *gpiochip_attrs[] = {
1617 + };
1618 +
1619 + static const struct attribute_group gpiochip_attr_group = {
1620 +- .attrs = (struct attribute **) gpiochip_attrs,
1621 ++ .attrs = gpiochip_attrs,
1622 + };
1623 +
1624 + /*
1625 +@@ -806,20 +806,24 @@ static int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
1626 + if (direction_may_change) {
1627 + status = device_create_file(dev, &dev_attr_direction);
1628 + if (status)
1629 +- goto fail_unregister_device;
1630 ++ goto fail_remove_attr_group;
1631 + }
1632 +
1633 + if (gpiod_to_irq(desc) >= 0 && (direction_may_change ||
1634 + !test_bit(FLAG_IS_OUT, &desc->flags))) {
1635 + status = device_create_file(dev, &dev_attr_edge);
1636 + if (status)
1637 +- goto fail_unregister_device;
1638 ++ goto fail_remove_attr_direction;
1639 + }
1640 +
1641 + set_bit(FLAG_EXPORT, &desc->flags);
1642 + mutex_unlock(&sysfs_lock);
1643 + return 0;
1644 +
1645 ++fail_remove_attr_direction:
1646 ++ device_remove_file(dev, &dev_attr_direction);
1647 ++fail_remove_attr_group:
1648 ++ sysfs_remove_group(&dev->kobj, &gpio_attr_group);
1649 + fail_unregister_device:
1650 + device_unregister(dev);
1651 + fail_unlock:
1652 +@@ -971,6 +975,9 @@ static void gpiod_unexport(struct gpio_desc *desc)
1653 + mutex_unlock(&sysfs_lock);
1654 +
1655 + if (dev) {
1656 ++ device_remove_file(dev, &dev_attr_edge);
1657 ++ device_remove_file(dev, &dev_attr_direction);
1658 ++ sysfs_remove_group(&dev->kobj, &gpio_attr_group);
1659 + device_unregister(dev);
1660 + put_device(dev);
1661 + }
1662 +@@ -1036,6 +1043,7 @@ static void gpiochip_unexport(struct gpio_chip *chip)
1663 + mutex_lock(&sysfs_lock);
1664 + dev = class_find_device(&gpio_class, NULL, chip, match_export);
1665 + if (dev) {
1666 ++ sysfs_remove_group(&dev->kobj, &gpiochip_attr_group);
1667 + put_device(dev);
1668 + device_unregister(dev);
1669 + chip->exported = 0;
1670 +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
1671 +index 0a30088178b0..0b71a0aaf4fc 100644
1672 +--- a/drivers/gpu/drm/i915/i915_gem.c
1673 ++++ b/drivers/gpu/drm/i915/i915_gem.c
1674 +@@ -4449,7 +4449,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
1675 + if (!mutex_is_locked(mutex))
1676 + return false;
1677 +
1678 +-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
1679 ++#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES)
1680 + return mutex->owner == task;
1681 + #else
1682 + /* Since UP may be pre-empted, we cannot assume that we own the lock */
1683 +diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
1684 +index de737ba1d351..b361ce4ce511 100644
1685 +--- a/drivers/md/dm-cache-metadata.c
1686 ++++ b/drivers/md/dm-cache-metadata.c
1687 +@@ -88,6 +88,9 @@ struct cache_disk_superblock {
1688 + } __packed;
1689 +
1690 + struct dm_cache_metadata {
1691 ++ atomic_t ref_count;
1692 ++ struct list_head list;
1693 ++
1694 + struct block_device *bdev;
1695 + struct dm_block_manager *bm;
1696 + struct dm_space_map *metadata_sm;
1697 +@@ -634,10 +637,10 @@ static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned *flags)
1698 +
1699 + /*----------------------------------------------------------------*/
1700 +
1701 +-struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
1702 +- sector_t data_block_size,
1703 +- bool may_format_device,
1704 +- size_t policy_hint_size)
1705 ++static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
1706 ++ sector_t data_block_size,
1707 ++ bool may_format_device,
1708 ++ size_t policy_hint_size)
1709 + {
1710 + int r;
1711 + struct dm_cache_metadata *cmd;
1712 +@@ -648,6 +651,7 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
1713 + return NULL;
1714 + }
1715 +
1716 ++ atomic_set(&cmd->ref_count, 1);
1717 + init_rwsem(&cmd->root_lock);
1718 + cmd->bdev = bdev;
1719 + cmd->data_block_size = data_block_size;
1720 +@@ -670,10 +674,95 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
1721 + return cmd;
1722 + }
1723 +
1724 ++/*
1725 ++ * We keep a little list of ref counted metadata objects to prevent two
1726 ++ * different target instances creating separate bufio instances. This is
1727 ++ * an issue if a table is reloaded before the suspend.
1728 ++ */
1729 ++static DEFINE_MUTEX(table_lock);
1730 ++static LIST_HEAD(table);
1731 ++
1732 ++static struct dm_cache_metadata *lookup(struct block_device *bdev)
1733 ++{
1734 ++ struct dm_cache_metadata *cmd;
1735 ++
1736 ++ list_for_each_entry(cmd, &table, list)
1737 ++ if (cmd->bdev == bdev) {
1738 ++ atomic_inc(&cmd->ref_count);
1739 ++ return cmd;
1740 ++ }
1741 ++
1742 ++ return NULL;
1743 ++}
1744 ++
1745 ++static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
1746 ++ sector_t data_block_size,
1747 ++ bool may_format_device,
1748 ++ size_t policy_hint_size)
1749 ++{
1750 ++ struct dm_cache_metadata *cmd, *cmd2;
1751 ++
1752 ++ mutex_lock(&table_lock);
1753 ++ cmd = lookup(bdev);
1754 ++ mutex_unlock(&table_lock);
1755 ++
1756 ++ if (cmd)
1757 ++ return cmd;
1758 ++
1759 ++ cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size);
1760 ++ if (cmd) {
1761 ++ mutex_lock(&table_lock);
1762 ++ cmd2 = lookup(bdev);
1763 ++ if (cmd2) {
1764 ++ mutex_unlock(&table_lock);
1765 ++ __destroy_persistent_data_objects(cmd);
1766 ++ kfree(cmd);
1767 ++ return cmd2;
1768 ++ }
1769 ++ list_add(&cmd->list, &table);
1770 ++ mutex_unlock(&table_lock);
1771 ++ }
1772 ++
1773 ++ return cmd;
1774 ++}
1775 ++
1776 ++static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size)
1777 ++{
1778 ++ if (cmd->data_block_size != data_block_size) {
1779 ++ DMERR("data_block_size (%llu) different from that in metadata (%llu)\n",
1780 ++ (unsigned long long) data_block_size,
1781 ++ (unsigned long long) cmd->data_block_size);
1782 ++ return false;
1783 ++ }
1784 ++
1785 ++ return true;
1786 ++}
1787 ++
1788 ++struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
1789 ++ sector_t data_block_size,
1790 ++ bool may_format_device,
1791 ++ size_t policy_hint_size)
1792 ++{
1793 ++ struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size,
1794 ++ may_format_device, policy_hint_size);
1795 ++ if (cmd && !same_params(cmd, data_block_size)) {
1796 ++ dm_cache_metadata_close(cmd);
1797 ++ return NULL;
1798 ++ }
1799 ++
1800 ++ return cmd;
1801 ++}
1802 ++
1803 + void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
1804 + {
1805 +- __destroy_persistent_data_objects(cmd);
1806 +- kfree(cmd);
1807 ++ if (atomic_dec_and_test(&cmd->ref_count)) {
1808 ++ mutex_lock(&table_lock);
1809 ++ list_del(&cmd->list);
1810 ++ mutex_unlock(&table_lock);
1811 ++
1812 ++ __destroy_persistent_data_objects(cmd);
1813 ++ kfree(cmd);
1814 ++ }
1815 + }
1816 +
1817 + int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
1818 +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
1819 +index 2332b5ced0dd..4daf5c03b33b 100644
1820 +--- a/drivers/md/raid5.c
1821 ++++ b/drivers/md/raid5.c
1822 +@@ -2678,7 +2678,8 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
1823 + (s->failed >= 2 && fdev[1]->toread) ||
1824 + (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite &&
1825 + !test_bit(R5_OVERWRITE, &fdev[0]->flags)) ||
1826 +- (sh->raid_conf->level == 6 && s->failed && s->to_write))) {
1827 ++ ((sh->raid_conf->level == 6 || sh->sector >= sh->raid_conf->mddev->recovery_cp)
1828 ++ && s->failed && s->to_write))) {
1829 + /* we would like to get this block, possibly by computing it,
1830 + * otherwise read it if the backing disk is insync
1831 + */
1832 +diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
1833 +index 9bf47a064cdf..a4694aa20a3e 100644
1834 +--- a/drivers/net/can/dev.c
1835 ++++ b/drivers/net/can/dev.c
1836 +@@ -643,10 +643,14 @@ static int can_changelink(struct net_device *dev,
1837 + if (dev->flags & IFF_UP)
1838 + return -EBUSY;
1839 + cm = nla_data(data[IFLA_CAN_CTRLMODE]);
1840 +- if (cm->flags & ~priv->ctrlmode_supported)
1841 ++
1842 ++ /* check whether changed bits are allowed to be modified */
1843 ++ if (cm->mask & ~priv->ctrlmode_supported)
1844 + return -EOPNOTSUPP;
1845 ++
1846 ++ /* clear bits to be modified and copy the flag values */
1847 + priv->ctrlmode &= ~cm->mask;
1848 +- priv->ctrlmode |= cm->flags;
1849 ++ priv->ctrlmode |= (cm->flags & cm->mask);
1850 + }
1851 +
1852 + if (data[IFLA_CAN_BITTIMING]) {
1853 +diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
1854 +index bb7ee9cb00b1..9c9fc69a01b3 100644
1855 +--- a/drivers/pinctrl/core.c
1856 ++++ b/drivers/pinctrl/core.c
1857 +@@ -1693,14 +1693,15 @@ void pinctrl_unregister(struct pinctrl_dev *pctldev)
1858 + if (pctldev == NULL)
1859 + return;
1860 +
1861 +- mutex_lock(&pinctrldev_list_mutex);
1862 + mutex_lock(&pctldev->mutex);
1863 +-
1864 + pinctrl_remove_device_debugfs(pctldev);
1865 ++ mutex_unlock(&pctldev->mutex);
1866 +
1867 + if (!IS_ERR(pctldev->p))
1868 + pinctrl_put(pctldev->p);
1869 +
1870 ++ mutex_lock(&pinctrldev_list_mutex);
1871 ++ mutex_lock(&pctldev->mutex);
1872 + /* TODO: check that no pinmuxes are still active? */
1873 + list_del(&pctldev->node);
1874 + /* Destroy descriptor tree */
1875 +diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
1876 +index 9de41aa14896..6f512fa4fa03 100644
1877 +--- a/drivers/s390/crypto/ap_bus.c
1878 ++++ b/drivers/s390/crypto/ap_bus.c
1879 +@@ -44,6 +44,7 @@
1880 + #include <linux/hrtimer.h>
1881 + #include <linux/ktime.h>
1882 + #include <asm/facility.h>
1883 ++#include <linux/crypto.h>
1884 +
1885 + #include "ap_bus.h"
1886 +
1887 +diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
1888 +index 0ff37a5e286c..f7732f3b9804 100644
1889 +--- a/drivers/scsi/ipr.c
1890 ++++ b/drivers/scsi/ipr.c
1891 +@@ -645,6 +645,7 @@ static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
1892 + ipr_reinit_ipr_cmnd(ipr_cmd);
1893 + ipr_cmd->u.scratch = 0;
1894 + ipr_cmd->sibling = NULL;
1895 ++ ipr_cmd->eh_comp = NULL;
1896 + ipr_cmd->fast_done = fast_done;
1897 + init_timer(&ipr_cmd->timer);
1898 + }
1899 +@@ -810,6 +811,8 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
1900 +
1901 + scsi_dma_unmap(ipr_cmd->scsi_cmd);
1902 + scsi_cmd->scsi_done(scsi_cmd);
1903 ++ if (ipr_cmd->eh_comp)
1904 ++ complete(ipr_cmd->eh_comp);
1905 + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1906 + }
1907 +
1908 +@@ -4767,6 +4770,84 @@ static int ipr_slave_alloc(struct scsi_device *sdev)
1909 + return rc;
1910 + }
1911 +
1912 ++/**
1913 ++ * ipr_match_lun - Match function for specified LUN
1914 ++ * @ipr_cmd: ipr command struct
1915 ++ * @device: device to match (sdev)
1916 ++ *
1917 ++ * Returns:
1918 ++ * 1 if command matches sdev / 0 if command does not match sdev
1919 ++ **/
1920 ++static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
1921 ++{
1922 ++ if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
1923 ++ return 1;
1924 ++ return 0;
1925 ++}
1926 ++
1927 ++/**
1928 ++ * ipr_wait_for_ops - Wait for matching commands to complete
1929 ++ * @ipr_cmd: ipr command struct
1930 ++ * @device: device to match (sdev)
1931 ++ * @match: match function to use
1932 ++ *
1933 ++ * Returns:
1934 ++ * SUCCESS / FAILED
1935 ++ **/
1936 ++static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
1937 ++ int (*match)(struct ipr_cmnd *, void *))
1938 ++{
1939 ++ struct ipr_cmnd *ipr_cmd;
1940 ++ int wait;
1941 ++ unsigned long flags;
1942 ++ struct ipr_hrr_queue *hrrq;
1943 ++ signed long timeout = IPR_ABORT_TASK_TIMEOUT;
1944 ++ DECLARE_COMPLETION_ONSTACK(comp);
1945 ++
1946 ++ ENTER;
1947 ++ do {
1948 ++ wait = 0;
1949 ++
1950 ++ for_each_hrrq(hrrq, ioa_cfg) {
1951 ++ spin_lock_irqsave(hrrq->lock, flags);
1952 ++ list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
1953 ++ if (match(ipr_cmd, device)) {
1954 ++ ipr_cmd->eh_comp = &comp;
1955 ++ wait++;
1956 ++ }
1957 ++ }
1958 ++ spin_unlock_irqrestore(hrrq->lock, flags);
1959 ++ }
1960 ++
1961 ++ if (wait) {
1962 ++ timeout = wait_for_completion_timeout(&comp, timeout);
1963 ++
1964 ++ if (!timeout) {
1965 ++ wait = 0;
1966 ++
1967 ++ for_each_hrrq(hrrq, ioa_cfg) {
1968 ++ spin_lock_irqsave(hrrq->lock, flags);
1969 ++ list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
1970 ++ if (match(ipr_cmd, device)) {
1971 ++ ipr_cmd->eh_comp = NULL;
1972 ++ wait++;
1973 ++ }
1974 ++ }
1975 ++ spin_unlock_irqrestore(hrrq->lock, flags);
1976 ++ }
1977 ++
1978 ++ if (wait)
1979 ++ dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
1980 ++ LEAVE;
1981 ++ return wait ? FAILED : SUCCESS;
1982 ++ }
1983 ++ }
1984 ++ } while (wait);
1985 ++
1986 ++ LEAVE;
1987 ++ return SUCCESS;
1988 ++}
1989 ++
1990 + static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
1991 + {
1992 + struct ipr_ioa_cfg *ioa_cfg;
1993 +@@ -4985,11 +5066,17 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
1994 + static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
1995 + {
1996 + int rc;
1997 ++ struct ipr_ioa_cfg *ioa_cfg;
1998 ++
1999 ++ ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
2000 +
2001 + spin_lock_irq(cmd->device->host->host_lock);
2002 + rc = __ipr_eh_dev_reset(cmd);
2003 + spin_unlock_irq(cmd->device->host->host_lock);
2004 +
2005 ++ if (rc == SUCCESS)
2006 ++ rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
2007 ++
2008 + return rc;
2009 + }
2010 +
2011 +@@ -5167,13 +5254,18 @@ static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
2012 + {
2013 + unsigned long flags;
2014 + int rc;
2015 ++ struct ipr_ioa_cfg *ioa_cfg;
2016 +
2017 + ENTER;
2018 +
2019 ++ ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
2020 ++
2021 + spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
2022 + rc = ipr_cancel_op(scsi_cmd);
2023 + spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
2024 +
2025 ++ if (rc == SUCCESS)
2026 ++ rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
2027 + LEAVE;
2028 + return rc;
2029 + }
2030 +diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
2031 +index 07a85ce41782..535f57328a72 100644
2032 +--- a/drivers/scsi/ipr.h
2033 ++++ b/drivers/scsi/ipr.h
2034 +@@ -1578,6 +1578,7 @@ struct ipr_cmnd {
2035 + struct scsi_device *sdev;
2036 + } u;
2037 +
2038 ++ struct completion *eh_comp;
2039 + struct ipr_hrr_queue *hrrq;
2040 + struct ipr_ioa_cfg *ioa_cfg;
2041 + };
2042 +diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
2043 +index 301b08496478..1d94316f0ea4 100644
2044 +--- a/drivers/xen/swiotlb-xen.c
2045 ++++ b/drivers/xen/swiotlb-xen.c
2046 +@@ -390,7 +390,7 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
2047 +
2048 + /* NOTE: We use dev_addr here, not paddr! */
2049 + if (is_xen_swiotlb_buffer(dev_addr)) {
2050 +- swiotlb_tbl_unmap_single(hwdev, dev_addr, size, dir);
2051 ++ swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
2052 + return;
2053 + }
2054 +
2055 +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
2056 +index e4c4ac07cc32..2a71466b0115 100644
2057 +--- a/fs/ext4/ext4.h
2058 ++++ b/fs/ext4/ext4.h
2059 +@@ -589,6 +589,7 @@ enum {
2060 + #define EXT4_FREE_BLOCKS_NO_QUOT_UPDATE 0x0008
2061 + #define EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER 0x0010
2062 + #define EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER 0x0020
2063 ++#define EXT4_FREE_BLOCKS_RESERVE 0x0040
2064 +
2065 + /*
2066 + * Flags used by ext4_discard_partial_page_buffers
2067 +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
2068 +index 84d817b842a8..7fbd1c5b74af 100644
2069 +--- a/fs/ext4/extents.c
2070 ++++ b/fs/ext4/extents.c
2071 +@@ -1722,7 +1722,8 @@ static void ext4_ext_try_to_merge_up(handle_t *handle,
2072 +
2073 + brelse(path[1].p_bh);
2074 + ext4_free_blocks(handle, inode, NULL, blk, 1,
2075 +- EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
2076 ++ EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET |
2077 ++ EXT4_FREE_BLOCKS_RESERVE);
2078 + }
2079 +
2080 + /*
2081 +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
2082 +index 162b80d527a0..df5050f9080b 100644
2083 +--- a/fs/ext4/mballoc.c
2084 ++++ b/fs/ext4/mballoc.c
2085 +@@ -4610,6 +4610,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
2086 + struct buffer_head *gd_bh;
2087 + ext4_group_t block_group;
2088 + struct ext4_sb_info *sbi;
2089 ++ struct ext4_inode_info *ei = EXT4_I(inode);
2090 + struct ext4_buddy e4b;
2091 + unsigned int count_clusters;
2092 + int err = 0;
2093 +@@ -4808,7 +4809,6 @@ do_more:
2094 + ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh);
2095 + ext4_group_desc_csum_set(sb, block_group, gdp);
2096 + ext4_unlock_group(sb, block_group);
2097 +- percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);
2098 +
2099 + if (sbi->s_log_groups_per_flex) {
2100 + ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
2101 +@@ -4816,10 +4816,23 @@ do_more:
2102 + &sbi->s_flex_groups[flex_group].free_clusters);
2103 + }
2104 +
2105 +- ext4_mb_unload_buddy(&e4b);
2106 +-
2107 +- if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
2108 ++ if (flags & EXT4_FREE_BLOCKS_RESERVE && ei->i_reserved_data_blocks) {
2109 ++ percpu_counter_add(&sbi->s_dirtyclusters_counter,
2110 ++ count_clusters);
2111 ++ spin_lock(&ei->i_block_reservation_lock);
2112 ++ if (flags & EXT4_FREE_BLOCKS_METADATA)
2113 ++ ei->i_reserved_meta_blocks += count_clusters;
2114 ++ else
2115 ++ ei->i_reserved_data_blocks += count_clusters;
2116 ++ spin_unlock(&ei->i_block_reservation_lock);
2117 ++ if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
2118 ++ dquot_reclaim_block(inode,
2119 ++ EXT4_C2B(sbi, count_clusters));
2120 ++ } else if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
2121 + dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
2122 ++ percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);
2123 ++
2124 ++ ext4_mb_unload_buddy(&e4b);
2125 +
2126 + /* We dirtied the bitmap block */
2127 + BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
2128 +diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
2129 +index 7a10e047bc33..4f7f451ca70d 100644
2130 +--- a/fs/quota/dquot.c
2131 ++++ b/fs/quota/dquot.c
2132 +@@ -1102,6 +1102,14 @@ static void dquot_claim_reserved_space(struct dquot *dquot, qsize_t number)
2133 + dquot->dq_dqb.dqb_rsvspace -= number;
2134 + }
2135 +
2136 ++static void dquot_reclaim_reserved_space(struct dquot *dquot, qsize_t number)
2137 ++{
2138 ++ if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number))
2139 ++ number = dquot->dq_dqb.dqb_curspace;
2140 ++ dquot->dq_dqb.dqb_rsvspace += number;
2141 ++ dquot->dq_dqb.dqb_curspace -= number;
2142 ++}
2143 ++
2144 + static inline
2145 + void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
2146 + {
2147 +@@ -1536,6 +1544,15 @@ void inode_claim_rsv_space(struct inode *inode, qsize_t number)
2148 + }
2149 + EXPORT_SYMBOL(inode_claim_rsv_space);
2150 +
2151 ++void inode_reclaim_rsv_space(struct inode *inode, qsize_t number)
2152 ++{
2153 ++ spin_lock(&inode->i_lock);
2154 ++ *inode_reserved_space(inode) += number;
2155 ++ __inode_sub_bytes(inode, number);
2156 ++ spin_unlock(&inode->i_lock);
2157 ++}
2158 ++EXPORT_SYMBOL(inode_reclaim_rsv_space);
2159 ++
2160 + void inode_sub_rsv_space(struct inode *inode, qsize_t number)
2161 + {
2162 + spin_lock(&inode->i_lock);
2163 +@@ -1710,6 +1727,35 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
2164 + EXPORT_SYMBOL(dquot_claim_space_nodirty);
2165 +
2166 + /*
2167 ++ * Convert allocated space back to in-memory reserved quotas
2168 ++ */
2169 ++void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
2170 ++{
2171 ++ int cnt;
2172 ++
2173 ++ if (!dquot_active(inode)) {
2174 ++ inode_reclaim_rsv_space(inode, number);
2175 ++ return;
2176 ++ }
2177 ++
2178 ++ down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
2179 ++ spin_lock(&dq_data_lock);
2180 ++ /* Claim reserved quotas to allocated quotas */
2181 ++ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2182 ++ if (inode->i_dquot[cnt])
2183 ++ dquot_reclaim_reserved_space(inode->i_dquot[cnt],
2184 ++ number);
2185 ++ }
2186 ++ /* Update inode bytes */
2187 ++ inode_reclaim_rsv_space(inode, number);
2188 ++ spin_unlock(&dq_data_lock);
2189 ++ mark_all_dquot_dirty(inode->i_dquot);
2190 ++ up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
2191 ++ return;
2192 ++}
2193 ++EXPORT_SYMBOL(dquot_reclaim_space_nodirty);
2194 ++
2195 ++/*
2196 + * This operation can block, but only after everything is updated
2197 + */
2198 + void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
2199 +diff --git a/fs/stat.c b/fs/stat.c
2200 +index 04ce1ac20d20..d0ea7ef75e26 100644
2201 +--- a/fs/stat.c
2202 ++++ b/fs/stat.c
2203 +@@ -447,9 +447,8 @@ void inode_add_bytes(struct inode *inode, loff_t bytes)
2204 +
2205 + EXPORT_SYMBOL(inode_add_bytes);
2206 +
2207 +-void inode_sub_bytes(struct inode *inode, loff_t bytes)
2208 ++void __inode_sub_bytes(struct inode *inode, loff_t bytes)
2209 + {
2210 +- spin_lock(&inode->i_lock);
2211 + inode->i_blocks -= bytes >> 9;
2212 + bytes &= 511;
2213 + if (inode->i_bytes < bytes) {
2214 +@@ -457,6 +456,14 @@ void inode_sub_bytes(struct inode *inode, loff_t bytes)
2215 + inode->i_bytes += 512;
2216 + }
2217 + inode->i_bytes -= bytes;
2218 ++}
2219 ++
2220 ++EXPORT_SYMBOL(__inode_sub_bytes);
2221 ++
2222 ++void inode_sub_bytes(struct inode *inode, loff_t bytes)
2223 ++{
2224 ++ spin_lock(&inode->i_lock);
2225 ++ __inode_sub_bytes(inode, bytes);
2226 + spin_unlock(&inode->i_lock);
2227 + }
2228 +
2229 +diff --git a/include/linux/crypto.h b/include/linux/crypto.h
2230 +index b92eadf92d72..2b00d92a6e6f 100644
2231 +--- a/include/linux/crypto.h
2232 ++++ b/include/linux/crypto.h
2233 +@@ -26,6 +26,19 @@
2234 + #include <linux/uaccess.h>
2235 +
2236 + /*
2237 ++ * Autoloaded crypto modules should only use a prefixed name to avoid allowing
2238 ++ * arbitrary modules to be loaded. Loading from userspace may still need the
2239 ++ * unprefixed names, so retains those aliases as well.
2240 ++ * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
2241 ++ * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
2242 ++ * expands twice on the same line. Instead, use a separate base name for the
2243 ++ * alias.
2244 ++ */
2245 ++#define MODULE_ALIAS_CRYPTO(name) \
2246 ++ __MODULE_INFO(alias, alias_userspace, name); \
2247 ++ __MODULE_INFO(alias, alias_crypto, "crypto-" name)
2248 ++
2249 ++/*
2250 + * Algorithm masks and types.
2251 + */
2252 + #define CRYPTO_ALG_TYPE_MASK 0x0000000f
2253 +diff --git a/include/linux/fs.h b/include/linux/fs.h
2254 +index 65c2be22b601..d57bc5df7225 100644
2255 +--- a/include/linux/fs.h
2256 ++++ b/include/linux/fs.h
2257 +@@ -2489,6 +2489,7 @@ extern void generic_fillattr(struct inode *, struct kstat *);
2258 + extern int vfs_getattr(struct path *, struct kstat *);
2259 + void __inode_add_bytes(struct inode *inode, loff_t bytes);
2260 + void inode_add_bytes(struct inode *inode, loff_t bytes);
2261 ++void __inode_sub_bytes(struct inode *inode, loff_t bytes);
2262 + void inode_sub_bytes(struct inode *inode, loff_t bytes);
2263 + loff_t inode_get_bytes(struct inode *inode);
2264 + void inode_set_bytes(struct inode *inode, loff_t bytes);
2265 +diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
2266 +index 1c50093ae656..6965fe394c3b 100644
2267 +--- a/include/linux/quotaops.h
2268 ++++ b/include/linux/quotaops.h
2269 +@@ -41,6 +41,7 @@ void __quota_error(struct super_block *sb, const char *func,
2270 + void inode_add_rsv_space(struct inode *inode, qsize_t number);
2271 + void inode_claim_rsv_space(struct inode *inode, qsize_t number);
2272 + void inode_sub_rsv_space(struct inode *inode, qsize_t number);
2273 ++void inode_reclaim_rsv_space(struct inode *inode, qsize_t number);
2274 +
2275 + void dquot_initialize(struct inode *inode);
2276 + void dquot_drop(struct inode *inode);
2277 +@@ -59,6 +60,7 @@ int dquot_alloc_inode(const struct inode *inode);
2278 +
2279 + int dquot_claim_space_nodirty(struct inode *inode, qsize_t number);
2280 + void dquot_free_inode(const struct inode *inode);
2281 ++void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number);
2282 +
2283 + int dquot_disable(struct super_block *sb, int type, unsigned int flags);
2284 + /* Suspend quotas on remount RO */
2285 +@@ -238,6 +240,13 @@ static inline int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
2286 + return 0;
2287 + }
2288 +
2289 ++static inline int dquot_reclaim_space_nodirty(struct inode *inode,
2290 ++ qsize_t number)
2291 ++{
2292 ++ inode_sub_bytes(inode, number);
2293 ++ return 0;
2294 ++}
2295 ++
2296 + static inline int dquot_disable(struct super_block *sb, int type,
2297 + unsigned int flags)
2298 + {
2299 +@@ -336,6 +345,12 @@ static inline int dquot_claim_block(struct inode *inode, qsize_t nr)
2300 + return ret;
2301 + }
2302 +
2303 ++static inline void dquot_reclaim_block(struct inode *inode, qsize_t nr)
2304 ++{
2305 ++ dquot_reclaim_space_nodirty(inode, nr << inode->i_blkbits);
2306 ++ mark_inode_dirty_sync(inode);
2307 ++}
2308 ++
2309 + static inline void dquot_free_space_nodirty(struct inode *inode, qsize_t nr)
2310 + {
2311 + __dquot_free_space(inode, nr, 0);
2312 +diff --git a/include/linux/time.h b/include/linux/time.h
2313 +index d5d229b2e5af..7d532a32ff3a 100644
2314 +--- a/include/linux/time.h
2315 ++++ b/include/linux/time.h
2316 +@@ -173,6 +173,19 @@ extern void getboottime(struct timespec *ts);
2317 + extern void monotonic_to_bootbased(struct timespec *ts);
2318 + extern void get_monotonic_boottime(struct timespec *ts);
2319 +
2320 ++static inline bool timeval_valid(const struct timeval *tv)
2321 ++{
2322 ++ /* Dates before 1970 are bogus */
2323 ++ if (tv->tv_sec < 0)
2324 ++ return false;
2325 ++
2326 ++ /* Can't have more microseconds then a second */
2327 ++ if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
2328 ++ return false;
2329 ++
2330 ++ return true;
2331 ++}
2332 ++
2333 + extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
2334 + extern int timekeeping_valid_for_hres(void);
2335 + extern u64 timekeeping_max_deferment(void);
2336 +diff --git a/kernel/time.c b/kernel/time.c
2337 +index d21398e6da87..31ec845d0e80 100644
2338 +--- a/kernel/time.c
2339 ++++ b/kernel/time.c
2340 +@@ -195,6 +195,10 @@ SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv,
2341 + if (tv) {
2342 + if (copy_from_user(&user_tv, tv, sizeof(*tv)))
2343 + return -EFAULT;
2344 ++
2345 ++ if (!timeval_valid(&user_tv))
2346 ++ return -EINVAL;
2347 ++
2348 + new_ts.tv_sec = user_tv.tv_sec;
2349 + new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
2350 + }
2351 +diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
2352 +index af8d1d4f3d55..28db9bedc857 100644
2353 +--- a/kernel/time/ntp.c
2354 ++++ b/kernel/time/ntp.c
2355 +@@ -631,6 +631,13 @@ int ntp_validate_timex(struct timex *txc)
2356 + if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME)))
2357 + return -EPERM;
2358 +
2359 ++ if (txc->modes & ADJ_FREQUENCY) {
2360 ++ if (LONG_MIN / PPM_SCALE > txc->freq)
2361 ++ return -EINVAL;
2362 ++ if (LONG_MAX / PPM_SCALE < txc->freq)
2363 ++ return -EINVAL;
2364 ++ }
2365 ++
2366 + return 0;
2367 + }
2368 +
2369 +diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
2370 +index 77c173282f38..4a662f15eaee 100644
2371 +--- a/net/netfilter/ipvs/ip_vs_ftp.c
2372 ++++ b/net/netfilter/ipvs/ip_vs_ftp.c
2373 +@@ -183,6 +183,8 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
2374 + struct nf_conn *ct;
2375 + struct net *net;
2376 +
2377 ++ *diff = 0;
2378 ++
2379 + #ifdef CONFIG_IP_VS_IPV6
2380 + /* This application helper doesn't work with IPv6 yet,
2381 + * so turn this into a no-op for IPv6 packets
2382 +@@ -191,8 +193,6 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
2383 + return 1;
2384 + #endif
2385 +
2386 +- *diff = 0;
2387 +-
2388 + /* Only useful for established sessions */
2389 + if (cp->state != IP_VS_TCP_S_ESTABLISHED)
2390 + return 1;
2391 +@@ -321,6 +321,9 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
2392 + struct ip_vs_conn *n_cp;
2393 + struct net *net;
2394 +
2395 ++ /* no diff required for incoming packets */
2396 ++ *diff = 0;
2397 ++
2398 + #ifdef CONFIG_IP_VS_IPV6
2399 + /* This application helper doesn't work with IPv6 yet,
2400 + * so turn this into a no-op for IPv6 packets
2401 +@@ -329,9 +332,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
2402 + return 1;
2403 + #endif
2404 +
2405 +- /* no diff required for incoming packets */
2406 +- *diff = 0;
2407 +-
2408 + /* Only useful for established sessions */
2409 + if (cp->state != IP_VS_TCP_S_ESTABLISHED)
2410 + return 1;
2411 +diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
2412 +index 858966ab019c..679218b56ede 100755
2413 +--- a/scripts/recordmcount.pl
2414 ++++ b/scripts/recordmcount.pl
2415 +@@ -262,7 +262,6 @@ if ($arch eq "x86_64") {
2416 + # force flags for this arch
2417 + $ld .= " -m shlelf_linux";
2418 + $objcopy .= " -O elf32-sh-linux";
2419 +- $cc .= " -m32";
2420 +
2421 + } elsif ($arch eq "powerpc") {
2422 + $local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\.?\\S+)";
2423 +diff --git a/security/keys/gc.c b/security/keys/gc.c
2424 +index d67c97bb1025..797818695c87 100644
2425 +--- a/security/keys/gc.c
2426 ++++ b/security/keys/gc.c
2427 +@@ -201,12 +201,12 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
2428 + if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
2429 + atomic_dec(&key->user->nikeys);
2430 +
2431 +- key_user_put(key->user);
2432 +-
2433 + /* now throw away the key memory */
2434 + if (key->type->destroy)
2435 + key->type->destroy(key);
2436 +
2437 ++ key_user_put(key->user);
2438 ++
2439 + kfree(key->description);
2440 +
2441 + #ifdef KEY_DEBUGGING
2442 +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
2443 +index be4db47cb2d9..061be0e5fa5a 100644
2444 +--- a/sound/usb/mixer.c
2445 ++++ b/sound/usb/mixer.c
2446 +@@ -886,6 +886,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
2447 + case USB_ID(0x046d, 0x0807): /* Logitech Webcam C500 */
2448 + case USB_ID(0x046d, 0x0808):
2449 + case USB_ID(0x046d, 0x0809):
2450 ++ case USB_ID(0x046d, 0x0819): /* Logitech Webcam C210 */
2451 + case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
2452 + case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
2453 + case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */