Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:3.14 commit in: /
Date: Fri, 30 Jan 2015 11:12:11
Message-Id: 1422616332.4bbd182a0f62f502b7356f9af219efeda80c718f.mpagano@gentoo
1 commit: 4bbd182a0f62f502b7356f9af219efeda80c718f
2 Author: Mike Pagano <mpagano@gentoo.org>
3 AuthorDate: Fri Jan 30 11:12:12 2015 +0000
4 Commit: Mike Pagano <mpagano@gentoo.org>
5 CommitDate: Fri Jan 30 11:12:12 2015 +0000
6 URL: http://sources.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=4bbd182a
7
8 Linux patch 3.14.31
9
10 ---
11 0000_README | 4 +
12 1030_linux-3.14.31.patch | 5744 ++++++++++++++++++++++++++++++++++++++++++++++
13 2 files changed, 5748 insertions(+)
14
15 diff --git a/0000_README b/0000_README
16 index 77ab211..18e35f0 100644
17 --- a/0000_README
18 +++ b/0000_README
19 @@ -162,6 +162,10 @@ Patch: 1029_linux-3.14.30.patch
20 From: http://www.kernel.org
21 Desc: Linux 3.14.30
22
23 +Patch: 1030_linux-3.14.31.patch
24 +From: http://www.kernel.org
25 +Desc: Linux 3.14.31
26 +
27 Patch: 1500_XATTR_USER_PREFIX.patch
28 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
29 Desc: Support for namespace user.pax.* on tmpfs.
30
31 diff --git a/1030_linux-3.14.31.patch b/1030_linux-3.14.31.patch
32 new file mode 100644
33 index 0000000..c557e21
34 --- /dev/null
35 +++ b/1030_linux-3.14.31.patch
36 @@ -0,0 +1,5744 @@
37 +diff --git a/Makefile b/Makefile
38 +index 5b94752a85e3..5abf670c6651 100644
39 +--- a/Makefile
40 ++++ b/Makefile
41 +@@ -1,6 +1,6 @@
42 + VERSION = 3
43 + PATCHLEVEL = 14
44 +-SUBLEVEL = 30
45 ++SUBLEVEL = 31
46 + EXTRAVERSION =
47 + NAME = Remembering Coco
48 +
49 +diff --git a/arch/arc/include/asm/barrier.h b/arch/arc/include/asm/barrier.h
50 +deleted file mode 100644
51 +index c32245c3d1e9..000000000000
52 +--- a/arch/arc/include/asm/barrier.h
53 ++++ /dev/null
54 +@@ -1,37 +0,0 @@
55 +-/*
56 +- * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
57 +- *
58 +- * This program is free software; you can redistribute it and/or modify
59 +- * it under the terms of the GNU General Public License version 2 as
60 +- * published by the Free Software Foundation.
61 +- */
62 +-
63 +-#ifndef __ASM_BARRIER_H
64 +-#define __ASM_BARRIER_H
65 +-
66 +-#ifndef __ASSEMBLY__
67 +-
68 +-/* TODO-vineetg: Need to see what this does, don't we need sync anywhere */
69 +-#define mb() __asm__ __volatile__ ("" : : : "memory")
70 +-#define rmb() mb()
71 +-#define wmb() mb()
72 +-#define set_mb(var, value) do { var = value; mb(); } while (0)
73 +-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
74 +-#define read_barrier_depends() mb()
75 +-
76 +-/* TODO-vineetg verify the correctness of macros here */
77 +-#ifdef CONFIG_SMP
78 +-#define smp_mb() mb()
79 +-#define smp_rmb() rmb()
80 +-#define smp_wmb() wmb()
81 +-#else
82 +-#define smp_mb() barrier()
83 +-#define smp_rmb() barrier()
84 +-#define smp_wmb() barrier()
85 +-#endif
86 +-
87 +-#define smp_read_barrier_depends() do { } while (0)
88 +-
89 +-#endif
90 +-
91 +-#endif
92 +diff --git a/arch/arc/kernel/ctx_sw_asm.S b/arch/arc/kernel/ctx_sw_asm.S
93 +index 2ff0347a2fd7..e248594097e7 100644
94 +--- a/arch/arc/kernel/ctx_sw_asm.S
95 ++++ b/arch/arc/kernel/ctx_sw_asm.S
96 +@@ -10,9 +10,9 @@
97 + * -This is the more "natural" hand written assembler
98 + */
99 +
100 ++#include <linux/linkage.h>
101 + #include <asm/entry.h> /* For the SAVE_* macros */
102 + #include <asm/asm-offsets.h>
103 +-#include <asm/linkage.h>
104 +
105 + #define KSP_WORD_OFF ((TASK_THREAD + THREAD_KSP) / 4)
106 +
107 +diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
108 +index 6a26e79f0ef4..cf3300a3071d 100644
109 +--- a/arch/arm/boot/dts/imx25.dtsi
110 ++++ b/arch/arm/boot/dts/imx25.dtsi
111 +@@ -352,7 +352,7 @@
112 + compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
113 + #pwm-cells = <2>;
114 + reg = <0x53fa0000 0x4000>;
115 +- clocks = <&clks 106>, <&clks 36>;
116 ++ clocks = <&clks 106>, <&clks 52>;
117 + clock-names = "ipg", "per";
118 + interrupts = <36>;
119 + };
120 +@@ -371,7 +371,7 @@
121 + compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
122 + #pwm-cells = <2>;
123 + reg = <0x53fa8000 0x4000>;
124 +- clocks = <&clks 107>, <&clks 36>;
125 ++ clocks = <&clks 107>, <&clks 52>;
126 + clock-names = "ipg", "per";
127 + interrupts = <41>;
128 + };
129 +@@ -412,7 +412,7 @@
130 + pwm4: pwm@53fc8000 {
131 + compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
132 + reg = <0x53fc8000 0x4000>;
133 +- clocks = <&clks 108>, <&clks 36>;
134 ++ clocks = <&clks 108>, <&clks 52>;
135 + clock-names = "ipg", "per";
136 + interrupts = <42>;
137 + };
138 +@@ -458,7 +458,7 @@
139 + compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
140 + #pwm-cells = <2>;
141 + reg = <0x53fe0000 0x4000>;
142 +- clocks = <&clks 105>, <&clks 36>;
143 ++ clocks = <&clks 105>, <&clks 52>;
144 + clock-names = "ipg", "per";
145 + interrupts = <26>;
146 + };
147 +diff --git a/arch/arm/crypto/aes_glue.c b/arch/arm/crypto/aes_glue.c
148 +index 3003fa1f6fb4..0409b8f89782 100644
149 +--- a/arch/arm/crypto/aes_glue.c
150 ++++ b/arch/arm/crypto/aes_glue.c
151 +@@ -93,6 +93,6 @@ module_exit(aes_fini);
152 +
153 + MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm (ASM)");
154 + MODULE_LICENSE("GPL");
155 +-MODULE_ALIAS("aes");
156 +-MODULE_ALIAS("aes-asm");
157 ++MODULE_ALIAS_CRYPTO("aes");
158 ++MODULE_ALIAS_CRYPTO("aes-asm");
159 + MODULE_AUTHOR("David McCullough <ucdevel@×××××.com>");
160 +diff --git a/arch/arm/crypto/sha1_glue.c b/arch/arm/crypto/sha1_glue.c
161 +index 76cd976230bc..ace4cd67464c 100644
162 +--- a/arch/arm/crypto/sha1_glue.c
163 ++++ b/arch/arm/crypto/sha1_glue.c
164 +@@ -175,5 +175,5 @@ module_exit(sha1_mod_fini);
165 +
166 + MODULE_LICENSE("GPL");
167 + MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm (ARM)");
168 +-MODULE_ALIAS("sha1");
169 ++MODULE_ALIAS_CRYPTO("sha1");
170 + MODULE_AUTHOR("David McCullough <ucdevel@×××××.com>");
171 +diff --git a/arch/powerpc/crypto/sha1.c b/arch/powerpc/crypto/sha1.c
172 +index f9e8b9491efc..b51da9132744 100644
173 +--- a/arch/powerpc/crypto/sha1.c
174 ++++ b/arch/powerpc/crypto/sha1.c
175 +@@ -154,4 +154,5 @@ module_exit(sha1_powerpc_mod_fini);
176 + MODULE_LICENSE("GPL");
177 + MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
178 +
179 +-MODULE_ALIAS("sha1-powerpc");
180 ++MODULE_ALIAS_CRYPTO("sha1");
181 ++MODULE_ALIAS_CRYPTO("sha1-powerpc");
182 +diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
183 +index 23223cd63e54..1f272b24fc0b 100644
184 +--- a/arch/s390/crypto/aes_s390.c
185 ++++ b/arch/s390/crypto/aes_s390.c
186 +@@ -979,7 +979,7 @@ static void __exit aes_s390_fini(void)
187 + module_init(aes_s390_init);
188 + module_exit(aes_s390_fini);
189 +
190 +-MODULE_ALIAS("aes-all");
191 ++MODULE_ALIAS_CRYPTO("aes-all");
192 +
193 + MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
194 + MODULE_LICENSE("GPL");
195 +diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
196 +index 7acb77f7ef1a..9e05cc453a40 100644
197 +--- a/arch/s390/crypto/des_s390.c
198 ++++ b/arch/s390/crypto/des_s390.c
199 +@@ -619,8 +619,8 @@ static void __exit des_s390_exit(void)
200 + module_init(des_s390_init);
201 + module_exit(des_s390_exit);
202 +
203 +-MODULE_ALIAS("des");
204 +-MODULE_ALIAS("des3_ede");
205 ++MODULE_ALIAS_CRYPTO("des");
206 ++MODULE_ALIAS_CRYPTO("des3_ede");
207 +
208 + MODULE_LICENSE("GPL");
209 + MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms");
210 +diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
211 +index d43485d142e9..7940dc90e80b 100644
212 +--- a/arch/s390/crypto/ghash_s390.c
213 ++++ b/arch/s390/crypto/ghash_s390.c
214 +@@ -160,7 +160,7 @@ static void __exit ghash_mod_exit(void)
215 + module_init(ghash_mod_init);
216 + module_exit(ghash_mod_exit);
217 +
218 +-MODULE_ALIAS("ghash");
219 ++MODULE_ALIAS_CRYPTO("ghash");
220 +
221 + MODULE_LICENSE("GPL");
222 + MODULE_DESCRIPTION("GHASH Message Digest Algorithm, s390 implementation");
223 +diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
224 +index a1b3a9dc9d8a..5b2bee323694 100644
225 +--- a/arch/s390/crypto/sha1_s390.c
226 ++++ b/arch/s390/crypto/sha1_s390.c
227 +@@ -103,6 +103,6 @@ static void __exit sha1_s390_fini(void)
228 + module_init(sha1_s390_init);
229 + module_exit(sha1_s390_fini);
230 +
231 +-MODULE_ALIAS("sha1");
232 ++MODULE_ALIAS_CRYPTO("sha1");
233 + MODULE_LICENSE("GPL");
234 + MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
235 +diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
236 +index 9b853809a492..b74ff158108c 100644
237 +--- a/arch/s390/crypto/sha256_s390.c
238 ++++ b/arch/s390/crypto/sha256_s390.c
239 +@@ -143,7 +143,7 @@ static void __exit sha256_s390_fini(void)
240 + module_init(sha256_s390_init);
241 + module_exit(sha256_s390_fini);
242 +
243 +-MODULE_ALIAS("sha256");
244 +-MODULE_ALIAS("sha224");
245 ++MODULE_ALIAS_CRYPTO("sha256");
246 ++MODULE_ALIAS_CRYPTO("sha224");
247 + MODULE_LICENSE("GPL");
248 + MODULE_DESCRIPTION("SHA256 and SHA224 Secure Hash Algorithm");
249 +diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c
250 +index 32a81383b69c..0c36989ba182 100644
251 +--- a/arch/s390/crypto/sha512_s390.c
252 ++++ b/arch/s390/crypto/sha512_s390.c
253 +@@ -86,7 +86,7 @@ static struct shash_alg sha512_alg = {
254 + }
255 + };
256 +
257 +-MODULE_ALIAS("sha512");
258 ++MODULE_ALIAS_CRYPTO("sha512");
259 +
260 + static int sha384_init(struct shash_desc *desc)
261 + {
262 +@@ -126,7 +126,7 @@ static struct shash_alg sha384_alg = {
263 + }
264 + };
265 +
266 +-MODULE_ALIAS("sha384");
267 ++MODULE_ALIAS_CRYPTO("sha384");
268 +
269 + static int __init init(void)
270 + {
271 +diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c
272 +index 503e6d96ad4e..ded4cee35318 100644
273 +--- a/arch/sparc/crypto/aes_glue.c
274 ++++ b/arch/sparc/crypto/aes_glue.c
275 +@@ -499,6 +499,6 @@ module_exit(aes_sparc64_mod_fini);
276 + MODULE_LICENSE("GPL");
277 + MODULE_DESCRIPTION("AES Secure Hash Algorithm, sparc64 aes opcode accelerated");
278 +
279 +-MODULE_ALIAS("aes");
280 ++MODULE_ALIAS_CRYPTO("aes");
281 +
282 + #include "crop_devid.c"
283 +diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c
284 +index 888f6260b4ec..641f55cb61c3 100644
285 +--- a/arch/sparc/crypto/camellia_glue.c
286 ++++ b/arch/sparc/crypto/camellia_glue.c
287 +@@ -322,6 +322,6 @@ module_exit(camellia_sparc64_mod_fini);
288 + MODULE_LICENSE("GPL");
289 + MODULE_DESCRIPTION("Camellia Cipher Algorithm, sparc64 camellia opcode accelerated");
290 +
291 +-MODULE_ALIAS("aes");
292 ++MODULE_ALIAS_CRYPTO("aes");
293 +
294 + #include "crop_devid.c"
295 +diff --git a/arch/sparc/crypto/crc32c_glue.c b/arch/sparc/crypto/crc32c_glue.c
296 +index 5162fad912ce..d1064e46efe8 100644
297 +--- a/arch/sparc/crypto/crc32c_glue.c
298 ++++ b/arch/sparc/crypto/crc32c_glue.c
299 +@@ -176,6 +176,6 @@ module_exit(crc32c_sparc64_mod_fini);
300 + MODULE_LICENSE("GPL");
301 + MODULE_DESCRIPTION("CRC32c (Castagnoli), sparc64 crc32c opcode accelerated");
302 +
303 +-MODULE_ALIAS("crc32c");
304 ++MODULE_ALIAS_CRYPTO("crc32c");
305 +
306 + #include "crop_devid.c"
307 +diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
308 +index 3065bc61f9d3..d11500972994 100644
309 +--- a/arch/sparc/crypto/des_glue.c
310 ++++ b/arch/sparc/crypto/des_glue.c
311 +@@ -532,6 +532,6 @@ module_exit(des_sparc64_mod_fini);
312 + MODULE_LICENSE("GPL");
313 + MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms, sparc64 des opcode accelerated");
314 +
315 +-MODULE_ALIAS("des");
316 ++MODULE_ALIAS_CRYPTO("des");
317 +
318 + #include "crop_devid.c"
319 +diff --git a/arch/sparc/crypto/md5_glue.c b/arch/sparc/crypto/md5_glue.c
320 +index 09a9ea1dfb69..64c7ff5f72a9 100644
321 +--- a/arch/sparc/crypto/md5_glue.c
322 ++++ b/arch/sparc/crypto/md5_glue.c
323 +@@ -185,6 +185,6 @@ module_exit(md5_sparc64_mod_fini);
324 + MODULE_LICENSE("GPL");
325 + MODULE_DESCRIPTION("MD5 Secure Hash Algorithm, sparc64 md5 opcode accelerated");
326 +
327 +-MODULE_ALIAS("md5");
328 ++MODULE_ALIAS_CRYPTO("md5");
329 +
330 + #include "crop_devid.c"
331 +diff --git a/arch/sparc/crypto/sha1_glue.c b/arch/sparc/crypto/sha1_glue.c
332 +index 6cd5f29e1e0d..1b3e47accc74 100644
333 +--- a/arch/sparc/crypto/sha1_glue.c
334 ++++ b/arch/sparc/crypto/sha1_glue.c
335 +@@ -180,6 +180,6 @@ module_exit(sha1_sparc64_mod_fini);
336 + MODULE_LICENSE("GPL");
337 + MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, sparc64 sha1 opcode accelerated");
338 +
339 +-MODULE_ALIAS("sha1");
340 ++MODULE_ALIAS_CRYPTO("sha1");
341 +
342 + #include "crop_devid.c"
343 +diff --git a/arch/sparc/crypto/sha256_glue.c b/arch/sparc/crypto/sha256_glue.c
344 +index 04f555ab2680..41f27cca2a22 100644
345 +--- a/arch/sparc/crypto/sha256_glue.c
346 ++++ b/arch/sparc/crypto/sha256_glue.c
347 +@@ -237,7 +237,7 @@ module_exit(sha256_sparc64_mod_fini);
348 + MODULE_LICENSE("GPL");
349 + MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm, sparc64 sha256 opcode accelerated");
350 +
351 +-MODULE_ALIAS("sha224");
352 +-MODULE_ALIAS("sha256");
353 ++MODULE_ALIAS_CRYPTO("sha224");
354 ++MODULE_ALIAS_CRYPTO("sha256");
355 +
356 + #include "crop_devid.c"
357 +diff --git a/arch/sparc/crypto/sha512_glue.c b/arch/sparc/crypto/sha512_glue.c
358 +index f04d1994d19a..9fff88541b8c 100644
359 +--- a/arch/sparc/crypto/sha512_glue.c
360 ++++ b/arch/sparc/crypto/sha512_glue.c
361 +@@ -222,7 +222,7 @@ module_exit(sha512_sparc64_mod_fini);
362 + MODULE_LICENSE("GPL");
363 + MODULE_DESCRIPTION("SHA-384 and SHA-512 Secure Hash Algorithm, sparc64 sha512 opcode accelerated");
364 +
365 +-MODULE_ALIAS("sha384");
366 +-MODULE_ALIAS("sha512");
367 ++MODULE_ALIAS_CRYPTO("sha384");
368 ++MODULE_ALIAS_CRYPTO("sha512");
369 +
370 + #include "crop_devid.c"
371 +diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
372 +index 004ba568d93f..33294fdc402e 100644
373 +--- a/arch/tile/mm/homecache.c
374 ++++ b/arch/tile/mm/homecache.c
375 +@@ -417,7 +417,7 @@ void __homecache_free_pages(struct page *page, unsigned int order)
376 + if (put_page_testzero(page)) {
377 + homecache_change_page_home(page, order, PAGE_HOME_HASH);
378 + if (order == 0) {
379 +- free_hot_cold_page(page, 0);
380 ++ free_hot_cold_page(page, false);
381 + } else {
382 + init_page_count(page);
383 + __free_pages(page, order);
384 +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
385 +index 98aa930230ec..2f645c90e4d8 100644
386 +--- a/arch/x86/Kconfig
387 ++++ b/arch/x86/Kconfig
388 +@@ -854,7 +854,7 @@ source "kernel/Kconfig.preempt"
389 +
390 + config X86_UP_APIC
391 + bool "Local APIC support on uniprocessors"
392 +- depends on X86_32 && !SMP && !X86_32_NON_STANDARD && !PCI_MSI
393 ++ depends on X86_32 && !SMP && !X86_32_NON_STANDARD
394 + ---help---
395 + A local APIC (Advanced Programmable Interrupt Controller) is an
396 + integrated interrupt controller in the CPU. If you have a single-CPU
397 +@@ -865,6 +865,10 @@ config X86_UP_APIC
398 + performance counters), and the NMI watchdog which detects hard
399 + lockups.
400 +
401 ++config X86_UP_APIC_MSI
402 ++ def_bool y
403 ++ select X86_UP_APIC if X86_32 && !SMP && !X86_32_NON_STANDARD && PCI_MSI
404 ++
405 + config X86_UP_IOAPIC
406 + bool "IO-APIC support on uniprocessors"
407 + depends on X86_UP_APIC
408 +diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
409 +index eb25ca1eb6da..8f45c855f84c 100644
410 +--- a/arch/x86/boot/compressed/misc.c
411 ++++ b/arch/x86/boot/compressed/misc.c
412 +@@ -396,6 +396,8 @@ asmlinkage void *decompress_kernel(void *rmode, memptr heap,
413 + unsigned long output_len,
414 + unsigned long run_size)
415 + {
416 ++ unsigned char *output_orig = output;
417 ++
418 + real_mode = rmode;
419 +
420 + sanitize_boot_params(real_mode);
421 +@@ -444,7 +446,12 @@ asmlinkage void *decompress_kernel(void *rmode, memptr heap,
422 + debug_putstr("\nDecompressing Linux... ");
423 + decompress(input_data, input_len, NULL, NULL, output, NULL, error);
424 + parse_elf(output);
425 +- handle_relocations(output, output_len);
426 ++ /*
427 ++ * 32-bit always performs relocations. 64-bit relocations are only
428 ++ * needed if kASLR has chosen a different load address.
429 ++ */
430 ++ if (!IS_ENABLED(CONFIG_X86_64) || output != output_orig)
431 ++ handle_relocations(output, output_len);
432 + debug_putstr("done.\nBooting the kernel.\n");
433 + return output;
434 + }
435 +diff --git a/arch/x86/crypto/aes_glue.c b/arch/x86/crypto/aes_glue.c
436 +index aafe8ce0d65d..e26984f7ab8d 100644
437 +--- a/arch/x86/crypto/aes_glue.c
438 ++++ b/arch/x86/crypto/aes_glue.c
439 +@@ -66,5 +66,5 @@ module_exit(aes_fini);
440 +
441 + MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, asm optimized");
442 + MODULE_LICENSE("GPL");
443 +-MODULE_ALIAS("aes");
444 +-MODULE_ALIAS("aes-asm");
445 ++MODULE_ALIAS_CRYPTO("aes");
446 ++MODULE_ALIAS_CRYPTO("aes-asm");
447 +diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
448 +index 948ad0e77741..6dfb7d0b139a 100644
449 +--- a/arch/x86/crypto/aesni-intel_glue.c
450 ++++ b/arch/x86/crypto/aesni-intel_glue.c
451 +@@ -1514,4 +1514,4 @@ module_exit(aesni_exit);
452 +
453 + MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
454 + MODULE_LICENSE("GPL");
455 +-MODULE_ALIAS("aes");
456 ++MODULE_ALIAS_CRYPTO("aes");
457 +diff --git a/arch/x86/crypto/blowfish_glue.c b/arch/x86/crypto/blowfish_glue.c
458 +index 50ec333b70e6..1477cfcdbf6b 100644
459 +--- a/arch/x86/crypto/blowfish_glue.c
460 ++++ b/arch/x86/crypto/blowfish_glue.c
461 +@@ -481,5 +481,5 @@ module_exit(fini);
462 +
463 + MODULE_LICENSE("GPL");
464 + MODULE_DESCRIPTION("Blowfish Cipher Algorithm, asm optimized");
465 +-MODULE_ALIAS("blowfish");
466 +-MODULE_ALIAS("blowfish-asm");
467 ++MODULE_ALIAS_CRYPTO("blowfish");
468 ++MODULE_ALIAS_CRYPTO("blowfish-asm");
469 +diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c
470 +index 4209a76fcdaa..9a07fafe3831 100644
471 +--- a/arch/x86/crypto/camellia_aesni_avx2_glue.c
472 ++++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c
473 +@@ -582,5 +582,5 @@ module_exit(camellia_aesni_fini);
474 +
475 + MODULE_LICENSE("GPL");
476 + MODULE_DESCRIPTION("Camellia Cipher Algorithm, AES-NI/AVX2 optimized");
477 +-MODULE_ALIAS("camellia");
478 +-MODULE_ALIAS("camellia-asm");
479 ++MODULE_ALIAS_CRYPTO("camellia");
480 ++MODULE_ALIAS_CRYPTO("camellia-asm");
481 +diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c
482 +index 87a041a10f4a..ed38d959add6 100644
483 +--- a/arch/x86/crypto/camellia_aesni_avx_glue.c
484 ++++ b/arch/x86/crypto/camellia_aesni_avx_glue.c
485 +@@ -574,5 +574,5 @@ module_exit(camellia_aesni_fini);
486 +
487 + MODULE_LICENSE("GPL");
488 + MODULE_DESCRIPTION("Camellia Cipher Algorithm, AES-NI/AVX optimized");
489 +-MODULE_ALIAS("camellia");
490 +-MODULE_ALIAS("camellia-asm");
491 ++MODULE_ALIAS_CRYPTO("camellia");
492 ++MODULE_ALIAS_CRYPTO("camellia-asm");
493 +diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c
494 +index c171dcbf192d..5c8b6266a394 100644
495 +--- a/arch/x86/crypto/camellia_glue.c
496 ++++ b/arch/x86/crypto/camellia_glue.c
497 +@@ -1725,5 +1725,5 @@ module_exit(fini);
498 +
499 + MODULE_LICENSE("GPL");
500 + MODULE_DESCRIPTION("Camellia Cipher Algorithm, asm optimized");
501 +-MODULE_ALIAS("camellia");
502 +-MODULE_ALIAS("camellia-asm");
503 ++MODULE_ALIAS_CRYPTO("camellia");
504 ++MODULE_ALIAS_CRYPTO("camellia-asm");
505 +diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
506 +index e6a3700489b9..f62e9db5a462 100644
507 +--- a/arch/x86/crypto/cast5_avx_glue.c
508 ++++ b/arch/x86/crypto/cast5_avx_glue.c
509 +@@ -494,4 +494,4 @@ module_exit(cast5_exit);
510 +
511 + MODULE_DESCRIPTION("Cast5 Cipher Algorithm, AVX optimized");
512 + MODULE_LICENSE("GPL");
513 +-MODULE_ALIAS("cast5");
514 ++MODULE_ALIAS_CRYPTO("cast5");
515 +diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c
516 +index 09f3677393e4..0160f68a57ff 100644
517 +--- a/arch/x86/crypto/cast6_avx_glue.c
518 ++++ b/arch/x86/crypto/cast6_avx_glue.c
519 +@@ -611,4 +611,4 @@ module_exit(cast6_exit);
520 +
521 + MODULE_DESCRIPTION("Cast6 Cipher Algorithm, AVX optimized");
522 + MODULE_LICENSE("GPL");
523 +-MODULE_ALIAS("cast6");
524 ++MODULE_ALIAS_CRYPTO("cast6");
525 +diff --git a/arch/x86/crypto/crc32-pclmul_glue.c b/arch/x86/crypto/crc32-pclmul_glue.c
526 +index 9d014a74ef96..1937fc1d8763 100644
527 +--- a/arch/x86/crypto/crc32-pclmul_glue.c
528 ++++ b/arch/x86/crypto/crc32-pclmul_glue.c
529 +@@ -197,5 +197,5 @@ module_exit(crc32_pclmul_mod_fini);
530 + MODULE_AUTHOR("Alexander Boyko <alexander_boyko@×××××××.com>");
531 + MODULE_LICENSE("GPL");
532 +
533 +-MODULE_ALIAS("crc32");
534 +-MODULE_ALIAS("crc32-pclmul");
535 ++MODULE_ALIAS_CRYPTO("crc32");
536 ++MODULE_ALIAS_CRYPTO("crc32-pclmul");
537 +diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
538 +index 6812ad98355c..28640c3d6af7 100644
539 +--- a/arch/x86/crypto/crc32c-intel_glue.c
540 ++++ b/arch/x86/crypto/crc32c-intel_glue.c
541 +@@ -280,5 +280,5 @@ MODULE_AUTHOR("Austin Zhang <austin.zhang@×××××.com>, Kent Liu <kent.liu@intel.c
542 + MODULE_DESCRIPTION("CRC32c (Castagnoli) optimization using Intel Hardware.");
543 + MODULE_LICENSE("GPL");
544 +
545 +-MODULE_ALIAS("crc32c");
546 +-MODULE_ALIAS("crc32c-intel");
547 ++MODULE_ALIAS_CRYPTO("crc32c");
548 ++MODULE_ALIAS_CRYPTO("crc32c-intel");
549 +diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c
550 +index 7845d7fd54c0..b6c67bf30fdf 100644
551 +--- a/arch/x86/crypto/crct10dif-pclmul_glue.c
552 ++++ b/arch/x86/crypto/crct10dif-pclmul_glue.c
553 +@@ -147,5 +147,5 @@ MODULE_AUTHOR("Tim Chen <tim.c.chen@×××××××××××.com>");
554 + MODULE_DESCRIPTION("T10 DIF CRC calculation accelerated with PCLMULQDQ.");
555 + MODULE_LICENSE("GPL");
556 +
557 +-MODULE_ALIAS("crct10dif");
558 +-MODULE_ALIAS("crct10dif-pclmul");
559 ++MODULE_ALIAS_CRYPTO("crct10dif");
560 ++MODULE_ALIAS_CRYPTO("crct10dif-pclmul");
561 +diff --git a/arch/x86/crypto/fpu.c b/arch/x86/crypto/fpu.c
562 +index 98d7a188f46b..f368ba261739 100644
563 +--- a/arch/x86/crypto/fpu.c
564 ++++ b/arch/x86/crypto/fpu.c
565 +@@ -17,6 +17,7 @@
566 + #include <linux/kernel.h>
567 + #include <linux/module.h>
568 + #include <linux/slab.h>
569 ++#include <linux/crypto.h>
570 + #include <asm/i387.h>
571 +
572 + struct crypto_fpu_ctx {
573 +@@ -159,3 +160,5 @@ void __exit crypto_fpu_exit(void)
574 + {
575 + crypto_unregister_template(&crypto_fpu_tmpl);
576 + }
577 ++
578 ++MODULE_ALIAS_CRYPTO("fpu");
579 +diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
580 +index d785cf2c529c..a8d6f69f92a3 100644
581 +--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
582 ++++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
583 +@@ -341,4 +341,4 @@ module_exit(ghash_pclmulqdqni_mod_exit);
584 + MODULE_LICENSE("GPL");
585 + MODULE_DESCRIPTION("GHASH Message Digest Algorithm, "
586 + "acclerated by PCLMULQDQ-NI");
587 +-MODULE_ALIAS("ghash");
588 ++MODULE_ALIAS_CRYPTO("ghash");
589 +diff --git a/arch/x86/crypto/salsa20_glue.c b/arch/x86/crypto/salsa20_glue.c
590 +index 5e8e67739bb5..399a29d067d6 100644
591 +--- a/arch/x86/crypto/salsa20_glue.c
592 ++++ b/arch/x86/crypto/salsa20_glue.c
593 +@@ -119,5 +119,5 @@ module_exit(fini);
594 +
595 + MODULE_LICENSE("GPL");
596 + MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm (optimized assembly version)");
597 +-MODULE_ALIAS("salsa20");
598 +-MODULE_ALIAS("salsa20-asm");
599 ++MODULE_ALIAS_CRYPTO("salsa20");
600 ++MODULE_ALIAS_CRYPTO("salsa20-asm");
601 +diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c
602 +index 2fae489b1524..437e47a4d302 100644
603 +--- a/arch/x86/crypto/serpent_avx2_glue.c
604 ++++ b/arch/x86/crypto/serpent_avx2_glue.c
605 +@@ -558,5 +558,5 @@ module_exit(fini);
606 +
607 + MODULE_LICENSE("GPL");
608 + MODULE_DESCRIPTION("Serpent Cipher Algorithm, AVX2 optimized");
609 +-MODULE_ALIAS("serpent");
610 +-MODULE_ALIAS("serpent-asm");
611 ++MODULE_ALIAS_CRYPTO("serpent");
612 ++MODULE_ALIAS_CRYPTO("serpent-asm");
613 +diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c
614 +index ff4870870972..7e217398b4eb 100644
615 +--- a/arch/x86/crypto/serpent_avx_glue.c
616 ++++ b/arch/x86/crypto/serpent_avx_glue.c
617 +@@ -617,4 +617,4 @@ module_exit(serpent_exit);
618 +
619 + MODULE_DESCRIPTION("Serpent Cipher Algorithm, AVX optimized");
620 + MODULE_LICENSE("GPL");
621 +-MODULE_ALIAS("serpent");
622 ++MODULE_ALIAS_CRYPTO("serpent");
623 +diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c
624 +index 8c95f8637306..bf025adaea01 100644
625 +--- a/arch/x86/crypto/serpent_sse2_glue.c
626 ++++ b/arch/x86/crypto/serpent_sse2_glue.c
627 +@@ -618,4 +618,4 @@ module_exit(serpent_sse2_exit);
628 +
629 + MODULE_DESCRIPTION("Serpent Cipher Algorithm, SSE2 optimized");
630 + MODULE_LICENSE("GPL");
631 +-MODULE_ALIAS("serpent");
632 ++MODULE_ALIAS_CRYPTO("serpent");
633 +diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
634 +index 4a11a9d72451..29e1060e9001 100644
635 +--- a/arch/x86/crypto/sha1_ssse3_glue.c
636 ++++ b/arch/x86/crypto/sha1_ssse3_glue.c
637 +@@ -237,4 +237,4 @@ module_exit(sha1_ssse3_mod_fini);
638 + MODULE_LICENSE("GPL");
639 + MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, Supplemental SSE3 accelerated");
640 +
641 +-MODULE_ALIAS("sha1");
642 ++MODULE_ALIAS_CRYPTO("sha1");
643 +diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
644 +index f248546da1ca..4dc100d82902 100644
645 +--- a/arch/x86/crypto/sha256_ssse3_glue.c
646 ++++ b/arch/x86/crypto/sha256_ssse3_glue.c
647 +@@ -318,5 +318,5 @@ module_exit(sha256_ssse3_mod_fini);
648 + MODULE_LICENSE("GPL");
649 + MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, Supplemental SSE3 accelerated");
650 +
651 +-MODULE_ALIAS("sha256");
652 +-MODULE_ALIAS("sha224");
653 ++MODULE_ALIAS_CRYPTO("sha256");
654 ++MODULE_ALIAS_CRYPTO("sha224");
655 +diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
656 +index 8626b03e83b7..26a5898a6f26 100644
657 +--- a/arch/x86/crypto/sha512_ssse3_glue.c
658 ++++ b/arch/x86/crypto/sha512_ssse3_glue.c
659 +@@ -326,5 +326,5 @@ module_exit(sha512_ssse3_mod_fini);
660 + MODULE_LICENSE("GPL");
661 + MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, Supplemental SSE3 accelerated");
662 +
663 +-MODULE_ALIAS("sha512");
664 +-MODULE_ALIAS("sha384");
665 ++MODULE_ALIAS_CRYPTO("sha512");
666 ++MODULE_ALIAS_CRYPTO("sha384");
667 +diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c
668 +index 4e3c665be129..1ac531ea9bcc 100644
669 +--- a/arch/x86/crypto/twofish_avx_glue.c
670 ++++ b/arch/x86/crypto/twofish_avx_glue.c
671 +@@ -579,4 +579,4 @@ module_exit(twofish_exit);
672 +
673 + MODULE_DESCRIPTION("Twofish Cipher Algorithm, AVX optimized");
674 + MODULE_LICENSE("GPL");
675 +-MODULE_ALIAS("twofish");
676 ++MODULE_ALIAS_CRYPTO("twofish");
677 +diff --git a/arch/x86/crypto/twofish_glue.c b/arch/x86/crypto/twofish_glue.c
678 +index 0a5202303501..77e06c2da83d 100644
679 +--- a/arch/x86/crypto/twofish_glue.c
680 ++++ b/arch/x86/crypto/twofish_glue.c
681 +@@ -96,5 +96,5 @@ module_exit(fini);
682 +
683 + MODULE_LICENSE("GPL");
684 + MODULE_DESCRIPTION ("Twofish Cipher Algorithm, asm optimized");
685 +-MODULE_ALIAS("twofish");
686 +-MODULE_ALIAS("twofish-asm");
687 ++MODULE_ALIAS_CRYPTO("twofish");
688 ++MODULE_ALIAS_CRYPTO("twofish-asm");
689 +diff --git a/arch/x86/crypto/twofish_glue_3way.c b/arch/x86/crypto/twofish_glue_3way.c
690 +index 13e63b3e1dfb..56d8a08ee479 100644
691 +--- a/arch/x86/crypto/twofish_glue_3way.c
692 ++++ b/arch/x86/crypto/twofish_glue_3way.c
693 +@@ -495,5 +495,5 @@ module_exit(fini);
694 +
695 + MODULE_LICENSE("GPL");
696 + MODULE_DESCRIPTION("Twofish Cipher Algorithm, 3-way parallel asm optimized");
697 +-MODULE_ALIAS("twofish");
698 +-MODULE_ALIAS("twofish-asm");
699 ++MODULE_ALIAS_CRYPTO("twofish");
700 ++MODULE_ALIAS_CRYPTO("twofish-asm");
701 +diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
702 +index 50d033a8947d..a94b82e8f156 100644
703 +--- a/arch/x86/include/asm/desc.h
704 ++++ b/arch/x86/include/asm/desc.h
705 +@@ -251,7 +251,8 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
706 + gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
707 + }
708 +
709 +-#define _LDT_empty(info) \
710 ++/* This intentionally ignores lm, since 32-bit apps don't have that field. */
711 ++#define LDT_empty(info) \
712 + ((info)->base_addr == 0 && \
713 + (info)->limit == 0 && \
714 + (info)->contents == 0 && \
715 +@@ -261,11 +262,18 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
716 + (info)->seg_not_present == 1 && \
717 + (info)->useable == 0)
718 +
719 +-#ifdef CONFIG_X86_64
720 +-#define LDT_empty(info) (_LDT_empty(info) && ((info)->lm == 0))
721 +-#else
722 +-#define LDT_empty(info) (_LDT_empty(info))
723 +-#endif
724 ++/* Lots of programs expect an all-zero user_desc to mean "no segment at all". */
725 ++static inline bool LDT_zero(const struct user_desc *info)
726 ++{
727 ++ return (info->base_addr == 0 &&
728 ++ info->limit == 0 &&
729 ++ info->contents == 0 &&
730 ++ info->read_exec_only == 0 &&
731 ++ info->seg_32bit == 0 &&
732 ++ info->limit_in_pages == 0 &&
733 ++ info->seg_not_present == 0 &&
734 ++ info->useable == 0);
735 ++}
736 +
737 + static inline void clear_LDT(void)
738 + {
739 +diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
740 +index 832d05a914ba..317c81172c18 100644
741 +--- a/arch/x86/kernel/cpu/mshyperv.c
742 ++++ b/arch/x86/kernel/cpu/mshyperv.c
743 +@@ -67,6 +67,7 @@ static struct clocksource hyperv_cs = {
744 + .rating = 400, /* use this when running on Hyperv*/
745 + .read = read_hv_clock,
746 + .mask = CLOCKSOURCE_MASK(64),
747 ++ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
748 + };
749 +
750 + static void __init ms_hyperv_init_platform(void)
751 +diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
752 +index 4e942f31b1a7..7fc5e843f247 100644
753 +--- a/arch/x86/kernel/tls.c
754 ++++ b/arch/x86/kernel/tls.c
755 +@@ -29,7 +29,28 @@ static int get_free_idx(void)
756 +
757 + static bool tls_desc_okay(const struct user_desc *info)
758 + {
759 +- if (LDT_empty(info))
760 ++ /*
761 ++ * For historical reasons (i.e. no one ever documented how any
762 ++ * of the segmentation APIs work), user programs can and do
763 ++ * assume that a struct user_desc that's all zeros except for
764 ++ * entry_number means "no segment at all". This never actually
765 ++ * worked. In fact, up to Linux 3.19, a struct user_desc like
766 ++ * this would create a 16-bit read-write segment with base and
767 ++ * limit both equal to zero.
768 ++ *
769 ++ * That was close enough to "no segment at all" until we
770 ++ * hardened this function to disallow 16-bit TLS segments. Fix
771 ++ * it up by interpreting these zeroed segments the way that they
772 ++ * were almost certainly intended to be interpreted.
773 ++ *
774 ++ * The correct way to ask for "no segment at all" is to specify
775 ++ * a user_desc that satisfies LDT_empty. To keep everything
776 ++ * working, we accept both.
777 ++ *
778 ++ * Note that there's a similar kludge in modify_ldt -- look at
779 ++ * the distinction between modes 1 and 0x11.
780 ++ */
781 ++ if (LDT_empty(info) || LDT_zero(info))
782 + return true;
783 +
784 + /*
785 +@@ -71,7 +92,7 @@ static void set_tls_desc(struct task_struct *p, int idx,
786 + cpu = get_cpu();
787 +
788 + while (n-- > 0) {
789 +- if (LDT_empty(info))
790 ++ if (LDT_empty(info) || LDT_zero(info))
791 + desc->a = desc->b = 0;
792 + else
793 + fill_ldt(desc, info);
794 +diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
795 +index f9d976e0ae67..b1d9002af7db 100644
796 +--- a/arch/x86/kernel/traps.c
797 ++++ b/arch/x86/kernel/traps.c
798 +@@ -365,7 +365,7 @@ exit:
799 + * for scheduling or signal handling. The actual stack switch is done in
800 + * entry.S
801 + */
802 +-asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
803 ++asmlinkage notrace __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
804 + {
805 + struct pt_regs *regs = eregs;
806 + /* Did already sync */
807 +@@ -390,7 +390,7 @@ struct bad_iret_stack {
808 + struct pt_regs regs;
809 + };
810 +
811 +-asmlinkage __visible
812 ++asmlinkage __visible notrace __kprobes
813 + struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
814 + {
815 + /*
816 +diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
817 +index de0290605903..b20bced0090f 100644
818 +--- a/arch/x86/kernel/tsc.c
819 ++++ b/arch/x86/kernel/tsc.c
820 +@@ -618,7 +618,7 @@ static unsigned long quick_pit_calibrate(void)
821 + goto success;
822 + }
823 + }
824 +- pr_err("Fast TSC calibration failed\n");
825 ++ pr_info("Fast TSC calibration failed\n");
826 + return 0;
827 +
828 + success:
829 +diff --git a/crypto/842.c b/crypto/842.c
830 +index 65c7a89cfa09..b48f4f108c47 100644
831 +--- a/crypto/842.c
832 ++++ b/crypto/842.c
833 +@@ -180,3 +180,4 @@ module_exit(nx842_mod_exit);
834 +
835 + MODULE_LICENSE("GPL");
836 + MODULE_DESCRIPTION("842 Compression Algorithm");
837 ++MODULE_ALIAS_CRYPTO("842");
838 +diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
839 +index fd0d6b454975..3dd101144a58 100644
840 +--- a/crypto/aes_generic.c
841 ++++ b/crypto/aes_generic.c
842 +@@ -1474,4 +1474,5 @@ module_exit(aes_fini);
843 +
844 + MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
845 + MODULE_LICENSE("Dual BSD/GPL");
846 +-MODULE_ALIAS("aes");
847 ++MODULE_ALIAS_CRYPTO("aes");
848 ++MODULE_ALIAS_CRYPTO("aes-generic");
849 +diff --git a/crypto/algapi.c b/crypto/algapi.c
850 +index 7a1ae87f1683..00d8d939733b 100644
851 +--- a/crypto/algapi.c
852 ++++ b/crypto/algapi.c
853 +@@ -495,8 +495,8 @@ static struct crypto_template *__crypto_lookup_template(const char *name)
854 +
855 + struct crypto_template *crypto_lookup_template(const char *name)
856 + {
857 +- return try_then_request_module(__crypto_lookup_template(name), "%s",
858 +- name);
859 ++ return try_then_request_module(__crypto_lookup_template(name),
860 ++ "crypto-%s", name);
861 + }
862 + EXPORT_SYMBOL_GPL(crypto_lookup_template);
863 +
864 +diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
865 +index 666f1962a160..6f5bebc9bf01 100644
866 +--- a/crypto/ansi_cprng.c
867 ++++ b/crypto/ansi_cprng.c
868 +@@ -476,4 +476,5 @@ module_param(dbg, int, 0);
869 + MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)");
870 + module_init(prng_mod_init);
871 + module_exit(prng_mod_fini);
872 +-MODULE_ALIAS("stdrng");
873 ++MODULE_ALIAS_CRYPTO("stdrng");
874 ++MODULE_ALIAS_CRYPTO("ansi_cprng");
875 +diff --git a/crypto/anubis.c b/crypto/anubis.c
876 +index 008c8a4fb67c..4bb187c2a902 100644
877 +--- a/crypto/anubis.c
878 ++++ b/crypto/anubis.c
879 +@@ -704,3 +704,4 @@ module_exit(anubis_mod_fini);
880 +
881 + MODULE_LICENSE("GPL");
882 + MODULE_DESCRIPTION("Anubis Cryptographic Algorithm");
883 ++MODULE_ALIAS_CRYPTO("anubis");
884 +diff --git a/crypto/api.c b/crypto/api.c
885 +index a2b39c5f3649..2a81e98a0021 100644
886 +--- a/crypto/api.c
887 ++++ b/crypto/api.c
888 +@@ -216,11 +216,11 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
889 +
890 + alg = crypto_alg_lookup(name, type, mask);
891 + if (!alg) {
892 +- request_module("%s", name);
893 ++ request_module("crypto-%s", name);
894 +
895 + if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask &
896 + CRYPTO_ALG_NEED_FALLBACK))
897 +- request_module("%s-all", name);
898 ++ request_module("crypto-%s-all", name);
899 +
900 + alg = crypto_alg_lookup(name, type, mask);
901 + }
902 +diff --git a/crypto/arc4.c b/crypto/arc4.c
903 +index 5a772c3657d5..f1a81925558f 100644
904 +--- a/crypto/arc4.c
905 ++++ b/crypto/arc4.c
906 +@@ -166,3 +166,4 @@ module_exit(arc4_exit);
907 + MODULE_LICENSE("GPL");
908 + MODULE_DESCRIPTION("ARC4 Cipher Algorithm");
909 + MODULE_AUTHOR("Jon Oberheide <jon@×××××××××.org>");
910 ++MODULE_ALIAS_CRYPTO("arc4");
911 +diff --git a/crypto/authenc.c b/crypto/authenc.c
912 +index e1223559d5df..78fb16cab13f 100644
913 +--- a/crypto/authenc.c
914 ++++ b/crypto/authenc.c
915 +@@ -721,3 +721,4 @@ module_exit(crypto_authenc_module_exit);
916 +
917 + MODULE_LICENSE("GPL");
918 + MODULE_DESCRIPTION("Simple AEAD wrapper for IPsec");
919 ++MODULE_ALIAS_CRYPTO("authenc");
920 +diff --git a/crypto/authencesn.c b/crypto/authencesn.c
921 +index 4be0dd4373a9..024bff2344fc 100644
922 +--- a/crypto/authencesn.c
923 ++++ b/crypto/authencesn.c
924 +@@ -814,3 +814,4 @@ module_exit(crypto_authenc_esn_module_exit);
925 + MODULE_LICENSE("GPL");
926 + MODULE_AUTHOR("Steffen Klassert <steffen.klassert@×××××××.com>");
927 + MODULE_DESCRIPTION("AEAD wrapper for IPsec with extended sequence numbers");
928 ++MODULE_ALIAS_CRYPTO("authencesn");
929 +diff --git a/crypto/blowfish_generic.c b/crypto/blowfish_generic.c
930 +index 8baf5447d35b..87b392a77a93 100644
931 +--- a/crypto/blowfish_generic.c
932 ++++ b/crypto/blowfish_generic.c
933 +@@ -138,4 +138,5 @@ module_exit(blowfish_mod_fini);
934 +
935 + MODULE_LICENSE("GPL");
936 + MODULE_DESCRIPTION("Blowfish Cipher Algorithm");
937 +-MODULE_ALIAS("blowfish");
938 ++MODULE_ALIAS_CRYPTO("blowfish");
939 ++MODULE_ALIAS_CRYPTO("blowfish-generic");
940 +diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c
941 +index 26bcd7a2d6b4..a02286bf319e 100644
942 +--- a/crypto/camellia_generic.c
943 ++++ b/crypto/camellia_generic.c
944 +@@ -1098,4 +1098,5 @@ module_exit(camellia_fini);
945 +
946 + MODULE_DESCRIPTION("Camellia Cipher Algorithm");
947 + MODULE_LICENSE("GPL");
948 +-MODULE_ALIAS("camellia");
949 ++MODULE_ALIAS_CRYPTO("camellia");
950 ++MODULE_ALIAS_CRYPTO("camellia-generic");
951 +diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c
952 +index 5558f630a0eb..df5c72629383 100644
953 +--- a/crypto/cast5_generic.c
954 ++++ b/crypto/cast5_generic.c
955 +@@ -549,4 +549,5 @@ module_exit(cast5_mod_fini);
956 +
957 + MODULE_LICENSE("GPL");
958 + MODULE_DESCRIPTION("Cast5 Cipher Algorithm");
959 +-MODULE_ALIAS("cast5");
960 ++MODULE_ALIAS_CRYPTO("cast5");
961 ++MODULE_ALIAS_CRYPTO("cast5-generic");
962 +diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c
963 +index de732528a430..058c8d755d03 100644
964 +--- a/crypto/cast6_generic.c
965 ++++ b/crypto/cast6_generic.c
966 +@@ -291,4 +291,5 @@ module_exit(cast6_mod_fini);
967 +
968 + MODULE_LICENSE("GPL");
969 + MODULE_DESCRIPTION("Cast6 Cipher Algorithm");
970 +-MODULE_ALIAS("cast6");
971 ++MODULE_ALIAS_CRYPTO("cast6");
972 ++MODULE_ALIAS_CRYPTO("cast6-generic");
973 +diff --git a/crypto/cbc.c b/crypto/cbc.c
974 +index 61ac42e1e32b..780ee27b2d43 100644
975 +--- a/crypto/cbc.c
976 ++++ b/crypto/cbc.c
977 +@@ -289,3 +289,4 @@ module_exit(crypto_cbc_module_exit);
978 +
979 + MODULE_LICENSE("GPL");
980 + MODULE_DESCRIPTION("CBC block cipher algorithm");
981 ++MODULE_ALIAS_CRYPTO("cbc");
982 +diff --git a/crypto/ccm.c b/crypto/ccm.c
983 +index 1df84217f7c9..003bbbd21a2b 100644
984 +--- a/crypto/ccm.c
985 ++++ b/crypto/ccm.c
986 +@@ -879,5 +879,6 @@ module_exit(crypto_ccm_module_exit);
987 +
988 + MODULE_LICENSE("GPL");
989 + MODULE_DESCRIPTION("Counter with CBC MAC");
990 +-MODULE_ALIAS("ccm_base");
991 +-MODULE_ALIAS("rfc4309");
992 ++MODULE_ALIAS_CRYPTO("ccm_base");
993 ++MODULE_ALIAS_CRYPTO("rfc4309");
994 ++MODULE_ALIAS_CRYPTO("ccm");
995 +diff --git a/crypto/chainiv.c b/crypto/chainiv.c
996 +index 834d8dd3d4fc..22b7e55b0e1b 100644
997 +--- a/crypto/chainiv.c
998 ++++ b/crypto/chainiv.c
999 +@@ -359,3 +359,4 @@ module_exit(chainiv_module_exit);
1000 +
1001 + MODULE_LICENSE("GPL");
1002 + MODULE_DESCRIPTION("Chain IV Generator");
1003 ++MODULE_ALIAS_CRYPTO("chainiv");
1004 +diff --git a/crypto/cmac.c b/crypto/cmac.c
1005 +index 50880cf17fad..7a8bfbd548f6 100644
1006 +--- a/crypto/cmac.c
1007 ++++ b/crypto/cmac.c
1008 +@@ -313,3 +313,4 @@ module_exit(crypto_cmac_module_exit);
1009 +
1010 + MODULE_LICENSE("GPL");
1011 + MODULE_DESCRIPTION("CMAC keyed hash algorithm");
1012 ++MODULE_ALIAS_CRYPTO("cmac");
1013 +diff --git a/crypto/crc32.c b/crypto/crc32.c
1014 +index 9d1c41569898..187ded28cb0b 100644
1015 +--- a/crypto/crc32.c
1016 ++++ b/crypto/crc32.c
1017 +@@ -156,3 +156,4 @@ module_exit(crc32_mod_fini);
1018 + MODULE_AUTHOR("Alexander Boyko <alexander_boyko@×××××××.com>");
1019 + MODULE_DESCRIPTION("CRC32 calculations wrapper for lib/crc32");
1020 + MODULE_LICENSE("GPL");
1021 ++MODULE_ALIAS_CRYPTO("crc32");
1022 +diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c
1023 +index 877e7114ec5c..c1229614c7e3 100644
1024 +--- a/crypto/crct10dif_generic.c
1025 ++++ b/crypto/crct10dif_generic.c
1026 +@@ -124,4 +124,5 @@ module_exit(crct10dif_mod_fini);
1027 + MODULE_AUTHOR("Tim Chen <tim.c.chen@×××××××××××.com>");
1028 + MODULE_DESCRIPTION("T10 DIF CRC calculation.");
1029 + MODULE_LICENSE("GPL");
1030 +-MODULE_ALIAS("crct10dif");
1031 ++MODULE_ALIAS_CRYPTO("crct10dif");
1032 ++MODULE_ALIAS_CRYPTO("crct10dif-generic");
1033 +diff --git a/crypto/cryptd.c b/crypto/cryptd.c
1034 +index 7bdd61b867c8..75c415d37086 100644
1035 +--- a/crypto/cryptd.c
1036 ++++ b/crypto/cryptd.c
1037 +@@ -955,3 +955,4 @@ module_exit(cryptd_exit);
1038 +
1039 + MODULE_LICENSE("GPL");
1040 + MODULE_DESCRIPTION("Software async crypto daemon");
1041 ++MODULE_ALIAS_CRYPTO("cryptd");
1042 +diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
1043 +index fee7265cd35d..7b39fa3deac2 100644
1044 +--- a/crypto/crypto_null.c
1045 ++++ b/crypto/crypto_null.c
1046 +@@ -149,9 +149,9 @@ static struct crypto_alg null_algs[3] = { {
1047 + .coa_decompress = null_compress } }
1048 + } };
1049 +
1050 +-MODULE_ALIAS("compress_null");
1051 +-MODULE_ALIAS("digest_null");
1052 +-MODULE_ALIAS("cipher_null");
1053 ++MODULE_ALIAS_CRYPTO("compress_null");
1054 ++MODULE_ALIAS_CRYPTO("digest_null");
1055 ++MODULE_ALIAS_CRYPTO("cipher_null");
1056 +
1057 + static int __init crypto_null_mod_init(void)
1058 + {
1059 +diff --git a/crypto/ctr.c b/crypto/ctr.c
1060 +index f2b94f27bb2c..2386f7313952 100644
1061 +--- a/crypto/ctr.c
1062 ++++ b/crypto/ctr.c
1063 +@@ -466,4 +466,5 @@ module_exit(crypto_ctr_module_exit);
1064 +
1065 + MODULE_LICENSE("GPL");
1066 + MODULE_DESCRIPTION("CTR Counter block mode");
1067 +-MODULE_ALIAS("rfc3686");
1068 ++MODULE_ALIAS_CRYPTO("rfc3686");
1069 ++MODULE_ALIAS_CRYPTO("ctr");
1070 +diff --git a/crypto/cts.c b/crypto/cts.c
1071 +index 042223f8e733..60b9da3fa7c1 100644
1072 +--- a/crypto/cts.c
1073 ++++ b/crypto/cts.c
1074 +@@ -350,3 +350,4 @@ module_exit(crypto_cts_module_exit);
1075 +
1076 + MODULE_LICENSE("Dual BSD/GPL");
1077 + MODULE_DESCRIPTION("CTS-CBC CipherText Stealing for CBC");
1078 ++MODULE_ALIAS_CRYPTO("cts");
1079 +diff --git a/crypto/deflate.c b/crypto/deflate.c
1080 +index b57d70eb156b..95d8d37c5021 100644
1081 +--- a/crypto/deflate.c
1082 ++++ b/crypto/deflate.c
1083 +@@ -222,4 +222,4 @@ module_exit(deflate_mod_fini);
1084 + MODULE_LICENSE("GPL");
1085 + MODULE_DESCRIPTION("Deflate Compression Algorithm for IPCOMP");
1086 + MODULE_AUTHOR("James Morris <jmorris@×××××××××××××.au>");
1087 +-
1088 ++MODULE_ALIAS_CRYPTO("deflate");
1089 +diff --git a/crypto/des_generic.c b/crypto/des_generic.c
1090 +index f6cf63f88468..3ec6071309d9 100644
1091 +--- a/crypto/des_generic.c
1092 ++++ b/crypto/des_generic.c
1093 +@@ -971,8 +971,6 @@ static struct crypto_alg des_algs[2] = { {
1094 + .cia_decrypt = des3_ede_decrypt } }
1095 + } };
1096 +
1097 +-MODULE_ALIAS("des3_ede");
1098 +-
1099 + static int __init des_generic_mod_init(void)
1100 + {
1101 + return crypto_register_algs(des_algs, ARRAY_SIZE(des_algs));
1102 +@@ -989,4 +987,7 @@ module_exit(des_generic_mod_fini);
1103 + MODULE_LICENSE("GPL");
1104 + MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms");
1105 + MODULE_AUTHOR("Dag Arne Osvik <da@×××××.no>");
1106 +-MODULE_ALIAS("des");
1107 ++MODULE_ALIAS_CRYPTO("des");
1108 ++MODULE_ALIAS_CRYPTO("des-generic");
1109 ++MODULE_ALIAS_CRYPTO("des3_ede");
1110 ++MODULE_ALIAS_CRYPTO("des3_ede-generic");
1111 +diff --git a/crypto/ecb.c b/crypto/ecb.c
1112 +index 935cfef4aa84..12011aff0971 100644
1113 +--- a/crypto/ecb.c
1114 ++++ b/crypto/ecb.c
1115 +@@ -185,3 +185,4 @@ module_exit(crypto_ecb_module_exit);
1116 +
1117 + MODULE_LICENSE("GPL");
1118 + MODULE_DESCRIPTION("ECB block cipher algorithm");
1119 ++MODULE_ALIAS_CRYPTO("ecb");
1120 +diff --git a/crypto/eseqiv.c b/crypto/eseqiv.c
1121 +index 42ce9f570aec..388f582ab0b9 100644
1122 +--- a/crypto/eseqiv.c
1123 ++++ b/crypto/eseqiv.c
1124 +@@ -267,3 +267,4 @@ module_exit(eseqiv_module_exit);
1125 +
1126 + MODULE_LICENSE("GPL");
1127 + MODULE_DESCRIPTION("Encrypted Sequence Number IV Generator");
1128 ++MODULE_ALIAS_CRYPTO("eseqiv");
1129 +diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c
1130 +index 021d7fec6bc8..77286ea28865 100644
1131 +--- a/crypto/fcrypt.c
1132 ++++ b/crypto/fcrypt.c
1133 +@@ -420,3 +420,4 @@ module_exit(fcrypt_mod_fini);
1134 + MODULE_LICENSE("Dual BSD/GPL");
1135 + MODULE_DESCRIPTION("FCrypt Cipher Algorithm");
1136 + MODULE_AUTHOR("David Howells <dhowells@××××××.com>");
1137 ++MODULE_ALIAS_CRYPTO("fcrypt");
1138 +diff --git a/crypto/gcm.c b/crypto/gcm.c
1139 +index b4f017939004..9cea4d0b6904 100644
1140 +--- a/crypto/gcm.c
1141 ++++ b/crypto/gcm.c
1142 +@@ -1441,6 +1441,7 @@ module_exit(crypto_gcm_module_exit);
1143 + MODULE_LICENSE("GPL");
1144 + MODULE_DESCRIPTION("Galois/Counter Mode");
1145 + MODULE_AUTHOR("Mikko Herranen <mh1@×××.fi>");
1146 +-MODULE_ALIAS("gcm_base");
1147 +-MODULE_ALIAS("rfc4106");
1148 +-MODULE_ALIAS("rfc4543");
1149 ++MODULE_ALIAS_CRYPTO("gcm_base");
1150 ++MODULE_ALIAS_CRYPTO("rfc4106");
1151 ++MODULE_ALIAS_CRYPTO("rfc4543");
1152 ++MODULE_ALIAS_CRYPTO("gcm");
1153 +diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
1154 +index 9d3f0c69a86f..bac70995e064 100644
1155 +--- a/crypto/ghash-generic.c
1156 ++++ b/crypto/ghash-generic.c
1157 +@@ -172,4 +172,5 @@ module_exit(ghash_mod_exit);
1158 +
1159 + MODULE_LICENSE("GPL");
1160 + MODULE_DESCRIPTION("GHASH Message Digest Algorithm");
1161 +-MODULE_ALIAS("ghash");
1162 ++MODULE_ALIAS_CRYPTO("ghash");
1163 ++MODULE_ALIAS_CRYPTO("ghash-generic");
1164 +diff --git a/crypto/hmac.c b/crypto/hmac.c
1165 +index 8d9544cf8169..ade790b454e9 100644
1166 +--- a/crypto/hmac.c
1167 ++++ b/crypto/hmac.c
1168 +@@ -271,3 +271,4 @@ module_exit(hmac_module_exit);
1169 +
1170 + MODULE_LICENSE("GPL");
1171 + MODULE_DESCRIPTION("HMAC hash algorithm");
1172 ++MODULE_ALIAS_CRYPTO("hmac");
1173 +diff --git a/crypto/khazad.c b/crypto/khazad.c
1174 +index 60e7cd66facc..873eb5ded6d7 100644
1175 +--- a/crypto/khazad.c
1176 ++++ b/crypto/khazad.c
1177 +@@ -880,3 +880,4 @@ module_exit(khazad_mod_fini);
1178 +
1179 + MODULE_LICENSE("GPL");
1180 + MODULE_DESCRIPTION("Khazad Cryptographic Algorithm");
1181 ++MODULE_ALIAS_CRYPTO("khazad");
1182 +diff --git a/crypto/krng.c b/crypto/krng.c
1183 +index a2d2b72fc135..0224841b6579 100644
1184 +--- a/crypto/krng.c
1185 ++++ b/crypto/krng.c
1186 +@@ -62,4 +62,5 @@ module_exit(krng_mod_fini);
1187 +
1188 + MODULE_LICENSE("GPL");
1189 + MODULE_DESCRIPTION("Kernel Random Number Generator");
1190 +-MODULE_ALIAS("stdrng");
1191 ++MODULE_ALIAS_CRYPTO("stdrng");
1192 ++MODULE_ALIAS_CRYPTO("krng");
1193 +diff --git a/crypto/lrw.c b/crypto/lrw.c
1194 +index ba42acc4deba..6f9908a7ebcb 100644
1195 +--- a/crypto/lrw.c
1196 ++++ b/crypto/lrw.c
1197 +@@ -400,3 +400,4 @@ module_exit(crypto_module_exit);
1198 +
1199 + MODULE_LICENSE("GPL");
1200 + MODULE_DESCRIPTION("LRW block cipher mode");
1201 ++MODULE_ALIAS_CRYPTO("lrw");
1202 +diff --git a/crypto/lz4.c b/crypto/lz4.c
1203 +index 4586dd15b0d8..53279ab8c3a6 100644
1204 +--- a/crypto/lz4.c
1205 ++++ b/crypto/lz4.c
1206 +@@ -104,3 +104,4 @@ module_exit(lz4_mod_fini);
1207 +
1208 + MODULE_LICENSE("GPL");
1209 + MODULE_DESCRIPTION("LZ4 Compression Algorithm");
1210 ++MODULE_ALIAS_CRYPTO("lz4");
1211 +diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c
1212 +index 151ba31d34e3..eaec5fa3debf 100644
1213 +--- a/crypto/lz4hc.c
1214 ++++ b/crypto/lz4hc.c
1215 +@@ -104,3 +104,4 @@ module_exit(lz4hc_mod_fini);
1216 +
1217 + MODULE_LICENSE("GPL");
1218 + MODULE_DESCRIPTION("LZ4HC Compression Algorithm");
1219 ++MODULE_ALIAS_CRYPTO("lz4hc");
1220 +diff --git a/crypto/lzo.c b/crypto/lzo.c
1221 +index 1c2aa69c54b8..d1ff69404353 100644
1222 +--- a/crypto/lzo.c
1223 ++++ b/crypto/lzo.c
1224 +@@ -103,3 +103,4 @@ module_exit(lzo_mod_fini);
1225 +
1226 + MODULE_LICENSE("GPL");
1227 + MODULE_DESCRIPTION("LZO Compression Algorithm");
1228 ++MODULE_ALIAS_CRYPTO("lzo");
1229 +diff --git a/crypto/md4.c b/crypto/md4.c
1230 +index 0477a6a01d58..3515af425cc9 100644
1231 +--- a/crypto/md4.c
1232 ++++ b/crypto/md4.c
1233 +@@ -255,4 +255,4 @@ module_exit(md4_mod_fini);
1234 +
1235 + MODULE_LICENSE("GPL");
1236 + MODULE_DESCRIPTION("MD4 Message Digest Algorithm");
1237 +-
1238 ++MODULE_ALIAS_CRYPTO("md4");
1239 +diff --git a/crypto/md5.c b/crypto/md5.c
1240 +index 7febeaab923b..36f5e5b103f3 100644
1241 +--- a/crypto/md5.c
1242 ++++ b/crypto/md5.c
1243 +@@ -168,3 +168,4 @@ module_exit(md5_mod_fini);
1244 +
1245 + MODULE_LICENSE("GPL");
1246 + MODULE_DESCRIPTION("MD5 Message Digest Algorithm");
1247 ++MODULE_ALIAS_CRYPTO("md5");
1248 +diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c
1249 +index 079b761bc70d..46195e0d0f4d 100644
1250 +--- a/crypto/michael_mic.c
1251 ++++ b/crypto/michael_mic.c
1252 +@@ -184,3 +184,4 @@ module_exit(michael_mic_exit);
1253 + MODULE_LICENSE("GPL v2");
1254 + MODULE_DESCRIPTION("Michael MIC");
1255 + MODULE_AUTHOR("Jouni Malinen <j@××.fi>");
1256 ++MODULE_ALIAS_CRYPTO("michael_mic");
1257 +diff --git a/crypto/pcbc.c b/crypto/pcbc.c
1258 +index d1b8bdfb5855..f654965f0933 100644
1259 +--- a/crypto/pcbc.c
1260 ++++ b/crypto/pcbc.c
1261 +@@ -295,3 +295,4 @@ module_exit(crypto_pcbc_module_exit);
1262 +
1263 + MODULE_LICENSE("GPL");
1264 + MODULE_DESCRIPTION("PCBC block cipher algorithm");
1265 ++MODULE_ALIAS_CRYPTO("pcbc");
1266 +diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
1267 +index 309d345ead95..c305d4112735 100644
1268 +--- a/crypto/pcrypt.c
1269 ++++ b/crypto/pcrypt.c
1270 +@@ -565,3 +565,4 @@ module_exit(pcrypt_exit);
1271 + MODULE_LICENSE("GPL");
1272 + MODULE_AUTHOR("Steffen Klassert <steffen.klassert@×××××××.com>");
1273 + MODULE_DESCRIPTION("Parallel crypto wrapper");
1274 ++MODULE_ALIAS_CRYPTO("pcrypt");
1275 +diff --git a/crypto/rmd128.c b/crypto/rmd128.c
1276 +index 8a0f68b7f257..049486ede938 100644
1277 +--- a/crypto/rmd128.c
1278 ++++ b/crypto/rmd128.c
1279 +@@ -327,3 +327,4 @@ module_exit(rmd128_mod_fini);
1280 + MODULE_LICENSE("GPL");
1281 + MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@××××××××.ch>");
1282 + MODULE_DESCRIPTION("RIPEMD-128 Message Digest");
1283 ++MODULE_ALIAS_CRYPTO("rmd128");
1284 +diff --git a/crypto/rmd160.c b/crypto/rmd160.c
1285 +index 525d7bb752cf..de585e51d455 100644
1286 +--- a/crypto/rmd160.c
1287 ++++ b/crypto/rmd160.c
1288 +@@ -371,3 +371,4 @@ module_exit(rmd160_mod_fini);
1289 + MODULE_LICENSE("GPL");
1290 + MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@××××××××.ch>");
1291 + MODULE_DESCRIPTION("RIPEMD-160 Message Digest");
1292 ++MODULE_ALIAS_CRYPTO("rmd160");
1293 +diff --git a/crypto/rmd256.c b/crypto/rmd256.c
1294 +index 69293d9b56e0..4ec02a754e09 100644
1295 +--- a/crypto/rmd256.c
1296 ++++ b/crypto/rmd256.c
1297 +@@ -346,3 +346,4 @@ module_exit(rmd256_mod_fini);
1298 + MODULE_LICENSE("GPL");
1299 + MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@××××××××.ch>");
1300 + MODULE_DESCRIPTION("RIPEMD-256 Message Digest");
1301 ++MODULE_ALIAS_CRYPTO("rmd256");
1302 +diff --git a/crypto/rmd320.c b/crypto/rmd320.c
1303 +index 09f97dfdfbba..770f2cb369f8 100644
1304 +--- a/crypto/rmd320.c
1305 ++++ b/crypto/rmd320.c
1306 +@@ -395,3 +395,4 @@ module_exit(rmd320_mod_fini);
1307 + MODULE_LICENSE("GPL");
1308 + MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@××××××××.ch>");
1309 + MODULE_DESCRIPTION("RIPEMD-320 Message Digest");
1310 ++MODULE_ALIAS_CRYPTO("rmd320");
1311 +diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
1312 +index 9a4770c02284..f550b5d94630 100644
1313 +--- a/crypto/salsa20_generic.c
1314 ++++ b/crypto/salsa20_generic.c
1315 +@@ -248,4 +248,5 @@ module_exit(salsa20_generic_mod_fini);
1316 +
1317 + MODULE_LICENSE("GPL");
1318 + MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm");
1319 +-MODULE_ALIAS("salsa20");
1320 ++MODULE_ALIAS_CRYPTO("salsa20");
1321 ++MODULE_ALIAS_CRYPTO("salsa20-generic");
1322 +diff --git a/crypto/seed.c b/crypto/seed.c
1323 +index 9c904d6d2151..c6ba8438be43 100644
1324 +--- a/crypto/seed.c
1325 ++++ b/crypto/seed.c
1326 +@@ -476,3 +476,4 @@ module_exit(seed_fini);
1327 + MODULE_DESCRIPTION("SEED Cipher Algorithm");
1328 + MODULE_LICENSE("GPL");
1329 + MODULE_AUTHOR("Hye-Shik Chang <perky@×××××××.org>, Kim Hyun <hkim@×××××××.kr>");
1330 ++MODULE_ALIAS_CRYPTO("seed");
1331 +diff --git a/crypto/seqiv.c b/crypto/seqiv.c
1332 +index f2cba4ed6f25..49a4069ff453 100644
1333 +--- a/crypto/seqiv.c
1334 ++++ b/crypto/seqiv.c
1335 +@@ -362,3 +362,4 @@ module_exit(seqiv_module_exit);
1336 +
1337 + MODULE_LICENSE("GPL");
1338 + MODULE_DESCRIPTION("Sequence Number IV Generator");
1339 ++MODULE_ALIAS_CRYPTO("seqiv");
1340 +diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c
1341 +index 7ddbd7e88859..94970a794975 100644
1342 +--- a/crypto/serpent_generic.c
1343 ++++ b/crypto/serpent_generic.c
1344 +@@ -665,5 +665,6 @@ module_exit(serpent_mod_fini);
1345 + MODULE_LICENSE("GPL");
1346 + MODULE_DESCRIPTION("Serpent and tnepres (kerneli compatible serpent reversed) Cipher Algorithm");
1347 + MODULE_AUTHOR("Dag Arne Osvik <osvik@××××××.no>");
1348 +-MODULE_ALIAS("tnepres");
1349 +-MODULE_ALIAS("serpent");
1350 ++MODULE_ALIAS_CRYPTO("tnepres");
1351 ++MODULE_ALIAS_CRYPTO("serpent");
1352 ++MODULE_ALIAS_CRYPTO("serpent-generic");
1353 +diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
1354 +index 42794803c480..fdf7c00de4b0 100644
1355 +--- a/crypto/sha1_generic.c
1356 ++++ b/crypto/sha1_generic.c
1357 +@@ -153,4 +153,5 @@ module_exit(sha1_generic_mod_fini);
1358 + MODULE_LICENSE("GPL");
1359 + MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
1360 +
1361 +-MODULE_ALIAS("sha1");
1362 ++MODULE_ALIAS_CRYPTO("sha1");
1363 ++MODULE_ALIAS_CRYPTO("sha1-generic");
1364 +diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
1365 +index 543366779524..136381bdd48d 100644
1366 +--- a/crypto/sha256_generic.c
1367 ++++ b/crypto/sha256_generic.c
1368 +@@ -384,5 +384,7 @@ module_exit(sha256_generic_mod_fini);
1369 + MODULE_LICENSE("GPL");
1370 + MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm");
1371 +
1372 +-MODULE_ALIAS("sha224");
1373 +-MODULE_ALIAS("sha256");
1374 ++MODULE_ALIAS_CRYPTO("sha224");
1375 ++MODULE_ALIAS_CRYPTO("sha224-generic");
1376 ++MODULE_ALIAS_CRYPTO("sha256");
1377 ++MODULE_ALIAS_CRYPTO("sha256-generic");
1378 +diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
1379 +index 6ed124f3ea0f..6c6d901a7cc1 100644
1380 +--- a/crypto/sha512_generic.c
1381 ++++ b/crypto/sha512_generic.c
1382 +@@ -287,5 +287,7 @@ module_exit(sha512_generic_mod_fini);
1383 + MODULE_LICENSE("GPL");
1384 + MODULE_DESCRIPTION("SHA-512 and SHA-384 Secure Hash Algorithms");
1385 +
1386 +-MODULE_ALIAS("sha384");
1387 +-MODULE_ALIAS("sha512");
1388 ++MODULE_ALIAS_CRYPTO("sha384");
1389 ++MODULE_ALIAS_CRYPTO("sha384-generic");
1390 ++MODULE_ALIAS_CRYPTO("sha512");
1391 ++MODULE_ALIAS_CRYPTO("sha512-generic");
1392 +diff --git a/crypto/tea.c b/crypto/tea.c
1393 +index 0a572323ee4a..b70b441c7d1e 100644
1394 +--- a/crypto/tea.c
1395 ++++ b/crypto/tea.c
1396 +@@ -270,8 +270,9 @@ static void __exit tea_mod_fini(void)
1397 + crypto_unregister_algs(tea_algs, ARRAY_SIZE(tea_algs));
1398 + }
1399 +
1400 +-MODULE_ALIAS("xtea");
1401 +-MODULE_ALIAS("xeta");
1402 ++MODULE_ALIAS_CRYPTO("tea");
1403 ++MODULE_ALIAS_CRYPTO("xtea");
1404 ++MODULE_ALIAS_CRYPTO("xeta");
1405 +
1406 + module_init(tea_mod_init);
1407 + module_exit(tea_mod_fini);
1408 +diff --git a/crypto/tgr192.c b/crypto/tgr192.c
1409 +index 87403556fd0b..f7ed2fba396c 100644
1410 +--- a/crypto/tgr192.c
1411 ++++ b/crypto/tgr192.c
1412 +@@ -676,8 +676,9 @@ static void __exit tgr192_mod_fini(void)
1413 + crypto_unregister_shashes(tgr_algs, ARRAY_SIZE(tgr_algs));
1414 + }
1415 +
1416 +-MODULE_ALIAS("tgr160");
1417 +-MODULE_ALIAS("tgr128");
1418 ++MODULE_ALIAS_CRYPTO("tgr192");
1419 ++MODULE_ALIAS_CRYPTO("tgr160");
1420 ++MODULE_ALIAS_CRYPTO("tgr128");
1421 +
1422 + module_init(tgr192_mod_init);
1423 + module_exit(tgr192_mod_fini);
1424 +diff --git a/crypto/twofish_generic.c b/crypto/twofish_generic.c
1425 +index 2d5000552d0f..ebf7a3efb572 100644
1426 +--- a/crypto/twofish_generic.c
1427 ++++ b/crypto/twofish_generic.c
1428 +@@ -211,4 +211,5 @@ module_exit(twofish_mod_fini);
1429 +
1430 + MODULE_LICENSE("GPL");
1431 + MODULE_DESCRIPTION ("Twofish Cipher Algorithm");
1432 +-MODULE_ALIAS("twofish");
1433 ++MODULE_ALIAS_CRYPTO("twofish");
1434 ++MODULE_ALIAS_CRYPTO("twofish-generic");
1435 +diff --git a/crypto/vmac.c b/crypto/vmac.c
1436 +index 2eb11a30c29c..bf2d3a89845f 100644
1437 +--- a/crypto/vmac.c
1438 ++++ b/crypto/vmac.c
1439 +@@ -713,3 +713,4 @@ module_exit(vmac_module_exit);
1440 +
1441 + MODULE_LICENSE("GPL");
1442 + MODULE_DESCRIPTION("VMAC hash algorithm");
1443 ++MODULE_ALIAS_CRYPTO("vmac");
1444 +diff --git a/crypto/wp512.c b/crypto/wp512.c
1445 +index 180f1d6e03f4..253db94b5479 100644
1446 +--- a/crypto/wp512.c
1447 ++++ b/crypto/wp512.c
1448 +@@ -1167,8 +1167,9 @@ static void __exit wp512_mod_fini(void)
1449 + crypto_unregister_shashes(wp_algs, ARRAY_SIZE(wp_algs));
1450 + }
1451 +
1452 +-MODULE_ALIAS("wp384");
1453 +-MODULE_ALIAS("wp256");
1454 ++MODULE_ALIAS_CRYPTO("wp512");
1455 ++MODULE_ALIAS_CRYPTO("wp384");
1456 ++MODULE_ALIAS_CRYPTO("wp256");
1457 +
1458 + module_init(wp512_mod_init);
1459 + module_exit(wp512_mod_fini);
1460 +diff --git a/crypto/xcbc.c b/crypto/xcbc.c
1461 +index a5fbdf3738cf..df90b332554c 100644
1462 +--- a/crypto/xcbc.c
1463 ++++ b/crypto/xcbc.c
1464 +@@ -286,3 +286,4 @@ module_exit(crypto_xcbc_module_exit);
1465 +
1466 + MODULE_LICENSE("GPL");
1467 + MODULE_DESCRIPTION("XCBC keyed hash algorithm");
1468 ++MODULE_ALIAS_CRYPTO("xcbc");
1469 +diff --git a/crypto/xts.c b/crypto/xts.c
1470 +index ca1608f44cb5..f6fd43f100c8 100644
1471 +--- a/crypto/xts.c
1472 ++++ b/crypto/xts.c
1473 +@@ -362,3 +362,4 @@ module_exit(crypto_module_exit);
1474 +
1475 + MODULE_LICENSE("GPL");
1476 + MODULE_DESCRIPTION("XTS block cipher mode");
1477 ++MODULE_ALIAS_CRYPTO("xts");
1478 +diff --git a/crypto/zlib.c b/crypto/zlib.c
1479 +index 06b62e5cdcc7..d98078835281 100644
1480 +--- a/crypto/zlib.c
1481 ++++ b/crypto/zlib.c
1482 +@@ -378,3 +378,4 @@ module_exit(zlib_mod_fini);
1483 + MODULE_LICENSE("GPL");
1484 + MODULE_DESCRIPTION("Zlib Compression Algorithm");
1485 + MODULE_AUTHOR("Sony Corporation");
1486 ++MODULE_ALIAS_CRYPTO("zlib");
1487 +diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
1488 +index 37acda6fa7e4..136803c47cdb 100644
1489 +--- a/drivers/ata/libata-sff.c
1490 ++++ b/drivers/ata/libata-sff.c
1491 +@@ -1333,7 +1333,19 @@ void ata_sff_flush_pio_task(struct ata_port *ap)
1492 + DPRINTK("ENTER\n");
1493 +
1494 + cancel_delayed_work_sync(&ap->sff_pio_task);
1495 ++
1496 ++ /*
1497 ++ * We wanna reset the HSM state to IDLE. If we do so without
1498 ++ * grabbing the port lock, critical sections protected by it which
1499 ++ * expect the HSM state to stay stable may get surprised. For
1500 ++ * example, we may set IDLE in between the time
1501 ++ * __ata_sff_port_intr() checks for HSM_ST_IDLE and before it calls
1502 ++ * ata_sff_hsm_move() causing ata_sff_hsm_move() to BUG().
1503 ++ */
1504 ++ spin_lock_irq(ap->lock);
1505 + ap->hsm_task_state = HSM_ST_IDLE;
1506 ++ spin_unlock_irq(ap->lock);
1507 ++
1508 + ap->sff_pio_task_link = NULL;
1509 +
1510 + if (ata_msg_ctl(ap))
1511 +diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
1512 +index 523524b68022..f71e09d6cfe6 100644
1513 +--- a/drivers/ata/sata_dwc_460ex.c
1514 ++++ b/drivers/ata/sata_dwc_460ex.c
1515 +@@ -799,7 +799,7 @@ static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq)
1516 + if (err) {
1517 + dev_err(host_pvt.dwc_dev, "%s: dma_request_interrupts returns"
1518 + " %d\n", __func__, err);
1519 +- goto error_out;
1520 ++ return err;
1521 + }
1522 +
1523 + /* Enabe DMA */
1524 +@@ -810,11 +810,6 @@ static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq)
1525 + sata_dma_regs);
1526 +
1527 + return 0;
1528 +-
1529 +-error_out:
1530 +- dma_dwc_exit(hsdev);
1531 +-
1532 +- return err;
1533 + }
1534 +
1535 + static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
1536 +@@ -1664,7 +1659,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
1537 + char *ver = (char *)&versionr;
1538 + u8 *base = NULL;
1539 + int err = 0;
1540 +- int irq, rc;
1541 ++ int irq;
1542 + struct ata_host *host;
1543 + struct ata_port_info pi = sata_dwc_port_info[0];
1544 + const struct ata_port_info *ppi[] = { &pi, NULL };
1545 +@@ -1727,7 +1722,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
1546 + if (irq == NO_IRQ) {
1547 + dev_err(&ofdev->dev, "no SATA DMA irq\n");
1548 + err = -ENODEV;
1549 +- goto error_out;
1550 ++ goto error_iomap;
1551 + }
1552 +
1553 + /* Get physical SATA DMA register base address */
1554 +@@ -1736,14 +1731,16 @@ static int sata_dwc_probe(struct platform_device *ofdev)
1555 + dev_err(&ofdev->dev, "ioremap failed for AHBDMA register"
1556 + " address\n");
1557 + err = -ENODEV;
1558 +- goto error_out;
1559 ++ goto error_iomap;
1560 + }
1561 +
1562 + /* Save dev for later use in dev_xxx() routines */
1563 + host_pvt.dwc_dev = &ofdev->dev;
1564 +
1565 + /* Initialize AHB DMAC */
1566 +- dma_dwc_init(hsdev, irq);
1567 ++ err = dma_dwc_init(hsdev, irq);
1568 ++ if (err)
1569 ++ goto error_dma_iomap;
1570 +
1571 + /* Enable SATA Interrupts */
1572 + sata_dwc_enable_interrupts(hsdev);
1573 +@@ -1761,9 +1758,8 @@ static int sata_dwc_probe(struct platform_device *ofdev)
1574 + * device discovery process, invoking our port_start() handler &
1575 + * error_handler() to execute a dummy Softreset EH session
1576 + */
1577 +- rc = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
1578 +-
1579 +- if (rc != 0)
1580 ++ err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
1581 ++ if (err)
1582 + dev_err(&ofdev->dev, "failed to activate host");
1583 +
1584 + dev_set_drvdata(&ofdev->dev, host);
1585 +@@ -1772,7 +1768,8 @@ static int sata_dwc_probe(struct platform_device *ofdev)
1586 + error_out:
1587 + /* Free SATA DMA resources */
1588 + dma_dwc_exit(hsdev);
1589 +-
1590 ++error_dma_iomap:
1591 ++ iounmap((void __iomem *)host_pvt.sata_dma_regs);
1592 + error_iomap:
1593 + iounmap(base);
1594 + error_kmalloc:
1595 +@@ -1793,6 +1790,7 @@ static int sata_dwc_remove(struct platform_device *ofdev)
1596 + /* Free SATA DMA resources */
1597 + dma_dwc_exit(hsdev);
1598 +
1599 ++ iounmap((void __iomem *)host_pvt.sata_dma_regs);
1600 + iounmap(hsdev->reg_base);
1601 + kfree(hsdev);
1602 + kfree(host);
1603 +diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
1604 +index 104a040f24de..6efdbeafa33c 100644
1605 +--- a/drivers/block/drbd/drbd_req.c
1606 ++++ b/drivers/block/drbd/drbd_req.c
1607 +@@ -1310,6 +1310,7 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct
1608 + struct request_queue * const b =
1609 + mdev->ldev->backing_bdev->bd_disk->queue;
1610 + if (b->merge_bvec_fn) {
1611 ++ bvm->bi_bdev = mdev->ldev->backing_bdev;
1612 + backing_limit = b->merge_bvec_fn(b, bvm, bvec);
1613 + limit = min(limit, backing_limit);
1614 + }
1615 +diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
1616 +index 372ae72cce34..e990deed2d33 100644
1617 +--- a/drivers/bus/mvebu-mbus.c
1618 ++++ b/drivers/bus/mvebu-mbus.c
1619 +@@ -181,12 +181,25 @@ static void mvebu_mbus_disable_window(struct mvebu_mbus_state *mbus,
1620 + }
1621 +
1622 + /* Checks whether the given window number is available */
1623 ++
1624 ++/* On Armada XP, 375 and 38x the MBus window 13 has the remap
1625 ++ * capability, like windows 0 to 7. However, the mvebu-mbus driver
1626 ++ * isn't currently taking into account this special case, which means
1627 ++ * that when window 13 is actually used, the remap registers are left
1628 ++ * to 0, making the device using this MBus window unavailable. The
1629 ++ * quick fix for stable is to not use window 13. A follow up patch
1630 ++ * will correctly handle this window.
1631 ++*/
1632 + static int mvebu_mbus_window_is_free(struct mvebu_mbus_state *mbus,
1633 + const int win)
1634 + {
1635 + void __iomem *addr = mbus->mbuswins_base +
1636 + mbus->soc->win_cfg_offset(win);
1637 + u32 ctrl = readl(addr + WIN_CTRL_OFF);
1638 ++
1639 ++ if (win == 13)
1640 ++ return false;
1641 ++
1642 + return !(ctrl & WIN_CTRL_ENABLE);
1643 + }
1644 +
1645 +diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
1646 +index e252939b9ee1..831b48287a22 100644
1647 +--- a/drivers/clocksource/exynos_mct.c
1648 ++++ b/drivers/clocksource/exynos_mct.c
1649 +@@ -98,8 +98,8 @@ static void exynos4_mct_write(unsigned int value, unsigned long offset)
1650 + __raw_writel(value, reg_base + offset);
1651 +
1652 + if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) {
1653 +- stat_addr = (offset & ~EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
1654 +- switch (offset & EXYNOS4_MCT_L_MASK) {
1655 ++ stat_addr = (offset & EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
1656 ++ switch (offset & ~EXYNOS4_MCT_L_MASK) {
1657 + case MCT_L_TCON_OFFSET:
1658 + mask = 1 << 3; /* L_TCON write status */
1659 + break;
1660 +diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
1661 +index 633ba945e153..c178ed8c3908 100644
1662 +--- a/drivers/crypto/padlock-aes.c
1663 ++++ b/drivers/crypto/padlock-aes.c
1664 +@@ -563,4 +563,4 @@ MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
1665 + MODULE_LICENSE("GPL");
1666 + MODULE_AUTHOR("Michal Ludvig");
1667 +
1668 +-MODULE_ALIAS("aes");
1669 ++MODULE_ALIAS_CRYPTO("aes");
1670 +diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
1671 +index 9266c0e25492..93d7753ab38a 100644
1672 +--- a/drivers/crypto/padlock-sha.c
1673 ++++ b/drivers/crypto/padlock-sha.c
1674 +@@ -593,7 +593,7 @@ MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
1675 + MODULE_LICENSE("GPL");
1676 + MODULE_AUTHOR("Michal Ludvig");
1677 +
1678 +-MODULE_ALIAS("sha1-all");
1679 +-MODULE_ALIAS("sha256-all");
1680 +-MODULE_ALIAS("sha1-padlock");
1681 +-MODULE_ALIAS("sha256-padlock");
1682 ++MODULE_ALIAS_CRYPTO("sha1-all");
1683 ++MODULE_ALIAS_CRYPTO("sha256-all");
1684 ++MODULE_ALIAS_CRYPTO("sha1-padlock");
1685 ++MODULE_ALIAS_CRYPTO("sha256-padlock");
1686 +diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
1687 +index 92105f3dc8e0..e4cea7c45142 100644
1688 +--- a/drivers/crypto/ux500/cryp/cryp_core.c
1689 ++++ b/drivers/crypto/ux500/cryp/cryp_core.c
1690 +@@ -1810,7 +1810,7 @@ module_exit(ux500_cryp_mod_fini);
1691 + module_param(cryp_mode, int, 0);
1692 +
1693 + MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 CRYP crypto engine.");
1694 +-MODULE_ALIAS("aes-all");
1695 +-MODULE_ALIAS("des-all");
1696 ++MODULE_ALIAS_CRYPTO("aes-all");
1697 ++MODULE_ALIAS_CRYPTO("des-all");
1698 +
1699 + MODULE_LICENSE("GPL");
1700 +diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
1701 +index 1c73f4fbc252..8e5e0187506f 100644
1702 +--- a/drivers/crypto/ux500/hash/hash_core.c
1703 ++++ b/drivers/crypto/ux500/hash/hash_core.c
1704 +@@ -1995,7 +1995,7 @@ module_exit(ux500_hash_mod_fini);
1705 + MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
1706 + MODULE_LICENSE("GPL");
1707 +
1708 +-MODULE_ALIAS("sha1-all");
1709 +-MODULE_ALIAS("sha256-all");
1710 +-MODULE_ALIAS("hmac-sha1-all");
1711 +-MODULE_ALIAS("hmac-sha256-all");
1712 ++MODULE_ALIAS_CRYPTO("sha1-all");
1713 ++MODULE_ALIAS_CRYPTO("sha256-all");
1714 ++MODULE_ALIAS_CRYPTO("hmac-sha1-all");
1715 ++MODULE_ALIAS_CRYPTO("hmac-sha256-all");
1716 +diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
1717 +index 5b88c83888d1..ccbffd0d7a02 100644
1718 +--- a/drivers/gpio/gpiolib.c
1719 ++++ b/drivers/gpio/gpiolib.c
1720 +@@ -408,7 +408,7 @@ static ssize_t gpio_value_store(struct device *dev,
1721 + return status;
1722 + }
1723 +
1724 +-static const DEVICE_ATTR(value, 0644,
1725 ++static DEVICE_ATTR(value, 0644,
1726 + gpio_value_show, gpio_value_store);
1727 +
1728 + static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
1729 +@@ -633,18 +633,16 @@ static ssize_t gpio_active_low_store(struct device *dev,
1730 + return status ? : size;
1731 + }
1732 +
1733 +-static const DEVICE_ATTR(active_low, 0644,
1734 ++static DEVICE_ATTR(active_low, 0644,
1735 + gpio_active_low_show, gpio_active_low_store);
1736 +
1737 +-static const struct attribute *gpio_attrs[] = {
1738 ++static struct attribute *gpio_attrs[] = {
1739 + &dev_attr_value.attr,
1740 + &dev_attr_active_low.attr,
1741 + NULL,
1742 + };
1743 +
1744 +-static const struct attribute_group gpio_attr_group = {
1745 +- .attrs = (struct attribute **) gpio_attrs,
1746 +-};
1747 ++ATTRIBUTE_GROUPS(gpio);
1748 +
1749 + /*
1750 + * /sys/class/gpio/gpiochipN/
1751 +@@ -680,16 +678,13 @@ static ssize_t chip_ngpio_show(struct device *dev,
1752 + }
1753 + static DEVICE_ATTR(ngpio, 0444, chip_ngpio_show, NULL);
1754 +
1755 +-static const struct attribute *gpiochip_attrs[] = {
1756 ++static struct attribute *gpiochip_attrs[] = {
1757 + &dev_attr_base.attr,
1758 + &dev_attr_label.attr,
1759 + &dev_attr_ngpio.attr,
1760 + NULL,
1761 + };
1762 +-
1763 +-static const struct attribute_group gpiochip_attr_group = {
1764 +- .attrs = (struct attribute **) gpiochip_attrs,
1765 +-};
1766 ++ATTRIBUTE_GROUPS(gpiochip);
1767 +
1768 + /*
1769 + * /sys/class/gpio/export ... write-only
1770 +@@ -844,18 +839,15 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
1771 + if (desc->chip->names && desc->chip->names[offset])
1772 + ioname = desc->chip->names[offset];
1773 +
1774 +- dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
1775 +- desc, ioname ? ioname : "gpio%u",
1776 +- desc_to_gpio(desc));
1777 ++ dev = device_create_with_groups(&gpio_class, desc->chip->dev,
1778 ++ MKDEV(0, 0), desc, gpio_groups,
1779 ++ ioname ? ioname : "gpio%u",
1780 ++ desc_to_gpio(desc));
1781 + if (IS_ERR(dev)) {
1782 + status = PTR_ERR(dev);
1783 + goto fail_unlock;
1784 + }
1785 +
1786 +- status = sysfs_create_group(&dev->kobj, &gpio_attr_group);
1787 +- if (status)
1788 +- goto fail_unregister_device;
1789 +-
1790 + if (direction_may_change) {
1791 + status = device_create_file(dev, &dev_attr_direction);
1792 + if (status)
1793 +@@ -866,13 +858,15 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
1794 + !test_bit(FLAG_IS_OUT, &desc->flags))) {
1795 + status = device_create_file(dev, &dev_attr_edge);
1796 + if (status)
1797 +- goto fail_unregister_device;
1798 ++ goto fail_remove_attr_direction;
1799 + }
1800 +
1801 + set_bit(FLAG_EXPORT, &desc->flags);
1802 + mutex_unlock(&sysfs_lock);
1803 + return 0;
1804 +
1805 ++fail_remove_attr_direction:
1806 ++ device_remove_file(dev, &dev_attr_direction);
1807 + fail_unregister_device:
1808 + device_unregister(dev);
1809 + fail_unlock:
1810 +@@ -1006,6 +1000,8 @@ void gpiod_unexport(struct gpio_desc *desc)
1811 + mutex_unlock(&sysfs_lock);
1812 +
1813 + if (dev) {
1814 ++ device_remove_file(dev, &dev_attr_edge);
1815 ++ device_remove_file(dev, &dev_attr_direction);
1816 + device_unregister(dev);
1817 + put_device(dev);
1818 + }
1819 +@@ -1030,13 +1026,13 @@ static int gpiochip_export(struct gpio_chip *chip)
1820 +
1821 + /* use chip->base for the ID; it's already known to be unique */
1822 + mutex_lock(&sysfs_lock);
1823 +- dev = device_create(&gpio_class, chip->dev, MKDEV(0, 0), chip,
1824 +- "gpiochip%d", chip->base);
1825 +- if (!IS_ERR(dev)) {
1826 +- status = sysfs_create_group(&dev->kobj,
1827 +- &gpiochip_attr_group);
1828 +- } else
1829 ++ dev = device_create_with_groups(&gpio_class, chip->dev, MKDEV(0, 0),
1830 ++ chip, gpiochip_groups,
1831 ++ "gpiochip%d", chip->base);
1832 ++ if (IS_ERR(dev))
1833 + status = PTR_ERR(dev);
1834 ++ else
1835 ++ status = 0;
1836 + chip->exported = (status == 0);
1837 + mutex_unlock(&sysfs_lock);
1838 +
1839 +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
1840 +index 7410a507eacc..3153eabde39b 100644
1841 +--- a/drivers/gpu/drm/i915/i915_gem.c
1842 ++++ b/drivers/gpu/drm/i915/i915_gem.c
1843 +@@ -4978,7 +4978,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
1844 + if (!mutex_is_locked(mutex))
1845 + return false;
1846 +
1847 +-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
1848 ++#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES)
1849 + return mutex->owner == task;
1850 + #else
1851 + /* Since UP may be pre-empted, we cannot assume that we own the lock */
1852 +diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
1853 +index 5600d4c5f981..64d6cfba9952 100644
1854 +--- a/drivers/gpu/drm/radeon/radeon_asic.c
1855 ++++ b/drivers/gpu/drm/radeon/radeon_asic.c
1856 +@@ -335,6 +335,20 @@ static struct radeon_asic_ring r300_gfx_ring = {
1857 + .set_wptr = &r100_gfx_set_wptr,
1858 + };
1859 +
1860 ++static struct radeon_asic_ring rv515_gfx_ring = {
1861 ++ .ib_execute = &r100_ring_ib_execute,
1862 ++ .emit_fence = &r300_fence_ring_emit,
1863 ++ .emit_semaphore = &r100_semaphore_ring_emit,
1864 ++ .cs_parse = &r300_cs_parse,
1865 ++ .ring_start = &rv515_ring_start,
1866 ++ .ring_test = &r100_ring_test,
1867 ++ .ib_test = &r100_ib_test,
1868 ++ .is_lockup = &r100_gpu_is_lockup,
1869 ++ .get_rptr = &r100_gfx_get_rptr,
1870 ++ .get_wptr = &r100_gfx_get_wptr,
1871 ++ .set_wptr = &r100_gfx_set_wptr,
1872 ++};
1873 ++
1874 + static struct radeon_asic r300_asic = {
1875 + .init = &r300_init,
1876 + .fini = &r300_fini,
1877 +@@ -756,7 +770,7 @@ static struct radeon_asic rv515_asic = {
1878 + .set_page = &rv370_pcie_gart_set_page,
1879 + },
1880 + .ring = {
1881 +- [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
1882 ++ [RADEON_RING_TYPE_GFX_INDEX] = &rv515_gfx_ring
1883 + },
1884 + .irq = {
1885 + .set = &rs600_irq_set,
1886 +@@ -823,7 +837,7 @@ static struct radeon_asic r520_asic = {
1887 + .set_page = &rv370_pcie_gart_set_page,
1888 + },
1889 + .ring = {
1890 +- [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
1891 ++ [RADEON_RING_TYPE_GFX_INDEX] = &rv515_gfx_ring
1892 + },
1893 + .irq = {
1894 + .set = &rs600_irq_set,
1895 +diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
1896 +index cfb513f933d5..0095ee7fce34 100644
1897 +--- a/drivers/gpu/drm/radeon/radeon_pm.c
1898 ++++ b/drivers/gpu/drm/radeon/radeon_pm.c
1899 +@@ -1260,8 +1260,39 @@ dpm_failed:
1900 + return ret;
1901 + }
1902 +
1903 ++struct radeon_dpm_quirk {
1904 ++ u32 chip_vendor;
1905 ++ u32 chip_device;
1906 ++ u32 subsys_vendor;
1907 ++ u32 subsys_device;
1908 ++};
1909 ++
1910 ++/* cards with dpm stability problems */
1911 ++static struct radeon_dpm_quirk radeon_dpm_quirk_list[] = {
1912 ++ /* TURKS - https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1386534 */
1913 ++ { PCI_VENDOR_ID_ATI, 0x6759, 0x1682, 0x3195 },
1914 ++ /* TURKS - https://bugzilla.kernel.org/show_bug.cgi?id=83731 */
1915 ++ { PCI_VENDOR_ID_ATI, 0x6840, 0x1179, 0xfb81 },
1916 ++ { 0, 0, 0, 0 },
1917 ++};
1918 ++
1919 + int radeon_pm_init(struct radeon_device *rdev)
1920 + {
1921 ++ struct radeon_dpm_quirk *p = radeon_dpm_quirk_list;
1922 ++ bool disable_dpm = false;
1923 ++
1924 ++ /* Apply dpm quirks */
1925 ++ while (p && p->chip_device != 0) {
1926 ++ if (rdev->pdev->vendor == p->chip_vendor &&
1927 ++ rdev->pdev->device == p->chip_device &&
1928 ++ rdev->pdev->subsystem_vendor == p->subsys_vendor &&
1929 ++ rdev->pdev->subsystem_device == p->subsys_device) {
1930 ++ disable_dpm = true;
1931 ++ break;
1932 ++ }
1933 ++ ++p;
1934 ++ }
1935 ++
1936 + /* enable dpm on rv6xx+ */
1937 + switch (rdev->family) {
1938 + case CHIP_RV610:
1939 +@@ -1316,6 +1347,8 @@ int radeon_pm_init(struct radeon_device *rdev)
1940 + (!(rdev->flags & RADEON_IS_IGP)) &&
1941 + (!rdev->smc_fw))
1942 + rdev->pm.pm_method = PM_METHOD_PROFILE;
1943 ++ else if (disable_dpm && (radeon_dpm == -1))
1944 ++ rdev->pm.pm_method = PM_METHOD_PROFILE;
1945 + else if (radeon_dpm == 0)
1946 + rdev->pm.pm_method = PM_METHOD_PROFILE;
1947 + else
1948 +diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
1949 +index 879e62844b2b..35bf2bba69bf 100644
1950 +--- a/drivers/gpu/drm/radeon/si_dpm.c
1951 ++++ b/drivers/gpu/drm/radeon/si_dpm.c
1952 +@@ -2900,6 +2900,22 @@ static int si_init_smc_spll_table(struct radeon_device *rdev)
1953 + return ret;
1954 + }
1955 +
1956 ++struct si_dpm_quirk {
1957 ++ u32 chip_vendor;
1958 ++ u32 chip_device;
1959 ++ u32 subsys_vendor;
1960 ++ u32 subsys_device;
1961 ++ u32 max_sclk;
1962 ++ u32 max_mclk;
1963 ++};
1964 ++
1965 ++/* cards with dpm stability problems */
1966 ++static struct si_dpm_quirk si_dpm_quirk_list[] = {
1967 ++ /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
1968 ++ { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
1969 ++ { 0, 0, 0, 0 },
1970 ++};
1971 ++
1972 + static void si_apply_state_adjust_rules(struct radeon_device *rdev,
1973 + struct radeon_ps *rps)
1974 + {
1975 +@@ -2910,7 +2926,22 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
1976 + u32 mclk, sclk;
1977 + u16 vddc, vddci;
1978 + u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
1979 ++ u32 max_sclk = 0, max_mclk = 0;
1980 + int i;
1981 ++ struct si_dpm_quirk *p = si_dpm_quirk_list;
1982 ++
1983 ++ /* Apply dpm quirks */
1984 ++ while (p && p->chip_device != 0) {
1985 ++ if (rdev->pdev->vendor == p->chip_vendor &&
1986 ++ rdev->pdev->device == p->chip_device &&
1987 ++ rdev->pdev->subsystem_vendor == p->subsys_vendor &&
1988 ++ rdev->pdev->subsystem_device == p->subsys_device) {
1989 ++ max_sclk = p->max_sclk;
1990 ++ max_mclk = p->max_mclk;
1991 ++ break;
1992 ++ }
1993 ++ ++p;
1994 ++ }
1995 +
1996 + if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
1997 + ni_dpm_vblank_too_short(rdev))
1998 +@@ -2964,6 +2995,14 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
1999 + if (ps->performance_levels[i].mclk > max_mclk_vddc)
2000 + ps->performance_levels[i].mclk = max_mclk_vddc;
2001 + }
2002 ++ if (max_mclk) {
2003 ++ if (ps->performance_levels[i].mclk > max_mclk)
2004 ++ ps->performance_levels[i].mclk = max_mclk;
2005 ++ }
2006 ++ if (max_sclk) {
2007 ++ if (ps->performance_levels[i].sclk > max_sclk)
2008 ++ ps->performance_levels[i].sclk = max_sclk;
2009 ++ }
2010 + }
2011 +
2012 + /* XXX validate the min clocks required for display */
2013 +diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
2014 +index ce953d895f5b..fb787c3e88d9 100644
2015 +--- a/drivers/input/evdev.c
2016 ++++ b/drivers/input/evdev.c
2017 +@@ -757,20 +757,23 @@ static int evdev_handle_set_keycode_v2(struct input_dev *dev, void __user *p)
2018 + */
2019 + static int evdev_handle_get_val(struct evdev_client *client,
2020 + struct input_dev *dev, unsigned int type,
2021 +- unsigned long *bits, unsigned int max,
2022 +- unsigned int size, void __user *p, int compat)
2023 ++ unsigned long *bits, unsigned int maxbit,
2024 ++ unsigned int maxlen, void __user *p,
2025 ++ int compat)
2026 + {
2027 + int ret;
2028 + unsigned long *mem;
2029 ++ size_t len;
2030 +
2031 +- mem = kmalloc(sizeof(unsigned long) * max, GFP_KERNEL);
2032 ++ len = BITS_TO_LONGS(maxbit) * sizeof(unsigned long);
2033 ++ mem = kmalloc(len, GFP_KERNEL);
2034 + if (!mem)
2035 + return -ENOMEM;
2036 +
2037 + spin_lock_irq(&dev->event_lock);
2038 + spin_lock(&client->buffer_lock);
2039 +
2040 +- memcpy(mem, bits, sizeof(unsigned long) * max);
2041 ++ memcpy(mem, bits, len);
2042 +
2043 + spin_unlock(&dev->event_lock);
2044 +
2045 +@@ -778,7 +781,7 @@ static int evdev_handle_get_val(struct evdev_client *client,
2046 +
2047 + spin_unlock_irq(&client->buffer_lock);
2048 +
2049 +- ret = bits_to_user(mem, max, size, p, compat);
2050 ++ ret = bits_to_user(mem, maxbit, maxlen, p, compat);
2051 + if (ret < 0)
2052 + evdev_queue_syn_dropped(client);
2053 +
2054 +diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
2055 +index a87d3fab0271..d290e8396116 100644
2056 +--- a/drivers/md/dm-cache-metadata.c
2057 ++++ b/drivers/md/dm-cache-metadata.c
2058 +@@ -94,6 +94,9 @@ struct cache_disk_superblock {
2059 + } __packed;
2060 +
2061 + struct dm_cache_metadata {
2062 ++ atomic_t ref_count;
2063 ++ struct list_head list;
2064 ++
2065 + struct block_device *bdev;
2066 + struct dm_block_manager *bm;
2067 + struct dm_space_map *metadata_sm;
2068 +@@ -669,10 +672,10 @@ static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned *flags)
2069 +
2070 + /*----------------------------------------------------------------*/
2071 +
2072 +-struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
2073 +- sector_t data_block_size,
2074 +- bool may_format_device,
2075 +- size_t policy_hint_size)
2076 ++static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
2077 ++ sector_t data_block_size,
2078 ++ bool may_format_device,
2079 ++ size_t policy_hint_size)
2080 + {
2081 + int r;
2082 + struct dm_cache_metadata *cmd;
2083 +@@ -683,6 +686,7 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
2084 + return NULL;
2085 + }
2086 +
2087 ++ atomic_set(&cmd->ref_count, 1);
2088 + init_rwsem(&cmd->root_lock);
2089 + cmd->bdev = bdev;
2090 + cmd->data_block_size = data_block_size;
2091 +@@ -705,10 +709,95 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
2092 + return cmd;
2093 + }
2094 +
2095 ++/*
2096 ++ * We keep a little list of ref counted metadata objects to prevent two
2097 ++ * different target instances creating separate bufio instances. This is
2098 ++ * an issue if a table is reloaded before the suspend.
2099 ++ */
2100 ++static DEFINE_MUTEX(table_lock);
2101 ++static LIST_HEAD(table);
2102 ++
2103 ++static struct dm_cache_metadata *lookup(struct block_device *bdev)
2104 ++{
2105 ++ struct dm_cache_metadata *cmd;
2106 ++
2107 ++ list_for_each_entry(cmd, &table, list)
2108 ++ if (cmd->bdev == bdev) {
2109 ++ atomic_inc(&cmd->ref_count);
2110 ++ return cmd;
2111 ++ }
2112 ++
2113 ++ return NULL;
2114 ++}
2115 ++
2116 ++static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
2117 ++ sector_t data_block_size,
2118 ++ bool may_format_device,
2119 ++ size_t policy_hint_size)
2120 ++{
2121 ++ struct dm_cache_metadata *cmd, *cmd2;
2122 ++
2123 ++ mutex_lock(&table_lock);
2124 ++ cmd = lookup(bdev);
2125 ++ mutex_unlock(&table_lock);
2126 ++
2127 ++ if (cmd)
2128 ++ return cmd;
2129 ++
2130 ++ cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size);
2131 ++ if (cmd) {
2132 ++ mutex_lock(&table_lock);
2133 ++ cmd2 = lookup(bdev);
2134 ++ if (cmd2) {
2135 ++ mutex_unlock(&table_lock);
2136 ++ __destroy_persistent_data_objects(cmd);
2137 ++ kfree(cmd);
2138 ++ return cmd2;
2139 ++ }
2140 ++ list_add(&cmd->list, &table);
2141 ++ mutex_unlock(&table_lock);
2142 ++ }
2143 ++
2144 ++ return cmd;
2145 ++}
2146 ++
2147 ++static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size)
2148 ++{
2149 ++ if (cmd->data_block_size != data_block_size) {
2150 ++ DMERR("data_block_size (%llu) different from that in metadata (%llu)\n",
2151 ++ (unsigned long long) data_block_size,
2152 ++ (unsigned long long) cmd->data_block_size);
2153 ++ return false;
2154 ++ }
2155 ++
2156 ++ return true;
2157 ++}
2158 ++
2159 ++struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
2160 ++ sector_t data_block_size,
2161 ++ bool may_format_device,
2162 ++ size_t policy_hint_size)
2163 ++{
2164 ++ struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size,
2165 ++ may_format_device, policy_hint_size);
2166 ++ if (cmd && !same_params(cmd, data_block_size)) {
2167 ++ dm_cache_metadata_close(cmd);
2168 ++ return NULL;
2169 ++ }
2170 ++
2171 ++ return cmd;
2172 ++}
2173 ++
2174 + void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
2175 + {
2176 +- __destroy_persistent_data_objects(cmd);
2177 +- kfree(cmd);
2178 ++ if (atomic_dec_and_test(&cmd->ref_count)) {
2179 ++ mutex_lock(&table_lock);
2180 ++ list_del(&cmd->list);
2181 ++ mutex_unlock(&table_lock);
2182 ++
2183 ++ __destroy_persistent_data_objects(cmd);
2184 ++ kfree(cmd);
2185 ++ }
2186 + }
2187 +
2188 + /*
2189 +diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
2190 +index ff284b7a17bd..c10dec0f6e9d 100644
2191 +--- a/drivers/md/dm-cache-target.c
2192 ++++ b/drivers/md/dm-cache-target.c
2193 +@@ -222,7 +222,13 @@ struct cache {
2194 + struct list_head need_commit_migrations;
2195 + sector_t migration_threshold;
2196 + wait_queue_head_t migration_wait;
2197 +- atomic_t nr_migrations;
2198 ++ atomic_t nr_allocated_migrations;
2199 ++
2200 ++ /*
2201 ++ * The number of in flight migrations that are performing
2202 ++ * background io. eg, promotion, writeback.
2203 ++ */
2204 ++ atomic_t nr_io_migrations;
2205 +
2206 + wait_queue_head_t quiescing_wait;
2207 + atomic_t quiescing;
2208 +@@ -259,7 +265,6 @@ struct cache {
2209 + struct dm_deferred_set *all_io_ds;
2210 +
2211 + mempool_t *migration_pool;
2212 +- struct dm_cache_migration *next_migration;
2213 +
2214 + struct dm_cache_policy *policy;
2215 + unsigned policy_nr_args;
2216 +@@ -350,10 +355,31 @@ static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cel
2217 + dm_bio_prison_free_cell(cache->prison, cell);
2218 + }
2219 +
2220 ++static struct dm_cache_migration *alloc_migration(struct cache *cache)
2221 ++{
2222 ++ struct dm_cache_migration *mg;
2223 ++
2224 ++ mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
2225 ++ if (mg) {
2226 ++ mg->cache = cache;
2227 ++ atomic_inc(&mg->cache->nr_allocated_migrations);
2228 ++ }
2229 ++
2230 ++ return mg;
2231 ++}
2232 ++
2233 ++static void free_migration(struct dm_cache_migration *mg)
2234 ++{
2235 ++ if (atomic_dec_and_test(&mg->cache->nr_allocated_migrations))
2236 ++ wake_up(&mg->cache->migration_wait);
2237 ++
2238 ++ mempool_free(mg, mg->cache->migration_pool);
2239 ++}
2240 ++
2241 + static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
2242 + {
2243 + if (!p->mg) {
2244 +- p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
2245 ++ p->mg = alloc_migration(cache);
2246 + if (!p->mg)
2247 + return -ENOMEM;
2248 + }
2249 +@@ -382,7 +408,7 @@ static void prealloc_free_structs(struct cache *cache, struct prealloc *p)
2250 + free_prison_cell(cache, p->cell1);
2251 +
2252 + if (p->mg)
2253 +- mempool_free(p->mg, cache->migration_pool);
2254 ++ free_migration(p->mg);
2255 + }
2256 +
2257 + static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
2258 +@@ -812,24 +838,14 @@ static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
2259 + * Migration covers moving data from the origin device to the cache, or
2260 + * vice versa.
2261 + *--------------------------------------------------------------*/
2262 +-static void free_migration(struct dm_cache_migration *mg)
2263 +-{
2264 +- mempool_free(mg, mg->cache->migration_pool);
2265 +-}
2266 +-
2267 +-static void inc_nr_migrations(struct cache *cache)
2268 ++static void inc_io_migrations(struct cache *cache)
2269 + {
2270 +- atomic_inc(&cache->nr_migrations);
2271 ++ atomic_inc(&cache->nr_io_migrations);
2272 + }
2273 +
2274 +-static void dec_nr_migrations(struct cache *cache)
2275 ++static void dec_io_migrations(struct cache *cache)
2276 + {
2277 +- atomic_dec(&cache->nr_migrations);
2278 +-
2279 +- /*
2280 +- * Wake the worker in case we're suspending the target.
2281 +- */
2282 +- wake_up(&cache->migration_wait);
2283 ++ atomic_dec(&cache->nr_io_migrations);
2284 + }
2285 +
2286 + static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
2287 +@@ -852,11 +868,10 @@ static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
2288 + wake_worker(cache);
2289 + }
2290 +
2291 +-static void cleanup_migration(struct dm_cache_migration *mg)
2292 ++static void free_io_migration(struct dm_cache_migration *mg)
2293 + {
2294 +- struct cache *cache = mg->cache;
2295 ++ dec_io_migrations(mg->cache);
2296 + free_migration(mg);
2297 +- dec_nr_migrations(cache);
2298 + }
2299 +
2300 + static void migration_failure(struct dm_cache_migration *mg)
2301 +@@ -881,7 +896,7 @@ static void migration_failure(struct dm_cache_migration *mg)
2302 + cell_defer(cache, mg->new_ocell, true);
2303 + }
2304 +
2305 +- cleanup_migration(mg);
2306 ++ free_io_migration(mg);
2307 + }
2308 +
2309 + static void migration_success_pre_commit(struct dm_cache_migration *mg)
2310 +@@ -892,7 +907,7 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
2311 + if (mg->writeback) {
2312 + clear_dirty(cache, mg->old_oblock, mg->cblock);
2313 + cell_defer(cache, mg->old_ocell, false);
2314 +- cleanup_migration(mg);
2315 ++ free_io_migration(mg);
2316 + return;
2317 +
2318 + } else if (mg->demote) {
2319 +@@ -902,14 +917,14 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
2320 + mg->old_oblock);
2321 + if (mg->promote)
2322 + cell_defer(cache, mg->new_ocell, true);
2323 +- cleanup_migration(mg);
2324 ++ free_io_migration(mg);
2325 + return;
2326 + }
2327 + } else {
2328 + if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) {
2329 + DMWARN_LIMIT("promotion failed; couldn't update on disk metadata");
2330 + policy_remove_mapping(cache->policy, mg->new_oblock);
2331 +- cleanup_migration(mg);
2332 ++ free_io_migration(mg);
2333 + return;
2334 + }
2335 + }
2336 +@@ -942,7 +957,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
2337 + } else {
2338 + if (mg->invalidate)
2339 + policy_remove_mapping(cache->policy, mg->old_oblock);
2340 +- cleanup_migration(mg);
2341 ++ free_io_migration(mg);
2342 + }
2343 +
2344 + } else {
2345 +@@ -957,7 +972,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
2346 + bio_endio(mg->new_ocell->holder, 0);
2347 + cell_defer(cache, mg->new_ocell, false);
2348 + }
2349 +- cleanup_migration(mg);
2350 ++ free_io_migration(mg);
2351 + }
2352 + }
2353 +
2354 +@@ -1169,7 +1184,7 @@ static void promote(struct cache *cache, struct prealloc *structs,
2355 + mg->new_ocell = cell;
2356 + mg->start_jiffies = jiffies;
2357 +
2358 +- inc_nr_migrations(cache);
2359 ++ inc_io_migrations(cache);
2360 + quiesce_migration(mg);
2361 + }
2362 +
2363 +@@ -1192,7 +1207,7 @@ static void writeback(struct cache *cache, struct prealloc *structs,
2364 + mg->new_ocell = NULL;
2365 + mg->start_jiffies = jiffies;
2366 +
2367 +- inc_nr_migrations(cache);
2368 ++ inc_io_migrations(cache);
2369 + quiesce_migration(mg);
2370 + }
2371 +
2372 +@@ -1218,7 +1233,7 @@ static void demote_then_promote(struct cache *cache, struct prealloc *structs,
2373 + mg->new_ocell = new_ocell;
2374 + mg->start_jiffies = jiffies;
2375 +
2376 +- inc_nr_migrations(cache);
2377 ++ inc_io_migrations(cache);
2378 + quiesce_migration(mg);
2379 + }
2380 +
2381 +@@ -1245,7 +1260,7 @@ static void invalidate(struct cache *cache, struct prealloc *structs,
2382 + mg->new_ocell = NULL;
2383 + mg->start_jiffies = jiffies;
2384 +
2385 +- inc_nr_migrations(cache);
2386 ++ inc_io_migrations(cache);
2387 + quiesce_migration(mg);
2388 + }
2389 +
2390 +@@ -1306,7 +1321,7 @@ static void process_discard_bio(struct cache *cache, struct bio *bio)
2391 +
2392 + static bool spare_migration_bandwidth(struct cache *cache)
2393 + {
2394 +- sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) *
2395 ++ sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
2396 + cache->sectors_per_block;
2397 + return current_volume < cache->migration_threshold;
2398 + }
2399 +@@ -1661,7 +1676,7 @@ static void stop_quiescing(struct cache *cache)
2400 +
2401 + static void wait_for_migrations(struct cache *cache)
2402 + {
2403 +- wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations));
2404 ++ wait_event(cache->migration_wait, !atomic_read(&cache->nr_allocated_migrations));
2405 + }
2406 +
2407 + static void stop_worker(struct cache *cache)
2408 +@@ -1772,9 +1787,6 @@ static void destroy(struct cache *cache)
2409 + {
2410 + unsigned i;
2411 +
2412 +- if (cache->next_migration)
2413 +- mempool_free(cache->next_migration, cache->migration_pool);
2414 +-
2415 + if (cache->migration_pool)
2416 + mempool_destroy(cache->migration_pool);
2417 +
2418 +@@ -2282,7 +2294,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
2419 + INIT_LIST_HEAD(&cache->quiesced_migrations);
2420 + INIT_LIST_HEAD(&cache->completed_migrations);
2421 + INIT_LIST_HEAD(&cache->need_commit_migrations);
2422 +- atomic_set(&cache->nr_migrations, 0);
2423 ++ atomic_set(&cache->nr_allocated_migrations, 0);
2424 ++ atomic_set(&cache->nr_io_migrations, 0);
2425 + init_waitqueue_head(&cache->migration_wait);
2426 +
2427 + init_waitqueue_head(&cache->quiescing_wait);
2428 +@@ -2342,8 +2355,6 @@ static int cache_create(struct cache_args *ca, struct cache **result)
2429 + goto bad;
2430 + }
2431 +
2432 +- cache->next_migration = NULL;
2433 +-
2434 + cache->need_tick_bio = true;
2435 + cache->sized = false;
2436 + cache->invalidate = false;
2437 +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
2438 +index 4913c0690872..175584ad643f 100644
2439 +--- a/drivers/md/raid5.c
2440 ++++ b/drivers/md/raid5.c
2441 +@@ -2896,7 +2896,8 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
2442 + (s->failed >= 2 && fdev[1]->toread) ||
2443 + (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite &&
2444 + !test_bit(R5_OVERWRITE, &fdev[0]->flags)) ||
2445 +- (sh->raid_conf->level == 6 && s->failed && s->to_write))) {
2446 ++ ((sh->raid_conf->level == 6 || sh->sector >= sh->raid_conf->mddev->recovery_cp)
2447 ++ && s->failed && s->to_write))) {
2448 + /* we would like to get this block, possibly by computing it,
2449 + * otherwise read it if the backing disk is insync
2450 + */
2451 +diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
2452 +index 7e0176321aff..881bf89acfcc 100644
2453 +--- a/drivers/mmc/host/sdhci.c
2454 ++++ b/drivers/mmc/host/sdhci.c
2455 +@@ -2537,7 +2537,7 @@ out:
2456 + /*
2457 + * We have to delay this as it calls back into the driver.
2458 + */
2459 +- if (cardint)
2460 ++ if (cardint && host->mmc->sdio_irqs)
2461 + mmc_signal_sdio_irq(host->mmc);
2462 +
2463 + return result;
2464 +diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
2465 +index cc11f7f5e91d..1468c4658804 100644
2466 +--- a/drivers/net/can/dev.c
2467 ++++ b/drivers/net/can/dev.c
2468 +@@ -664,10 +664,14 @@ static int can_changelink(struct net_device *dev,
2469 + if (dev->flags & IFF_UP)
2470 + return -EBUSY;
2471 + cm = nla_data(data[IFLA_CAN_CTRLMODE]);
2472 +- if (cm->flags & ~priv->ctrlmode_supported)
2473 ++
2474 ++ /* check whether changed bits are allowed to be modified */
2475 ++ if (cm->mask & ~priv->ctrlmode_supported)
2476 + return -EOPNOTSUPP;
2477 ++
2478 ++ /* clear bits to be modified and copy the flag values */
2479 + priv->ctrlmode &= ~cm->mask;
2480 +- priv->ctrlmode |= cm->flags;
2481 ++ priv->ctrlmode |= (cm->flags & cm->mask);
2482 + }
2483 +
2484 + if (data[IFLA_CAN_RESTART_MS]) {
2485 +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
2486 +index dae70d216762..78c65d327e33 100644
2487 +--- a/drivers/pci/pci.c
2488 ++++ b/drivers/pci/pci.c
2489 +@@ -3187,7 +3187,8 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
2490 + {
2491 + struct pci_dev *pdev;
2492 +
2493 +- if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
2494 ++ if (pci_is_root_bus(dev->bus) || dev->subordinate ||
2495 ++ !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
2496 + return -ENOTTY;
2497 +
2498 + list_for_each_entry(pdev, &dev->bus->devices, bus_list)
2499 +@@ -3221,7 +3222,8 @@ static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
2500 + {
2501 + struct pci_dev *pdev;
2502 +
2503 +- if (dev->subordinate || !dev->slot)
2504 ++ if (dev->subordinate || !dev->slot ||
2505 ++ dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
2506 + return -ENOTTY;
2507 +
2508 + list_for_each_entry(pdev, &dev->bus->devices, bus_list)
2509 +@@ -3452,6 +3454,20 @@ int pci_try_reset_function(struct pci_dev *dev)
2510 + }
2511 + EXPORT_SYMBOL_GPL(pci_try_reset_function);
2512 +
2513 ++/* Do any devices on or below this bus prevent a bus reset? */
2514 ++static bool pci_bus_resetable(struct pci_bus *bus)
2515 ++{
2516 ++ struct pci_dev *dev;
2517 ++
2518 ++ list_for_each_entry(dev, &bus->devices, bus_list) {
2519 ++ if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
2520 ++ (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
2521 ++ return false;
2522 ++ }
2523 ++
2524 ++ return true;
2525 ++}
2526 ++
2527 + /* Lock devices from the top of the tree down */
2528 + static void pci_bus_lock(struct pci_bus *bus)
2529 + {
2530 +@@ -3502,6 +3518,22 @@ unlock:
2531 + return 0;
2532 + }
2533 +
2534 ++/* Do any devices on or below this slot prevent a bus reset? */
2535 ++static bool pci_slot_resetable(struct pci_slot *slot)
2536 ++{
2537 ++ struct pci_dev *dev;
2538 ++
2539 ++ list_for_each_entry(dev, &slot->bus->devices, bus_list) {
2540 ++ if (!dev->slot || dev->slot != slot)
2541 ++ continue;
2542 ++ if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
2543 ++ (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
2544 ++ return false;
2545 ++ }
2546 ++
2547 ++ return true;
2548 ++}
2549 ++
2550 + /* Lock devices from the top of the tree down */
2551 + static void pci_slot_lock(struct pci_slot *slot)
2552 + {
2553 +@@ -3623,7 +3655,7 @@ static int pci_slot_reset(struct pci_slot *slot, int probe)
2554 + {
2555 + int rc;
2556 +
2557 +- if (!slot)
2558 ++ if (!slot || !pci_slot_resetable(slot))
2559 + return -ENOTTY;
2560 +
2561 + if (!probe)
2562 +@@ -3715,7 +3747,7 @@ EXPORT_SYMBOL_GPL(pci_try_reset_slot);
2563 +
2564 + static int pci_bus_reset(struct pci_bus *bus, int probe)
2565 + {
2566 +- if (!bus->self)
2567 ++ if (!bus->self || !pci_bus_resetable(bus))
2568 + return -ENOTTY;
2569 +
2570 + if (probe)
2571 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
2572 +index 6e8776b59a2c..27abeb40dfab 100644
2573 +--- a/drivers/pci/quirks.c
2574 ++++ b/drivers/pci/quirks.c
2575 +@@ -3008,6 +3008,20 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, 0x0030,
2576 + DECLARE_PCI_FIXUP_HEADER(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */
2577 + quirk_broken_intx_masking);
2578 +
2579 ++static void quirk_no_bus_reset(struct pci_dev *dev)
2580 ++{
2581 ++ dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
2582 ++}
2583 ++
2584 ++/*
2585 ++ * Atheros AR93xx chips do not behave after a bus reset. The device will
2586 ++ * throw a Link Down error on AER-capable systems and regardless of AER,
2587 ++ * config space of the device is never accessible again and typically
2588 ++ * causes the system to hang or reset when access is attempted.
2589 ++ * http://www.spinics.net/lists/linux-pci/msg34797.html
2590 ++ */
2591 ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
2592 ++
2593 + static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
2594 + struct pci_fixup *end)
2595 + {
2596 +diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
2597 +index c0fe6091566a..988f5e18763a 100644
2598 +--- a/drivers/pinctrl/core.c
2599 ++++ b/drivers/pinctrl/core.c
2600 +@@ -1812,14 +1812,15 @@ void pinctrl_unregister(struct pinctrl_dev *pctldev)
2601 + if (pctldev == NULL)
2602 + return;
2603 +
2604 +- mutex_lock(&pinctrldev_list_mutex);
2605 + mutex_lock(&pctldev->mutex);
2606 +-
2607 + pinctrl_remove_device_debugfs(pctldev);
2608 ++ mutex_unlock(&pctldev->mutex);
2609 +
2610 + if (!IS_ERR(pctldev->p))
2611 + pinctrl_put(pctldev->p);
2612 +
2613 ++ mutex_lock(&pinctrldev_list_mutex);
2614 ++ mutex_lock(&pctldev->mutex);
2615 + /* TODO: check that no pinmuxes are still active? */
2616 + list_del(&pctldev->node);
2617 + /* Destroy descriptor tree */
2618 +diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
2619 +index ab3baa7f9508..86ade85481bd 100644
2620 +--- a/drivers/s390/crypto/ap_bus.c
2621 ++++ b/drivers/s390/crypto/ap_bus.c
2622 +@@ -44,6 +44,7 @@
2623 + #include <linux/hrtimer.h>
2624 + #include <linux/ktime.h>
2625 + #include <asm/facility.h>
2626 ++#include <linux/crypto.h>
2627 +
2628 + #include "ap_bus.h"
2629 +
2630 +@@ -71,7 +72,7 @@ MODULE_AUTHOR("IBM Corporation");
2631 + MODULE_DESCRIPTION("Adjunct Processor Bus driver, " \
2632 + "Copyright IBM Corp. 2006, 2012");
2633 + MODULE_LICENSE("GPL");
2634 +-MODULE_ALIAS("z90crypt");
2635 ++MODULE_ALIAS_CRYPTO("z90crypt");
2636 +
2637 + /*
2638 + * Module parameter
2639 +diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
2640 +index 3f5b56a99892..b4ddb7310e36 100644
2641 +--- a/drivers/scsi/ipr.c
2642 ++++ b/drivers/scsi/ipr.c
2643 +@@ -683,6 +683,7 @@ static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
2644 + ipr_reinit_ipr_cmnd(ipr_cmd);
2645 + ipr_cmd->u.scratch = 0;
2646 + ipr_cmd->sibling = NULL;
2647 ++ ipr_cmd->eh_comp = NULL;
2648 + ipr_cmd->fast_done = fast_done;
2649 + init_timer(&ipr_cmd->timer);
2650 + }
2651 +@@ -848,6 +849,8 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
2652 +
2653 + scsi_dma_unmap(ipr_cmd->scsi_cmd);
2654 + scsi_cmd->scsi_done(scsi_cmd);
2655 ++ if (ipr_cmd->eh_comp)
2656 ++ complete(ipr_cmd->eh_comp);
2657 + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2658 + }
2659 +
2660 +@@ -4805,6 +4808,84 @@ static int ipr_slave_alloc(struct scsi_device *sdev)
2661 + return rc;
2662 + }
2663 +
2664 ++/**
2665 ++ * ipr_match_lun - Match function for specified LUN
2666 ++ * @ipr_cmd: ipr command struct
2667 ++ * @device: device to match (sdev)
2668 ++ *
2669 ++ * Returns:
2670 ++ * 1 if command matches sdev / 0 if command does not match sdev
2671 ++ **/
2672 ++static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
2673 ++{
2674 ++ if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
2675 ++ return 1;
2676 ++ return 0;
2677 ++}
2678 ++
2679 ++/**
2680 ++ * ipr_wait_for_ops - Wait for matching commands to complete
2681 ++ * @ipr_cmd: ipr command struct
2682 ++ * @device: device to match (sdev)
2683 ++ * @match: match function to use
2684 ++ *
2685 ++ * Returns:
2686 ++ * SUCCESS / FAILED
2687 ++ **/
2688 ++static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
2689 ++ int (*match)(struct ipr_cmnd *, void *))
2690 ++{
2691 ++ struct ipr_cmnd *ipr_cmd;
2692 ++ int wait;
2693 ++ unsigned long flags;
2694 ++ struct ipr_hrr_queue *hrrq;
2695 ++ signed long timeout = IPR_ABORT_TASK_TIMEOUT;
2696 ++ DECLARE_COMPLETION_ONSTACK(comp);
2697 ++
2698 ++ ENTER;
2699 ++ do {
2700 ++ wait = 0;
2701 ++
2702 ++ for_each_hrrq(hrrq, ioa_cfg) {
2703 ++ spin_lock_irqsave(hrrq->lock, flags);
2704 ++ list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
2705 ++ if (match(ipr_cmd, device)) {
2706 ++ ipr_cmd->eh_comp = &comp;
2707 ++ wait++;
2708 ++ }
2709 ++ }
2710 ++ spin_unlock_irqrestore(hrrq->lock, flags);
2711 ++ }
2712 ++
2713 ++ if (wait) {
2714 ++ timeout = wait_for_completion_timeout(&comp, timeout);
2715 ++
2716 ++ if (!timeout) {
2717 ++ wait = 0;
2718 ++
2719 ++ for_each_hrrq(hrrq, ioa_cfg) {
2720 ++ spin_lock_irqsave(hrrq->lock, flags);
2721 ++ list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
2722 ++ if (match(ipr_cmd, device)) {
2723 ++ ipr_cmd->eh_comp = NULL;
2724 ++ wait++;
2725 ++ }
2726 ++ }
2727 ++ spin_unlock_irqrestore(hrrq->lock, flags);
2728 ++ }
2729 ++
2730 ++ if (wait)
2731 ++ dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
2732 ++ LEAVE;
2733 ++ return wait ? FAILED : SUCCESS;
2734 ++ }
2735 ++ }
2736 ++ } while (wait);
2737 ++
2738 ++ LEAVE;
2739 ++ return SUCCESS;
2740 ++}
2741 ++
2742 + static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
2743 + {
2744 + struct ipr_ioa_cfg *ioa_cfg;
2745 +@@ -5023,11 +5104,17 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
2746 + static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
2747 + {
2748 + int rc;
2749 ++ struct ipr_ioa_cfg *ioa_cfg;
2750 ++
2751 ++ ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
2752 +
2753 + spin_lock_irq(cmd->device->host->host_lock);
2754 + rc = __ipr_eh_dev_reset(cmd);
2755 + spin_unlock_irq(cmd->device->host->host_lock);
2756 +
2757 ++ if (rc == SUCCESS)
2758 ++ rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
2759 ++
2760 + return rc;
2761 + }
2762 +
2763 +@@ -5205,13 +5292,18 @@ static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
2764 + {
2765 + unsigned long flags;
2766 + int rc;
2767 ++ struct ipr_ioa_cfg *ioa_cfg;
2768 +
2769 + ENTER;
2770 +
2771 ++ ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
2772 ++
2773 + spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
2774 + rc = ipr_cancel_op(scsi_cmd);
2775 + spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
2776 +
2777 ++ if (rc == SUCCESS)
2778 ++ rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
2779 + LEAVE;
2780 + return rc;
2781 + }
2782 +diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
2783 +index 9ce38a22647e..0801f3df4b27 100644
2784 +--- a/drivers/scsi/ipr.h
2785 ++++ b/drivers/scsi/ipr.h
2786 +@@ -1585,6 +1585,7 @@ struct ipr_cmnd {
2787 + struct scsi_device *sdev;
2788 + } u;
2789 +
2790 ++ struct completion *eh_comp;
2791 + struct ipr_hrr_queue *hrrq;
2792 + struct ipr_ioa_cfg *ioa_cfg;
2793 + };
2794 +diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
2795 +index d46b4ccec8cd..850e232d086e 100644
2796 +--- a/drivers/tty/n_tty.c
2797 ++++ b/drivers/tty/n_tty.c
2798 +@@ -2417,12 +2417,17 @@ static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
2799 +
2800 + poll_wait(file, &tty->read_wait, wait);
2801 + poll_wait(file, &tty->write_wait, wait);
2802 ++ if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
2803 ++ mask |= POLLHUP;
2804 + if (input_available_p(tty, 1))
2805 + mask |= POLLIN | POLLRDNORM;
2806 ++ else if (mask & POLLHUP) {
2807 ++ tty_flush_to_ldisc(tty);
2808 ++ if (input_available_p(tty, 1))
2809 ++ mask |= POLLIN | POLLRDNORM;
2810 ++ }
2811 + if (tty->packet && tty->link->ctrl_status)
2812 + mask |= POLLPRI | POLLIN | POLLRDNORM;
2813 +- if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
2814 +- mask |= POLLHUP;
2815 + if (tty_hung_up_p(file))
2816 + mask |= POLLHUP;
2817 + if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) {
2818 +diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
2819 +index 9df5d6ec7eec..f3a9d831d0f9 100644
2820 +--- a/drivers/xen/swiotlb-xen.c
2821 ++++ b/drivers/xen/swiotlb-xen.c
2822 +@@ -449,7 +449,7 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
2823 +
2824 + /* NOTE: We use dev_addr here, not paddr! */
2825 + if (is_xen_swiotlb_buffer(dev_addr)) {
2826 +- swiotlb_tbl_unmap_single(hwdev, dev_addr, size, dir);
2827 ++ swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
2828 + return;
2829 + }
2830 +
2831 +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
2832 +index 1a858947006e..fa9f90049099 100644
2833 +--- a/fs/btrfs/extent_io.c
2834 ++++ b/fs/btrfs/extent_io.c
2835 +@@ -4507,7 +4507,8 @@ static void check_buffer_tree_ref(struct extent_buffer *eb)
2836 + spin_unlock(&eb->refs_lock);
2837 + }
2838 +
2839 +-static void mark_extent_buffer_accessed(struct extent_buffer *eb)
2840 ++static void mark_extent_buffer_accessed(struct extent_buffer *eb,
2841 ++ struct page *accessed)
2842 + {
2843 + unsigned long num_pages, i;
2844 +
2845 +@@ -4516,7 +4517,8 @@ static void mark_extent_buffer_accessed(struct extent_buffer *eb)
2846 + num_pages = num_extent_pages(eb->start, eb->len);
2847 + for (i = 0; i < num_pages; i++) {
2848 + struct page *p = extent_buffer_page(eb, i);
2849 +- mark_page_accessed(p);
2850 ++ if (p != accessed)
2851 ++ mark_page_accessed(p);
2852 + }
2853 + }
2854 +
2855 +@@ -4530,7 +4532,7 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
2856 + start >> PAGE_CACHE_SHIFT);
2857 + if (eb && atomic_inc_not_zero(&eb->refs)) {
2858 + rcu_read_unlock();
2859 +- mark_extent_buffer_accessed(eb);
2860 ++ mark_extent_buffer_accessed(eb, NULL);
2861 + return eb;
2862 + }
2863 + rcu_read_unlock();
2864 +@@ -4578,7 +4580,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
2865 + spin_unlock(&mapping->private_lock);
2866 + unlock_page(p);
2867 + page_cache_release(p);
2868 +- mark_extent_buffer_accessed(exists);
2869 ++ mark_extent_buffer_accessed(exists, p);
2870 + goto free_eb;
2871 + }
2872 +
2873 +@@ -4593,7 +4595,6 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
2874 + attach_extent_buffer_page(eb, p);
2875 + spin_unlock(&mapping->private_lock);
2876 + WARN_ON(PageDirty(p));
2877 +- mark_page_accessed(p);
2878 + eb->pages[i] = p;
2879 + if (!PageUptodate(p))
2880 + uptodate = 0;
2881 +diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
2882 +index f6d00df99a8c..279b06ef5522 100644
2883 +--- a/fs/btrfs/file.c
2884 ++++ b/fs/btrfs/file.c
2885 +@@ -470,11 +470,12 @@ static void btrfs_drop_pages(struct page **pages, size_t num_pages)
2886 + for (i = 0; i < num_pages; i++) {
2887 + /* page checked is some magic around finding pages that
2888 + * have been modified without going through btrfs_set_page_dirty
2889 +- * clear it here
2890 ++ * clear it here. There should be no need to mark the pages
2891 ++ * accessed as prepare_pages should have marked them accessed
2892 ++ * in prepare_pages via find_or_create_page()
2893 + */
2894 + ClearPageChecked(pages[i]);
2895 + unlock_page(pages[i]);
2896 +- mark_page_accessed(pages[i]);
2897 + page_cache_release(pages[i]);
2898 + }
2899 + }
2900 +diff --git a/fs/buffer.c b/fs/buffer.c
2901 +index 4d06a573d199..eef21c69f2d7 100644
2902 +--- a/fs/buffer.c
2903 ++++ b/fs/buffer.c
2904 +@@ -227,7 +227,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
2905 + int all_mapped = 1;
2906 +
2907 + index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
2908 +- page = find_get_page(bd_mapping, index);
2909 ++ page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
2910 + if (!page)
2911 + goto out;
2912 +
2913 +@@ -1368,12 +1368,13 @@ __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
2914 + struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
2915 +
2916 + if (bh == NULL) {
2917 ++ /* __find_get_block_slow will mark the page accessed */
2918 + bh = __find_get_block_slow(bdev, block);
2919 + if (bh)
2920 + bh_lru_install(bh);
2921 +- }
2922 +- if (bh)
2923 ++ } else
2924 + touch_buffer(bh);
2925 ++
2926 + return bh;
2927 + }
2928 + EXPORT_SYMBOL(__find_get_block);
2929 +@@ -1485,16 +1486,27 @@ EXPORT_SYMBOL(set_bh_page);
2930 + /*
2931 + * Called when truncating a buffer on a page completely.
2932 + */
2933 ++
2934 ++/* Bits that are cleared during an invalidate */
2935 ++#define BUFFER_FLAGS_DISCARD \
2936 ++ (1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
2937 ++ 1 << BH_Delay | 1 << BH_Unwritten)
2938 ++
2939 + static void discard_buffer(struct buffer_head * bh)
2940 + {
2941 ++ unsigned long b_state, b_state_old;
2942 ++
2943 + lock_buffer(bh);
2944 + clear_buffer_dirty(bh);
2945 + bh->b_bdev = NULL;
2946 +- clear_buffer_mapped(bh);
2947 +- clear_buffer_req(bh);
2948 +- clear_buffer_new(bh);
2949 +- clear_buffer_delay(bh);
2950 +- clear_buffer_unwritten(bh);
2951 ++ b_state = bh->b_state;
2952 ++ for (;;) {
2953 ++ b_state_old = cmpxchg(&bh->b_state, b_state,
2954 ++ (b_state & ~BUFFER_FLAGS_DISCARD));
2955 ++ if (b_state_old == b_state)
2956 ++ break;
2957 ++ b_state = b_state_old;
2958 ++ }
2959 + unlock_buffer(bh);
2960 + }
2961 +
2962 +diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
2963 +index 77492301cc2b..dfc95646b88c 100644
2964 +--- a/fs/cifs/ioctl.c
2965 ++++ b/fs/cifs/ioctl.c
2966 +@@ -86,21 +86,16 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
2967 + }
2968 +
2969 + src_inode = src_file.file->f_dentry->d_inode;
2970 ++ rc = -EINVAL;
2971 ++ if (S_ISDIR(src_inode->i_mode))
2972 ++ goto out_fput;
2973 +
2974 + /*
2975 + * Note: cifs case is easier than btrfs since server responsible for
2976 + * checks for proper open modes and file type and if it wants
2977 + * server could even support copy of range where source = target
2978 + */
2979 +-
2980 +- /* so we do not deadlock racing two ioctls on same files */
2981 +- if (target_inode < src_inode) {
2982 +- mutex_lock_nested(&target_inode->i_mutex, I_MUTEX_PARENT);
2983 +- mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_CHILD);
2984 +- } else {
2985 +- mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_PARENT);
2986 +- mutex_lock_nested(&target_inode->i_mutex, I_MUTEX_CHILD);
2987 +- }
2988 ++ lock_two_nondirectories(target_inode, src_inode);
2989 +
2990 + /* determine range to clone */
2991 + rc = -EINVAL;
2992 +@@ -124,13 +119,7 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
2993 + out_unlock:
2994 + /* although unlocking in the reverse order from locking is not
2995 + strictly necessary here it is a little cleaner to be consistent */
2996 +- if (target_inode < src_inode) {
2997 +- mutex_unlock(&src_inode->i_mutex);
2998 +- mutex_unlock(&target_inode->i_mutex);
2999 +- } else {
3000 +- mutex_unlock(&target_inode->i_mutex);
3001 +- mutex_unlock(&src_inode->i_mutex);
3002 +- }
3003 ++ unlock_two_nondirectories(src_inode, target_inode);
3004 + out_fput:
3005 + fdput(src_file);
3006 + out_drop_write:
3007 +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
3008 +index 242226a87be7..7620133f78bf 100644
3009 +--- a/fs/ext4/mballoc.c
3010 ++++ b/fs/ext4/mballoc.c
3011 +@@ -1044,6 +1044,8 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
3012 + * allocating. If we are looking at the buddy cache we would
3013 + * have taken a reference using ext4_mb_load_buddy and that
3014 + * would have pinned buddy page to page cache.
3015 ++ * The call to ext4_mb_get_buddy_page_lock will mark the
3016 ++ * page accessed.
3017 + */
3018 + ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b);
3019 + if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
3020 +@@ -1062,7 +1064,6 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
3021 + ret = -EIO;
3022 + goto err;
3023 + }
3024 +- mark_page_accessed(page);
3025 +
3026 + if (e4b.bd_buddy_page == NULL) {
3027 + /*
3028 +@@ -1082,7 +1083,6 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
3029 + ret = -EIO;
3030 + goto err;
3031 + }
3032 +- mark_page_accessed(page);
3033 + err:
3034 + ext4_mb_put_buddy_page_lock(&e4b);
3035 + return ret;
3036 +@@ -1141,7 +1141,7 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
3037 +
3038 + /* we could use find_or_create_page(), but it locks page
3039 + * what we'd like to avoid in fast path ... */
3040 +- page = find_get_page(inode->i_mapping, pnum);
3041 ++ page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
3042 + if (page == NULL || !PageUptodate(page)) {
3043 + if (page)
3044 + /*
3045 +@@ -1172,15 +1172,16 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
3046 + ret = -EIO;
3047 + goto err;
3048 + }
3049 ++
3050 ++ /* Pages marked accessed already */
3051 + e4b->bd_bitmap_page = page;
3052 + e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
3053 +- mark_page_accessed(page);
3054 +
3055 + block++;
3056 + pnum = block / blocks_per_page;
3057 + poff = block % blocks_per_page;
3058 +
3059 +- page = find_get_page(inode->i_mapping, pnum);
3060 ++ page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
3061 + if (page == NULL || !PageUptodate(page)) {
3062 + if (page)
3063 + page_cache_release(page);
3064 +@@ -1201,9 +1202,10 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
3065 + ret = -EIO;
3066 + goto err;
3067 + }
3068 ++
3069 ++ /* Pages marked accessed already */
3070 + e4b->bd_buddy_page = page;
3071 + e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
3072 +- mark_page_accessed(page);
3073 +
3074 + BUG_ON(e4b->bd_bitmap_page == NULL);
3075 + BUG_ON(e4b->bd_buddy_page == NULL);
3076 +diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
3077 +index 293d0486a40f..5c6fe278fb63 100644
3078 +--- a/fs/f2fs/checkpoint.c
3079 ++++ b/fs/f2fs/checkpoint.c
3080 +@@ -71,7 +71,6 @@ repeat:
3081 + goto repeat;
3082 + }
3083 + out:
3084 +- mark_page_accessed(page);
3085 + return page;
3086 + }
3087 +
3088 +diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
3089 +index b0649b76eb4f..bb6478acb369 100644
3090 +--- a/fs/f2fs/node.c
3091 ++++ b/fs/f2fs/node.c
3092 +@@ -969,7 +969,6 @@ repeat:
3093 + }
3094 + got_it:
3095 + f2fs_bug_on(nid != nid_of_node(page));
3096 +- mark_page_accessed(page);
3097 + return page;
3098 + }
3099 +
3100 +@@ -1024,7 +1023,6 @@ page_hit:
3101 + f2fs_put_page(page, 1);
3102 + return ERR_PTR(-EIO);
3103 + }
3104 +- mark_page_accessed(page);
3105 + return page;
3106 + }
3107 +
3108 +diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
3109 +index 0a648bb455ae..6eb13c621a14 100644
3110 +--- a/fs/fuse/dev.c
3111 ++++ b/fs/fuse/dev.c
3112 +@@ -1614,7 +1614,7 @@ out_finish:
3113 +
3114 + static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
3115 + {
3116 +- release_pages(req->pages, req->num_pages, 0);
3117 ++ release_pages(req->pages, req->num_pages, false);
3118 + }
3119 +
3120 + static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
3121 +diff --git a/fs/fuse/file.c b/fs/fuse/file.c
3122 +index a91d3b4d32f3..d8a60270581c 100644
3123 +--- a/fs/fuse/file.c
3124 ++++ b/fs/fuse/file.c
3125 +@@ -1006,8 +1006,6 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
3126 + tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
3127 + flush_dcache_page(page);
3128 +
3129 +- mark_page_accessed(page);
3130 +-
3131 + if (!tmp) {
3132 + unlock_page(page);
3133 + page_cache_release(page);
3134 +diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
3135 +index 49436fa7cd4f..4ccb60d943bb 100644
3136 +--- a/fs/gfs2/aops.c
3137 ++++ b/fs/gfs2/aops.c
3138 +@@ -517,7 +517,6 @@ int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
3139 + p = kmap_atomic(page);
3140 + memcpy(buf + copied, p + offset, amt);
3141 + kunmap_atomic(p);
3142 +- mark_page_accessed(page);
3143 + page_cache_release(page);
3144 + copied += amt;
3145 + index++;
3146 +diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
3147 +index b82a9c99e18b..e7b149614f5e 100644
3148 +--- a/fs/gfs2/meta_io.c
3149 ++++ b/fs/gfs2/meta_io.c
3150 +@@ -136,7 +136,8 @@ struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
3151 + yield();
3152 + }
3153 + } else {
3154 +- page = find_lock_page(mapping, index);
3155 ++ page = find_get_page_flags(mapping, index,
3156 ++ FGP_LOCK|FGP_ACCESSED);
3157 + if (!page)
3158 + return NULL;
3159 + }
3160 +@@ -153,7 +154,6 @@ struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
3161 + map_bh(bh, sdp->sd_vfs, blkno);
3162 +
3163 + unlock_page(page);
3164 +- mark_page_accessed(page);
3165 + page_cache_release(page);
3166 +
3167 + return bh;
3168 +diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
3169 +index a27e3fecefaf..250ed5b20c8f 100644
3170 +--- a/fs/ntfs/attrib.c
3171 ++++ b/fs/ntfs/attrib.c
3172 +@@ -1748,7 +1748,6 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
3173 + if (page) {
3174 + set_page_dirty(page);
3175 + unlock_page(page);
3176 +- mark_page_accessed(page);
3177 + page_cache_release(page);
3178 + }
3179 + ntfs_debug("Done.");
3180 +diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
3181 +index db9bd8a31725..86ddab916b66 100644
3182 +--- a/fs/ntfs/file.c
3183 ++++ b/fs/ntfs/file.c
3184 +@@ -2060,7 +2060,6 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
3185 + }
3186 + do {
3187 + unlock_page(pages[--do_pages]);
3188 +- mark_page_accessed(pages[do_pages]);
3189 + page_cache_release(pages[do_pages]);
3190 + } while (do_pages);
3191 + if (unlikely(status))
3192 +diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
3193 +index b19d3dc2e651..ade2390ffe92 100644
3194 +--- a/include/linux/cpuset.h
3195 ++++ b/include/linux/cpuset.h
3196 +@@ -12,10 +12,31 @@
3197 + #include <linux/cpumask.h>
3198 + #include <linux/nodemask.h>
3199 + #include <linux/mm.h>
3200 ++#include <linux/jump_label.h>
3201 +
3202 + #ifdef CONFIG_CPUSETS
3203 +
3204 +-extern int number_of_cpusets; /* How many cpusets are defined in system? */
3205 ++extern struct static_key cpusets_enabled_key;
3206 ++static inline bool cpusets_enabled(void)
3207 ++{
3208 ++ return static_key_false(&cpusets_enabled_key);
3209 ++}
3210 ++
3211 ++static inline int nr_cpusets(void)
3212 ++{
3213 ++ /* jump label reference count + the top-level cpuset */
3214 ++ return static_key_count(&cpusets_enabled_key) + 1;
3215 ++}
3216 ++
3217 ++static inline void cpuset_inc(void)
3218 ++{
3219 ++ static_key_slow_inc(&cpusets_enabled_key);
3220 ++}
3221 ++
3222 ++static inline void cpuset_dec(void)
3223 ++{
3224 ++ static_key_slow_dec(&cpusets_enabled_key);
3225 ++}
3226 +
3227 + extern int cpuset_init(void);
3228 + extern void cpuset_init_smp(void);
3229 +@@ -32,13 +53,13 @@ extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);
3230 +
3231 + static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
3232 + {
3233 +- return number_of_cpusets <= 1 ||
3234 ++ return nr_cpusets() <= 1 ||
3235 + __cpuset_node_allowed_softwall(node, gfp_mask);
3236 + }
3237 +
3238 + static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
3239 + {
3240 +- return number_of_cpusets <= 1 ||
3241 ++ return nr_cpusets() <= 1 ||
3242 + __cpuset_node_allowed_hardwall(node, gfp_mask);
3243 + }
3244 +
3245 +@@ -124,6 +145,8 @@ static inline void set_mems_allowed(nodemask_t nodemask)
3246 +
3247 + #else /* !CONFIG_CPUSETS */
3248 +
3249 ++static inline bool cpusets_enabled(void) { return false; }
3250 ++
3251 + static inline int cpuset_init(void) { return 0; }
3252 + static inline void cpuset_init_smp(void) {}
3253 +
3254 +diff --git a/include/linux/crypto.h b/include/linux/crypto.h
3255 +index b92eadf92d72..2b00d92a6e6f 100644
3256 +--- a/include/linux/crypto.h
3257 ++++ b/include/linux/crypto.h
3258 +@@ -26,6 +26,19 @@
3259 + #include <linux/uaccess.h>
3260 +
3261 + /*
3262 ++ * Autoloaded crypto modules should only use a prefixed name to avoid allowing
3263 ++ * arbitrary modules to be loaded. Loading from userspace may still need the
3264 ++ * unprefixed names, so it retains those aliases as well.
3265 ++ * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
3266 ++ * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
3267 ++ * expands twice on the same line. Instead, use a separate base name for the
3268 ++ * alias.
3269 ++ */
3270 ++#define MODULE_ALIAS_CRYPTO(name) \
3271 ++ __MODULE_INFO(alias, alias_userspace, name); \
3272 ++ __MODULE_INFO(alias, alias_crypto, "crypto-" name)
3273 ++
3274 ++/*
3275 + * Algorithm masks and types.
3276 + */
3277 + #define CRYPTO_ALG_TYPE_MASK 0x0000000f
3278 +diff --git a/include/linux/gfp.h b/include/linux/gfp.h
3279 +index 39b81dc7d01a..3824ac62f395 100644
3280 +--- a/include/linux/gfp.h
3281 ++++ b/include/linux/gfp.h
3282 +@@ -369,8 +369,8 @@ void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
3283 +
3284 + extern void __free_pages(struct page *page, unsigned int order);
3285 + extern void free_pages(unsigned long addr, unsigned int order);
3286 +-extern void free_hot_cold_page(struct page *page, int cold);
3287 +-extern void free_hot_cold_page_list(struct list_head *list, int cold);
3288 ++extern void free_hot_cold_page(struct page *page, bool cold);
3289 ++extern void free_hot_cold_page_list(struct list_head *list, bool cold);
3290 +
3291 + extern void __free_memcg_kmem_pages(struct page *page, unsigned int order);
3292 + extern void free_memcg_kmem_pages(unsigned long addr, unsigned int order);
3293 +diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
3294 +index b826239bdce0..63579cb8d3dc 100644
3295 +--- a/include/linux/huge_mm.h
3296 ++++ b/include/linux/huge_mm.h
3297 +@@ -93,10 +93,6 @@ extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
3298 + #endif /* CONFIG_DEBUG_VM */
3299 +
3300 + extern unsigned long transparent_hugepage_flags;
3301 +-extern int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
3302 +- pmd_t *dst_pmd, pmd_t *src_pmd,
3303 +- struct vm_area_struct *vma,
3304 +- unsigned long addr, unsigned long end);
3305 + extern int split_huge_page_to_list(struct page *page, struct list_head *list);
3306 + static inline int split_huge_page(struct page *page)
3307 + {
3308 +diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
3309 +index 5c1dfb2a9e73..784304b222b3 100644
3310 +--- a/include/linux/jump_label.h
3311 ++++ b/include/linux/jump_label.h
3312 +@@ -69,6 +69,10 @@ struct static_key {
3313 +
3314 + # include <asm/jump_label.h>
3315 + # define HAVE_JUMP_LABEL
3316 ++#else
3317 ++struct static_key {
3318 ++ atomic_t enabled;
3319 ++};
3320 + #endif /* CC_HAVE_ASM_GOTO && CONFIG_JUMP_LABEL */
3321 +
3322 + enum jump_label_type {
3323 +@@ -79,6 +83,12 @@ enum jump_label_type {
3324 + struct module;
3325 +
3326 + #include <linux/atomic.h>
3327 ++
3328 ++static inline int static_key_count(struct static_key *key)
3329 ++{
3330 ++ return atomic_read(&key->enabled);
3331 ++}
3332 ++
3333 + #ifdef HAVE_JUMP_LABEL
3334 +
3335 + #define JUMP_LABEL_TYPE_FALSE_BRANCH 0UL
3336 +@@ -134,10 +144,6 @@ extern void jump_label_apply_nops(struct module *mod);
3337 +
3338 + #else /* !HAVE_JUMP_LABEL */
3339 +
3340 +-struct static_key {
3341 +- atomic_t enabled;
3342 +-};
3343 +-
3344 + static __always_inline void jump_label_init(void)
3345 + {
3346 + static_key_initialized = true;
3347 +@@ -145,14 +151,14 @@ static __always_inline void jump_label_init(void)
3348 +
3349 + static __always_inline bool static_key_false(struct static_key *key)
3350 + {
3351 +- if (unlikely(atomic_read(&key->enabled) > 0))
3352 ++ if (unlikely(static_key_count(key) > 0))
3353 + return true;
3354 + return false;
3355 + }
3356 +
3357 + static __always_inline bool static_key_true(struct static_key *key)
3358 + {
3359 +- if (likely(atomic_read(&key->enabled) > 0))
3360 ++ if (likely(static_key_count(key) > 0))
3361 + return true;
3362 + return false;
3363 + }
3364 +@@ -194,7 +200,7 @@ static inline int jump_label_apply_nops(struct module *mod)
3365 +
3366 + static inline bool static_key_enabled(struct static_key *key)
3367 + {
3368 +- return (atomic_read(&key->enabled) > 0);
3369 ++ return static_key_count(key) > 0;
3370 + }
3371 +
3372 + #endif /* _LINUX_JUMP_LABEL_H */
3373 +diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
3374 +index 18843532a0c9..ac819bf9522c 100644
3375 +--- a/include/linux/mmzone.h
3376 ++++ b/include/linux/mmzone.h
3377 +@@ -78,10 +78,15 @@ extern int page_group_by_mobility_disabled;
3378 + #define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
3379 + #define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)
3380 +
3381 +-static inline int get_pageblock_migratetype(struct page *page)
3382 ++#define get_pageblock_migratetype(page) \
3383 ++ get_pfnblock_flags_mask(page, page_to_pfn(page), \
3384 ++ PB_migrate_end, MIGRATETYPE_MASK)
3385 ++
3386 ++static inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
3387 + {
3388 + BUILD_BUG_ON(PB_migrate_end - PB_migrate != 2);
3389 +- return get_pageblock_flags_mask(page, PB_migrate_end, MIGRATETYPE_MASK);
3390 ++ return get_pfnblock_flags_mask(page, pfn, PB_migrate_end,
3391 ++ MIGRATETYPE_MASK);
3392 + }
3393 +
3394 + struct free_area {
3395 +@@ -138,6 +143,7 @@ enum zone_stat_item {
3396 + NR_SHMEM, /* shmem pages (included tmpfs/GEM pages) */
3397 + NR_DIRTIED, /* page dirtyings since bootup */
3398 + NR_WRITTEN, /* page writings since bootup */
3399 ++ NR_PAGES_SCANNED, /* pages scanned since last reclaim */
3400 + #ifdef CONFIG_NUMA
3401 + NUMA_HIT, /* allocated in intended node */
3402 + NUMA_MISS, /* allocated in non intended node */
3403 +@@ -316,19 +322,12 @@ enum zone_type {
3404 + #ifndef __GENERATING_BOUNDS_H
3405 +
3406 + struct zone {
3407 +- /* Fields commonly accessed by the page allocator */
3408 ++ /* Read-mostly fields */
3409 +
3410 + /* zone watermarks, access with *_wmark_pages(zone) macros */
3411 + unsigned long watermark[NR_WMARK];
3412 +
3413 + /*
3414 +- * When free pages are below this point, additional steps are taken
3415 +- * when reading the number of free pages to avoid per-cpu counter
3416 +- * drift allowing watermarks to be breached
3417 +- */
3418 +- unsigned long percpu_drift_mark;
3419 +-
3420 +- /*
3421 + * We don't know if the memory that we're going to allocate will be freeable
3422 + * or/and it will be released eventually, so to avoid totally wasting several
3423 + * GB of ram we must reserve some of the lower zone memory (otherwise we risk
3424 +@@ -336,41 +335,26 @@ struct zone {
3425 + * on the higher zones). This array is recalculated at runtime if the
3426 + * sysctl_lowmem_reserve_ratio sysctl changes.
3427 + */
3428 +- unsigned long lowmem_reserve[MAX_NR_ZONES];
3429 +-
3430 +- /*
3431 +- * This is a per-zone reserve of pages that should not be
3432 +- * considered dirtyable memory.
3433 +- */
3434 +- unsigned long dirty_balance_reserve;
3435 ++ long lowmem_reserve[MAX_NR_ZONES];
3436 +
3437 + #ifdef CONFIG_NUMA
3438 + int node;
3439 ++#endif
3440 ++
3441 + /*
3442 +- * zone reclaim becomes active if more unmapped pages exist.
3443 ++ * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
3444 ++ * this zone's LRU. Maintained by the pageout code.
3445 + */
3446 +- unsigned long min_unmapped_pages;
3447 +- unsigned long min_slab_pages;
3448 +-#endif
3449 ++ unsigned int inactive_ratio;
3450 ++
3451 ++ struct pglist_data *zone_pgdat;
3452 + struct per_cpu_pageset __percpu *pageset;
3453 ++
3454 + /*
3455 +- * free areas of different sizes
3456 ++ * This is a per-zone reserve of pages that should not be
3457 ++ * considered dirtyable memory.
3458 + */
3459 +- spinlock_t lock;
3460 +-#if defined CONFIG_COMPACTION || defined CONFIG_CMA
3461 +- /* Set to true when the PG_migrate_skip bits should be cleared */
3462 +- bool compact_blockskip_flush;
3463 +-
3464 +- /* pfn where compaction free scanner should start */
3465 +- unsigned long compact_cached_free_pfn;
3466 +- /* pfn where async and sync compaction migration scanner should start */
3467 +- unsigned long compact_cached_migrate_pfn[2];
3468 +-#endif
3469 +-#ifdef CONFIG_MEMORY_HOTPLUG
3470 +- /* see spanned/present_pages for more description */
3471 +- seqlock_t span_seqlock;
3472 +-#endif
3473 +- struct free_area free_area[MAX_ORDER];
3474 ++ unsigned long dirty_balance_reserve;
3475 +
3476 + #ifndef CONFIG_SPARSEMEM
3477 + /*
3478 +@@ -380,71 +364,14 @@ struct zone {
3479 + unsigned long *pageblock_flags;
3480 + #endif /* CONFIG_SPARSEMEM */
3481 +
3482 +-#ifdef CONFIG_COMPACTION
3483 +- /*
3484 +- * On compaction failure, 1<<compact_defer_shift compactions
3485 +- * are skipped before trying again. The number attempted since
3486 +- * last failure is tracked with compact_considered.
3487 +- */
3488 +- unsigned int compact_considered;
3489 +- unsigned int compact_defer_shift;
3490 +- int compact_order_failed;
3491 +-#endif
3492 +-
3493 +- ZONE_PADDING(_pad1_)
3494 +-
3495 +- /* Fields commonly accessed by the page reclaim scanner */
3496 +- spinlock_t lru_lock;
3497 +- struct lruvec lruvec;
3498 +-
3499 +- unsigned long pages_scanned; /* since last reclaim */
3500 +- unsigned long flags; /* zone flags, see below */
3501 +-
3502 +- /* Zone statistics */
3503 +- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
3504 +-
3505 +- /*
3506 +- * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
3507 +- * this zone's LRU. Maintained by the pageout code.
3508 +- */
3509 +- unsigned int inactive_ratio;
3510 +-
3511 +-
3512 +- ZONE_PADDING(_pad2_)
3513 +- /* Rarely used or read-mostly fields */
3514 +-
3515 ++#ifdef CONFIG_NUMA
3516 + /*
3517 +- * wait_table -- the array holding the hash table
3518 +- * wait_table_hash_nr_entries -- the size of the hash table array
3519 +- * wait_table_bits -- wait_table_size == (1 << wait_table_bits)
3520 +- *
3521 +- * The purpose of all these is to keep track of the people
3522 +- * waiting for a page to become available and make them
3523 +- * runnable again when possible. The trouble is that this
3524 +- * consumes a lot of space, especially when so few things
3525 +- * wait on pages at a given time. So instead of using
3526 +- * per-page waitqueues, we use a waitqueue hash table.
3527 +- *
3528 +- * The bucket discipline is to sleep on the same queue when
3529 +- * colliding and wake all in that wait queue when removing.
3530 +- * When something wakes, it must check to be sure its page is
3531 +- * truly available, a la thundering herd. The cost of a
3532 +- * collision is great, but given the expected load of the
3533 +- * table, they should be so rare as to be outweighed by the
3534 +- * benefits from the saved space.
3535 +- *
3536 +- * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
3537 +- * primary users of these fields, and in mm/page_alloc.c
3538 +- * free_area_init_core() performs the initialization of them.
3539 ++ * zone reclaim becomes active if more unmapped pages exist.
3540 + */
3541 +- wait_queue_head_t * wait_table;
3542 +- unsigned long wait_table_hash_nr_entries;
3543 +- unsigned long wait_table_bits;
3544 ++ unsigned long min_unmapped_pages;
3545 ++ unsigned long min_slab_pages;
3546 ++#endif /* CONFIG_NUMA */
3547 +
3548 +- /*
3549 +- * Discontig memory support fields.
3550 +- */
3551 +- struct pglist_data *zone_pgdat;
3552 + /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
3553 + unsigned long zone_start_pfn;
3554 +
3555 +@@ -490,9 +417,11 @@ struct zone {
3556 + * adjust_managed_page_count() should be used instead of directly
3557 + * touching zone->managed_pages and totalram_pages.
3558 + */
3559 ++ unsigned long managed_pages;
3560 + unsigned long spanned_pages;
3561 + unsigned long present_pages;
3562 +- unsigned long managed_pages;
3563 ++
3564 ++ const char *name;
3565 +
3566 + /*
3567 + * Number of MIGRATE_RESEVE page block. To maintain for just
3568 +@@ -500,10 +429,91 @@ struct zone {
3569 + */
3570 + int nr_migrate_reserve_block;
3571 +
3572 ++#ifdef CONFIG_MEMORY_HOTPLUG
3573 ++ /* see spanned/present_pages for more description */
3574 ++ seqlock_t span_seqlock;
3575 ++#endif
3576 ++
3577 + /*
3578 +- * rarely used fields:
3579 ++ * wait_table -- the array holding the hash table
3580 ++ * wait_table_hash_nr_entries -- the size of the hash table array
3581 ++ * wait_table_bits -- wait_table_size == (1 << wait_table_bits)
3582 ++ *
3583 ++ * The purpose of all these is to keep track of the people
3584 ++ * waiting for a page to become available and make them
3585 ++ * runnable again when possible. The trouble is that this
3586 ++ * consumes a lot of space, especially when so few things
3587 ++ * wait on pages at a given time. So instead of using
3588 ++ * per-page waitqueues, we use a waitqueue hash table.
3589 ++ *
3590 ++ * The bucket discipline is to sleep on the same queue when
3591 ++ * colliding and wake all in that wait queue when removing.
3592 ++ * When something wakes, it must check to be sure its page is
3593 ++ * truly available, a la thundering herd. The cost of a
3594 ++ * collision is great, but given the expected load of the
3595 ++ * table, they should be so rare as to be outweighed by the
3596 ++ * benefits from the saved space.
3597 ++ *
3598 ++ * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
3599 ++ * primary users of these fields, and in mm/page_alloc.c
3600 ++ * free_area_init_core() performs the initialization of them.
3601 + */
3602 +- const char *name;
3603 ++ wait_queue_head_t *wait_table;
3604 ++ unsigned long wait_table_hash_nr_entries;
3605 ++ unsigned long wait_table_bits;
3606 ++
3607 ++ ZONE_PADDING(_pad1_)
3608 ++
3609 ++ /* Write-intensive fields used from the page allocator */
3610 ++ spinlock_t lock;
3611 ++
3612 ++ /* free areas of different sizes */
3613 ++ struct free_area free_area[MAX_ORDER];
3614 ++
3615 ++ /* zone flags, see below */
3616 ++ unsigned long flags;
3617 ++
3618 ++ ZONE_PADDING(_pad2_)
3619 ++
3620 ++ /* Write-intensive fields used by page reclaim */
3621 ++
3622 ++ /* Fields commonly accessed by the page reclaim scanner */
3623 ++ spinlock_t lru_lock;
3624 ++ struct lruvec lruvec;
3625 ++
3626 ++ /*
3627 ++ * When free pages are below this point, additional steps are taken
3628 ++ * when reading the number of free pages to avoid per-cpu counter
3629 ++ * drift allowing watermarks to be breached
3630 ++ */
3631 ++ unsigned long percpu_drift_mark;
3632 ++
3633 ++#if defined CONFIG_COMPACTION || defined CONFIG_CMA
3634 ++ /* pfn where compaction free scanner should start */
3635 ++ unsigned long compact_cached_free_pfn;
3636 ++ /* pfn where async and sync compaction migration scanner should start */
3637 ++ unsigned long compact_cached_migrate_pfn[2];
3638 ++#endif
3639 ++
3640 ++#ifdef CONFIG_COMPACTION
3641 ++ /*
3642 ++ * On compaction failure, 1<<compact_defer_shift compactions
3643 ++ * are skipped before trying again. The number attempted since
3644 ++ * last failure is tracked with compact_considered.
3645 ++ */
3646 ++ unsigned int compact_considered;
3647 ++ unsigned int compact_defer_shift;
3648 ++ int compact_order_failed;
3649 ++#endif
3650 ++
3651 ++#if defined CONFIG_COMPACTION || defined CONFIG_CMA
3652 ++ /* Set to true when the PG_migrate_skip bits should be cleared */
3653 ++ bool compact_blockskip_flush;
3654 ++#endif
3655 ++
3656 ++ ZONE_PADDING(_pad3_)
3657 ++ /* Zone statistics */
3658 ++ atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
3659 + } ____cacheline_internodealigned_in_smp;
3660 +
3661 + typedef enum {
3662 +@@ -519,6 +529,7 @@ typedef enum {
3663 + ZONE_WRITEBACK, /* reclaim scanning has recently found
3664 + * many pages under writeback
3665 + */
3666 ++ ZONE_FAIR_DEPLETED, /* fair zone policy batch depleted */
3667 + } zone_flags_t;
3668 +
3669 + static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
3670 +@@ -556,6 +567,11 @@ static inline int zone_is_reclaim_locked(const struct zone *zone)
3671 + return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
3672 + }
3673 +
3674 ++static inline int zone_is_fair_depleted(const struct zone *zone)
3675 ++{
3676 ++ return test_bit(ZONE_FAIR_DEPLETED, &zone->flags);
3677 ++}
3678 ++
3679 + static inline int zone_is_oom_locked(const struct zone *zone)
3680 + {
3681 + return test_bit(ZONE_OOM_LOCKED, &zone->flags);
3682 +@@ -807,10 +823,10 @@ static inline bool pgdat_is_empty(pg_data_t *pgdat)
3683 + extern struct mutex zonelists_mutex;
3684 + void build_all_zonelists(pg_data_t *pgdat, struct zone *zone);
3685 + void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
3686 +-bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
3687 +- int classzone_idx, int alloc_flags);
3688 +-bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
3689 +- int classzone_idx, int alloc_flags);
3690 ++bool zone_watermark_ok(struct zone *z, unsigned int order,
3691 ++ unsigned long mark, int classzone_idx, int alloc_flags);
3692 ++bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
3693 ++ unsigned long mark, int classzone_idx, int alloc_flags);
3694 + enum memmap_context {
3695 + MEMMAP_EARLY,
3696 + MEMMAP_HOTPLUG,
3697 +diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
3698 +index ca71a1d347a0..3c545b48aeab 100644
3699 +--- a/include/linux/page-flags.h
3700 ++++ b/include/linux/page-flags.h
3701 +@@ -198,6 +198,7 @@ struct page; /* forward declaration */
3702 + TESTPAGEFLAG(Locked, locked)
3703 + PAGEFLAG(Error, error) TESTCLEARFLAG(Error, error)
3704 + PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
3705 ++ __SETPAGEFLAG(Referenced, referenced)
3706 + PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
3707 + PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru)
3708 + PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active)
3709 +@@ -208,6 +209,7 @@ PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */
3710 + PAGEFLAG(SavePinned, savepinned); /* Xen */
3711 + PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
3712 + PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
3713 ++ __SETPAGEFLAG(SwapBacked, swapbacked)
3714 +
3715 + __PAGEFLAG(SlobFree, slob_free)
3716 +
3717 +diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
3718 +index c08730c10c7a..2baeee12f48e 100644
3719 +--- a/include/linux/pageblock-flags.h
3720 ++++ b/include/linux/pageblock-flags.h
3721 +@@ -65,33 +65,26 @@ extern int pageblock_order;
3722 + /* Forward declaration */
3723 + struct page;
3724 +
3725 +-unsigned long get_pageblock_flags_mask(struct page *page,
3726 ++unsigned long get_pfnblock_flags_mask(struct page *page,
3727 ++ unsigned long pfn,
3728 + unsigned long end_bitidx,
3729 + unsigned long mask);
3730 +-void set_pageblock_flags_mask(struct page *page,
3731 ++
3732 ++void set_pfnblock_flags_mask(struct page *page,
3733 + unsigned long flags,
3734 ++ unsigned long pfn,
3735 + unsigned long end_bitidx,
3736 + unsigned long mask);
3737 +
3738 + /* Declarations for getting and setting flags. See mm/page_alloc.c */
3739 +-static inline unsigned long get_pageblock_flags_group(struct page *page,
3740 +- int start_bitidx, int end_bitidx)
3741 +-{
3742 +- unsigned long nr_flag_bits = end_bitidx - start_bitidx + 1;
3743 +- unsigned long mask = (1 << nr_flag_bits) - 1;
3744 +-
3745 +- return get_pageblock_flags_mask(page, end_bitidx, mask);
3746 +-}
3747 +-
3748 +-static inline void set_pageblock_flags_group(struct page *page,
3749 +- unsigned long flags,
3750 +- int start_bitidx, int end_bitidx)
3751 +-{
3752 +- unsigned long nr_flag_bits = end_bitidx - start_bitidx + 1;
3753 +- unsigned long mask = (1 << nr_flag_bits) - 1;
3754 +-
3755 +- set_pageblock_flags_mask(page, flags, end_bitidx, mask);
3756 +-}
3757 ++#define get_pageblock_flags_group(page, start_bitidx, end_bitidx) \
3758 ++ get_pfnblock_flags_mask(page, page_to_pfn(page), \
3759 ++ end_bitidx, \
3760 ++ (1 << (end_bitidx - start_bitidx + 1)) - 1)
3761 ++#define set_pageblock_flags_group(page, flags, start_bitidx, end_bitidx) \
3762 ++ set_pfnblock_flags_mask(page, flags, page_to_pfn(page), \
3763 ++ end_bitidx, \
3764 ++ (1 << (end_bitidx - start_bitidx + 1)) - 1)
3765 +
3766 + #ifdef CONFIG_COMPACTION
3767 + #define get_pageblock_skip(page) \
3768 +diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
3769 +index 09c1b03867d9..fcebdda3651c 100644
3770 +--- a/include/linux/pagemap.h
3771 ++++ b/include/linux/pagemap.h
3772 +@@ -99,7 +99,7 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
3773 +
3774 + #define page_cache_get(page) get_page(page)
3775 + #define page_cache_release(page) put_page(page)
3776 +-void release_pages(struct page **pages, int nr, int cold);
3777 ++void release_pages(struct page **pages, int nr, bool cold);
3778 +
3779 + /*
3780 + * speculatively take a reference to a page.
3781 +@@ -248,12 +248,108 @@ pgoff_t page_cache_next_hole(struct address_space *mapping,
3782 + pgoff_t page_cache_prev_hole(struct address_space *mapping,
3783 + pgoff_t index, unsigned long max_scan);
3784 +
3785 ++#define FGP_ACCESSED 0x00000001
3786 ++#define FGP_LOCK 0x00000002
3787 ++#define FGP_CREAT 0x00000004
3788 ++#define FGP_WRITE 0x00000008
3789 ++#define FGP_NOFS 0x00000010
3790 ++#define FGP_NOWAIT 0x00000020
3791 ++
3792 ++struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
3793 ++ int fgp_flags, gfp_t cache_gfp_mask);
3794 ++
3795 ++/**
3796 ++ * find_get_page - find and get a page reference
3797 ++ * @mapping: the address_space to search
3798 ++ * @offset: the page index
3799 ++ *
3800 ++ * Looks up the page cache slot at @mapping & @offset. If there is a
3801 ++ * page cache page, it is returned with an increased refcount.
3802 ++ *
3803 ++ * Otherwise, %NULL is returned.
3804 ++ */
3805 ++static inline struct page *find_get_page(struct address_space *mapping,
3806 ++ pgoff_t offset)
3807 ++{
3808 ++ return pagecache_get_page(mapping, offset, 0, 0);
3809 ++}
3810 ++
3811 ++static inline struct page *find_get_page_flags(struct address_space *mapping,
3812 ++ pgoff_t offset, int fgp_flags)
3813 ++{
3814 ++ return pagecache_get_page(mapping, offset, fgp_flags, 0);
3815 ++}
3816 ++
3817 ++/**
3818 ++ * find_lock_page - locate, pin and lock a pagecache page
3819 ++ * pagecache_get_page - find and get a page reference
3820 ++ * @mapping: the address_space to search
3821 ++ * @offset: the page index
3822 ++ *
3823 ++ * Looks up the page cache slot at @mapping & @offset. If there is a
3824 ++ * page cache page, it is returned locked and with an increased
3825 ++ * refcount.
3826 ++ *
3827 ++ * Otherwise, %NULL is returned.
3828 ++ *
3829 ++ * find_lock_page() may sleep.
3830 ++ */
3831 ++static inline struct page *find_lock_page(struct address_space *mapping,
3832 ++ pgoff_t offset)
3833 ++{
3834 ++ return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
3835 ++}
3836 ++
3837 ++/**
3838 ++ * find_or_create_page - locate or add a pagecache page
3839 ++ * @mapping: the page's address_space
3840 ++ * @index: the page's index into the mapping
3841 ++ * @gfp_mask: page allocation mode
3842 ++ *
3843 ++ * Looks up the page cache slot at @mapping & @offset. If there is a
3844 ++ * page cache page, it is returned locked and with an increased
3845 ++ * refcount.
3846 ++ *
3847 ++ * If the page is not present, a new page is allocated using @gfp_mask
3848 ++ * and added to the page cache and the VM's LRU list. The page is
3849 ++ * returned locked and with an increased refcount.
3850 ++ *
3851 ++ * On memory exhaustion, %NULL is returned.
3852 ++ *
3853 ++ * find_or_create_page() may sleep, even if @gfp_flags specifies an
3854 ++ * atomic allocation!
3855 ++ */
3856 ++static inline struct page *find_or_create_page(struct address_space *mapping,
3857 ++ pgoff_t offset, gfp_t gfp_mask)
3858 ++{
3859 ++ return pagecache_get_page(mapping, offset,
3860 ++ FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
3861 ++ gfp_mask);
3862 ++}
3863 ++
3864 ++/**
3865 ++ * grab_cache_page_nowait - returns locked page at given index in given cache
3866 ++ * @mapping: target address_space
3867 ++ * @index: the page index
3868 ++ *
3869 ++ * Same as grab_cache_page(), but do not wait if the page is unavailable.
3870 ++ * This is intended for speculative data generators, where the data can
3871 ++ * be regenerated if the page couldn't be grabbed. This routine should
3872 ++ * be safe to call while holding the lock for another page.
3873 ++ *
3874 ++ * Clear __GFP_FS when allocating the page to avoid recursion into the fs
3875 ++ * and deadlock against the caller's locked page.
3876 ++ */
3877 ++static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
3878 ++ pgoff_t index)
3879 ++{
3880 ++ return pagecache_get_page(mapping, index,
3881 ++ FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
3882 ++ mapping_gfp_mask(mapping));
3883 ++}
3884 ++
3885 + struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
3886 +-struct page *find_get_page(struct address_space *mapping, pgoff_t offset);
3887 + struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
3888 +-struct page *find_lock_page(struct address_space *mapping, pgoff_t offset);
3889 +-struct page *find_or_create_page(struct address_space *mapping, pgoff_t index,
3890 +- gfp_t gfp_mask);
3891 + unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
3892 + unsigned int nr_entries, struct page **entries,
3893 + pgoff_t *indices);
3894 +@@ -276,8 +372,6 @@ static inline struct page *grab_cache_page(struct address_space *mapping,
3895 + return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
3896 + }
3897 +
3898 +-extern struct page * grab_cache_page_nowait(struct address_space *mapping,
3899 +- pgoff_t index);
3900 + extern struct page * read_cache_page(struct address_space *mapping,
3901 + pgoff_t index, filler_t *filler, void *data);
3902 + extern struct page * read_cache_page_gfp(struct address_space *mapping,
3903 +diff --git a/include/linux/pci.h b/include/linux/pci.h
3904 +index 0e5e16c6f7f1..d662546f77d8 100644
3905 +--- a/include/linux/pci.h
3906 ++++ b/include/linux/pci.h
3907 +@@ -170,6 +170,8 @@ enum pci_dev_flags {
3908 + PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2,
3909 + /* Provide indication device is assigned by a Virtual Machine Manager */
3910 + PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) 4,
3911 ++ /* Do not use bus resets for device */
3912 ++ PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
3913 + };
3914 +
3915 + enum pci_irq_reroute_variant {
3916 +diff --git a/include/linux/swap.h b/include/linux/swap.h
3917 +index 789324976801..241bf0922770 100644
3918 +--- a/include/linux/swap.h
3919 ++++ b/include/linux/swap.h
3920 +@@ -268,12 +268,14 @@ extern unsigned long nr_free_pagecache_pages(void);
3921 +
3922 +
3923 + /* linux/mm/swap.c */
3924 +-extern void __lru_cache_add(struct page *);
3925 + extern void lru_cache_add(struct page *);
3926 ++extern void lru_cache_add_anon(struct page *page);
3927 ++extern void lru_cache_add_file(struct page *page);
3928 + extern void lru_add_page_tail(struct page *page, struct page *page_tail,
3929 + struct lruvec *lruvec, struct list_head *head);
3930 + extern void activate_page(struct page *);
3931 + extern void mark_page_accessed(struct page *);
3932 ++extern void init_page_accessed(struct page *page);
3933 + extern void lru_add_drain(void);
3934 + extern void lru_add_drain_cpu(int cpu);
3935 + extern void lru_add_drain_all(void);
3936 +@@ -283,22 +285,6 @@ extern void swap_setup(void);
3937 +
3938 + extern void add_page_to_unevictable_list(struct page *page);
3939 +
3940 +-/**
3941 +- * lru_cache_add: add a page to the page lists
3942 +- * @page: the page to add
3943 +- */
3944 +-static inline void lru_cache_add_anon(struct page *page)
3945 +-{
3946 +- ClearPageActive(page);
3947 +- __lru_cache_add(page);
3948 +-}
3949 +-
3950 +-static inline void lru_cache_add_file(struct page *page)
3951 +-{
3952 +- ClearPageActive(page);
3953 +- __lru_cache_add(page);
3954 +-}
3955 +-
3956 + /* linux/mm/vmscan.c */
3957 + extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
3958 + gfp_t gfp_mask, nodemask_t *mask);
3959 +@@ -456,7 +442,7 @@ mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
3960 + #define free_page_and_swap_cache(page) \
3961 + page_cache_release(page)
3962 + #define free_pages_and_swap_cache(pages, nr) \
3963 +- release_pages((pages), (nr), 0);
3964 ++ release_pages((pages), (nr), false);
3965 +
3966 + static inline void show_swap_cache_info(void)
3967 + {
3968 +diff --git a/include/linux/time.h b/include/linux/time.h
3969 +index d5d229b2e5af..7d532a32ff3a 100644
3970 +--- a/include/linux/time.h
3971 ++++ b/include/linux/time.h
3972 +@@ -173,6 +173,19 @@ extern void getboottime(struct timespec *ts);
3973 + extern void monotonic_to_bootbased(struct timespec *ts);
3974 + extern void get_monotonic_boottime(struct timespec *ts);
3975 +
3976 ++static inline bool timeval_valid(const struct timeval *tv)
3977 ++{
3978 ++ /* Dates before 1970 are bogus */
3979 ++ if (tv->tv_sec < 0)
3980 ++ return false;
3981 ++
3982 ++ /* Can't have more microseconds then a second */
3983 ++ if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
3984 ++ return false;
3985 ++
3986 ++ return true;
3987 ++}
3988 ++
3989 + extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
3990 + extern int timekeeping_valid_for_hres(void);
3991 + extern u64 timekeeping_max_deferment(void);
3992 +diff --git a/include/trace/events/pagemap.h b/include/trace/events/pagemap.h
3993 +index 1c9fabde69e4..ce0803b8d05f 100644
3994 +--- a/include/trace/events/pagemap.h
3995 ++++ b/include/trace/events/pagemap.h
3996 +@@ -28,12 +28,10 @@ TRACE_EVENT(mm_lru_insertion,
3997 +
3998 + TP_PROTO(
3999 + struct page *page,
4000 +- unsigned long pfn,
4001 +- int lru,
4002 +- unsigned long flags
4003 ++ int lru
4004 + ),
4005 +
4006 +- TP_ARGS(page, pfn, lru, flags),
4007 ++ TP_ARGS(page, lru),
4008 +
4009 + TP_STRUCT__entry(
4010 + __field(struct page *, page )
4011 +@@ -44,9 +42,9 @@ TRACE_EVENT(mm_lru_insertion,
4012 +
4013 + TP_fast_assign(
4014 + __entry->page = page;
4015 +- __entry->pfn = pfn;
4016 ++ __entry->pfn = page_to_pfn(page);
4017 + __entry->lru = lru;
4018 +- __entry->flags = flags;
4019 ++ __entry->flags = trace_pagemap_flags(page);
4020 + ),
4021 +
4022 + /* Flag format is based on page-types.c formatting for pagemap */
4023 +@@ -64,9 +62,9 @@ TRACE_EVENT(mm_lru_insertion,
4024 +
4025 + TRACE_EVENT(mm_lru_activate,
4026 +
4027 +- TP_PROTO(struct page *page, unsigned long pfn),
4028 ++ TP_PROTO(struct page *page),
4029 +
4030 +- TP_ARGS(page, pfn),
4031 ++ TP_ARGS(page),
4032 +
4033 + TP_STRUCT__entry(
4034 + __field(struct page *, page )
4035 +@@ -75,7 +73,7 @@ TRACE_EVENT(mm_lru_activate,
4036 +
4037 + TP_fast_assign(
4038 + __entry->page = page;
4039 +- __entry->pfn = pfn;
4040 ++ __entry->pfn = page_to_pfn(page);
4041 + ),
4042 +
4043 + /* Flag format is based on page-types.c formatting for pagemap */
4044 +diff --git a/kernel/cpuset.c b/kernel/cpuset.c
4045 +index 15b3ea693225..2fb2877e6961 100644
4046 +--- a/kernel/cpuset.c
4047 ++++ b/kernel/cpuset.c
4048 +@@ -61,12 +61,7 @@
4049 + #include <linux/cgroup.h>
4050 + #include <linux/wait.h>
4051 +
4052 +-/*
4053 +- * Tracks how many cpusets are currently defined in system.
4054 +- * When there is only one cpuset (the root cpuset) we can
4055 +- * short circuit some hooks.
4056 +- */
4057 +-int number_of_cpusets __read_mostly;
4058 ++struct static_key cpusets_enabled_key __read_mostly = STATIC_KEY_INIT_FALSE;
4059 +
4060 + /* See "Frequency meter" comments, below. */
4061 +
4062 +@@ -611,7 +606,7 @@ static int generate_sched_domains(cpumask_var_t **domains,
4063 + goto done;
4064 + }
4065 +
4066 +- csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
4067 ++ csa = kmalloc(nr_cpusets() * sizeof(cp), GFP_KERNEL);
4068 + if (!csa)
4069 + goto done;
4070 + csn = 0;
4071 +@@ -1961,7 +1956,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
4072 + if (is_spread_slab(parent))
4073 + set_bit(CS_SPREAD_SLAB, &cs->flags);
4074 +
4075 +- number_of_cpusets++;
4076 ++ cpuset_inc();
4077 +
4078 + if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
4079 + goto out_unlock;
4080 +@@ -2012,7 +2007,7 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css)
4081 + if (is_sched_load_balance(cs))
4082 + update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
4083 +
4084 +- number_of_cpusets--;
4085 ++ cpuset_dec();
4086 + clear_bit(CS_ONLINE, &cs->flags);
4087 +
4088 + mutex_unlock(&cpuset_mutex);
4089 +@@ -2067,7 +2062,6 @@ int __init cpuset_init(void)
4090 + if (!alloc_cpumask_var(&cpus_attach, GFP_KERNEL))
4091 + BUG();
4092 +
4093 +- number_of_cpusets = 1;
4094 + return 0;
4095 + }
4096 +
4097 +diff --git a/kernel/time.c b/kernel/time.c
4098 +index 3c49ab45f822..3eb322e518a3 100644
4099 +--- a/kernel/time.c
4100 ++++ b/kernel/time.c
4101 +@@ -195,6 +195,10 @@ SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv,
4102 + if (tv) {
4103 + if (copy_from_user(&user_tv, tv, sizeof(*tv)))
4104 + return -EFAULT;
4105 ++
4106 ++ if (!timeval_valid(&user_tv))
4107 ++ return -EINVAL;
4108 ++
4109 + new_ts.tv_sec = user_tv.tv_sec;
4110 + new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
4111 + }
4112 +diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
4113 +index af8d1d4f3d55..28db9bedc857 100644
4114 +--- a/kernel/time/ntp.c
4115 ++++ b/kernel/time/ntp.c
4116 +@@ -631,6 +631,13 @@ int ntp_validate_timex(struct timex *txc)
4117 + if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME)))
4118 + return -EPERM;
4119 +
4120 ++ if (txc->modes & ADJ_FREQUENCY) {
4121 ++ if (LONG_MIN / PPM_SCALE > txc->freq)
4122 ++ return -EINVAL;
4123 ++ if (LONG_MAX / PPM_SCALE < txc->freq)
4124 ++ return -EINVAL;
4125 ++ }
4126 ++
4127 + return 0;
4128 + }
4129 +
4130 +diff --git a/mm/filemap.c b/mm/filemap.c
4131 +index bdaa21555abe..217cfd3b3264 100644
4132 +--- a/mm/filemap.c
4133 ++++ b/mm/filemap.c
4134 +@@ -644,8 +644,17 @@ EXPORT_SYMBOL(unlock_page);
4135 + */
4136 + void end_page_writeback(struct page *page)
4137 + {
4138 +- if (TestClearPageReclaim(page))
4139 ++ /*
4140 ++ * TestClearPageReclaim could be used here but it is an atomic
4141 ++ * operation and overkill in this particular case. Failing to
4142 ++ * shuffle a page marked for immediate reclaim is too mild to
4143 ++ * justify taking an atomic operation penalty at the end of
4144 ++ * every page writeback.
4145 ++ */
4146 ++ if (PageReclaim(page)) {
4147 ++ ClearPageReclaim(page);
4148 + rotate_reclaimable_page(page);
4149 ++ }
4150 +
4151 + if (!test_clear_page_writeback(page))
4152 + BUG();
4153 +@@ -848,26 +857,6 @@ out:
4154 + EXPORT_SYMBOL(find_get_entry);
4155 +
4156 + /**
4157 +- * find_get_page - find and get a page reference
4158 +- * @mapping: the address_space to search
4159 +- * @offset: the page index
4160 +- *
4161 +- * Looks up the page cache slot at @mapping & @offset. If there is a
4162 +- * page cache page, it is returned with an increased refcount.
4163 +- *
4164 +- * Otherwise, %NULL is returned.
4165 +- */
4166 +-struct page *find_get_page(struct address_space *mapping, pgoff_t offset)
4167 +-{
4168 +- struct page *page = find_get_entry(mapping, offset);
4169 +-
4170 +- if (radix_tree_exceptional_entry(page))
4171 +- page = NULL;
4172 +- return page;
4173 +-}
4174 +-EXPORT_SYMBOL(find_get_page);
4175 +-
4176 +-/**
4177 + * find_lock_entry - locate, pin and lock a page cache entry
4178 + * @mapping: the address_space to search
4179 + * @offset: the page cache index
4180 +@@ -904,66 +893,83 @@ repeat:
4181 + EXPORT_SYMBOL(find_lock_entry);
4182 +
4183 + /**
4184 +- * find_lock_page - locate, pin and lock a pagecache page
4185 ++ * pagecache_get_page - find and get a page reference
4186 + * @mapping: the address_space to search
4187 + * @offset: the page index
4188 ++ * @fgp_flags: PCG flags
4189 ++ * @gfp_mask: gfp mask to use for the page cache data page allocation
4190 + *
4191 +- * Looks up the page cache slot at @mapping & @offset. If there is a
4192 +- * page cache page, it is returned locked and with an increased
4193 +- * refcount.
4194 +- *
4195 +- * Otherwise, %NULL is returned.
4196 ++ * Looks up the page cache slot at @mapping & @offset.
4197 + *
4198 +- * find_lock_page() may sleep.
4199 +- */
4200 +-struct page *find_lock_page(struct address_space *mapping, pgoff_t offset)
4201 +-{
4202 +- struct page *page = find_lock_entry(mapping, offset);
4203 +-
4204 +- if (radix_tree_exceptional_entry(page))
4205 +- page = NULL;
4206 +- return page;
4207 +-}
4208 +-EXPORT_SYMBOL(find_lock_page);
4209 +-
4210 +-/**
4211 +- * find_or_create_page - locate or add a pagecache page
4212 +- * @mapping: the page's address_space
4213 +- * @index: the page's index into the mapping
4214 +- * @gfp_mask: page allocation mode
4215 ++ * PCG flags modify how the page is returned
4216 + *
4217 +- * Looks up the page cache slot at @mapping & @offset. If there is a
4218 +- * page cache page, it is returned locked and with an increased
4219 +- * refcount.
4220 +- *
4221 +- * If the page is not present, a new page is allocated using @gfp_mask
4222 +- * and added to the page cache and the VM's LRU list. The page is
4223 +- * returned locked and with an increased refcount.
4224 ++ * FGP_ACCESSED: the page will be marked accessed
4225 ++ * FGP_LOCK: Page is returned locked
4226 ++ * FGP_CREAT: If page is not present then a new page is allocated using
4227 ++ * @gfp_mask and added to the page cache and the VM's LRU
4228 ++ * list. The page is returned locked and with an increased
4229 ++ * refcount. Otherwise, %NULL is returned.
4230 + *
4231 +- * On memory exhaustion, %NULL is returned.
4232 ++ * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
4233 ++ * if the GFP flags specified for FGP_CREAT are atomic.
4234 + *
4235 +- * find_or_create_page() may sleep, even if @gfp_flags specifies an
4236 +- * atomic allocation!
4237 ++ * If there is a page cache page, it is returned with an increased refcount.
4238 + */
4239 +-struct page *find_or_create_page(struct address_space *mapping,
4240 +- pgoff_t index, gfp_t gfp_mask)
4241 ++struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
4242 ++ int fgp_flags, gfp_t gfp_mask)
4243 + {
4244 + struct page *page;
4245 +- int err;
4246 ++
4247 + repeat:
4248 +- page = find_lock_page(mapping, index);
4249 +- if (!page) {
4250 ++ page = find_get_entry(mapping, offset);
4251 ++ if (radix_tree_exceptional_entry(page))
4252 ++ page = NULL;
4253 ++ if (!page)
4254 ++ goto no_page;
4255 ++
4256 ++ if (fgp_flags & FGP_LOCK) {
4257 ++ if (fgp_flags & FGP_NOWAIT) {
4258 ++ if (!trylock_page(page)) {
4259 ++ page_cache_release(page);
4260 ++ return NULL;
4261 ++ }
4262 ++ } else {
4263 ++ lock_page(page);
4264 ++ }
4265 ++
4266 ++ /* Has the page been truncated? */
4267 ++ if (unlikely(page->mapping != mapping)) {
4268 ++ unlock_page(page);
4269 ++ page_cache_release(page);
4270 ++ goto repeat;
4271 ++ }
4272 ++ VM_BUG_ON(page->index != offset);
4273 ++ }
4274 ++
4275 ++ if (page && (fgp_flags & FGP_ACCESSED))
4276 ++ mark_page_accessed(page);
4277 ++
4278 ++no_page:
4279 ++ if (!page && (fgp_flags & FGP_CREAT)) {
4280 ++ int err;
4281 ++ if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
4282 ++ gfp_mask |= __GFP_WRITE;
4283 ++ if (fgp_flags & FGP_NOFS)
4284 ++ gfp_mask &= ~__GFP_FS;
4285 ++
4286 + page = __page_cache_alloc(gfp_mask);
4287 + if (!page)
4288 + return NULL;
4289 +- /*
4290 +- * We want a regular kernel memory (not highmem or DMA etc)
4291 +- * allocation for the radix tree nodes, but we need to honour
4292 +- * the context-specific requirements the caller has asked for.
4293 +- * GFP_RECLAIM_MASK collects those requirements.
4294 +- */
4295 +- err = add_to_page_cache_lru(page, mapping, index,
4296 +- (gfp_mask & GFP_RECLAIM_MASK));
4297 ++
4298 ++ if (WARN_ON_ONCE(!(fgp_flags & FGP_LOCK)))
4299 ++ fgp_flags |= FGP_LOCK;
4300 ++
4301 ++ /* Init accessed so we avoid an atomic mark_page_accessed later */
4302 ++ if (fgp_flags & FGP_ACCESSED)
4303 ++ init_page_accessed(page);
4304 ++
4305 ++ err = add_to_page_cache_lru(page, mapping, offset,
4306 ++ gfp_mask & GFP_RECLAIM_MASK);
4307 + if (unlikely(err)) {
4308 + page_cache_release(page);
4309 + page = NULL;
4310 +@@ -971,9 +977,10 @@ repeat:
4311 + goto repeat;
4312 + }
4313 + }
4314 ++
4315 + return page;
4316 + }
4317 +-EXPORT_SYMBOL(find_or_create_page);
4318 ++EXPORT_SYMBOL(pagecache_get_page);
4319 +
4320 + /**
4321 + * find_get_entries - gang pagecache lookup
4322 +@@ -1263,39 +1270,6 @@ repeat:
4323 + }
4324 + EXPORT_SYMBOL(find_get_pages_tag);
4325 +
4326 +-/**
4327 +- * grab_cache_page_nowait - returns locked page at given index in given cache
4328 +- * @mapping: target address_space
4329 +- * @index: the page index
4330 +- *
4331 +- * Same as grab_cache_page(), but do not wait if the page is unavailable.
4332 +- * This is intended for speculative data generators, where the data can
4333 +- * be regenerated if the page couldn't be grabbed. This routine should
4334 +- * be safe to call while holding the lock for another page.
4335 +- *
4336 +- * Clear __GFP_FS when allocating the page to avoid recursion into the fs
4337 +- * and deadlock against the caller's locked page.
4338 +- */
4339 +-struct page *
4340 +-grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
4341 +-{
4342 +- struct page *page = find_get_page(mapping, index);
4343 +-
4344 +- if (page) {
4345 +- if (trylock_page(page))
4346 +- return page;
4347 +- page_cache_release(page);
4348 +- return NULL;
4349 +- }
4350 +- page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
4351 +- if (page && add_to_page_cache_lru(page, mapping, index, GFP_NOFS)) {
4352 +- page_cache_release(page);
4353 +- page = NULL;
4354 +- }
4355 +- return page;
4356 +-}
4357 +-EXPORT_SYMBOL(grab_cache_page_nowait);
4358 +-
4359 + /*
4360 + * CD/DVDs are error prone. When a medium error occurs, the driver may fail
4361 + * a _large_ part of the i/o request. Imagine the worst scenario:
4362 +@@ -2397,7 +2371,6 @@ int pagecache_write_end(struct file *file, struct address_space *mapping,
4363 + {
4364 + const struct address_space_operations *aops = mapping->a_ops;
4365 +
4366 +- mark_page_accessed(page);
4367 + return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
4368 + }
4369 + EXPORT_SYMBOL(pagecache_write_end);
4370 +@@ -2479,34 +2452,17 @@ EXPORT_SYMBOL(generic_file_direct_write);
4371 + struct page *grab_cache_page_write_begin(struct address_space *mapping,
4372 + pgoff_t index, unsigned flags)
4373 + {
4374 +- int status;
4375 +- gfp_t gfp_mask;
4376 + struct page *page;
4377 +- gfp_t gfp_notmask = 0;
4378 ++ int fgp_flags = FGP_LOCK|FGP_ACCESSED|FGP_WRITE|FGP_CREAT;
4379 +
4380 +- gfp_mask = mapping_gfp_mask(mapping);
4381 +- if (mapping_cap_account_dirty(mapping))
4382 +- gfp_mask |= __GFP_WRITE;
4383 + if (flags & AOP_FLAG_NOFS)
4384 +- gfp_notmask = __GFP_FS;
4385 +-repeat:
4386 +- page = find_lock_page(mapping, index);
4387 ++ fgp_flags |= FGP_NOFS;
4388 ++
4389 ++ page = pagecache_get_page(mapping, index, fgp_flags,
4390 ++ mapping_gfp_mask(mapping));
4391 + if (page)
4392 +- goto found;
4393 ++ wait_for_stable_page(page);
4394 +
4395 +- page = __page_cache_alloc(gfp_mask & ~gfp_notmask);
4396 +- if (!page)
4397 +- return NULL;
4398 +- status = add_to_page_cache_lru(page, mapping, index,
4399 +- GFP_KERNEL & ~gfp_notmask);
4400 +- if (unlikely(status)) {
4401 +- page_cache_release(page);
4402 +- if (status == -EEXIST)
4403 +- goto repeat;
4404 +- return NULL;
4405 +- }
4406 +-found:
4407 +- wait_for_stable_page(page);
4408 + return page;
4409 + }
4410 + EXPORT_SYMBOL(grab_cache_page_write_begin);
4411 +@@ -2555,7 +2511,7 @@ again:
4412 +
4413 + status = a_ops->write_begin(file, mapping, pos, bytes, flags,
4414 + &page, &fsdata);
4415 +- if (unlikely(status))
4416 ++ if (unlikely(status < 0))
4417 + break;
4418 +
4419 + if (mapping_writably_mapped(mapping))
4420 +@@ -2564,7 +2520,6 @@ again:
4421 + copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
4422 + flush_dcache_page(page);
4423 +
4424 +- mark_page_accessed(page);
4425 + status = a_ops->write_end(file, mapping, pos, bytes, copied,
4426 + page, fsdata);
4427 + if (unlikely(status < 0))
4428 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
4429 +index 331faa5c0d5e..adce656d2e9c 100644
4430 +--- a/mm/huge_memory.c
4431 ++++ b/mm/huge_memory.c
4432 +@@ -2273,6 +2273,30 @@ static void khugepaged_alloc_sleep(void)
4433 +
4434 + static int khugepaged_node_load[MAX_NUMNODES];
4435 +
4436 ++static bool khugepaged_scan_abort(int nid)
4437 ++{
4438 ++ int i;
4439 ++
4440 ++ /*
4441 ++ * If zone_reclaim_mode is disabled, then no extra effort is made to
4442 ++ * allocate memory locally.
4443 ++ */
4444 ++ if (!zone_reclaim_mode)
4445 ++ return false;
4446 ++
4447 ++ /* If there is a count for this node already, it must be acceptable */
4448 ++ if (khugepaged_node_load[nid])
4449 ++ return false;
4450 ++
4451 ++ for (i = 0; i < MAX_NUMNODES; i++) {
4452 ++ if (!khugepaged_node_load[i])
4453 ++ continue;
4454 ++ if (node_distance(nid, i) > RECLAIM_DISTANCE)
4455 ++ return true;
4456 ++ }
4457 ++ return false;
4458 ++}
4459 ++
4460 + #ifdef CONFIG_NUMA
4461 + static int khugepaged_find_target_node(void)
4462 + {
4463 +@@ -2589,6 +2613,8 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
4464 + * hit record.
4465 + */
4466 + node = page_to_nid(page);
4467 ++ if (khugepaged_scan_abort(node))
4468 ++ goto out_unmap;
4469 + khugepaged_node_load[node]++;
4470 + VM_BUG_ON_PAGE(PageCompound(page), page);
4471 + if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
4472 +diff --git a/mm/memory.c b/mm/memory.c
4473 +index 924429e5ef4d..7f30beaba74f 100644
4474 +--- a/mm/memory.c
4475 ++++ b/mm/memory.c
4476 +@@ -878,7 +878,7 @@ out_set_pte:
4477 + return 0;
4478 + }
4479 +
4480 +-int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
4481 ++static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
4482 + pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
4483 + unsigned long addr, unsigned long end)
4484 + {
4485 +@@ -3646,7 +3646,7 @@ static int handle_pte_fault(struct mm_struct *mm,
4486 + pte_t entry;
4487 + spinlock_t *ptl;
4488 +
4489 +- entry = *pte;
4490 ++ entry = ACCESS_ONCE(*pte);
4491 + if (!pte_present(entry)) {
4492 + if (pte_none(entry)) {
4493 + if (vma->vm_ops) {
4494 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
4495 +index 4b258297cc7c..ea419137f845 100644
4496 +--- a/mm/page_alloc.c
4497 ++++ b/mm/page_alloc.c
4498 +@@ -408,7 +408,8 @@ static int destroy_compound_page(struct page *page, unsigned long order)
4499 + return bad;
4500 + }
4501 +
4502 +-static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
4503 ++static inline void prep_zero_page(struct page *page, unsigned int order,
4504 ++ gfp_t gfp_flags)
4505 + {
4506 + int i;
4507 +
4508 +@@ -452,7 +453,7 @@ static inline void set_page_guard_flag(struct page *page) { }
4509 + static inline void clear_page_guard_flag(struct page *page) { }
4510 + #endif
4511 +
4512 +-static inline void set_page_order(struct page *page, int order)
4513 ++static inline void set_page_order(struct page *page, unsigned int order)
4514 + {
4515 + set_page_private(page, order);
4516 + __SetPageBuddy(page);
4517 +@@ -503,21 +504,31 @@ __find_buddy_index(unsigned long page_idx, unsigned int order)
4518 + * For recording page's order, we use page_private(page).
4519 + */
4520 + static inline int page_is_buddy(struct page *page, struct page *buddy,
4521 +- int order)
4522 ++ unsigned int order)
4523 + {
4524 + if (!pfn_valid_within(page_to_pfn(buddy)))
4525 + return 0;
4526 +
4527 +- if (page_zone_id(page) != page_zone_id(buddy))
4528 +- return 0;
4529 +-
4530 + if (page_is_guard(buddy) && page_order(buddy) == order) {
4531 + VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
4532 ++
4533 ++ if (page_zone_id(page) != page_zone_id(buddy))
4534 ++ return 0;
4535 ++
4536 + return 1;
4537 + }
4538 +
4539 + if (PageBuddy(buddy) && page_order(buddy) == order) {
4540 + VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
4541 ++
4542 ++ /*
4543 ++ * zone check is done late to avoid uselessly
4544 ++ * calculating zone/node ids for pages that could
4545 ++ * never merge.
4546 ++ */
4547 ++ if (page_zone_id(page) != page_zone_id(buddy))
4548 ++ return 0;
4549 ++
4550 + return 1;
4551 + }
4552 + return 0;
4553 +@@ -549,6 +560,7 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
4554 + */
4555 +
4556 + static inline void __free_one_page(struct page *page,
4557 ++ unsigned long pfn,
4558 + struct zone *zone, unsigned int order,
4559 + int migratetype)
4560 + {
4561 +@@ -565,7 +577,7 @@ static inline void __free_one_page(struct page *page,
4562 +
4563 + VM_BUG_ON(migratetype == -1);
4564 +
4565 +- page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
4566 ++ page_idx = pfn & ((1 << MAX_ORDER) - 1);
4567 +
4568 + VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
4569 + VM_BUG_ON_PAGE(bad_range(zone, page), page);
4570 +@@ -666,9 +678,12 @@ static void free_pcppages_bulk(struct zone *zone, int count,
4571 + int migratetype = 0;
4572 + int batch_free = 0;
4573 + int to_free = count;
4574 ++ unsigned long nr_scanned;
4575 +
4576 + spin_lock(&zone->lock);
4577 +- zone->pages_scanned = 0;
4578 ++ nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
4579 ++ if (nr_scanned)
4580 ++ __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
4581 +
4582 + while (to_free) {
4583 + struct page *page;
4584 +@@ -700,7 +715,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
4585 + list_del(&page->lru);
4586 + mt = get_freepage_migratetype(page);
4587 + /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
4588 +- __free_one_page(page, zone, 0, mt);
4589 ++ __free_one_page(page, page_to_pfn(page), zone, 0, mt);
4590 + trace_mm_page_pcpu_drain(page, 0, mt);
4591 + if (likely(!is_migrate_isolate_page(page))) {
4592 + __mod_zone_page_state(zone, NR_FREE_PAGES, 1);
4593 +@@ -712,13 +727,18 @@ static void free_pcppages_bulk(struct zone *zone, int count,
4594 + spin_unlock(&zone->lock);
4595 + }
4596 +
4597 +-static void free_one_page(struct zone *zone, struct page *page, int order,
4598 ++static void free_one_page(struct zone *zone,
4599 ++ struct page *page, unsigned long pfn,
4600 ++ unsigned int order,
4601 + int migratetype)
4602 + {
4603 ++ unsigned long nr_scanned;
4604 + spin_lock(&zone->lock);
4605 +- zone->pages_scanned = 0;
4606 ++ nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
4607 ++ if (nr_scanned)
4608 ++ __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
4609 +
4610 +- __free_one_page(page, zone, order, migratetype);
4611 ++ __free_one_page(page, pfn, zone, order, migratetype);
4612 + if (unlikely(!is_migrate_isolate(migratetype)))
4613 + __mod_zone_freepage_state(zone, 1 << order, migratetype);
4614 + spin_unlock(&zone->lock);
4615 +@@ -755,15 +775,16 @@ static void __free_pages_ok(struct page *page, unsigned int order)
4616 + {
4617 + unsigned long flags;
4618 + int migratetype;
4619 ++ unsigned long pfn = page_to_pfn(page);
4620 +
4621 + if (!free_pages_prepare(page, order))
4622 + return;
4623 +
4624 ++ migratetype = get_pfnblock_migratetype(page, pfn);
4625 + local_irq_save(flags);
4626 + __count_vm_events(PGFREE, 1 << order);
4627 +- migratetype = get_pageblock_migratetype(page);
4628 + set_freepage_migratetype(page, migratetype);
4629 +- free_one_page(page_zone(page), page, order, migratetype);
4630 ++ free_one_page(page_zone(page), page, pfn, order, migratetype);
4631 + local_irq_restore(flags);
4632 + }
4633 +
4634 +@@ -894,7 +915,7 @@ static inline int check_new_page(struct page *page)
4635 + return 0;
4636 + }
4637 +
4638 +-static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
4639 ++static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
4640 + {
4641 + int i;
4642 +
4643 +@@ -1105,16 +1126,17 @@ static int try_to_steal_freepages(struct zone *zone, struct page *page,
4644 +
4645 + /* Remove an element from the buddy allocator from the fallback list */
4646 + static inline struct page *
4647 +-__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
4648 ++__rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
4649 + {
4650 + struct free_area *area;
4651 +- int current_order;
4652 ++ unsigned int current_order;
4653 + struct page *page;
4654 + int migratetype, new_type, i;
4655 +
4656 + /* Find the largest possible block of pages in the other list */
4657 +- for (current_order = MAX_ORDER-1; current_order >= order;
4658 +- --current_order) {
4659 ++ for (current_order = MAX_ORDER-1;
4660 ++ current_order >= order && current_order <= MAX_ORDER-1;
4661 ++ --current_order) {
4662 + for (i = 0;; i++) {
4663 + migratetype = fallbacks[start_migratetype][i];
4664 +
4665 +@@ -1194,7 +1216,7 @@ retry_reserve:
4666 + */
4667 + static int rmqueue_bulk(struct zone *zone, unsigned int order,
4668 + unsigned long count, struct list_head *list,
4669 +- int migratetype, int cold)
4670 ++ int migratetype, bool cold)
4671 + {
4672 + int i;
4673 +
4674 +@@ -1213,7 +1235,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
4675 + * merge IO requests if the physical pages are ordered
4676 + * properly.
4677 + */
4678 +- if (likely(cold == 0))
4679 ++ if (likely(!cold))
4680 + list_add(&page->lru, list);
4681 + else
4682 + list_add_tail(&page->lru, list);
4683 +@@ -1342,7 +1364,7 @@ void mark_free_pages(struct zone *zone)
4684 + {
4685 + unsigned long pfn, max_zone_pfn;
4686 + unsigned long flags;
4687 +- int order, t;
4688 ++ unsigned int order, t;
4689 + struct list_head *curr;
4690 +
4691 + if (zone_is_empty(zone))
4692 +@@ -1374,19 +1396,20 @@ void mark_free_pages(struct zone *zone)
4693 +
4694 + /*
4695 + * Free a 0-order page
4696 +- * cold == 1 ? free a cold page : free a hot page
4697 ++ * cold == true ? free a cold page : free a hot page
4698 + */
4699 +-void free_hot_cold_page(struct page *page, int cold)
4700 ++void free_hot_cold_page(struct page *page, bool cold)
4701 + {
4702 + struct zone *zone = page_zone(page);
4703 + struct per_cpu_pages *pcp;
4704 + unsigned long flags;
4705 ++ unsigned long pfn = page_to_pfn(page);
4706 + int migratetype;
4707 +
4708 + if (!free_pages_prepare(page, 0))
4709 + return;
4710 +
4711 +- migratetype = get_pageblock_migratetype(page);
4712 ++ migratetype = get_pfnblock_migratetype(page, pfn);
4713 + set_freepage_migratetype(page, migratetype);
4714 + local_irq_save(flags);
4715 + __count_vm_event(PGFREE);
4716 +@@ -1400,17 +1423,17 @@ void free_hot_cold_page(struct page *page, int cold)
4717 + */
4718 + if (migratetype >= MIGRATE_PCPTYPES) {
4719 + if (unlikely(is_migrate_isolate(migratetype))) {
4720 +- free_one_page(zone, page, 0, migratetype);
4721 ++ free_one_page(zone, page, pfn, 0, migratetype);
4722 + goto out;
4723 + }
4724 + migratetype = MIGRATE_MOVABLE;
4725 + }
4726 +
4727 + pcp = &this_cpu_ptr(zone->pageset)->pcp;
4728 +- if (cold)
4729 +- list_add_tail(&page->lru, &pcp->lists[migratetype]);
4730 +- else
4731 ++ if (!cold)
4732 + list_add(&page->lru, &pcp->lists[migratetype]);
4733 ++ else
4734 ++ list_add_tail(&page->lru, &pcp->lists[migratetype]);
4735 + pcp->count++;
4736 + if (pcp->count >= pcp->high) {
4737 + unsigned long batch = ACCESS_ONCE(pcp->batch);
4738 +@@ -1425,7 +1448,7 @@ out:
4739 + /*
4740 + * Free a list of 0-order pages
4741 + */
4742 +-void free_hot_cold_page_list(struct list_head *list, int cold)
4743 ++void free_hot_cold_page_list(struct list_head *list, bool cold)
4744 + {
4745 + struct page *page, *next;
4746 +
4747 +@@ -1537,12 +1560,12 @@ int split_free_page(struct page *page)
4748 + */
4749 + static inline
4750 + struct page *buffered_rmqueue(struct zone *preferred_zone,
4751 +- struct zone *zone, int order, gfp_t gfp_flags,
4752 +- int migratetype)
4753 ++ struct zone *zone, unsigned int order,
4754 ++ gfp_t gfp_flags, int migratetype)
4755 + {
4756 + unsigned long flags;
4757 + struct page *page;
4758 +- int cold = !!(gfp_flags & __GFP_COLD);
4759 ++ bool cold = ((gfp_flags & __GFP_COLD) != 0);
4760 +
4761 + again:
4762 + if (likely(order == 0)) {
4763 +@@ -1591,6 +1614,9 @@ again:
4764 + }
4765 +
4766 + __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
4767 ++ if (zone_page_state(zone, NR_ALLOC_BATCH) == 0 &&
4768 ++ !zone_is_fair_depleted(zone))
4769 ++ zone_set_flag(zone, ZONE_FAIR_DEPLETED);
4770 +
4771 + __count_zone_vm_events(PGALLOC, zone, 1 << order);
4772 + zone_statistics(preferred_zone, zone, gfp_flags);
4773 +@@ -1687,12 +1713,12 @@ static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
4774 + * Return true if free pages are above 'mark'. This takes into account the order
4775 + * of the allocation.
4776 + */
4777 +-static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
4778 +- int classzone_idx, int alloc_flags, long free_pages)
4779 ++static bool __zone_watermark_ok(struct zone *z, unsigned int order,
4780 ++ unsigned long mark, int classzone_idx, int alloc_flags,
4781 ++ long free_pages)
4782 + {
4783 + /* free_pages my go negative - that's OK */
4784 + long min = mark;
4785 +- long lowmem_reserve = z->lowmem_reserve[classzone_idx];
4786 + int o;
4787 + long free_cma = 0;
4788 +
4789 +@@ -1707,7 +1733,7 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
4790 + free_cma = zone_page_state(z, NR_FREE_CMA_PAGES);
4791 + #endif
4792 +
4793 +- if (free_pages - free_cma <= min + lowmem_reserve)
4794 ++ if (free_pages - free_cma <= min + z->lowmem_reserve[classzone_idx])
4795 + return false;
4796 + for (o = 0; o < order; o++) {
4797 + /* At the next order, this order's pages become unavailable */
4798 +@@ -1722,15 +1748,15 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
4799 + return true;
4800 + }
4801 +
4802 +-bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
4803 ++bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
4804 + int classzone_idx, int alloc_flags)
4805 + {
4806 + return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
4807 + zone_page_state(z, NR_FREE_PAGES));
4808 + }
4809 +
4810 +-bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
4811 +- int classzone_idx, int alloc_flags)
4812 ++bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
4813 ++ unsigned long mark, int classzone_idx, int alloc_flags)
4814 + {
4815 + long free_pages = zone_page_state(z, NR_FREE_PAGES);
4816 +
4817 +@@ -1915,6 +1941,18 @@ static inline void init_zone_allows_reclaim(int nid)
4818 + }
4819 + #endif /* CONFIG_NUMA */
4820 +
4821 ++static void reset_alloc_batches(struct zone *preferred_zone)
4822 ++{
4823 ++ struct zone *zone = preferred_zone->zone_pgdat->node_zones;
4824 ++
4825 ++ do {
4826 ++ mod_zone_page_state(zone, NR_ALLOC_BATCH,
4827 ++ high_wmark_pages(zone) - low_wmark_pages(zone) -
4828 ++ atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
4829 ++ zone_clear_flag(zone, ZONE_FAIR_DEPLETED);
4830 ++ } while (zone++ != preferred_zone);
4831 ++}
4832 ++
4833 + /*
4834 + * get_page_from_freelist goes through the zonelist trying to allocate
4835 + * a page.
4836 +@@ -1922,18 +1960,22 @@ static inline void init_zone_allows_reclaim(int nid)
4837 + static struct page *
4838 + get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
4839 + struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
4840 +- struct zone *preferred_zone, int migratetype)
4841 ++ struct zone *preferred_zone, int classzone_idx, int migratetype)
4842 + {
4843 + struct zoneref *z;
4844 + struct page *page = NULL;
4845 +- int classzone_idx;
4846 + struct zone *zone;
4847 + nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
4848 + int zlc_active = 0; /* set if using zonelist_cache */
4849 + int did_zlc_setup = 0; /* just call zlc_setup() one time */
4850 ++ bool consider_zone_dirty = (alloc_flags & ALLOC_WMARK_LOW) &&
4851 ++ (gfp_mask & __GFP_WRITE);
4852 ++ int nr_fair_skipped = 0;
4853 ++ bool zonelist_rescan;
4854 +
4855 +- classzone_idx = zone_idx(preferred_zone);
4856 + zonelist_scan:
4857 ++ zonelist_rescan = false;
4858 ++
4859 + /*
4860 + * Scan zonelist, looking for a zone with enough free.
4861 + * See also __cpuset_node_allowed_softwall() comment in kernel/cpuset.c.
4862 +@@ -1945,12 +1987,10 @@ zonelist_scan:
4863 + if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
4864 + !zlc_zone_worth_trying(zonelist, z, allowednodes))
4865 + continue;
4866 +- if ((alloc_flags & ALLOC_CPUSET) &&
4867 ++ if (cpusets_enabled() &&
4868 ++ (alloc_flags & ALLOC_CPUSET) &&
4869 + !cpuset_zone_allowed_softwall(zone, gfp_mask))
4870 + continue;
4871 +- BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
4872 +- if (unlikely(alloc_flags & ALLOC_NO_WATERMARKS))
4873 +- goto try_this_zone;
4874 + /*
4875 + * Distribute pages in proportion to the individual
4876 + * zone size to ensure fair page aging. The zone a
4877 +@@ -1959,9 +1999,11 @@ zonelist_scan:
4878 + */
4879 + if (alloc_flags & ALLOC_FAIR) {
4880 + if (!zone_local(preferred_zone, zone))
4881 ++ break;
4882 ++ if (zone_is_fair_depleted(zone)) {
4883 ++ nr_fair_skipped++;
4884 + continue;
4885 +- if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0)
4886 +- continue;
4887 ++ }
4888 + }
4889 + /*
4890 + * When allocating a page cache page for writing, we
4891 +@@ -1989,15 +2031,19 @@ zonelist_scan:
4892 + * will require awareness of zones in the
4893 + * dirty-throttling and the flusher threads.
4894 + */
4895 +- if ((alloc_flags & ALLOC_WMARK_LOW) &&
4896 +- (gfp_mask & __GFP_WRITE) && !zone_dirty_ok(zone))
4897 +- goto this_zone_full;
4898 ++ if (consider_zone_dirty && !zone_dirty_ok(zone))
4899 ++ continue;
4900 +
4901 + mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
4902 + if (!zone_watermark_ok(zone, order, mark,
4903 + classzone_idx, alloc_flags)) {
4904 + int ret;
4905 +
4906 ++ /* Checked here to keep the fast path fast */
4907 ++ BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
4908 ++ if (alloc_flags & ALLOC_NO_WATERMARKS)
4909 ++ goto try_this_zone;
4910 ++
4911 + if (IS_ENABLED(CONFIG_NUMA) &&
4912 + !did_zlc_setup && nr_online_nodes > 1) {
4913 + /*
4914 +@@ -2059,17 +2105,11 @@ try_this_zone:
4915 + if (page)
4916 + break;
4917 + this_zone_full:
4918 +- if (IS_ENABLED(CONFIG_NUMA))
4919 ++ if (IS_ENABLED(CONFIG_NUMA) && zlc_active)
4920 + zlc_mark_zone_full(zonelist, z);
4921 + }
4922 +
4923 +- if (unlikely(IS_ENABLED(CONFIG_NUMA) && page == NULL && zlc_active)) {
4924 +- /* Disable zlc cache for second zonelist scan */
4925 +- zlc_active = 0;
4926 +- goto zonelist_scan;
4927 +- }
4928 +-
4929 +- if (page)
4930 ++ if (page) {
4931 + /*
4932 + * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was
4933 + * necessary to allocate the page. The expectation is
4934 +@@ -2078,8 +2118,37 @@ this_zone_full:
4935 + * for !PFMEMALLOC purposes.
4936 + */
4937 + page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
4938 ++ return page;
4939 ++ }
4940 +
4941 +- return page;
4942 ++ /*
4943 ++ * The first pass makes sure allocations are spread fairly within the
4944 ++ * local node. However, the local node might have free pages left
4945 ++ * after the fairness batches are exhausted, and remote zones haven't
4946 ++ * even been considered yet. Try once more without fairness, and
4947 ++ * include remote zones now, before entering the slowpath and waking
4948 ++ * kswapd: prefer spilling to a remote zone over swapping locally.
4949 ++ */
4950 ++ if (alloc_flags & ALLOC_FAIR) {
4951 ++ alloc_flags &= ~ALLOC_FAIR;
4952 ++ if (nr_fair_skipped) {
4953 ++ zonelist_rescan = true;
4954 ++ reset_alloc_batches(preferred_zone);
4955 ++ }
4956 ++ if (nr_online_nodes > 1)
4957 ++ zonelist_rescan = true;
4958 ++ }
4959 ++
4960 ++ if (unlikely(IS_ENABLED(CONFIG_NUMA) && zlc_active)) {
4961 ++ /* Disable zlc cache for second zonelist scan */
4962 ++ zlc_active = 0;
4963 ++ zonelist_rescan = true;
4964 ++ }
4965 ++
4966 ++ if (zonelist_rescan)
4967 ++ goto zonelist_scan;
4968 ++
4969 ++ return NULL;
4970 + }
4971 +
4972 + /*
4973 +@@ -2188,7 +2257,7 @@ static inline struct page *
4974 + __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
4975 + struct zonelist *zonelist, enum zone_type high_zoneidx,
4976 + nodemask_t *nodemask, struct zone *preferred_zone,
4977 +- int migratetype)
4978 ++ int classzone_idx, int migratetype)
4979 + {
4980 + struct page *page;
4981 +
4982 +@@ -2214,7 +2283,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
4983 + page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
4984 + order, zonelist, high_zoneidx,
4985 + ALLOC_WMARK_HIGH|ALLOC_CPUSET,
4986 +- preferred_zone, migratetype);
4987 ++ preferred_zone, classzone_idx, migratetype);
4988 + if (page)
4989 + goto out;
4990 +
4991 +@@ -2249,7 +2318,7 @@ static struct page *
4992 + __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4993 + struct zonelist *zonelist, enum zone_type high_zoneidx,
4994 + nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
4995 +- int migratetype, enum migrate_mode mode,
4996 ++ int classzone_idx, int migratetype, enum migrate_mode mode,
4997 + bool *contended_compaction, bool *deferred_compaction,
4998 + unsigned long *did_some_progress)
4999 + {
5000 +@@ -2277,7 +2346,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
5001 + page = get_page_from_freelist(gfp_mask, nodemask,
5002 + order, zonelist, high_zoneidx,
5003 + alloc_flags & ~ALLOC_NO_WATERMARKS,
5004 +- preferred_zone, migratetype);
5005 ++ preferred_zone, classzone_idx, migratetype);
5006 + if (page) {
5007 + preferred_zone->compact_blockskip_flush = false;
5008 + compaction_defer_reset(preferred_zone, order, true);
5009 +@@ -2309,7 +2378,8 @@ static inline struct page *
5010 + __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
5011 + struct zonelist *zonelist, enum zone_type high_zoneidx,
5012 + nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
5013 +- int migratetype, enum migrate_mode mode, bool *contended_compaction,
5014 ++ int classzone_idx, int migratetype,
5015 ++ enum migrate_mode mode, bool *contended_compaction,
5016 + bool *deferred_compaction, unsigned long *did_some_progress)
5017 + {
5018 + return NULL;
5019 +@@ -2349,7 +2419,7 @@ static inline struct page *
5020 + __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
5021 + struct zonelist *zonelist, enum zone_type high_zoneidx,
5022 + nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
5023 +- int migratetype, unsigned long *did_some_progress)
5024 ++ int classzone_idx, int migratetype, unsigned long *did_some_progress)
5025 + {
5026 + struct page *page = NULL;
5027 + bool drained = false;
5028 +@@ -2367,7 +2437,8 @@ retry:
5029 + page = get_page_from_freelist(gfp_mask, nodemask, order,
5030 + zonelist, high_zoneidx,
5031 + alloc_flags & ~ALLOC_NO_WATERMARKS,
5032 +- preferred_zone, migratetype);
5033 ++ preferred_zone, classzone_idx,
5034 ++ migratetype);
5035 +
5036 + /*
5037 + * If an allocation failed after direct reclaim, it could be because
5038 +@@ -2390,14 +2461,14 @@ static inline struct page *
5039 + __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
5040 + struct zonelist *zonelist, enum zone_type high_zoneidx,
5041 + nodemask_t *nodemask, struct zone *preferred_zone,
5042 +- int migratetype)
5043 ++ int classzone_idx, int migratetype)
5044 + {
5045 + struct page *page;
5046 +
5047 + do {
5048 + page = get_page_from_freelist(gfp_mask, nodemask, order,
5049 + zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
5050 +- preferred_zone, migratetype);
5051 ++ preferred_zone, classzone_idx, migratetype);
5052 +
5053 + if (!page && gfp_mask & __GFP_NOFAIL)
5054 + wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
5055 +@@ -2406,28 +2477,6 @@ __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
5056 + return page;
5057 + }
5058 +
5059 +-static void reset_alloc_batches(struct zonelist *zonelist,
5060 +- enum zone_type high_zoneidx,
5061 +- struct zone *preferred_zone)
5062 +-{
5063 +- struct zoneref *z;
5064 +- struct zone *zone;
5065 +-
5066 +- for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
5067 +- /*
5068 +- * Only reset the batches of zones that were actually
5069 +- * considered in the fairness pass, we don't want to
5070 +- * trash fairness information for zones that are not
5071 +- * actually part of this zonelist's round-robin cycle.
5072 +- */
5073 +- if (!zone_local(preferred_zone, zone))
5074 +- continue;
5075 +- mod_zone_page_state(zone, NR_ALLOC_BATCH,
5076 +- high_wmark_pages(zone) - low_wmark_pages(zone) -
5077 +- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
5078 +- }
5079 +-}
5080 +-
5081 + static void wake_all_kswapds(unsigned int order,
5082 + struct zonelist *zonelist,
5083 + enum zone_type high_zoneidx,
5084 +@@ -2498,7 +2547,7 @@ static inline struct page *
5085 + __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
5086 + struct zonelist *zonelist, enum zone_type high_zoneidx,
5087 + nodemask_t *nodemask, struct zone *preferred_zone,
5088 +- int migratetype)
5089 ++ int classzone_idx, int migratetype)
5090 + {
5091 + const gfp_t wait = gfp_mask & __GFP_WAIT;
5092 + struct page *page = NULL;
5093 +@@ -2547,15 +2596,19 @@ restart:
5094 + * Find the true preferred zone if the allocation is unconstrained by
5095 + * cpusets.
5096 + */
5097 +- if (!(alloc_flags & ALLOC_CPUSET) && !nodemask)
5098 +- first_zones_zonelist(zonelist, high_zoneidx, NULL,
5099 +- &preferred_zone);
5100 ++ if (!(alloc_flags & ALLOC_CPUSET) && !nodemask) {
5101 ++ struct zoneref *preferred_zoneref;
5102 ++ preferred_zoneref = first_zones_zonelist(zonelist, high_zoneidx,
5103 ++ NULL,
5104 ++ &preferred_zone);
5105 ++ classzone_idx = zonelist_zone_idx(preferred_zoneref);
5106 ++ }
5107 +
5108 + rebalance:
5109 + /* This is the last chance, in general, before the goto nopage. */
5110 + page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
5111 + high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
5112 +- preferred_zone, migratetype);
5113 ++ preferred_zone, classzone_idx, migratetype);
5114 + if (page)
5115 + goto got_pg;
5116 +
5117 +@@ -2570,7 +2623,7 @@ rebalance:
5118 +
5119 + page = __alloc_pages_high_priority(gfp_mask, order,
5120 + zonelist, high_zoneidx, nodemask,
5121 +- preferred_zone, migratetype);
5122 ++ preferred_zone, classzone_idx, migratetype);
5123 + if (page) {
5124 + goto got_pg;
5125 + }
5126 +@@ -2601,7 +2654,8 @@ rebalance:
5127 + */
5128 + page = __alloc_pages_direct_compact(gfp_mask, order, zonelist,
5129 + high_zoneidx, nodemask, alloc_flags,
5130 +- preferred_zone, migratetype,
5131 ++ preferred_zone,
5132 ++ classzone_idx, migratetype,
5133 + migration_mode, &contended_compaction,
5134 + &deferred_compaction,
5135 + &did_some_progress);
5136 +@@ -2624,7 +2678,8 @@ rebalance:
5137 + zonelist, high_zoneidx,
5138 + nodemask,
5139 + alloc_flags, preferred_zone,
5140 +- migratetype, &did_some_progress);
5141 ++ classzone_idx, migratetype,
5142 ++ &did_some_progress);
5143 + if (page)
5144 + goto got_pg;
5145 +
5146 +@@ -2643,7 +2698,7 @@ rebalance:
5147 + page = __alloc_pages_may_oom(gfp_mask, order,
5148 + zonelist, high_zoneidx,
5149 + nodemask, preferred_zone,
5150 +- migratetype);
5151 ++ classzone_idx, migratetype);
5152 + if (page)
5153 + goto got_pg;
5154 +
5155 +@@ -2684,7 +2739,8 @@ rebalance:
5156 + */
5157 + page = __alloc_pages_direct_compact(gfp_mask, order, zonelist,
5158 + high_zoneidx, nodemask, alloc_flags,
5159 +- preferred_zone, migratetype,
5160 ++ preferred_zone,
5161 ++ classzone_idx, migratetype,
5162 + migration_mode, &contended_compaction,
5163 + &deferred_compaction,
5164 + &did_some_progress);
5165 +@@ -2711,11 +2767,13 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
5166 + {
5167 + enum zone_type high_zoneidx = gfp_zone(gfp_mask);
5168 + struct zone *preferred_zone;
5169 ++ struct zoneref *preferred_zoneref;
5170 + struct page *page = NULL;
5171 + int migratetype = allocflags_to_migratetype(gfp_mask);
5172 + unsigned int cpuset_mems_cookie;
5173 + int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
5174 + struct mem_cgroup *memcg = NULL;
5175 ++ int classzone_idx;
5176 +
5177 + gfp_mask &= gfp_allowed_mask;
5178 +
5179 +@@ -2745,39 +2803,23 @@ retry_cpuset:
5180 + cpuset_mems_cookie = read_mems_allowed_begin();
5181 +
5182 + /* The preferred zone is used for statistics later */
5183 +- first_zones_zonelist(zonelist, high_zoneidx,
5184 ++ preferred_zoneref = first_zones_zonelist(zonelist, high_zoneidx,
5185 + nodemask ? : &cpuset_current_mems_allowed,
5186 + &preferred_zone);
5187 + if (!preferred_zone)
5188 + goto out;
5189 ++ classzone_idx = zonelist_zone_idx(preferred_zoneref);
5190 +
5191 + #ifdef CONFIG_CMA
5192 + if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
5193 + alloc_flags |= ALLOC_CMA;
5194 + #endif
5195 +-retry:
5196 + /* First allocation attempt */
5197 + page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
5198 + zonelist, high_zoneidx, alloc_flags,
5199 +- preferred_zone, migratetype);
5200 ++ preferred_zone, classzone_idx, migratetype);
5201 + if (unlikely(!page)) {
5202 + /*
5203 +- * The first pass makes sure allocations are spread
5204 +- * fairly within the local node. However, the local
5205 +- * node might have free pages left after the fairness
5206 +- * batches are exhausted, and remote zones haven't
5207 +- * even been considered yet. Try once more without
5208 +- * fairness, and include remote zones now, before
5209 +- * entering the slowpath and waking kswapd: prefer
5210 +- * spilling to a remote zone over swapping locally.
5211 +- */
5212 +- if (alloc_flags & ALLOC_FAIR) {
5213 +- reset_alloc_batches(zonelist, high_zoneidx,
5214 +- preferred_zone);
5215 +- alloc_flags &= ~ALLOC_FAIR;
5216 +- goto retry;
5217 +- }
5218 +- /*
5219 + * Runtime PM, block IO and its error handling path
5220 + * can deadlock because I/O on the device might not
5221 + * complete.
5222 +@@ -2785,7 +2827,7 @@ retry:
5223 + gfp_mask = memalloc_noio_flags(gfp_mask);
5224 + page = __alloc_pages_slowpath(gfp_mask, order,
5225 + zonelist, high_zoneidx, nodemask,
5226 +- preferred_zone, migratetype);
5227 ++ preferred_zone, classzone_idx, migratetype);
5228 + }
5229 +
5230 + trace_mm_page_alloc(page, order, gfp_mask, migratetype);
5231 +@@ -2836,7 +2878,7 @@ void __free_pages(struct page *page, unsigned int order)
5232 + {
5233 + if (put_page_testzero(page)) {
5234 + if (order == 0)
5235 +- free_hot_cold_page(page, 0);
5236 ++ free_hot_cold_page(page, false);
5237 + else
5238 + __free_pages_ok(page, order);
5239 + }
5240 +@@ -3220,12 +3262,12 @@ void show_free_areas(unsigned int filter)
5241 + K(zone_page_state(zone, NR_BOUNCE)),
5242 + K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
5243 + K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
5244 +- zone->pages_scanned,
5245 ++ K(zone_page_state(zone, NR_PAGES_SCANNED)),
5246 + (!zone_reclaimable(zone) ? "yes" : "no")
5247 + );
5248 + printk("lowmem_reserve[]:");
5249 + for (i = 0; i < MAX_NR_ZONES; i++)
5250 +- printk(" %lu", zone->lowmem_reserve[i]);
5251 ++ printk(" %ld", zone->lowmem_reserve[i]);
5252 + printk("\n");
5253 + }
5254 +
5255 +@@ -4113,7 +4155,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
5256 +
5257 + static void __meminit zone_init_free_lists(struct zone *zone)
5258 + {
5259 +- int order, t;
5260 ++ unsigned int order, t;
5261 + for_each_migratetype_order(order, t) {
5262 + INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
5263 + zone->free_area[order].nr_free = 0;
5264 +@@ -5553,7 +5595,7 @@ static void calculate_totalreserve_pages(void)
5265 + for_each_online_pgdat(pgdat) {
5266 + for (i = 0; i < MAX_NR_ZONES; i++) {
5267 + struct zone *zone = pgdat->node_zones + i;
5268 +- unsigned long max = 0;
5269 ++ long max = 0;
5270 +
5271 + /* Find valid and maximum lowmem_reserve in the zone */
5272 + for (j = i; j < MAX_NR_ZONES; j++) {
5273 +@@ -6041,17 +6083,16 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
5274 + * @end_bitidx: The last bit of interest
5275 + * returns pageblock_bits flags
5276 + */
5277 +-unsigned long get_pageblock_flags_mask(struct page *page,
5278 ++unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
5279 + unsigned long end_bitidx,
5280 + unsigned long mask)
5281 + {
5282 + struct zone *zone;
5283 + unsigned long *bitmap;
5284 +- unsigned long pfn, bitidx, word_bitidx;
5285 ++ unsigned long bitidx, word_bitidx;
5286 + unsigned long word;
5287 +
5288 + zone = page_zone(page);
5289 +- pfn = page_to_pfn(page);
5290 + bitmap = get_pageblock_bitmap(zone, pfn);
5291 + bitidx = pfn_to_bitidx(zone, pfn);
5292 + word_bitidx = bitidx / BITS_PER_LONG;
5293 +@@ -6063,25 +6104,25 @@ unsigned long get_pageblock_flags_mask(struct page *page,
5294 + }
5295 +
5296 + /**
5297 +- * set_pageblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
5298 ++ * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
5299 + * @page: The page within the block of interest
5300 + * @start_bitidx: The first bit of interest
5301 + * @end_bitidx: The last bit of interest
5302 + * @flags: The flags to set
5303 + */
5304 +-void set_pageblock_flags_mask(struct page *page, unsigned long flags,
5305 ++void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
5306 ++ unsigned long pfn,
5307 + unsigned long end_bitidx,
5308 + unsigned long mask)
5309 + {
5310 + struct zone *zone;
5311 + unsigned long *bitmap;
5312 +- unsigned long pfn, bitidx, word_bitidx;
5313 ++ unsigned long bitidx, word_bitidx;
5314 + unsigned long old_word, word;
5315 +
5316 + BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
5317 +
5318 + zone = page_zone(page);
5319 +- pfn = page_to_pfn(page);
5320 + bitmap = get_pageblock_bitmap(zone, pfn);
5321 + bitidx = pfn_to_bitidx(zone, pfn);
5322 + word_bitidx = bitidx / BITS_PER_LONG;
5323 +@@ -6453,7 +6494,7 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
5324 + {
5325 + struct page *page;
5326 + struct zone *zone;
5327 +- int order, i;
5328 ++ unsigned int order, i;
5329 + unsigned long pfn;
5330 + unsigned long flags;
5331 + /* find the first valid pfn */
5332 +@@ -6505,7 +6546,7 @@ bool is_free_buddy_page(struct page *page)
5333 + struct zone *zone = page_zone(page);
5334 + unsigned long pfn = page_to_pfn(page);
5335 + unsigned long flags;
5336 +- int order;
5337 ++ unsigned int order;
5338 +
5339 + spin_lock_irqsave(&zone->lock, flags);
5340 + for (order = 0; order < MAX_ORDER; order++) {
5341 +diff --git a/mm/shmem.c b/mm/shmem.c
5342 +index 0f1447563f17..85d8a1a3626c 100644
5343 +--- a/mm/shmem.c
5344 ++++ b/mm/shmem.c
5345 +@@ -1035,6 +1035,9 @@ repeat:
5346 + goto failed;
5347 + }
5348 +
5349 ++ if (page && sgp == SGP_WRITE)
5350 ++ mark_page_accessed(page);
5351 ++
5352 + /* fallocated page? */
5353 + if (page && !PageUptodate(page)) {
5354 + if (sgp != SGP_READ)
5355 +@@ -1116,6 +1119,9 @@ repeat:
5356 + shmem_recalc_inode(inode);
5357 + spin_unlock(&info->lock);
5358 +
5359 ++ if (sgp == SGP_WRITE)
5360 ++ mark_page_accessed(page);
5361 ++
5362 + delete_from_swap_cache(page);
5363 + set_page_dirty(page);
5364 + swap_free(swap);
5365 +@@ -1140,8 +1146,11 @@ repeat:
5366 + goto decused;
5367 + }
5368 +
5369 +- SetPageSwapBacked(page);
5370 ++ __SetPageSwapBacked(page);
5371 + __set_page_locked(page);
5372 ++ if (sgp == SGP_WRITE)
5373 ++ init_page_accessed(page);
5374 ++
5375 + error = mem_cgroup_cache_charge(page, current->mm,
5376 + gfp & GFP_RECLAIM_MASK);
5377 + if (error)
5378 +diff --git a/mm/swap.c b/mm/swap.c
5379 +index c8048d71c642..d2ceddf70d42 100644
5380 +--- a/mm/swap.c
5381 ++++ b/mm/swap.c
5382 +@@ -67,7 +67,7 @@ static void __page_cache_release(struct page *page)
5383 + static void __put_single_page(struct page *page)
5384 + {
5385 + __page_cache_release(page);
5386 +- free_hot_cold_page(page, 0);
5387 ++ free_hot_cold_page(page, false);
5388 + }
5389 +
5390 + static void __put_compound_page(struct page *page)
5391 +@@ -469,7 +469,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec,
5392 + SetPageActive(page);
5393 + lru += LRU_ACTIVE;
5394 + add_page_to_lru_list(page, lruvec, lru);
5395 +- trace_mm_lru_activate(page, page_to_pfn(page));
5396 ++ trace_mm_lru_activate(page);
5397 +
5398 + __count_vm_event(PGACTIVATE);
5399 + update_page_reclaim_stat(lruvec, file, 1);
5400 +@@ -581,12 +581,17 @@ void mark_page_accessed(struct page *page)
5401 + EXPORT_SYMBOL(mark_page_accessed);
5402 +
5403 + /*
5404 +- * Queue the page for addition to the LRU via pagevec. The decision on whether
5405 +- * to add the page to the [in]active [file|anon] list is deferred until the
5406 +- * pagevec is drained. This gives a chance for the caller of __lru_cache_add()
5407 +- * have the page added to the active list using mark_page_accessed().
5408 ++ * Used to mark_page_accessed(page) that is not visible yet and when it is
5409 ++ * still safe to use non-atomic ops
5410 + */
5411 +-void __lru_cache_add(struct page *page)
5412 ++void init_page_accessed(struct page *page)
5413 ++{
5414 ++ if (!PageReferenced(page))
5415 ++ __SetPageReferenced(page);
5416 ++}
5417 ++EXPORT_SYMBOL(init_page_accessed);
5418 ++
5419 ++static void __lru_cache_add(struct page *page)
5420 + {
5421 + struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
5422 +
5423 +@@ -596,11 +601,34 @@ void __lru_cache_add(struct page *page)
5424 + pagevec_add(pvec, page);
5425 + put_cpu_var(lru_add_pvec);
5426 + }
5427 +-EXPORT_SYMBOL(__lru_cache_add);
5428 ++
5429 ++/**
5430 ++ * lru_cache_add: add a page to the page lists
5431 ++ * @page: the page to add
5432 ++ */
5433 ++void lru_cache_add_anon(struct page *page)
5434 ++{
5435 ++ if (PageActive(page))
5436 ++ ClearPageActive(page);
5437 ++ __lru_cache_add(page);
5438 ++}
5439 ++
5440 ++void lru_cache_add_file(struct page *page)
5441 ++{
5442 ++ if (PageActive(page))
5443 ++ ClearPageActive(page);
5444 ++ __lru_cache_add(page);
5445 ++}
5446 ++EXPORT_SYMBOL(lru_cache_add_file);
5447 +
5448 + /**
5449 + * lru_cache_add - add a page to a page list
5450 + * @page: the page to be added to the LRU.
5451 ++ *
5452 ++ * Queue the page for addition to the LRU via pagevec. The decision on whether
5453 ++ * to add the page to the [in]active [file|anon] list is deferred until the
5454 ++ * pagevec is drained. This gives a chance for the caller of lru_cache_add()
5455 ++ * have the page added to the active list using mark_page_accessed().
5456 + */
5457 + void lru_cache_add(struct page *page)
5458 + {
5459 +@@ -811,7 +839,7 @@ void lru_add_drain_all(void)
5460 + * grabbed the page via the LRU. If it did, give up: shrink_inactive_list()
5461 + * will free it.
5462 + */
5463 +-void release_pages(struct page **pages, int nr, int cold)
5464 ++void release_pages(struct page **pages, int nr, bool cold)
5465 + {
5466 + int i;
5467 + LIST_HEAD(pages_to_free);
5468 +@@ -852,7 +880,7 @@ void release_pages(struct page **pages, int nr, int cold)
5469 + }
5470 +
5471 + /* Clear Active bit in case of parallel mark_page_accessed */
5472 +- ClearPageActive(page);
5473 ++ __ClearPageActive(page);
5474 +
5475 + list_add(&page->lru, &pages_to_free);
5476 + }
5477 +@@ -934,7 +962,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
5478 + SetPageLRU(page);
5479 + add_page_to_lru_list(page, lruvec, lru);
5480 + update_page_reclaim_stat(lruvec, file, active);
5481 +- trace_mm_lru_insertion(page, page_to_pfn(page), lru, trace_pagemap_flags(page));
5482 ++ trace_mm_lru_insertion(page, lru);
5483 + }
5484 +
5485 + /*
5486 +diff --git a/mm/swap_state.c b/mm/swap_state.c
5487 +index e76ace30d436..2972eee184a4 100644
5488 +--- a/mm/swap_state.c
5489 ++++ b/mm/swap_state.c
5490 +@@ -270,7 +270,7 @@ void free_pages_and_swap_cache(struct page **pages, int nr)
5491 +
5492 + for (i = 0; i < todo; i++)
5493 + free_swap_cache(pagep[i]);
5494 +- release_pages(pagep, todo, 0);
5495 ++ release_pages(pagep, todo, false);
5496 + pagep += todo;
5497 + nr -= todo;
5498 + }
5499 +diff --git a/mm/vmalloc.c b/mm/vmalloc.c
5500 +index 0fdf96803c5b..aa3891e8e388 100644
5501 +--- a/mm/vmalloc.c
5502 ++++ b/mm/vmalloc.c
5503 +@@ -2681,14 +2681,14 @@ void get_vmalloc_info(struct vmalloc_info *vmi)
5504 +
5505 + prev_end = VMALLOC_START;
5506 +
5507 +- spin_lock(&vmap_area_lock);
5508 ++ rcu_read_lock();
5509 +
5510 + if (list_empty(&vmap_area_list)) {
5511 + vmi->largest_chunk = VMALLOC_TOTAL;
5512 + goto out;
5513 + }
5514 +
5515 +- list_for_each_entry(va, &vmap_area_list, list) {
5516 ++ list_for_each_entry_rcu(va, &vmap_area_list, list) {
5517 + unsigned long addr = va->va_start;
5518 +
5519 + /*
5520 +@@ -2715,7 +2715,7 @@ void get_vmalloc_info(struct vmalloc_info *vmi)
5521 + vmi->largest_chunk = VMALLOC_END - prev_end;
5522 +
5523 + out:
5524 +- spin_unlock(&vmap_area_lock);
5525 ++ rcu_read_unlock();
5526 + }
5527 + #endif
5528 +
5529 +diff --git a/mm/vmscan.c b/mm/vmscan.c
5530 +index be6a689a71a6..b850ced69ed6 100644
5531 +--- a/mm/vmscan.c
5532 ++++ b/mm/vmscan.c
5533 +@@ -163,7 +163,8 @@ static unsigned long zone_reclaimable_pages(struct zone *zone)
5534 +
5535 + bool zone_reclaimable(struct zone *zone)
5536 + {
5537 +- return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
5538 ++ return zone_page_state(zone, NR_PAGES_SCANNED) <
5539 ++ zone_reclaimable_pages(zone) * 6;
5540 + }
5541 +
5542 + static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
5543 +@@ -1107,7 +1108,7 @@ keep:
5544 + VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
5545 + }
5546 +
5547 +- free_hot_cold_page_list(&free_pages, 1);
5548 ++ free_hot_cold_page_list(&free_pages, true);
5549 +
5550 + list_splice(&ret_pages, page_list);
5551 + count_vm_events(PGACTIVATE, pgactivate);
5552 +@@ -1470,7 +1471,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
5553 + __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
5554 +
5555 + if (global_reclaim(sc)) {
5556 +- zone->pages_scanned += nr_scanned;
5557 ++ __mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned);
5558 + if (current_is_kswapd())
5559 + __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned);
5560 + else
5561 +@@ -1505,7 +1506,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
5562 +
5563 + spin_unlock_irq(&zone->lru_lock);
5564 +
5565 +- free_hot_cold_page_list(&page_list, 1);
5566 ++ free_hot_cold_page_list(&page_list, true);
5567 +
5568 + /*
5569 + * If reclaim is isolating dirty pages under writeback, it implies
5570 +@@ -1659,7 +1660,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
5571 + nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
5572 + &nr_scanned, sc, isolate_mode, lru);
5573 + if (global_reclaim(sc))
5574 +- zone->pages_scanned += nr_scanned;
5575 ++ __mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned);
5576 +
5577 + reclaim_stat->recent_scanned[file] += nr_taken;
5578 +
5579 +@@ -1725,7 +1726,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
5580 + __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
5581 + spin_unlock_irq(&zone->lru_lock);
5582 +
5583 +- free_hot_cold_page_list(&l_hold, 1);
5584 ++ free_hot_cold_page_list(&l_hold, true);
5585 + }
5586 +
5587 + #ifdef CONFIG_SWAP
5588 +@@ -1847,7 +1848,7 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
5589 + struct zone *zone = lruvec_zone(lruvec);
5590 + unsigned long anon_prio, file_prio;
5591 + enum scan_balance scan_balance;
5592 +- unsigned long anon, file, free;
5593 ++ unsigned long anon, file;
5594 + bool force_scan = false;
5595 + unsigned long ap, fp;
5596 + enum lru_list lru;
5597 +@@ -1895,11 +1896,6 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
5598 + goto out;
5599 + }
5600 +
5601 +- anon = get_lru_size(lruvec, LRU_ACTIVE_ANON) +
5602 +- get_lru_size(lruvec, LRU_INACTIVE_ANON);
5603 +- file = get_lru_size(lruvec, LRU_ACTIVE_FILE) +
5604 +- get_lru_size(lruvec, LRU_INACTIVE_FILE);
5605 +-
5606 + /*
5607 + * If it's foreseeable that reclaiming the file cache won't be
5608 + * enough to get the zone back into a desirable shape, we have
5609 +@@ -1907,8 +1903,14 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
5610 + * thrashing - remaining file pages alone.
5611 + */
5612 + if (global_reclaim(sc)) {
5613 +- free = zone_page_state(zone, NR_FREE_PAGES);
5614 +- if (unlikely(file + free <= high_wmark_pages(zone))) {
5615 ++ unsigned long zonefile;
5616 ++ unsigned long zonefree;
5617 ++
5618 ++ zonefree = zone_page_state(zone, NR_FREE_PAGES);
5619 ++ zonefile = zone_page_state(zone, NR_ACTIVE_FILE) +
5620 ++ zone_page_state(zone, NR_INACTIVE_FILE);
5621 ++
5622 ++ if (unlikely(zonefile + zonefree <= high_wmark_pages(zone))) {
5623 + scan_balance = SCAN_ANON;
5624 + goto out;
5625 + }
5626 +@@ -1943,6 +1945,12 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
5627 + *
5628 + * anon in [0], file in [1]
5629 + */
5630 ++
5631 ++ anon = get_lru_size(lruvec, LRU_ACTIVE_ANON) +
5632 ++ get_lru_size(lruvec, LRU_INACTIVE_ANON);
5633 ++ file = get_lru_size(lruvec, LRU_ACTIVE_FILE) +
5634 ++ get_lru_size(lruvec, LRU_INACTIVE_FILE);
5635 ++
5636 + spin_lock_irq(&zone->lru_lock);
5637 + if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
5638 + reclaim_stat->recent_scanned[0] /= 2;
5639 +diff --git a/mm/vmstat.c b/mm/vmstat.c
5640 +index def5dd2fbe61..eded1909a690 100644
5641 +--- a/mm/vmstat.c
5642 ++++ b/mm/vmstat.c
5643 +@@ -200,7 +200,7 @@ void set_pgdat_percpu_threshold(pg_data_t *pgdat,
5644 + continue;
5645 +
5646 + threshold = (*calculate_pressure)(zone);
5647 +- for_each_possible_cpu(cpu)
5648 ++ for_each_online_cpu(cpu)
5649 + per_cpu_ptr(zone->pageset, cpu)->stat_threshold
5650 + = threshold;
5651 + }
5652 +@@ -761,6 +761,7 @@ const char * const vmstat_text[] = {
5653 + "nr_shmem",
5654 + "nr_dirtied",
5655 + "nr_written",
5656 ++ "nr_pages_scanned",
5657 +
5658 + #ifdef CONFIG_NUMA
5659 + "numa_hit",
5660 +@@ -1055,7 +1056,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
5661 + min_wmark_pages(zone),
5662 + low_wmark_pages(zone),
5663 + high_wmark_pages(zone),
5664 +- zone->pages_scanned,
5665 ++ zone_page_state(zone, NR_PAGES_SCANNED),
5666 + zone->spanned_pages,
5667 + zone->present_pages,
5668 + zone->managed_pages);
5669 +@@ -1065,10 +1066,10 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
5670 + zone_page_state(zone, i));
5671 +
5672 + seq_printf(m,
5673 +- "\n protection: (%lu",
5674 ++ "\n protection: (%ld",
5675 + zone->lowmem_reserve[0]);
5676 + for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
5677 +- seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
5678 ++ seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
5679 + seq_printf(m,
5680 + ")"
5681 + "\n pagesets");
5682 +diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
5683 +index 77c173282f38..4a662f15eaee 100644
5684 +--- a/net/netfilter/ipvs/ip_vs_ftp.c
5685 ++++ b/net/netfilter/ipvs/ip_vs_ftp.c
5686 +@@ -183,6 +183,8 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
5687 + struct nf_conn *ct;
5688 + struct net *net;
5689 +
5690 ++ *diff = 0;
5691 ++
5692 + #ifdef CONFIG_IP_VS_IPV6
5693 + /* This application helper doesn't work with IPv6 yet,
5694 + * so turn this into a no-op for IPv6 packets
5695 +@@ -191,8 +193,6 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
5696 + return 1;
5697 + #endif
5698 +
5699 +- *diff = 0;
5700 +-
5701 + /* Only useful for established sessions */
5702 + if (cp->state != IP_VS_TCP_S_ESTABLISHED)
5703 + return 1;
5704 +@@ -321,6 +321,9 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
5705 + struct ip_vs_conn *n_cp;
5706 + struct net *net;
5707 +
5708 ++ /* no diff required for incoming packets */
5709 ++ *diff = 0;
5710 ++
5711 + #ifdef CONFIG_IP_VS_IPV6
5712 + /* This application helper doesn't work with IPv6 yet,
5713 + * so turn this into a no-op for IPv6 packets
5714 +@@ -329,9 +332,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
5715 + return 1;
5716 + #endif
5717 +
5718 +- /* no diff required for incoming packets */
5719 +- *diff = 0;
5720 +-
5721 + /* Only useful for established sessions */
5722 + if (cp->state != IP_VS_TCP_S_ESTABLISHED)
5723 + return 1;
5724 +diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
5725 +index bf8a108b46e2..6cf2f077e09c 100644
5726 +--- a/net/netfilter/nfnetlink.c
5727 ++++ b/net/netfilter/nfnetlink.c
5728 +@@ -265,7 +265,8 @@ replay:
5729 + nlh = nlmsg_hdr(skb);
5730 + err = 0;
5731 +
5732 +- if (nlh->nlmsg_len < NLMSG_HDRLEN) {
5733 ++ if (nlmsg_len(nlh) < sizeof(struct nfgenmsg) ||
5734 ++ skb->len < nlh->nlmsg_len) {
5735 + err = -EINVAL;
5736 + goto ack;
5737 + }
5738 +diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
5739 +index 91280b82da08..513f7bd85cb7 100755
5740 +--- a/scripts/recordmcount.pl
5741 ++++ b/scripts/recordmcount.pl
5742 +@@ -262,7 +262,6 @@ if ($arch eq "x86_64") {
5743 + # force flags for this arch
5744 + $ld .= " -m shlelf_linux";
5745 + $objcopy .= " -O elf32-sh-linux";
5746 +- $cc .= " -m32";
5747 +
5748 + } elsif ($arch eq "powerpc") {
5749 + $local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\.?\\S+)";
5750 +diff --git a/security/keys/gc.c b/security/keys/gc.c
5751 +index d3222b6d7d59..009d9370c8fd 100644
5752 +--- a/security/keys/gc.c
5753 ++++ b/security/keys/gc.c
5754 +@@ -157,12 +157,12 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
5755 + if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
5756 + atomic_dec(&key->user->nikeys);
5757 +
5758 +- key_user_put(key->user);
5759 +-
5760 + /* now throw away the key memory */
5761 + if (key->type->destroy)
5762 + key->type->destroy(key);
5763 +
5764 ++ key_user_put(key->user);
5765 ++
5766 + kfree(key->description);
5767 +
5768 + #ifdef KEY_DEBUGGING
5769 +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
5770 +index 1bed780e21d9..2d37b3fc3a21 100644
5771 +--- a/sound/usb/mixer.c
5772 ++++ b/sound/usb/mixer.c
5773 +@@ -886,6 +886,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
5774 + case USB_ID(0x046d, 0x0807): /* Logitech Webcam C500 */
5775 + case USB_ID(0x046d, 0x0808):
5776 + case USB_ID(0x046d, 0x0809):
5777 ++ case USB_ID(0x046d, 0x0819): /* Logitech Webcam C210 */
5778 + case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
5779 + case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
5780 + case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */