commit: 9a044a4deae2ea2a876cb6bea5415a1efac72a9e
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Sep 9 11:25:12 2018 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Sep 9 11:25:12 2018 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9a044a4d

Linux patch 4.18.7

0000_README | 4 +
1006_linux-4.18.7.patch | 5658 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 5662 insertions(+)

diff --git a/0000_README b/0000_README
index 8bfc2e4..f3682ca 100644
--- a/0000_README
+++ b/0000_README
@@ -67,6 +67,10 @@ Patch: 1005_linux-4.18.6.patch
From: http://www.kernel.org
Desc: Linux 4.18.6

+Patch: 1006_linux-4.18.7.patch
+From: http://www.kernel.org
+Desc: Linux 4.18.7
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1006_linux-4.18.7.patch b/1006_linux-4.18.7.patch
new file mode 100644
index 0000000..7ab3155
--- /dev/null
+++ b/1006_linux-4.18.7.patch
@@ -0,0 +1,5658 @@
+diff --git a/Makefile b/Makefile
+index 62524f4d42ad..711b04d00e49 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 18
+-SUBLEVEL = 6
++SUBLEVEL = 7
+ EXTRAVERSION =
+ NAME = Merciless Moray
+
+diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
+index c210a25dd6da..cff52d8ffdb1 100644
+--- a/arch/alpha/kernel/osf_sys.c
++++ b/arch/alpha/kernel/osf_sys.c
+@@ -530,24 +530,19 @@ SYSCALL_DEFINE4(osf_mount, unsigned long, typenr, const char __user *, path,
+ SYSCALL_DEFINE1(osf_utsname, char __user *, name)
+ {
+ int error;
++ char tmp[5 * 32];
+
+ down_read(&uts_sem);
+- error = -EFAULT;
+- if (copy_to_user(name + 0, utsname()->sysname, 32))
+- goto out;
+- if (copy_to_user(name + 32, utsname()->nodename, 32))
+- goto out;
+- if (copy_to_user(name + 64, utsname()->release, 32))
+- goto out;
+- if (copy_to_user(name + 96, utsname()->version, 32))
+- goto out;
+- if (copy_to_user(name + 128, utsname()->machine, 32))
+- goto out;
++ memcpy(tmp + 0 * 32, utsname()->sysname, 32);
++ memcpy(tmp + 1 * 32, utsname()->nodename, 32);
++ memcpy(tmp + 2 * 32, utsname()->release, 32);
++ memcpy(tmp + 3 * 32, utsname()->version, 32);
++ memcpy(tmp + 4 * 32, utsname()->machine, 32);
++ up_read(&uts_sem);
+
+- error = 0;
+- out:
+- up_read(&uts_sem);
+- return error;
++ if (copy_to_user(name, tmp, sizeof(tmp)))
++ return -EFAULT;
++ return 0;
+ }
+
+ SYSCALL_DEFINE0(getpagesize)
+@@ -567,18 +562,21 @@ SYSCALL_DEFINE2(osf_getdomainname, char __user *, name, int, namelen)
+ {
+ int len, err = 0;
+ char *kname;
++ char tmp[32];
+
+- if (namelen > 32)
++ if (namelen < 0 || namelen > 32)
+ namelen = 32;
+
+ down_read(&uts_sem);
+ kname = utsname()->domainname;
+ len = strnlen(kname, namelen);
+- if (copy_to_user(name, kname, min(len + 1, namelen)))
+- err = -EFAULT;
++ len = min(len + 1, namelen);
++ memcpy(tmp, kname, len);
+ up_read(&uts_sem);
+
+- return err;
++ if (copy_to_user(name, tmp, len))
++ return -EFAULT;
++ return 0;
+ }
+
+ /*
+@@ -739,13 +737,14 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
+ };
+ unsigned long offset;
+ const char *res;
+- long len, err = -EINVAL;
++ long len;
++ char tmp[__NEW_UTS_LEN + 1];
+
+ offset = command-1;
+ if (offset >= ARRAY_SIZE(sysinfo_table)) {
+ /* Digital UNIX has a few unpublished interfaces here */
+ printk("sysinfo(%d)", command);
+- goto out;
++ return -EINVAL;
+ }
+
+ down_read(&uts_sem);
+@@ -753,13 +752,11 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
+ len = strlen(res)+1;
+ if ((unsigned long)len > (unsigned long)count)
+ len = count;
+- if (copy_to_user(buf, res, len))
+- err = -EFAULT;
+- else
+- err = 0;
++ memcpy(tmp, res, len);
+ up_read(&uts_sem);
+- out:
+- return err;
++ if (copy_to_user(buf, tmp, len))
++ return -EFAULT;
++ return 0;
+ }
+
+ SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer,
+diff --git a/arch/arm/boot/dts/am571x-idk.dts b/arch/arm/boot/dts/am571x-idk.dts
+index 5bb9d68d6e90..d9a2049a1ea8 100644
+--- a/arch/arm/boot/dts/am571x-idk.dts
++++ b/arch/arm/boot/dts/am571x-idk.dts
+@@ -66,10 +66,6 @@
+ };
+ };
+
+-&omap_dwc3_2 {
+- extcon = <&extcon_usb2>;
+-};
+-
+ &extcon_usb2 {
+ id-gpio = <&gpio5 7 GPIO_ACTIVE_HIGH>;
+ vbus-gpio = <&gpio7 22 GPIO_ACTIVE_HIGH>;
+diff --git a/arch/arm/boot/dts/am572x-idk-common.dtsi b/arch/arm/boot/dts/am572x-idk-common.dtsi
+index c6d858b31011..784639ddf451 100644
+--- a/arch/arm/boot/dts/am572x-idk-common.dtsi
++++ b/arch/arm/boot/dts/am572x-idk-common.dtsi
+@@ -57,10 +57,6 @@
+ };
+ };
+
+-&omap_dwc3_2 {
+- extcon = <&extcon_usb2>;
+-};
+-
+ &extcon_usb2 {
+ id-gpio = <&gpio3 16 GPIO_ACTIVE_HIGH>;
+ vbus-gpio = <&gpio3 26 GPIO_ACTIVE_HIGH>;
+diff --git a/arch/arm/boot/dts/am57xx-idk-common.dtsi b/arch/arm/boot/dts/am57xx-idk-common.dtsi
+index ad87f1ae904d..c9063ffca524 100644
+--- a/arch/arm/boot/dts/am57xx-idk-common.dtsi
++++ b/arch/arm/boot/dts/am57xx-idk-common.dtsi
+@@ -395,8 +395,13 @@
+ dr_mode = "host";
+ };
+
++&omap_dwc3_2 {
++ extcon = <&extcon_usb2>;
++};
++
+ &usb2 {
+- dr_mode = "peripheral";
++ extcon = <&extcon_usb2>;
++ dr_mode = "otg";
+ };
+
+ &mmc1 {
+diff --git a/arch/arm/boot/dts/tegra30-cardhu.dtsi b/arch/arm/boot/dts/tegra30-cardhu.dtsi
+index 92a9740c533f..3b1db7b9ec50 100644
+--- a/arch/arm/boot/dts/tegra30-cardhu.dtsi
++++ b/arch/arm/boot/dts/tegra30-cardhu.dtsi
+@@ -206,6 +206,7 @@
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x70>;
++ reset-gpio = <&gpio TEGRA_GPIO(BB, 0) GPIO_ACTIVE_LOW>;
+ };
+ };
+
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 42c090cf0292..3eb034189cf8 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -754,7 +754,6 @@ config NEED_PER_CPU_EMBED_FIRST_CHUNK
+
+ config HOLES_IN_ZONE
+ def_bool y
+- depends on NUMA
+
+ source kernel/Kconfig.preempt
+ source kernel/Kconfig.hz
+diff --git a/arch/arm64/crypto/sm4-ce-glue.c b/arch/arm64/crypto/sm4-ce-glue.c
+index b7fb5274b250..0c4fc223f225 100644
+--- a/arch/arm64/crypto/sm4-ce-glue.c
++++ b/arch/arm64/crypto/sm4-ce-glue.c
+@@ -69,5 +69,5 @@ static void __exit sm4_ce_mod_fini(void)
+ crypto_unregister_alg(&sm4_ce_alg);
+ }
+
+-module_cpu_feature_match(SM3, sm4_ce_mod_init);
++module_cpu_feature_match(SM4, sm4_ce_mod_init);
+ module_exit(sm4_ce_mod_fini);
+diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
+index 5a23010af600..1e7a33592e29 100644
+--- a/arch/powerpc/include/asm/fadump.h
++++ b/arch/powerpc/include/asm/fadump.h
+@@ -195,9 +195,6 @@ struct fadump_crash_info_header {
+ struct cpumask online_mask;
+ };
+
+-/* Crash memory ranges */
+-#define INIT_CRASHMEM_RANGES (INIT_MEMBLOCK_REGIONS + 2)
+-
+ struct fad_crash_memory_ranges {
+ unsigned long long base;
+ unsigned long long size;
+diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
+index 2160be2e4339..b321c82b3624 100644
+--- a/arch/powerpc/include/asm/nohash/pgtable.h
++++ b/arch/powerpc/include/asm/nohash/pgtable.h
+@@ -51,17 +51,14 @@ static inline int pte_present(pte_t pte)
+ #define pte_access_permitted pte_access_permitted
+ static inline bool pte_access_permitted(pte_t pte, bool write)
+ {
+- unsigned long pteval = pte_val(pte);
+ /*
+ * A read-only access is controlled by _PAGE_USER bit.
+ * We have _PAGE_READ set for WRITE and EXECUTE
+ */
+- unsigned long need_pte_bits = _PAGE_PRESENT | _PAGE_USER;
+-
+- if (write)
+- need_pte_bits |= _PAGE_WRITE;
++ if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
++ return false;
+
+- if ((pteval & need_pte_bits) != need_pte_bits)
++ if (write && !pte_write(pte))
+ return false;
+
+ return true;
+diff --git a/arch/powerpc/include/asm/pkeys.h b/arch/powerpc/include/asm/pkeys.h
+index 5ba80cffb505..3312606fda07 100644
+--- a/arch/powerpc/include/asm/pkeys.h
++++ b/arch/powerpc/include/asm/pkeys.h
+@@ -94,8 +94,6 @@ static inline bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
+ __mm_pkey_is_allocated(mm, pkey));
+ }
+
+-extern void __arch_activate_pkey(int pkey);
+-extern void __arch_deactivate_pkey(int pkey);
+ /*
+ * Returns a positive, 5-bit key on success, or -1 on failure.
+ * Relies on the mmap_sem to protect against concurrency in mm_pkey_alloc() and
+@@ -124,11 +122,6 @@ static inline int mm_pkey_alloc(struct mm_struct *mm)
+ ret = ffz((u32)mm_pkey_allocation_map(mm));
+ __mm_pkey_allocated(mm, ret);
+
+- /*
+- * Enable the key in the hardware
+- */
+- if (ret > 0)
+- __arch_activate_pkey(ret);
+ return ret;
+ }
+
+@@ -140,10 +133,6 @@ static inline int mm_pkey_free(struct mm_struct *mm, int pkey)
+ if (!mm_pkey_is_allocated(mm, pkey))
+ return -EINVAL;
+
+- /*
+- * Disable the key in the hardware
+- */
+- __arch_deactivate_pkey(pkey);
+ __mm_pkey_free(mm, pkey);
+
+ return 0;
+diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
+index 07e8396d472b..958eb5cd2a9e 100644
+--- a/arch/powerpc/kernel/fadump.c
++++ b/arch/powerpc/kernel/fadump.c
+@@ -47,8 +47,10 @@ static struct fadump_mem_struct fdm;
+ static const struct fadump_mem_struct *fdm_active;
+
+ static DEFINE_MUTEX(fadump_mutex);
+-struct fad_crash_memory_ranges crash_memory_ranges[INIT_CRASHMEM_RANGES];
++struct fad_crash_memory_ranges *crash_memory_ranges;
++int crash_memory_ranges_size;
+ int crash_mem_ranges;
++int max_crash_mem_ranges;
+
+ /* Scan the Firmware Assisted dump configuration details. */
+ int __init early_init_dt_scan_fw_dump(unsigned long node,
+@@ -868,38 +870,88 @@ static int __init process_fadump(const struct fadump_mem_struct *fdm_active)
+ return 0;
+ }
+
+-static inline void fadump_add_crash_memory(unsigned long long base,
+- unsigned long long end)
++static void free_crash_memory_ranges(void)
++{
++ kfree(crash_memory_ranges);
++ crash_memory_ranges = NULL;
++ crash_memory_ranges_size = 0;
++ max_crash_mem_ranges = 0;
++}
++
++/*
++ * Allocate or reallocate crash memory ranges array in incremental units
++ * of PAGE_SIZE.
++ */
++static int allocate_crash_memory_ranges(void)
++{
++ struct fad_crash_memory_ranges *new_array;
++ u64 new_size;
++
++ new_size = crash_memory_ranges_size + PAGE_SIZE;
++ pr_debug("Allocating %llu bytes of memory for crash memory ranges\n",
++ new_size);
++
++ new_array = krealloc(crash_memory_ranges, new_size, GFP_KERNEL);
++ if (new_array == NULL) {
++ pr_err("Insufficient memory for setting up crash memory ranges\n");
++ free_crash_memory_ranges();
++ return -ENOMEM;
++ }
++
++ crash_memory_ranges = new_array;
++ crash_memory_ranges_size = new_size;
++ max_crash_mem_ranges = (new_size /
++ sizeof(struct fad_crash_memory_ranges));
++ return 0;
++}
++
++static inline int fadump_add_crash_memory(unsigned long long base,
++ unsigned long long end)
+ {
+ if (base == end)
+- return;
++ return 0;
++
++ if (crash_mem_ranges == max_crash_mem_ranges) {
++ int ret;
++
++ ret = allocate_crash_memory_ranges();
++ if (ret)
++ return ret;
++ }
+
+ pr_debug("crash_memory_range[%d] [%#016llx-%#016llx], %#llx bytes\n",
+ crash_mem_ranges, base, end - 1, (end - base));
+ crash_memory_ranges[crash_mem_ranges].base = base;
+ crash_memory_ranges[crash_mem_ranges].size = end - base;
+ crash_mem_ranges++;
++ return 0;
+ }
+
+-static void fadump_exclude_reserved_area(unsigned long long start,
++static int fadump_exclude_reserved_area(unsigned long long start,
+ unsigned long long end)
+ {
+ unsigned long long ra_start, ra_end;
++ int ret = 0;
+
+ ra_start = fw_dump.reserve_dump_area_start;
+ ra_end = ra_start + fw_dump.reserve_dump_area_size;
+
+ if ((ra_start < end) && (ra_end > start)) {
+ if ((start < ra_start) && (end > ra_end)) {
+- fadump_add_crash_memory(start, ra_start);
+- fadump_add_crash_memory(ra_end, end);
++ ret = fadump_add_crash_memory(start, ra_start);
++ if (ret)
++ return ret;
++
++ ret = fadump_add_crash_memory(ra_end, end);
+ } else if (start < ra_start) {
+- fadump_add_crash_memory(start, ra_start);
++ ret = fadump_add_crash_memory(start, ra_start);
+ } else if (ra_end < end) {
+- fadump_add_crash_memory(ra_end, end);
++ ret = fadump_add_crash_memory(ra_end, end);
+ }
+ } else
+- fadump_add_crash_memory(start, end);
++ ret = fadump_add_crash_memory(start, end);
++
++ return ret;
+ }
+
+ static int fadump_init_elfcore_header(char *bufp)
+@@ -939,10 +991,11 @@ static int fadump_init_elfcore_header(char *bufp)
+ * Traverse through memblock structure and setup crash memory ranges. These
+ * ranges will be used create PT_LOAD program headers in elfcore header.
+ */
+-static void fadump_setup_crash_memory_ranges(void)
++static int fadump_setup_crash_memory_ranges(void)
+ {
+ struct memblock_region *reg;
+ unsigned long long start, end;
++ int ret;
+
+ pr_debug("Setup crash memory ranges.\n");
+ crash_mem_ranges = 0;
+@@ -953,7 +1006,9 @@ static void fadump_setup_crash_memory_ranges(void)
+ * specified during fadump registration. We need to create a separate
+ * program header for this chunk with the correct offset.
+ */
+- fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
++ ret = fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
++ if (ret)
++ return ret;
+
+ for_each_memblock(memory, reg) {
+ start = (unsigned long long)reg->base;
+@@ -973,8 +1028,12 @@ static void fadump_setup_crash_memory_ranges(void)
+ }
+
+ /* add this range excluding the reserved dump area. */
+- fadump_exclude_reserved_area(start, end);
++ ret = fadump_exclude_reserved_area(start, end);
++ if (ret)
++ return ret;
+ }
++
++ return 0;
+ }
+
+ /*
+@@ -1097,6 +1156,7 @@ static int register_fadump(void)
+ {
+ unsigned long addr;
+ void *vaddr;
++ int ret;
+
+ /*
+ * If no memory is reserved then we can not register for firmware-
+@@ -1105,7 +1165,9 @@ static int register_fadump(void)
+ if (!fw_dump.reserve_dump_area_size)
+ return -ENODEV;
+
+- fadump_setup_crash_memory_ranges();
++ ret = fadump_setup_crash_memory_ranges();
++ if (ret)
++ return ret;
+
+ addr = be64_to_cpu(fdm.rmr_region.destination_address) + be64_to_cpu(fdm.rmr_region.source_len);
+ /* Initialize fadump crash info header. */
+@@ -1183,6 +1245,7 @@ void fadump_cleanup(void)
+ } else if (fw_dump.dump_registered) {
+ /* Un-register Firmware-assisted dump if it was registered. */
+ fadump_unregister_dump(&fdm);
++ free_crash_memory_ranges();
+ }
+ }
+
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index 9ef4aea9fffe..991d09774108 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -583,6 +583,7 @@ static void save_all(struct task_struct *tsk)
+ __giveup_spe(tsk);
+
+ msr_check_and_clear(msr_all_available);
++ thread_pkey_regs_save(&tsk->thread);
+ }
+
+ void flush_all_to_thread(struct task_struct *tsk)
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index de686b340f4a..a995513573c2 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -46,6 +46,7 @@
+ #include <linux/compiler.h>
+ #include <linux/of.h>
+
++#include <asm/ftrace.h>
+ #include <asm/reg.h>
+ #include <asm/ppc-opcode.h>
+ #include <asm/asm-prototypes.h>
+diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
+index f3d4b4a0e561..3bb5cec03d1f 100644
+--- a/arch/powerpc/mm/mmu_context_book3s64.c
++++ b/arch/powerpc/mm/mmu_context_book3s64.c
+@@ -200,9 +200,9 @@ static void pte_frag_destroy(void *pte_frag)
+ /* drop all the pending references */
+ count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
+ /* We allow PTE_FRAG_NR fragments from a PTE page */
+- if (page_ref_sub_and_test(page, PTE_FRAG_NR - count)) {
++ if (atomic_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) {
+ pgtable_page_dtor(page);
+- free_unref_page(page);
++ __free_page(page);
+ }
+ }
+
+@@ -215,9 +215,9 @@ static void pmd_frag_destroy(void *pmd_frag)
+ /* drop all the pending references */
+ count = ((unsigned long)pmd_frag & ~PAGE_MASK) >> PMD_FRAG_SIZE_SHIFT;
+ /* We allow PTE_FRAG_NR fragments from a PTE page */
+- if (page_ref_sub_and_test(page, PMD_FRAG_NR - count)) {
++ if (atomic_sub_and_test(PMD_FRAG_NR - count, &page->pt_frag_refcount)) {
+ pgtable_pmd_page_dtor(page);
+- free_unref_page(page);
++ __free_page(page);
+ }
+ }
+
+diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
+index a4ca57612558..c9ee9e23845f 100644
+--- a/arch/powerpc/mm/mmu_context_iommu.c
++++ b/arch/powerpc/mm/mmu_context_iommu.c
+@@ -129,6 +129,7 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
+ long i, j, ret = 0, locked_entries = 0;
+ unsigned int pageshift;
+ unsigned long flags;
++ unsigned long cur_ua;
+ struct page *page = NULL;
+
+ mutex_lock(&mem_list_mutex);
+@@ -177,7 +178,8 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
+ }
+
+ for (i = 0; i < entries; ++i) {
+- if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
++ cur_ua = ua + (i << PAGE_SHIFT);
++ if (1 != get_user_pages_fast(cur_ua,
+ 1/* pages */, 1/* iswrite */, &page)) {
+ ret = -EFAULT;
+ for (j = 0; j < i; ++j)
+@@ -196,7 +198,7 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
+ if (is_migrate_cma_page(page)) {
+ if (mm_iommu_move_page_from_cma(page))
+ goto populate;
+- if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
++ if (1 != get_user_pages_fast(cur_ua,
+ 1/* pages */, 1/* iswrite */,
+ &page)) {
+ ret = -EFAULT;
+@@ -210,20 +212,21 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
+ }
+ populate:
+ pageshift = PAGE_SHIFT;
+- if (PageCompound(page)) {
++ if (mem->pageshift > PAGE_SHIFT && PageCompound(page)) {
+ pte_t *pte;
+ struct page *head = compound_head(page);
+ unsigned int compshift = compound_order(head);
++ unsigned int pteshift;
+
+ local_irq_save(flags); /* disables as well */
+- pte = find_linux_pte(mm->pgd, ua, NULL, &pageshift);
+- local_irq_restore(flags);
++ pte = find_linux_pte(mm->pgd, cur_ua, NULL, &pteshift);
+
+ /* Double check it is still the same pinned page */
+ if (pte && pte_page(*pte) == head &&
+- pageshift == compshift)
+- pageshift = max_t(unsigned int, pageshift,
++ pteshift == compshift + PAGE_SHIFT)
++ pageshift = max_t(unsigned int, pteshift,
+ PAGE_SHIFT);
++ local_irq_restore(flags);
+ }
+ mem->pageshift = min(mem->pageshift, pageshift);
+ mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
+diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
+index 4afbfbb64bfd..78d0b3d5ebad 100644
+--- a/arch/powerpc/mm/pgtable-book3s64.c
++++ b/arch/powerpc/mm/pgtable-book3s64.c
+@@ -270,6 +270,8 @@ static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
+ return NULL;
+ }
+
++ atomic_set(&page->pt_frag_refcount, 1);
++
+ ret = page_address(page);
+ /*
+ * if we support only one fragment just return the
+@@ -285,7 +287,7 @@ static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
+ * count.
+ */
+ if (likely(!mm->context.pmd_frag)) {
+- set_page_count(page, PMD_FRAG_NR);
++ atomic_set(&page->pt_frag_refcount, PMD_FRAG_NR);
+ mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
+ }
+ spin_unlock(&mm->page_table_lock);
+@@ -308,9 +310,10 @@ void pmd_fragment_free(unsigned long *pmd)
+ {
+ struct page *page = virt_to_page(pmd);
+
+- if (put_page_testzero(page)) {
++ BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
++ if (atomic_dec_and_test(&page->pt_frag_refcount)) {
+ pgtable_pmd_page_dtor(page);
+- free_unref_page(page);
++ __free_page(page);
+ }
+ }
+
+@@ -352,6 +355,7 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
+ return NULL;
+ }
+
++ atomic_set(&page->pt_frag_refcount, 1);
+
+ ret = page_address(page);
+ /*
+@@ -367,7 +371,7 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
+ * count.
+ */
+ if (likely(!mm->context.pte_frag)) {
+- set_page_count(page, PTE_FRAG_NR);
++ atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR);
+ mm->context.pte_frag = ret + PTE_FRAG_SIZE;
+ }
+ spin_unlock(&mm->page_table_lock);
+@@ -390,10 +394,11 @@ void pte_fragment_free(unsigned long *table, int kernel)
+ {
+ struct page *page = virt_to_page(table);
+
+- if (put_page_testzero(page)) {
++ BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
++ if (atomic_dec_and_test(&page->pt_frag_refcount)) {
+ if (!kernel)
+ pgtable_page_dtor(page);
+- free_unref_page(page);
++ __free_page(page);
+ }
+ }
+
+diff --git a/arch/powerpc/mm/pkeys.c b/arch/powerpc/mm/pkeys.c
+index e6f500fabf5e..0e7810ccd1ae 100644
+--- a/arch/powerpc/mm/pkeys.c
++++ b/arch/powerpc/mm/pkeys.c
+@@ -15,8 +15,10 @@ bool pkey_execute_disable_supported;
+ int pkeys_total; /* Total pkeys as per device tree */
+ bool pkeys_devtree_defined; /* pkey property exported by device tree */
+ u32 initial_allocation_mask; /* Bits set for reserved keys */
+-u64 pkey_amr_uamor_mask; /* Bits in AMR/UMOR not to be touched */
++u64 pkey_amr_mask; /* Bits in AMR not to be touched */
+ u64 pkey_iamr_mask; /* Bits in AMR not to be touched */
++u64 pkey_uamor_mask; /* Bits in UMOR not to be touched */
++int execute_only_key = 2;
+
+ #define AMR_BITS_PER_PKEY 2
+ #define AMR_RD_BIT 0x1UL
+@@ -91,7 +93,7 @@ int pkey_initialize(void)
+ * arch-neutral code.
+ */
+ pkeys_total = min_t(int, pkeys_total,
+- (ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT));
++ ((ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT)+1));
+
+ if (!pkey_mmu_enabled() || radix_enabled() || !pkeys_total)
+ static_branch_enable(&pkey_disabled);
+@@ -119,20 +121,38 @@ int pkey_initialize(void)
+ #else
+ os_reserved = 0;
+ #endif
+- initial_allocation_mask = ~0x0;
+- pkey_amr_uamor_mask = ~0x0ul;
++ initial_allocation_mask = (0x1 << 0) | (0x1 << 1) |
++ (0x1 << execute_only_key);
++
++ /* register mask is in BE format */
++ pkey_amr_mask = ~0x0ul;
++ pkey_amr_mask &= ~(0x3ul << pkeyshift(0));
++
+ pkey_iamr_mask = ~0x0ul;
+- /*
+- * key 0, 1 are reserved.
+- * key 0 is the default key, which allows read/write/execute.
+- * key 1 is recommended not to be used. PowerISA(3.0) page 1015,
+- * programming note.
+- */
+- for (i = 2; i < (pkeys_total - os_reserved); i++) {
+- initial_allocation_mask &= ~(0x1 << i);
+- pkey_amr_uamor_mask &= ~(0x3ul << pkeyshift(i));
+- pkey_iamr_mask &= ~(0x1ul << pkeyshift(i));
++ pkey_iamr_mask &= ~(0x3ul << pkeyshift(0));
++ pkey_iamr_mask &= ~(0x3ul << pkeyshift(execute_only_key));
++
++ pkey_uamor_mask = ~0x0ul;
++ pkey_uamor_mask &= ~(0x3ul << pkeyshift(0));
++ pkey_uamor_mask &= ~(0x3ul << pkeyshift(execute_only_key));
++
++ /* mark the rest of the keys as reserved and hence unavailable */
++ for (i = (pkeys_total - os_reserved); i < pkeys_total; i++) {
++ initial_allocation_mask |= (0x1 << i);
++ pkey_uamor_mask &= ~(0x3ul << pkeyshift(i));
++ }
++
++ if (unlikely((pkeys_total - os_reserved) <= execute_only_key)) {
++ /*
++ * Insufficient number of keys to support
++ * execute only key. Mark it unavailable.
++ * Any AMR, UAMOR, IAMR bit set for
++ * this key is irrelevant since this key
++ * can never be allocated.
++ */
++ execute_only_key = -1;
+ }
++
+ return 0;
+ }
+
+@@ -143,8 +163,7 @@ void pkey_mm_init(struct mm_struct *mm)
+ if (static_branch_likely(&pkey_disabled))
+ return;
+ mm_pkey_allocation_map(mm) = initial_allocation_mask;
+- /* -1 means unallocated or invalid */
+- mm->context.execute_only_pkey = -1;
++ mm->context.execute_only_pkey = execute_only_key;
+ }
+
+ static inline u64 read_amr(void)
+@@ -213,33 +232,6 @@ static inline void init_iamr(int pkey, u8 init_bits)
+ write_iamr(old_iamr | new_iamr_bits);
+ }
+
+-static void pkey_status_change(int pkey, bool enable)
+-{
+- u64 old_uamor;
+-
+- /* Reset the AMR and IAMR bits for this key */
+- init_amr(pkey, 0x0);
+- init_iamr(pkey, 0x0);
+-
+- /* Enable/disable key */
+- old_uamor = read_uamor();
+- if (enable)
+- old_uamor |= (0x3ul << pkeyshift(pkey));
+- else
+- old_uamor &= ~(0x3ul << pkeyshift(pkey));
+- write_uamor(old_uamor);
+-}
+-
+-void __arch_activate_pkey(int pkey)
+-{
+- pkey_status_change(pkey, true);
+-}
+-
+-void __arch_deactivate_pkey(int pkey)
+-{
+- pkey_status_change(pkey, false);
+-}
+-
+ /*
+ * Set the access rights in AMR IAMR and UAMOR registers for @pkey to that
+ * specified in @init_val.
+@@ -289,9 +281,6 @@ void thread_pkey_regs_restore(struct thread_struct *new_thread,
+ if (static_branch_likely(&pkey_disabled))
+ return;
+
+- /*
+- * TODO: Just set UAMOR to zero if @new_thread hasn't used any keys yet.
+- */
+ if (old_thread->amr != new_thread->amr)
+ write_amr(new_thread->amr);
+ if (old_thread->iamr != new_thread->iamr)
+@@ -305,9 +294,13 @@ void thread_pkey_regs_init(struct thread_struct *thread)
+ if (static_branch_likely(&pkey_disabled))
+ return;
+
+- thread->amr = read_amr() & pkey_amr_uamor_mask;
+- thread->iamr = read_iamr() & pkey_iamr_mask;
+- thread->uamor = read_uamor() & pkey_amr_uamor_mask;
++ thread->amr = pkey_amr_mask;
++ thread->iamr = pkey_iamr_mask;
++ thread->uamor = pkey_uamor_mask;
++
++ write_uamor(pkey_uamor_mask);
++ write_amr(pkey_amr_mask);
++ write_iamr(pkey_iamr_mask);
+ }
+
+ static inline bool pkey_allows_readwrite(int pkey)
+@@ -322,48 +315,7 @@ static inline bool pkey_allows_readwrite(int pkey)
+
+ int __execute_only_pkey(struct mm_struct *mm)
+ {
+- bool need_to_set_mm_pkey = false;
+- int execute_only_pkey = mm->context.execute_only_pkey;
+- int ret;
+-
+- /* Do we need to assign a pkey for mm's execute-only maps? */
+- if (execute_only_pkey == -1) {
+- /* Go allocate one to use, which might fail */
+- execute_only_pkey = mm_pkey_alloc(mm);
+- if (execute_only_pkey < 0)
+- return -1;
+- need_to_set_mm_pkey = true;
+- }
+-
+- /*
+- * We do not want to go through the relatively costly dance to set AMR
+- * if we do not need to. Check it first and assume that if the
+- * execute-only pkey is readwrite-disabled than we do not have to set it
+- * ourselves.
+- */
+- if (!need_to_set_mm_pkey && !pkey_allows_readwrite(execute_only_pkey))
+- return execute_only_pkey;
+-
+- /*
+- * Set up AMR so that it denies access for everything other than
+- * execution.
+- */
+- ret = __arch_set_user_pkey_access(current, execute_only_pkey,
+- PKEY_DISABLE_ACCESS |
+- PKEY_DISABLE_WRITE);
+- /*
+- * If the AMR-set operation failed somehow, just return 0 and
+- * effectively disable execute-only support.
+- */
+- if (ret) {
+- mm_pkey_free(mm, execute_only_pkey);
+- return -1;
+- }
+-
+- /* We got one, store it and use it from here on out */
+- if (need_to_set_mm_pkey)
+- mm->context.execute_only_pkey = execute_only_pkey;
+- return execute_only_pkey;
++ return mm->context.execute_only_pkey;
+ }
+
+ static inline bool vma_is_pkey_exec_only(struct vm_area_struct *vma)
+diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
+index 70b2e1e0f23c..a2cdf358a3ac 100644
+--- a/arch/powerpc/platforms/powernv/pci-ioda.c
++++ b/arch/powerpc/platforms/powernv/pci-ioda.c
+@@ -3368,12 +3368,49 @@ static void pnv_pci_ioda_create_dbgfs(void)
+ #endif /* CONFIG_DEBUG_FS */
+ }
+
++static void pnv_pci_enable_bridge(struct pci_bus *bus)
++{
++ struct pci_dev *dev = bus->self;
++ struct pci_bus *child;
++
++ /* Empty bus ? bail */
++ if (list_empty(&bus->devices))
++ return;
++
++ /*
++ * If there's a bridge associated with that bus enable it. This works
++ * around races in the generic code if the enabling is done during
++ * parallel probing. This can be removed once those races have been
++ * fixed.
++ */
++ if (dev) {
++ int rc = pci_enable_device(dev);
++ if (rc)
++ pci_err(dev, "Error enabling bridge (%d)\n", rc);
++ pci_set_master(dev);
++ }
++
++ /* Perform the same to child busses */
++ list_for_each_entry(child, &bus->children, node)
++ pnv_pci_enable_bridge(child);
++}
++
++static void pnv_pci_enable_bridges(void)
++{
++ struct pci_controller *hose;
++
++ list_for_each_entry(hose, &hose_list, list_node)
++ pnv_pci_enable_bridge(hose->bus);
++}
++
+ static void pnv_pci_ioda_fixup(void)
+ {
+ pnv_pci_ioda_setup_PEs();
+ pnv_pci_ioda_setup_iommu_api();
+ pnv_pci_ioda_create_dbgfs();
+
++ pnv_pci_enable_bridges();
++
+ #ifdef CONFIG_EEH
+ pnv_eeh_post_init();
+ #endif
+diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
+index 5e1ef9150182..2edc673be137 100644
+--- a/arch/powerpc/platforms/pseries/ras.c
++++ b/arch/powerpc/platforms/pseries/ras.c
+@@ -360,7 +360,7 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
+ }
+
+ savep = __va(regs->gpr[3]);
+- regs->gpr[3] = savep[0]; /* restore original r3 */
++ regs->gpr[3] = be64_to_cpu(savep[0]); /* restore original r3 */
+
+ /* If it isn't an extended log we can use the per cpu 64bit buffer */
+ h = (struct rtas_error_log *)&savep[1];
+diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
+index 7f3d9c59719a..452e4d080855 100644
+--- a/arch/sparc/kernel/sys_sparc_32.c
++++ b/arch/sparc/kernel/sys_sparc_32.c
+@@ -197,23 +197,27 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig,
+
+ SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
+ {
+- int nlen, err;
+-
++ int nlen, err;
++ char tmp[__NEW_UTS_LEN + 1];
++
+ if (len < 0)
+ return -EINVAL;
+
+- down_read(&uts_sem);
+-
++ down_read(&uts_sem);
++
+ nlen = strlen(utsname()->domainname) + 1;
+ err = -EINVAL;
+ if (nlen > len)
+- goto out;
++ goto out_unlock;
++ memcpy(tmp, utsname()->domainname, nlen);
+
+- err = -EFAULT;
+- if (!copy_to_user(name, utsname()->domainname, nlen))
+- err = 0;
++ up_read(&uts_sem);
+
+-out:
++ if (copy_to_user(name, tmp, nlen))
++ return -EFAULT;
++ return 0;
++
++out_unlock:
+ up_read(&uts_sem);
+ return err;
+ }
+diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
+index 63baa8aa9414..274ed0b9b3e0 100644
+--- a/arch/sparc/kernel/sys_sparc_64.c
++++ b/arch/sparc/kernel/sys_sparc_64.c
+@@ -519,23 +519,27 @@ asmlinkage void sparc_breakpoint(struct pt_regs *regs)
+
+ SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
+ {
+- int nlen, err;
++ int nlen, err;
++ char tmp[__NEW_UTS_LEN + 1];
+
+ if (len < 0)
+ return -EINVAL;
+
+- down_read(&uts_sem);
+-
++ down_read(&uts_sem);
++
+ nlen = strlen(utsname()->domainname) + 1;
+ err = -EINVAL;
+ if (nlen > len)
+- goto out;
++ goto out_unlock;
++ memcpy(tmp, utsname()->domainname, nlen);
++
++ up_read(&uts_sem);
+
+- err = -EFAULT;
+- if (!copy_to_user(name, utsname()->domainname, nlen))
+- err = 0;
++ if (copy_to_user(name, tmp, nlen))
++ return -EFAULT;
++ return 0;
+
+-out:
++out_unlock:
+ up_read(&uts_sem);
+ return err;
+ }
+diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
+index e762ef417562..d27a50656aa1 100644
+--- a/arch/x86/crypto/aesni-intel_asm.S
++++ b/arch/x86/crypto/aesni-intel_asm.S
+@@ -223,34 +223,34 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
+ pcmpeqd TWOONE(%rip), \TMP2
+ pand POLY(%rip), \TMP2
+ pxor \TMP2, \TMP3
+- movdqa \TMP3, HashKey(%arg2)
++ movdqu \TMP3, HashKey(%arg2)
+
+ movdqa \TMP3, \TMP5
+ pshufd $78, \TMP3, \TMP1
+ pxor \TMP3, \TMP1
+- movdqa \TMP1, HashKey_k(%arg2)
++ movdqu \TMP1, HashKey_k(%arg2)
+
+ GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
+ # TMP5 = HashKey^2<<1 (mod poly)
+- movdqa \TMP5, HashKey_2(%arg2)
++ movdqu \TMP5, HashKey_2(%arg2)
+ # HashKey_2 = HashKey^2<<1 (mod poly)
+ pshufd $78, \TMP5, \TMP1
+ pxor \TMP5, \TMP1
+- movdqa \TMP1, HashKey_2_k(%arg2)
++ movdqu \TMP1, HashKey_2_k(%arg2)
+
+ GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
+ # TMP5 = HashKey^3<<1 (mod poly)
+- movdqa \TMP5, HashKey_3(%arg2)
++ movdqu \TMP5, HashKey_3(%arg2)
+ pshufd $78, \TMP5, \TMP1
+ pxor \TMP5, \TMP1
+- movdqa \TMP1, HashKey_3_k(%arg2)
++ movdqu \TMP1, HashKey_3_k(%arg2)
+
+ GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
+ # TMP5 = HashKey^3<<1 (mod poly)
+- movdqa \TMP5, HashKey_4(%arg2)
++ movdqu \TMP5, HashKey_4(%arg2)
+ pshufd $78, \TMP5, \TMP1
+ pxor \TMP5, \TMP1
+- movdqa \TMP1, HashKey_4_k(%arg2)
++ movdqu \TMP1, HashKey_4_k(%arg2)
+ .endm
+
+ # GCM_INIT initializes a gcm_context struct to prepare for encoding/decoding.
+@@ -271,7 +271,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
+ movdqu %xmm0, CurCount(%arg2) # ctx_data.current_counter = iv
+
+ PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+- movdqa HashKey(%arg2), %xmm13
++ movdqu HashKey(%arg2), %xmm13
+
+ CALC_AAD_HASH %xmm13, \AAD, \AADLEN, %xmm0, %xmm1, %xmm2, %xmm3, \
+ %xmm4, %xmm5, %xmm6
+@@ -997,7 +997,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
+ pshufd $78, \XMM5, \TMP6
+ pxor \XMM5, \TMP6
+ paddd ONE(%rip), \XMM0 # INCR CNT
+- movdqa HashKey_4(%arg2), \TMP5
++ movdqu HashKey_4(%arg2), \TMP5
+ PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1
+ movdqa \XMM0, \XMM1
+ paddd ONE(%rip), \XMM0 # INCR CNT
+@@ -1016,7 +1016,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
+ pxor (%arg1), \XMM2
+ pxor (%arg1), \XMM3
+ pxor (%arg1), \XMM4
+- movdqa HashKey_4_k(%arg2), \TMP5
++ movdqu HashKey_4_k(%arg2), \TMP5
+ PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0)
+ movaps 0x10(%arg1), \TMP1
+ AESENC \TMP1, \XMM1 # Round 1
+@@ -1031,7 +1031,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
+ movdqa \XMM6, \TMP1
+ pshufd $78, \XMM6, \TMP2
+ pxor \XMM6, \TMP2
+- movdqa HashKey_3(%arg2), \TMP5
++ movdqu HashKey_3(%arg2), \TMP5
+ PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1
+ movaps 0x30(%arg1), \TMP3
+ AESENC \TMP3, \XMM1 # Round 3
+@@ -1044,7 +1044,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
+ AESENC \TMP3, \XMM2
+ AESENC \TMP3, \XMM3
+ AESENC \TMP3, \XMM4
+- movdqa HashKey_3_k(%arg2), \TMP5
++ movdqu HashKey_3_k(%arg2), \TMP5
+ PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
+ movaps 0x50(%arg1), \TMP3
+ AESENC \TMP3, \XMM1 # Round 5
+@@ -1058,7 +1058,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
+ movdqa \XMM7, \TMP1
+ pshufd $78, \XMM7, \TMP2
+ pxor \XMM7, \TMP2
+- movdqa HashKey_2(%arg2), \TMP5
++ movdqu HashKey_2(%arg2), \TMP5
+
+ # Multiply TMP5 * HashKey using karatsuba
+
+@@ -1074,7 +1074,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
+ AESENC \TMP3, \XMM2
+ AESENC \TMP3, \XMM3
+ AESENC \TMP3, \XMM4
+- movdqa HashKey_2_k(%arg2), \TMP5
++ movdqu HashKey_2_k(%arg2), \TMP5
+ PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
+ movaps 0x80(%arg1), \TMP3
+ AESENC \TMP3, \XMM1 # Round 8
+@@ -1092,7 +1092,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
+ movdqa \XMM8, \TMP1
+ pshufd $78, \XMM8, \TMP2
+ pxor \XMM8, \TMP2
+- movdqa HashKey(%arg2), \TMP5
++ movdqu HashKey(%arg2), \TMP5
+ PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
+ movaps 0x90(%arg1), \TMP3
+ AESENC \TMP3, \XMM1 # Round 9
+@@ -1121,7 +1121,7 @@ aes_loop_par_enc_done\@:
+ AESENCLAST \TMP3, \XMM2
+ AESENCLAST \TMP3, \XMM3
+ AESENCLAST \TMP3, \XMM4
+- movdqa HashKey_k(%arg2), \TMP5
++ movdqu HashKey_k(%arg2), \TMP5
+ PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
+ movdqu (%arg4,%r11,1), \TMP3
+ pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK
+@@ -1205,7 +1205,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
+ pshufd $78, \XMM5, \TMP6
+ pxor \XMM5, \TMP6
+ paddd ONE(%rip), \XMM0 # INCR CNT
+- movdqa HashKey_4(%arg2), \TMP5
++ movdqu HashKey_4(%arg2), \TMP5
+ PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1
+ movdqa \XMM0, \XMM1
+ paddd ONE(%rip), \XMM0 # INCR CNT
+@@ -1224,7 +1224,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
+ pxor (%arg1), \XMM2
+ pxor (%arg1), \XMM3
+ pxor (%arg1), \XMM4
+- movdqa HashKey_4_k(%arg2), \TMP5
++ movdqu HashKey_4_k(%arg2), \TMP5
+ PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0)
+ movaps 0x10(%arg1), \TMP1
+ AESENC \TMP1, \XMM1 # Round 1
+@@ -1239,7 +1239,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
+ movdqa \XMM6, \TMP1
+ pshufd $78, \XMM6, \TMP2
+ pxor \XMM6, \TMP2
+- movdqa HashKey_3(%arg2), \TMP5
++ movdqu HashKey_3(%arg2), \TMP5
+ PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1
+ movaps 0x30(%arg1), \TMP3
+ AESENC \TMP3, \XMM1 # Round 3
+@@ -1252,7 +1252,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
+ AESENC \TMP3, \XMM2
+ AESENC \TMP3, \XMM3
+ AESENC \TMP3, \XMM4
+- movdqa HashKey_3_k(%arg2), \TMP5
++ movdqu HashKey_3_k(%arg2), \TMP5
+ PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
+ movaps 0x50(%arg1), \TMP3
+ AESENC \TMP3, \XMM1 # Round 5
+@@ -1266,7 +1266,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
+ movdqa \XMM7, \TMP1
+ pshufd $78, \XMM7, \TMP2
+ pxor \XMM7, \TMP2
+- movdqa HashKey_2(%arg2), \TMP5
++ movdqu HashKey_2(%arg2), \TMP5
+
+ # Multiply TMP5 * HashKey using karatsuba
+
+@@ -1282,7 +1282,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
+ AESENC \TMP3, \XMM2
+ AESENC \TMP3, \XMM3
+ AESENC \TMP3, \XMM4
+- movdqa HashKey_2_k(%arg2), \TMP5
++ movdqu HashKey_2_k(%arg2), \TMP5
+ PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
+ movaps 0x80(%arg1), \TMP3
+ AESENC \TMP3, \XMM1 # Round 8
+@@ -1300,7 +1300,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
+ movdqa \XMM8, \TMP1
+ pshufd $78, \XMM8, \TMP2
+ pxor \XMM8, \TMP2
+- movdqa HashKey(%arg2), \TMP5
++ movdqu HashKey(%arg2), \TMP5
+ PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
+ movaps 0x90(%arg1), \TMP3
+ AESENC \TMP3, \XMM1 # Round 9
+@@ -1329,7 +1329,7 @@ aes_loop_par_dec_done\@:
+ AESENCLAST \TMP3, \XMM2
+ AESENCLAST \TMP3, \XMM3
+ AESENCLAST \TMP3, \XMM4
+- movdqa HashKey_k(%arg2), \TMP5
++ movdqu HashKey_k(%arg2), \TMP5
+ PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
+ movdqu (%arg4,%r11,1), \TMP3
+ pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK
+@@ -1405,10 +1405,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
+ movdqa \XMM1, \TMP6
+ pshufd $78, \XMM1, \TMP2
+ pxor \XMM1, \TMP2
+- movdqa HashKey_4(%arg2), \TMP5
++ movdqu HashKey_4(%arg2), \TMP5
+ PCLMULQDQ 0x11, \TMP5, \TMP6 # TMP6 = a1*b1
+ PCLMULQDQ 0x00, \TMP5, \XMM1 # XMM1 = a0*b0
+- movdqa HashKey_4_k(%arg2), \TMP4
++ movdqu HashKey_4_k(%arg2), \TMP4
+ PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
+ movdqa \XMM1, \XMMDst
+ movdqa \TMP2, \XMM1 # result in TMP6, XMMDst, XMM1
+@@ -1418,10 +1418,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
+ movdqa \XMM2, \TMP1
+ pshufd $78, \XMM2, \TMP2
+ pxor \XMM2, \TMP2
+- movdqa HashKey_3(%arg2), \TMP5
++ movdqu HashKey_3(%arg2), \TMP5
+ PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
+ PCLMULQDQ 0x00, \TMP5, \XMM2 # XMM2 = a0*b0
+- movdqa HashKey_3_k(%arg2), \TMP4
++ movdqu HashKey_3_k(%arg2), \TMP4
+ PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
+ pxor \TMP1, \TMP6
+ pxor \XMM2, \XMMDst
+@@ -1433,10 +1433,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
+ movdqa \XMM3, \TMP1
+ pshufd $78, \XMM3, \TMP2
+ pxor \XMM3, \TMP2
+- movdqa HashKey_2(%arg2), \TMP5
++ movdqu HashKey_2(%arg2), \TMP5
+ PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
+ PCLMULQDQ 0x00, \TMP5, \XMM3 # XMM3 = a0*b0
+- movdqa HashKey_2_k(%arg2), \TMP4
++ movdqu HashKey_2_k(%arg2), \TMP4
+ PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
+ pxor \TMP1, \TMP6
+ pxor \XMM3, \XMMDst
+@@ -1446,10 +1446,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
+ movdqa \XMM4, \TMP1
+ pshufd $78, \XMM4, \TMP2
+ pxor \XMM4, \TMP2
+- movdqa HashKey(%arg2), \TMP5
++ movdqu HashKey(%arg2), \TMP5
+ PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
+ PCLMULQDQ 0x00, \TMP5, \XMM4 # XMM4 = a0*b0
+- movdqa HashKey_k(%arg2), \TMP4
++ movdqu HashKey_k(%arg2), \TMP4
+ PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
+ pxor \TMP1, \TMP6
+ pxor \XMM4, \XMMDst
+diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
+index 7326078eaa7a..278cd07228dd 100644
+--- a/arch/x86/kernel/kexec-bzimage64.c
++++ b/arch/x86/kernel/kexec-bzimage64.c
+@@ -532,7 +532,7 @@ static int bzImage64_cleanup(void *loader_data)
+ static int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len)
+ {
+ return verify_pefile_signature(kernel, kernel_len,
+- NULL,
++ VERIFY_USE_SECONDARY_KEYRING,
+ VERIFYING_KEXEC_PE_SIGNATURE);
+ }
+ #endif
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 46b428c0990e..bedabcf33a3e 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -197,12 +197,14 @@ static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_
+
+ static const struct {
+ const char *option;
+- enum vmx_l1d_flush_state cmd;
++ bool for_parse;
+ } vmentry_l1d_param[] = {
+- {"auto", VMENTER_L1D_FLUSH_AUTO},
+- {"never", VMENTER_L1D_FLUSH_NEVER},
+- {"cond", VMENTER_L1D_FLUSH_COND},
+- {"always", VMENTER_L1D_FLUSH_ALWAYS},
++ [VMENTER_L1D_FLUSH_AUTO] = {"auto", true},
++ [VMENTER_L1D_FLUSH_NEVER] = {"never", true},
++ [VMENTER_L1D_FLUSH_COND] = {"cond", true},
++ [VMENTER_L1D_FLUSH_ALWAYS] = {"always", true},
++ [VMENTER_L1D_FLUSH_EPT_DISABLED] = {"EPT disabled", false},
++ [VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false},
+ };
+
+ #define L1D_CACHE_ORDER 4
+@@ -286,8 +288,9 @@ static int vmentry_l1d_flush_parse(const char *s)
+
+ if (s) {
+ for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
+- if (sysfs_streq(s, vmentry_l1d_param[i].option))
+- return vmentry_l1d_param[i].cmd;
++ if (vmentry_l1d_param[i].for_parse &&
++ sysfs_streq(s, vmentry_l1d_param[i].option))
++ return i;
+ }
+ }
+ return -EINVAL;
+@@ -297,13 +300,13 @@ static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
+ {
+ int l1tf, ret;
+
+- if (!boot_cpu_has(X86_BUG_L1TF))
+- return 0;
+-
+ l1tf = vmentry_l1d_flush_parse(s);
+ if (l1tf < 0)
+ return l1tf;
+
++ if (!boot_cpu_has(X86_BUG_L1TF))
++ return 0;
++
+ /*
+ * Has vmx_init() run already? If not then this is the pre init
+ * parameter parsing. In that case just store the value and let
+@@ -323,6 +326,9 @@ static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
+
+ static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
+ {
++ if (WARN_ON_ONCE(l1tf_vmx_mitigation >= ARRAY_SIZE(vmentry_l1d_param)))
++ return sprintf(s, "???\n");
++
+ return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
+ }
+
+diff --git a/arch/xtensa/include/asm/cacheasm.h b/arch/xtensa/include/asm/cacheasm.h
+index 2041abb10a23..34545ecfdd6b 100644
+--- a/arch/xtensa/include/asm/cacheasm.h
++++ b/arch/xtensa/include/asm/cacheasm.h
+@@ -31,16 +31,32 @@
+ *
+ */
+
+- .macro __loop_cache_all ar at insn size line_width
+
+- movi \ar, 0
++ .macro __loop_cache_unroll ar at insn size line_width max_immed
++
++ .if (1 << (\line_width)) > (\max_immed)
++ .set _reps, 1
++ .elseif (2 << (\line_width)) > (\max_immed)
++ .set _reps, 2
++ .else
++ .set _reps, 4
++ .endif
++
++ __loopi \ar, \at, \size, (_reps << (\line_width))
++ .set _index, 0
++ .rep _reps
++ \insn \ar, _index << (\line_width)
++ .set _index, _index + 1
++ .endr
++ __endla \ar, \at, _reps << (\line_width)
++
++ .endm
++
+
+- __loopi \ar, \at, \size, (4 << (\line_width))
+- \insn \ar, 0 << (\line_width)
+- \insn \ar, 1 << (\line_width)
+- \insn \ar, 2 << (\line_width)
+- \insn \ar, 3 << (\line_width)
+- __endla \ar, \at, 4 << (\line_width)
++ .macro __loop_cache_all ar at insn size line_width max_immed
++
++ movi \ar, 0
++ __loop_cache_unroll \ar, \at, \insn, \size, \line_width, \max_immed
+
+ .endm
+
+@@ -57,14 +73,9 @@
+ .endm
+
+
+- .macro __loop_cache_page ar at insn line_width
+
+- __loopi \ar, \at, PAGE_SIZE, 4 << (\line_width)
+- \insn \ar, 0 << (\line_width)
+- \insn \ar, 1 << (\line_width)
+- \insn \ar, 2 << (\line_width)
+- \insn \ar, 3 << (\line_width)
+- __endla \ar, \at, 4 << (\line_width)
++ .macro __loop_cache_page ar at insn line_width max_immed
++
++ __loop_cache_unroll \ar, \at, \insn, PAGE_SIZE, \line_width, \max_immed
+
+ .endm
+
+@@ -72,7 +83,8 @@
+ .macro ___unlock_dcache_all ar at
+
+ #if XCHAL_DCACHE_LINE_LOCKABLE && XCHAL_DCACHE_SIZE
+- __loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
++ __loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE \
++ XCHAL_DCACHE_LINEWIDTH 240
+ #endif
+
+ .endm
+@@ -81,7 +93,8 @@
+ .macro ___unlock_icache_all ar at
+
+ #if XCHAL_ICACHE_LINE_LOCKABLE && XCHAL_ICACHE_SIZE
+- __loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE XCHAL_ICACHE_LINEWIDTH
++ __loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE \
++ XCHAL_ICACHE_LINEWIDTH 240
+ #endif
+
+ .endm
+@@ -90,7 +103,8 @@
+ .macro ___flush_invalidate_dcache_all ar at
+
+ #if XCHAL_DCACHE_SIZE
+- __loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
++ __loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE \
++ XCHAL_DCACHE_LINEWIDTH 240
+ #endif
+
+ .endm
+@@ -99,7 +113,8 @@
+ .macro ___flush_dcache_all ar at
+
+ #if XCHAL_DCACHE_SIZE
+- __loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
++ __loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE \
++ XCHAL_DCACHE_LINEWIDTH 240
+ #endif
+
+ .endm
+@@ -108,8 +123,8 @@
+ .macro ___invalidate_dcache_all ar at
+
+ #if XCHAL_DCACHE_SIZE
+- __loop_cache_all \ar \at dii __stringify(DCACHE_WAY_SIZE) \
+- XCHAL_DCACHE_LINEWIDTH
++ __loop_cache_all \ar \at dii XCHAL_DCACHE_SIZE \
++ XCHAL_DCACHE_LINEWIDTH 1020
+ #endif
+
+ .endm
+@@ -118,8 +133,8 @@
+ .macro ___invalidate_icache_all ar at
+
+ #if XCHAL_ICACHE_SIZE
+- __loop_cache_all \ar \at iii __stringify(ICACHE_WAY_SIZE) \
+- XCHAL_ICACHE_LINEWIDTH
++ __loop_cache_all \ar \at iii XCHAL_ICACHE_SIZE \
++ XCHAL_ICACHE_LINEWIDTH 1020
+ #endif
+
+ .endm
+@@ -166,7 +181,7 @@
+ .macro ___flush_invalidate_dcache_page ar as
+
+ #if XCHAL_DCACHE_SIZE
+- __loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH
++ __loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH 1020
+ #endif
+
+ .endm
+@@ -175,7 +190,7 @@
+ .macro ___flush_dcache_page ar as
+
+ #if XCHAL_DCACHE_SIZE
+- __loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH
++ __loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH 1020
+ #endif
+
+ .endm
+@@ -184,7 +199,7 @@
+ .macro ___invalidate_dcache_page ar as
+
+ #if XCHAL_DCACHE_SIZE
+- __loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH
++ __loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH 1020
+ #endif
+
+ .endm
+@@ -193,7 +208,7 @@
+ .macro ___invalidate_icache_page ar as
+
+ #if XCHAL_ICACHE_SIZE
+- __loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH
++ __loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH 1020
+ #endif
+
+ .endm
+diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
+index a9e8633388f4..58c6efa9f9a9 100644
+--- a/block/bfq-cgroup.c
++++ b/block/bfq-cgroup.c
+@@ -913,7 +913,8 @@ static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
+ if (ret)
+ return ret;
+
+- return bfq_io_set_weight_legacy(of_css(of), NULL, weight);
++ ret = bfq_io_set_weight_legacy(of_css(of), NULL, weight);
++ return ret ?: nbytes;
+ }
+
+ #ifdef CONFIG_DEBUG_BLK_CGROUP
+diff --git a/block/blk-core.c b/block/blk-core.c
+index ee33590f54eb..1646ea85dade 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -715,6 +715,35 @@ void blk_set_queue_dying(struct request_queue *q)
+ }
+ EXPORT_SYMBOL_GPL(blk_set_queue_dying);
+
++/* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
++void blk_exit_queue(struct request_queue *q)
++{
++ /*
++ * Since the I/O scheduler exit code may access cgroup information,
++ * perform I/O scheduler exit before disassociating from the block
++ * cgroup controller.
++ */
++ if (q->elevator) {
++ ioc_clear_queue(q);
++ elevator_exit(q, q->elevator);
++ q->elevator = NULL;
++ }
++
++ /*
++ * Remove all references to @q from the block cgroup controller before
++ * restoring @q->queue_lock to avoid that restoring this pointer causes
++ * e.g. blkcg_print_blkgs() to crash.
++ */
++ blkcg_exit_queue(q);
++
++ /*
++ * Since the cgroup code may dereference the @q->backing_dev_info
1543 |
++ */ |
1544 |
++ bdi_put(q->backing_dev_info); |
1545 |
++} |
1546 |
++ |
1547 |
+ /** |
1548 |
+ * blk_cleanup_queue - shutdown a request queue |
1549 |
+ * @q: request queue to shutdown |
1550 |
+@@ -780,30 +809,7 @@ void blk_cleanup_queue(struct request_queue *q) |
1551 |
+ */ |
1552 |
+ WARN_ON_ONCE(q->kobj.state_in_sysfs); |
1553 |
+ |
1554 |
+- /* |
1555 |
+- * Since the I/O scheduler exit code may access cgroup information, |
1556 |
+- * perform I/O scheduler exit before disassociating from the block |
1557 |
+- * cgroup controller. |
1558 |
+- */ |
1559 |
+- if (q->elevator) { |
1560 |
+- ioc_clear_queue(q); |
1561 |
+- elevator_exit(q, q->elevator); |
1562 |
+- q->elevator = NULL; |
1563 |
+- } |
1564 |
+- |
1565 |
+- /* |
1566 |
+- * Remove all references to @q from the block cgroup controller before |
1567 |
+- * restoring @q->queue_lock to avoid that restoring this pointer causes |
1568 |
+- * e.g. blkcg_print_blkgs() to crash. |
1569 |
+- */ |
1570 |
+- blkcg_exit_queue(q); |
1571 |
+- |
1572 |
+- /* |
1573 |
+- * Since the cgroup code may dereference the @q->backing_dev_info |
1574 |
+- * pointer, only decrease its reference count after having removed the |
1575 |
+- * association with the block cgroup controller. |
1576 |
+- */ |
1577 |
+- bdi_put(q->backing_dev_info); |
1578 |
++ blk_exit_queue(q); |
1579 |
+ |
1580 |
+ if (q->mq_ops) |
1581 |
+ blk_mq_free_queue(q); |
1582 |
+@@ -1180,6 +1186,7 @@ out_exit_flush_rq: |
1583 |
+ q->exit_rq_fn(q, q->fq->flush_rq); |
1584 |
+ out_free_flush_queue: |
1585 |
+ blk_free_flush_queue(q->fq); |
1586 |
++ q->fq = NULL; |
1587 |
+ return -ENOMEM; |
1588 |
+ } |
1589 |
+ EXPORT_SYMBOL(blk_init_allocated_queue); |
1590 |
+@@ -3763,9 +3770,11 @@ EXPORT_SYMBOL(blk_finish_plug); |
1591 |
+ */ |
1592 |
+ void blk_pm_runtime_init(struct request_queue *q, struct device *dev) |
1593 |
+ { |
1594 |
+- /* not support for RQF_PM and ->rpm_status in blk-mq yet */ |
1595 |
+- if (q->mq_ops) |
1596 |
++ /* Don't enable runtime PM for blk-mq until it is ready */ |
1597 |
++ if (q->mq_ops) { |
1598 |
++ pm_runtime_disable(dev); |
1599 |
+ return; |
1600 |
++ } |
1601 |
+ |
1602 |
+ q->dev = dev; |
1603 |
+ q->rpm_status = RPM_ACTIVE; |
1604 |
+diff --git a/block/blk-lib.c b/block/blk-lib.c |
1605 |
+index 8faa70f26fcd..d1b9dd03da25 100644 |
1606 |
+--- a/block/blk-lib.c |
1607 |
++++ b/block/blk-lib.c |
1608 |
+@@ -68,6 +68,8 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, |
1609 |
+ */ |
1610 |
+ req_sects = min_t(sector_t, nr_sects, |
1611 |
+ q->limits.max_discard_sectors); |
1612 |
++ if (!req_sects) |
1613 |
++ goto fail; |
1614 |
+ if (req_sects > UINT_MAX >> 9) |
1615 |
+ req_sects = UINT_MAX >> 9; |
1616 |
+ |
1617 |
+@@ -105,6 +107,14 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, |
1618 |
+ |
1619 |
+ *biop = bio; |
1620 |
+ return 0; |
1621 |
++ |
1622 |
++fail: |
1623 |
++ if (bio) { |
1624 |
++ submit_bio_wait(bio); |
1625 |
++ bio_put(bio); |
1626 |
++ } |
1627 |
++ *biop = NULL; |
1628 |
++ return -EOPNOTSUPP; |
1629 |
+ } |
1630 |
+ EXPORT_SYMBOL(__blkdev_issue_discard); |
1631 |
+ |
1632 |
+diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c |
1633 |
+index 94987b1f69e1..96c7dfc04852 100644 |
1634 |
+--- a/block/blk-sysfs.c |
1635 |
++++ b/block/blk-sysfs.c |
1636 |
+@@ -804,6 +804,21 @@ static void __blk_release_queue(struct work_struct *work) |
1637 |
+ blk_stat_remove_callback(q, q->poll_cb); |
1638 |
+ blk_stat_free_callback(q->poll_cb); |
1639 |
+ |
1640 |
++ if (!blk_queue_dead(q)) { |
1641 |
++ /* |
1642 |
++ * Last reference was dropped without having called |
1643 |
++ * blk_cleanup_queue(). |
1644 |
++ */ |
1645 |
++ WARN_ONCE(blk_queue_init_done(q), |
1646 |
++ "request queue %p has been registered but blk_cleanup_queue() has not been called for that queue\n", |
1647 |
++ q); |
1648 |
++ blk_exit_queue(q); |
1649 |
++ } |
1650 |
++ |
1651 |
++ WARN(blkg_root_lookup(q), |
1652 |
++ "request queue %p is being released but it has not yet been removed from the blkcg controller\n", |
1653 |
++ q); |
1654 |
++ |
1655 |
+ blk_free_queue_stats(q->stats); |
1656 |
+ |
1657 |
+ blk_exit_rl(q, &q->root_rl); |
1658 |
+diff --git a/block/blk.h b/block/blk.h |
1659 |
+index 8d23aea96ce9..a8f0f7986cfd 100644 |
1660 |
+--- a/block/blk.h |
1661 |
++++ b/block/blk.h |
1662 |
+@@ -130,6 +130,7 @@ void blk_free_flush_queue(struct blk_flush_queue *q); |
1663 |
+ int blk_init_rl(struct request_list *rl, struct request_queue *q, |
1664 |
+ gfp_t gfp_mask); |
1665 |
+ void blk_exit_rl(struct request_queue *q, struct request_list *rl); |
1666 |
++void blk_exit_queue(struct request_queue *q); |
1667 |
+ void blk_rq_bio_prep(struct request_queue *q, struct request *rq, |
1668 |
+ struct bio *bio); |
1669 |
+ void blk_queue_bypass_start(struct request_queue *q); |
1670 |
+diff --git a/certs/system_keyring.c b/certs/system_keyring.c |
1671 |
+index 6251d1b27f0c..81728717523d 100644 |
1672 |
+--- a/certs/system_keyring.c |
1673 |
++++ b/certs/system_keyring.c |
1674 |
+@@ -15,6 +15,7 @@ |
1675 |
+ #include <linux/cred.h> |
1676 |
+ #include <linux/err.h> |
1677 |
+ #include <linux/slab.h> |
1678 |
++#include <linux/verification.h> |
1679 |
+ #include <keys/asymmetric-type.h> |
1680 |
+ #include <keys/system_keyring.h> |
1681 |
+ #include <crypto/pkcs7.h> |
1682 |
+@@ -230,7 +231,7 @@ int verify_pkcs7_signature(const void *data, size_t len, |
1683 |
+ |
1684 |
+ if (!trusted_keys) { |
1685 |
+ trusted_keys = builtin_trusted_keys; |
1686 |
+- } else if (trusted_keys == (void *)1UL) { |
1687 |
++ } else if (trusted_keys == VERIFY_USE_SECONDARY_KEYRING) { |
1688 |
+ #ifdef CONFIG_SECONDARY_TRUSTED_KEYRING |
1689 |
+ trusted_keys = secondary_trusted_keys; |
1690 |
+ #else |
1691 |
+diff --git a/crypto/asymmetric_keys/pkcs7_key_type.c b/crypto/asymmetric_keys/pkcs7_key_type.c |
1692 |
+index e284d9cb9237..5b2f6a2b5585 100644 |
1693 |
+--- a/crypto/asymmetric_keys/pkcs7_key_type.c |
1694 |
++++ b/crypto/asymmetric_keys/pkcs7_key_type.c |
1695 |
+@@ -63,7 +63,7 @@ static int pkcs7_preparse(struct key_preparsed_payload *prep) |
1696 |
+ |
1697 |
+ return verify_pkcs7_signature(NULL, 0, |
1698 |
+ prep->data, prep->datalen, |
1699 |
+- (void *)1UL, usage, |
1700 |
++ VERIFY_USE_SECONDARY_KEYRING, usage, |
1701 |
+ pkcs7_view_content, prep); |
1702 |
+ } |
1703 |
+ |
1704 |
+diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c |
1705 |
+index fe9d46d81750..d8b8fc2ff563 100644 |
1706 |
+--- a/drivers/acpi/acpica/hwsleep.c |
1707 |
++++ b/drivers/acpi/acpica/hwsleep.c |
1708 |
+@@ -56,14 +56,9 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state) |
1709 |
+ if (ACPI_FAILURE(status)) { |
1710 |
+ return_ACPI_STATUS(status); |
1711 |
+ } |
1712 |
+- /* |
1713 |
+- * If the target sleep state is S5, clear all GPEs and fixed events too |
1714 |
+- */ |
1715 |
+- if (sleep_state == ACPI_STATE_S5) { |
1716 |
+- status = acpi_hw_clear_acpi_status(); |
1717 |
+- if (ACPI_FAILURE(status)) { |
1718 |
+- return_ACPI_STATUS(status); |
1719 |
+- } |
1720 |
++ status = acpi_hw_clear_acpi_status(); |
1721 |
++ if (ACPI_FAILURE(status)) { |
1722 |
++ return_ACPI_STATUS(status); |
1723 |
+ } |
1724 |
+ acpi_gbl_system_awake_and_running = FALSE; |
1725 |
+ |
1726 |
+diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c |
1727 |
+index 44f35ab3347d..0f0bdc9d24c6 100644 |
1728 |
+--- a/drivers/acpi/acpica/psloop.c |
1729 |
++++ b/drivers/acpi/acpica/psloop.c |
1730 |
+@@ -22,6 +22,7 @@ |
1731 |
+ #include "acdispat.h" |
1732 |
+ #include "amlcode.h" |
1733 |
+ #include "acconvert.h" |
1734 |
++#include "acnamesp.h" |
1735 |
+ |
1736 |
+ #define _COMPONENT ACPI_PARSER |
1737 |
+ ACPI_MODULE_NAME("psloop") |
1738 |
+@@ -527,12 +528,18 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state) |
1739 |
+ if (ACPI_FAILURE(status)) { |
1740 |
+ return_ACPI_STATUS(status); |
1741 |
+ } |
1742 |
+- if (walk_state->opcode == AML_SCOPE_OP) { |
1743 |
++ if (acpi_ns_opens_scope |
1744 |
++ (acpi_ps_get_opcode_info |
1745 |
++ (walk_state->opcode)->object_type)) { |
1746 |
+ /* |
1747 |
+- * If the scope op fails to parse, skip the body of the |
1748 |
+- * scope op because the parse failure indicates that the |
1749 |
+- * device may not exist. |
1750 |
++ * If the scope/device op fails to parse, skip the body of |
1751 |
++ * the scope op because the parse failure indicates that |
1752 |
++ * the device may not exist. |
1753 |
+ */ |
1754 |
++ ACPI_ERROR((AE_INFO, |
1755 |
++ "Skip parsing opcode %s", |
1756 |
++ acpi_ps_get_opcode_name |
1757 |
++ (walk_state->opcode))); |
1758 |
+ walk_state->parser_state.aml = |
1759 |
+ walk_state->aml + 1; |
1760 |
+ walk_state->parser_state.aml = |
1761 |
+@@ -540,8 +547,6 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state) |
1762 |
+ (&walk_state->parser_state); |
1763 |
+ walk_state->aml = |
1764 |
+ walk_state->parser_state.aml; |
1765 |
+- ACPI_ERROR((AE_INFO, |
1766 |
+- "Skipping Scope block")); |
1767 |
+ } |
1768 |
+ |
1769 |
+ continue; |
1770 |
+diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c |
1771 |
+index a390c6d4f72d..af7cb8e618fe 100644 |
1772 |
+--- a/drivers/block/zram/zram_drv.c |
1773 |
++++ b/drivers/block/zram/zram_drv.c |
1774 |
+@@ -337,6 +337,7 @@ static ssize_t backing_dev_store(struct device *dev, |
1775 |
+ struct device_attribute *attr, const char *buf, size_t len) |
1776 |
+ { |
1777 |
+ char *file_name; |
1778 |
++ size_t sz; |
1779 |
+ struct file *backing_dev = NULL; |
1780 |
+ struct inode *inode; |
1781 |
+ struct address_space *mapping; |
1782 |
+@@ -357,7 +358,11 @@ static ssize_t backing_dev_store(struct device *dev, |
1783 |
+ goto out; |
1784 |
+ } |
1785 |
+ |
1786 |
+- strlcpy(file_name, buf, len); |
1787 |
++ strlcpy(file_name, buf, PATH_MAX); |
1788 |
++ /* ignore trailing newline */ |
1789 |
++ sz = strlen(file_name); |
1790 |
++ if (sz > 0 && file_name[sz - 1] == '\n') |
1791 |
++ file_name[sz - 1] = 0x00; |
1792 |
+ |
1793 |
+ backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0); |
1794 |
+ if (IS_ERR(backing_dev)) { |
1795 |
+diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c |
1796 |
+index 1d50e97d49f1..6d53f7d9fc7a 100644 |
1797 |
+--- a/drivers/cpufreq/cpufreq_governor.c |
1798 |
++++ b/drivers/cpufreq/cpufreq_governor.c |
1799 |
+@@ -555,12 +555,20 @@ EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_stop); |
1800 |
+ |
1801 |
+ void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy) |
1802 |
+ { |
1803 |
+- struct policy_dbs_info *policy_dbs = policy->governor_data; |
1804 |
++ struct policy_dbs_info *policy_dbs; |
1805 |
++ |
1806 |
++ /* Protect gov->gdbs_data against cpufreq_dbs_governor_exit() */ |
1807 |
++ mutex_lock(&gov_dbs_data_mutex); |
1808 |
++ policy_dbs = policy->governor_data; |
1809 |
++ if (!policy_dbs) |
1810 |
++ goto out; |
1811 |
+ |
1812 |
+ mutex_lock(&policy_dbs->update_mutex); |
1813 |
+ cpufreq_policy_apply_limits(policy); |
1814 |
+ gov_update_sample_delay(policy_dbs, 0); |
1815 |
+- |
1816 |
+ mutex_unlock(&policy_dbs->update_mutex); |
1817 |
++ |
1818 |
++out: |
1819 |
++ mutex_unlock(&gov_dbs_data_mutex); |
1820 |
+ } |
1821 |
+ EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits); |
1822 |
+diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c |
1823 |
+index 1aef60d160eb..910f8a68f58b 100644 |
1824 |
+--- a/drivers/cpuidle/governors/menu.c |
1825 |
++++ b/drivers/cpuidle/governors/menu.c |
1826 |
+@@ -349,14 +349,12 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, |
1827 |
+ * If the tick is already stopped, the cost of possible short |
1828 |
+ * idle duration misprediction is much higher, because the CPU |
1829 |
+ * may be stuck in a shallow idle state for a long time as a |
1830 |
+- * result of it. In that case say we might mispredict and try |
1831 |
+- * to force the CPU into a state for which we would have stopped |
1832 |
+- * the tick, unless a timer is going to expire really soon |
1833 |
+- * anyway. |
1834 |
++ * result of it. In that case say we might mispredict and use |
1835 |
++ * the known time till the closest timer event for the idle |
1836 |
++ * state selection. |
1837 |
+ */ |
1838 |
+ if (data->predicted_us < TICK_USEC) |
1839 |
+- data->predicted_us = min_t(unsigned int, TICK_USEC, |
1840 |
+- ktime_to_us(delta_next)); |
1841 |
++ data->predicted_us = ktime_to_us(delta_next); |
1842 |
+ } else { |
1843 |
+ /* |
1844 |
+ * Use the performance multiplier and the user-configurable |
1845 |
+@@ -381,8 +379,33 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, |
1846 |
+ continue; |
1847 |
+ if (idx == -1) |
1848 |
+ idx = i; /* first enabled state */ |
1849 |
+- if (s->target_residency > data->predicted_us) |
1850 |
+- break; |
1851 |
++ if (s->target_residency > data->predicted_us) { |
1852 |
++ if (data->predicted_us < TICK_USEC) |
1853 |
++ break; |
1854 |
++ |
1855 |
++ if (!tick_nohz_tick_stopped()) { |
1856 |
++ /* |
1857 |
++ * If the state selected so far is shallow, |
1858 |
++ * waking up early won't hurt, so retain the |
1859 |
++ * tick in that case and let the governor run |
1860 |
++ * again in the next iteration of the loop. |
1861 |
++ */ |
1862 |
++ expected_interval = drv->states[idx].target_residency; |
1863 |
++ break; |
1864 |
++ } |
1865 |
++ |
1866 |
++ /* |
1867 |
++ * If the state selected so far is shallow and this |
1868 |
++ * state's target residency matches the time till the |
1869 |
++ * closest timer event, select this one to avoid getting |
1870 |
++ * stuck in the shallow one for too long. |
1871 |
++ */ |
1872 |
++ if (drv->states[idx].target_residency < TICK_USEC && |
1873 |
++ s->target_residency <= ktime_to_us(delta_next)) |
1874 |
++ idx = i; |
1875 |
++ |
1876 |
++ goto out; |
1877 |
++ } |
1878 |
+ if (s->exit_latency > latency_req) { |
1879 |
+ /* |
1880 |
+ * If we break out of the loop for latency reasons, use |
1881 |
+@@ -403,14 +426,13 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, |
1882 |
+ * Don't stop the tick if the selected state is a polling one or if the |
1883 |
+ * expected idle duration is shorter than the tick period length. |
1884 |
+ */ |
1885 |
+- if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) || |
1886 |
+- expected_interval < TICK_USEC) { |
1887 |
++ if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) || |
1888 |
++ expected_interval < TICK_USEC) && !tick_nohz_tick_stopped()) { |
1889 |
+ unsigned int delta_next_us = ktime_to_us(delta_next); |
1890 |
+ |
1891 |
+ *stop_tick = false; |
1892 |
+ |
1893 |
+- if (!tick_nohz_tick_stopped() && idx > 0 && |
1894 |
+- drv->states[idx].target_residency > delta_next_us) { |
1895 |
++ if (idx > 0 && drv->states[idx].target_residency > delta_next_us) { |
1896 |
+ /* |
1897 |
+ * The tick is not going to be stopped and the target |
1898 |
+ * residency of the state to be returned is not within |
1899 |
+@@ -429,6 +451,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, |
1900 |
+ } |
1901 |
+ } |
1902 |
+ |
1903 |
++out: |
1904 |
+ data->last_state_idx = idx; |
1905 |
+ |
1906 |
+ return data->last_state_idx; |
1907 |
+diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c |
1908 |
+index 6e61cc93c2b0..d7aa7d7ff102 100644 |
1909 |
+--- a/drivers/crypto/caam/caamalg_qi.c |
1910 |
++++ b/drivers/crypto/caam/caamalg_qi.c |
1911 |
+@@ -679,10 +679,8 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, |
1912 |
+ int ret = 0; |
1913 |
+ |
1914 |
+ if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { |
1915 |
+- crypto_ablkcipher_set_flags(ablkcipher, |
1916 |
+- CRYPTO_TFM_RES_BAD_KEY_LEN); |
1917 |
+ dev_err(jrdev, "key size mismatch\n"); |
1918 |
+- return -EINVAL; |
1919 |
++ goto badkey; |
1920 |
+ } |
1921 |
+ |
1922 |
+ ctx->cdata.keylen = keylen; |
1923 |
+@@ -715,7 +713,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, |
1924 |
+ return ret; |
1925 |
+ badkey: |
1926 |
+ crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); |
1927 |
+- return 0; |
1928 |
++ return -EINVAL; |
1929 |
+ } |
1930 |
+ |
1931 |
+ /* |
1932 |
+diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c |
1933 |
+index 578ea63a3109..f26d62e5533a 100644 |
1934 |
+--- a/drivers/crypto/caam/caampkc.c |
1935 |
++++ b/drivers/crypto/caam/caampkc.c |
1936 |
+@@ -71,8 +71,8 @@ static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc, |
1937 |
+ dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE); |
1938 |
+ dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); |
1939 |
+ dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); |
1940 |
+- dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); |
1941 |
+- dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE); |
1942 |
++ dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); |
1943 |
++ dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL); |
1944 |
+ } |
1945 |
+ |
1946 |
+ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc, |
1947 |
+@@ -90,8 +90,8 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc, |
1948 |
+ dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE); |
1949 |
+ dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE); |
1950 |
+ dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE); |
1951 |
+- dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); |
1952 |
+- dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE); |
1953 |
++ dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); |
1954 |
++ dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL); |
1955 |
+ } |
1956 |
+ |
1957 |
+ /* RSA Job Completion handler */ |
1958 |
+@@ -417,13 +417,13 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req, |
1959 |
+ goto unmap_p; |
1960 |
+ } |
1961 |
+ |
1962 |
+- pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE); |
1963 |
++ pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL); |
1964 |
+ if (dma_mapping_error(dev, pdb->tmp1_dma)) { |
1965 |
+ dev_err(dev, "Unable to map RSA tmp1 memory\n"); |
1966 |
+ goto unmap_q; |
1967 |
+ } |
1968 |
+ |
1969 |
+- pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE); |
1970 |
++ pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL); |
1971 |
+ if (dma_mapping_error(dev, pdb->tmp2_dma)) { |
1972 |
+ dev_err(dev, "Unable to map RSA tmp2 memory\n"); |
1973 |
+ goto unmap_tmp1; |
1974 |
+@@ -451,7 +451,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req, |
1975 |
+ return 0; |
1976 |
+ |
1977 |
+ unmap_tmp1: |
1978 |
+- dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); |
1979 |
++ dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); |
1980 |
+ unmap_q: |
1981 |
+ dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); |
1982 |
+ unmap_p: |
1983 |
+@@ -504,13 +504,13 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req, |
1984 |
+ goto unmap_dq; |
1985 |
+ } |
1986 |
+ |
1987 |
+- pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE); |
1988 |
++ pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL); |
1989 |
+ if (dma_mapping_error(dev, pdb->tmp1_dma)) { |
1990 |
+ dev_err(dev, "Unable to map RSA tmp1 memory\n"); |
1991 |
+ goto unmap_qinv; |
1992 |
+ } |
1993 |
+ |
1994 |
+- pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE); |
1995 |
++ pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL); |
1996 |
+ if (dma_mapping_error(dev, pdb->tmp2_dma)) { |
1997 |
+ dev_err(dev, "Unable to map RSA tmp2 memory\n"); |
1998 |
+ goto unmap_tmp1; |
1999 |
+@@ -538,7 +538,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req, |
2000 |
+ return 0; |
2001 |
+ |
2002 |
+ unmap_tmp1: |
2003 |
+- dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); |
2004 |
++ dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); |
2005 |
+ unmap_qinv: |
2006 |
+ dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE); |
2007 |
+ unmap_dq: |
2008 |
+diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c |
2009 |
+index f4f258075b89..acdd72016ffe 100644 |
2010 |
+--- a/drivers/crypto/caam/jr.c |
2011 |
++++ b/drivers/crypto/caam/jr.c |
2012 |
+@@ -190,7 +190,8 @@ static void caam_jr_dequeue(unsigned long devarg) |
2013 |
+ BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0); |
2014 |
+ |
2015 |
+ /* Unmap just-run descriptor so we can post-process */ |
2016 |
+- dma_unmap_single(dev, jrp->outring[hw_idx].desc, |
2017 |
++ dma_unmap_single(dev, |
2018 |
++ caam_dma_to_cpu(jrp->outring[hw_idx].desc), |
2019 |
+ jrp->entinfo[sw_idx].desc_size, |
2020 |
+ DMA_TO_DEVICE); |
2021 |
+ |
2022 |
+diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c |
2023 |
+index 5285ece4f33a..b71895871be3 100644 |
2024 |
+--- a/drivers/crypto/vmx/aes_cbc.c |
2025 |
++++ b/drivers/crypto/vmx/aes_cbc.c |
2026 |
+@@ -107,24 +107,23 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc, |
2027 |
+ ret = crypto_skcipher_encrypt(req); |
2028 |
+ skcipher_request_zero(req); |
2029 |
+ } else { |
2030 |
+- preempt_disable(); |
2031 |
+- pagefault_disable(); |
2032 |
+- enable_kernel_vsx(); |
2033 |
+- |
2034 |
+ blkcipher_walk_init(&walk, dst, src, nbytes); |
2035 |
+ ret = blkcipher_walk_virt(desc, &walk); |
2036 |
+ while ((nbytes = walk.nbytes)) { |
2037 |
++ preempt_disable(); |
2038 |
++ pagefault_disable(); |
2039 |
++ enable_kernel_vsx(); |
2040 |
+ aes_p8_cbc_encrypt(walk.src.virt.addr, |
2041 |
+ walk.dst.virt.addr, |
2042 |
+ nbytes & AES_BLOCK_MASK, |
2043 |
+ &ctx->enc_key, walk.iv, 1); |
2044 |
++ disable_kernel_vsx(); |
2045 |
++ pagefault_enable(); |
2046 |
++ preempt_enable(); |
2047 |
++ |
2048 |
+ nbytes &= AES_BLOCK_SIZE - 1; |
2049 |
+ ret = blkcipher_walk_done(desc, &walk, nbytes); |
2050 |
+ } |
2051 |
+- |
2052 |
+- disable_kernel_vsx(); |
2053 |
+- pagefault_enable(); |
2054 |
+- preempt_enable(); |
2055 |
+ } |
2056 |
+ |
2057 |
+ return ret; |
2058 |
+@@ -147,24 +146,23 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc, |
2059 |
+ ret = crypto_skcipher_decrypt(req); |
2060 |
+ skcipher_request_zero(req); |
2061 |
+ } else { |
2062 |
+- preempt_disable(); |
2063 |
+- pagefault_disable(); |
2064 |
+- enable_kernel_vsx(); |
2065 |
+- |
2066 |
+ blkcipher_walk_init(&walk, dst, src, nbytes); |
2067 |
+ ret = blkcipher_walk_virt(desc, &walk); |
2068 |
+ while ((nbytes = walk.nbytes)) { |
2069 |
++ preempt_disable(); |
2070 |
++ pagefault_disable(); |
2071 |
++ enable_kernel_vsx(); |
2072 |
+ aes_p8_cbc_encrypt(walk.src.virt.addr, |
2073 |
+ walk.dst.virt.addr, |
2074 |
+ nbytes & AES_BLOCK_MASK, |
2075 |
+ &ctx->dec_key, walk.iv, 0); |
2076 |
++ disable_kernel_vsx(); |
2077 |
++ pagefault_enable(); |
2078 |
++ preempt_enable(); |
2079 |
++ |
2080 |
+ nbytes &= AES_BLOCK_SIZE - 1; |
2081 |
+ ret = blkcipher_walk_done(desc, &walk, nbytes); |
2082 |
+ } |
2083 |
+- |
2084 |
+- disable_kernel_vsx(); |
2085 |
+- pagefault_enable(); |
2086 |
+- preempt_enable(); |
2087 |
+ } |
2088 |
+ |
2089 |
+ return ret; |
2090 |
+diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c |
2091 |
+index 8bd9aff0f55f..e9954a7d4694 100644 |
2092 |
+--- a/drivers/crypto/vmx/aes_xts.c |
2093 |
++++ b/drivers/crypto/vmx/aes_xts.c |
2094 |
+@@ -116,32 +116,39 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc, |
2095 |
+ ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req); |
2096 |
+ skcipher_request_zero(req); |
2097 |
+ } else { |
2098 |
++ blkcipher_walk_init(&walk, dst, src, nbytes); |
2099 |
++ |
2100 |
++ ret = blkcipher_walk_virt(desc, &walk); |
2101 |
++ |
2102 |
+ preempt_disable(); |
2103 |
+ pagefault_disable(); |
2104 |
+ enable_kernel_vsx(); |
2105 |
+ |
2106 |
+- blkcipher_walk_init(&walk, dst, src, nbytes); |
2107 |
+- |
2108 |
+- ret = blkcipher_walk_virt(desc, &walk); |
2109 |
+ iv = walk.iv; |
2110 |
+ memset(tweak, 0, AES_BLOCK_SIZE); |
2111 |
+ aes_p8_encrypt(iv, tweak, &ctx->tweak_key); |
2112 |
+ |
2113 |
++ disable_kernel_vsx(); |
2114 |
++ pagefault_enable(); |
2115 |
++ preempt_enable(); |
2116 |
++ |
2117 |
+ while ((nbytes = walk.nbytes)) { |
2118 |
++ preempt_disable(); |
2119 |
++ pagefault_disable(); |
2120 |
++ enable_kernel_vsx(); |
2121 |
+ if (enc) |
2122 |
+ aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr, |
2123 |
+ nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak); |
2124 |
+ else |
2125 |
+ aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr, |
2126 |
+ nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak); |
2127 |
++ disable_kernel_vsx(); |
2128 |
++ pagefault_enable(); |
2129 |
++ preempt_enable(); |
2130 |
+ |
2131 |
+ nbytes &= AES_BLOCK_SIZE - 1; |
2132 |
+ ret = blkcipher_walk_done(desc, &walk, nbytes); |
2133 |
+ } |
2134 |
+- |
2135 |
+- disable_kernel_vsx(); |
2136 |
+- pagefault_enable(); |
2137 |
+- preempt_enable(); |
2138 |
+ } |
2139 |
+ return ret; |
2140 |
+ } |
2141 |
+diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c |
2142 |
+index 314eb1071cce..532545b9488e 100644 |
2143 |
+--- a/drivers/dma-buf/reservation.c |
2144 |
++++ b/drivers/dma-buf/reservation.c |
2145 |
+@@ -141,6 +141,7 @@ reservation_object_add_shared_inplace(struct reservation_object *obj, |
2146 |
+ if (signaled) { |
2147 |
+ RCU_INIT_POINTER(fobj->shared[signaled_idx], fence); |
2148 |
+ } else { |
2149 |
++ BUG_ON(fobj->shared_count >= fobj->shared_max); |
2150 |
+ RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence); |
2151 |
+ fobj->shared_count++; |
2152 |
+ } |
2153 |
+@@ -230,10 +231,9 @@ void reservation_object_add_shared_fence(struct reservation_object *obj, |
2154 |
+ old = reservation_object_get_list(obj); |
2155 |
+ obj->staged = NULL; |
2156 |
+ |
2157 |
+- if (!fobj) { |
2158 |
+- BUG_ON(old->shared_count >= old->shared_max); |
2159 |
++ if (!fobj) |
2160 |
+ reservation_object_add_shared_inplace(obj, old, fence); |
2161 |
+- } else |
2162 |
++ else |
2163 |
+ reservation_object_add_shared_replace(obj, old, fobj, fence); |
2164 |
+ } |
2165 |
+ EXPORT_SYMBOL(reservation_object_add_shared_fence); |
2166 |
+diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c |
2167 |
+index af83ad58819c..b9d27c8fe57e 100644 |
2168 |
+--- a/drivers/extcon/extcon.c |
2169 |
++++ b/drivers/extcon/extcon.c |
2170 |
+@@ -433,8 +433,8 @@ int extcon_sync(struct extcon_dev *edev, unsigned int id) |
2171 |
+ return index; |
2172 |
+ |
2173 |
+ spin_lock_irqsave(&edev->lock, flags); |
2174 |
+- |
2175 |
+ state = !!(edev->state & BIT(index)); |
2176 |
++ spin_unlock_irqrestore(&edev->lock, flags); |
2177 |
+ |
2178 |
+ /* |
2179 |
+ * Call functions in a raw notifier chain for the specific one |
2180 |
+@@ -448,6 +448,7 @@ int extcon_sync(struct extcon_dev *edev, unsigned int id) |
2181 |
+ */ |
2182 |
+ raw_notifier_call_chain(&edev->nh_all, state, edev); |
2183 |
+ |
2184 |
++ spin_lock_irqsave(&edev->lock, flags); |
2185 |
+ /* This could be in interrupt handler */ |
2186 |
+ prop_buf = (char *)get_zeroed_page(GFP_ATOMIC); |
2187 |
+ if (!prop_buf) { |
2188 |
+diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c |
2189 |
+index ba0a092ae085..c3949220b770 100644 |
2190 |
+--- a/drivers/hv/channel.c |
2191 |
++++ b/drivers/hv/channel.c |
2192 |
+@@ -558,11 +558,8 @@ static void reset_channel_cb(void *arg) |
2193 |
+ channel->onchannel_callback = NULL; |
2194 |
+ } |
2195 |
+ |
2196 |
+-static int vmbus_close_internal(struct vmbus_channel *channel) |
2197 |
++void vmbus_reset_channel_cb(struct vmbus_channel *channel) |
2198 |
+ { |
2199 |
+- struct vmbus_channel_close_channel *msg; |
2200 |
+- int ret; |
2201 |
+- |
2202 |
+ /* |
2203 |
+ * vmbus_on_event(), running in the per-channel tasklet, can race |
2204 |
+ * with vmbus_close_internal() in the case of SMP guest, e.g., when |
2205 |
+@@ -572,6 +569,29 @@ static int vmbus_close_internal(struct vmbus_channel *channel) |
2206 |
+ */ |
2207 |
+ tasklet_disable(&channel->callback_event); |
2208 |
+ |
2209 |
++ channel->sc_creation_callback = NULL; |
2210 |
++ |
2211 |
++ /* Stop the callback asap */ |
2212 |
++ if (channel->target_cpu != get_cpu()) { |
2213 |
++ put_cpu(); |
2214 |
++ smp_call_function_single(channel->target_cpu, reset_channel_cb, |
2215 |
++ channel, true); |
2216 |
++ } else { |
2217 |
++ reset_channel_cb(channel); |
2218 |
++ put_cpu(); |
2219 |
++ } |
2220 |
++ |
2221 |
++ /* Re-enable tasklet for use on re-open */ |
2222 |
++ tasklet_enable(&channel->callback_event); |
2223 |
++} |
2224 |
++ |
2225 |
++static int vmbus_close_internal(struct vmbus_channel *channel) |
2226 |
++{ |
2227 |
++ struct vmbus_channel_close_channel *msg; |
2228 |
++ int ret; |
2229 |
++ |
2230 |
++ vmbus_reset_channel_cb(channel); |
2231 |
++ |
2232 |
+ /* |
2233 |
+ * In case a device driver's probe() fails (e.g., |
2234 |
+ * util_probe() -> vmbus_open() returns -ENOMEM) and the device is |
2235 |
+@@ -585,16 +605,6 @@ static int vmbus_close_internal(struct vmbus_channel *channel) |
2236 |
+ } |
2237 |
+ |
2238 |
+ channel->state = CHANNEL_OPEN_STATE; |
2239 |
+- channel->sc_creation_callback = NULL; |
2240 |
+- /* Stop callback and cancel the timer asap */ |
2241 |
+- if (channel->target_cpu != get_cpu()) { |
2242 |
+- put_cpu(); |
2243 |
+- smp_call_function_single(channel->target_cpu, reset_channel_cb, |
2244 |
+- channel, true); |
2245 |
+- } else { |
2246 |
+- reset_channel_cb(channel); |
2247 |
+- put_cpu(); |
2248 |
+- } |
2249 |
+ |
2250 |
+ /* Send a closing message */ |
2251 |
+ |
2252 |
+@@ -639,8 +649,6 @@ static int vmbus_close_internal(struct vmbus_channel *channel) |
2253 |
+ get_order(channel->ringbuffer_pagecount * PAGE_SIZE)); |
2254 |
+ |
2255 |
+ out: |
2256 |
+- /* re-enable tasklet for use on re-open */ |
2257 |
+- tasklet_enable(&channel->callback_event); |
2258 |
+ return ret; |
2259 |
+ } |
2260 |
+ |
2261 |
+diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c |
2262 |
+index ecc2bd275a73..0f0e091c117c 100644 |
2263 |
+--- a/drivers/hv/channel_mgmt.c |
2264 |
++++ b/drivers/hv/channel_mgmt.c |
2265 |
+@@ -527,10 +527,8 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) |
2266 |
+ struct hv_device *dev |
2267 |
+ = newchannel->primary_channel->device_obj; |
2268 |
+ |
2269 |
+- if (vmbus_add_channel_kobj(dev, newchannel)) { |
2270 |
+- atomic_dec(&vmbus_connection.offer_in_progress); |
2271 |
++ if (vmbus_add_channel_kobj(dev, newchannel)) |
2272 |
+ goto err_free_chan; |
2273 |
+- } |
2274 |
+ |
2275 |
+ if (channel->sc_creation_callback != NULL) |
2276 |
+ channel->sc_creation_callback(newchannel); |
2277 |
+@@ -894,6 +892,12 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) |
2278 |
+ return; |
2279 |
+ } |
2280 |
+ |
2281 |
++ /* |
2282 |
++ * Before setting channel->rescind in vmbus_rescind_cleanup(), we |
2283 |
++ * should make sure the channel callback is not running any more. |
2284 |
++ */ |
2285 |
++ vmbus_reset_channel_cb(channel); |
2286 |
++ |
2287 |
+ /* |
2288 |
+ * Now wait for offer handling to complete. |
2289 |
+ */ |
2290 |
+diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c |
2291 |
+index 27436a937492..54b2a3a86677 100644 |
2292 |
+--- a/drivers/i2c/busses/i2c-designware-master.c |
2293 |
++++ b/drivers/i2c/busses/i2c-designware-master.c |
2294 |
+@@ -693,7 +693,6 @@ int i2c_dw_probe(struct dw_i2c_dev *dev) |
2295 |
+ i2c_set_adapdata(adap, dev); |
2296 |
+ |
2297 |
+ if (dev->pm_disabled) { |
2298 |
+- dev_pm_syscore_device(dev->dev, true); |
2299 |
+ irq_flags = IRQF_NO_SUSPEND; |
2300 |
+ } else { |
2301 |
+ irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND; |
2302 |
+diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c |
2303 |
+index 5660daf6c92e..d281d21cdd8e 100644 |
2304 |
+--- a/drivers/i2c/busses/i2c-designware-platdrv.c |
2305 |
++++ b/drivers/i2c/busses/i2c-designware-platdrv.c |
2306 |
+@@ -448,6 +448,9 @@ static int dw_i2c_plat_suspend(struct device *dev) |
2307 |
+ { |
2308 |
+ struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); |
2309 |
+ |
2310 |
++ if (i_dev->pm_disabled) |
2311 |
++ return 0; |
2312 |
++ |
2313 |
+ i_dev->disable(i_dev); |
2314 |
+ i2c_dw_prepare_clk(i_dev, false); |
2315 |
+ |
2316 |
+@@ -458,7 +461,9 @@ static int dw_i2c_plat_resume(struct device *dev) |
2317 |
+ { |
2318 |
+ struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); |
2319 |
+ |
2320 |
+- i2c_dw_prepare_clk(i_dev, true); |
2321 |
++ if (!i_dev->pm_disabled) |
2322 |
++ i2c_dw_prepare_clk(i_dev, true); |
2323 |
++ |
2324 |
+ i_dev->init(i_dev); |
2325 |
+ |
2326 |
+ return 0; |
2327 |
+diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c |
2328 |
+index 4dceb75e3586..4964561595f5 100644 |
2329 |
+--- a/drivers/iio/accel/sca3000.c |
2330 |
++++ b/drivers/iio/accel/sca3000.c |
2331 |
+@@ -797,6 +797,7 @@ static int sca3000_write_raw(struct iio_dev *indio_dev, |
2332 |
+ mutex_lock(&st->lock); |
2333 |
+ ret = sca3000_write_3db_freq(st, val); |
2334 |
+ mutex_unlock(&st->lock); |
2335 |
++ return ret; |
2336 |
+ default: |
2337 |
+ return -EINVAL; |
2338 |
+ } |
2339 |
+diff --git a/drivers/iio/frequency/ad9523.c b/drivers/iio/frequency/ad9523.c |
2340 |
+index ddb6a334ae68..8e8263758439 100644 |
2341 |
+--- a/drivers/iio/frequency/ad9523.c |
2342 |
++++ b/drivers/iio/frequency/ad9523.c |
2343 |
+@@ -508,7 +508,7 @@ static ssize_t ad9523_store(struct device *dev, |
2344 |
+ return ret; |
2345 |
+ |
2346 |
+ if (!state) |
2347 |
+- return 0; |
2348 |
++ return len; |
2349 |
+ |
2350 |
+ mutex_lock(&indio_dev->mlock); |
2351 |
+ switch ((u32)this_attr->address) { |
2352 |
+@@ -642,7 +642,7 @@ static int ad9523_read_raw(struct iio_dev *indio_dev, |
2353 |
+ code = (AD9523_CLK_DIST_DIV_PHASE_REV(ret) * 3141592) / |
2354 |
+ AD9523_CLK_DIST_DIV_REV(ret); |
2355 |
+ *val = code / 1000000; |
2356 |
+- *val2 = (code % 1000000) * 10; |
2357 |
++ *val2 = code % 1000000; |
2358 |
+ return IIO_VAL_INT_PLUS_MICRO; |
2359 |
+ default: |
2360 |
+ return -EINVAL; |
2361 |
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c |
2362 |
+index b3ba9a222550..cbeae4509359 100644 |
2363 |
+--- a/drivers/infiniband/hw/mlx5/main.c |
2364 |
++++ b/drivers/infiniband/hw/mlx5/main.c |
2365 |
+@@ -4694,7 +4694,7 @@ static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev) |
2366 |
+ int i; |
2367 |
+ |
2368 |
+ for (i = 0; i < dev->num_ports; i++) { |
2369 |
+- if (dev->port[i].cnts.set_id) |
2370 |
++ if (dev->port[i].cnts.set_id_valid) |
2371 |
+ mlx5_core_dealloc_q_counter(dev->mdev, |
2372 |
+ dev->port[i].cnts.set_id); |
2373 |
+ kfree(dev->port[i].cnts.names); |
2374 |
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c |
2375 |
+index a4f1f638509f..01eae67d5a6e 100644 |
2376 |
+--- a/drivers/infiniband/hw/mlx5/qp.c |
2377 |
++++ b/drivers/infiniband/hw/mlx5/qp.c |
2378 |
+@@ -1626,7 +1626,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, |
2379 |
+ struct mlx5_ib_resources *devr = &dev->devr; |
2380 |
+ int inlen = MLX5_ST_SZ_BYTES(create_qp_in); |
2381 |
+ struct mlx5_core_dev *mdev = dev->mdev; |
2382 |
+- struct mlx5_ib_create_qp_resp resp; |
2383 |
++ struct mlx5_ib_create_qp_resp resp = {}; |
2384 |
+ struct mlx5_ib_cq *send_cq; |
2385 |
+ struct mlx5_ib_cq *recv_cq; |
2386 |
+ unsigned long flags; |
2387 |
+@@ -5365,7 +5365,9 @@ static int set_user_rq_size(struct mlx5_ib_dev *dev, |
2388 |
+ |
2389 |
+ rwq->wqe_count = ucmd->rq_wqe_count; |
2390 |
+ rwq->wqe_shift = ucmd->rq_wqe_shift; |
2391 |
+- rwq->buf_size = (rwq->wqe_count << rwq->wqe_shift); |
2392 |
++ if (check_shl_overflow(rwq->wqe_count, rwq->wqe_shift, &rwq->buf_size)) |
2393 |
++ return -EINVAL; |
2394 |
++ |
2395 |
+ rwq->log_rq_stride = rwq->wqe_shift; |
2396 |
+ rwq->log_rq_size = ilog2(rwq->wqe_count); |
2397 |
+ return 0; |
2398 |
+diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c |
2399 |
+index 98d470d1f3fc..83311dd07019 100644 |
2400 |
+--- a/drivers/infiniband/sw/rxe/rxe_comp.c |
2401 |
++++ b/drivers/infiniband/sw/rxe/rxe_comp.c |
2402 |
+@@ -276,6 +276,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp, |
2403 |
+ case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE: |
2404 |
+ if (wqe->wr.opcode != IB_WR_RDMA_READ && |
2405 |
+ wqe->wr.opcode != IB_WR_RDMA_READ_WITH_INV) { |
2406 |
++ wqe->status = IB_WC_FATAL_ERR; |
2407 |
+ return COMPST_ERROR; |
2408 |
+ } |
2409 |
+ reset_retry_counters(qp); |
2410 |
+diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c |
2411 |
+index 3081c629a7f7..8a9633e97bec 100644 |
2412 |
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c |
2413 |
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c |
2414 |
+@@ -1833,8 +1833,7 @@ static bool srpt_close_ch(struct srpt_rdma_ch *ch) |
2415 |
+ int ret; |
2416 |
+ |
2417 |
+ if (!srpt_set_ch_state(ch, CH_DRAINING)) { |
2418 |
+- pr_debug("%s-%d: already closed\n", ch->sess_name, |
2419 |
+- ch->qp->qp_num); |
2420 |
++ pr_debug("%s: already closed\n", ch->sess_name); |
2421 |
+ return false; |
2422 |
+ } |
2423 |
+ |
2424 |
+@@ -1940,8 +1939,8 @@ static void __srpt_close_all_ch(struct srpt_port *sport) |
2425 |
+ list_for_each_entry(nexus, &sport->nexus_list, entry) { |
2426 |
+ list_for_each_entry(ch, &nexus->ch_list, list) { |
2427 |
+ if (srpt_disconnect_ch(ch) >= 0) |
2428 |
+- pr_info("Closing channel %s-%d because target %s_%d has been disabled\n", |
2429 |
+- ch->sess_name, ch->qp->qp_num, |
2430 |
++ pr_info("Closing channel %s because target %s_%d has been disabled\n", |
2431 |
++ ch->sess_name, |
2432 |
+ sport->sdev->device->name, sport->port); |
2433 |
+ srpt_close_ch(ch); |
2434 |
+ } |
2435 |
+@@ -2087,7 +2086,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev, |
2436 |
+ struct rdma_conn_param rdma_cm; |
2437 |
+ struct ib_cm_rep_param ib_cm; |
2438 |
+ } *rep_param = NULL; |
2439 |
+- struct srpt_rdma_ch *ch; |
2440 |
++ struct srpt_rdma_ch *ch = NULL; |
2441 |
+ char i_port_id[36]; |
2442 |
+ u32 it_iu_len; |
2443 |
+ int i, ret; |
2444 |
+@@ -2234,13 +2233,15 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev, |
2445 |
+ TARGET_PROT_NORMAL, |
2446 |
+ i_port_id + 2, ch, NULL); |
2447 |
+ if (IS_ERR_OR_NULL(ch->sess)) { |
2448 |
++ WARN_ON_ONCE(ch->sess == NULL); |
2449 |
+ ret = PTR_ERR(ch->sess); |
2450 |
++ ch->sess = NULL; |
2451 |
+ pr_info("Rejected login for initiator %s: ret = %d.\n", |
2452 |
+ ch->sess_name, ret); |
2453 |
+ rej->reason = cpu_to_be32(ret == -ENOMEM ? |
2454 |
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES : |
2455 |
+ SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED); |
2456 |
+- goto reject; |
2457 |
++ goto destroy_ib; |
2458 |
+ } |
2459 |
+ |
2460 |
+ mutex_lock(&sport->mutex); |
2461 |
+@@ -2279,7 +2280,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev, |
2462 |
+ rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); |
2463 |
+ pr_err("rejected SRP_LOGIN_REQ because enabling RTR failed (error code = %d)\n", |
2464 |
+ ret); |
2465 |
+- goto destroy_ib; |
2466 |
++ goto reject; |
2467 |
+ } |
2468 |
+ |
2469 |
+ pr_debug("Establish connection sess=%p name=%s ch=%p\n", ch->sess, |
2470 |
+@@ -2358,8 +2359,11 @@ free_ring: |
2471 |
+ srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring, |
2472 |
+ ch->sport->sdev, ch->rq_size, |
2473 |
+ ch->max_rsp_size, DMA_TO_DEVICE); |
2474 |
++ |
2475 |
+ free_ch: |
2476 |
+- if (ib_cm_id) |
2477 |
++ if (rdma_cm_id) |
2478 |
++ rdma_cm_id->context = NULL; |
2479 |
++ else |
2480 |
+ ib_cm_id->context = NULL; |
2481 |
+ kfree(ch); |
2482 |
+ ch = NULL; |
2483 |
+@@ -2379,6 +2383,15 @@ reject: |
2484 |
+ ib_send_cm_rej(ib_cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, |
2485 |
+ rej, sizeof(*rej)); |
2486 |
+ |
2487 |
++ if (ch && ch->sess) { |
2488 |
++ srpt_close_ch(ch); |
2489 |
++ /* |
2490 |
++ * Tell the caller not to free cm_id since |
2491 |
++ * srpt_release_channel_work() will do that. |
2492 |
++ */ |
2493 |
++ ret = 0; |
2494 |
++ } |
2495 |
++ |
2496 |
+ out: |
2497 |
+ kfree(rep_param); |
2498 |
+ kfree(rsp); |
2499 |
+@@ -2969,7 +2982,8 @@ static void srpt_add_one(struct ib_device *device) |
2500 |
+ |
2501 |
+ pr_debug("device = %p\n", device); |
2502 |
+ |
2503 |
+- sdev = kzalloc(sizeof(*sdev), GFP_KERNEL); |
2504 |
++ sdev = kzalloc(struct_size(sdev, port, device->phys_port_cnt), |
2505 |
++ GFP_KERNEL); |
2506 |
+ if (!sdev) |
2507 |
+ goto err; |
2508 |
+ |
2509 |
+@@ -3023,8 +3037,6 @@ static void srpt_add_one(struct ib_device *device) |
2510 |
+ srpt_event_handler); |
2511 |
+ ib_register_event_handler(&sdev->event_handler); |
2512 |
+ |
2513 |
+- WARN_ON(sdev->device->phys_port_cnt > ARRAY_SIZE(sdev->port)); |
2514 |
+- |
2515 |
+ for (i = 1; i <= sdev->device->phys_port_cnt; i++) { |
2516 |
+ sport = &sdev->port[i - 1]; |
2517 |
+ INIT_LIST_HEAD(&sport->nexus_list); |
2518 |
+diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h |
2519 |
+index 2361483476a0..444dfd7281b5 100644 |
2520 |
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.h |
2521 |
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.h |
2522 |
+@@ -396,9 +396,9 @@ struct srpt_port { |
2523 |
+ * @sdev_mutex: Serializes use_srq changes. |
2524 |
+ * @use_srq: Whether or not to use SRQ. |
2525 |
+ * @ioctx_ring: Per-HCA SRQ. |
2526 |
+- * @port: Information about the ports owned by this HCA. |
2527 |
+ * @event_handler: Per-HCA asynchronous IB event handler. |
2528 |
+ * @list: Node in srpt_dev_list. |
2529 |
++ * @port: Information about the ports owned by this HCA. |
2530 |
+ */ |
2531 |
+ struct srpt_device { |
2532 |
+ struct ib_device *device; |
2533 |
+@@ -410,9 +410,9 @@ struct srpt_device { |
2534 |
+ struct mutex sdev_mutex; |
2535 |
+ bool use_srq; |
2536 |
+ struct srpt_recv_ioctx **ioctx_ring; |
2537 |
+- struct srpt_port port[2]; |
2538 |
+ struct ib_event_handler event_handler; |
2539 |
+ struct list_head list; |
2540 |
++ struct srpt_port port[]; |
2541 |
+ }; |
2542 |
+ |
2543 |
+ #endif /* IB_SRPT_H */ |
2544 |
+diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c |
2545 |
+index 75456b5aa825..d9c748b6f9e4 100644 |
2546 |
+--- a/drivers/iommu/dmar.c |
2547 |
++++ b/drivers/iommu/dmar.c |
2548 |
+@@ -1339,8 +1339,8 @@ void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, |
2549 |
+ qi_submit_sync(&desc, iommu); |
2550 |
+ } |
2551 |
+ |
2552 |
+-void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep, |
2553 |
+- u64 addr, unsigned mask) |
2554 |
++void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid, |
2555 |
++ u16 qdep, u64 addr, unsigned mask) |
2556 |
+ { |
2557 |
+ struct qi_desc desc; |
2558 |
+ |
2559 |
+@@ -1355,7 +1355,7 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep, |
2560 |
+ qdep = 0; |
2561 |
+ |
2562 |
+ desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) | |
2563 |
+- QI_DIOTLB_TYPE; |
2564 |
++ QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid); |
2565 |
+ |
2566 |
+ qi_submit_sync(&desc, iommu); |
2567 |
+ } |
2568 |
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c |
2569 |
+index 115ff26e9ced..07dc938199f9 100644 |
2570 |
+--- a/drivers/iommu/intel-iommu.c |
2571 |
++++ b/drivers/iommu/intel-iommu.c |
2572 |
+@@ -421,6 +421,7 @@ struct device_domain_info { |
2573 |
+ struct list_head global; /* link to global list */ |
2574 |
+ u8 bus; /* PCI bus number */ |
2575 |
+ u8 devfn; /* PCI devfn number */ |
2576 |
++ u16 pfsid; /* SRIOV physical function source ID */ |
2577 |
+ u8 pasid_supported:3; |
2578 |
+ u8 pasid_enabled:1; |
2579 |
+ u8 pri_supported:1; |
2580 |
+@@ -1501,6 +1502,20 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info) |
2581 |
+ return; |
2582 |
+ |
2583 |
+ pdev = to_pci_dev(info->dev); |
2584 |
++ /* For IOMMU that supports device IOTLB throttling (DIT), we assign |
2585 |
++ * PFSID to the invalidation desc of a VF such that IOMMU HW can gauge |
2586 |
++ * queue depth at PF level. If DIT is not set, PFSID will be treated as |
2587 |
++ * reserved, which should be set to 0. |
2588 |
++ */ |
2589 |
++ if (!ecap_dit(info->iommu->ecap)) |
2590 |
++ info->pfsid = 0; |
2591 |
++ else { |
2592 |
++ struct pci_dev *pf_pdev; |
2593 |
++ |
2594 |
++ /* pdev will be returned if device is not a vf */ |
2595 |
++ pf_pdev = pci_physfn(pdev); |
2596 |
++ info->pfsid = PCI_DEVID(pf_pdev->bus->number, pf_pdev->devfn); |
2597 |
++ } |
2598 |
+ |
2599 |
+ #ifdef CONFIG_INTEL_IOMMU_SVM |
2600 |
+ /* The PCIe spec, in its wisdom, declares that the behaviour of |
2601 |
+@@ -1566,7 +1581,8 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain, |
2602 |
+ |
2603 |
+ sid = info->bus << 8 | info->devfn; |
2604 |
+ qdep = info->ats_qdep; |
2605 |
+- qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask); |
2606 |
++ qi_flush_dev_iotlb(info->iommu, sid, info->pfsid, |
2607 |
++ qdep, addr, mask); |
2608 |
+ } |
2609 |
+ spin_unlock_irqrestore(&device_domain_lock, flags); |
2610 |
+ } |
2611 |
+diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c |
2612 |
+index 40ae6e87cb88..09b47260c74b 100644 |
2613 |
+--- a/drivers/iommu/ipmmu-vmsa.c |
2614 |
++++ b/drivers/iommu/ipmmu-vmsa.c |
2615 |
+@@ -1081,12 +1081,19 @@ static struct platform_driver ipmmu_driver = { |
2616 |
+ |
2617 |
+ static int __init ipmmu_init(void) |
2618 |
+ { |
2619 |
++ struct device_node *np; |
2620 |
+ static bool setup_done; |
2621 |
+ int ret; |
2622 |
+ |
2623 |
+ if (setup_done) |
2624 |
+ return 0; |
2625 |
+ |
2626 |
++ np = of_find_matching_node(NULL, ipmmu_of_ids); |
2627 |
++ if (!np) |
2628 |
++ return 0; |
2629 |
++ |
2630 |
++ of_node_put(np); |
2631 |
++ |
2632 |
+ ret = platform_driver_register(&ipmmu_driver); |
2633 |
+ if (ret < 0) |
2634 |
+ return ret; |
2635 |
+diff --git a/drivers/mailbox/mailbox-xgene-slimpro.c b/drivers/mailbox/mailbox-xgene-slimpro.c |
2636 |
+index a7040163dd43..b8b2b3533f46 100644 |
2637 |
+--- a/drivers/mailbox/mailbox-xgene-slimpro.c |
2638 |
++++ b/drivers/mailbox/mailbox-xgene-slimpro.c |
2639 |
+@@ -195,9 +195,9 @@ static int slimpro_mbox_probe(struct platform_device *pdev) |
2640 |
+ platform_set_drvdata(pdev, ctx); |
2641 |
+ |
2642 |
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
2643 |
+- mb_base = devm_ioremap(&pdev->dev, regs->start, resource_size(regs)); |
2644 |
+- if (!mb_base) |
2645 |
+- return -ENOMEM; |
2646 |
++ mb_base = devm_ioremap_resource(&pdev->dev, regs); |
2647 |
++ if (IS_ERR(mb_base)) |
2648 |
++ return PTR_ERR(mb_base); |
2649 |
+ |
2650 |
+ /* Setup mailbox links */ |
2651 |
+ for (i = 0; i < MBOX_CNT; i++) { |
2652 |
+diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c |
2653 |
+index ad45ebe1a74b..6c33923c2c35 100644 |
2654 |
+--- a/drivers/md/bcache/writeback.c |
2655 |
++++ b/drivers/md/bcache/writeback.c |
2656 |
+@@ -645,8 +645,10 @@ static int bch_writeback_thread(void *arg) |
2657 |
+ * data on cache. BCACHE_DEV_DETACHING flag is set in |
2658 |
+ * bch_cached_dev_detach(). |
2659 |
+ */ |
2660 |
+- if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) |
2661 |
++ if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) { |
2662 |
++ up_write(&dc->writeback_lock); |
2663 |
+ break; |
2664 |
++ } |
2665 |
+ } |
2666 |
+ |
2667 |
+ up_write(&dc->writeback_lock); |
2668 |
+diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c |
2669 |
+index 0d7212410e21..69dddeab124c 100644 |
2670 |
+--- a/drivers/md/dm-cache-metadata.c |
2671 |
++++ b/drivers/md/dm-cache-metadata.c |
2672 |
+@@ -363,7 +363,7 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd) |
2673 |
+ disk_super->version = cpu_to_le32(cmd->version); |
2674 |
+ memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name)); |
2675 |
+ memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version)); |
2676 |
+- disk_super->policy_hint_size = 0; |
2677 |
++ disk_super->policy_hint_size = cpu_to_le32(0); |
2678 |
+ |
2679 |
+ __copy_sm_root(cmd, disk_super); |
2680 |
+ |
2681 |
+@@ -701,6 +701,7 @@ static int __commit_transaction(struct dm_cache_metadata *cmd, |
2682 |
+ disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]); |
2683 |
+ disk_super->policy_version[1] = cpu_to_le32(cmd->policy_version[1]); |
2684 |
+ disk_super->policy_version[2] = cpu_to_le32(cmd->policy_version[2]); |
2685 |
++ disk_super->policy_hint_size = cpu_to_le32(cmd->policy_hint_size); |
2686 |
+ |
2687 |
+ disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits); |
2688 |
+ disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses); |
2689 |
+@@ -1322,6 +1323,7 @@ static int __load_mapping_v1(struct dm_cache_metadata *cmd, |
2690 |
+ |
2691 |
+ dm_oblock_t oblock; |
2692 |
+ unsigned flags; |
2693 |
++ bool dirty = true; |
2694 |
+ |
2695 |
+ dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le); |
2696 |
+ memcpy(&mapping, mapping_value_le, sizeof(mapping)); |
2697 |
+@@ -1332,8 +1334,10 @@ static int __load_mapping_v1(struct dm_cache_metadata *cmd, |
2698 |
+ dm_array_cursor_get_value(hint_cursor, (void **) &hint_value_le); |
2699 |
+ memcpy(&hint, hint_value_le, sizeof(hint)); |
2700 |
+ } |
2701 |
++ if (cmd->clean_when_opened) |
2702 |
++ dirty = flags & M_DIRTY; |
2703 |
+ |
2704 |
+- r = fn(context, oblock, to_cblock(cb), flags & M_DIRTY, |
2705 |
++ r = fn(context, oblock, to_cblock(cb), dirty, |
2706 |
+ le32_to_cpu(hint), hints_valid); |
2707 |
+ if (r) { |
2708 |
+ DMERR("policy couldn't load cache block %llu", |
2709 |
+@@ -1361,7 +1365,7 @@ static int __load_mapping_v2(struct dm_cache_metadata *cmd, |
2710 |
+ |
2711 |
+ dm_oblock_t oblock; |
2712 |
+ unsigned flags; |
2713 |
+- bool dirty; |
2714 |
++ bool dirty = true; |
2715 |
+ |
2716 |
+ dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le); |
2717 |
+ memcpy(&mapping, mapping_value_le, sizeof(mapping)); |
2718 |
+@@ -1372,8 +1376,9 @@ static int __load_mapping_v2(struct dm_cache_metadata *cmd, |
2719 |
+ dm_array_cursor_get_value(hint_cursor, (void **) &hint_value_le); |
2720 |
+ memcpy(&hint, hint_value_le, sizeof(hint)); |
2721 |
+ } |
2722 |
++ if (cmd->clean_when_opened) |
2723 |
++ dirty = dm_bitset_cursor_get_value(dirty_cursor); |
2724 |
+ |
2725 |
+- dirty = dm_bitset_cursor_get_value(dirty_cursor); |
2726 |
+ r = fn(context, oblock, to_cblock(cb), dirty, |
2727 |
+ le32_to_cpu(hint), hints_valid); |
2728 |
+ if (r) { |
2729 |
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c |
2730 |
+index b61b069c33af..3fdec1147221 100644 |
2731 |
+--- a/drivers/md/dm-crypt.c |
2732 |
++++ b/drivers/md/dm-crypt.c |
2733 |
+@@ -3069,11 +3069,11 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits) |
2734 |
+ */ |
2735 |
+ limits->max_segment_size = PAGE_SIZE; |
2736 |
+ |
2737 |
+- if (cc->sector_size != (1 << SECTOR_SHIFT)) { |
2738 |
+- limits->logical_block_size = cc->sector_size; |
2739 |
+- limits->physical_block_size = cc->sector_size; |
2740 |
+- blk_limits_io_min(limits, cc->sector_size); |
2741 |
+- } |
2742 |
++ limits->logical_block_size = |
2743 |
++ max_t(unsigned short, limits->logical_block_size, cc->sector_size); |
2744 |
++ limits->physical_block_size = |
2745 |
++ max_t(unsigned, limits->physical_block_size, cc->sector_size); |
2746 |
++ limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size); |
2747 |
+ } |
2748 |
+ |
2749 |
+ static struct target_type crypt_target = { |
2750 |
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c |
2751 |
+index 86438b2f10dd..0a8a4c2aa3ea 100644 |
2752 |
+--- a/drivers/md/dm-integrity.c |
2753 |
++++ b/drivers/md/dm-integrity.c |
2754 |
+@@ -178,7 +178,7 @@ struct dm_integrity_c { |
2755 |
+ __u8 sectors_per_block; |
2756 |
+ |
2757 |
+ unsigned char mode; |
2758 |
+- bool suspending; |
2759 |
++ int suspending; |
2760 |
+ |
2761 |
+ int failed; |
2762 |
+ |
2763 |
+@@ -2210,7 +2210,7 @@ static void dm_integrity_postsuspend(struct dm_target *ti) |
2764 |
+ |
2765 |
+ del_timer_sync(&ic->autocommit_timer); |
2766 |
+ |
2767 |
+- ic->suspending = true; |
2768 |
++ WRITE_ONCE(ic->suspending, 1); |
2769 |
+ |
2770 |
+ queue_work(ic->commit_wq, &ic->commit_work); |
2771 |
+ drain_workqueue(ic->commit_wq); |
2772 |
+@@ -2220,7 +2220,7 @@ static void dm_integrity_postsuspend(struct dm_target *ti) |
2773 |
+ dm_integrity_flush_buffers(ic); |
2774 |
+ } |
2775 |
+ |
2776 |
+- ic->suspending = false; |
2777 |
++ WRITE_ONCE(ic->suspending, 0); |
2778 |
+ |
2779 |
+ BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress)); |
2780 |
+ |
2781 |
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c |
2782 |
+index b900723bbd0f..1087f6a1ac79 100644 |
2783 |
+--- a/drivers/md/dm-thin.c |
2784 |
++++ b/drivers/md/dm-thin.c |
2785 |
+@@ -2520,6 +2520,8 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) |
2786 |
+ case PM_WRITE: |
2787 |
+ if (old_mode != new_mode) |
2788 |
+ notify_of_pool_mode_change(pool, "write"); |
2789 |
++ if (old_mode == PM_OUT_OF_DATA_SPACE) |
2790 |
++ cancel_delayed_work_sync(&pool->no_space_timeout); |
2791 |
+ pool->out_of_data_space = false; |
2792 |
+ pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space; |
2793 |
+ dm_pool_metadata_read_write(pool->pmd); |
2794 |
+diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c |
2795 |
+index 87107c995cb5..7669069005e9 100644 |
2796 |
+--- a/drivers/md/dm-writecache.c |
2797 |
++++ b/drivers/md/dm-writecache.c |
2798 |
+@@ -457,7 +457,7 @@ static void ssd_commit_flushed(struct dm_writecache *wc) |
2799 |
+ COMPLETION_INITIALIZER_ONSTACK(endio.c), |
2800 |
+ ATOMIC_INIT(1), |
2801 |
+ }; |
2802 |
+- unsigned bitmap_bits = wc->dirty_bitmap_size * BITS_PER_LONG; |
2803 |
++ unsigned bitmap_bits = wc->dirty_bitmap_size * 8; |
2804 |
+ unsigned i = 0; |
2805 |
+ |
2806 |
+ while (1) { |
2807 |
+diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c |
2808 |
+index b162c2fe62c3..76e6bed5a1da 100644 |
2809 |
+--- a/drivers/media/i2c/tvp5150.c |
2810 |
++++ b/drivers/media/i2c/tvp5150.c |
2811 |
+@@ -872,7 +872,7 @@ static int tvp5150_fill_fmt(struct v4l2_subdev *sd, |
2812 |
+ f = &format->format; |
2813 |
+ |
2814 |
+ f->width = decoder->rect.width; |
2815 |
+- f->height = decoder->rect.height; |
2816 |
++ f->height = decoder->rect.height / 2; |
2817 |
+ |
2818 |
+ f->code = MEDIA_BUS_FMT_UYVY8_2X8; |
2819 |
+ f->field = V4L2_FIELD_ALTERNATE; |
2820 |
+diff --git a/drivers/mfd/hi655x-pmic.c b/drivers/mfd/hi655x-pmic.c |
2821 |
+index c37ccbfd52f2..96c07fa1802a 100644 |
2822 |
+--- a/drivers/mfd/hi655x-pmic.c |
2823 |
++++ b/drivers/mfd/hi655x-pmic.c |
2824 |
+@@ -49,7 +49,7 @@ static struct regmap_config hi655x_regmap_config = { |
2825 |
+ .reg_bits = 32, |
2826 |
+ .reg_stride = HI655X_STRIDE, |
2827 |
+ .val_bits = 8, |
2828 |
+- .max_register = HI655X_BUS_ADDR(0xFFF), |
2829 |
++ .max_register = HI655X_BUS_ADDR(0x400) - HI655X_STRIDE, |
2830 |
+ }; |
2831 |
+ |
2832 |
+ static struct resource pwrkey_resources[] = { |
2833 |
+diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c |
2834 |
+index c1ba0d42cbc8..e0f29b8a872d 100644 |
2835 |
+--- a/drivers/misc/cxl/main.c |
2836 |
++++ b/drivers/misc/cxl/main.c |
2837 |
+@@ -287,7 +287,7 @@ int cxl_adapter_context_get(struct cxl *adapter) |
2838 |
+ int rc; |
2839 |
+ |
2840 |
+ rc = atomic_inc_unless_negative(&adapter->contexts_num); |
2841 |
+- return rc >= 0 ? 0 : -EBUSY; |
2842 |
++ return rc ? 0 : -EBUSY; |
2843 |
+ } |
2844 |
+ |
2845 |
+ void cxl_adapter_context_put(struct cxl *adapter) |
2846 |
+diff --git a/drivers/misc/ocxl/link.c b/drivers/misc/ocxl/link.c |
2847 |
+index 88876ae8f330..a963b0a4a3c5 100644 |
2848 |
+--- a/drivers/misc/ocxl/link.c |
2849 |
++++ b/drivers/misc/ocxl/link.c |
2850 |
+@@ -136,7 +136,7 @@ static void xsl_fault_handler_bh(struct work_struct *fault_work) |
2851 |
+ int rc; |
2852 |
+ |
2853 |
+ /* |
2854 |
+- * We need to release a reference on the mm whenever exiting this |
2855 |
++ * We must release a reference on mm_users whenever exiting this |
2856 |
+ * function (taken in the memory fault interrupt handler) |
2857 |
+ */ |
2858 |
+ rc = copro_handle_mm_fault(fault->pe_data.mm, fault->dar, fault->dsisr, |
2859 |
+@@ -172,7 +172,7 @@ static void xsl_fault_handler_bh(struct work_struct *fault_work) |
2860 |
+ } |
2861 |
+ r = RESTART; |
2862 |
+ ack: |
2863 |
+- mmdrop(fault->pe_data.mm); |
2864 |
++ mmput(fault->pe_data.mm); |
2865 |
+ ack_irq(spa, r); |
2866 |
+ } |
2867 |
+ |
2868 |
+@@ -184,6 +184,7 @@ static irqreturn_t xsl_fault_handler(int irq, void *data) |
2869 |
+ struct pe_data *pe_data; |
2870 |
+ struct ocxl_process_element *pe; |
2871 |
+ int lpid, pid, tid; |
2872 |
++ bool schedule = false; |
2873 |
+ |
2874 |
+ read_irq(spa, &dsisr, &dar, &pe_handle); |
2875 |
+ trace_ocxl_fault(spa->spa_mem, pe_handle, dsisr, dar, -1); |
2876 |
+@@ -226,14 +227,19 @@ static irqreturn_t xsl_fault_handler(int irq, void *data) |
2877 |
+ } |
2878 |
+ WARN_ON(pe_data->mm->context.id != pid); |
2879 |
+ |
2880 |
+- spa->xsl_fault.pe = pe_handle; |
2881 |
+- spa->xsl_fault.dar = dar; |
2882 |
+- spa->xsl_fault.dsisr = dsisr; |
2883 |
+- spa->xsl_fault.pe_data = *pe_data; |
2884 |
+- mmgrab(pe_data->mm); /* mm count is released by bottom half */ |
2885 |
+- |
2886 |
++ if (mmget_not_zero(pe_data->mm)) { |
2887 |
++ spa->xsl_fault.pe = pe_handle; |
2888 |
++ spa->xsl_fault.dar = dar; |
2889 |
++ spa->xsl_fault.dsisr = dsisr; |
2890 |
++ spa->xsl_fault.pe_data = *pe_data; |
2891 |
++ schedule = true; |
2892 |
++ /* mm_users count released by bottom half */ |
2893 |
++ } |
2894 |
+ rcu_read_unlock(); |
2895 |
+- schedule_work(&spa->xsl_fault.fault_work); |
2896 |
++ if (schedule) |
2897 |
++ schedule_work(&spa->xsl_fault.fault_work); |
2898 |
++ else |
2899 |
++ ack_irq(spa, ADDRESS_ERROR); |
2900 |
+ return IRQ_HANDLED; |
2901 |
+ } |
2902 |
+ |
2903 |
+diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c |
2904 |
+index 56c6f79a5c5a..5f8b583c6e41 100644 |
2905 |
+--- a/drivers/misc/vmw_balloon.c |
2906 |
++++ b/drivers/misc/vmw_balloon.c |
2907 |
+@@ -341,7 +341,13 @@ static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps) |
2908 |
+ success = false; |
2909 |
+ } |
2910 |
+ |
2911 |
+- if (b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) |
2912 |
++ /* |
2913 |
++ * 2MB pages are only supported with batching. If batching is for some |
2914 |
++ * reason disabled, do not use 2MB pages, since otherwise the legacy |
2915 |
++ * mechanism is used with 2MB pages, causing a failure. |
2916 |
++ */ |
2917 |
++ if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) && |
2918 |
++ (b->capabilities & VMW_BALLOON_BATCHED_CMDS)) |
2919 |
+ b->supported_page_sizes = 2; |
2920 |
+ else |
2921 |
+ b->supported_page_sizes = 1; |
2922 |
+@@ -450,7 +456,7 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn, |
2923 |
+ |
2924 |
+ pfn32 = (u32)pfn; |
2925 |
+ if (pfn32 != pfn) |
2926 |
+- return -1; |
2927 |
++ return -EINVAL; |
2928 |
+ |
2929 |
+ STATS_INC(b->stats.lock[false]); |
2930 |
+ |
2931 |
+@@ -460,7 +466,7 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn, |
2932 |
+ |
2933 |
+ pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status); |
2934 |
+ STATS_INC(b->stats.lock_fail[false]); |
2935 |
+- return 1; |
2936 |
++ return -EIO; |
2937 |
+ } |
2938 |
+ |
2939 |
+ static int vmballoon_send_batched_lock(struct vmballoon *b, |
2940 |
+@@ -597,11 +603,12 @@ static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages, |
2941 |
+ |
2942 |
+ locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status, |
2943 |
+ target); |
2944 |
+- if (locked > 0) { |
2945 |
++ if (locked) { |
2946 |
+ STATS_INC(b->stats.refused_alloc[false]); |
2947 |
+ |
2948 |
+- if (hv_status == VMW_BALLOON_ERROR_RESET || |
2949 |
+- hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) { |
2950 |
++ if (locked == -EIO && |
2951 |
++ (hv_status == VMW_BALLOON_ERROR_RESET || |
2952 |
++ hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED)) { |
2953 |
+ vmballoon_free_page(page, false); |
2954 |
+ return -EIO; |
2955 |
+ } |
2956 |
+@@ -617,7 +624,7 @@ static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages, |
2957 |
+ } else { |
2958 |
+ vmballoon_free_page(page, false); |
2959 |
+ } |
2960 |
+- return -EIO; |
2961 |
++ return locked; |
2962 |
+ } |
2963 |
+ |
2964 |
+ /* track allocated page */ |
2965 |
+@@ -1029,29 +1036,30 @@ static void vmballoon_vmci_cleanup(struct vmballoon *b) |
2966 |
+ */ |
2967 |
+ static int vmballoon_vmci_init(struct vmballoon *b) |
2968 |
+ { |
2969 |
+- int error = 0; |
2970 |
++ unsigned long error, dummy; |
2971 |
+ |
2972 |
+- if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) != 0) { |
2973 |
+- error = vmci_doorbell_create(&b->vmci_doorbell, |
2974 |
+- VMCI_FLAG_DELAYED_CB, |
2975 |
+- VMCI_PRIVILEGE_FLAG_RESTRICTED, |
2976 |
+- vmballoon_doorbell, b); |
2977 |
+- |
2978 |
+- if (error == VMCI_SUCCESS) { |
2979 |
+- VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, |
2980 |
+- b->vmci_doorbell.context, |
2981 |
+- b->vmci_doorbell.resource, error); |
2982 |
+- STATS_INC(b->stats.doorbell_set); |
2983 |
+- } |
2984 |
+- } |
2985 |
++ if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0) |
2986 |
++ return 0; |
2987 |
+ |
2988 |
+- if (error != 0) { |
2989 |
+- vmballoon_vmci_cleanup(b); |
2990 |
++ error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB, |
2991 |
++ VMCI_PRIVILEGE_FLAG_RESTRICTED, |
2992 |
++ vmballoon_doorbell, b); |
2993 |
+ |
2994 |
+- return -EIO; |
2995 |
+- } |
2996 |
++ if (error != VMCI_SUCCESS) |
2997 |
++ goto fail; |
2998 |
++ |
2999 |
++ error = VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, b->vmci_doorbell.context, |
3000 |
++ b->vmci_doorbell.resource, dummy); |
3001 |
++ |
3002 |
++ STATS_INC(b->stats.doorbell_set); |
3003 |
++ |
3004 |
++ if (error != VMW_BALLOON_SUCCESS) |
3005 |
++ goto fail; |
3006 |
+ |
3007 |
+ return 0; |
3008 |
++fail: |
3009 |
++ vmballoon_vmci_cleanup(b); |
3010 |
++ return -EIO; |
3011 |
+ } |
3012 |
+ |
3013 |
+ /* |
3014 |
+@@ -1289,7 +1297,14 @@ static int __init vmballoon_init(void) |
3015 |
+ |
3016 |
+ return 0; |
3017 |
+ } |
3018 |
+-module_init(vmballoon_init); |
3019 |
++ |
3020 |
++/* |
3021 |
++ * Using late_initcall() instead of module_init() allows the balloon to use the |
3022 |
++ * VMCI doorbell even when the balloon is built into the kernel. Otherwise the |
3023 |
++ * VMCI is probed only after the balloon is initialized. If the balloon is used |
3024 |
++ * as a module, late_initcall() is equivalent to module_init(). |
3025 |
++ */ |
3026 |
++late_initcall(vmballoon_init); |
3027 |
+ |
3028 |
+ static void __exit vmballoon_exit(void) |
3029 |
+ { |
3030 |
+diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c |
3031 |
+index 648eb6743ed5..6edffeed9953 100644 |
3032 |
+--- a/drivers/mmc/core/queue.c |
3033 |
++++ b/drivers/mmc/core/queue.c |
3034 |
+@@ -238,10 +238,6 @@ static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req, |
3035 |
+ mmc_exit_request(mq->queue, req); |
3036 |
+ } |
3037 |
+ |
3038 |
+-/* |
3039 |
+- * We use BLK_MQ_F_BLOCKING and have only 1 hardware queue, which means requests |
3040 |
+- * will not be dispatched in parallel. |
3041 |
+- */ |
3042 |
+ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, |
3043 |
+ const struct blk_mq_queue_data *bd) |
3044 |
+ { |
3045 |
+@@ -264,7 +260,7 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, |
3046 |
+ |
3047 |
+ spin_lock_irq(q->queue_lock); |
3048 |
+ |
3049 |
+- if (mq->recovery_needed) { |
3050 |
++ if (mq->recovery_needed || mq->busy) { |
3051 |
+ spin_unlock_irq(q->queue_lock); |
3052 |
+ return BLK_STS_RESOURCE; |
3053 |
+ } |
3054 |
+@@ -291,6 +287,9 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, |
3055 |
+ break; |
3056 |
+ } |
3057 |
+ |
3058 |
++ /* Parallel dispatch of requests is not supported at the moment */ |
3059 |
++ mq->busy = true; |
3060 |
++ |
3061 |
+ mq->in_flight[issue_type] += 1; |
3062 |
+ get_card = (mmc_tot_in_flight(mq) == 1); |
3063 |
+ cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1); |
3064 |
+@@ -333,9 +332,12 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, |
3065 |
+ mq->in_flight[issue_type] -= 1; |
3066 |
+ if (mmc_tot_in_flight(mq) == 0) |
3067 |
+ put_card = true; |
3068 |
++ mq->busy = false; |
3069 |
+ spin_unlock_irq(q->queue_lock); |
3070 |
+ if (put_card) |
3071 |
+ mmc_put_card(card, &mq->ctx); |
3072 |
++ } else { |
3073 |
++ WRITE_ONCE(mq->busy, false); |
3074 |
+ } |
3075 |
+ |
3076 |
+ return ret; |
3077 |
+diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h |
3078 |
+index 17e59d50b496..9bf3c9245075 100644 |
3079 |
+--- a/drivers/mmc/core/queue.h |
3080 |
++++ b/drivers/mmc/core/queue.h |
3081 |
+@@ -81,6 +81,7 @@ struct mmc_queue { |
3082 |
+ unsigned int cqe_busy; |
3083 |
+ #define MMC_CQE_DCMD_BUSY BIT(0) |
3084 |
+ #define MMC_CQE_QUEUE_FULL BIT(1) |
3085 |
++ bool busy; |
3086 |
+ bool use_cqe; |
3087 |
+ bool recovery_needed; |
3088 |
+ bool in_recovery; |
3089 |
+diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c |
3090 |
+index d032bd63444d..4a7991151918 100644 |
3091 |
+--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c |
3092 |
++++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c |
3093 |
+@@ -45,14 +45,16 @@ |
3094 |
+ /* DM_CM_RST */ |
3095 |
+ #define RST_DTRANRST1 BIT(9) |
3096 |
+ #define RST_DTRANRST0 BIT(8) |
3097 |
+-#define RST_RESERVED_BITS GENMASK_ULL(32, 0) |
3098 |
++#define RST_RESERVED_BITS GENMASK_ULL(31, 0) |
3099 |
+ |
3100 |
+ /* DM_CM_INFO1 and DM_CM_INFO1_MASK */ |
3101 |
+ #define INFO1_CLEAR 0 |
3102 |
++#define INFO1_MASK_CLEAR GENMASK_ULL(31, 0) |
3103 |
+ #define INFO1_DTRANEND1 BIT(17) |
3104 |
+ #define INFO1_DTRANEND0 BIT(16) |
3105 |
+ |
3106 |
+ /* DM_CM_INFO2 and DM_CM_INFO2_MASK */ |
3107 |
++#define INFO2_MASK_CLEAR GENMASK_ULL(31, 0) |
3108 |
+ #define INFO2_DTRANERR1 BIT(17) |
3109 |
+ #define INFO2_DTRANERR0 BIT(16) |
3110 |
+ |
3111 |
+@@ -236,6 +238,12 @@ renesas_sdhi_internal_dmac_request_dma(struct tmio_mmc_host *host, |
3112 |
+ { |
3113 |
+ struct renesas_sdhi *priv = host_to_priv(host); |
3114 |
+ |
3115 |
++ /* Disable DMAC interrupts, we don't use them */ |
3116 |
++ renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO1_MASK, |
3117 |
++ INFO1_MASK_CLEAR); |
3118 |
++ renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO2_MASK, |
3119 |
++ INFO2_MASK_CLEAR); |
3120 |
++ |
3121 |
+ /* Each value is set to non-zero to assume "enabling" each DMA */ |
3122 |
+ host->chan_rx = host->chan_tx = (void *)0xdeadbeaf; |
3123 |
+ |
3124 |
+diff --git a/drivers/net/wireless/marvell/libertas/dev.h b/drivers/net/wireless/marvell/libertas/dev.h |
3125 |
+index dd1ee1f0af48..469134930026 100644 |
3126 |
+--- a/drivers/net/wireless/marvell/libertas/dev.h |
3127 |
++++ b/drivers/net/wireless/marvell/libertas/dev.h |
3128 |
+@@ -104,6 +104,7 @@ struct lbs_private { |
3129 |
+ u8 fw_ready; |
3130 |
+ u8 surpriseremoved; |
3131 |
+ u8 setup_fw_on_resume; |
3132 |
++ u8 power_up_on_resume; |
3133 |
+ int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb); |
3134 |
+ void (*reset_card) (struct lbs_private *priv); |
3135 |
+ int (*power_save) (struct lbs_private *priv); |
3136 |
+diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c |
3137 |
+index 2300e796c6ab..43743c26c071 100644 |
3138 |
+--- a/drivers/net/wireless/marvell/libertas/if_sdio.c |
3139 |
++++ b/drivers/net/wireless/marvell/libertas/if_sdio.c |
3140 |
+@@ -1290,15 +1290,23 @@ static void if_sdio_remove(struct sdio_func *func) |
3141 |
+ static int if_sdio_suspend(struct device *dev) |
3142 |
+ { |
3143 |
+ struct sdio_func *func = dev_to_sdio_func(dev); |
3144 |
+- int ret; |
3145 |
+ struct if_sdio_card *card = sdio_get_drvdata(func); |
3146 |
++ struct lbs_private *priv = card->priv; |
3147 |
++ int ret; |
3148 |
+ |
3149 |
+ mmc_pm_flag_t flags = sdio_get_host_pm_caps(func); |
3150 |
++ priv->power_up_on_resume = false; |
3151 |
+ |
3152 |
+ /* If we're powered off anyway, just let the mmc layer remove the |
3153 |
+ * card. */ |
3154 |
+- if (!lbs_iface_active(card->priv)) |
3155 |
+- return -ENOSYS; |
3156 |
++ if (!lbs_iface_active(priv)) { |
3157 |
++ if (priv->fw_ready) { |
3158 |
++ priv->power_up_on_resume = true; |
3159 |
++ if_sdio_power_off(card); |
3160 |
++ } |
3161 |
++ |
3162 |
++ return 0; |
3163 |
++ } |
3164 |
+ |
3165 |
+ dev_info(dev, "%s: suspend: PM flags = 0x%x\n", |
3166 |
+ sdio_func_id(func), flags); |
3167 |
+@@ -1306,9 +1314,14 @@ static int if_sdio_suspend(struct device *dev) |
3168 |
+ /* If we aren't being asked to wake on anything, we should bail out |
3169 |
+ * and let the SD stack power down the card. |
3170 |
+ */ |
3171 |
+- if (card->priv->wol_criteria == EHS_REMOVE_WAKEUP) { |
3172 |
++ if (priv->wol_criteria == EHS_REMOVE_WAKEUP) { |
3173 |
+ dev_info(dev, "Suspend without wake params -- powering down card\n"); |
3174 |
+- return -ENOSYS; |
3175 |
++ if (priv->fw_ready) { |
3176 |
++ priv->power_up_on_resume = true; |
3177 |
++ if_sdio_power_off(card); |
3178 |
++ } |
3179 |
++ |
3180 |
++ return 0; |
3181 |
+ } |
3182 |
+ |
3183 |
+ if (!(flags & MMC_PM_KEEP_POWER)) { |
3184 |
+@@ -1321,7 +1334,7 @@ static int if_sdio_suspend(struct device *dev) |
3185 |
+ if (ret) |
3186 |
+ return ret; |
3187 |
+ |
3188 |
+- ret = lbs_suspend(card->priv); |
3189 |
++ ret = lbs_suspend(priv); |
3190 |
+ if (ret) |
3191 |
+ return ret; |
3192 |
+ |
3193 |
+@@ -1336,6 +1349,11 @@ static int if_sdio_resume(struct device *dev) |
3194 |
+ |
3195 |
+ dev_info(dev, "%s: resume: we're back\n", sdio_func_id(func)); |
3196 |
+ |
3197 |
++ if (card->priv->power_up_on_resume) { |
3198 |
++ if_sdio_power_on(card); |
3199 |
++ wait_event(card->pwron_waitq, card->priv->fw_ready); |
3200 |
++ } |
3201 |
++ |
3202 |
+ ret = lbs_resume(card->priv); |
3203 |
+ |
3204 |
+ return ret; |
3205 |
+diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c |
3206 |
+index 27902a8799b1..8aae6dcc839f 100644 |
3207 |
+--- a/drivers/nvdimm/bus.c |
3208 |
++++ b/drivers/nvdimm/bus.c |
3209 |
+@@ -812,9 +812,9 @@ u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd, |
3210 |
+ * overshoots the remainder by 4 bytes, assume it was |
3211 |
+ * including 'status'. |
3212 |
+ */ |
3213 |
+- if (out_field[1] - 8 == remainder) |
3214 |
++ if (out_field[1] - 4 == remainder) |
3215 |
+ return remainder; |
3216 |
+- return out_field[1] - 4; |
3217 |
++ return out_field[1] - 8; |
3218 |
+ } else if (cmd == ND_CMD_CALL) { |
3219 |
+ struct nd_cmd_pkg *pkg = (struct nd_cmd_pkg *) in_field; |
3220 |
+ |
3221 |
+diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c |
3222 |
+index 8d348b22ba45..863cabc35215 100644 |
3223 |
+--- a/drivers/nvdimm/dimm_devs.c |
3224 |
++++ b/drivers/nvdimm/dimm_devs.c |
3225 |
+@@ -536,6 +536,37 @@ resource_size_t nd_blk_available_dpa(struct nd_region *nd_region) |
3226 |
+ return info.available; |
3227 |
+ } |
3228 |
+ |
3229 |
++/** |
3230 |
++ * nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max |
3231 |
++ * contiguous unallocated dpa range. |
3232 |
++ * @nd_region: constrain available space check to this reference region |
3233 |
++ * @nd_mapping: container of dpa-resource-root + labels |
3234 |
++ */ |
3235 |
++resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region, |
3236 |
++ struct nd_mapping *nd_mapping) |
3237 |
++{ |
3238 |
++ struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); |
3239 |
++ struct nvdimm_bus *nvdimm_bus; |
3240 |
++ resource_size_t max = 0; |
3241 |
++ struct resource *res; |
3242 |
++ |
3243 |
++ /* if a dimm is disabled the available capacity is zero */ |
3244 |
++ if (!ndd) |
3245 |
++ return 0; |
3246 |
++ |
3247 |
++ nvdimm_bus = walk_to_nvdimm_bus(ndd->dev); |
3248 |
++ if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm)) |
3249 |
++ return 0; |
3250 |
++ for_each_dpa_resource(ndd, res) { |
3251 |
++ if (strcmp(res->name, "pmem-reserve") != 0) |
3252 |
++ continue; |
3253 |
++ if (resource_size(res) > max) |
3254 |
++ max = resource_size(res); |
3255 |
++ } |
3256 |
++ release_free_pmem(nvdimm_bus, nd_mapping); |
3257 |
++ return max; |
3258 |
++} |
3259 |
++ |
3260 |
+ /** |
3261 |
+ * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa |
3262 |
+ * @nd_mapping: container of dpa-resource-root + labels |
3263 |
+diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c |
3264 |
+index 28afdd668905..4525d8ef6022 100644 |
3265 |
+--- a/drivers/nvdimm/namespace_devs.c |
3266 |
++++ b/drivers/nvdimm/namespace_devs.c |
3267 |
+@@ -799,7 +799,7 @@ static int merge_dpa(struct nd_region *nd_region, |
3268 |
+ return 0; |
3269 |
+ } |
3270 |
+ |
3271 |
+-static int __reserve_free_pmem(struct device *dev, void *data) |
3272 |
++int __reserve_free_pmem(struct device *dev, void *data) |
3273 |
+ { |
3274 |
+ struct nvdimm *nvdimm = data; |
3275 |
+ struct nd_region *nd_region; |
3276 |
+@@ -836,7 +836,7 @@ static int __reserve_free_pmem(struct device *dev, void *data) |
3277 |
+ return 0; |
3278 |
+ } |
3279 |
+ |
3280 |
+-static void release_free_pmem(struct nvdimm_bus *nvdimm_bus, |
3281 |
++void release_free_pmem(struct nvdimm_bus *nvdimm_bus, |
3282 |
+ struct nd_mapping *nd_mapping) |
3283 |
+ { |
3284 |
+ struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); |
3285 |
+@@ -1032,7 +1032,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val) |
3286 |
+ |
3287 |
+ allocated += nvdimm_allocated_dpa(ndd, &label_id); |
3288 |
+ } |
3289 |
+- available = nd_region_available_dpa(nd_region); |
3290 |
++ available = nd_region_allocatable_dpa(nd_region); |
3291 |
+ |
3292 |
+ if (val > available + allocated) |
3293 |
+ return -ENOSPC; |
3294 |
+diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h |
3295 |
+index 79274ead54fb..ac68072fb8cd 100644 |
3296 |
+--- a/drivers/nvdimm/nd-core.h |
3297 |
++++ b/drivers/nvdimm/nd-core.h |
3298 |
+@@ -100,6 +100,14 @@ struct nd_region; |
3299 |
+ struct nvdimm_drvdata; |
3300 |
+ struct nd_mapping; |
3301 |
+ void nd_mapping_free_labels(struct nd_mapping *nd_mapping); |
3302 |
++ |
3303 |
++int __reserve_free_pmem(struct device *dev, void *data); |
3304 |
++void release_free_pmem(struct nvdimm_bus *nvdimm_bus, |
3305 |
++ struct nd_mapping *nd_mapping); |
3306 |
++ |
3307 |
++resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region, |
3308 |
++ struct nd_mapping *nd_mapping); |
3309 |
++resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region); |
3310 |
+ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region, |
3311 |
+ struct nd_mapping *nd_mapping, resource_size_t *overlap); |
3312 |
+ resource_size_t nd_blk_available_dpa(struct nd_region *nd_region); |
3313 |
+diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c |
3314 |
+index ec3543b83330..c30d5af02cc2 100644 |
3315 |
+--- a/drivers/nvdimm/region_devs.c |
3316 |
++++ b/drivers/nvdimm/region_devs.c |
3317 |
+@@ -389,6 +389,30 @@ resource_size_t nd_region_available_dpa(struct nd_region *nd_region) |
3318 |
+ return available; |
3319 |
+ } |
3320 |
+ |
3321 |
++resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region) |
3322 |
++{ |
3323 |
++ resource_size_t available = 0; |
3324 |
++ int i; |
3325 |
++ |
3326 |
++ if (is_memory(&nd_region->dev)) |
3327 |
++ available = PHYS_ADDR_MAX; |
3328 |
++ |
3329 |
++ WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev)); |
3330 |
++ for (i = 0; i < nd_region->ndr_mappings; i++) { |
3331 |
++ struct nd_mapping *nd_mapping = &nd_region->mapping[i]; |
3332 |
++ |
3333 |
++ if (is_memory(&nd_region->dev)) |
3334 |
++ available = min(available, |
3335 |
++ nd_pmem_max_contiguous_dpa(nd_region, |
3336 |
++ nd_mapping)); |
3337 |
++ else if (is_nd_blk(&nd_region->dev)) |
3338 |
++ available += nd_blk_available_dpa(nd_region); |
3339 |
++ } |
3340 |
++ if (is_memory(&nd_region->dev)) |
3341 |
++ return available * nd_region->ndr_mappings; |
3342 |
++ return available; |
3343 |
++} |
3344 |
++ |
3345 |
+ static ssize_t available_size_show(struct device *dev, |
3346 |
+ struct device_attribute *attr, char *buf) |
3347 |
+ { |
3348 |
+diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c |
3349 |
+index 665da3c8fbce..f45798679e3c 100644 |
3350 |
+--- a/drivers/pwm/pwm-omap-dmtimer.c |
3351 |
++++ b/drivers/pwm/pwm-omap-dmtimer.c |
3352 |
+@@ -264,8 +264,9 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev) |
3353 |
+ |
3354 |
+ timer_pdata = dev_get_platdata(&timer_pdev->dev); |
3355 |
+ if (!timer_pdata) { |
3356 |
+- dev_err(&pdev->dev, "dmtimer pdata structure NULL\n"); |
3357 |
+- ret = -EINVAL; |
3358 |
++ dev_dbg(&pdev->dev, |
3359 |
++ "dmtimer pdata structure NULL, deferring probe\n"); |
3360 |
++ ret = -EPROBE_DEFER; |
3361 |
+ goto put; |
3362 |
+ } |
3363 |
+ |
3364 |
+diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c |
3365 |
+index 4c22cb395040..f7b8a86fa5c5 100644 |
3366 |
+--- a/drivers/pwm/pwm-tiehrpwm.c |
3367 |
++++ b/drivers/pwm/pwm-tiehrpwm.c |
3368 |
+@@ -33,10 +33,6 @@ |
3369 |
+ #define TBCTL 0x00 |
3370 |
+ #define TBPRD 0x0A |
3371 |
+ |
3372 |
+-#define TBCTL_RUN_MASK (BIT(15) | BIT(14)) |
3373 |
+-#define TBCTL_STOP_NEXT 0 |
3374 |
+-#define TBCTL_STOP_ON_CYCLE BIT(14) |
3375 |
+-#define TBCTL_FREE_RUN (BIT(15) | BIT(14)) |
3376 |
+ #define TBCTL_PRDLD_MASK BIT(3) |
3377 |
+ #define TBCTL_PRDLD_SHDW 0 |
3378 |
+ #define TBCTL_PRDLD_IMDT BIT(3) |
3379 |
+@@ -360,7 +356,7 @@ static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) |
3380 |
+ /* Channels polarity can be configured from action qualifier module */ |
3381 |
+ configure_polarity(pc, pwm->hwpwm); |
3382 |
+ |
3383 |
+- /* Enable TBCLK before enabling PWM device */ |
3384 |
++ /* Enable TBCLK */ |
3385 |
+ ret = clk_enable(pc->tbclk); |
3386 |
+ if (ret) { |
3387 |
+ dev_err(chip->dev, "Failed to enable TBCLK for %s: %d\n", |
3388 |
+@@ -368,9 +364,6 @@ static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) |
3389 |
+ return ret; |
3390 |
+ } |
3391 |
+ |
3392 |
+- /* Enable time counter for free_run */ |
3393 |
+- ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_RUN_MASK, TBCTL_FREE_RUN); |
3394 |
+- |
3395 |
+ return 0; |
3396 |
+ } |
3397 |
+ |
3398 |
+@@ -388,6 +381,8 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) |
3399 |
+ aqcsfrc_mask = AQCSFRC_CSFA_MASK; |
3400 |
+ } |
3401 |
+ |
3402 |
++ /* Update shadow register first before modifying active register */ |
3403 |
++ ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val); |
3404 |
+ /* |
3405 |
+ * Changes to immediate action on Action Qualifier. This puts |
3406 |
+ * Action Qualifier control on PWM output from next TBCLK |
3407 |
+@@ -400,9 +395,6 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) |
3408 |
+ /* Disabling TBCLK on PWM disable */ |
3409 |
+ clk_disable(pc->tbclk); |
3410 |
+ |
3411 |
+- /* Stop Time base counter */ |
3412 |
+- ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_RUN_MASK, TBCTL_STOP_NEXT); |
3413 |
+- |
3414 |
+ /* Disable clock on PWM disable */ |
3415 |
+ pm_runtime_put_sync(chip->dev); |
3416 |
+ } |
3417 |
+diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c |
3418 |
+index 39086398833e..6a7b804c3074 100644 |
3419 |
+--- a/drivers/rtc/rtc-omap.c |
3420 |
++++ b/drivers/rtc/rtc-omap.c |
3421 |
+@@ -861,13 +861,6 @@ static int omap_rtc_probe(struct platform_device *pdev) |
3422 |
+ goto err; |
3423 |
+ } |
3424 |
+ |
3425 |
+- if (rtc->is_pmic_controller) { |
3426 |
+- if (!pm_power_off) { |
3427 |
+- omap_rtc_power_off_rtc = rtc; |
3428 |
+- pm_power_off = omap_rtc_power_off; |
3429 |
+- } |
3430 |
+- } |
3431 |
+- |
3432 |
+ /* Support ext_wakeup pinconf */ |
3433 |
+ rtc_pinctrl_desc.name = dev_name(&pdev->dev); |
3434 |
+ |
3435 |
+@@ -880,12 +873,21 @@ static int omap_rtc_probe(struct platform_device *pdev) |
3436 |
+ |
3437 |
+ ret = rtc_register_device(rtc->rtc); |
3438 |
+ if (ret) |
3439 |
+- goto err; |
3440 |
++ goto err_deregister_pinctrl; |
3441 |
+ |
3442 |
+ rtc_nvmem_register(rtc->rtc, &omap_rtc_nvmem_config); |
3443 |
+ |
3444 |
++ if (rtc->is_pmic_controller) { |
3445 |
++ if (!pm_power_off) { |
3446 |
++ omap_rtc_power_off_rtc = rtc; |
3447 |
++ pm_power_off = omap_rtc_power_off; |
3448 |
++ } |
3449 |
++ } |
3450 |
++ |
3451 |
+ return 0; |
3452 |
+ |
3453 |
++err_deregister_pinctrl: |
3454 |
++ pinctrl_unregister(rtc->pctldev); |
3455 |
+ err: |
3456 |
+ clk_disable_unprepare(rtc->clk); |
3457 |
+ device_init_wakeup(&pdev->dev, false); |
3458 |
+diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c |
3459 |
+index f3dad6fcdc35..a568f35522f9 100644 |
3460 |
+--- a/drivers/spi/spi-cadence.c |
3461 |
++++ b/drivers/spi/spi-cadence.c |
3462 |
+@@ -319,7 +319,7 @@ static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi) |
3463 |
+ */ |
3464 |
+ if (cdns_spi_read(xspi, CDNS_SPI_ISR) & |
3465 |
+ CDNS_SPI_IXR_TXFULL) |
3466 |
+- usleep_range(10, 20); |
3467 |
++ udelay(10); |
3468 |
+ |
3469 |
+ if (xspi->txbuf) |
3470 |
+ cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++); |
3471 |
+diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c |
3472 |
+index 577084bb911b..a02099c90c5c 100644 |
3473 |
+--- a/drivers/spi/spi-davinci.c |
3474 |
++++ b/drivers/spi/spi-davinci.c |
3475 |
+@@ -217,7 +217,7 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value) |
3476 |
+ pdata = &dspi->pdata; |
3477 |
+ |
3478 |
+ /* program delay transfers if tx_delay is non zero */ |
3479 |
+- if (spicfg->wdelay) |
3480 |
++ if (spicfg && spicfg->wdelay) |
3481 |
+ spidat1 |= SPIDAT1_WDEL; |
3482 |
+ |
3483 |
+ /* |
3484 |
+diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c |
3485 |
+index 0630962ce442..f225f7c99a32 100644 |
3486 |
+--- a/drivers/spi/spi-fsl-dspi.c |
3487 |
++++ b/drivers/spi/spi-fsl-dspi.c |
3488 |
+@@ -1029,30 +1029,30 @@ static int dspi_probe(struct platform_device *pdev) |
3489 |
+ goto out_master_put; |
3490 |
+ } |
3491 |
+ |
3492 |
++ dspi->clk = devm_clk_get(&pdev->dev, "dspi"); |
3493 |
++ if (IS_ERR(dspi->clk)) { |
3494 |
++ ret = PTR_ERR(dspi->clk); |
3495 |
++ dev_err(&pdev->dev, "unable to get clock\n"); |
3496 |
++ goto out_master_put; |
3497 |
++ } |
3498 |
++ ret = clk_prepare_enable(dspi->clk); |
3499 |
++ if (ret) |
3500 |
++ goto out_master_put; |
3501 |
++ |
3502 |
+ dspi_init(dspi); |
3503 |
+ dspi->irq = platform_get_irq(pdev, 0); |
3504 |
+ if (dspi->irq < 0) { |
3505 |
+ dev_err(&pdev->dev, "can't get platform irq\n"); |
3506 |
+ ret = dspi->irq; |
3507 |
+- goto out_master_put; |
3508 |
++ goto out_clk_put; |
3509 |
+ } |
3510 |
+ |
3511 |
+ ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt, 0, |
3512 |
+ pdev->name, dspi); |
3513 |
+ if (ret < 0) { |
3514 |
+ dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n"); |
3515 |
+- goto out_master_put; |
3516 |
+- } |
3517 |
+- |
3518 |
+- dspi->clk = devm_clk_get(&pdev->dev, "dspi"); |
3519 |
+- if (IS_ERR(dspi->clk)) { |
3520 |
+- ret = PTR_ERR(dspi->clk); |
3521 |
+- dev_err(&pdev->dev, "unable to get clock\n"); |
3522 |
+- goto out_master_put; |
3523 |
++ goto out_clk_put; |
3524 |
+ } |
3525 |
+- ret = clk_prepare_enable(dspi->clk); |
3526 |
+- if (ret) |
3527 |
+- goto out_master_put; |
3528 |
+ |
3529 |
+ if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) { |
3530 |
+ ret = dspi_request_dma(dspi, res->start); |
3531 |
+diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c |
3532 |
+index 0b2d60d30f69..14f4ea59caff 100644 |
3533 |
+--- a/drivers/spi/spi-pxa2xx.c |
3534 |
++++ b/drivers/spi/spi-pxa2xx.c |
3535 |
+@@ -1391,6 +1391,10 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = { |
3536 |
+ { PCI_VDEVICE(INTEL, 0x31c2), LPSS_BXT_SSP }, |
3537 |
+ { PCI_VDEVICE(INTEL, 0x31c4), LPSS_BXT_SSP }, |
3538 |
+ { PCI_VDEVICE(INTEL, 0x31c6), LPSS_BXT_SSP }, |
3539 |
++ /* ICL-LP */ |
3540 |
++ { PCI_VDEVICE(INTEL, 0x34aa), LPSS_CNL_SSP }, |
3541 |
++ { PCI_VDEVICE(INTEL, 0x34ab), LPSS_CNL_SSP }, |
3542 |
++ { PCI_VDEVICE(INTEL, 0x34fb), LPSS_CNL_SSP }, |
3543 |
+ /* APL */ |
3544 |
+ { PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP }, |
3545 |
+ { PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP }, |
3546 |
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c |
3547 |
+index 9c14a453f73c..80bb56facfb6 100644 |
3548 |
+--- a/drivers/tty/serial/serial_core.c |
3549 |
++++ b/drivers/tty/serial/serial_core.c |
3550 |
+@@ -182,6 +182,7 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state, |
3551 |
+ { |
3552 |
+ struct uart_port *uport = uart_port_check(state); |
3553 |
+ unsigned long page; |
3554 |
++ unsigned long flags = 0; |
3555 |
+ int retval = 0; |
3556 |
+ |
3557 |
+ if (uport->type == PORT_UNKNOWN) |
3558 |
+@@ -196,15 +197,18 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state, |
3559 |
+ * Initialise and allocate the transmit and temporary |
3560 |
+ * buffer. |
3561 |
+ */ |
3562 |
+- if (!state->xmit.buf) { |
3563 |
+- /* This is protected by the per port mutex */ |
3564 |
+- page = get_zeroed_page(GFP_KERNEL); |
3565 |
+- if (!page) |
3566 |
+- return -ENOMEM; |
3567 |
++ page = get_zeroed_page(GFP_KERNEL); |
3568 |
++ if (!page) |
3569 |
++ return -ENOMEM; |
3570 |
+ |
3571 |
++ uart_port_lock(state, flags); |
3572 |
++ if (!state->xmit.buf) { |
3573 |
+ state->xmit.buf = (unsigned char *) page; |
3574 |
+ uart_circ_clear(&state->xmit); |
3575 |
++ } else { |
3576 |
++ free_page(page); |
3577 |
+ } |
3578 |
++ uart_port_unlock(uport, flags); |
3579 |
+ |
3580 |
+ retval = uport->ops->startup(uport); |
3581 |
+ if (retval == 0) { |
3582 |
+@@ -263,6 +267,7 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state) |
3583 |
+ { |
3584 |
+ struct uart_port *uport = uart_port_check(state); |
3585 |
+ struct tty_port *port = &state->port; |
3586 |
++ unsigned long flags = 0; |
3587 |
+ |
3588 |
+ /* |
3589 |
+ * Set the TTY IO error marker |
3590 |
+@@ -295,10 +300,12 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state) |
3591 |
+ /* |
3592 |
+ * Free the transmit buffer page. |
3593 |
+ */ |
3594 |
++ uart_port_lock(state, flags); |
3595 |
+ if (state->xmit.buf) { |
3596 |
+ free_page((unsigned long)state->xmit.buf); |
3597 |
+ state->xmit.buf = NULL; |
3598 |
+ } |
3599 |
++ uart_port_unlock(uport, flags); |
3600 |
+ } |
3601 |
+ |
3602 |
+ /** |
3603 |
+diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c |
3604 |
+index 609438d2465b..9ae2fb1344de 100644 |
3605 |
+--- a/drivers/video/fbdev/core/fbmem.c |
3606 |
++++ b/drivers/video/fbdev/core/fbmem.c |
3607 |
+@@ -1704,12 +1704,12 @@ static int do_register_framebuffer(struct fb_info *fb_info) |
3608 |
+ return 0; |
3609 |
+ } |
3610 |
+ |
3611 |
+-static int do_unregister_framebuffer(struct fb_info *fb_info) |
3612 |
++static int unbind_console(struct fb_info *fb_info) |
3613 |
+ { |
3614 |
+ struct fb_event event; |
3615 |
+- int i, ret = 0; |
3616 |
++ int ret; |
3617 |
++ int i = fb_info->node; |
3618 |
+ |
3619 |
+- i = fb_info->node; |
3620 |
+ if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info) |
3621 |
+ return -EINVAL; |
3622 |
+ |
3623 |
+@@ -1724,17 +1724,29 @@ static int do_unregister_framebuffer(struct fb_info *fb_info) |
3624 |
+ unlock_fb_info(fb_info); |
3625 |
+ console_unlock(); |
3626 |
+ |
3627 |
++ return ret; |
3628 |
++} |
3629 |
++ |
3630 |
++static int __unlink_framebuffer(struct fb_info *fb_info); |
3631 |
++ |
3632 |
++static int do_unregister_framebuffer(struct fb_info *fb_info) |
3633 |
++{ |
3634 |
++ struct fb_event event; |
3635 |
++ int ret; |
3636 |
++ |
3637 |
++ ret = unbind_console(fb_info); |
3638 |
++ |
3639 |
+ if (ret) |
3640 |
+ return -EINVAL; |
3641 |
+ |
3642 |
+ pm_vt_switch_unregister(fb_info->dev); |
3643 |
+ |
3644 |
+- unlink_framebuffer(fb_info); |
3645 |
++ __unlink_framebuffer(fb_info); |
3646 |
+ if (fb_info->pixmap.addr && |
3647 |
+ (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT)) |
3648 |
+ kfree(fb_info->pixmap.addr); |
3649 |
+ fb_destroy_modelist(&fb_info->modelist); |
3650 |
+- registered_fb[i] = NULL; |
3651 |
++ registered_fb[fb_info->node] = NULL; |
3652 |
+ num_registered_fb--; |
3653 |
+ fb_cleanup_device(fb_info); |
3654 |
+ event.info = fb_info; |
3655 |
+@@ -1747,7 +1759,7 @@ static int do_unregister_framebuffer(struct fb_info *fb_info) |
3656 |
+ return 0; |
3657 |
+ } |
3658 |
+ |
3659 |
+-int unlink_framebuffer(struct fb_info *fb_info) |
3660 |
++static int __unlink_framebuffer(struct fb_info *fb_info) |
3661 |
+ { |
3662 |
+ int i; |
3663 |
+ |
3664 |
+@@ -1759,6 +1771,20 @@ int unlink_framebuffer(struct fb_info *fb_info) |
3665 |
+ device_destroy(fb_class, MKDEV(FB_MAJOR, i)); |
3666 |
+ fb_info->dev = NULL; |
3667 |
+ } |
3668 |
++ |
3669 |
++ return 0; |
3670 |
++} |
3671 |
++ |
3672 |
++int unlink_framebuffer(struct fb_info *fb_info) |
3673 |
++{ |
3674 |
++ int ret; |
3675 |
++ |
3676 |
++ ret = __unlink_framebuffer(fb_info); |
3677 |
++ if (ret) |
3678 |
++ return ret; |
3679 |
++ |
3680 |
++ unbind_console(fb_info); |
3681 |
++ |
3682 |
+ return 0; |
3683 |
+ } |
3684 |
+ EXPORT_SYMBOL(unlink_framebuffer); |
3685 |
+diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c |
3686 |
+index f365d4862015..862e8027acf6 100644 |
3687 |
+--- a/drivers/video/fbdev/udlfb.c |
3688 |
++++ b/drivers/video/fbdev/udlfb.c |
3689 |
+@@ -27,6 +27,7 @@ |
3690 |
+ #include <linux/slab.h> |
3691 |
+ #include <linux/prefetch.h> |
3692 |
+ #include <linux/delay.h> |
3693 |
++#include <asm/unaligned.h> |
3694 |
+ #include <video/udlfb.h> |
3695 |
+ #include "edid.h" |
3696 |
+ |
3697 |
+@@ -450,17 +451,17 @@ static void dlfb_compress_hline( |
3698 |
+ raw_pixels_count_byte = cmd++; /* we'll know this later */ |
3699 |
+ raw_pixel_start = pixel; |
3700 |
+ |
3701 |
+- cmd_pixel_end = pixel + min(MAX_CMD_PIXELS + 1, |
3702 |
+- min((int)(pixel_end - pixel), |
3703 |
+- (int)(cmd_buffer_end - cmd) / BPP)); |
3704 |
++ cmd_pixel_end = pixel + min3(MAX_CMD_PIXELS + 1UL, |
3705 |
++ (unsigned long)(pixel_end - pixel), |
3706 |
++ (unsigned long)(cmd_buffer_end - 1 - cmd) / BPP); |
3707 |
+ |
3708 |
+- prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * BPP); |
3709 |
++ prefetch_range((void *) pixel, (u8 *)cmd_pixel_end - (u8 *)pixel); |
3710 |
+ |
3711 |
+ while (pixel < cmd_pixel_end) { |
3712 |
+ const uint16_t * const repeating_pixel = pixel; |
3713 |
+ |
3714 |
+- *cmd++ = *pixel >> 8; |
3715 |
+- *cmd++ = *pixel; |
3716 |
++ put_unaligned_be16(*pixel, cmd); |
3717 |
++ cmd += 2; |
3718 |
+ pixel++; |
3719 |
+ |
3720 |
+ if (unlikely((pixel < cmd_pixel_end) && |
3721 |
+@@ -486,13 +487,16 @@ static void dlfb_compress_hline( |
3722 |
+ if (pixel > raw_pixel_start) { |
3723 |
+ /* finalize last RAW span */ |
3724 |
+ *raw_pixels_count_byte = (pixel-raw_pixel_start) & 0xFF; |
3725 |
++ } else { |
3726 |
++ /* undo unused byte */ |
3727 |
++ cmd--; |
3728 |
+ } |
3729 |
+ |
3730 |
+ *cmd_pixels_count_byte = (pixel - cmd_pixel_start) & 0xFF; |
3731 |
+- dev_addr += (pixel - cmd_pixel_start) * BPP; |
3732 |
++ dev_addr += (u8 *)pixel - (u8 *)cmd_pixel_start; |
3733 |
+ } |
3734 |
+ |
3735 |
+- if (cmd_buffer_end <= MIN_RLX_CMD_BYTES + cmd) { |
3736 |
++ if (cmd_buffer_end - MIN_RLX_CMD_BYTES <= cmd) { |
3737 |
+ /* Fill leftover bytes with no-ops */ |
3738 |
+ if (cmd_buffer_end > cmd) |
3739 |
+ memset(cmd, 0xAF, cmd_buffer_end - cmd); |
3740 |
+@@ -610,8 +614,11 @@ static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y, |
3741 |
+ } |
3742 |
+ |
3743 |
+ if (cmd > (char *) urb->transfer_buffer) { |
3744 |
++ int len; |
3745 |
++ if (cmd < (char *) urb->transfer_buffer + urb->transfer_buffer_length) |
3746 |
++ *cmd++ = 0xAF; |
3747 |
+ /* Send partial buffer remaining before exiting */ |
3748 |
+- int len = cmd - (char *) urb->transfer_buffer; |
3749 |
++ len = cmd - (char *) urb->transfer_buffer; |
3750 |
+ ret = dlfb_submit_urb(dlfb, urb, len); |
3751 |
+ bytes_sent += len; |
3752 |
+ } else |
3753 |
+@@ -735,8 +742,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info, |
3754 |
+ } |
3755 |
+ |
3756 |
+ if (cmd > (char *) urb->transfer_buffer) { |
3757 |
++ int len; |
3758 |
++ if (cmd < (char *) urb->transfer_buffer + urb->transfer_buffer_length) |
3759 |
++ *cmd++ = 0xAF; |
3760 |
+ /* Send partial buffer remaining before exiting */ |
3761 |
+- int len = cmd - (char *) urb->transfer_buffer; |
3762 |
++ len = cmd - (char *) urb->transfer_buffer; |
3763 |
+ dlfb_submit_urb(dlfb, urb, len); |
3764 |
+ bytes_sent += len; |
3765 |
+ } else |
3766 |
+@@ -922,14 +932,6 @@ static void dlfb_free(struct kref *kref) |
3767 |
+ kfree(dlfb); |
3768 |
+ } |
3769 |
+ |
3770 |
+-static void dlfb_release_urb_work(struct work_struct *work) |
3771 |
+-{ |
3772 |
+- struct urb_node *unode = container_of(work, struct urb_node, |
3773 |
+- release_urb_work.work); |
3774 |
+- |
3775 |
+- up(&unode->dlfb->urbs.limit_sem); |
3776 |
+-} |
3777 |
+- |
3778 |
+ static void dlfb_free_framebuffer(struct dlfb_data *dlfb) |
3779 |
+ { |
3780 |
+ struct fb_info *info = dlfb->info; |
3781 |
+@@ -1039,10 +1041,25 @@ static int dlfb_ops_set_par(struct fb_info *info) |
3782 |
+ int result; |
3783 |
+ u16 *pix_framebuffer; |
3784 |
+ int i; |
3785 |
++ struct fb_var_screeninfo fvs; |
3786 |
++ |
3787 |
++ /* clear the activate field because it causes spurious miscompares */ |
3788 |
++ fvs = info->var; |
3789 |
++ fvs.activate = 0; |
3790 |
++ fvs.vmode &= ~FB_VMODE_SMOOTH_XPAN; |
3791 |
++ |
3792 |
++ if (!memcmp(&dlfb->current_mode, &fvs, sizeof(struct fb_var_screeninfo))) |
3793 |
++ return 0; |
3794 |
+ |
3795 |
+ result = dlfb_set_video_mode(dlfb, &info->var); |
3796 |
+ |
3797 |
+- if ((result == 0) && (dlfb->fb_count == 0)) { |
3798 |
++ if (result) |
3799 |
++ return result; |
3800 |
++ |
3801 |
++ dlfb->current_mode = fvs; |
3802 |
++ info->fix.line_length = info->var.xres * (info->var.bits_per_pixel / 8); |
3803 |
++ |
3804 |
++ if (dlfb->fb_count == 0) { |
3805 |
+ |
3806 |
+ /* paint greenscreen */ |
3807 |
+ |
3808 |
+@@ -1054,7 +1071,7 @@ static int dlfb_ops_set_par(struct fb_info *info) |
3809 |
+ info->screen_base); |
3810 |
+ } |
3811 |
+ |
3812 |
+- return result; |
3813 |
++ return 0; |
3814 |
+ } |
3815 |
+ |
3816 |
+ /* To fonzi the jukebox (e.g. make blanking changes take effect) */ |
3817 |
+@@ -1649,7 +1666,8 @@ static void dlfb_init_framebuffer_work(struct work_struct *work) |
3818 |
+ dlfb->info = info; |
3819 |
+ info->par = dlfb; |
3820 |
+ info->pseudo_palette = dlfb->pseudo_palette; |
3821 |
+- info->fbops = &dlfb_ops; |
3822 |
++ dlfb->ops = dlfb_ops; |
3823 |
++ info->fbops = &dlfb->ops; |
3824 |
+ |
3825 |
+ retval = fb_alloc_cmap(&info->cmap, 256, 0); |
3826 |
+ if (retval < 0) { |
3827 |
+@@ -1789,14 +1807,7 @@ static void dlfb_urb_completion(struct urb *urb) |
3828 |
+ dlfb->urbs.available++; |
3829 |
+ spin_unlock_irqrestore(&dlfb->urbs.lock, flags); |
3830 |
+ |
3831 |
+- /* |
3832 |
+- * When using fb_defio, we deadlock if up() is called |
3833 |
+- * while another is waiting. So queue to another process. |
3834 |
+- */ |
3835 |
+- if (fb_defio) |
3836 |
+- schedule_delayed_work(&unode->release_urb_work, 0); |
3837 |
+- else |
3838 |
+- up(&dlfb->urbs.limit_sem); |
3839 |
++ up(&dlfb->urbs.limit_sem); |
3840 |
+ } |
3841 |
+ |
3842 |
+ static void dlfb_free_urb_list(struct dlfb_data *dlfb) |
3843 |
+@@ -1805,16 +1816,11 @@ static void dlfb_free_urb_list(struct dlfb_data *dlfb) |
3844 |
+ struct list_head *node; |
3845 |
+ struct urb_node *unode; |
3846 |
+ struct urb *urb; |
3847 |
+- int ret; |
3848 |
+ unsigned long flags; |
3849 |
+ |
3850 |
+ /* keep waiting and freeing, until we've got 'em all */ |
3851 |
+ while (count--) { |
3852 |
+- |
3853 |
+- /* Getting interrupted means a leak, but ok at disconnect */ |
3854 |
+- ret = down_interruptible(&dlfb->urbs.limit_sem); |
3855 |
+- if (ret) |
3856 |
+- break; |
3857 |
++ down(&dlfb->urbs.limit_sem); |
3858 |
+ |
3859 |
+ spin_lock_irqsave(&dlfb->urbs.lock, flags); |
3860 |
+ |
3861 |
+@@ -1838,25 +1844,27 @@ static void dlfb_free_urb_list(struct dlfb_data *dlfb) |
3862 |
+ |
3863 |
+ static int dlfb_alloc_urb_list(struct dlfb_data *dlfb, int count, size_t size) |
3864 |
+ { |
3865 |
+- int i = 0; |
3866 |
+ struct urb *urb; |
3867 |
+ struct urb_node *unode; |
3868 |
+ char *buf; |
3869 |
++ size_t wanted_size = count * size; |
3870 |
+ |
3871 |
+ spin_lock_init(&dlfb->urbs.lock); |
3872 |
+ |
3873 |
++retry: |
3874 |
+ dlfb->urbs.size = size; |
3875 |
+ INIT_LIST_HEAD(&dlfb->urbs.list); |
3876 |
+ |
3877 |
+- while (i < count) { |
3878 |
++ sema_init(&dlfb->urbs.limit_sem, 0); |
3879 |
++ dlfb->urbs.count = 0; |
3880 |
++ dlfb->urbs.available = 0; |
3881 |
++ |
3882 |
++ while (dlfb->urbs.count * size < wanted_size) { |
3883 |
+ unode = kzalloc(sizeof(*unode), GFP_KERNEL); |
3884 |
+ if (!unode) |
3885 |
+ break; |
3886 |
+ unode->dlfb = dlfb; |
3887 |
+ |
3888 |
+- INIT_DELAYED_WORK(&unode->release_urb_work, |
3889 |
+- dlfb_release_urb_work); |
3890 |
+- |
3891 |
+ urb = usb_alloc_urb(0, GFP_KERNEL); |
3892 |
+ if (!urb) { |
3893 |
+ kfree(unode); |
3894 |
+@@ -1864,11 +1872,16 @@ static int dlfb_alloc_urb_list(struct dlfb_data *dlfb, int count, size_t size) |
3895 |
+ } |
3896 |
+ unode->urb = urb; |
3897 |
+ |
3898 |
+- buf = usb_alloc_coherent(dlfb->udev, MAX_TRANSFER, GFP_KERNEL, |
3899 |
++ buf = usb_alloc_coherent(dlfb->udev, size, GFP_KERNEL, |
3900 |
+ &urb->transfer_dma); |
3901 |
+ if (!buf) { |
3902 |
+ kfree(unode); |
3903 |
+ usb_free_urb(urb); |
3904 |
++ if (size > PAGE_SIZE) { |
3905 |
++ size /= 2; |
3906 |
++ dlfb_free_urb_list(dlfb); |
3907 |
++ goto retry; |
3908 |
++ } |
3909 |
+ break; |
3910 |
+ } |
3911 |
+ |
3912 |
+@@ -1879,14 +1892,12 @@ static int dlfb_alloc_urb_list(struct dlfb_data *dlfb, int count, size_t size) |
3913 |
+ |
3914 |
+ list_add_tail(&unode->entry, &dlfb->urbs.list); |
3915 |
+ |
3916 |
+- i++; |
3917 |
++ up(&dlfb->urbs.limit_sem); |
3918 |
++ dlfb->urbs.count++; |
3919 |
++ dlfb->urbs.available++; |
3920 |
+ } |
3921 |
+ |
3922 |
+- sema_init(&dlfb->urbs.limit_sem, i); |
3923 |
+- dlfb->urbs.count = i; |
3924 |
+- dlfb->urbs.available = i; |
3925 |
+- |
3926 |
+- return i; |
3927 |
++ return dlfb->urbs.count; |
3928 |
+ } |
3929 |
+ |
3930 |
+ static struct urb *dlfb_get_urb(struct dlfb_data *dlfb) |
3931 |
+diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c |
3932 |
+index f329eee6dc93..352abc39e891 100644 |
3933 |
+--- a/fs/9p/xattr.c |
3934 |
++++ b/fs/9p/xattr.c |
3935 |
+@@ -105,7 +105,7 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name, |
3936 |
+ { |
3937 |
+ struct kvec kvec = {.iov_base = (void *)value, .iov_len = value_len}; |
3938 |
+ struct iov_iter from; |
3939 |
+- int retval; |
3940 |
++ int retval, err; |
3941 |
+ |
3942 |
+ iov_iter_kvec(&from, WRITE | ITER_KVEC, &kvec, 1, value_len); |
3943 |
+ |
3944 |
+@@ -126,7 +126,9 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name, |
3945 |
+ retval); |
3946 |
+ else |
3947 |
+ p9_client_write(fid, 0, &from, &retval); |
3948 |
+- p9_client_clunk(fid); |
3949 |
++ err = p9_client_clunk(fid); |
3950 |
++ if (!retval && err) |
3951 |
++ retval = err; |
3952 |
+ return retval; |
3953 |
+ } |
3954 |
+ |
3955 |
+diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c |
3956 |
+index 96c1d14c18f1..c2a128678e6e 100644 |
3957 |
+--- a/fs/lockd/clntlock.c |
3958 |
++++ b/fs/lockd/clntlock.c |
3959 |
+@@ -187,7 +187,7 @@ __be32 nlmclnt_grant(const struct sockaddr *addr, const struct nlm_lock *lock) |
3960 |
+ continue; |
3961 |
+ if (!rpc_cmp_addr(nlm_addr(block->b_host), addr)) |
3962 |
+ continue; |
3963 |
+- if (nfs_compare_fh(NFS_FH(file_inode(fl_blocked->fl_file)) ,fh) != 0) |
3964 |
++ if (nfs_compare_fh(NFS_FH(locks_inode(fl_blocked->fl_file)), fh) != 0) |
3965 |
+ continue; |
3966 |
+ /* Alright, we found a lock. Set the return status |
3967 |
+ * and wake up the caller |
3968 |
+diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c |
3969 |
+index a2c0dfc6fdc0..d20b92f271c2 100644 |
3970 |
+--- a/fs/lockd/clntproc.c |
3971 |
++++ b/fs/lockd/clntproc.c |
3972 |
+@@ -128,7 +128,7 @@ static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl) |
3973 |
+ char *nodename = req->a_host->h_rpcclnt->cl_nodename; |
3974 |
+ |
3975 |
+ nlmclnt_next_cookie(&argp->cookie); |
3976 |
+- memcpy(&lock->fh, NFS_FH(file_inode(fl->fl_file)), sizeof(struct nfs_fh)); |
3977 |
++ memcpy(&lock->fh, NFS_FH(locks_inode(fl->fl_file)), sizeof(struct nfs_fh)); |
3978 |
+ lock->caller = nodename; |
3979 |
+ lock->oh.data = req->a_owner; |
3980 |
+ lock->oh.len = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s", |
3981 |
+diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c |
3982 |
+index 3701bccab478..74330daeab71 100644 |
3983 |
+--- a/fs/lockd/svclock.c |
3984 |
++++ b/fs/lockd/svclock.c |
3985 |
+@@ -405,8 +405,8 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file, |
3986 |
+ __be32 ret; |
3987 |
+ |
3988 |
+ dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n", |
3989 |
+- file_inode(file->f_file)->i_sb->s_id, |
3990 |
+- file_inode(file->f_file)->i_ino, |
3991 |
++ locks_inode(file->f_file)->i_sb->s_id, |
3992 |
++ locks_inode(file->f_file)->i_ino, |
3993 |
+ lock->fl.fl_type, lock->fl.fl_pid, |
3994 |
+ (long long)lock->fl.fl_start, |
3995 |
+ (long long)lock->fl.fl_end, |
3996 |
+@@ -511,8 +511,8 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file, |
3997 |
+ __be32 ret; |
3998 |
+ |
3999 |
+ dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n", |
4000 |
+- file_inode(file->f_file)->i_sb->s_id, |
4001 |
+- file_inode(file->f_file)->i_ino, |
4002 |
++ locks_inode(file->f_file)->i_sb->s_id, |
4003 |
++ locks_inode(file->f_file)->i_ino, |
4004 |
+ lock->fl.fl_type, |
4005 |
+ (long long)lock->fl.fl_start, |
4006 |
+ (long long)lock->fl.fl_end); |
4007 |
+@@ -566,8 +566,8 @@ nlmsvc_unlock(struct net *net, struct nlm_file *file, struct nlm_lock *lock) |
4008 |
+ int error; |
4009 |
+ |
4010 |
+ dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n", |
4011 |
+- file_inode(file->f_file)->i_sb->s_id, |
4012 |
+- file_inode(file->f_file)->i_ino, |
4013 |
++ locks_inode(file->f_file)->i_sb->s_id, |
4014 |
++ locks_inode(file->f_file)->i_ino, |
4015 |
+ lock->fl.fl_pid, |
4016 |
+ (long long)lock->fl.fl_start, |
4017 |
+ (long long)lock->fl.fl_end); |
4018 |
+@@ -595,8 +595,8 @@ nlmsvc_cancel_blocked(struct net *net, struct nlm_file *file, struct nlm_lock *l |
4019 |
+ int status = 0; |
4020 |
+ |
4021 |
+ dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n", |
4022 |
+- file_inode(file->f_file)->i_sb->s_id, |
4023 |
+- file_inode(file->f_file)->i_ino, |
4024 |
++ locks_inode(file->f_file)->i_sb->s_id, |
4025 |
++ locks_inode(file->f_file)->i_ino, |
4026 |
+ lock->fl.fl_pid, |
4027 |
+ (long long)lock->fl.fl_start, |
4028 |
+ (long long)lock->fl.fl_end); |
4029 |
+diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c |
4030 |
+index 4ec3d6e03e76..899360ba3b84 100644 |
4031 |
+--- a/fs/lockd/svcsubs.c |
4032 |
++++ b/fs/lockd/svcsubs.c |
4033 |
+@@ -44,7 +44,7 @@ static inline void nlm_debug_print_fh(char *msg, struct nfs_fh *f) |
4034 |
+ |
4035 |
+ static inline void nlm_debug_print_file(char *msg, struct nlm_file *file) |
4036 |
+ { |
4037 |
+- struct inode *inode = file_inode(file->f_file); |
4038 |
++ struct inode *inode = locks_inode(file->f_file); |
4039 |
+ |
4040 |
+ dprintk("lockd: %s %s/%ld\n", |
4041 |
+ msg, inode->i_sb->s_id, inode->i_ino); |
4042 |
+@@ -414,7 +414,7 @@ nlmsvc_match_sb(void *datap, struct nlm_file *file) |
4043 |
+ { |
4044 |
+ struct super_block *sb = datap; |
4045 |
+ |
4046 |
+- return sb == file_inode(file->f_file)->i_sb; |
4047 |
++ return sb == locks_inode(file->f_file)->i_sb; |
4048 |
+ } |
4049 |
+ |
4050 |
+ /** |
4051 |
+diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c |
4052 |
+index a7efd83779d2..dec5880ac6de 100644 |
4053 |
+--- a/fs/nfs/blocklayout/dev.c |
4054 |
++++ b/fs/nfs/blocklayout/dev.c |
4055 |
+@@ -204,7 +204,7 @@ static bool bl_map_stripe(struct pnfs_block_dev *dev, u64 offset, |
4056 |
+ chunk = div_u64(offset, dev->chunk_size); |
4057 |
+ div_u64_rem(chunk, dev->nr_children, &chunk_idx); |
4058 |
+ |
4059 |
+- if (chunk_idx > dev->nr_children) { |
4060 |
++ if (chunk_idx >= dev->nr_children) { |
4061 |
+ dprintk("%s: invalid chunk idx %d (%lld/%lld)\n", |
4062 |
+ __func__, chunk_idx, offset, dev->chunk_size); |
4063 |
+ /* error, should not happen */ |
4064 |
+diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c |
4065 |
+index 64c214fb9da6..5d57e818d0c3 100644 |
4066 |
+--- a/fs/nfs/callback_proc.c |
4067 |
++++ b/fs/nfs/callback_proc.c |
4068 |
+@@ -441,11 +441,14 @@ validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot, |
4069 |
+ * a match. If the slot is in use and the sequence numbers match, the |
4070 |
+ * client is still waiting for a response to the original request. |
4071 |
+ */ |
4072 |
+-static bool referring_call_exists(struct nfs_client *clp, |
4073 |
++static int referring_call_exists(struct nfs_client *clp, |
4074 |
+ uint32_t nrclists, |
4075 |
+- struct referring_call_list *rclists) |
4076 |
++ struct referring_call_list *rclists, |
4077 |
++ spinlock_t *lock) |
4078 |
++ __releases(lock) |
4079 |
++ __acquires(lock) |
4080 |
+ { |
4081 |
+- bool status = false; |
4082 |
++ int status = 0; |
4083 |
+ int i, j; |
4084 |
+ struct nfs4_session *session; |
4085 |
+ struct nfs4_slot_table *tbl; |
4086 |
+@@ -468,8 +471,10 @@ static bool referring_call_exists(struct nfs_client *clp, |
4087 |
+ |
4088 |
+ for (j = 0; j < rclist->rcl_nrefcalls; j++) { |
4089 |
+ ref = &rclist->rcl_refcalls[j]; |
4090 |
++ spin_unlock(lock); |
4091 |
+ status = nfs4_slot_wait_on_seqid(tbl, ref->rc_slotid, |
4092 |
+ ref->rc_sequenceid, HZ >> 1) < 0; |
4093 |
++ spin_lock(lock); |
4094 |
+ if (status) |
4095 |
+ goto out; |
4096 |
+ } |
4097 |
+@@ -546,7 +551,8 @@ __be32 nfs4_callback_sequence(void *argp, void *resp, |
4098 |
+ * related callback was received before the response to the original |
4099 |
+ * call. |
4100 |
+ */ |
4101 |
+- if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) { |
4102 |
++ if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists, |
4103 |
++ &tbl->slot_tbl_lock) < 0) { |
4104 |
+ status = htonl(NFS4ERR_DELAY); |
4105 |
+ goto out_unlock; |
4106 |
+ } |
4107 |
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c |
4108 |
+index f6c4ccd693f4..464db0c0f5c8 100644 |
4109 |
+--- a/fs/nfs/nfs4proc.c |
4110 |
++++ b/fs/nfs/nfs4proc.c |
4111 |
+@@ -581,8 +581,15 @@ nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server, |
4112 |
+ ret = -EIO; |
4113 |
+ return ret; |
4114 |
+ out_retry: |
4115 |
+- if (ret == 0) |
4116 |
++ if (ret == 0) { |
4117 |
+ exception->retry = 1; |
4118 |
++ /* |
4119 |
++ * For NFS4ERR_MOVED, the client transport will need to |
4120 |
++ * be recomputed after migration recovery has completed. |
4121 |
++ */ |
4122 |
++ if (errorcode == -NFS4ERR_MOVED) |
4123 |
++ rpc_task_release_transport(task); |
4124 |
++ } |
4125 |
+ return ret; |
4126 |
+ } |
4127 |
+ |
4128 |
+diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
+index 32ba2d471853..d5e4d3cd8c7f 100644
+--- a/fs/nfs/pnfs_nfs.c
++++ b/fs/nfs/pnfs_nfs.c
+@@ -61,7 +61,7 @@ EXPORT_SYMBOL_GPL(pnfs_generic_commit_release);
+
+ /* The generic layer is about to remove the req from the commit list.
+ * If this will make the bucket empty, it will need to put the lseg reference.
+- * Note this must be called holding i_lock
++ * Note this must be called holding nfsi->commit_mutex
+ */
+ void
+ pnfs_generic_clear_request_commit(struct nfs_page *req,
+@@ -149,9 +149,7 @@ restart:
+ if (list_empty(&b->written)) {
+ freeme = b->wlseg;
+ b->wlseg = NULL;
+- spin_unlock(&cinfo->inode->i_lock);
+ pnfs_put_lseg(freeme);
+- spin_lock(&cinfo->inode->i_lock);
+ goto restart;
+ }
+ }
+@@ -167,7 +165,7 @@ static void pnfs_generic_retry_commit(struct nfs_commit_info *cinfo, int idx)
+ LIST_HEAD(pages);
+ int i;
+
+- spin_lock(&cinfo->inode->i_lock);
++ mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
+ for (i = idx; i < fl_cinfo->nbuckets; i++) {
+ bucket = &fl_cinfo->buckets[i];
+ if (list_empty(&bucket->committing))
+@@ -177,12 +175,12 @@ static void pnfs_generic_retry_commit(struct nfs_commit_info *cinfo, int idx)
+ list_for_each(pos, &bucket->committing)
+ cinfo->ds->ncommitting--;
+ list_splice_init(&bucket->committing, &pages);
+- spin_unlock(&cinfo->inode->i_lock);
++ mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
+ nfs_retry_commit(&pages, freeme, cinfo, i);
+ pnfs_put_lseg(freeme);
+- spin_lock(&cinfo->inode->i_lock);
++ mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
+ }
+- spin_unlock(&cinfo->inode->i_lock);
++ mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
+ }
+
+ static unsigned int
+@@ -222,13 +220,13 @@ void pnfs_fetch_commit_bucket_list(struct list_head *pages,
+ struct list_head *pos;
+
+ bucket = &cinfo->ds->buckets[data->ds_commit_index];
+- spin_lock(&cinfo->inode->i_lock);
++ mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
+ list_for_each(pos, &bucket->committing)
+ cinfo->ds->ncommitting--;
+ list_splice_init(&bucket->committing, pages);
+ data->lseg = bucket->clseg;
+ bucket->clseg = NULL;
+- spin_unlock(&cinfo->inode->i_lock);
++ mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
+
+ }
+
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 857141446d6b..4a17fad93411 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -6293,7 +6293,7 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
+ return status;
+ }
+
+- inode = file_inode(filp);
++ inode = locks_inode(filp);
+ flctx = inode->i_flctx;
+
+ if (flctx && !list_empty_careful(&flctx->flc_posix)) {
+diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
+index ef1fe42ff7bb..cc8303a806b4 100644
+--- a/fs/overlayfs/readdir.c
++++ b/fs/overlayfs/readdir.c
+@@ -668,6 +668,21 @@ static int ovl_fill_real(struct dir_context *ctx, const char *name,
+ return orig_ctx->actor(orig_ctx, name, namelen, offset, ino, d_type);
+ }
+
++static bool ovl_is_impure_dir(struct file *file)
++{
++ struct ovl_dir_file *od = file->private_data;
++ struct inode *dir = d_inode(file->f_path.dentry);
++
++ /*
++ * Only upper dir can be impure, but if we are in the middle of
++ * iterating a lower real dir, dir could be copied up and marked
++ * impure. We only want the impure cache if we started iterating
++ * a real upper dir to begin with.
++ */
++ return od->is_upper && ovl_test_flag(OVL_IMPURE, dir);
++
++}
++
+ static int ovl_iterate_real(struct file *file, struct dir_context *ctx)
+ {
+ int err;
+@@ -696,7 +711,7 @@ static int ovl_iterate_real(struct file *file, struct dir_context *ctx)
+ rdt.parent_ino = stat.ino;
+ }
+
+- if (ovl_test_flag(OVL_IMPURE, d_inode(dir))) {
++ if (ovl_is_impure_dir(file)) {
+ rdt.cache = ovl_cache_get_impure(&file->f_path);
+ if (IS_ERR(rdt.cache))
+ return PTR_ERR(rdt.cache);
+@@ -727,7 +742,7 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
+ */
+ if (ovl_xino_bits(dentry->d_sb) ||
+ (ovl_same_sb(dentry->d_sb) &&
+- (ovl_test_flag(OVL_IMPURE, d_inode(dentry)) ||
++ (ovl_is_impure_dir(file) ||
+ OVL_TYPE_MERGE(ovl_path_type(dentry->d_parent))))) {
+ return ovl_iterate_real(file, ctx);
+ }
+diff --git a/fs/quota/quota.c b/fs/quota/quota.c
+index 860bfbe7a07a..dac1735312df 100644
+--- a/fs/quota/quota.c
++++ b/fs/quota/quota.c
+@@ -18,6 +18,7 @@
+ #include <linux/quotaops.h>
+ #include <linux/types.h>
+ #include <linux/writeback.h>
++#include <linux/nospec.h>
+
+ static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
+ qid_t id)
+@@ -703,6 +704,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
+
+ if (type >= (XQM_COMMAND(cmd) ? XQM_MAXQUOTAS : MAXQUOTAS))
+ return -EINVAL;
++ type = array_index_nospec(type, MAXQUOTAS);
+ /*
+ * Quota not supported on this fs? Check this before s_quota_types
+ * since they needn't be set if quota is not supported at all.
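
The quota hunk above is the standard Spectre-v1 mitigation shape: an architectural bounds check followed by array_index_nospec() so that even a mispredicted branch cannot index out of bounds. The sketch below is an illustrative userspace model, not kernel code; index_mask_nospec() is a hypothetical stand-in for the kernel's array_index_mask_nospec() in <linux/nospec.h>, and it assumes arithmetic right shift of signed values, as the kernel's generic fallback does.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical model of array_index_mask_nospec(): all-ones when
 * idx < sz, all-zeros otherwise, computed without a branch the CPU
 * could speculate past. */
static uintptr_t index_mask_nospec(uintptr_t idx, uintptr_t sz)
{
	return (uintptr_t)(~(intptr_t)(idx | (sz - 1 - idx)) >>
			   (8 * sizeof(uintptr_t) - 1));
}

static const int quota_limits[4] = { 10, 20, 30, 40 };

static int read_limit(uintptr_t type)
{
	if (type >= 4)
		return -1;			/* architectural check */
	type &= index_mask_nospec(type, 4);	/* clamp speculation too */
	return quota_limits[type];
}

int main(void)
{
	printf("%d\n", read_limit(2));	/* 30 */
	printf("%d\n", read_limit(9));	/* -1 */
	return 0;
}
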
+diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
+index 9da224d4f2da..e8616040bffc 100644
+--- a/fs/ubifs/dir.c
++++ b/fs/ubifs/dir.c
+@@ -1123,8 +1123,7 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
+ struct ubifs_inode *ui;
+ struct ubifs_inode *dir_ui = ubifs_inode(dir);
+ struct ubifs_info *c = dir->i_sb->s_fs_info;
+- int err, len = strlen(symname);
+- int sz_change = CALC_DENT_SIZE(len);
++ int err, sz_change, len = strlen(symname);
+ struct fscrypt_str disk_link;
+ struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1,
+ .new_ino_d = ALIGN(len, 8),
+@@ -1151,6 +1150,8 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
+ if (err)
+ goto out_budg;
+
++ sz_change = CALC_DENT_SIZE(fname_len(&nm));
++
+ inode = ubifs_new_inode(c, dir, S_IFLNK | S_IRWXUGO);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
+index 07b4956e0425..48060dc48683 100644
+--- a/fs/ubifs/journal.c
++++ b/fs/ubifs/journal.c
+@@ -664,6 +664,11 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
+ spin_lock(&ui->ui_lock);
+ ui->synced_i_size = ui->ui_size;
+ spin_unlock(&ui->ui_lock);
++ if (xent) {
++ spin_lock(&host_ui->ui_lock);
++ host_ui->synced_i_size = host_ui->ui_size;
++ spin_unlock(&host_ui->ui_lock);
++ }
+ mark_inode_clean(c, ui);
+ mark_inode_clean(c, host_ui);
+ return 0;
+@@ -1282,11 +1287,10 @@ static int truncate_data_node(const struct ubifs_info *c, const struct inode *in
+ int *new_len)
+ {
+ void *buf;
+- int err, compr_type;
+- u32 dlen, out_len, old_dlen;
++ int err, dlen, compr_type, out_len, old_dlen;
+
+ out_len = le32_to_cpu(dn->size);
+- buf = kmalloc_array(out_len, WORST_COMPR_FACTOR, GFP_NOFS);
++ buf = kmalloc(out_len * WORST_COMPR_FACTOR, GFP_NOFS);
+ if (!buf)
+ return -ENOMEM;
+
+@@ -1388,7 +1392,16 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
+ else if (err)
+ goto out_free;
+ else {
+- if (le32_to_cpu(dn->size) <= dlen)
++ int dn_len = le32_to_cpu(dn->size);
++
++ if (dn_len <= 0 || dn_len > UBIFS_BLOCK_SIZE) {
++ ubifs_err(c, "bad data node (block %u, inode %lu)",
++ blk, inode->i_ino);
++ ubifs_dump_node(c, dn);
++ goto out_free;
++ }
++
++ if (dn_len <= dlen)
+ dlen = 0; /* Nothing to do */
+ else {
+ err = truncate_data_node(c, inode, blk, dn, &dlen);
+diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c
+index f5a46844340c..8ade493a423a 100644
+--- a/fs/ubifs/lprops.c
++++ b/fs/ubifs/lprops.c
+@@ -1089,10 +1089,6 @@ static int scan_check_cb(struct ubifs_info *c,
+ }
+ }
+
+- buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
+- if (!buf)
+- return -ENOMEM;
+-
+ /*
+ * After an unclean unmount, empty and freeable LEBs
+ * may contain garbage - do not scan them.
+@@ -1111,6 +1107,10 @@ static int scan_check_cb(struct ubifs_info *c,
+ return LPT_SCAN_CONTINUE;
+ }
+
++ buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
++ if (!buf)
++ return -ENOMEM;
++
+ sleb = ubifs_scan(c, lnum, 0, buf, 0);
+ if (IS_ERR(sleb)) {
+ ret = PTR_ERR(sleb);
+diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
+index 6f720fdf5020..09e37e63bddd 100644
+--- a/fs/ubifs/xattr.c
++++ b/fs/ubifs/xattr.c
+@@ -152,6 +152,12 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
+ ui->data_len = size;
+
+ mutex_lock(&host_ui->ui_mutex);
++
++ if (!host->i_nlink) {
++ err = -ENOENT;
++ goto out_noent;
++ }
++
+ host->i_ctime = current_time(host);
+ host_ui->xattr_cnt += 1;
+ host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm));
+@@ -184,6 +190,7 @@ out_cancel:
+ host_ui->xattr_size -= CALC_XATTR_BYTES(size);
+ host_ui->xattr_names -= fname_len(nm);
+ host_ui->flags &= ~UBIFS_CRYPT_FL;
++out_noent:
+ mutex_unlock(&host_ui->ui_mutex);
+ out_free:
+ make_bad_inode(inode);
+@@ -235,6 +242,12 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
+ mutex_unlock(&ui->ui_mutex);
+
+ mutex_lock(&host_ui->ui_mutex);
++
++ if (!host->i_nlink) {
++ err = -ENOENT;
++ goto out_noent;
++ }
++
+ host->i_ctime = current_time(host);
+ host_ui->xattr_size -= CALC_XATTR_BYTES(old_size);
+ host_ui->xattr_size += CALC_XATTR_BYTES(size);
+@@ -256,6 +269,7 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
+ out_cancel:
+ host_ui->xattr_size -= CALC_XATTR_BYTES(size);
+ host_ui->xattr_size += CALC_XATTR_BYTES(old_size);
++out_noent:
+ mutex_unlock(&host_ui->ui_mutex);
+ make_bad_inode(inode);
+ out_free:
+@@ -482,6 +496,12 @@ static int remove_xattr(struct ubifs_info *c, struct inode *host,
+ return err;
+
+ mutex_lock(&host_ui->ui_mutex);
++
++ if (!host->i_nlink) {
++ err = -ENOENT;
++ goto out_noent;
++ }
++
+ host->i_ctime = current_time(host);
+ host_ui->xattr_cnt -= 1;
+ host_ui->xattr_size -= CALC_DENT_SIZE(fname_len(nm));
+@@ -501,6 +521,7 @@ out_cancel:
+ host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm));
+ host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len);
+ host_ui->xattr_names += fname_len(nm);
++out_noent:
+ mutex_unlock(&host_ui->ui_mutex);
+ ubifs_release_budget(c, &req);
+ make_bad_inode(inode);
+@@ -540,6 +561,9 @@ static int ubifs_xattr_remove(struct inode *host, const char *name)
+
+ ubifs_assert(inode_is_locked(host));
+
++ if (!host->i_nlink)
++ return -ENOENT;
++
+ if (fname_len(&nm) > UBIFS_MAX_NLEN)
+ return -ENAMETOOLONG;
+
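
All four ubifs/xattr.c hunks above add the same guard: after taking host_ui->ui_mutex, re-check host->i_nlink and return -ENOENT if the host inode was unlinked in the meantime, rather than updating xattr accounting on a dead inode. A minimal sketch of that check-under-lock shape, as an illustrative userspace model with pthreads, not the UBIFS code itself:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct inode { int i_nlink; pthread_mutex_t lock; };

/* The link count may drop to zero between lookup and lock
 * acquisition, so re-check it under the lock before touching
 * any accounting. */
static int update_xattr(struct inode *host)
{
	pthread_mutex_lock(&host->lock);
	if (host->i_nlink == 0) {
		pthread_mutex_unlock(&host->lock);
		return -ENOENT;		/* inode already unlinked */
	}
	/* ... safe to update xattr counters here ... */
	pthread_mutex_unlock(&host->lock);
	return 0;
}

int main(void)
{
	struct inode ino = { .i_nlink = 0,
			     .lock = PTHREAD_MUTEX_INITIALIZER };
	printf("%d\n", update_xattr(&ino));	/* -2 (ENOENT) */
	return 0;
}
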
+diff --git a/fs/udf/super.c b/fs/udf/super.c
+index 0c504c8031d3..74b13347cd94 100644
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -1570,10 +1570,16 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_
+ */
+ #define PART_DESC_ALLOC_STEP 32
+
++struct part_desc_seq_scan_data {
++ struct udf_vds_record rec;
++ u32 partnum;
++};
++
+ struct desc_seq_scan_data {
+ struct udf_vds_record vds[VDS_POS_LENGTH];
+ unsigned int size_part_descs;
+- struct udf_vds_record *part_descs_loc;
++ unsigned int num_part_descs;
++ struct part_desc_seq_scan_data *part_descs_loc;
+ };
+
+ static struct udf_vds_record *handle_partition_descriptor(
+@@ -1582,10 +1588,14 @@ static struct udf_vds_record *handle_partition_descriptor(
+ {
+ struct partitionDesc *desc = (struct partitionDesc *)bh->b_data;
+ int partnum;
++ int i;
+
+ partnum = le16_to_cpu(desc->partitionNumber);
+- if (partnum >= data->size_part_descs) {
+- struct udf_vds_record *new_loc;
++ for (i = 0; i < data->num_part_descs; i++)
++ if (partnum == data->part_descs_loc[i].partnum)
++ return &(data->part_descs_loc[i].rec);
++ if (data->num_part_descs >= data->size_part_descs) {
++ struct part_desc_seq_scan_data *new_loc;
+ unsigned int new_size = ALIGN(partnum, PART_DESC_ALLOC_STEP);
+
+ new_loc = kcalloc(new_size, sizeof(*new_loc), GFP_KERNEL);
+@@ -1597,7 +1607,7 @@ static struct udf_vds_record *handle_partition_descriptor(
+ data->part_descs_loc = new_loc;
+ data->size_part_descs = new_size;
+ }
+- return &(data->part_descs_loc[partnum]);
++ return &(data->part_descs_loc[data->num_part_descs++].rec);
+ }
+
+
+@@ -1647,6 +1657,7 @@ static noinline int udf_process_sequence(
+
+ memset(data.vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);
+ data.size_part_descs = PART_DESC_ALLOC_STEP;
++ data.num_part_descs = 0;
+ data.part_descs_loc = kcalloc(data.size_part_descs,
+ sizeof(*data.part_descs_loc),
+ GFP_KERNEL);
+@@ -1658,7 +1669,6 @@ static noinline int udf_process_sequence(
+ * are in it.
+ */
+ for (; (!done && block <= lastblock); block++) {
+-
+ bh = udf_read_tagged(sb, block, block, &ident);
+ if (!bh)
+ break;
+@@ -1730,13 +1740,10 @@ static noinline int udf_process_sequence(
+ }
+
+ /* Now handle prevailing Partition Descriptors */
+- for (i = 0; i < data.size_part_descs; i++) {
+- if (data.part_descs_loc[i].block) {
+- ret = udf_load_partdesc(sb,
+- data.part_descs_loc[i].block);
+- if (ret < 0)
+- return ret;
+- }
++ for (i = 0; i < data.num_part_descs; i++) {
++ ret = udf_load_partdesc(sb, data.part_descs_loc[i].rec.block);
++ if (ret < 0)
++ return ret;
+ }
+
+ return 0;
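
The udf_process_sequence() changes above boil down to tracking how many partition descriptors have actually been seen (num_part_descs) and, before growing the array, scanning the already-recorded entries so a later descriptor for the same partition number reuses the same slot. A toy model of that find-or-add scan, with hypothetical names and a fixed-size array standing in for the kcalloc'd one:

#include <stdio.h>

struct rec { int partnum; int block; };

static struct rec recs[8];
static int nrecs;

/* Scan the records seen so far and reuse the slot for an
 * already-known key; only append when the key is new. */
static struct rec *find_or_add(int partnum)
{
	for (int i = 0; i < nrecs; i++)
		if (recs[i].partnum == partnum)
			return &recs[i];	/* later desc prevails */
	if (nrecs >= 8)
		return NULL;			/* table full */
	recs[nrecs].partnum = partnum;
	return &recs[nrecs++];
}

int main(void)
{
	find_or_add(0)->block = 100;
	find_or_add(0)->block = 200;	/* same slot, overwritten */
	printf("%d record(s), block=%d\n", nrecs, recs[0].block); /* 1, 200 */
	return 0;
}
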
+diff --git a/fs/xattr.c b/fs/xattr.c
+index f9cb1db187b7..1bee74682513 100644
+--- a/fs/xattr.c
++++ b/fs/xattr.c
+@@ -539,7 +539,7 @@ getxattr(struct dentry *d, const char __user *name, void __user *value,
+ if (error > 0) {
+ if ((strcmp(kname, XATTR_NAME_POSIX_ACL_ACCESS) == 0) ||
+ (strcmp(kname, XATTR_NAME_POSIX_ACL_DEFAULT) == 0))
+- posix_acl_fix_xattr_to_user(kvalue, size);
++ posix_acl_fix_xattr_to_user(kvalue, error);
+ if (size && copy_to_user(value, kvalue, error))
+ error = -EFAULT;
+ } else if (error == -ERANGE && size >= XATTR_SIZE_MAX) {
+diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
+index 6c666fd7de3c..0fce47d5acb1 100644
+--- a/include/linux/blk-cgroup.h
++++ b/include/linux/blk-cgroup.h
+@@ -295,6 +295,23 @@ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
+ return __blkg_lookup(blkcg, q, false);
+ }
+
++/**
++ * blkg_lookup - look up blkg for the specified request queue
++ * @q: request_queue of interest
++ *
++ * Lookup blkg for @q at the root level. See also blkg_lookup().
++ */
++static inline struct blkcg_gq *blkg_root_lookup(struct request_queue *q)
++{
++ struct blkcg_gq *blkg;
++
++ rcu_read_lock();
++ blkg = blkg_lookup(&blkcg_root, q);
++ rcu_read_unlock();
++
++ return blkg;
++}
++
+ /**
+ * blkg_to_pdata - get policy private data
+ * @blkg: blkg of interest
+@@ -737,6 +754,7 @@ struct blkcg_policy {
+ #ifdef CONFIG_BLOCK
+
+ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
++static inline struct blkcg_gq *blkg_root_lookup(struct request_queue *q) { return NULL; }
+ static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
+ static inline void blkcg_drain_queue(struct request_queue *q) { }
+ static inline void blkcg_exit_queue(struct request_queue *q) { }
+diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
+index 3a3012f57be4..5389012f1d25 100644
+--- a/include/linux/hyperv.h
++++ b/include/linux/hyperv.h
+@@ -1046,6 +1046,8 @@ extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
+ extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
+ u32 gpadl_handle);
+
++void vmbus_reset_channel_cb(struct vmbus_channel *channel);
++
+ extern int vmbus_recvpacket(struct vmbus_channel *channel,
+ void *buffer,
+ u32 bufferlen,
+diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
+index ef169d67df92..7fd9fbaea5aa 100644
+--- a/include/linux/intel-iommu.h
++++ b/include/linux/intel-iommu.h
+@@ -114,6 +114,7 @@
+ * Extended Capability Register
+ */
+
++#define ecap_dit(e) ((e >> 41) & 0x1)
+ #define ecap_pasid(e) ((e >> 40) & 0x1)
+ #define ecap_pss(e) ((e >> 35) & 0x1f)
+ #define ecap_eafs(e) ((e >> 34) & 0x1)
+@@ -284,6 +285,7 @@ enum {
+ #define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
+ #define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16)
+ #define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
++#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
+ #define QI_DEV_IOTLB_SIZE 1
+ #define QI_DEV_IOTLB_MAX_INVS 32
+
+@@ -308,6 +310,7 @@ enum {
+ #define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
+ #define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
+ #define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
++#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
+ #define QI_DEV_EIOTLB_MAX_INVS 32
+
+ #define QI_PGRP_IDX(idx) (((u64)(idx)) << 55)
+@@ -453,9 +456,8 @@ extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
+ u8 fm, u64 type);
+ extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+ unsigned int size_order, u64 type);
+-extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
+- u64 addr, unsigned mask);
+-
++extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
++ u16 qdep, u64 addr, unsigned mask);
+ extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
+
+ extern int dmar_ir_support(void);
+diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
+index 4fd95dbeb52f..b065ef406770 100644
+--- a/include/linux/lockd/lockd.h
++++ b/include/linux/lockd/lockd.h
+@@ -299,7 +299,7 @@ int nlmsvc_unlock_all_by_ip(struct sockaddr *server_addr);
+
+ static inline struct inode *nlmsvc_file_inode(struct nlm_file *file)
+ {
+- return file_inode(file->f_file);
++ return locks_inode(file->f_file);
+ }
+
+ static inline int __nlm_privileged_request4(const struct sockaddr *sap)
+@@ -359,7 +359,7 @@ static inline int nlm_privileged_requester(const struct svc_rqst *rqstp)
+ static inline int nlm_compare_locks(const struct file_lock *fl1,
+ const struct file_lock *fl2)
+ {
+- return file_inode(fl1->fl_file) == file_inode(fl2->fl_file)
++ return locks_inode(fl1->fl_file) == locks_inode(fl2->fl_file)
+ && fl1->fl_pid == fl2->fl_pid
+ && fl1->fl_owner == fl2->fl_owner
+ && fl1->fl_start == fl2->fl_start
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index 99ce070e7dcb..22651e124071 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -139,7 +139,10 @@ struct page {
+ unsigned long _pt_pad_1; /* compound_head */
+ pgtable_t pmd_huge_pte; /* protected by page->ptl */
+ unsigned long _pt_pad_2; /* mapping */
+- struct mm_struct *pt_mm; /* x86 pgds only */
++ union {
++ struct mm_struct *pt_mm; /* x86 pgds only */
++ atomic_t pt_frag_refcount; /* powerpc */
++ };
+ #if ALLOC_SPLIT_PTLOCKS
+ spinlock_t *ptl;
+ #else
+diff --git a/include/linux/overflow.h b/include/linux/overflow.h
+index 8712ff70995f..40b48e2133cb 100644
+--- a/include/linux/overflow.h
++++ b/include/linux/overflow.h
+@@ -202,6 +202,37 @@
+
+ #endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */
+
++/** check_shl_overflow() - Calculate a left-shifted value and check overflow
++ *
++ * @a: Value to be shifted
++ * @s: How many bits left to shift
++ * @d: Pointer to where to store the result
++ *
++ * Computes *@d = (@a << @s)
++ *
++ * Returns true if '*d' cannot hold the result or when 'a << s' doesn't
++ * make sense. Example conditions:
++ * - 'a << s' causes bits to be lost when stored in *d.
++ * - 's' is garbage (e.g. negative) or so large that the result of
++ * 'a << s' is guaranteed to be 0.
++ * - 'a' is negative.
++ * - 'a << s' sets the sign bit, if any, in '*d'.
++ *
++ * '*d' will hold the results of the attempted shift, but is not
++ * considered "safe for use" if false is returned.
++ */
++#define check_shl_overflow(a, s, d) ({ \
++ typeof(a) _a = a; \
++ typeof(s) _s = s; \
++ typeof(d) _d = d; \
++ u64 _a_full = _a; \
++ unsigned int _to_shift = \
++ _s >= 0 && _s < 8 * sizeof(*d) ? _s : 0; \
++ *_d = (_a_full << _to_shift); \
++ (_to_shift != _s || *_d < 0 || _a < 0 || \
++ (*_d >> _to_shift) != _a); \
++})
++
+ /**
+ * array_size() - Calculate size of 2-dimensional array.
+ *
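
check_shl_overflow() is used like the other check_*_overflow() helpers: it always stores the shifted value through @d, and its return value says whether that result is usable. A usage sketch; the macro body is copied verbatim from the hunk above so the example builds outside the kernel tree (GCC/Clang statement expressions and typeof required, u64 mapped to a standard type):

#include <stdio.h>

typedef unsigned long long u64;

/* Same body as the hunk above, reproduced for a standalone build. */
#define check_shl_overflow(a, s, d) ({ \
	typeof(a) _a = a; \
	typeof(s) _s = s; \
	typeof(d) _d = d; \
	u64 _a_full = _a; \
	unsigned int _to_shift = \
		_s >= 0 && _s < 8 * sizeof(*d) ? _s : 0; \
	*_d = (_a_full << _to_shift); \
	(_to_shift != _s || *_d < 0 || _a < 0 || \
	 (*_d >> _to_shift) != _a); \
})

int main(void)
{
	unsigned int bytes;

	if (!check_shl_overflow(3U, 2, &bytes))
		printf("ok: %u\n", bytes);	/* 3 << 2 = 12, fits */

	if (check_shl_overflow(1U, 40, &bytes))
		puts("overflow detected");	/* shift past 32 bits */
	return 0;
}
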
+diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
+index 9b11b6a0978c..73d5c4a870fa 100644
+--- a/include/linux/sunrpc/clnt.h
++++ b/include/linux/sunrpc/clnt.h
+@@ -156,6 +156,7 @@ int rpc_switch_client_transport(struct rpc_clnt *,
+
+ void rpc_shutdown_client(struct rpc_clnt *);
+ void rpc_release_client(struct rpc_clnt *);
++void rpc_task_release_transport(struct rpc_task *);
+ void rpc_task_release_client(struct rpc_task *);
+
+ int rpcb_create_local(struct net *);
+diff --git a/include/linux/verification.h b/include/linux/verification.h
+index a10549a6c7cd..cfa4730d607a 100644
+--- a/include/linux/verification.h
++++ b/include/linux/verification.h
+@@ -12,6 +12,12 @@
+ #ifndef _LINUX_VERIFICATION_H
+ #define _LINUX_VERIFICATION_H
+
++/*
++ * Indicate that both builtin trusted keys and secondary trusted keys
++ * should be used.
++ */
++#define VERIFY_USE_SECONDARY_KEYRING ((struct key *)1UL)
++
+ /*
+ * The use to which an asymmetric key is being put.
+ */
+diff --git a/include/uapi/linux/eventpoll.h b/include/uapi/linux/eventpoll.h
+index bf48e71f2634..8a3432d0f0dc 100644
+--- a/include/uapi/linux/eventpoll.h
++++ b/include/uapi/linux/eventpoll.h
+@@ -42,7 +42,7 @@
+ #define EPOLLRDHUP (__force __poll_t)0x00002000
+
+ /* Set exclusive wakeup mode for the target file descriptor */
+-#define EPOLLEXCLUSIVE (__force __poll_t)(1U << 28)
++#define EPOLLEXCLUSIVE ((__force __poll_t)(1U << 28))
+
+ /*
+ * Request the handling of system wakeup events so as to prevent system suspends
+@@ -54,13 +54,13 @@
+ *
+ * Requires CAP_BLOCK_SUSPEND
+ */
+-#define EPOLLWAKEUP (__force __poll_t)(1U << 29)
++#define EPOLLWAKEUP ((__force __poll_t)(1U << 29))
+
+ /* Set the One Shot behaviour for the target file descriptor */
+-#define EPOLLONESHOT (__force __poll_t)(1U << 30)
++#define EPOLLONESHOT ((__force __poll_t)(1U << 30))
+
+ /* Set the Edge Triggered behaviour for the target file descriptor */
+-#define EPOLLET (__force __poll_t)(1U << 31)
++#define EPOLLET ((__force __poll_t)(1U << 31))
+
+ /*
+ * On x86-64 make the 64bit structure have the same alignment as the
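
The eventpoll.h change only adds an outer pair of parentheses around each cast expression, which reads as macro hygiene: an object-like macro should expand to a single primary expression so postfix operators cannot rebind inside it. A small, epoll-unrelated illustration of the failure mode such parentheses rule out in general (hypothetical macros, demo only):

#include <stdio.h>

static unsigned char raw[8] = { 1, 0, 0, 0, 2, 0, 0, 0 };

#define WORDS_BAD  (unsigned int *)raw	/* cast not parenthesized */
#define WORDS_GOOD ((unsigned int *)raw)

int main(void)
{
	/* WORDS_BAD[1] would expand to (unsigned int *)raw[1]: the []
	 * binds to raw before the cast, i.e. the byte raw[1] cast to
	 * a pointer -- not the second 32-bit word. The extra
	 * parentheses make WORDS_GOOD[1] parse as intended. */
	printf("%u\n", WORDS_GOOD[1]);	/* 2 on little-endian */
	return 0;
}
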
+diff --git a/include/video/udlfb.h b/include/video/udlfb.h
+index 0cabe6b09095..6e1a2e790b1b 100644
+--- a/include/video/udlfb.h
++++ b/include/video/udlfb.h
+@@ -20,7 +20,6 @@ struct dloarea {
+ struct urb_node {
+ struct list_head entry;
+ struct dlfb_data *dlfb;
+- struct delayed_work release_urb_work;
+ struct urb *urb;
+ };
+
+@@ -52,11 +51,13 @@ struct dlfb_data {
+ int base8;
+ u32 pseudo_palette[256];
+ int blank_mode; /*one of FB_BLANK_ */
++ struct fb_ops ops;
+ /* blit-only rendering path metrics, exposed through sysfs */
+ atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
+ atomic_t bytes_identical; /* saved effort with backbuffer comparison */
+ atomic_t bytes_sent; /* to usb, after compression including overhead */
+ atomic_t cpu_kcycles_used; /* transpired during pixel processing */
++ struct fb_var_screeninfo current_mode;
+ };
+
+ #define NR_USB_REQUEST_I2C_SUB_IO 0x02
+@@ -87,7 +88,7 @@ struct dlfb_data {
+ #define MIN_RAW_PIX_BYTES 2
+ #define MIN_RAW_CMD_BYTES (RAW_HEADER_BYTES + MIN_RAW_PIX_BYTES)
+
+-#define DL_DEFIO_WRITE_DELAY 5 /* fb_deferred_io.delay in jiffies */
++#define DL_DEFIO_WRITE_DELAY msecs_to_jiffies(HZ <= 300 ? 4 : 10) /* optimal value for 720p video */
+ #define DL_DEFIO_WRITE_DISABLE (HZ*60) /* "disable" with long delay */
+
+ /* remove these once align.h patch is taken into kernel */
+diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
+index 3a4656fb7047..5b77a7314e01 100644
+--- a/kernel/livepatch/core.c
++++ b/kernel/livepatch/core.c
+@@ -678,6 +678,9 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func)
+ if (!func->old_name || !func->new_func)
+ return -EINVAL;
+
++ if (strlen(func->old_name) >= KSYM_NAME_LEN)
++ return -EINVAL;
++
+ INIT_LIST_HEAD(&func->stack_node);
+ func->patched = false;
+ func->transition = false;
+@@ -751,6 +754,9 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
+ if (!obj->funcs)
+ return -EINVAL;
+
++ if (klp_is_module(obj) && strlen(obj->name) >= MODULE_NAME_LEN)
++ return -EINVAL;
++
+ obj->patched = false;
+ obj->mod = NULL;
+
+diff --git a/kernel/memremap.c b/kernel/memremap.c
+index 38283363da06..cfb750105e1e 100644
+--- a/kernel/memremap.c
++++ b/kernel/memremap.c
+@@ -355,7 +355,6 @@ void __put_devmap_managed_page(struct page *page)
+ __ClearPageActive(page);
+ __ClearPageWaiters(page);
+
+- page->mapping = NULL;
+ mem_cgroup_uncharge(page);
+
+ page->pgmap->page_free(page, page->pgmap->data);
+diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
+index e880ca22c5a5..3a6c2f87699e 100644
+--- a/kernel/power/Kconfig
++++ b/kernel/power/Kconfig
+@@ -105,6 +105,7 @@ config PM_SLEEP
+ def_bool y
+ depends on SUSPEND || HIBERNATE_CALLBACKS
+ select PM
++ select SRCU
+
+ config PM_SLEEP_SMP
+ def_bool y
+diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
+index a0a74c533e4b..0913b4d385de 100644
+--- a/kernel/printk/printk_safe.c
++++ b/kernel/printk/printk_safe.c
+@@ -306,12 +306,12 @@ static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
+ return printk_safe_log_store(s, fmt, args);
+ }
+
+-void printk_nmi_enter(void)
++void notrace printk_nmi_enter(void)
+ {
+ this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
+ }
+
+-void printk_nmi_exit(void)
++void notrace printk_nmi_exit(void)
+ {
+ this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK);
+ }
+diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
+index d40708e8c5d6..01b6ddeb4f05 100644
+--- a/kernel/rcu/tree_exp.h
++++ b/kernel/rcu/tree_exp.h
+@@ -472,6 +472,7 @@ retry_ipi:
+ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
+ smp_call_func_t func)
+ {
++ int cpu;
+ struct rcu_node *rnp;
+
+ trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("reset"));
+@@ -492,7 +493,13 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
+ continue;
+ }
+ INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
+- queue_work_on(rnp->grplo, rcu_par_gp_wq, &rnp->rew.rew_work);
++ preempt_disable();
++ cpu = cpumask_next(rnp->grplo - 1, cpu_online_mask);
++ /* If all offline, queue the work on an unbound CPU. */
++ if (unlikely(cpu > rnp->grphi))
++ cpu = WORK_CPU_UNBOUND;
++ queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
++ preempt_enable();
+ rnp->exp_need_flush = true;
+ }
+
+diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
+index 1a3e9bddd17b..16f84142f2f4 100644
+--- a/kernel/sched/idle.c
++++ b/kernel/sched/idle.c
+@@ -190,7 +190,7 @@ static void cpuidle_idle_call(void)
+ */
+ next_state = cpuidle_select(drv, dev, &stop_tick);
+
+- if (stop_tick)
++ if (stop_tick || tick_nohz_tick_stopped())
+ tick_nohz_idle_stop_tick();
+ else
+ tick_nohz_idle_retain_tick();
+diff --git a/kernel/sys.c b/kernel/sys.c
+index 38509dc1f77b..69b9a37ecf0d 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -1237,18 +1237,19 @@ static int override_release(char __user *release, size_t len)
+
+ SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
+ {
+- int errno = 0;
++ struct new_utsname tmp;
+
+ down_read(&uts_sem);
+- if (copy_to_user(name, utsname(), sizeof *name))
+- errno = -EFAULT;
++ memcpy(&tmp, utsname(), sizeof(tmp));
+ up_read(&uts_sem);
++ if (copy_to_user(name, &tmp, sizeof(tmp)))
++ return -EFAULT;
+
+- if (!errno && override_release(name->release, sizeof(name->release)))
+- errno = -EFAULT;
+- if (!errno && override_architecture(name))
+- errno = -EFAULT;
+- return errno;
++ if (override_release(name->release, sizeof(name->release)))
++ return -EFAULT;
++ if (override_architecture(name))
++ return -EFAULT;
++ return 0;
+ }
+
+ #ifdef __ARCH_WANT_SYS_OLD_UNAME
+@@ -1257,55 +1258,46 @@ SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
+ */
+ SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
+ {
+- int error = 0;
++ struct old_utsname tmp;
+
+ if (!name)
+ return -EFAULT;
+
+ down_read(&uts_sem);
+- if (copy_to_user(name, utsname(), sizeof(*name)))
+- error = -EFAULT;
++ memcpy(&tmp, utsname(), sizeof(tmp));
+ up_read(&uts_sem);
++ if (copy_to_user(name, &tmp, sizeof(tmp)))
++ return -EFAULT;
+
+- if (!error && override_release(name->release, sizeof(name->release)))
+- error = -EFAULT;
+- if (!error && override_architecture(name))
+- error = -EFAULT;
+- return error;
++ if (override_release(name->release, sizeof(name->release)))
++ return -EFAULT;
++ if (override_architecture(name))
++ return -EFAULT;
++ return 0;
+ }
+
+ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
+ {
+- int error;
++ struct oldold_utsname tmp = {};
+
+ if (!name)
+ return -EFAULT;
+- if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
+- return -EFAULT;
+
+ down_read(&uts_sem);
+- error = __copy_to_user(&name->sysname, &utsname()->sysname,
+- __OLD_UTS_LEN);
+- error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
+- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
+- __OLD_UTS_LEN);
+- error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
+- error |= __copy_to_user(&name->release, &utsname()->release,
+- __OLD_UTS_LEN);
+- error |= __put_user(0, name->release + __OLD_UTS_LEN);
+- error |= __copy_to_user(&name->version, &utsname()->version,
+- __OLD_UTS_LEN);
+- error |= __put_user(0, name->version + __OLD_UTS_LEN);
+- error |= __copy_to_user(&name->machine, &utsname()->machine,
+- __OLD_UTS_LEN);
+- error |= __put_user(0, name->machine + __OLD_UTS_LEN);
++ memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN);
++ memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN);
++ memcpy(&tmp.release, &utsname()->release, __OLD_UTS_LEN);
++ memcpy(&tmp.version, &utsname()->version, __OLD_UTS_LEN);
++ memcpy(&tmp.machine, &utsname()->machine, __OLD_UTS_LEN);
+ up_read(&uts_sem);
++ if (copy_to_user(name, &tmp, sizeof(tmp)))
++ return -EFAULT;
+
+- if (!error && override_architecture(name))
+- error = -EFAULT;
+- if (!error && override_release(name->release, sizeof(name->release)))
+- error = -EFAULT;
+- return error ? -EFAULT : 0;
++ if (override_architecture(name))
++ return -EFAULT;
++ if (override_release(name->release, sizeof(name->release)))
++ return -EFAULT;
++ return 0;
+ }
+ #endif
+
+@@ -1319,17 +1311,18 @@ SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
+
+ if (len < 0 || len > __NEW_UTS_LEN)
+ return -EINVAL;
+- down_write(&uts_sem);
+ errno = -EFAULT;
+ if (!copy_from_user(tmp, name, len)) {
+- struct new_utsname *u = utsname();
++ struct new_utsname *u;
+
++ down_write(&uts_sem);
++ u = utsname();
+ memcpy(u->nodename, tmp, len);
+ memset(u->nodename + len, 0, sizeof(u->nodename) - len);
+ errno = 0;
+ uts_proc_notify(UTS_PROC_HOSTNAME);
++ up_write(&uts_sem);
+ }
+- up_write(&uts_sem);
+ return errno;
+ }
+
+@@ -1337,8 +1330,9 @@ SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
+
+ SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
+ {
+- int i, errno;
++ int i;
+ struct new_utsname *u;
++ char tmp[__NEW_UTS_LEN + 1];
+
+ if (len < 0)
+ return -EINVAL;
+@@ -1347,11 +1341,11 @@ SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
+ i = 1 + strlen(u->nodename);
+ if (i > len)
+ i = len;
+- errno = 0;
+- if (copy_to_user(name, u->nodename, i))
+- errno = -EFAULT;
++ memcpy(tmp, u->nodename, i);
+ up_read(&uts_sem);
+- return errno;
++ if (copy_to_user(name, tmp, i))
++ return -EFAULT;
++ return 0;
+ }
+
+ #endif
+
+@@ -1370,17 +1364,18 @@ SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
+ if (len < 0 || len > __NEW_UTS_LEN)
+ return -EINVAL;
+
+- down_write(&uts_sem);
+ errno = -EFAULT;
+ if (!copy_from_user(tmp, name, len)) {
+- struct new_utsname *u = utsname();
++ struct new_utsname *u;
+
++ down_write(&uts_sem);
++ u = utsname();
+ memcpy(u->domainname, tmp, len);
+ memset(u->domainname + len, 0, sizeof(u->domainname) - len);
+ errno = 0;
+ uts_proc_notify(UTS_PROC_DOMAINNAME);
++ up_write(&uts_sem);
+ }
+- up_write(&uts_sem);
+ return errno;
+ }
+
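
Every kernel/sys.c hunk above applies one pattern: snapshot the UTS strings into a local buffer while holding uts_sem, drop the semaphore, and only then run copy_to_user(), which can fault and sleep. A rough userspace analogue of that shape (a pthread rwlock standing in for uts_sem, the final memcpy() standing in for copy_to_user(); names are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_rwlock_t uts_lock = PTHREAD_RWLOCK_INITIALIZER;
static char nodename[65] = "build-host";

/* Snapshot under the read lock, copy out after dropping it, so a
 * slow or faulting copy never runs with the lock held. */
static int get_hostname(char *out, size_t len)
{
	char tmp[65];

	pthread_rwlock_rdlock(&uts_lock);
	memcpy(tmp, nodename, sizeof(tmp));
	pthread_rwlock_unlock(&uts_lock);

	if (strlen(tmp) + 1 > len)
		return -1;
	memcpy(out, tmp, strlen(tmp) + 1);	/* the copy_to_user() step */
	return 0;
}

int main(void)
{
	char buf[65];
	if (get_hostname(buf, sizeof(buf)) == 0)
		puts(buf);
	return 0;
}
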
+diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
+index 987d9a9ae283..8defc6fd8c0f 100644
+--- a/kernel/trace/blktrace.c
++++ b/kernel/trace/blktrace.c
+@@ -1841,6 +1841,10 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
+ mutex_lock(&q->blk_trace_mutex);
+
+ if (attr == &dev_attr_enable) {
++ if (!!value == !!q->blk_trace) {
++ ret = 0;
++ goto out_unlock_bdev;
++ }
+ if (value)
+ ret = blk_trace_setup_queue(q, bdev);
+ else
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 176debd3481b..ddae35127571 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -7628,7 +7628,9 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
+
+ if (buffer) {
+ mutex_lock(&trace_types_lock);
+- if (val) {
++ if (!!val == tracer_tracing_is_on(tr)) {
++ val = 0; /* do nothing */
++ } else if (val) {
+ tracer_tracing_on(tr);
+ if (tr->current_trace->start)
+ tr->current_trace->start(tr);
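
Both tracing fixes (blktrace above, rb_simple_write here) use the same idempotence guard: normalize the requested and current states with !! and treat a write that would not change anything as a no-op, so re-enabling cannot run setup twice. The trick in isolation, with hypothetical names:

#include <stdio.h>

static void *trace_handle;		/* NULL == disabled */

static int set_enabled(unsigned long value)
{
	/* !!value maps any nonzero request to 1; !!trace_handle maps
	 * any non-NULL pointer to 1, so the unlike types compare
	 * cleanly. */
	if (!!value == !!trace_handle)
		return 0;		/* already in requested state */

	trace_handle = value ? &trace_handle : NULL;
	printf("state changed to %d\n", !!trace_handle);
	return 0;
}

int main(void)
{
	set_enabled(1);		/* changes state */
	set_enabled(42);	/* no-op: already enabled */
	set_enabled(0);		/* changes state */
	return 0;
}
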
+diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
+index bf89a51e740d..ac02fafc9f1b 100644
+--- a/kernel/trace/trace_uprobe.c
++++ b/kernel/trace/trace_uprobe.c
+@@ -952,7 +952,7 @@ probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
+
+ list_del_rcu(&link->list);
+ /* synchronize with u{,ret}probe_trace_func */
+- synchronize_sched();
++ synchronize_rcu();
+ kfree(link);
+
+ if (!list_empty(&tu->tp.files))
+diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
+index c3d7583fcd21..e5222b5fb4fe 100644
+--- a/kernel/user_namespace.c
++++ b/kernel/user_namespace.c
+@@ -859,7 +859,16 @@ static ssize_t map_write(struct file *file, const char __user *buf,
+ unsigned idx;
+ struct uid_gid_extent extent;
+ char *kbuf = NULL, *pos, *next_line;
+- ssize_t ret = -EINVAL;
++ ssize_t ret;
++
++ /* Only allow < page size writes at the beginning of the file */
++ if ((*ppos != 0) || (count >= PAGE_SIZE))
++ return -EINVAL;
++
++ /* Slurp in the user data */
++ kbuf = memdup_user_nul(buf, count);
++ if (IS_ERR(kbuf))
++ return PTR_ERR(kbuf);
+
+ /*
+ * The userns_state_mutex serializes all writes to any given map.
+@@ -895,19 +904,6 @@ static ssize_t map_write(struct file *file, const char __user *buf,
+ if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN))
+ goto out;
+
+- /* Only allow < page size writes at the beginning of the file */
+- ret = -EINVAL;
+- if ((*ppos != 0) || (count >= PAGE_SIZE))
+- goto out;
+-
+- /* Slurp in the user data */
+- kbuf = memdup_user_nul(buf, count);
+- if (IS_ERR(kbuf)) {
+- ret = PTR_ERR(kbuf);
+- kbuf = NULL;
+- goto out;
+- }
+-
+ /* Parse the user data */
+ ret = -EINVAL;
+ pos = kbuf;
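
The map_write() reorder moves the user-copy in front of the lock: memdup_user_nul() can fault, so it now runs before userns_state_mutex is taken rather than while holding it. A small model of what memdup_user_nul() provides, with malloc standing in for the kernel allocator (illustrative only, hypothetical name):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Duplicate a caller-supplied buffer into a freshly allocated,
 * NUL-terminated copy; parsing then never touches the original. */
static char *memdup_nul(const char *src, size_t len)
{
	char *p = malloc(len + 1);
	if (!p)
		return NULL;
	memcpy(p, src, len);
	p[len] = '\0';
	return p;
}

int main(void)
{
	char *kbuf = memdup_nul("0 1000 1\n", 9);
	if (!kbuf)
		return 1;
	printf("parsed copy: %s", kbuf);
	free(kbuf);
	return 0;
}
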
+diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
+index 233cd8fc6910..258033d62cb3 100644
+--- a/kernel/utsname_sysctl.c
++++ b/kernel/utsname_sysctl.c
+@@ -18,7 +18,7 @@
+
+ #ifdef CONFIG_PROC_SYSCTL
+
+-static void *get_uts(struct ctl_table *table, int write)
++static void *get_uts(struct ctl_table *table)
+ {
+ char *which = table->data;
+ struct uts_namespace *uts_ns;
+@@ -26,21 +26,9 @@ static void *get_uts(struct ctl_table *table, int write)
+ uts_ns = current->nsproxy->uts_ns;
+ which = (which - (char *)&init_uts_ns) + (char *)uts_ns;
+
+- if (!write)
+- down_read(&uts_sem);
+- else
+- down_write(&uts_sem);
+ return which;
+ }
+
+-static void put_uts(struct ctl_table *table, int write, void *which)
+-{
+- if (!write)
+- up_read(&uts_sem);
+- else
+- up_write(&uts_sem);
+-}
+-
+ /*
+ * Special case of dostring for the UTS structure. This has locks
+ * to observe. Should this be in kernel/sys.c ????
+@@ -50,13 +38,34 @@ static int proc_do_uts_string(struct ctl_table *table, int write,
+ {
+ struct ctl_table uts_table;
+ int r;
++ char tmp_data[__NEW_UTS_LEN + 1];
++
+ memcpy(&uts_table, table, sizeof(uts_table));
+- uts_table.data = get_uts(table, write);
++ uts_table.data = tmp_data;
++
++ /*
++ * Buffer the value in tmp_data so that proc_dostring() can be called
++ * without holding any locks.
++ * We also need to read the original value in the write==1 case to
++ * support partial writes.
++ */
++ down_read(&uts_sem);
++ memcpy(tmp_data, get_uts(table), sizeof(tmp_data));
++ up_read(&uts_sem);
+ r = proc_dostring(&uts_table, write, buffer, lenp, ppos);
+- put_uts(table, write, uts_table.data);
+
+- if (write)
++ if (write) {
++ /*
++ * Write back the new value.
++ * Note that, since we dropped uts_sem, the result can
++ * theoretically be incorrect if there are two parallel writes
++ * at non-zero offsets to the same sysctl.
++ */
++ down_write(&uts_sem);
++ memcpy(get_uts(table), tmp_data, sizeof(tmp_data));
++ up_write(&uts_sem);
+ proc_sys_poll_notify(table->poll);
++ }
+
+ return r;
+ }
+diff --git a/mm/hmm.c b/mm/hmm.c
+index de7b6bf77201..f9d1d89dec4d 100644
+--- a/mm/hmm.c
++++ b/mm/hmm.c
+@@ -963,6 +963,8 @@ static void hmm_devmem_free(struct page *page, void *data)
+ {
+ struct hmm_devmem *devmem = data;
+
++ page->mapping = NULL;
++
+ devmem->ops->free(devmem, page);
+ }
+
+diff --git a/mm/memory.c b/mm/memory.c
+index 86d4329acb05..f94feec6518d 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -391,15 +391,6 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
+ {
+ struct mmu_table_batch **batch = &tlb->batch;
+
+- /*
+- * When there's less then two users of this mm there cannot be a
+- * concurrent page-table walk.
+- */
+- if (atomic_read(&tlb->mm->mm_users) < 2) {
+- __tlb_remove_table(table);
+- return;
+- }
+-
+ if (*batch == NULL) {
+ *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
+ if (*batch == NULL) {
+diff --git a/mm/readahead.c b/mm/readahead.c
+index e273f0de3376..792dea696d54 100644
+--- a/mm/readahead.c
++++ b/mm/readahead.c
+@@ -385,6 +385,7 @@ ondemand_readahead(struct address_space *mapping,
+ {
+ struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
+ unsigned long max_pages = ra->ra_pages;
++ unsigned long add_pages;
+ pgoff_t prev_offset;
+
+ /*
+@@ -474,10 +475,17 @@ readit:
+ * Will this read hit the readahead marker made by itself?
+ * If so, trigger the readahead marker hit now, and merge
+ * the resulted next readahead window into the current one.
++ * Take care of maximum IO pages as above.
+ */
+ if (offset == ra->start && ra->size == ra->async_size) {
+- ra->async_size = get_next_ra_size(ra, max_pages);
+- ra->size += ra->async_size;
++ add_pages = get_next_ra_size(ra, max_pages);
++ if (ra->size + add_pages <= max_pages) {
++ ra->async_size = add_pages;
++ ra->size += add_pages;
++ } else {
++ ra->size = max_pages;
++ ra->async_size = max_pages >> 1;
++ }
+ }
+
+ return ra_submit(ra, mapping, filp);
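
The merge logic above now caps the combined window: grow by add_pages only while the result stays within max_pages, otherwise pin the window at the maximum and keep half of it as the async portion. The same arithmetic, lifted out into a checkable sketch (hypothetical struct and function names):

#include <stdio.h>

struct ra_state { unsigned long size, async_size; };

/* Grow the window by add_pages, but never past max_pages; when it
 * would overshoot, clamp size to the max and keep half of it as the
 * asynchronous (read-ahead-ahead) portion. */
static void merge_window(struct ra_state *ra, unsigned long add_pages,
			 unsigned long max_pages)
{
	if (ra->size + add_pages <= max_pages) {
		ra->async_size = add_pages;
		ra->size += add_pages;
	} else {
		ra->size = max_pages;
		ra->async_size = max_pages >> 1;
	}
}

int main(void)
{
	struct ra_state ra = { .size = 96, .async_size = 96 };
	merge_window(&ra, 128, 128);	/* would overshoot: clamp */
	printf("size=%lu async=%lu\n", ra.size, ra.async_size); /* 128 64 */
	return 0;
}
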
+diff --git a/net/9p/client.c b/net/9p/client.c
+index 5c1343195292..2872f3dbfd86 100644
+--- a/net/9p/client.c
++++ b/net/9p/client.c
+@@ -958,7 +958,7 @@ static int p9_client_version(struct p9_client *c)
+ {
+ int err = 0;
+ struct p9_req_t *req;
+- char *version;
++ char *version = NULL;
+ int msize;
+
+ p9_debug(P9_DEBUG_9P, ">>> TVERSION msize %d protocol %d\n",
+diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
+index 588bf88c3305..ef456395645a 100644
+--- a/net/9p/trans_fd.c
++++ b/net/9p/trans_fd.c
+@@ -185,6 +185,8 @@ static void p9_mux_poll_stop(struct p9_conn *m)
+ spin_lock_irqsave(&p9_poll_lock, flags);
+ list_del_init(&m->poll_pending_link);
+ spin_unlock_irqrestore(&p9_poll_lock, flags);
++
++ flush_work(&p9_poll_work);
+ }
+
+ /**
+@@ -940,7 +942,7 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
+ if (err < 0)
+ return err;
+
+- if (valid_ipaddr4(addr) < 0)
++ if (addr == NULL || valid_ipaddr4(addr) < 0)
+ return -EINVAL;
+
+ csocket = NULL;
+@@ -990,6 +992,9 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
+
+ csocket = NULL;
+
++ if (addr == NULL)
++ return -EINVAL;
++
+ if (strlen(addr) >= UNIX_PATH_MAX) {
+ pr_err("%s (%d): address too long: %s\n",
+ __func__, task_pid_nr(current), addr);
+diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
+index 3d414acb7015..afaf0d65f3dd 100644
+--- a/net/9p/trans_rdma.c
++++ b/net/9p/trans_rdma.c
+@@ -644,6 +644,9 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
+ struct rdma_conn_param conn_param;
+ struct ib_qp_init_attr qp_attr;
+
++ if (addr == NULL)
++ return -EINVAL;
++
+ /* Parse the transport specific mount options */
+ err = parse_opts(args, &opts);
+ if (err < 0)
+diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
+index 05006cbb3361..4c2da2513c8b 100644
+--- a/net/9p/trans_virtio.c
++++ b/net/9p/trans_virtio.c
+@@ -188,7 +188,7 @@ static int pack_sg_list(struct scatterlist *sg, int start,
+ s = rest_of_page(data);
+ if (s > count)
+ s = count;
+- BUG_ON(index > limit);
++ BUG_ON(index >= limit);
+ /* Make sure we don't terminate early. */
+ sg_unmark_end(&sg[index]);
+ sg_set_buf(&sg[index++], data, s);
+@@ -233,6 +233,7 @@ pack_sg_list_p(struct scatterlist *sg, int start, int limit,
+ s = PAGE_SIZE - data_off;
+ if (s > count)
+ s = count;
++ BUG_ON(index >= limit);
+ /* Make sure we don't terminate early. */
+ sg_unmark_end(&sg[index]);
+ sg_set_page(&sg[index++], pdata[i++], s, data_off);
+@@ -406,6 +407,7 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
+ p9_debug(P9_DEBUG_TRANS, "virtio request\n");
+
+ if (uodata) {
++ __le32 sz;
+ int n = p9_get_mapped_pages(chan, &out_pages, uodata,
+ outlen, &offs, &need_drop);
+ if (n < 0)
+@@ -416,6 +418,12 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
+ memcpy(&req->tc->sdata[req->tc->size - 4], &v, 4);
+ outlen = n;
+ }
++ /* The size field of the message must include the length of the
++ * header and the length of the data. We didn't actually know
++ * the length of the data until this point so add it in now.
++ */
++ sz = cpu_to_le32(req->tc->size + outlen);
++ memcpy(&req->tc->sdata[0], &sz, sizeof(sz));
+ } else if (uidata) {
+ int n = p9_get_mapped_pages(chan, &in_pages, uidata,
+ inlen, &offs, &need_drop);
+@@ -643,6 +651,9 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
+ int ret = -ENOENT;
+ int found = 0;
+
++ if (devname == NULL)
++ return -EINVAL;
++
+ mutex_lock(&virtio_9p_lock);
+ list_for_each_entry(chan, &virtio_chan_list, chan_list) {
+ if (!strncmp(devname, chan->tag, chan->tag_len) &&
+diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
+index 2e2b8bca54f3..c2d54ac76bfd 100644
+--- a/net/9p/trans_xen.c
++++ b/net/9p/trans_xen.c
+@@ -94,6 +94,9 @@ static int p9_xen_create(struct p9_client *client, const char *addr, char *args)
+ {
+ struct xen_9pfs_front_priv *priv;
+
++ if (addr == NULL)
++ return -EINVAL;
++
+ read_lock(&xen_9pfs_lock);
+ list_for_each_entry(priv, &xen_9pfs_devs, list) {
+ if (!strcmp(priv->tag, addr)) {
+diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c
+index e6ff5128e61a..ca53efa17be1 100644
+--- a/net/ieee802154/6lowpan/tx.c
++++ b/net/ieee802154/6lowpan/tx.c
+@@ -265,9 +265,24 @@ netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev)
+ /* We must take a copy of the skb before we modify/replace the ipv6
+ * header as the header could be used elsewhere
+ */
+- skb = skb_unshare(skb, GFP_ATOMIC);
+- if (!skb)
+- return NET_XMIT_DROP;
++ if (unlikely(skb_headroom(skb) < ldev->needed_headroom ||
++ skb_tailroom(skb) < ldev->needed_tailroom)) {
++ struct sk_buff *nskb;
++
++ nskb = skb_copy_expand(skb, ldev->needed_headroom,
++ ldev->needed_tailroom, GFP_ATOMIC);
++ if (likely(nskb)) {
++ consume_skb(skb);
++ skb = nskb;
++ } else {
++ kfree_skb(skb);
++ return NET_XMIT_DROP;
++ }
++ } else {
++ skb = skb_unshare(skb, GFP_ATOMIC);
++ if (!skb)
++ return NET_XMIT_DROP;
++ }
+
+ ret = lowpan_header(skb, ldev, &dgram_size, &dgram_offset);
+ if (ret < 0) {
+diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
+index 7e253455f9dd..bcd1a5e6ebf4 100644
+--- a/net/mac802154/tx.c
++++ b/net/mac802154/tx.c
+@@ -63,8 +63,21 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
+ int ret;
+
+ if (!(local->hw.flags & IEEE802154_HW_TX_OMIT_CKSUM)) {
+- u16 crc = crc_ccitt(0, skb->data, skb->len);
++ struct sk_buff *nskb;
++ u16 crc;
++
++ if (unlikely(skb_tailroom(skb) < IEEE802154_FCS_LEN)) {
++ nskb = skb_copy_expand(skb, 0, IEEE802154_FCS_LEN,
++ GFP_ATOMIC);
++ if (likely(nskb)) {
++ consume_skb(skb);
++ skb = nskb;
++ } else {
++ goto err_tx;
++ }
++ }
+
++ crc = crc_ccitt(0, skb->data, skb->len);
+ put_unaligned_le16(crc, skb_put(skb, 2));
+ }
+
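
The 6LoWPAN and mac802154 hunks share one shape: when the buffer lacks the head/tailroom about to be written, copy into an enlarged buffer and free the original rather than writing past the end. A toy model of the tailroom half of that, with realloc standing in for skb_copy_expand() (illustrative, hypothetical names, not skb code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct frame { unsigned char *data; size_t len, cap; };

/* If the frame lacks 2 bytes of tailroom for the checksum, move it
 * into a bigger buffer first; on allocation failure, drop it. */
static int append_fcs(struct frame *f, unsigned short crc)
{
	if (f->cap - f->len < 2) {
		unsigned char *n = realloc(f->data, f->len + 2);
		if (!n)
			return -1;	/* drop the frame */
		f->data = n;
		f->cap = f->len + 2;
	}
	f->data[f->len++] = crc & 0xff;
	f->data[f->len++] = crc >> 8;	/* little-endian on the wire */
	return 0;
}

int main(void)
{
	struct frame f = { .data = malloc(4), .len = 4, .cap = 4 };
	memset(f.data, 0xAB, 4);
	if (append_fcs(&f, 0x1D0F) == 0)
		printf("frame now %zu bytes\n", f.len);	/* 6 */
	free(f.data);
	return 0;
}
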
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index d839c33ae7d9..0d85425b1e07 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -965,10 +965,20 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(rpc_bind_new_program);
+
++void rpc_task_release_transport(struct rpc_task *task)
++{
++ struct rpc_xprt *xprt = task->tk_xprt;
++
++ if (xprt) {
++ task->tk_xprt = NULL;
++ xprt_put(xprt);
++ }
++}
++EXPORT_SYMBOL_GPL(rpc_task_release_transport);
++
+ void rpc_task_release_client(struct rpc_task *task)
+ {
+ struct rpc_clnt *clnt = task->tk_client;
+- struct rpc_xprt *xprt = task->tk_xprt;
+
+ if (clnt != NULL) {
+ /* Remove from client task list */
+@@ -979,12 +989,14 @@ void rpc_task_release_client(struct rpc_task *task)
+
+ rpc_release_client(clnt);
+ }
++ rpc_task_release_transport(task);
++}
+
+- if (xprt != NULL) {
+- task->tk_xprt = NULL;
+-
+- xprt_put(xprt);
+- }
++static
++void rpc_task_set_transport(struct rpc_task *task, struct rpc_clnt *clnt)
++{
++ if (!task->tk_xprt)
++ task->tk_xprt = xprt_iter_get_next(&clnt->cl_xpi);
+ }
+
+ static
+@@ -992,8 +1004,7 @@ void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
+ {
+
+ if (clnt != NULL) {
+- if (task->tk_xprt == NULL)
+- task->tk_xprt = xprt_iter_get_next(&clnt->cl_xpi);
++ rpc_task_set_transport(task, clnt);
+ task->tk_client = clnt;
+ atomic_inc(&clnt->cl_count);
+ if (clnt->cl_softrtry)
+@@ -1512,6 +1523,7 @@ call_start(struct rpc_task *task)
+ clnt->cl_program->version[clnt->cl_vers]->counts[idx]++;
+ clnt->cl_stats->rpccnt++;
+ task->tk_action = call_reserve;
++ rpc_task_set_transport(task, clnt);
+ }
+
+ /*
+diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
+index a3ac2c91331c..5e1dd493ce59 100644
+--- a/scripts/kconfig/Makefile
++++ b/scripts/kconfig/Makefile
+@@ -173,7 +173,7 @@ HOSTLOADLIBES_nconf = $(shell . $(obj)/.nconf-cfg && echo $$libs)
+ HOSTCFLAGS_nconf.o = $(shell . $(obj)/.nconf-cfg && echo $$cflags)
+ HOSTCFLAGS_nconf.gui.o = $(shell . $(obj)/.nconf-cfg && echo $$cflags)
+
+-$(obj)/nconf.o: $(obj)/.nconf-cfg
++$(obj)/nconf.o $(obj)/nconf.gui.o: $(obj)/.nconf-cfg
+
+ # mconf: Used for the menuconfig target based on lxdialog
+ hostprogs-y += mconf
+@@ -184,7 +184,8 @@ HOSTLOADLIBES_mconf = $(shell . $(obj)/.mconf-cfg && echo $$libs)
+ $(foreach f, mconf.o $(lxdialog), \
+ $(eval HOSTCFLAGS_$f = $$(shell . $(obj)/.mconf-cfg && echo $$$$cflags)))
+
+-$(addprefix $(obj)/, mconf.o $(lxdialog)): $(obj)/.mconf-cfg
++$(obj)/mconf.o: $(obj)/.mconf-cfg
++$(addprefix $(obj)/lxdialog/, $(lxdialog)): $(obj)/.mconf-cfg
+
+ # qconf: Used for the xconfig target based on Qt
+ hostprogs-y += qconf
+diff --git a/security/apparmor/secid.c b/security/apparmor/secid.c
+index f2f22d00db18..4ccec1bcf6f5 100644
+--- a/security/apparmor/secid.c
++++ b/security/apparmor/secid.c
+@@ -79,7 +79,6 @@ int apparmor_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
+ struct aa_label *label = aa_secid_to_label(secid);
+ int len;
+ 
+- AA_BUG(!secdata);
+ AA_BUG(!seclen);
+ 
+ if (!label)
+diff --git a/security/commoncap.c b/security/commoncap.c
+index f4c33abd9959..2e489d6a3ac8 100644
+--- a/security/commoncap.c
++++ b/security/commoncap.c
+@@ -388,7 +388,7 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
+ if (strcmp(name, "capability") != 0)
+ return -EOPNOTSUPP;
+ 
+- dentry = d_find_alias(inode);
++ dentry = d_find_any_alias(inode);
+ if (!dentry)
+ return -EINVAL;
+ 
+diff --git a/sound/ac97/bus.c b/sound/ac97/bus.c
+index 31f858eceffc..83eed9d7f679 100644
+--- a/sound/ac97/bus.c
++++ b/sound/ac97/bus.c
+@@ -503,7 +503,7 @@ static int ac97_bus_remove(struct device *dev)
+ int ret;
+ 
+ ret = pm_runtime_get_sync(dev);
+- if (ret)
++ if (ret < 0)
+ return ret;
+ 
+ ret = adrv->remove(adev);
+@@ -511,6 +511,8 @@ static int ac97_bus_remove(struct device *dev)
+ if (ret == 0)
+ ac97_put_disable_clk(adev);
+ 
++ pm_runtime_disable(dev);
++
+ return ret;
+ }
+ 
+diff --git a/sound/ac97/snd_ac97_compat.c b/sound/ac97/snd_ac97_compat.c
+index 61544e0d8de4..8bab44f74bb8 100644
+--- a/sound/ac97/snd_ac97_compat.c
++++ b/sound/ac97/snd_ac97_compat.c
+@@ -15,6 +15,11 @@
+ 
+ #include "ac97_core.h"
+ 
++static void compat_ac97_release(struct device *dev)
++{
++ kfree(to_ac97_t(dev));
++}
++
+ static void compat_ac97_reset(struct snd_ac97 *ac97)
+ {
+ struct ac97_codec_device *adev = to_ac97_device(ac97->private_data);
+@@ -65,21 +70,31 @@ static struct snd_ac97_bus compat_soc_ac97_bus = {
+ struct snd_ac97 *snd_ac97_compat_alloc(struct ac97_codec_device *adev)
+ {
+ struct snd_ac97 *ac97;
++ int ret;
+ 
+ ac97 = kzalloc(sizeof(struct snd_ac97), GFP_KERNEL);
+ if (ac97 == NULL)
+ return ERR_PTR(-ENOMEM);
+ 
+- ac97->dev = adev->dev;
+ ac97->private_data = adev;
+ ac97->bus = &compat_soc_ac97_bus;
++
++ ac97->dev.parent = &adev->dev;
++ ac97->dev.release = compat_ac97_release;
++ dev_set_name(&ac97->dev, "%s-compat", dev_name(&adev->dev));
++ ret = device_register(&ac97->dev);
++ if (ret) {
++ put_device(&ac97->dev);
++ return ERR_PTR(ret);
++ }
++
+ return ac97;
+ }
+ EXPORT_SYMBOL_GPL(snd_ac97_compat_alloc);
+ 
+ void snd_ac97_compat_release(struct snd_ac97 *ac97)
+ {
+- kfree(ac97);
++ device_unregister(&ac97->dev);
+ }
+ EXPORT_SYMBOL_GPL(snd_ac97_compat_release);
+ 
+diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
+index d056447520a2..eeb6d1f7cfb3 100644
+--- a/tools/perf/util/auxtrace.c
++++ b/tools/perf/util/auxtrace.c
+@@ -202,6 +202,9 @@ static int auxtrace_queues__grow(struct auxtrace_queues *queues,
+ for (i = 0; i < queues->nr_queues; i++) {
+ list_splice_tail(&queues->queue_array[i].head,
+ &queue_array[i].head);
++ queue_array[i].tid = queues->queue_array[i].tid;
++ queue_array[i].cpu = queues->queue_array[i].cpu;
++ queue_array[i].set = queues->queue_array[i].set;
+ queue_array[i].priv = queues->queue_array[i].priv;
+ }
+ 