1 |
commit: d6237b6da0d6c278d18543a00c55cda4bbc53075 |
2 |
Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org> |
3 |
AuthorDate: Sat Feb 17 14:25:08 2018 +0000 |
4 |
Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org> |
5 |
CommitDate: Sat Feb 17 14:25:08 2018 +0000 |
6 |
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d6237b6d |
7 |
|
8 |
linux kernel 4.14.20 |
9 |
|
10 |
0000_README | 4 + |
11 |
 1019_linux-4.14.20.patch | 10698 +++++++++++++++++++++++++++++++++++++++++++++ |
12 |
2 files changed, 10702 insertions(+) |
13 |
|
14 |
diff --git a/0000_README b/0000_README |
15 |
index b7c928d..7fd6d67 100644 |
16 |
--- a/0000_README |
17 |
+++ b/0000_README |
18 |
@@ -119,6 +119,10 @@ Patch: 1018_linux-4.14.19.patch |
19 |
From: http://www.kernel.org |
20 |
Desc: Linux 4.14.19 |
21 |
|
22 |
+Patch: 1019_linux-4.14.20.patch |
23 |
+From: http://www.kernel.org |
24 |
+Desc: Linux 4.14.20 |
25 |
+ |
26 |
Patch: 1500_XATTR_USER_PREFIX.patch |
27 |
From: https://bugs.gentoo.org/show_bug.cgi?id=470644 |
28 |
Desc: Support for namespace user.pax.* on tmpfs. |
29 |
|
30 |
+diff --git a/1019_linux-4.14.20.patch b/1019_linux-4.14.20.patch |
31 |
+new file mode 100644 |
32 |
+index 0000000..0cea7cc |
33 |
+--- /dev/null |
34 |
++++ b/1019_linux-4.14.20.patch |
35 |
@@ -0,0 +1,10698 @@ |
36 |
+diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt |
37 |
+index 66e8ce14d23d..f3d0d316d5f1 100644 |
38 |
+--- a/Documentation/arm64/silicon-errata.txt |
39 |
++++ b/Documentation/arm64/silicon-errata.txt |
40 |
+@@ -71,6 +71,7 @@ stable kernels. |
41 |
+ | Hisilicon | Hip0{5,6,7} | #161010101 | HISILICON_ERRATUM_161010101 | |
42 |
+ | Hisilicon | Hip0{6,7} | #161010701 | N/A | |
43 |
+ | | | | | |
44 |
+-| Qualcomm Tech. | Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 | |
45 |
++| Qualcomm Tech. | Kryo/Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 | |
46 |
+ | Qualcomm Tech. | Falkor v1 | E1009 | QCOM_FALKOR_ERRATUM_1009 | |
47 |
+ | Qualcomm Tech. | QDF2400 ITS | E0065 | QCOM_QDF2400_ERRATUM_0065 | |
48 |
++| Qualcomm Tech. | Falkor v{1,2} | E1041 | QCOM_FALKOR_ERRATUM_1041 | |
49 |
+diff --git a/Makefile b/Makefile |
50 |
+index 76a0b13623f4..33176140f133 100644 |
51 |
+--- a/Makefile |
52 |
++++ b/Makefile |
53 |
+@@ -1,7 +1,7 @@ |
54 |
+ # SPDX-License-Identifier: GPL-2.0 |
55 |
+ VERSION = 4 |
56 |
+ PATCHLEVEL = 14 |
57 |
+-SUBLEVEL = 19 |
58 |
++SUBLEVEL = 20 |
59 |
+ EXTRAVERSION = |
60 |
+ NAME = Petit Gorille |
61 |
+ |
62 |
+@@ -416,7 +416,8 @@ export MAKE AWK GENKSYMS INSTALLKERNEL PERL PYTHON UTS_MACHINE |
63 |
+ export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS |
64 |
+ |
65 |
+ export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS |
66 |
+-export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_KASAN CFLAGS_UBSAN |
67 |
++export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE |
68 |
++export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE CFLAGS_UBSAN |
69 |
+ export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE |
70 |
+ export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE |
71 |
+ export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL |
72 |
+diff --git a/arch/alpha/include/asm/futex.h b/arch/alpha/include/asm/futex.h |
73 |
+index d2e4da93e68c..ca3322536f72 100644 |
74 |
+--- a/arch/alpha/include/asm/futex.h |
75 |
++++ b/arch/alpha/include/asm/futex.h |
76 |
+@@ -20,8 +20,8 @@ |
77 |
+ "3: .subsection 2\n" \ |
78 |
+ "4: br 1b\n" \ |
79 |
+ " .previous\n" \ |
80 |
+- EXC(1b,3b,%1,$31) \ |
81 |
+- EXC(2b,3b,%1,$31) \ |
82 |
++ EXC(1b,3b,$31,%1) \ |
83 |
++ EXC(2b,3b,$31,%1) \ |
84 |
+ : "=&r" (oldval), "=&r"(ret) \ |
85 |
+ : "r" (uaddr), "r"(oparg) \ |
86 |
+ : "memory") |
87 |
+@@ -82,8 +82,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, |
88 |
+ "3: .subsection 2\n" |
89 |
+ "4: br 1b\n" |
90 |
+ " .previous\n" |
91 |
+- EXC(1b,3b,%0,$31) |
92 |
+- EXC(2b,3b,%0,$31) |
93 |
++ EXC(1b,3b,$31,%0) |
94 |
++ EXC(2b,3b,$31,%0) |
95 |
+ : "+r"(ret), "=&r"(prev), "=&r"(cmp) |
96 |
+ : "r"(uaddr), "r"((long)(int)oldval), "r"(newval) |
97 |
+ : "memory"); |
98 |
+diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c |
99 |
+index ce3a675c0c4b..75a5c35a2067 100644 |
100 |
+--- a/arch/alpha/kernel/osf_sys.c |
101 |
++++ b/arch/alpha/kernel/osf_sys.c |
102 |
+@@ -964,8 +964,8 @@ static inline long |
103 |
+ put_tv32(struct timeval32 __user *o, struct timeval *i) |
104 |
+ { |
105 |
+ return copy_to_user(o, &(struct timeval32){ |
106 |
+- .tv_sec = o->tv_sec, |
107 |
+- .tv_usec = o->tv_usec}, |
108 |
++ .tv_sec = i->tv_sec, |
109 |
++ .tv_usec = i->tv_usec}, |
110 |
+ sizeof(struct timeval32)); |
111 |
+ } |
112 |
+ |
113 |
+diff --git a/arch/alpha/kernel/pci_impl.h b/arch/alpha/kernel/pci_impl.h |
114 |
+index 26231601630e..f332d88ffaff 100644 |
115 |
+--- a/arch/alpha/kernel/pci_impl.h |
116 |
++++ b/arch/alpha/kernel/pci_impl.h |
117 |
+@@ -144,7 +144,8 @@ struct pci_iommu_arena |
118 |
+ }; |
119 |
+ |
120 |
+ #if defined(CONFIG_ALPHA_SRM) && \ |
121 |
+- (defined(CONFIG_ALPHA_CIA) || defined(CONFIG_ALPHA_LCA)) |
122 |
++ (defined(CONFIG_ALPHA_CIA) || defined(CONFIG_ALPHA_LCA) || \ |
123 |
++ defined(CONFIG_ALPHA_AVANTI)) |
124 |
+ # define NEED_SRM_SAVE_RESTORE |
125 |
+ #else |
126 |
+ # undef NEED_SRM_SAVE_RESTORE |
127 |
+diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c |
128 |
+index 74bfb1f2d68e..3a885253f486 100644 |
129 |
+--- a/arch/alpha/kernel/process.c |
130 |
++++ b/arch/alpha/kernel/process.c |
131 |
+@@ -269,12 +269,13 @@ copy_thread(unsigned long clone_flags, unsigned long usp, |
132 |
+ application calling fork. */ |
133 |
+ if (clone_flags & CLONE_SETTLS) |
134 |
+ childti->pcb.unique = regs->r20; |
135 |
++ else |
136 |
++ regs->r20 = 0; /* OSF/1 has some strange fork() semantics. */ |
137 |
+ childti->pcb.usp = usp ?: rdusp(); |
138 |
+ *childregs = *regs; |
139 |
+ childregs->r0 = 0; |
140 |
+ childregs->r19 = 0; |
141 |
+ childregs->r20 = 1; /* OSF/1 has some strange fork() semantics. */ |
142 |
+- regs->r20 = 0; |
143 |
+ stack = ((struct switch_stack *) regs) - 1; |
144 |
+ *childstack = *stack; |
145 |
+ childstack->r26 = (unsigned long) ret_from_fork; |
146 |
+diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c |
147 |
+index 4bd99a7b1c41..f43bd05dede2 100644 |
148 |
+--- a/arch/alpha/kernel/traps.c |
149 |
++++ b/arch/alpha/kernel/traps.c |
150 |
+@@ -160,11 +160,16 @@ void show_stack(struct task_struct *task, unsigned long *sp) |
151 |
+ for(i=0; i < kstack_depth_to_print; i++) { |
152 |
+ if (((long) stack & (THREAD_SIZE-1)) == 0) |
153 |
+ break; |
154 |
+- if (i && ((i % 4) == 0)) |
155 |
+- printk("\n "); |
156 |
+- printk("%016lx ", *stack++); |
157 |
++ if ((i % 4) == 0) { |
158 |
++ if (i) |
159 |
++ pr_cont("\n"); |
160 |
++ printk(" "); |
161 |
++ } else { |
162 |
++ pr_cont(" "); |
163 |
++ } |
164 |
++ pr_cont("%016lx", *stack++); |
165 |
+ } |
166 |
+- printk("\n"); |
167 |
++ pr_cont("\n"); |
168 |
+ dik_show_trace(sp); |
169 |
+ } |
170 |
+ |
171 |
+diff --git a/arch/arm/crypto/crc32-ce-glue.c b/arch/arm/crypto/crc32-ce-glue.c |
172 |
+index 1b0e0e86ee9c..96e62ec105d0 100644 |
173 |
+--- a/arch/arm/crypto/crc32-ce-glue.c |
174 |
++++ b/arch/arm/crypto/crc32-ce-glue.c |
175 |
+@@ -188,6 +188,7 @@ static struct shash_alg crc32_pmull_algs[] = { { |
176 |
+ .base.cra_name = "crc32", |
177 |
+ .base.cra_driver_name = "crc32-arm-ce", |
178 |
+ .base.cra_priority = 200, |
179 |
++ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, |
180 |
+ .base.cra_blocksize = 1, |
181 |
+ .base.cra_module = THIS_MODULE, |
182 |
+ }, { |
183 |
+@@ -203,6 +204,7 @@ static struct shash_alg crc32_pmull_algs[] = { { |
184 |
+ .base.cra_name = "crc32c", |
185 |
+ .base.cra_driver_name = "crc32c-arm-ce", |
186 |
+ .base.cra_priority = 200, |
187 |
++ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, |
188 |
+ .base.cra_blocksize = 1, |
189 |
+ .base.cra_module = THIS_MODULE, |
190 |
+ } }; |
191 |
+diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h |
192 |
+index 4a879f6ff13b..31fbb9285f62 100644 |
193 |
+--- a/arch/arm/include/asm/kvm_host.h |
194 |
++++ b/arch/arm/include/asm/kvm_host.h |
195 |
+@@ -293,4 +293,10 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu, |
196 |
+ int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu, |
197 |
+ struct kvm_device_attr *attr); |
198 |
+ |
199 |
++static inline bool kvm_arm_harden_branch_predictor(void) |
200 |
++{ |
201 |
++ /* No way to detect it yet, pretend it is not there. */ |
202 |
++ return false; |
203 |
++} |
204 |
++ |
205 |
+ #endif /* __ARM_KVM_HOST_H__ */ |
206 |
+diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h |
207 |
+index fa6f2174276b..eb46fc81a440 100644 |
208 |
+--- a/arch/arm/include/asm/kvm_mmu.h |
209 |
++++ b/arch/arm/include/asm/kvm_mmu.h |
210 |
+@@ -221,6 +221,16 @@ static inline unsigned int kvm_get_vmid_bits(void) |
211 |
+ return 8; |
212 |
+ } |
213 |
+ |
214 |
++static inline void *kvm_get_hyp_vector(void) |
215 |
++{ |
216 |
++ return kvm_ksym_ref(__kvm_hyp_vector); |
217 |
++} |
218 |
++ |
219 |
++static inline int kvm_map_vectors(void) |
220 |
++{ |
221 |
++ return 0; |
222 |
++} |
223 |
++ |
224 |
+ #endif /* !__ASSEMBLY__ */ |
225 |
+ |
226 |
+ #endif /* __ARM_KVM_MMU_H__ */ |
227 |
+diff --git a/arch/arm/include/asm/kvm_psci.h b/arch/arm/include/asm/kvm_psci.h |
228 |
+deleted file mode 100644 |
229 |
+index 6bda945d31fa..000000000000 |
230 |
+--- a/arch/arm/include/asm/kvm_psci.h |
231 |
++++ /dev/null |
232 |
+@@ -1,27 +0,0 @@ |
233 |
+-/* |
234 |
+- * Copyright (C) 2012 - ARM Ltd |
235 |
+- * Author: Marc Zyngier <marc.zyngier@×××.com> |
236 |
+- * |
237 |
+- * This program is free software; you can redistribute it and/or modify |
238 |
+- * it under the terms of the GNU General Public License version 2 as |
239 |
+- * published by the Free Software Foundation. |
240 |
+- * |
241 |
+- * This program is distributed in the hope that it will be useful, |
242 |
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of |
243 |
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
244 |
+- * GNU General Public License for more details. |
245 |
+- * |
246 |
+- * You should have received a copy of the GNU General Public License |
247 |
+- * along with this program. If not, see <http://www.gnu.org/licenses/>. |
248 |
+- */ |
249 |
+- |
250 |
+-#ifndef __ARM_KVM_PSCI_H__ |
251 |
+-#define __ARM_KVM_PSCI_H__ |
252 |
+- |
253 |
+-#define KVM_ARM_PSCI_0_1 1 |
254 |
+-#define KVM_ARM_PSCI_0_2 2 |
255 |
+- |
256 |
+-int kvm_psci_version(struct kvm_vcpu *vcpu); |
257 |
+-int kvm_psci_call(struct kvm_vcpu *vcpu); |
258 |
+- |
259 |
+-#endif /* __ARM_KVM_PSCI_H__ */ |
260 |
+diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c |
261 |
+index cf8bf6bf87c4..910bd8dabb3c 100644 |
262 |
+--- a/arch/arm/kvm/handle_exit.c |
263 |
++++ b/arch/arm/kvm/handle_exit.c |
264 |
+@@ -21,7 +21,7 @@ |
265 |
+ #include <asm/kvm_emulate.h> |
266 |
+ #include <asm/kvm_coproc.h> |
267 |
+ #include <asm/kvm_mmu.h> |
268 |
+-#include <asm/kvm_psci.h> |
269 |
++#include <kvm/arm_psci.h> |
270 |
+ #include <trace/events/kvm.h> |
271 |
+ |
272 |
+ #include "trace.h" |
273 |
+@@ -36,9 +36,9 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) |
274 |
+ kvm_vcpu_hvc_get_imm(vcpu)); |
275 |
+ vcpu->stat.hvc_exit_stat++; |
276 |
+ |
277 |
+- ret = kvm_psci_call(vcpu); |
278 |
++ ret = kvm_hvc_call_handler(vcpu); |
279 |
+ if (ret < 0) { |
280 |
+- kvm_inject_undefined(vcpu); |
281 |
++ vcpu_set_reg(vcpu, 0, ~0UL); |
282 |
+ return 1; |
283 |
+ } |
284 |
+ |
285 |
+@@ -47,7 +47,16 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) |
286 |
+ |
287 |
+ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) |
288 |
+ { |
289 |
+- kvm_inject_undefined(vcpu); |
290 |
++ /* |
291 |
++ * "If an SMC instruction executed at Non-secure EL1 is |
292 |
++ * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a |
293 |
++ * Trap exception, not a Secure Monitor Call exception [...]" |
294 |
++ * |
295 |
++ * We need to advance the PC after the trap, as it would |
296 |
++ * otherwise return to the same address... |
297 |
++ */ |
298 |
++ vcpu_set_reg(vcpu, 0, ~0UL); |
299 |
++ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); |
300 |
+ return 1; |
301 |
+ } |
302 |
+ |
303 |
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig |
304 |
+index 0df64a6a56d4..c2abb4e88ff2 100644 |
305 |
+--- a/arch/arm64/Kconfig |
306 |
++++ b/arch/arm64/Kconfig |
307 |
+@@ -504,20 +504,13 @@ config CAVIUM_ERRATUM_30115 |
308 |
+ config QCOM_FALKOR_ERRATUM_1003 |
309 |
+ bool "Falkor E1003: Incorrect translation due to ASID change" |
310 |
+ default y |
311 |
+- select ARM64_PAN if ARM64_SW_TTBR0_PAN |
312 |
+ help |
313 |
+ On Falkor v1, an incorrect ASID may be cached in the TLB when ASID |
314 |
+- and BADDR are changed together in TTBRx_EL1. The workaround for this |
315 |
+- issue is to use a reserved ASID in cpu_do_switch_mm() before |
316 |
+- switching to the new ASID. Saying Y here selects ARM64_PAN if |
317 |
+- ARM64_SW_TTBR0_PAN is selected. This is done because implementing and |
318 |
+- maintaining the E1003 workaround in the software PAN emulation code |
319 |
+- would be an unnecessary complication. The affected Falkor v1 CPU |
320 |
+- implements ARMv8.1 hardware PAN support and using hardware PAN |
321 |
+- support versus software PAN emulation is mutually exclusive at |
322 |
+- runtime. |
323 |
+- |
324 |
+- If unsure, say Y. |
325 |
++ and BADDR are changed together in TTBRx_EL1. Since we keep the ASID |
326 |
++ in TTBR1_EL1, this situation only occurs in the entry trampoline and |
327 |
++ then only for entries in the walk cache, since the leaf translation |
328 |
++ is unchanged. Work around the erratum by invalidating the walk cache |
329 |
++ entries for the trampoline before entering the kernel proper. |
330 |
+ |
331 |
+ config QCOM_FALKOR_ERRATUM_1009 |
332 |
+ bool "Falkor E1009: Prematurely complete a DSB after a TLBI" |
333 |
+@@ -539,6 +532,16 @@ config QCOM_QDF2400_ERRATUM_0065 |
334 |
+ |
335 |
+ If unsure, say Y. |
336 |
+ |
337 |
++config QCOM_FALKOR_ERRATUM_E1041 |
338 |
++ bool "Falkor E1041: Speculative instruction fetches might cause errant memory access" |
339 |
++ default y |
340 |
++ help |
341 |
++ Falkor CPU may speculatively fetch instructions from an improper |
342 |
++ memory location when MMU translation is changed from SCTLR_ELn[M]=1 |
343 |
++ to SCTLR_ELn[M]=0. Prefix an ISB instruction to fix the problem. |
344 |
++ |
345 |
++ If unsure, say Y. |
346 |
++ |
347 |
+ endmenu |
348 |
+ |
349 |
+ |
350 |
+@@ -803,6 +806,35 @@ config FORCE_MAX_ZONEORDER |
351 |
+ However for 4K, we choose a higher default value, 11 as opposed to 10, giving us |
352 |
+ 4M allocations matching the default size used by generic code. |
353 |
+ |
354 |
++config UNMAP_KERNEL_AT_EL0 |
355 |
++ bool "Unmap kernel when running in userspace (aka \"KAISER\")" if EXPERT |
356 |
++ default y |
357 |
++ help |
358 |
++ Speculation attacks against some high-performance processors can |
359 |
++ be used to bypass MMU permission checks and leak kernel data to |
360 |
++ userspace. This can be defended against by unmapping the kernel |
361 |
++ when running in userspace, mapping it back in on exception entry |
362 |
++ via a trampoline page in the vector table. |
363 |
++ |
364 |
++ If unsure, say Y. |
365 |
++ |
366 |
++config HARDEN_BRANCH_PREDICTOR |
367 |
++ bool "Harden the branch predictor against aliasing attacks" if EXPERT |
368 |
++ default y |
369 |
++ help |
370 |
++ Speculation attacks against some high-performance processors rely on |
371 |
++ being able to manipulate the branch predictor for a victim context by |
372 |
++ executing aliasing branches in the attacker context. Such attacks |
373 |
++ can be partially mitigated against by clearing internal branch |
374 |
++ predictor state and limiting the prediction logic in some situations. |
375 |
++ |
376 |
++ This config option will take CPU-specific actions to harden the |
377 |
++ branch predictor against aliasing attacks and may rely on specific |
378 |
++ instruction sequences or control bits being set by the system |
379 |
++ firmware. |
380 |
++ |
381 |
++ If unsure, say Y. |
382 |
++ |
383 |
+ menuconfig ARMV8_DEPRECATED |
384 |
+ bool "Emulate deprecated/obsolete ARMv8 instructions" |
385 |
+ depends on COMPAT |
386 |
+diff --git a/arch/arm64/boot/dts/marvell/armada-7040-db.dts b/arch/arm64/boot/dts/marvell/armada-7040-db.dts |
387 |
+index 9c3bdf87e543..51327645b3fb 100644 |
388 |
+--- a/arch/arm64/boot/dts/marvell/armada-7040-db.dts |
389 |
++++ b/arch/arm64/boot/dts/marvell/armada-7040-db.dts |
390 |
+@@ -61,6 +61,12 @@ |
391 |
+ reg = <0x0 0x0 0x0 0x80000000>; |
392 |
+ }; |
393 |
+ |
394 |
++ aliases { |
395 |
++ ethernet0 = &cpm_eth0; |
396 |
++ ethernet1 = &cpm_eth1; |
397 |
++ ethernet2 = &cpm_eth2; |
398 |
++ }; |
399 |
++ |
400 |
+ cpm_reg_usb3_0_vbus: cpm-usb3-0-vbus { |
401 |
+ compatible = "regulator-fixed"; |
402 |
+ regulator-name = "usb3h0-vbus"; |
403 |
+diff --git a/arch/arm64/boot/dts/marvell/armada-8040-db.dts b/arch/arm64/boot/dts/marvell/armada-8040-db.dts |
404 |
+index 0d7b2ae46610..a4f82f1efbbc 100644 |
405 |
+--- a/arch/arm64/boot/dts/marvell/armada-8040-db.dts |
406 |
++++ b/arch/arm64/boot/dts/marvell/armada-8040-db.dts |
407 |
+@@ -61,6 +61,13 @@ |
408 |
+ reg = <0x0 0x0 0x0 0x80000000>; |
409 |
+ }; |
410 |
+ |
411 |
++ aliases { |
412 |
++ ethernet0 = &cpm_eth0; |
413 |
++ ethernet1 = &cpm_eth2; |
414 |
++ ethernet2 = &cps_eth0; |
415 |
++ ethernet3 = &cps_eth1; |
416 |
++ }; |
417 |
++ |
418 |
+ cpm_reg_usb3_0_vbus: cpm-usb3-0-vbus { |
419 |
+ compatible = "regulator-fixed"; |
420 |
+ regulator-name = "cpm-usb3h0-vbus"; |
421 |
+diff --git a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts |
422 |
+index acf5c7d16d79..2b6b792dab93 100644 |
423 |
+--- a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts |
424 |
++++ b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts |
425 |
+@@ -62,6 +62,12 @@ |
426 |
+ reg = <0x0 0x0 0x0 0x80000000>; |
427 |
+ }; |
428 |
+ |
429 |
++ aliases { |
430 |
++ ethernet0 = &cpm_eth0; |
431 |
++ ethernet1 = &cps_eth0; |
432 |
++ ethernet2 = &cps_eth1; |
433 |
++ }; |
434 |
++ |
435 |
+ /* Regulator labels correspond with schematics */ |
436 |
+ v_3_3: regulator-3-3v { |
437 |
+ compatible = "regulator-fixed"; |
438 |
+diff --git a/arch/arm64/crypto/crc32-ce-glue.c b/arch/arm64/crypto/crc32-ce-glue.c |
439 |
+index 624f4137918c..34b4e3d46aab 100644 |
440 |
+--- a/arch/arm64/crypto/crc32-ce-glue.c |
441 |
++++ b/arch/arm64/crypto/crc32-ce-glue.c |
442 |
+@@ -185,6 +185,7 @@ static struct shash_alg crc32_pmull_algs[] = { { |
443 |
+ .base.cra_name = "crc32", |
444 |
+ .base.cra_driver_name = "crc32-arm64-ce", |
445 |
+ .base.cra_priority = 200, |
446 |
++ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, |
447 |
+ .base.cra_blocksize = 1, |
448 |
+ .base.cra_module = THIS_MODULE, |
449 |
+ }, { |
450 |
+@@ -200,6 +201,7 @@ static struct shash_alg crc32_pmull_algs[] = { { |
451 |
+ .base.cra_name = "crc32c", |
452 |
+ .base.cra_driver_name = "crc32c-arm64-ce", |
453 |
+ .base.cra_priority = 200, |
454 |
++ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, |
455 |
+ .base.cra_blocksize = 1, |
456 |
+ .base.cra_module = THIS_MODULE, |
457 |
+ } }; |
458 |
+diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h |
459 |
+index b3da6c886835..dd49c3567f20 100644 |
460 |
+--- a/arch/arm64/include/asm/asm-uaccess.h |
461 |
++++ b/arch/arm64/include/asm/asm-uaccess.h |
462 |
+@@ -4,6 +4,7 @@ |
463 |
+ |
464 |
+ #include <asm/alternative.h> |
465 |
+ #include <asm/kernel-pgtable.h> |
466 |
++#include <asm/mmu.h> |
467 |
+ #include <asm/sysreg.h> |
468 |
+ #include <asm/assembler.h> |
469 |
+ |
470 |
+@@ -13,51 +14,62 @@ |
471 |
+ #ifdef CONFIG_ARM64_SW_TTBR0_PAN |
472 |
+ .macro __uaccess_ttbr0_disable, tmp1 |
473 |
+ mrs \tmp1, ttbr1_el1 // swapper_pg_dir |
474 |
++ bic \tmp1, \tmp1, #TTBR_ASID_MASK |
475 |
+ add \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir |
476 |
+ msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1 |
477 |
+ isb |
478 |
++ sub \tmp1, \tmp1, #SWAPPER_DIR_SIZE |
479 |
++ msr ttbr1_el1, \tmp1 // set reserved ASID |
480 |
++ isb |
481 |
+ .endm |
482 |
+ |
483 |
+- .macro __uaccess_ttbr0_enable, tmp1 |
484 |
++ .macro __uaccess_ttbr0_enable, tmp1, tmp2 |
485 |
+ get_thread_info \tmp1 |
486 |
+ ldr \tmp1, [\tmp1, #TSK_TI_TTBR0] // load saved TTBR0_EL1 |
487 |
++ mrs \tmp2, ttbr1_el1 |
488 |
++ extr \tmp2, \tmp2, \tmp1, #48 |
489 |
++ ror \tmp2, \tmp2, #16 |
490 |
++ msr ttbr1_el1, \tmp2 // set the active ASID |
491 |
++ isb |
492 |
+ msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1 |
493 |
+ isb |
494 |
+ .endm |
495 |
+ |
496 |
+- .macro uaccess_ttbr0_disable, tmp1 |
497 |
++ .macro uaccess_ttbr0_disable, tmp1, tmp2 |
498 |
+ alternative_if_not ARM64_HAS_PAN |
499 |
++ save_and_disable_irq \tmp2 // avoid preemption |
500 |
+ __uaccess_ttbr0_disable \tmp1 |
501 |
++ restore_irq \tmp2 |
502 |
+ alternative_else_nop_endif |
503 |
+ .endm |
504 |
+ |
505 |
+- .macro uaccess_ttbr0_enable, tmp1, tmp2 |
506 |
++ .macro uaccess_ttbr0_enable, tmp1, tmp2, tmp3 |
507 |
+ alternative_if_not ARM64_HAS_PAN |
508 |
+- save_and_disable_irq \tmp2 // avoid preemption |
509 |
+- __uaccess_ttbr0_enable \tmp1 |
510 |
+- restore_irq \tmp2 |
511 |
++ save_and_disable_irq \tmp3 // avoid preemption |
512 |
++ __uaccess_ttbr0_enable \tmp1, \tmp2 |
513 |
++ restore_irq \tmp3 |
514 |
+ alternative_else_nop_endif |
515 |
+ .endm |
516 |
+ #else |
517 |
+- .macro uaccess_ttbr0_disable, tmp1 |
518 |
++ .macro uaccess_ttbr0_disable, tmp1, tmp2 |
519 |
+ .endm |
520 |
+ |
521 |
+- .macro uaccess_ttbr0_enable, tmp1, tmp2 |
522 |
++ .macro uaccess_ttbr0_enable, tmp1, tmp2, tmp3 |
523 |
+ .endm |
524 |
+ #endif |
525 |
+ |
526 |
+ /* |
527 |
+ * These macros are no-ops when UAO is present. |
528 |
+ */ |
529 |
+- .macro uaccess_disable_not_uao, tmp1 |
530 |
+- uaccess_ttbr0_disable \tmp1 |
531 |
++ .macro uaccess_disable_not_uao, tmp1, tmp2 |
532 |
++ uaccess_ttbr0_disable \tmp1, \tmp2 |
533 |
+ alternative_if ARM64_ALT_PAN_NOT_UAO |
534 |
+ SET_PSTATE_PAN(1) |
535 |
+ alternative_else_nop_endif |
536 |
+ .endm |
537 |
+ |
538 |
+- .macro uaccess_enable_not_uao, tmp1, tmp2 |
539 |
+- uaccess_ttbr0_enable \tmp1, \tmp2 |
540 |
++ .macro uaccess_enable_not_uao, tmp1, tmp2, tmp3 |
541 |
++ uaccess_ttbr0_enable \tmp1, \tmp2, \tmp3 |
542 |
+ alternative_if ARM64_ALT_PAN_NOT_UAO |
543 |
+ SET_PSTATE_PAN(0) |
544 |
+ alternative_else_nop_endif |
545 |
+diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h |
546 |
+index d58a6253c6ab..463619dcadd4 100644 |
547 |
+--- a/arch/arm64/include/asm/assembler.h |
548 |
++++ b/arch/arm64/include/asm/assembler.h |
549 |
+@@ -25,7 +25,6 @@ |
550 |
+ |
551 |
+ #include <asm/asm-offsets.h> |
552 |
+ #include <asm/cpufeature.h> |
553 |
+-#include <asm/mmu_context.h> |
554 |
+ #include <asm/page.h> |
555 |
+ #include <asm/pgtable-hwdef.h> |
556 |
+ #include <asm/ptrace.h> |
557 |
+@@ -96,6 +95,24 @@ |
558 |
+ dmb \opt |
559 |
+ .endm |
560 |
+ |
561 |
++/* |
562 |
++ * Value prediction barrier |
563 |
++ */ |
564 |
++ .macro csdb |
565 |
++ hint #20 |
566 |
++ .endm |
567 |
++ |
568 |
++/* |
569 |
++ * Sanitise a 64-bit bounded index wrt speculation, returning zero if out |
570 |
++ * of bounds. |
571 |
++ */ |
572 |
++ .macro mask_nospec64, idx, limit, tmp |
573 |
++ sub \tmp, \idx, \limit |
574 |
++ bic \tmp, \tmp, \idx |
575 |
++ and \idx, \idx, \tmp, asr #63 |
576 |
++ csdb |
577 |
++ .endm |
578 |
++ |
579 |
+ /* |
580 |
+ * NOP sequence |
581 |
+ */ |
582 |
+@@ -464,39 +481,18 @@ alternative_endif |
583 |
+ mrs \rd, sp_el0 |
584 |
+ .endm |
585 |
+ |
586 |
+-/* |
587 |
+- * Errata workaround prior to TTBR0_EL1 update |
588 |
+- * |
589 |
+- * val: TTBR value with new BADDR, preserved |
590 |
+- * tmp0: temporary register, clobbered |
591 |
+- * tmp1: other temporary register, clobbered |
592 |
++/** |
593 |
++ * Errata workaround prior to disable MMU. Insert an ISB immediately prior |
594 |
++ * to executing the MSR that will change SCTLR_ELn[M] from a value of 1 to 0. |
595 |
+ */ |
596 |
+- .macro pre_ttbr0_update_workaround, val, tmp0, tmp1 |
597 |
+-#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003 |
598 |
+-alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003 |
599 |
+- mrs \tmp0, ttbr0_el1 |
600 |
+- mov \tmp1, #FALKOR_RESERVED_ASID |
601 |
+- bfi \tmp0, \tmp1, #48, #16 // reserved ASID + old BADDR |
602 |
+- msr ttbr0_el1, \tmp0 |
603 |
++ .macro pre_disable_mmu_workaround |
604 |
++#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041 |
605 |
+ isb |
606 |
+- bfi \tmp0, \val, #0, #48 // reserved ASID + new BADDR |
607 |
+- msr ttbr0_el1, \tmp0 |
608 |
+- isb |
609 |
+-alternative_else_nop_endif |
610 |
+ #endif |
611 |
+ .endm |
612 |
+ |
613 |
+-/* |
614 |
+- * Errata workaround post TTBR0_EL1 update. |
615 |
+- */ |
616 |
+- .macro post_ttbr0_update_workaround |
617 |
+-#ifdef CONFIG_CAVIUM_ERRATUM_27456 |
618 |
+-alternative_if ARM64_WORKAROUND_CAVIUM_27456 |
619 |
+- ic iallu |
620 |
+- dsb nsh |
621 |
+- isb |
622 |
+-alternative_else_nop_endif |
623 |
+-#endif |
624 |
++ .macro pte_to_phys, phys, pte |
625 |
++ and \phys, \pte, #(((1 << (48 - PAGE_SHIFT)) - 1) << PAGE_SHIFT) |
626 |
+ .endm |
627 |
+ |
628 |
+ #endif /* __ASM_ASSEMBLER_H */ |
629 |
+diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h |
630 |
+index 0fe7e43b7fbc..0b0755c961ac 100644 |
631 |
+--- a/arch/arm64/include/asm/barrier.h |
632 |
++++ b/arch/arm64/include/asm/barrier.h |
633 |
+@@ -31,6 +31,8 @@ |
634 |
+ #define dmb(opt) asm volatile("dmb " #opt : : : "memory") |
635 |
+ #define dsb(opt) asm volatile("dsb " #opt : : : "memory") |
636 |
+ |
637 |
++#define csdb() asm volatile("hint #20" : : : "memory") |
638 |
++ |
639 |
+ #define mb() dsb(sy) |
640 |
+ #define rmb() dsb(ld) |
641 |
+ #define wmb() dsb(st) |
642 |
+@@ -38,6 +40,27 @@ |
643 |
+ #define dma_rmb() dmb(oshld) |
644 |
+ #define dma_wmb() dmb(oshst) |
645 |
+ |
646 |
++/* |
647 |
++ * Generate a mask for array_index__nospec() that is ~0UL when 0 <= idx < sz |
648 |
++ * and 0 otherwise. |
649 |
++ */ |
650 |
++#define array_index_mask_nospec array_index_mask_nospec |
651 |
++static inline unsigned long array_index_mask_nospec(unsigned long idx, |
652 |
++ unsigned long sz) |
653 |
++{ |
654 |
++ unsigned long mask; |
655 |
++ |
656 |
++ asm volatile( |
657 |
++ " cmp %1, %2\n" |
658 |
++ " sbc %0, xzr, xzr\n" |
659 |
++ : "=r" (mask) |
660 |
++ : "r" (idx), "Ir" (sz) |
661 |
++ : "cc"); |
662 |
++ |
663 |
++ csdb(); |
664 |
++ return mask; |
665 |
++} |
666 |
++ |
667 |
+ #define __smp_mb() dmb(ish) |
668 |
+ #define __smp_rmb() dmb(ishld) |
669 |
+ #define __smp_wmb() dmb(ishst) |
670 |
+diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h |
671 |
+index 8da621627d7c..2e7b236bc596 100644 |
672 |
+--- a/arch/arm64/include/asm/cpucaps.h |
673 |
++++ b/arch/arm64/include/asm/cpucaps.h |
674 |
+@@ -40,7 +40,10 @@ |
675 |
+ #define ARM64_WORKAROUND_858921 19 |
676 |
+ #define ARM64_WORKAROUND_CAVIUM_30115 20 |
677 |
+ #define ARM64_HAS_DCPOP 21 |
678 |
++#define ARM64_UNMAP_KERNEL_AT_EL0 23 |
679 |
++#define ARM64_HARDEN_BRANCH_PREDICTOR 24 |
680 |
++#define ARM64_HARDEN_BP_POST_GUEST_EXIT 25 |
681 |
+ |
682 |
+-#define ARM64_NCAPS 22 |
683 |
++#define ARM64_NCAPS 26 |
684 |
+ |
685 |
+ #endif /* __ASM_CPUCAPS_H */ |
686 |
+diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h |
687 |
+index 235e77d98261..be7bd19c87ec 100644 |
688 |
+--- a/arch/arm64/include/asm/cputype.h |
689 |
++++ b/arch/arm64/include/asm/cputype.h |
690 |
+@@ -79,26 +79,37 @@ |
691 |
+ #define ARM_CPU_PART_AEM_V8 0xD0F |
692 |
+ #define ARM_CPU_PART_FOUNDATION 0xD00 |
693 |
+ #define ARM_CPU_PART_CORTEX_A57 0xD07 |
694 |
++#define ARM_CPU_PART_CORTEX_A72 0xD08 |
695 |
+ #define ARM_CPU_PART_CORTEX_A53 0xD03 |
696 |
+ #define ARM_CPU_PART_CORTEX_A73 0xD09 |
697 |
++#define ARM_CPU_PART_CORTEX_A75 0xD0A |
698 |
+ |
699 |
+ #define APM_CPU_PART_POTENZA 0x000 |
700 |
+ |
701 |
+ #define CAVIUM_CPU_PART_THUNDERX 0x0A1 |
702 |
+ #define CAVIUM_CPU_PART_THUNDERX_81XX 0x0A2 |
703 |
+ #define CAVIUM_CPU_PART_THUNDERX_83XX 0x0A3 |
704 |
++#define CAVIUM_CPU_PART_THUNDERX2 0x0AF |
705 |
+ |
706 |
+ #define BRCM_CPU_PART_VULCAN 0x516 |
707 |
+ |
708 |
+ #define QCOM_CPU_PART_FALKOR_V1 0x800 |
709 |
++#define QCOM_CPU_PART_FALKOR 0xC00 |
710 |
++#define QCOM_CPU_PART_KRYO 0x200 |
711 |
+ |
712 |
+ #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) |
713 |
+ #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) |
714 |
++#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72) |
715 |
+ #define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73) |
716 |
++#define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75) |
717 |
+ #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX) |
718 |
+ #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX) |
719 |
+ #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX) |
720 |
++#define MIDR_CAVIUM_THUNDERX2 MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX2) |
721 |
++#define MIDR_BRCM_VULCAN MIDR_CPU_MODEL(ARM_CPU_IMP_BRCM, BRCM_CPU_PART_VULCAN) |
722 |
+ #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1) |
723 |
++#define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR) |
724 |
++#define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO) |
725 |
+ |
726 |
+ #ifndef __ASSEMBLY__ |
727 |
+ |
728 |
+diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h |
729 |
+index c4cd5081d78b..8389050328bb 100644 |
730 |
+--- a/arch/arm64/include/asm/efi.h |
731 |
++++ b/arch/arm64/include/asm/efi.h |
732 |
+@@ -121,19 +121,21 @@ static inline void efi_set_pgd(struct mm_struct *mm) |
733 |
+ if (mm != current->active_mm) { |
734 |
+ /* |
735 |
+ * Update the current thread's saved ttbr0 since it is |
736 |
+- * restored as part of a return from exception. Set |
737 |
+- * the hardware TTBR0_EL1 using cpu_switch_mm() |
738 |
+- * directly to enable potential errata workarounds. |
739 |
++ * restored as part of a return from exception. Enable |
740 |
++ * access to the valid TTBR0_EL1 and invoke the errata |
741 |
++ * workaround directly since there is no return from |
742 |
++ * exception when invoking the EFI run-time services. |
743 |
+ */ |
744 |
+ update_saved_ttbr0(current, mm); |
745 |
+- cpu_switch_mm(mm->pgd, mm); |
746 |
++ uaccess_ttbr0_enable(); |
747 |
++ post_ttbr_update_workaround(); |
748 |
+ } else { |
749 |
+ /* |
750 |
+ * Defer the switch to the current thread's TTBR0_EL1 |
751 |
+ * until uaccess_enable(). Restore the current |
752 |
+ * thread's saved ttbr0 corresponding to its active_mm |
753 |
+ */ |
754 |
+- cpu_set_reserved_ttbr0(); |
755 |
++ uaccess_ttbr0_disable(); |
756 |
+ update_saved_ttbr0(current, current->active_mm); |
757 |
+ } |
758 |
+ } |
759 |
+diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h |
760 |
+index 4052ec39e8db..ec1e6d6fa14c 100644 |
761 |
+--- a/arch/arm64/include/asm/fixmap.h |
762 |
++++ b/arch/arm64/include/asm/fixmap.h |
763 |
+@@ -58,6 +58,11 @@ enum fixed_addresses { |
764 |
+ FIX_APEI_GHES_NMI, |
765 |
+ #endif /* CONFIG_ACPI_APEI_GHES */ |
766 |
+ |
767 |
++#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 |
768 |
++ FIX_ENTRY_TRAMP_DATA, |
769 |
++ FIX_ENTRY_TRAMP_TEXT, |
770 |
++#define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT)) |
771 |
++#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ |
772 |
+ __end_of_permanent_fixed_addresses, |
773 |
+ |
774 |
+ /* |
775 |
+diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h |
776 |
+index 5bb2fd4674e7..07fe2479d310 100644 |
777 |
+--- a/arch/arm64/include/asm/futex.h |
778 |
++++ b/arch/arm64/include/asm/futex.h |
779 |
+@@ -48,9 +48,10 @@ do { \ |
780 |
+ } while (0) |
781 |
+ |
782 |
+ static inline int |
783 |
+-arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) |
784 |
++arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr) |
785 |
+ { |
786 |
+ int oldval = 0, ret, tmp; |
787 |
++ u32 __user *uaddr = __uaccess_mask_ptr(_uaddr); |
788 |
+ |
789 |
+ pagefault_disable(); |
790 |
+ |
791 |
+@@ -88,15 +89,17 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) |
792 |
+ } |
793 |
+ |
794 |
+ static inline int |
795 |
+-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, |
796 |
++futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr, |
797 |
+ u32 oldval, u32 newval) |
798 |
+ { |
799 |
+ int ret = 0; |
800 |
+ u32 val, tmp; |
801 |
++ u32 __user *uaddr; |
802 |
+ |
803 |
+- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) |
804 |
++ if (!access_ok(VERIFY_WRITE, _uaddr, sizeof(u32))) |
805 |
+ return -EFAULT; |
806 |
+ |
807 |
++ uaddr = __uaccess_mask_ptr(_uaddr); |
808 |
+ uaccess_enable(); |
809 |
+ asm volatile("// futex_atomic_cmpxchg_inatomic\n" |
810 |
+ " prfm pstl1strm, %2\n" |
811 |
+diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h |
812 |
+index 26a64d0f9ab9..a7ef5a051911 100644 |
813 |
+--- a/arch/arm64/include/asm/kvm_asm.h |
814 |
++++ b/arch/arm64/include/asm/kvm_asm.h |
815 |
+@@ -66,6 +66,8 @@ extern u32 __kvm_get_mdcr_el2(void); |
816 |
+ |
817 |
+ extern u32 __init_stage2_translation(void); |
818 |
+ |
819 |
++extern void __qcom_hyp_sanitize_btac_predictors(void); |
820 |
++ |
821 |
+ #endif |
822 |
+ |
823 |
+ #endif /* __ARM_KVM_ASM_H__ */ |
824 |
+diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h |
825 |
+index e923b58606e2..8ad208cb866c 100644 |
826 |
+--- a/arch/arm64/include/asm/kvm_host.h |
827 |
++++ b/arch/arm64/include/asm/kvm_host.h |
828 |
+@@ -384,4 +384,9 @@ static inline void __cpu_init_stage2(void) |
829 |
+ "PARange is %d bits, unsupported configuration!", parange); |
830 |
+ } |
831 |
+ |
832 |
++static inline bool kvm_arm_harden_branch_predictor(void) |
833 |
++{ |
834 |
++ return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR); |
835 |
++} |
836 |
++ |
837 |
+ #endif /* __ARM64_KVM_HOST_H__ */ |
838 |
+diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h |
839 |
+index 672c8684d5c2..2d6d4bd9de52 100644 |
840 |
+--- a/arch/arm64/include/asm/kvm_mmu.h |
841 |
++++ b/arch/arm64/include/asm/kvm_mmu.h |
842 |
+@@ -309,5 +309,43 @@ static inline unsigned int kvm_get_vmid_bits(void) |
843 |
+ return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8; |
844 |
+ } |
845 |
+ |
846 |
++#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR |
847 |
++#include <asm/mmu.h> |
848 |
++ |
849 |
++static inline void *kvm_get_hyp_vector(void) |
850 |
++{ |
851 |
++ struct bp_hardening_data *data = arm64_get_bp_hardening_data(); |
852 |
++ void *vect = kvm_ksym_ref(__kvm_hyp_vector); |
853 |
++ |
854 |
++ if (data->fn) { |
855 |
++ vect = __bp_harden_hyp_vecs_start + |
856 |
++ data->hyp_vectors_slot * SZ_2K; |
857 |
++ |
858 |
++ if (!has_vhe()) |
859 |
++ vect = lm_alias(vect); |
860 |
++ } |
861 |
++ |
862 |
++ return vect; |
863 |
++} |
864 |
++ |
865 |
++static inline int kvm_map_vectors(void) |
866 |
++{ |
867 |
++ return create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start), |
868 |
++ kvm_ksym_ref(__bp_harden_hyp_vecs_end), |
869 |
++ PAGE_HYP_EXEC); |
870 |
++} |
871 |
++ |
872 |
++#else |
873 |
++static inline void *kvm_get_hyp_vector(void) |
874 |
++{ |
875 |
++ return kvm_ksym_ref(__kvm_hyp_vector); |
876 |
++} |
877 |
++ |
878 |
++static inline int kvm_map_vectors(void) |
879 |
++{ |
880 |
++ return 0; |
881 |
++} |
882 |
++#endif |
883 |
++ |
884 |
+ #endif /* __ASSEMBLY__ */ |
885 |
+ #endif /* __ARM64_KVM_MMU_H__ */ |
886 |
+diff --git a/arch/arm64/include/asm/kvm_psci.h b/arch/arm64/include/asm/kvm_psci.h |
887 |
+deleted file mode 100644 |
888 |
+index bc39e557c56c..000000000000 |
889 |
+--- a/arch/arm64/include/asm/kvm_psci.h |
890 |
++++ /dev/null |
891 |
+@@ -1,27 +0,0 @@ |
892 |
+-/* |
893 |
+- * Copyright (C) 2012,2013 - ARM Ltd |
894 |
+- * Author: Marc Zyngier <marc.zyngier@×××.com> |
895 |
+- * |
896 |
+- * This program is free software; you can redistribute it and/or modify |
897 |
+- * it under the terms of the GNU General Public License version 2 as |
898 |
+- * published by the Free Software Foundation. |
899 |
+- * |
900 |
+- * This program is distributed in the hope that it will be useful, |
901 |
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of |
902 |
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
903 |
+- * GNU General Public License for more details. |
904 |
+- * |
905 |
+- * You should have received a copy of the GNU General Public License |
906 |
+- * along with this program. If not, see <http://www.gnu.org/licenses/>. |
907 |
+- */ |
908 |
+- |
909 |
+-#ifndef __ARM64_KVM_PSCI_H__ |
910 |
+-#define __ARM64_KVM_PSCI_H__ |
911 |
+- |
912 |
+-#define KVM_ARM_PSCI_0_1 1 |
913 |
+-#define KVM_ARM_PSCI_0_2 2 |
914 |
+- |
915 |
+-int kvm_psci_version(struct kvm_vcpu *vcpu); |
916 |
+-int kvm_psci_call(struct kvm_vcpu *vcpu); |
917 |
+- |
918 |
+-#endif /* __ARM64_KVM_PSCI_H__ */ |
919 |
+diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h |
920 |
+index f7c4d2146aed..d4bae7d6e0d8 100644 |
921 |
+--- a/arch/arm64/include/asm/memory.h |
922 |
++++ b/arch/arm64/include/asm/memory.h |
923 |
+@@ -61,8 +61,6 @@ |
924 |
+ * KIMAGE_VADDR - the virtual address of the start of the kernel image |
925 |
+ * VA_BITS - the maximum number of bits for virtual addresses. |
926 |
+ * VA_START - the first kernel virtual address. |
927 |
+- * TASK_SIZE - the maximum size of a user space task. |
928 |
+- * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area. |
929 |
+ */ |
930 |
+ #define VA_BITS (CONFIG_ARM64_VA_BITS) |
931 |
+ #define VA_START (UL(0xffffffffffffffff) - \ |
932 |
+@@ -77,19 +75,6 @@ |
933 |
+ #define PCI_IO_END (VMEMMAP_START - SZ_2M) |
934 |
+ #define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE) |
935 |
+ #define FIXADDR_TOP (PCI_IO_START - SZ_2M) |
936 |
+-#define TASK_SIZE_64 (UL(1) << VA_BITS) |
937 |
+- |
938 |
+-#ifdef CONFIG_COMPAT |
939 |
+-#define TASK_SIZE_32 UL(0x100000000) |
940 |
+-#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \ |
941 |
+- TASK_SIZE_32 : TASK_SIZE_64) |
942 |
+-#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \ |
943 |
+- TASK_SIZE_32 : TASK_SIZE_64) |
944 |
+-#else |
945 |
+-#define TASK_SIZE TASK_SIZE_64 |
946 |
+-#endif /* CONFIG_COMPAT */ |
947 |
+- |
948 |
+-#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4)) |
949 |
+ |
950 |
+ #define KERNEL_START _text |
951 |
+ #define KERNEL_END _end |
952 |
+diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h |
953 |
+index 0d34bf0a89c7..6dd83d75b82a 100644 |
954 |
+--- a/arch/arm64/include/asm/mmu.h |
955 |
++++ b/arch/arm64/include/asm/mmu.h |
956 |
+@@ -17,6 +17,10 @@ |
957 |
+ #define __ASM_MMU_H |
958 |
+ |
959 |
+ #define MMCF_AARCH32 0x1 /* mm context flag for AArch32 executables */ |
960 |
++#define USER_ASID_FLAG (UL(1) << 48) |
961 |
++#define TTBR_ASID_MASK (UL(0xffff) << 48) |
962 |
++ |
963 |
++#ifndef __ASSEMBLY__ |
964 |
+ |
965 |
+ typedef struct { |
966 |
+ atomic64_t id; |
967 |
+@@ -31,6 +35,49 @@ typedef struct { |
968 |
+ */ |
969 |
+ #define ASID(mm) ((mm)->context.id.counter & 0xffff) |
970 |
+ |
971 |
++static inline bool arm64_kernel_unmapped_at_el0(void) |
972 |
++{ |
973 |
++ return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) && |
974 |
++ cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0); |
975 |
++} |
976 |
++ |
977 |
++typedef void (*bp_hardening_cb_t)(void); |
978 |
++ |
979 |
++struct bp_hardening_data { |
980 |
++ int hyp_vectors_slot; |
981 |
++ bp_hardening_cb_t fn; |
982 |
++}; |
983 |
++ |
984 |
++#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR |
985 |
++extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[]; |
986 |
++ |
987 |
++DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data); |
988 |
++ |
989 |
++static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void) |
990 |
++{ |
991 |
++ return this_cpu_ptr(&bp_hardening_data); |
992 |
++} |
993 |
++ |
994 |
++static inline void arm64_apply_bp_hardening(void) |
995 |
++{ |
996 |
++ struct bp_hardening_data *d; |
997 |
++ |
998 |
++ if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) |
999 |
++ return; |
1000 |
++ |
1001 |
++ d = arm64_get_bp_hardening_data(); |
1002 |
++ if (d->fn) |
1003 |
++ d->fn(); |
1004 |
++} |
1005 |
++#else |
1006 |
++static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void) |
1007 |
++{ |
1008 |
++ return NULL; |
1009 |
++} |
1010 |
++ |
1011 |
++static inline void arm64_apply_bp_hardening(void) { } |
1012 |
++#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */ |
1013 |
++ |
1014 |
+ extern void paging_init(void); |
1015 |
+ extern void bootmem_init(void); |
1016 |
+ extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt); |
1017 |
+@@ -41,4 +88,5 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, |
1018 |
+ extern void *fixmap_remap_fdt(phys_addr_t dt_phys); |
1019 |
+ extern void mark_linear_text_alias_ro(void); |
1020 |
+ |
1021 |
++#endif /* !__ASSEMBLY__ */ |
1022 |
+ #endif |
1023 |
+diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h |
1024 |
+index 9d155fa9a507..779d7a2ec5ec 100644 |
1025 |
+--- a/arch/arm64/include/asm/mmu_context.h |
1026 |
++++ b/arch/arm64/include/asm/mmu_context.h |
1027 |
+@@ -19,8 +19,6 @@ |
1028 |
+ #ifndef __ASM_MMU_CONTEXT_H |
1029 |
+ #define __ASM_MMU_CONTEXT_H |
1030 |
+ |
1031 |
+-#define FALKOR_RESERVED_ASID 1 |
1032 |
+- |
1033 |
+ #ifndef __ASSEMBLY__ |
1034 |
+ |
1035 |
+ #include <linux/compiler.h> |
1036 |
+@@ -57,6 +55,13 @@ static inline void cpu_set_reserved_ttbr0(void) |
1037 |
+ isb(); |
1038 |
+ } |
1039 |
+ |
1040 |
++static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm) |
1041 |
++{ |
1042 |
++ BUG_ON(pgd == swapper_pg_dir); |
1043 |
++ cpu_set_reserved_ttbr0(); |
1044 |
++ cpu_do_switch_mm(virt_to_phys(pgd),mm); |
1045 |
++} |
1046 |
++ |
1047 |
+ /* |
1048 |
+ * TCR.T0SZ value to use when the ID map is active. Usually equals |
1049 |
+ * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in |
1050 |
+@@ -170,7 +175,7 @@ static inline void update_saved_ttbr0(struct task_struct *tsk, |
1051 |
+ else |
1052 |
+ ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48; |
1053 |
+ |
1054 |
+- task_thread_info(tsk)->ttbr0 = ttbr; |
1055 |
++ WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr); |
1056 |
+ } |
1057 |
+ #else |
1058 |
+ static inline void update_saved_ttbr0(struct task_struct *tsk, |
1059 |
+@@ -225,6 +230,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, |
1060 |
+ #define activate_mm(prev,next) switch_mm(prev, next, current) |
1061 |
+ |
1062 |
+ void verify_cpu_asid_bits(void); |
1063 |
++void post_ttbr_update_workaround(void); |
1064 |
+ |
1065 |
+ #endif /* !__ASSEMBLY__ */ |
1066 |
+ |
1067 |
+diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h |
1068 |
+index eb0c2bd90de9..8df4cb6ac6f7 100644 |
1069 |
+--- a/arch/arm64/include/asm/pgtable-hwdef.h |
1070 |
++++ b/arch/arm64/include/asm/pgtable-hwdef.h |
1071 |
+@@ -272,6 +272,7 @@ |
1072 |
+ #define TCR_TG1_4K (UL(2) << TCR_TG1_SHIFT) |
1073 |
+ #define TCR_TG1_64K (UL(3) << TCR_TG1_SHIFT) |
1074 |
+ |
1075 |
++#define TCR_A1 (UL(1) << 22) |
1076 |
+ #define TCR_ASID16 (UL(1) << 36) |
1077 |
+ #define TCR_TBI0 (UL(1) << 37) |
1078 |
+ #define TCR_HA (UL(1) << 39) |
1079 |
+diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h |
1080 |
+index 0a5635fb0ef9..2db84df5eb42 100644 |
1081 |
+--- a/arch/arm64/include/asm/pgtable-prot.h |
1082 |
++++ b/arch/arm64/include/asm/pgtable-prot.h |
1083 |
+@@ -34,8 +34,14 @@ |
1084 |
+ |
1085 |
+ #include <asm/pgtable-types.h> |
1086 |
+ |
1087 |
+-#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) |
1088 |
+-#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) |
1089 |
++#define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) |
1090 |
++#define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) |
1091 |
++ |
1092 |
++#define PTE_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PTE_NG : 0) |
1093 |
++#define PMD_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0) |
1094 |
++ |
1095 |
++#define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG) |
1096 |
++#define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG) |
1097 |
+ |
1098 |
+ #define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE)) |
1099 |
+ #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE)) |
1100 |
+@@ -47,23 +53,24 @@ |
1101 |
+ #define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL)) |
1102 |
+ #define PROT_SECT_NORMAL_EXEC (PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL)) |
1103 |
+ |
1104 |
+-#define _PAGE_DEFAULT (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL)) |
1105 |
++#define _PAGE_DEFAULT (_PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL)) |
1106 |
++#define _HYP_PAGE_DEFAULT _PAGE_DEFAULT |
1107 |
+ |
1108 |
+-#define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE) |
1109 |
+-#define PAGE_KERNEL_RO __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY) |
1110 |
+-#define PAGE_KERNEL_ROX __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY) |
1111 |
+-#define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE) |
1112 |
+-#define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT) |
1113 |
++#define PAGE_KERNEL __pgprot(PROT_NORMAL) |
1114 |
++#define PAGE_KERNEL_RO __pgprot((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY) |
1115 |
++#define PAGE_KERNEL_ROX __pgprot((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY) |
1116 |
++#define PAGE_KERNEL_EXEC __pgprot(PROT_NORMAL & ~PTE_PXN) |
1117 |
++#define PAGE_KERNEL_EXEC_CONT __pgprot((PROT_NORMAL & ~PTE_PXN) | PTE_CONT) |
1118 |
+ |
1119 |
+-#define PAGE_HYP __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN) |
1120 |
+-#define PAGE_HYP_EXEC __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY) |
1121 |
+-#define PAGE_HYP_RO __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN) |
1122 |
++#define PAGE_HYP __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN) |
1123 |
++#define PAGE_HYP_EXEC __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY) |
1124 |
++#define PAGE_HYP_RO __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN) |
1125 |
+ #define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP) |
1126 |
+ |
1127 |
+-#define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY) |
1128 |
+-#define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN) |
1129 |
++#define PAGE_S2 __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY) |
1130 |
++#define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN) |
1131 |
+ |
1132 |
+-#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN) |
1133 |
++#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN) |
1134 |
+ #define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE) |
1135 |
+ #define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE) |
1136 |
+ #define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN) |
1137 |
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h |
1138 |
+index 960d05c8816a..aafea648a30f 100644 |
1139 |
+--- a/arch/arm64/include/asm/pgtable.h |
1140 |
++++ b/arch/arm64/include/asm/pgtable.h |
1141 |
+@@ -684,6 +684,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, |
1142 |
+ |
1143 |
+ extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; |
1144 |
+ extern pgd_t idmap_pg_dir[PTRS_PER_PGD]; |
1145 |
++extern pgd_t tramp_pg_dir[PTRS_PER_PGD]; |
1146 |
+ |
1147 |
+ /* |
1148 |
+ * Encode and decode a swap entry: |
1149 |
+diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h |
1150 |
+index 14ad6e4e87d1..16cef2e8449e 100644 |
1151 |
+--- a/arch/arm64/include/asm/proc-fns.h |
1152 |
++++ b/arch/arm64/include/asm/proc-fns.h |
1153 |
+@@ -35,12 +35,6 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr); |
1154 |
+ |
1155 |
+ #include <asm/memory.h> |
1156 |
+ |
1157 |
+-#define cpu_switch_mm(pgd,mm) \ |
1158 |
+-do { \ |
1159 |
+- BUG_ON(pgd == swapper_pg_dir); \ |
1160 |
+- cpu_do_switch_mm(virt_to_phys(pgd),mm); \ |
1161 |
+-} while (0) |
1162 |
+- |
1163 |
+ #endif /* __ASSEMBLY__ */ |
1164 |
+ #endif /* __KERNEL__ */ |
1165 |
+ #endif /* __ASM_PROCFNS_H */ |
1166 |
+diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h |
1167 |
+index 29adab8138c3..fda6f5812281 100644 |
1168 |
+--- a/arch/arm64/include/asm/processor.h |
1169 |
++++ b/arch/arm64/include/asm/processor.h |
1170 |
+@@ -19,6 +19,13 @@ |
1171 |
+ #ifndef __ASM_PROCESSOR_H |
1172 |
+ #define __ASM_PROCESSOR_H |
1173 |
+ |
1174 |
++#define TASK_SIZE_64 (UL(1) << VA_BITS) |
1175 |
++ |
1176 |
++#define KERNEL_DS UL(-1) |
1177 |
++#define USER_DS (TASK_SIZE_64 - 1) |
1178 |
++ |
1179 |
++#ifndef __ASSEMBLY__ |
1180 |
++ |
1181 |
+ /* |
1182 |
+ * Default implementation of macro that returns current |
1183 |
+ * instruction pointer ("program counter"). |
1184 |
+@@ -37,6 +44,22 @@ |
1185 |
+ #include <asm/ptrace.h> |
1186 |
+ #include <asm/types.h> |
1187 |
+ |
1188 |
++/* |
1189 |
++ * TASK_SIZE - the maximum size of a user space task. |
1190 |
++ * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area. |
1191 |
++ */ |
1192 |
++#ifdef CONFIG_COMPAT |
1193 |
++#define TASK_SIZE_32 UL(0x100000000) |
1194 |
++#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \ |
1195 |
++ TASK_SIZE_32 : TASK_SIZE_64) |
1196 |
++#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \ |
1197 |
++ TASK_SIZE_32 : TASK_SIZE_64) |
1198 |
++#else |
1199 |
++#define TASK_SIZE TASK_SIZE_64 |
1200 |
++#endif /* CONFIG_COMPAT */ |
1201 |
++ |
1202 |
++#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4)) |
1203 |
++ |
1204 |
+ #define STACK_TOP_MAX TASK_SIZE_64 |
1205 |
+ #ifdef CONFIG_COMPAT |
1206 |
+ #define AARCH32_VECTORS_BASE 0xffff0000 |
1207 |
+@@ -194,4 +217,5 @@ static inline void spin_lock_prefetch(const void *ptr) |
1208 |
+ int cpu_enable_pan(void *__unused); |
1209 |
+ int cpu_enable_cache_maint_trap(void *__unused); |
1210 |
+ |
1211 |
++#endif /* __ASSEMBLY__ */ |
1212 |
+ #endif /* __ASM_PROCESSOR_H */ |
1213 |
+diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h |
1214 |
+index f707fed5886f..ede80d47d0ef 100644 |
1215 |
+--- a/arch/arm64/include/asm/sysreg.h |
1216 |
++++ b/arch/arm64/include/asm/sysreg.h |
1217 |
+@@ -332,6 +332,8 @@ |
1218 |
+ #define ID_AA64ISAR1_DPB_SHIFT 0 |
1219 |
+ |
1220 |
+ /* id_aa64pfr0 */ |
1221 |
++#define ID_AA64PFR0_CSV3_SHIFT 60 |
1222 |
++#define ID_AA64PFR0_CSV2_SHIFT 56 |
1223 |
+ #define ID_AA64PFR0_GIC_SHIFT 24 |
1224 |
+ #define ID_AA64PFR0_ASIMD_SHIFT 20 |
1225 |
+ #define ID_AA64PFR0_FP_SHIFT 16 |
1226 |
+diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h |
1227 |
+index af1c76981911..9e82dd79c7db 100644 |
1228 |
+--- a/arch/arm64/include/asm/tlbflush.h |
1229 |
++++ b/arch/arm64/include/asm/tlbflush.h |
1230 |
+@@ -23,6 +23,7 @@ |
1231 |
+ |
1232 |
+ #include <linux/sched.h> |
1233 |
+ #include <asm/cputype.h> |
1234 |
++#include <asm/mmu.h> |
1235 |
+ |
1236 |
+ /* |
1237 |
+ * Raw TLBI operations. |
1238 |
+@@ -54,6 +55,11 @@ |
1239 |
+ |
1240 |
+ #define __tlbi(op, ...) __TLBI_N(op, ##__VA_ARGS__, 1, 0) |
1241 |
+ |
1242 |
++#define __tlbi_user(op, arg) do { \ |
1243 |
++ if (arm64_kernel_unmapped_at_el0()) \ |
1244 |
++ __tlbi(op, (arg) | USER_ASID_FLAG); \ |
1245 |
++} while (0) |
1246 |
++ |
1247 |
+ /* |
1248 |
+ * TLB Management |
1249 |
+ * ============== |
1250 |
+@@ -115,6 +121,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm) |
1251 |
+ |
1252 |
+ dsb(ishst); |
1253 |
+ __tlbi(aside1is, asid); |
1254 |
++ __tlbi_user(aside1is, asid); |
1255 |
+ dsb(ish); |
1256 |
+ } |
1257 |
+ |
1258 |
+@@ -125,6 +132,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, |
1259 |
+ |
1260 |
+ dsb(ishst); |
1261 |
+ __tlbi(vale1is, addr); |
1262 |
++ __tlbi_user(vale1is, addr); |
1263 |
+ dsb(ish); |
1264 |
+ } |
1265 |
+ |
1266 |
+@@ -151,10 +159,13 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma, |
1267 |
+ |
1268 |
+ dsb(ishst); |
1269 |
+ for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) { |
1270 |
+- if (last_level) |
1271 |
++ if (last_level) { |
1272 |
+ __tlbi(vale1is, addr); |
1273 |
+- else |
1274 |
++ __tlbi_user(vale1is, addr); |
1275 |
++ } else { |
1276 |
+ __tlbi(vae1is, addr); |
1277 |
++ __tlbi_user(vae1is, addr); |
1278 |
++ } |
1279 |
+ } |
1280 |
+ dsb(ish); |
1281 |
+ } |
1282 |
+@@ -194,6 +205,7 @@ static inline void __flush_tlb_pgtable(struct mm_struct *mm, |
1283 |
+ unsigned long addr = uaddr >> 12 | (ASID(mm) << 48); |
1284 |
+ |
1285 |
+ __tlbi(vae1is, addr); |
1286 |
++ __tlbi_user(vae1is, addr); |
1287 |
+ dsb(ish); |
1288 |
+ } |
1289 |
+ |
1290 |
+diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h |
1291 |
+index fc0f9eb66039..fad8c1b2ca3e 100644 |
1292 |
+--- a/arch/arm64/include/asm/uaccess.h |
1293 |
++++ b/arch/arm64/include/asm/uaccess.h |
1294 |
+@@ -35,16 +35,20 @@ |
1295 |
+ #include <asm/compiler.h> |
1296 |
+ #include <asm/extable.h> |
1297 |
+ |
1298 |
+-#define KERNEL_DS (-1UL) |
1299 |
+ #define get_ds() (KERNEL_DS) |
1300 |
+- |
1301 |
+-#define USER_DS TASK_SIZE_64 |
1302 |
+ #define get_fs() (current_thread_info()->addr_limit) |
1303 |
+ |
1304 |
+ static inline void set_fs(mm_segment_t fs) |
1305 |
+ { |
1306 |
+ current_thread_info()->addr_limit = fs; |
1307 |
+ |
1308 |
++ /* |
1309 |
++ * Prevent a mispredicted conditional call to set_fs from forwarding |
1310 |
++ * the wrong address limit to access_ok under speculation. |
1311 |
++ */ |
1312 |
++ dsb(nsh); |
1313 |
++ isb(); |
1314 |
++ |
1315 |
+ /* On user-mode return, check fs is correct */ |
1316 |
+ set_thread_flag(TIF_FSCHECK); |
1317 |
+ |
1318 |
+@@ -66,22 +70,32 @@ static inline void set_fs(mm_segment_t fs) |
1319 |
+ * Returns 1 if the range is valid, 0 otherwise. |
1320 |
+ * |
1321 |
+ * This is equivalent to the following test: |
1322 |
+- * (u65)addr + (u65)size <= current->addr_limit |
1323 |
+- * |
1324 |
+- * This needs 65-bit arithmetic. |
1325 |
++ * (u65)addr + (u65)size <= (u65)current->addr_limit + 1 |
1326 |
+ */ |
1327 |
+-#define __range_ok(addr, size) \ |
1328 |
+-({ \ |
1329 |
+- unsigned long __addr = (unsigned long)(addr); \ |
1330 |
+- unsigned long flag, roksum; \ |
1331 |
+- __chk_user_ptr(addr); \ |
1332 |
+- asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \ |
1333 |
+- : "=&r" (flag), "=&r" (roksum) \ |
1334 |
+- : "1" (__addr), "Ir" (size), \ |
1335 |
+- "r" (current_thread_info()->addr_limit) \ |
1336 |
+- : "cc"); \ |
1337 |
+- flag; \ |
1338 |
+-}) |
1339 |
++static inline unsigned long __range_ok(unsigned long addr, unsigned long size) |
1340 |
++{ |
1341 |
++ unsigned long limit = current_thread_info()->addr_limit; |
1342 |
++ |
1343 |
++ __chk_user_ptr(addr); |
1344 |
++ asm volatile( |
1345 |
++ // A + B <= C + 1 for all A,B,C, in four easy steps: |
1346 |
++ // 1: X = A + B; X' = X % 2^64 |
1347 |
++ " adds %0, %0, %2\n" |
1348 |
++ // 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4 |
1349 |
++ " csel %1, xzr, %1, hi\n" |
1350 |
++ // 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X' |
1351 |
++ // to compensate for the carry flag being set in step 4. For |
1352 |
++ // X > 2^64, X' merely has to remain nonzero, which it does. |
1353 |
++ " csinv %0, %0, xzr, cc\n" |
1354 |
++ // 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1 |
1355 |
++ // comes from the carry in being clear. Otherwise, we are |
1356 |
++ // testing X' - C == 0, subject to the previous adjustments. |
1357 |
++ " sbcs xzr, %0, %1\n" |
1358 |
++ " cset %0, ls\n" |
1359 |
++ : "+r" (addr), "+r" (limit) : "Ir" (size) : "cc"); |
1360 |
++ |
1361 |
++ return addr; |
1362 |
++} |
1363 |
+ |
1364 |
+ /* |
1365 |
+ * When dealing with data aborts, watchpoints, or instruction traps we may end |
1366 |
+@@ -90,7 +104,7 @@ static inline void set_fs(mm_segment_t fs) |
1367 |
+ */ |
1368 |
+ #define untagged_addr(addr) sign_extend64(addr, 55) |
1369 |
+ |
1370 |
+-#define access_ok(type, addr, size) __range_ok(addr, size) |
1371 |
++#define access_ok(type, addr, size) __range_ok((unsigned long)(addr), size) |
1372 |
+ #define user_addr_max get_fs |
1373 |
+ |
1374 |
+ #define _ASM_EXTABLE(from, to) \ |
1375 |
+@@ -105,17 +119,23 @@ static inline void set_fs(mm_segment_t fs) |
1376 |
+ #ifdef CONFIG_ARM64_SW_TTBR0_PAN |
1377 |
+ static inline void __uaccess_ttbr0_disable(void) |
1378 |
+ { |
1379 |
+- unsigned long ttbr; |
1380 |
++ unsigned long flags, ttbr; |
1381 |
+ |
1382 |
++ local_irq_save(flags); |
1383 |
++ ttbr = read_sysreg(ttbr1_el1); |
1384 |
++ ttbr &= ~TTBR_ASID_MASK; |
1385 |
+ /* reserved_ttbr0 placed at the end of swapper_pg_dir */ |
1386 |
+- ttbr = read_sysreg(ttbr1_el1) + SWAPPER_DIR_SIZE; |
1387 |
+- write_sysreg(ttbr, ttbr0_el1); |
1388 |
++ write_sysreg(ttbr + SWAPPER_DIR_SIZE, ttbr0_el1); |
1389 |
+ isb(); |
1390 |
++ /* Set reserved ASID */ |
1391 |
++ write_sysreg(ttbr, ttbr1_el1); |
1392 |
++ isb(); |
1393 |
++ local_irq_restore(flags); |
1394 |
+ } |
1395 |
+ |
1396 |
+ static inline void __uaccess_ttbr0_enable(void) |
1397 |
+ { |
1398 |
+- unsigned long flags; |
1399 |
++ unsigned long flags, ttbr0, ttbr1; |
1400 |
+ |
1401 |
+ /* |
1402 |
+ * Disable interrupts to avoid preemption between reading the 'ttbr0' |
1403 |
+@@ -123,7 +143,17 @@ static inline void __uaccess_ttbr0_enable(void) |
1404 |
+ * roll-over and an update of 'ttbr0'. |
1405 |
+ */ |
1406 |
+ local_irq_save(flags); |
1407 |
+- write_sysreg(current_thread_info()->ttbr0, ttbr0_el1); |
1408 |
++ ttbr0 = READ_ONCE(current_thread_info()->ttbr0); |
1409 |
++ |
1410 |
++ /* Restore active ASID */ |
1411 |
++ ttbr1 = read_sysreg(ttbr1_el1); |
1412 |
++ ttbr1 &= ~TTBR_ASID_MASK; /* safety measure */ |
1413 |
++ ttbr1 |= ttbr0 & TTBR_ASID_MASK; |
1414 |
++ write_sysreg(ttbr1, ttbr1_el1); |
1415 |
++ isb(); |
1416 |
++ |
1417 |
++ /* Restore user page table */ |
1418 |
++ write_sysreg(ttbr0, ttbr0_el1); |
1419 |
+ isb(); |
1420 |
+ local_irq_restore(flags); |
1421 |
+ } |
1422 |
+@@ -192,6 +222,26 @@ static inline void uaccess_enable_not_uao(void) |
1423 |
+ __uaccess_enable(ARM64_ALT_PAN_NOT_UAO); |
1424 |
+ } |
1425 |
+ |
1426 |
++/* |
1427 |
++ * Sanitise a uaccess pointer such that it becomes NULL if above the |
1428 |
++ * current addr_limit. |
1429 |
++ */ |
1430 |
++#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr) |
1431 |
++static inline void __user *__uaccess_mask_ptr(const void __user *ptr) |
1432 |
++{ |
1433 |
++ void __user *safe_ptr; |
1434 |
++ |
1435 |
++ asm volatile( |
1436 |
++ " bics xzr, %1, %2\n" |
1437 |
++ " csel %0, %1, xzr, eq\n" |
1438 |
++ : "=&r" (safe_ptr) |
1439 |
++ : "r" (ptr), "r" (current_thread_info()->addr_limit) |
1440 |
++ : "cc"); |
1441 |
++ |
1442 |
++ csdb(); |
1443 |
++ return safe_ptr; |
1444 |
++} |
1445 |
++ |
1446 |
+ /* |
1447 |
+ * The "__xxx" versions of the user access functions do not verify the address |
1448 |
+ * space - it must have been done previously with a separate "access_ok()" |
1449 |
+@@ -244,28 +294,33 @@ do { \ |
1450 |
+ (x) = (__force __typeof__(*(ptr)))__gu_val; \ |
1451 |
+ } while (0) |
1452 |
+ |
1453 |
+-#define __get_user(x, ptr) \ |
1454 |
++#define __get_user_check(x, ptr, err) \ |
1455 |
+ ({ \ |
1456 |
+- int __gu_err = 0; \ |
1457 |
+- __get_user_err((x), (ptr), __gu_err); \ |
1458 |
+- __gu_err; \ |
1459 |
++ __typeof__(*(ptr)) __user *__p = (ptr); \ |
1460 |
++ might_fault(); \ |
1461 |
++ if (access_ok(VERIFY_READ, __p, sizeof(*__p))) { \ |
1462 |
++ __p = uaccess_mask_ptr(__p); \ |
1463 |
++ __get_user_err((x), __p, (err)); \ |
1464 |
++ } else { \ |
1465 |
++ (x) = 0; (err) = -EFAULT; \ |
1466 |
++ } \ |
1467 |
+ }) |
1468 |
+ |
1469 |
+ #define __get_user_error(x, ptr, err) \ |
1470 |
+ ({ \ |
1471 |
+- __get_user_err((x), (ptr), (err)); \ |
1472 |
++ __get_user_check((x), (ptr), (err)); \ |
1473 |
+ (void)0; \ |
1474 |
+ }) |
1475 |
+ |
1476 |
+-#define get_user(x, ptr) \ |
1477 |
++#define __get_user(x, ptr) \ |
1478 |
+ ({ \ |
1479 |
+- __typeof__(*(ptr)) __user *__p = (ptr); \ |
1480 |
+- might_fault(); \ |
1481 |
+- access_ok(VERIFY_READ, __p, sizeof(*__p)) ? \ |
1482 |
+- __get_user((x), __p) : \ |
1483 |
+- ((x) = 0, -EFAULT); \ |
1484 |
++ int __gu_err = 0; \ |
1485 |
++ __get_user_check((x), (ptr), __gu_err); \ |
1486 |
++ __gu_err; \ |
1487 |
+ }) |
1488 |
+ |
1489 |
++#define get_user __get_user |
1490 |
++ |
1491 |
+ #define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature) \ |
1492 |
+ asm volatile( \ |
1493 |
+ "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \ |
1494 |
+@@ -308,43 +363,63 @@ do { \ |
1495 |
+ uaccess_disable_not_uao(); \ |
1496 |
+ } while (0) |
1497 |
+ |
1498 |
+-#define __put_user(x, ptr) \ |
1499 |
++#define __put_user_check(x, ptr, err) \ |
1500 |
+ ({ \ |
1501 |
+- int __pu_err = 0; \ |
1502 |
+- __put_user_err((x), (ptr), __pu_err); \ |
1503 |
+- __pu_err; \ |
1504 |
++ __typeof__(*(ptr)) __user *__p = (ptr); \ |
1505 |
++ might_fault(); \ |
1506 |
++ if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) { \ |
1507 |
++ __p = uaccess_mask_ptr(__p); \ |
1508 |
++ __put_user_err((x), __p, (err)); \ |
1509 |
++ } else { \ |
1510 |
++ (err) = -EFAULT; \ |
1511 |
++ } \ |
1512 |
+ }) |
1513 |
+ |
1514 |
+ #define __put_user_error(x, ptr, err) \ |
1515 |
+ ({ \ |
1516 |
+- __put_user_err((x), (ptr), (err)); \ |
1517 |
++ __put_user_check((x), (ptr), (err)); \ |
1518 |
+ (void)0; \ |
1519 |
+ }) |
1520 |
+ |
1521 |
+-#define put_user(x, ptr) \ |
1522 |
++#define __put_user(x, ptr) \ |
1523 |
+ ({ \ |
1524 |
+- __typeof__(*(ptr)) __user *__p = (ptr); \ |
1525 |
+- might_fault(); \ |
1526 |
+- access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ? \ |
1527 |
+- __put_user((x), __p) : \ |
1528 |
+- -EFAULT; \ |
1529 |
++ int __pu_err = 0; \ |
1530 |
++ __put_user_check((x), (ptr), __pu_err); \ |
1531 |
++ __pu_err; \ |
1532 |
+ }) |
1533 |
+ |
1534 |
++#define put_user __put_user |
1535 |
++ |
1536 |
+ extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n); |
1537 |
+-#define raw_copy_from_user __arch_copy_from_user |
1538 |
++#define raw_copy_from_user(to, from, n) \ |
1539 |
++({ \ |
1540 |
++ __arch_copy_from_user((to), __uaccess_mask_ptr(from), (n)); \ |
1541 |
++}) |
1542 |
++ |
1543 |
+ extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n); |
1544 |
+-#define raw_copy_to_user __arch_copy_to_user |
1545 |
+-extern unsigned long __must_check raw_copy_in_user(void __user *to, const void __user *from, unsigned long n); |
1546 |
+-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n); |
1547 |
++#define raw_copy_to_user(to, from, n) \ |
1548 |
++({ \ |
1549 |
++ __arch_copy_to_user(__uaccess_mask_ptr(to), (from), (n)); \ |
1550 |
++}) |
1551 |
++ |
1552 |
++extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n); |
1553 |
++#define raw_copy_in_user(to, from, n) \ |
1554 |
++({ \ |
1555 |
++ __arch_copy_in_user(__uaccess_mask_ptr(to), \ |
1556 |
++ __uaccess_mask_ptr(from), (n)); \ |
1557 |
++}) |
1558 |
++ |
1559 |
+ #define INLINE_COPY_TO_USER |
1560 |
+ #define INLINE_COPY_FROM_USER |
1561 |
+ |
1562 |
+-static inline unsigned long __must_check clear_user(void __user *to, unsigned long n) |
1563 |
++extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n); |
1564 |
++static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n) |
1565 |
+ { |
1566 |
+ if (access_ok(VERIFY_WRITE, to, n)) |
1567 |
+- n = __clear_user(to, n); |
1568 |
++ n = __arch_clear_user(__uaccess_mask_ptr(to), n); |
1569 |
+ return n; |
1570 |
+ } |
1571 |
++#define clear_user __clear_user |
1572 |
+ |
1573 |
+ extern long strncpy_from_user(char *dest, const char __user *src, long count); |
1574 |
+ |
1575 |
+@@ -358,7 +433,7 @@ extern unsigned long __must_check __copy_user_flushcache(void *to, const void __ |
1576 |
+ static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size) |
1577 |
+ { |
1578 |
+ kasan_check_write(dst, size); |
1579 |
+- return __copy_user_flushcache(dst, src, size); |
1580 |
++ return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size); |
1581 |
+ } |
1582 |
+ #endif |
1583 |
+ |
1584 |
+diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile |
1585 |
+index 2f5ff2a65db3..def8d5623fd1 100644 |
1586 |
+--- a/arch/arm64/kernel/Makefile |
1587 |
++++ b/arch/arm64/kernel/Makefile |
1588 |
+@@ -55,6 +55,10 @@ arm64-obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o |
1589 |
+ arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o |
1590 |
+ arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o |
1591 |
+ |
1592 |
++ifeq ($(CONFIG_KVM),y) |
1593 |
++arm64-obj-$(CONFIG_HARDEN_BRANCH_PREDICTOR) += bpi.o |
1594 |
++endif |
1595 |
++ |
1596 |
+ obj-y += $(arm64-obj-y) vdso/ probes/ |
1597 |
+ obj-m += $(arm64-obj-m) |
1598 |
+ head-y := head.o |
1599 |
+diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c |
1600 |
+index 67368c7329c0..66be504edb6c 100644 |
1601 |
+--- a/arch/arm64/kernel/arm64ksyms.c |
1602 |
++++ b/arch/arm64/kernel/arm64ksyms.c |
1603 |
+@@ -37,8 +37,8 @@ EXPORT_SYMBOL(clear_page); |
1604 |
+ /* user mem (segment) */ |
1605 |
+ EXPORT_SYMBOL(__arch_copy_from_user); |
1606 |
+ EXPORT_SYMBOL(__arch_copy_to_user); |
1607 |
+-EXPORT_SYMBOL(__clear_user); |
1608 |
+-EXPORT_SYMBOL(raw_copy_in_user); |
1609 |
++EXPORT_SYMBOL(__arch_clear_user); |
1610 |
++EXPORT_SYMBOL(__arch_copy_in_user); |
1611 |
+ |
1612 |
+ /* physical memory */ |
1613 |
+ EXPORT_SYMBOL(memstart_addr); |
1614 |
+diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c |
1615 |
+index 71bf088f1e4b..af247d10252f 100644 |
1616 |
+--- a/arch/arm64/kernel/asm-offsets.c |
1617 |
++++ b/arch/arm64/kernel/asm-offsets.c |
1618 |
+@@ -24,6 +24,7 @@ |
1619 |
+ #include <linux/kvm_host.h> |
1620 |
+ #include <linux/suspend.h> |
1621 |
+ #include <asm/cpufeature.h> |
1622 |
++#include <asm/fixmap.h> |
1623 |
+ #include <asm/thread_info.h> |
1624 |
+ #include <asm/memory.h> |
1625 |
+ #include <asm/smp_plat.h> |
1626 |
+@@ -148,11 +149,14 @@ int main(void) |
1627 |
+ DEFINE(ARM_SMCCC_RES_X2_OFFS, offsetof(struct arm_smccc_res, a2)); |
1628 |
+ DEFINE(ARM_SMCCC_QUIRK_ID_OFFS, offsetof(struct arm_smccc_quirk, id)); |
1629 |
+ DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS, offsetof(struct arm_smccc_quirk, state)); |
1630 |
+- |
1631 |
+ BLANK(); |
1632 |
+ DEFINE(HIBERN_PBE_ORIG, offsetof(struct pbe, orig_address)); |
1633 |
+ DEFINE(HIBERN_PBE_ADDR, offsetof(struct pbe, address)); |
1634 |
+ DEFINE(HIBERN_PBE_NEXT, offsetof(struct pbe, next)); |
1635 |
+ DEFINE(ARM64_FTR_SYSVAL, offsetof(struct arm64_ftr_reg, sys_val)); |
1636 |
++ BLANK(); |
1637 |
++#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 |
1638 |
++ DEFINE(TRAMP_VALIAS, TRAMP_VALIAS); |
1639 |
++#endif |
1640 |
+ return 0; |
1641 |
+ } |
1642 |
+diff --git a/arch/arm64/kernel/bpi.S b/arch/arm64/kernel/bpi.S |
1643 |
+new file mode 100644 |
1644 |
+index 000000000000..e5de33513b5d |
1645 |
+--- /dev/null |
1646 |
++++ b/arch/arm64/kernel/bpi.S |
1647 |
+@@ -0,0 +1,83 @@ |
1648 |
++/* |
1649 |
++ * Contains CPU specific branch predictor invalidation sequences |
1650 |
++ * |
1651 |
++ * Copyright (C) 2018 ARM Ltd. |
1652 |
++ * |
1653 |
++ * This program is free software; you can redistribute it and/or modify |
1654 |
++ * it under the terms of the GNU General Public License version 2 as |
1655 |
++ * published by the Free Software Foundation. |
1656 |
++ * |
1657 |
++ * This program is distributed in the hope that it will be useful, |
1658 |
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of |
1659 |
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
1660 |
++ * GNU General Public License for more details. |
1661 |
++ * |
1662 |
++ * You should have received a copy of the GNU General Public License |
1663 |
++ * along with this program. If not, see <http://www.gnu.org/licenses/>. |
1664 |
++ */ |
1665 |
++ |
1666 |
++#include <linux/linkage.h> |
1667 |
++#include <linux/arm-smccc.h> |
1668 |
++ |
1669 |
++.macro ventry target |
1670 |
++ .rept 31 |
1671 |
++ nop |
1672 |
++ .endr |
1673 |
++ b \target |
1674 |
++.endm |
1675 |
++ |
1676 |
++.macro vectors target |
1677 |
++ ventry \target + 0x000 |
1678 |
++ ventry \target + 0x080 |
1679 |
++ ventry \target + 0x100 |
1680 |
++ ventry \target + 0x180 |
1681 |
++ |
1682 |
++ ventry \target + 0x200 |
1683 |
++ ventry \target + 0x280 |
1684 |
++ ventry \target + 0x300 |
1685 |
++ ventry \target + 0x380 |
1686 |
++ |
1687 |
++ ventry \target + 0x400 |
1688 |
++ ventry \target + 0x480 |
1689 |
++ ventry \target + 0x500 |
1690 |
++ ventry \target + 0x580 |
1691 |
++ |
1692 |
++ ventry \target + 0x600 |
1693 |
++ ventry \target + 0x680 |
1694 |
++ ventry \target + 0x700 |
1695 |
++ ventry \target + 0x780 |
1696 |
++.endm |
1697 |
++ |
1698 |
++ .align 11 |
1699 |
++ENTRY(__bp_harden_hyp_vecs_start) |
1700 |
++ .rept 4 |
1701 |
++ vectors __kvm_hyp_vector |
1702 |
++ .endr |
1703 |
++ENTRY(__bp_harden_hyp_vecs_end) |
1704 |
++ |
1705 |
++ENTRY(__qcom_hyp_sanitize_link_stack_start) |
1706 |
++ stp x29, x30, [sp, #-16]! |
1707 |
++ .rept 16 |
1708 |
++ bl . + 4 |
1709 |
++ .endr |
1710 |
++ ldp x29, x30, [sp], #16 |
1711 |
++ENTRY(__qcom_hyp_sanitize_link_stack_end) |
1712 |
++ |
1713 |
++.macro smccc_workaround_1 inst |
1714 |
++ sub sp, sp, #(8 * 4) |
1715 |
++ stp x2, x3, [sp, #(8 * 0)] |
1716 |
++ stp x0, x1, [sp, #(8 * 2)] |
1717 |
++ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1 |
1718 |
++ \inst #0 |
1719 |
++ ldp x2, x3, [sp, #(8 * 0)] |
1720 |
++ ldp x0, x1, [sp, #(8 * 2)] |
1721 |
++ add sp, sp, #(8 * 4) |
1722 |
++.endm |
1723 |
++ |
1724 |
++ENTRY(__smccc_workaround_1_smc_start) |
1725 |
++ smccc_workaround_1 smc |
1726 |
++ENTRY(__smccc_workaround_1_smc_end) |
1727 |
++ |
1728 |
++ENTRY(__smccc_workaround_1_hvc_start) |
1729 |
++ smccc_workaround_1 hvc |
1730 |
++ENTRY(__smccc_workaround_1_hvc_end) |
1731 |
+diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S |
1732 |
+index 65f42d257414..8021b46c9743 100644 |
1733 |
+--- a/arch/arm64/kernel/cpu-reset.S |
1734 |
++++ b/arch/arm64/kernel/cpu-reset.S |
1735 |
+@@ -16,7 +16,7 @@ |
1736 |
+ #include <asm/virt.h> |
1737 |
+ |
1738 |
+ .text |
1739 |
+-.pushsection .idmap.text, "ax" |
1740 |
++.pushsection .idmap.text, "awx" |
1741 |
+ |
1742 |
+ /* |
1743 |
+ * __cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2) - Helper for |
1744 |
+@@ -37,6 +37,7 @@ ENTRY(__cpu_soft_restart) |
1745 |
+ mrs x12, sctlr_el1 |
1746 |
+ ldr x13, =SCTLR_ELx_FLAGS |
1747 |
+ bic x12, x12, x13 |
1748 |
++ pre_disable_mmu_workaround |
1749 |
+ msr sctlr_el1, x12 |
1750 |
+ isb |
1751 |
+ |
1752 |
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c |
1753 |
+index 0e27f86ee709..07823595b7f0 100644 |
1754 |
+--- a/arch/arm64/kernel/cpu_errata.c |
1755 |
++++ b/arch/arm64/kernel/cpu_errata.c |
1756 |
+@@ -30,6 +30,20 @@ is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope) |
1757 |
+ entry->midr_range_max); |
1758 |
+ } |
1759 |
+ |
1760 |
++static bool __maybe_unused |
1761 |
++is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope) |
1762 |
++{ |
1763 |
++ u32 model; |
1764 |
++ |
1765 |
++ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); |
1766 |
++ |
1767 |
++ model = read_cpuid_id(); |
1768 |
++ model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) | |
1769 |
++ MIDR_ARCHITECTURE_MASK; |
1770 |
++ |
1771 |
++ return model == entry->midr_model; |
1772 |
++} |
1773 |
++ |
1774 |
+ static bool |
1775 |
+ has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry, |
1776 |
+ int scope) |
1777 |
+@@ -46,6 +60,174 @@ static int cpu_enable_trap_ctr_access(void *__unused) |
1778 |
+ return 0; |
1779 |
+ } |
1780 |
+ |
1781 |
++#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR |
1782 |
++#include <asm/mmu_context.h> |
1783 |
++#include <asm/cacheflush.h> |
1784 |
++ |
1785 |
++DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data); |
1786 |
++ |
1787 |
++#ifdef CONFIG_KVM |
1788 |
++extern char __qcom_hyp_sanitize_link_stack_start[]; |
1789 |
++extern char __qcom_hyp_sanitize_link_stack_end[]; |
1790 |
++extern char __smccc_workaround_1_smc_start[]; |
1791 |
++extern char __smccc_workaround_1_smc_end[]; |
1792 |
++extern char __smccc_workaround_1_hvc_start[]; |
1793 |
++extern char __smccc_workaround_1_hvc_end[]; |
1794 |
++ |
1795 |
++static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start, |
1796 |
++ const char *hyp_vecs_end) |
1797 |
++{ |
1798 |
++ void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K); |
1799 |
++ int i; |
1800 |
++ |
1801 |
++ for (i = 0; i < SZ_2K; i += 0x80) |
1802 |
++ memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start); |
1803 |
++ |
1804 |
++ flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K); |
1805 |
++} |
1806 |
++ |
1807 |
++static void __install_bp_hardening_cb(bp_hardening_cb_t fn, |
1808 |
++ const char *hyp_vecs_start, |
1809 |
++ const char *hyp_vecs_end) |
1810 |
++{ |
1811 |
++ static int last_slot = -1; |
1812 |
++ static DEFINE_SPINLOCK(bp_lock); |
1813 |
++ int cpu, slot = -1; |
1814 |
++ |
1815 |
++ spin_lock(&bp_lock); |
1816 |
++ for_each_possible_cpu(cpu) { |
1817 |
++ if (per_cpu(bp_hardening_data.fn, cpu) == fn) { |
1818 |
++ slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu); |
1819 |
++ break; |
1820 |
++ } |
1821 |
++ } |
1822 |
++ |
1823 |
++ if (slot == -1) { |
1824 |
++ last_slot++; |
1825 |
++ BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start) |
1826 |
++ / SZ_2K) <= last_slot); |
1827 |
++ slot = last_slot; |
1828 |
++ __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end); |
1829 |
++ } |
1830 |
++ |
1831 |
++ __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot); |
1832 |
++ __this_cpu_write(bp_hardening_data.fn, fn); |
1833 |
++ spin_unlock(&bp_lock); |
1834 |
++} |
1835 |
++#else |
1836 |
++#define __qcom_hyp_sanitize_link_stack_start NULL |
1837 |
++#define __qcom_hyp_sanitize_link_stack_end NULL |
1838 |
++#define __smccc_workaround_1_smc_start NULL |
1839 |
++#define __smccc_workaround_1_smc_end NULL |
1840 |
++#define __smccc_workaround_1_hvc_start NULL |
1841 |
++#define __smccc_workaround_1_hvc_end NULL |
1842 |
++ |
1843 |
++static void __install_bp_hardening_cb(bp_hardening_cb_t fn, |
1844 |
++ const char *hyp_vecs_start, |
1845 |
++ const char *hyp_vecs_end) |
1846 |
++{ |
1847 |
++ __this_cpu_write(bp_hardening_data.fn, fn); |
1848 |
++} |
1849 |
++#endif /* CONFIG_KVM */ |
1850 |
++ |
1851 |
++static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry, |
1852 |
++ bp_hardening_cb_t fn, |
1853 |
++ const char *hyp_vecs_start, |
1854 |
++ const char *hyp_vecs_end) |
1855 |
++{ |
1856 |
++ u64 pfr0; |
1857 |
++ |
1858 |
++ if (!entry->matches(entry, SCOPE_LOCAL_CPU)) |
1859 |
++ return; |
1860 |
++ |
1861 |
++ pfr0 = read_cpuid(ID_AA64PFR0_EL1); |
1862 |
++ if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT)) |
1863 |
++ return; |
1864 |
++ |
1865 |
++ __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end); |
1866 |
++} |
1867 |
++ |
1868 |
++#include <uapi/linux/psci.h> |
1869 |
++#include <linux/arm-smccc.h> |
1870 |
++#include <linux/psci.h> |
1871 |
++ |
1872 |
++static void call_smc_arch_workaround_1(void) |
1873 |
++{ |
1874 |
++ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); |
1875 |
++} |
1876 |
++ |
1877 |
++static void call_hvc_arch_workaround_1(void) |
1878 |
++{ |
1879 |
++ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); |
1880 |
++} |
1881 |
++ |
1882 |
++static int enable_smccc_arch_workaround_1(void *data) |
1883 |
++{ |
1884 |
++ const struct arm64_cpu_capabilities *entry = data; |
1885 |
++ bp_hardening_cb_t cb; |
1886 |
++ void *smccc_start, *smccc_end; |
1887 |
++ struct arm_smccc_res res; |
1888 |
++ |
1889 |
++ if (!entry->matches(entry, SCOPE_LOCAL_CPU)) |
1890 |
++ return 0; |
1891 |
++ |
1892 |
++ if (psci_ops.smccc_version == SMCCC_VERSION_1_0) |
1893 |
++ return 0; |
1894 |
++ |
1895 |
++ switch (psci_ops.conduit) { |
1896 |
++ case PSCI_CONDUIT_HVC: |
1897 |
++ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, |
1898 |
++ ARM_SMCCC_ARCH_WORKAROUND_1, &res); |
1899 |
++ if (res.a0) |
1900 |
++ return 0; |
1901 |
++ cb = call_hvc_arch_workaround_1; |
1902 |
++ smccc_start = __smccc_workaround_1_hvc_start; |
1903 |
++ smccc_end = __smccc_workaround_1_hvc_end; |
1904 |
++ break; |
1905 |
++ |
1906 |
++ case PSCI_CONDUIT_SMC: |
1907 |
++ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, |
1908 |
++ ARM_SMCCC_ARCH_WORKAROUND_1, &res); |
1909 |
++ if (res.a0) |
1910 |
++ return 0; |
1911 |
++ cb = call_smc_arch_workaround_1; |
1912 |
++ smccc_start = __smccc_workaround_1_smc_start; |
1913 |
++ smccc_end = __smccc_workaround_1_smc_end; |
1914 |
++ break; |
1915 |
++ |
1916 |
++ default: |
1917 |
++ return 0; |
1918 |
++ } |
1919 |
++ |
1920 |
++ install_bp_hardening_cb(entry, cb, smccc_start, smccc_end); |
1921 |
++ |
1922 |
++ return 0; |
1923 |
++} |
1924 |
++ |
1925 |
++static void qcom_link_stack_sanitization(void) |
1926 |
++{ |
1927 |
++ u64 tmp; |
1928 |
++ |
1929 |
++ asm volatile("mov %0, x30 \n" |
1930 |
++ ".rept 16 \n" |
1931 |
++ "bl . + 4 \n" |
1932 |
++ ".endr \n" |
1933 |
++ "mov x30, %0 \n" |
1934 |
++ : "=&r" (tmp)); |
1935 |
++} |
1936 |
++ |
1937 |
++static int qcom_enable_link_stack_sanitization(void *data) |
1938 |
++{ |
1939 |
++ const struct arm64_cpu_capabilities *entry = data; |
1940 |
++ |
1941 |
++ install_bp_hardening_cb(entry, qcom_link_stack_sanitization, |
1942 |
++ __qcom_hyp_sanitize_link_stack_start, |
1943 |
++ __qcom_hyp_sanitize_link_stack_end); |
1944 |
++ |
1945 |
++ return 0; |
1946 |
++} |
1947 |
++#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */ |
1948 |
++ |
1949 |
+ #define MIDR_RANGE(model, min, max) \ |
1950 |
+ .def_scope = SCOPE_LOCAL_CPU, \ |
1951 |
+ .matches = is_affected_midr_range, \ |
1952 |
+@@ -169,6 +351,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = { |
1953 |
+ MIDR_CPU_VAR_REV(0, 0), |
1954 |
+ MIDR_CPU_VAR_REV(0, 0)), |
1955 |
+ }, |
1956 |
++ { |
1957 |
++ .desc = "Qualcomm Technologies Kryo erratum 1003", |
1958 |
++ .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003, |
1959 |
++ .def_scope = SCOPE_LOCAL_CPU, |
1960 |
++ .midr_model = MIDR_QCOM_KRYO, |
1961 |
++ .matches = is_kryo_midr, |
1962 |
++ }, |
1963 |
+ #endif |
1964 |
+ #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009 |
1965 |
+ { |
1966 |
+@@ -186,6 +375,47 @@ const struct arm64_cpu_capabilities arm64_errata[] = { |
1967 |
+ .capability = ARM64_WORKAROUND_858921, |
1968 |
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), |
1969 |
+ }, |
1970 |
++#endif |
1971 |
++#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR |
1972 |
++ { |
1973 |
++ .capability = ARM64_HARDEN_BRANCH_PREDICTOR, |
1974 |
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), |
1975 |
++ .enable = enable_smccc_arch_workaround_1, |
1976 |
++ }, |
1977 |
++ { |
1978 |
++ .capability = ARM64_HARDEN_BRANCH_PREDICTOR, |
1979 |
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), |
1980 |
++ .enable = enable_smccc_arch_workaround_1, |
1981 |
++ }, |
1982 |
++ { |
1983 |
++ .capability = ARM64_HARDEN_BRANCH_PREDICTOR, |
1984 |
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), |
1985 |
++ .enable = enable_smccc_arch_workaround_1, |
1986 |
++ }, |
1987 |
++ { |
1988 |
++ .capability = ARM64_HARDEN_BRANCH_PREDICTOR, |
1989 |
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A75), |
1990 |
++ .enable = enable_smccc_arch_workaround_1, |
1991 |
++ }, |
1992 |
++ { |
1993 |
++ .capability = ARM64_HARDEN_BRANCH_PREDICTOR, |
1994 |
++ MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1), |
1995 |
++ .enable = qcom_enable_link_stack_sanitization, |
1996 |
++ }, |
1997 |
++ { |
1998 |
++ .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT, |
1999 |
++ MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1), |
2000 |
++ }, |
2001 |
++ { |
2002 |
++ .capability = ARM64_HARDEN_BRANCH_PREDICTOR, |
2003 |
++ MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), |
2004 |
++ .enable = enable_smccc_arch_workaround_1, |
2005 |
++ }, |
2006 |
++ { |
2007 |
++ .capability = ARM64_HARDEN_BRANCH_PREDICTOR, |
2008 |
++ MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), |
2009 |
++ .enable = enable_smccc_arch_workaround_1, |
2010 |
++ }, |
2011 |
+ #endif |
2012 |
+ { |
2013 |
+ } |
2014 |
+@@ -200,15 +430,18 @@ void verify_local_cpu_errata_workarounds(void) |
2015 |
+ { |
2016 |
+ const struct arm64_cpu_capabilities *caps = arm64_errata; |
2017 |
+ |
2018 |
+- for (; caps->matches; caps++) |
2019 |
+- if (!cpus_have_cap(caps->capability) && |
2020 |
+- caps->matches(caps, SCOPE_LOCAL_CPU)) { |
2021 |
++ for (; caps->matches; caps++) { |
2022 |
++ if (cpus_have_cap(caps->capability)) { |
2023 |
++ if (caps->enable) |
2024 |
++ caps->enable((void *)caps); |
2025 |
++ } else if (caps->matches(caps, SCOPE_LOCAL_CPU)) { |
2026 |
+ pr_crit("CPU%d: Requires work around for %s, not detected" |
2027 |
+ " at boot time\n", |
2028 |
+ smp_processor_id(), |
2029 |
+ caps->desc ? : "an erratum"); |
2030 |
+ cpu_die_early(); |
2031 |
+ } |
2032 |
++ } |
2033 |
+ } |
2034 |
+ |
2035 |
+ void update_cpu_errata_workarounds(void) |
2036 |
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c |
2037 |
+index 21e2c95d24e7..582142ae92e1 100644 |
2038 |
+--- a/arch/arm64/kernel/cpufeature.c |
2039 |
++++ b/arch/arm64/kernel/cpufeature.c |
2040 |
+@@ -125,6 +125,8 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = { |
2041 |
+ }; |
2042 |
+ |
2043 |
+ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { |
2044 |
++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0), |
2045 |
++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0), |
2046 |
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0), |
2047 |
+ S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI), |
2048 |
+ S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI), |
2049 |
+@@ -796,6 +798,86 @@ static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unus |
2050 |
+ ID_AA64PFR0_FP_SHIFT) < 0; |
2051 |
+ } |
2052 |
+ |
2053 |
++#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 |
2054 |
++static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */ |
2055 |
++ |
2056 |
++static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, |
2057 |
++ int __unused) |
2058 |
++{ |
2059 |
++ char const *str = "command line option"; |
2060 |
++ u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); |
2061 |
++ |
2062 |
++ /* |
2063 |
++ * For reasons that aren't entirely clear, enabling KPTI on Cavium |
2064 |
++ * ThunderX leads to apparent I-cache corruption of kernel text, which |
2065 |
++ * ends as well as you might imagine. Don't even try. |
2066 |
++ */ |
2067 |
++ if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) { |
2068 |
++ str = "ARM64_WORKAROUND_CAVIUM_27456"; |
2069 |
++ __kpti_forced = -1; |
2070 |
++ } |
2071 |
++ |
2072 |
++ /* Forced? */ |
2073 |
++ if (__kpti_forced) { |
2074 |
++ pr_info_once("kernel page table isolation forced %s by %s\n", |
2075 |
++ __kpti_forced > 0 ? "ON" : "OFF", str); |
2076 |
++ return __kpti_forced > 0; |
2077 |
++ } |
2078 |
++ |
2079 |
++ /* Useful for KASLR robustness */ |
2080 |
++ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) |
2081 |
++ return true; |
2082 |
++ |
2083 |
++ /* Don't force KPTI for CPUs that are not vulnerable */ |
2084 |
++ switch (read_cpuid_id() & MIDR_CPU_MODEL_MASK) { |
2085 |
++ case MIDR_CAVIUM_THUNDERX2: |
2086 |
++ case MIDR_BRCM_VULCAN: |
2087 |
++ return false; |
2088 |
++ } |
2089 |
++ |
2090 |
++ /* Defer to CPU feature registers */ |
2091 |
++ return !cpuid_feature_extract_unsigned_field(pfr0, |
2092 |
++ ID_AA64PFR0_CSV3_SHIFT); |
2093 |
++} |
2094 |
++ |
2095 |
++static int kpti_install_ng_mappings(void *__unused) |
2096 |
++{ |
2097 |
++ typedef void (kpti_remap_fn)(int, int, phys_addr_t); |
2098 |
++ extern kpti_remap_fn idmap_kpti_install_ng_mappings; |
2099 |
++ kpti_remap_fn *remap_fn; |
2100 |
++ |
2101 |
++ static bool kpti_applied = false; |
2102 |
++ int cpu = smp_processor_id(); |
2103 |
++ |
2104 |
++ if (kpti_applied) |
2105 |
++ return 0; |
2106 |
++ |
2107 |
++ remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings); |
2108 |
++ |
2109 |
++ cpu_install_idmap(); |
2110 |
++ remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir)); |
2111 |
++ cpu_uninstall_idmap(); |
2112 |
++ |
2113 |
++ if (!cpu) |
2114 |
++ kpti_applied = true; |
2115 |
++ |
2116 |
++ return 0; |
2117 |
++} |
2118 |
++ |
2119 |
++static int __init parse_kpti(char *str) |
2120 |
++{ |
2121 |
++ bool enabled; |
2122 |
++ int ret = strtobool(str, &enabled); |
2123 |
++ |
2124 |
++ if (ret) |
2125 |
++ return ret; |
2126 |
++ |
2127 |
++ __kpti_forced = enabled ? 1 : -1; |
2128 |
++ return 0; |
2129 |
++} |
2130 |
++__setup("kpti=", parse_kpti); |
2131 |
++#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ |
2132 |
++ |
2133 |
+ static const struct arm64_cpu_capabilities arm64_features[] = { |
2134 |
+ { |
2135 |
+ .desc = "GIC system register CPU interface", |
2136 |
+@@ -882,6 +964,15 @@ static const struct arm64_cpu_capabilities arm64_features[] = { |
2137 |
+ .def_scope = SCOPE_SYSTEM, |
2138 |
+ .matches = hyp_offset_low, |
2139 |
+ }, |
2140 |
++#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 |
2141 |
++ { |
2142 |
++ .desc = "Kernel page table isolation (KPTI)", |
2143 |
++ .capability = ARM64_UNMAP_KERNEL_AT_EL0, |
2144 |
++ .def_scope = SCOPE_SYSTEM, |
2145 |
++ .matches = unmap_kernel_at_el0, |
2146 |
++ .enable = kpti_install_ng_mappings, |
2147 |
++ }, |
2148 |
++#endif |
2149 |
+ { |
2150 |
+ /* FP/SIMD is not implemented */ |
2151 |
+ .capability = ARM64_HAS_NO_FPSIMD, |
2152 |
+@@ -1000,6 +1091,25 @@ static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps) |
2153 |
+ cap_set_elf_hwcap(hwcaps); |
2154 |
+ } |
2155 |
+ |
2156 |
++/* |
2157 |
++ * Check if the current CPU has a given feature capability. |
2158 |
++ * Should be called from non-preemptible context. |
2159 |
++ */ |
2160 |
++static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array, |
2161 |
++ unsigned int cap) |
2162 |
++{ |
2163 |
++ const struct arm64_cpu_capabilities *caps; |
2164 |
++ |
2165 |
++ if (WARN_ON(preemptible())) |
2166 |
++ return false; |
2167 |
++ |
2168 |
++ for (caps = cap_array; caps->matches; caps++) |
2169 |
++ if (caps->capability == cap && |
2170 |
++ caps->matches(caps, SCOPE_LOCAL_CPU)) |
2171 |
++ return true; |
2172 |
++ return false; |
2173 |
++} |
2174 |
++ |
2175 |
+ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps, |
2176 |
+ const char *info) |
2177 |
+ { |
2178 |
+@@ -1035,7 +1145,7 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps) |
2179 |
+ * uses an IPI, giving us a PSTATE that disappears when |
2180 |
+ * we return. |
2181 |
+ */ |
2182 |
+- stop_machine(caps->enable, NULL, cpu_online_mask); |
2183 |
++ stop_machine(caps->enable, (void *)caps, cpu_online_mask); |
2184 |
+ } |
2185 |
+ } |
2186 |
+ } |
2187 |
+@@ -1078,8 +1188,9 @@ verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps) |
2188 |
+ } |
2189 |
+ |
2190 |
+ static void |
2191 |
+-verify_local_cpu_features(const struct arm64_cpu_capabilities *caps) |
2192 |
++verify_local_cpu_features(const struct arm64_cpu_capabilities *caps_list) |
2193 |
+ { |
2194 |
++ const struct arm64_cpu_capabilities *caps = caps_list; |
2195 |
+ for (; caps->matches; caps++) { |
2196 |
+ if (!cpus_have_cap(caps->capability)) |
2197 |
+ continue; |
2198 |
+@@ -1087,13 +1198,13 @@ verify_local_cpu_features(const struct arm64_cpu_capabilities *caps) |
2199 |
+ * If the new CPU misses an advertised feature, we cannot proceed |
2200 |
+ * further, park the cpu. |
2201 |
+ */ |
2202 |
+- if (!caps->matches(caps, SCOPE_LOCAL_CPU)) { |
2203 |
++ if (!__this_cpu_has_cap(caps_list, caps->capability)) { |
2204 |
+ pr_crit("CPU%d: missing feature: %s\n", |
2205 |
+ smp_processor_id(), caps->desc); |
2206 |
+ cpu_die_early(); |
2207 |
+ } |
2208 |
+ if (caps->enable) |
2209 |
+- caps->enable(NULL); |
2210 |
++ caps->enable((void *)caps); |
2211 |
+ } |
2212 |
+ } |
2213 |
+ |
2214 |
+@@ -1148,25 +1259,6 @@ static void __init mark_const_caps_ready(void) |
2215 |
+ static_branch_enable(&arm64_const_caps_ready); |
2216 |
+ } |
2217 |
+ |
2218 |
+-/* |
2219 |
+- * Check if the current CPU has a given feature capability. |
2220 |
+- * Should be called from non-preemptible context. |
2221 |
+- */ |
2222 |
+-static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array, |
2223 |
+- unsigned int cap) |
2224 |
+-{ |
2225 |
+- const struct arm64_cpu_capabilities *caps; |
2226 |
+- |
2227 |
+- if (WARN_ON(preemptible())) |
2228 |
+- return false; |
2229 |
+- |
2230 |
+- for (caps = cap_array; caps->desc; caps++) |
2231 |
+- if (caps->capability == cap && caps->matches) |
2232 |
+- return caps->matches(caps, SCOPE_LOCAL_CPU); |
2233 |
+- |
2234 |
+- return false; |
2235 |
+-} |
2236 |
+- |
2237 |
+ extern const struct arm64_cpu_capabilities arm64_errata[]; |
2238 |
+ |
2239 |
+ bool this_cpu_has_cap(unsigned int cap) |
2240 |
+diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S |
2241 |
+index 4e6ad355bd05..6b9736c3fb56 100644 |
2242 |
+--- a/arch/arm64/kernel/efi-entry.S |
2243 |
++++ b/arch/arm64/kernel/efi-entry.S |
2244 |
+@@ -96,6 +96,7 @@ ENTRY(entry) |
2245 |
+ mrs x0, sctlr_el2 |
2246 |
+ bic x0, x0, #1 << 0 // clear SCTLR.M |
2247 |
+ bic x0, x0, #1 << 2 // clear SCTLR.C |
2248 |
++ pre_disable_mmu_workaround |
2249 |
+ msr sctlr_el2, x0 |
2250 |
+ isb |
2251 |
+ b 2f |
2252 |
+@@ -103,6 +104,7 @@ ENTRY(entry) |
2253 |
+ mrs x0, sctlr_el1 |
2254 |
+ bic x0, x0, #1 << 0 // clear SCTLR.M |
2255 |
+ bic x0, x0, #1 << 2 // clear SCTLR.C |
2256 |
++ pre_disable_mmu_workaround |
2257 |
+ msr sctlr_el1, x0 |
2258 |
+ isb |
2259 |
+ 2: |
2260 |
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S |
2261 |
+index e1c59d4008a8..93958d1341bb 100644 |
2262 |
+--- a/arch/arm64/kernel/entry.S |
2263 |
++++ b/arch/arm64/kernel/entry.S |
2264 |
+@@ -29,6 +29,8 @@ |
2265 |
+ #include <asm/esr.h> |
2266 |
+ #include <asm/irq.h> |
2267 |
+ #include <asm/memory.h> |
2268 |
++#include <asm/mmu.h> |
2269 |
++#include <asm/processor.h> |
2270 |
+ #include <asm/ptrace.h> |
2271 |
+ #include <asm/thread_info.h> |
2272 |
+ #include <asm/asm-uaccess.h> |
2273 |
+@@ -69,8 +71,21 @@ |
2274 |
+ #define BAD_FIQ 2 |
2275 |
+ #define BAD_ERROR 3 |
2276 |
+ |
2277 |
+- .macro kernel_ventry label |
2278 |
++ .macro kernel_ventry, el, label, regsize = 64 |
2279 |
+ .align 7 |
2280 |
++#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 |
2281 |
++alternative_if ARM64_UNMAP_KERNEL_AT_EL0 |
2282 |
++ .if \el == 0 |
2283 |
++ .if \regsize == 64 |
2284 |
++ mrs x30, tpidrro_el0 |
2285 |
++ msr tpidrro_el0, xzr |
2286 |
++ .else |
2287 |
++ mov x30, xzr |
2288 |
++ .endif |
2289 |
++ .endif |
2290 |
++alternative_else_nop_endif |
2291 |
++#endif |
2292 |
++ |
2293 |
+ sub sp, sp, #S_FRAME_SIZE |
2294 |
+ #ifdef CONFIG_VMAP_STACK |
2295 |
+ /* |
2296 |
+@@ -82,7 +97,7 @@ |
2297 |
+ tbnz x0, #THREAD_SHIFT, 0f |
2298 |
+ sub x0, sp, x0 // x0'' = sp' - x0' = (sp + x0) - sp = x0 |
2299 |
+ sub sp, sp, x0 // sp'' = sp' - x0 = (sp + x0) - x0 = sp |
2300 |
+- b \label |
2301 |
++ b el\()\el\()_\label |
2302 |
+ |
2303 |
+ 0: |
2304 |
+ /* |
2305 |
+@@ -114,7 +129,12 @@ |
2306 |
+ sub sp, sp, x0 |
2307 |
+ mrs x0, tpidrro_el0 |
2308 |
+ #endif |
2309 |
+- b \label |
2310 |
++ b el\()\el\()_\label |
2311 |
++ .endm |
2312 |
++ |
2313 |
++ .macro tramp_alias, dst, sym |
2314 |
++ mov_q \dst, TRAMP_VALIAS |
2315 |
++ add \dst, \dst, #(\sym - .entry.tramp.text) |
2316 |
+ .endm |
2317 |
+ |
2318 |
+ .macro kernel_entry, el, regsize = 64 |
2319 |
+@@ -147,10 +167,10 @@ |
2320 |
+ .else |
2321 |
+ add x21, sp, #S_FRAME_SIZE |
2322 |
+ get_thread_info tsk |
2323 |
+- /* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */ |
2324 |
++ /* Save the task's original addr_limit and set USER_DS */ |
2325 |
+ ldr x20, [tsk, #TSK_TI_ADDR_LIMIT] |
2326 |
+ str x20, [sp, #S_ORIG_ADDR_LIMIT] |
2327 |
+- mov x20, #TASK_SIZE_64 |
2328 |
++ mov x20, #USER_DS |
2329 |
+ str x20, [tsk, #TSK_TI_ADDR_LIMIT] |
2330 |
+ /* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */ |
2331 |
+ .endif /* \el == 0 */ |
2332 |
+@@ -185,7 +205,7 @@ alternative_else_nop_endif |
2333 |
+ |
2334 |
+ .if \el != 0 |
2335 |
+ mrs x21, ttbr0_el1 |
2336 |
+- tst x21, #0xffff << 48 // Check for the reserved ASID |
2337 |
++ tst x21, #TTBR_ASID_MASK // Check for the reserved ASID |
2338 |
+ orr x23, x23, #PSR_PAN_BIT // Set the emulated PAN in the saved SPSR |
2339 |
+ b.eq 1f // TTBR0 access already disabled |
2340 |
+ and x23, x23, #~PSR_PAN_BIT // Clear the emulated PAN in the saved SPSR |
2341 |
+@@ -246,7 +266,7 @@ alternative_else_nop_endif |
2342 |
+ tbnz x22, #22, 1f // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set |
2343 |
+ .endif |
2344 |
+ |
2345 |
+- __uaccess_ttbr0_enable x0 |
2346 |
++ __uaccess_ttbr0_enable x0, x1 |
2347 |
+ |
2348 |
+ .if \el == 0 |
2349 |
+ /* |
2350 |
+@@ -255,7 +275,7 @@ alternative_else_nop_endif |
2351 |
+ * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache |
2352 |
+ * corruption). |
2353 |
+ */ |
2354 |
+- post_ttbr0_update_workaround |
2355 |
++ bl post_ttbr_update_workaround |
2356 |
+ .endif |
2357 |
+ 1: |
2358 |
+ .if \el != 0 |
2359 |
+@@ -267,18 +287,20 @@ alternative_else_nop_endif |
2360 |
+ .if \el == 0 |
2361 |
+ ldr x23, [sp, #S_SP] // load return stack pointer |
2362 |
+ msr sp_el0, x23 |
2363 |
++ tst x22, #PSR_MODE32_BIT // native task? |
2364 |
++ b.eq 3f |
2365 |
++ |
2366 |
+ #ifdef CONFIG_ARM64_ERRATUM_845719 |
2367 |
+ alternative_if ARM64_WORKAROUND_845719 |
2368 |
+- tbz x22, #4, 1f |
2369 |
+ #ifdef CONFIG_PID_IN_CONTEXTIDR |
2370 |
+ mrs x29, contextidr_el1 |
2371 |
+ msr contextidr_el1, x29 |
2372 |
+ #else |
2373 |
+ msr contextidr_el1, xzr |
2374 |
+ #endif |
2375 |
+-1: |
2376 |
+ alternative_else_nop_endif |
2377 |
+ #endif |
2378 |
++3: |
2379 |
+ .endif |
2380 |
+ |
2381 |
+ msr elr_el1, x21 // set up the return data |
2382 |
+@@ -300,7 +322,21 @@ alternative_else_nop_endif |
2383 |
+ ldp x28, x29, [sp, #16 * 14] |
2384 |
+ ldr lr, [sp, #S_LR] |
2385 |
+ add sp, sp, #S_FRAME_SIZE // restore sp |
2386 |
+- eret // return to kernel |
2387 |
++ |
2388 |
++ .if \el == 0 |
2389 |
++alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0 |
2390 |
++#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 |
2391 |
++ bne 4f |
2392 |
++ msr far_el1, x30 |
2393 |
++ tramp_alias x30, tramp_exit_native |
2394 |
++ br x30 |
2395 |
++4: |
2396 |
++ tramp_alias x30, tramp_exit_compat |
2397 |
++ br x30 |
2398 |
++#endif |
2399 |
++ .else |
2400 |
++ eret |
2401 |
++ .endif |
2402 |
+ .endm |
2403 |
+ |
2404 |
+ .macro irq_stack_entry |
2405 |
+@@ -340,6 +376,7 @@ alternative_else_nop_endif |
2406 |
+ * x7 is reserved for the system call number in 32-bit mode. |
2407 |
+ */ |
2408 |
+ wsc_nr .req w25 // number of system calls |
2409 |
++xsc_nr .req x25 // number of system calls (zero-extended) |
2410 |
+ wscno .req w26 // syscall number |
2411 |
+ xscno .req x26 // syscall number (zero-extended) |
2412 |
+ stbl .req x27 // syscall table pointer |
2413 |
+@@ -365,31 +402,31 @@ tsk .req x28 // current thread_info |
2414 |
+ |
2415 |
+ .align 11 |
2416 |
+ ENTRY(vectors) |
2417 |
+- kernel_ventry el1_sync_invalid // Synchronous EL1t |
2418 |
+- kernel_ventry el1_irq_invalid // IRQ EL1t |
2419 |
+- kernel_ventry el1_fiq_invalid // FIQ EL1t |
2420 |
+- kernel_ventry el1_error_invalid // Error EL1t |
2421 |
++ kernel_ventry 1, sync_invalid // Synchronous EL1t |
2422 |
++ kernel_ventry 1, irq_invalid // IRQ EL1t |
2423 |
++ kernel_ventry 1, fiq_invalid // FIQ EL1t |
2424 |
++ kernel_ventry 1, error_invalid // Error EL1t |
2425 |
+ |
2426 |
+- kernel_ventry el1_sync // Synchronous EL1h |
2427 |
+- kernel_ventry el1_irq // IRQ EL1h |
2428 |
+- kernel_ventry el1_fiq_invalid // FIQ EL1h |
2429 |
+- kernel_ventry el1_error_invalid // Error EL1h |
2430 |
++ kernel_ventry 1, sync // Synchronous EL1h |
2431 |
++ kernel_ventry 1, irq // IRQ EL1h |
2432 |
++ kernel_ventry 1, fiq_invalid // FIQ EL1h |
2433 |
++ kernel_ventry 1, error_invalid // Error EL1h |
2434 |
+ |
2435 |
+- kernel_ventry el0_sync // Synchronous 64-bit EL0 |
2436 |
+- kernel_ventry el0_irq // IRQ 64-bit EL0 |
2437 |
+- kernel_ventry el0_fiq_invalid // FIQ 64-bit EL0 |
2438 |
+- kernel_ventry el0_error_invalid // Error 64-bit EL0 |
2439 |
++ kernel_ventry 0, sync // Synchronous 64-bit EL0 |
2440 |
++ kernel_ventry 0, irq // IRQ 64-bit EL0 |
2441 |
++ kernel_ventry 0, fiq_invalid // FIQ 64-bit EL0 |
2442 |
++ kernel_ventry 0, error_invalid // Error 64-bit EL0 |
2443 |
+ |
2444 |
+ #ifdef CONFIG_COMPAT |
2445 |
+- kernel_ventry el0_sync_compat // Synchronous 32-bit EL0 |
2446 |
+- kernel_ventry el0_irq_compat // IRQ 32-bit EL0 |
2447 |
+- kernel_ventry el0_fiq_invalid_compat // FIQ 32-bit EL0 |
2448 |
+- kernel_ventry el0_error_invalid_compat // Error 32-bit EL0 |
2449 |
++ kernel_ventry 0, sync_compat, 32 // Synchronous 32-bit EL0 |
2450 |
++ kernel_ventry 0, irq_compat, 32 // IRQ 32-bit EL0 |
2451 |
++ kernel_ventry 0, fiq_invalid_compat, 32 // FIQ 32-bit EL0 |
2452 |
++ kernel_ventry 0, error_invalid_compat, 32 // Error 32-bit EL0 |
2453 |
+ #else |
2454 |
+- kernel_ventry el0_sync_invalid // Synchronous 32-bit EL0 |
2455 |
+- kernel_ventry el0_irq_invalid // IRQ 32-bit EL0 |
2456 |
+- kernel_ventry el0_fiq_invalid // FIQ 32-bit EL0 |
2457 |
+- kernel_ventry el0_error_invalid // Error 32-bit EL0 |
2458 |
++ kernel_ventry 0, sync_invalid, 32 // Synchronous 32-bit EL0 |
2459 |
++ kernel_ventry 0, irq_invalid, 32 // IRQ 32-bit EL0 |
2460 |
++ kernel_ventry 0, fiq_invalid, 32 // FIQ 32-bit EL0 |
2461 |
++ kernel_ventry 0, error_invalid, 32 // Error 32-bit EL0 |
2462 |
+ #endif |
2463 |
+ END(vectors) |
2464 |
+ |
2465 |
+@@ -687,13 +724,15 @@ el0_ia: |
2466 |
+ * Instruction abort handling |
2467 |
+ */ |
2468 |
+ mrs x26, far_el1 |
2469 |
+- // enable interrupts before calling the main handler |
2470 |
+- enable_dbg_and_irq |
2471 |
++ enable_dbg |
2472 |
++#ifdef CONFIG_TRACE_IRQFLAGS |
2473 |
++ bl trace_hardirqs_off |
2474 |
++#endif |
2475 |
+ ct_user_exit |
2476 |
+ mov x0, x26 |
2477 |
+ mov x1, x25 |
2478 |
+ mov x2, sp |
2479 |
+- bl do_mem_abort |
2480 |
++ bl do_el0_ia_bp_hardening |
2481 |
+ b ret_to_user |
2482 |
+ el0_fpsimd_acc: |
2483 |
+ /* |
2484 |
+@@ -720,8 +759,10 @@ el0_sp_pc: |
2485 |
+ * Stack or PC alignment exception handling |
2486 |
+ */ |
2487 |
+ mrs x26, far_el1 |
2488 |
+- // enable interrupts before calling the main handler |
2489 |
+- enable_dbg_and_irq |
2490 |
++ enable_dbg |
2491 |
++#ifdef CONFIG_TRACE_IRQFLAGS |
2492 |
++ bl trace_hardirqs_off |
2493 |
++#endif |
2494 |
+ ct_user_exit |
2495 |
+ mov x0, x26 |
2496 |
+ mov x1, x25 |
2497 |
+@@ -780,6 +821,11 @@ el0_irq_naked: |
2498 |
+ #endif |
2499 |
+ |
2500 |
+ ct_user_exit |
2501 |
++#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR |
2502 |
++ tbz x22, #55, 1f |
2503 |
++ bl do_el0_irq_bp_hardening |
2504 |
++1: |
2505 |
++#endif |
2506 |
+ irq_handler |
2507 |
+ |
2508 |
+ #ifdef CONFIG_TRACE_IRQFLAGS |
2509 |
+@@ -848,6 +894,7 @@ el0_svc_naked: // compat entry point |
2510 |
+ b.ne __sys_trace |
2511 |
+ cmp wscno, wsc_nr // check upper syscall limit |
2512 |
+ b.hs ni_sys |
2513 |
++ mask_nospec64 xscno, xsc_nr, x19 // enforce bounds for syscall number |
2514 |
+ ldr x16, [stbl, xscno, lsl #3] // address in the syscall table |
2515 |
+ blr x16 // call sys_* routine |
2516 |
+ b ret_fast_syscall |
2517 |
+@@ -895,6 +942,117 @@ __ni_sys_trace: |
2518 |
+ |
2519 |
+ .popsection // .entry.text |
2520 |
+ |
2521 |
++#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 |
2522 |
++/* |
2523 |
++ * Exception vectors trampoline. |
2524 |
++ */ |
2525 |
++ .pushsection ".entry.tramp.text", "ax" |
2526 |
++ |
2527 |
++ .macro tramp_map_kernel, tmp |
2528 |
++ mrs \tmp, ttbr1_el1 |
2529 |
++ sub \tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE) |
2530 |
++ bic \tmp, \tmp, #USER_ASID_FLAG |
2531 |
++ msr ttbr1_el1, \tmp |
2532 |
++#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003 |
2533 |
++alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003 |
2534 |
++ /* ASID already in \tmp[63:48] */ |
2535 |
++ movk \tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12) |
2536 |
++ movk \tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12) |
2537 |
++ /* 2MB boundary containing the vectors, so we nobble the walk cache */ |
2538 |
++ movk \tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12) |
2539 |
++ isb |
2540 |
++ tlbi vae1, \tmp |
2541 |
++ dsb nsh |
2542 |
++alternative_else_nop_endif |
2543 |
++#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */ |
2544 |
++ .endm |
2545 |
++ |
2546 |
++ .macro tramp_unmap_kernel, tmp |
2547 |
++ mrs \tmp, ttbr1_el1 |
2548 |
++ add \tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE) |
2549 |
++ orr \tmp, \tmp, #USER_ASID_FLAG |
2550 |
++ msr ttbr1_el1, \tmp |
2551 |
++ /* |
2552 |
++ * We avoid running the post_ttbr_update_workaround here because |
2553 |
++ * it's only needed by Cavium ThunderX, which requires KPTI to be |
2554 |
++ * disabled. |
2555 |
++ */ |
2556 |
++ .endm |
2557 |
++ |
2558 |
++ .macro tramp_ventry, regsize = 64 |
2559 |
++ .align 7 |
2560 |
++1: |
2561 |
++ .if \regsize == 64 |
2562 |
++ msr tpidrro_el0, x30 // Restored in kernel_ventry |
2563 |
++ .endif |
2564 |
++ /* |
2565 |
++ * Defend against branch aliasing attacks by pushing a dummy |
2566 |
++ * entry onto the return stack and using a RET instruction to |
2567 |
++ * enter the full-fat kernel vectors. |
2568 |
++ */ |
2569 |
++ bl 2f |
2570 |
++ b . |
2571 |
++2: |
2572 |
++ tramp_map_kernel x30 |
2573 |
++#ifdef CONFIG_RANDOMIZE_BASE |
2574 |
++ adr x30, tramp_vectors + PAGE_SIZE |
2575 |
++alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003 |
2576 |
++ ldr x30, [x30] |
2577 |
++#else |
2578 |
++ ldr x30, =vectors |
2579 |
++#endif |
2580 |
++ prfm plil1strm, [x30, #(1b - tramp_vectors)] |
2581 |
++ msr vbar_el1, x30 |
2582 |
++ add x30, x30, #(1b - tramp_vectors) |
2583 |
++ isb |
2584 |
++ ret |
2585 |
++ .endm |
2586 |
++ |
2587 |
++ .macro tramp_exit, regsize = 64 |
2588 |
++ adr x30, tramp_vectors |
2589 |
++ msr vbar_el1, x30 |
2590 |
++ tramp_unmap_kernel x30 |
2591 |
++ .if \regsize == 64 |
2592 |
++ mrs x30, far_el1 |
2593 |
++ .endif |
2594 |
++ eret |
2595 |
++ .endm |
2596 |
++ |
2597 |
++ .align 11 |
2598 |
++ENTRY(tramp_vectors) |
2599 |
++ .space 0x400 |
2600 |
++ |
2601 |
++ tramp_ventry |
2602 |
++ tramp_ventry |
2603 |
++ tramp_ventry |
2604 |
++ tramp_ventry |
2605 |
++ |
2606 |
++ tramp_ventry 32 |
2607 |
++ tramp_ventry 32 |
2608 |
++ tramp_ventry 32 |
2609 |
++ tramp_ventry 32 |
2610 |
++END(tramp_vectors) |
2611 |
++ |
2612 |
++ENTRY(tramp_exit_native) |
2613 |
++ tramp_exit |
2614 |
++END(tramp_exit_native) |
2615 |
++ |
2616 |
++ENTRY(tramp_exit_compat) |
2617 |
++ tramp_exit 32 |
2618 |
++END(tramp_exit_compat) |
2619 |
++ |
2620 |
++ .ltorg |
2621 |
++ .popsection // .entry.tramp.text |
2622 |
++#ifdef CONFIG_RANDOMIZE_BASE |
2623 |
++ .pushsection ".rodata", "a" |
2624 |
++ .align PAGE_SHIFT |
2625 |
++ .globl __entry_tramp_data_start |
2626 |
++__entry_tramp_data_start: |
2627 |
++ .quad vectors |
2628 |
++ .popsection // .rodata |
2629 |
++#endif /* CONFIG_RANDOMIZE_BASE */ |
2630 |
++#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ |
2631 |
++ |
2632 |
+ /* |
2633 |
+ * Special system call wrappers. |
2634 |
+ */ |
2635 |
+diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S |
2636 |
+index 0b243ecaf7ac..261f3f88364c 100644 |
2637 |
+--- a/arch/arm64/kernel/head.S |
2638 |
++++ b/arch/arm64/kernel/head.S |
2639 |
+@@ -371,7 +371,7 @@ ENDPROC(__primary_switched) |
2640 |
+ * end early head section, begin head code that is also used for |
2641 |
+ * hotplug and needs to have the same protections as the text region |
2642 |
+ */ |
2643 |
+- .section ".idmap.text","ax" |
2644 |
++ .section ".idmap.text","awx" |
2645 |
+ |
2646 |
+ ENTRY(kimage_vaddr) |
2647 |
+ .quad _text - TEXT_OFFSET |
2648 |
+@@ -732,6 +732,7 @@ __primary_switch: |
2649 |
+ * to take into account by discarding the current kernel mapping and |
2650 |
+ * creating a new one. |
2651 |
+ */ |
2652 |
++ pre_disable_mmu_workaround |
2653 |
+ msr sctlr_el1, x20 // disable the MMU |
2654 |
+ isb |
2655 |
+ bl __create_page_tables // recreate kernel mapping |
2656 |
+diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c |
2657 |
+index bcd22d7ee590..9e773732520c 100644 |
2658 |
+--- a/arch/arm64/kernel/process.c |
2659 |
++++ b/arch/arm64/kernel/process.c |
2660 |
+@@ -314,16 +314,14 @@ void tls_preserve_current_state(void) |
2661 |
+ |
2662 |
+ static void tls_thread_switch(struct task_struct *next) |
2663 |
+ { |
2664 |
+- unsigned long tpidr, tpidrro; |
2665 |
+- |
2666 |
+ tls_preserve_current_state(); |
2667 |
+ |
2668 |
+- tpidr = *task_user_tls(next); |
2669 |
+- tpidrro = is_compat_thread(task_thread_info(next)) ? |
2670 |
+- next->thread.tp_value : 0; |
2671 |
++ if (is_compat_thread(task_thread_info(next))) |
2672 |
++ write_sysreg(next->thread.tp_value, tpidrro_el0); |
2673 |
++ else if (!arm64_kernel_unmapped_at_el0()) |
2674 |
++ write_sysreg(0, tpidrro_el0); |
2675 |
+ |
2676 |
+- write_sysreg(tpidr, tpidr_el0); |
2677 |
+- write_sysreg(tpidrro, tpidrro_el0); |
2678 |
++ write_sysreg(*task_user_tls(next), tpidr_el0); |
2679 |
+ } |
2680 |
+ |
2681 |
+ /* Restore the UAO state depending on next's addr_limit */ |
2682 |
+diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S |
2683 |
+index ce704a4aeadd..f407e422a720 100644 |
2684 |
+--- a/arch/arm64/kernel/relocate_kernel.S |
2685 |
++++ b/arch/arm64/kernel/relocate_kernel.S |
2686 |
+@@ -45,6 +45,7 @@ ENTRY(arm64_relocate_new_kernel) |
2687 |
+ mrs x0, sctlr_el2 |
2688 |
+ ldr x1, =SCTLR_ELx_FLAGS |
2689 |
+ bic x0, x0, x1 |
2690 |
++ pre_disable_mmu_workaround |
2691 |
+ msr sctlr_el2, x0 |
2692 |
+ isb |
2693 |
+ 1: |
2694 |
+diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S |
2695 |
+index 10dd16d7902d..bebec8ef9372 100644 |
2696 |
+--- a/arch/arm64/kernel/sleep.S |
2697 |
++++ b/arch/arm64/kernel/sleep.S |
2698 |
+@@ -96,7 +96,7 @@ ENTRY(__cpu_suspend_enter) |
2699 |
+ ret |
2700 |
+ ENDPROC(__cpu_suspend_enter) |
2701 |
+ |
2702 |
+- .pushsection ".idmap.text", "ax" |
2703 |
++ .pushsection ".idmap.text", "awx" |
2704 |
+ ENTRY(cpu_resume) |
2705 |
+ bl el2_setup // if in EL2 drop to EL1 cleanly |
2706 |
+ bl __cpu_setup |
2707 |
+diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S |
2708 |
+index 7da3e5c366a0..ddfd3c0942f7 100644 |
2709 |
+--- a/arch/arm64/kernel/vmlinux.lds.S |
2710 |
++++ b/arch/arm64/kernel/vmlinux.lds.S |
2711 |
+@@ -57,6 +57,17 @@ jiffies = jiffies_64; |
2712 |
+ #define HIBERNATE_TEXT |
2713 |
+ #endif |
2714 |
+ |
2715 |
++#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 |
2716 |
++#define TRAMP_TEXT \ |
2717 |
++ . = ALIGN(PAGE_SIZE); \ |
2718 |
++ VMLINUX_SYMBOL(__entry_tramp_text_start) = .; \ |
2719 |
++ *(.entry.tramp.text) \ |
2720 |
++ . = ALIGN(PAGE_SIZE); \ |
2721 |
++ VMLINUX_SYMBOL(__entry_tramp_text_end) = .; |
2722 |
++#else |
2723 |
++#define TRAMP_TEXT |
2724 |
++#endif |
2725 |
++ |
2726 |
+ /* |
2727 |
+ * The size of the PE/COFF section that covers the kernel image, which |
2728 |
+ * runs from stext to _edata, must be a round multiple of the PE/COFF |
2729 |
+@@ -113,6 +124,7 @@ SECTIONS |
2730 |
+ HYPERVISOR_TEXT |
2731 |
+ IDMAP_TEXT |
2732 |
+ HIBERNATE_TEXT |
2733 |
++ TRAMP_TEXT |
2734 |
+ *(.fixup) |
2735 |
+ *(.gnu.warning) |
2736 |
+ . = ALIGN(16); |
2737 |
+@@ -214,6 +226,11 @@ SECTIONS |
2738 |
+ . += RESERVED_TTBR0_SIZE; |
2739 |
+ #endif |
2740 |
+ |
2741 |
++#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 |
2742 |
++ tramp_pg_dir = .; |
2743 |
++ . += PAGE_SIZE; |
2744 |
++#endif |
2745 |
++ |
2746 |
+ __pecoff_data_size = ABSOLUTE(. - __initdata_begin); |
2747 |
+ _end = .; |
2748 |
+ |
2749 |
+@@ -234,7 +251,10 @@ ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K, |
2750 |
+ ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1)) |
2751 |
+ <= SZ_4K, "Hibernate exit text too big or misaligned") |
2752 |
+ #endif |
2753 |
+- |
2754 |
++#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 |
2755 |
++ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE, |
2756 |
++ "Entry trampoline text too big") |
2757 |
++#endif |
2758 |
+ /* |
2759 |
+ * If padding is applied before .head.text, virt<->phys conversions will fail. |
2760 |
+ */ |
2761 |
+diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c |
2762 |
+index 380261e258ef..ab48c5ed3943 100644 |
2763 |
+--- a/arch/arm64/kvm/handle_exit.c |
2764 |
++++ b/arch/arm64/kvm/handle_exit.c |
2765 |
+@@ -22,12 +22,13 @@ |
2766 |
+ #include <linux/kvm.h> |
2767 |
+ #include <linux/kvm_host.h> |
2768 |
+ |
2769 |
++#include <kvm/arm_psci.h> |
2770 |
++ |
2771 |
+ #include <asm/esr.h> |
2772 |
+ #include <asm/kvm_asm.h> |
2773 |
+ #include <asm/kvm_coproc.h> |
2774 |
+ #include <asm/kvm_emulate.h> |
2775 |
+ #include <asm/kvm_mmu.h> |
2776 |
+-#include <asm/kvm_psci.h> |
2777 |
+ |
2778 |
+ #define CREATE_TRACE_POINTS |
2779 |
+ #include "trace.h" |
2780 |
+@@ -42,7 +43,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) |
2781 |
+ kvm_vcpu_hvc_get_imm(vcpu)); |
2782 |
+ vcpu->stat.hvc_exit_stat++; |
2783 |
+ |
2784 |
+- ret = kvm_psci_call(vcpu); |
2785 |
++ ret = kvm_hvc_call_handler(vcpu); |
2786 |
+ if (ret < 0) { |
2787 |
+ vcpu_set_reg(vcpu, 0, ~0UL); |
2788 |
+ return 1; |
2789 |
+@@ -53,7 +54,16 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) |
2790 |
+ |
2791 |
+ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) |
2792 |
+ { |
2793 |
++ /* |
2794 |
++ * "If an SMC instruction executed at Non-secure EL1 is |
2795 |
++ * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a |
2796 |
++ * Trap exception, not a Secure Monitor Call exception [...]" |
2797 |
++ * |
2798 |
++ * We need to advance the PC after the trap, as it would |
2799 |
++ * otherwise return to the same address... |
2800 |
++ */ |
2801 |
+ vcpu_set_reg(vcpu, 0, ~0UL); |
2802 |
++ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); |
2803 |
+ return 1; |
2804 |
+ } |
2805 |
+ |
2806 |
+diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S |
2807 |
+index 3f9615582377..870828c364c5 100644 |
2808 |
+--- a/arch/arm64/kvm/hyp-init.S |
2809 |
++++ b/arch/arm64/kvm/hyp-init.S |
2810 |
+@@ -151,6 +151,7 @@ reset: |
2811 |
+ mrs x5, sctlr_el2 |
2812 |
+ ldr x6, =SCTLR_ELx_FLAGS |
2813 |
+ bic x5, x5, x6 // Clear SCTL_M and etc |
2814 |
++ pre_disable_mmu_workaround |
2815 |
+ msr sctlr_el2, x5 |
2816 |
+ isb |
2817 |
+ |
2818 |
+diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S |
2819 |
+index 12ee62d6d410..9c45c6af1f58 100644 |
2820 |
+--- a/arch/arm64/kvm/hyp/entry.S |
2821 |
++++ b/arch/arm64/kvm/hyp/entry.S |
2822 |
+@@ -196,3 +196,15 @@ alternative_endif |
2823 |
+ |
2824 |
+ eret |
2825 |
+ ENDPROC(__fpsimd_guest_restore) |
2826 |
++ |
2827 |
++ENTRY(__qcom_hyp_sanitize_btac_predictors) |
2828 |
++ /** |
2829 |
++ * Call SMC64 with Silicon provider serviceID 23<<8 (0xc2001700) |
2830 |
++ * 0xC2000000-0xC200FFFF: assigned to SiP Service Calls |
2831 |
++ * b15-b0: contains SiP functionID |
2832 |
++ */ |
2833 |
++ movz x0, #0x1700 |
2834 |
++ movk x0, #0xc200, lsl #16 |
2835 |
++ smc #0 |
2836 |
++ ret |
2837 |
++ENDPROC(__qcom_hyp_sanitize_btac_predictors) |
2838 |
+diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S |
2839 |
+index 5170ce1021da..f49b53331d28 100644 |
2840 |
+--- a/arch/arm64/kvm/hyp/hyp-entry.S |
2841 |
++++ b/arch/arm64/kvm/hyp/hyp-entry.S |
2842 |
+@@ -15,6 +15,7 @@ |
2843 |
+ * along with this program. If not, see <http://www.gnu.org/licenses/>. |
2844 |
+ */ |
2845 |
+ |
2846 |
++#include <linux/arm-smccc.h> |
2847 |
+ #include <linux/linkage.h> |
2848 |
+ |
2849 |
+ #include <asm/alternative.h> |
2850 |
+@@ -64,10 +65,11 @@ alternative_endif |
2851 |
+ lsr x0, x1, #ESR_ELx_EC_SHIFT |
2852 |
+ |
2853 |
+ cmp x0, #ESR_ELx_EC_HVC64 |
2854 |
++ ccmp x0, #ESR_ELx_EC_HVC32, #4, ne |
2855 |
+ b.ne el1_trap |
2856 |
+ |
2857 |
+- mrs x1, vttbr_el2 // If vttbr is valid, the 64bit guest |
2858 |
+- cbnz x1, el1_trap // called HVC |
2859 |
++ mrs x1, vttbr_el2 // If vttbr is valid, the guest |
2860 |
++ cbnz x1, el1_hvc_guest // called HVC |
2861 |
+ |
2862 |
+ /* Here, we're pretty sure the host called HVC. */ |
2863 |
+ ldp x0, x1, [sp], #16 |
2864 |
+@@ -100,6 +102,20 @@ alternative_endif |
2865 |
+ |
2866 |
+ eret |
2867 |
+ |
2868 |
++el1_hvc_guest: |
2869 |
++ /* |
2870 |
++ * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1. |
2871 |
++ * The workaround has already been applied on the host, |
2872 |
++ * so let's quickly get back to the guest. We don't bother |
2873 |
++ * restoring x1, as it can be clobbered anyway. |
2874 |
++ */ |
2875 |
++ ldr x1, [sp] // Guest's x0 |
2876 |
++ eor w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1 |
2877 |
++ cbnz w1, el1_trap |
2878 |
++ mov x0, x1 |
2879 |
++ add sp, sp, #16 |
2880 |
++ eret |
2881 |
++ |
2882 |
+ el1_trap: |
2883 |
+ /* |
2884 |
+ * x0: ESR_EC |
2885 |
+diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c |
2886 |
+index 945e79c641c4..79364d3455c0 100644 |
2887 |
+--- a/arch/arm64/kvm/hyp/switch.c |
2888 |
++++ b/arch/arm64/kvm/hyp/switch.c |
2889 |
+@@ -17,6 +17,9 @@ |
2890 |
+ |
2891 |
+ #include <linux/types.h> |
2892 |
+ #include <linux/jump_label.h> |
2893 |
++#include <uapi/linux/psci.h> |
2894 |
++ |
2895 |
++#include <kvm/arm_psci.h> |
2896 |
+ |
2897 |
+ #include <asm/kvm_asm.h> |
2898 |
+ #include <asm/kvm_emulate.h> |
2899 |
+@@ -51,7 +54,7 @@ static void __hyp_text __activate_traps_vhe(void) |
2900 |
+ val &= ~CPACR_EL1_FPEN; |
2901 |
+ write_sysreg(val, cpacr_el1); |
2902 |
+ |
2903 |
+- write_sysreg(__kvm_hyp_vector, vbar_el1); |
2904 |
++ write_sysreg(kvm_get_hyp_vector(), vbar_el1); |
2905 |
+ } |
2906 |
+ |
2907 |
+ static void __hyp_text __activate_traps_nvhe(void) |
2908 |
+@@ -364,6 +367,14 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu) |
2909 |
+ /* 0 falls through to be handled out of EL2 */ |
2910 |
+ } |
2911 |
+ |
2912 |
++ if (cpus_have_const_cap(ARM64_HARDEN_BP_POST_GUEST_EXIT)) { |
2913 |
++ u32 midr = read_cpuid_id(); |
2914 |
++ |
2915 |
++ /* Apply BTAC predictors mitigation to all Falkor chips */ |
2916 |
++ if ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1) |
2917 |
++ __qcom_hyp_sanitize_btac_predictors(); |
2918 |
++ } |
2919 |
++ |
2920 |
+ fp_enabled = __fpsimd_enabled(); |
2921 |
+ |
2922 |
+ __sysreg_save_guest_state(guest_ctxt); |
2923 |
+diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S |
2924 |
+index e88fb99c1561..21ba0b29621b 100644 |
2925 |
+--- a/arch/arm64/lib/clear_user.S |
2926 |
++++ b/arch/arm64/lib/clear_user.S |
2927 |
+@@ -21,7 +21,7 @@ |
2928 |
+ |
2929 |
+ .text |
2930 |
+ |
2931 |
+-/* Prototype: int __clear_user(void *addr, size_t sz) |
2932 |
++/* Prototype: int __arch_clear_user(void *addr, size_t sz) |
2933 |
+ * Purpose : clear some user memory |
2934 |
+ * Params : addr - user memory address to clear |
2935 |
+ * : sz - number of bytes to clear |
2936 |
+@@ -29,8 +29,8 @@ |
2937 |
+ * |
2938 |
+ * Alignment fixed up by hardware. |
2939 |
+ */ |
2940 |
+-ENTRY(__clear_user) |
2941 |
+- uaccess_enable_not_uao x2, x3 |
2942 |
++ENTRY(__arch_clear_user) |
2943 |
++ uaccess_enable_not_uao x2, x3, x4 |
2944 |
+ mov x2, x1 // save the size for fixup return |
2945 |
+ subs x1, x1, #8 |
2946 |
+ b.mi 2f |
2947 |
+@@ -50,9 +50,9 @@ uao_user_alternative 9f, strh, sttrh, wzr, x0, 2 |
2948 |
+ b.mi 5f |
2949 |
+ uao_user_alternative 9f, strb, sttrb, wzr, x0, 0 |
2950 |
+ 5: mov x0, #0 |
2951 |
+- uaccess_disable_not_uao x2 |
2952 |
++ uaccess_disable_not_uao x2, x3 |
2953 |
+ ret |
2954 |
+-ENDPROC(__clear_user) |
2955 |
++ENDPROC(__arch_clear_user) |
2956 |
+ |
2957 |
+ .section .fixup,"ax" |
2958 |
+ .align 2 |
2959 |
+diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S |
2960 |
+index 4b5d826895ff..20305d485046 100644 |
2961 |
+--- a/arch/arm64/lib/copy_from_user.S |
2962 |
++++ b/arch/arm64/lib/copy_from_user.S |
2963 |
+@@ -64,10 +64,10 @@ |
2964 |
+ |
2965 |
+ end .req x5 |
2966 |
+ ENTRY(__arch_copy_from_user) |
2967 |
+- uaccess_enable_not_uao x3, x4 |
2968 |
++ uaccess_enable_not_uao x3, x4, x5 |
2969 |
+ add end, x0, x2 |
2970 |
+ #include "copy_template.S" |
2971 |
+- uaccess_disable_not_uao x3 |
2972 |
++ uaccess_disable_not_uao x3, x4 |
2973 |
+ mov x0, #0 // Nothing to copy |
2974 |
+ ret |
2975 |
+ ENDPROC(__arch_copy_from_user) |
2976 |
+diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S |
2977 |
+index b24a830419ad..54b75deb1d16 100644 |
2978 |
+--- a/arch/arm64/lib/copy_in_user.S |
2979 |
++++ b/arch/arm64/lib/copy_in_user.S |
2980 |
+@@ -64,14 +64,15 @@ |
2981 |
+ .endm |
2982 |
+ |
2983 |
+ end .req x5 |
2984 |
+-ENTRY(raw_copy_in_user) |
2985 |
+- uaccess_enable_not_uao x3, x4 |
2986 |
++ |
2987 |
++ENTRY(__arch_copy_in_user) |
2988 |
++ uaccess_enable_not_uao x3, x4, x5 |
2989 |
+ add end, x0, x2 |
2990 |
+ #include "copy_template.S" |
2991 |
+- uaccess_disable_not_uao x3 |
2992 |
++ uaccess_disable_not_uao x3, x4 |
2993 |
+ mov x0, #0 |
2994 |
+ ret |
2995 |
+-ENDPROC(raw_copy_in_user) |
2996 |
++ENDPROC(__arch_copy_in_user) |
2997 |
+ |
2998 |
+ .section .fixup,"ax" |
2999 |
+ .align 2 |
3000 |
+diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S |
3001 |
+index 351f0766f7a6..fda6172d6b88 100644 |
3002 |
+--- a/arch/arm64/lib/copy_to_user.S |
3003 |
++++ b/arch/arm64/lib/copy_to_user.S |
3004 |
+@@ -63,10 +63,10 @@ |
3005 |
+ |
3006 |
+ end .req x5 |
3007 |
+ ENTRY(__arch_copy_to_user) |
3008 |
+- uaccess_enable_not_uao x3, x4 |
3009 |
++ uaccess_enable_not_uao x3, x4, x5 |
3010 |
+ add end, x0, x2 |
3011 |
+ #include "copy_template.S" |
3012 |
+- uaccess_disable_not_uao x3 |
3013 |
++ uaccess_disable_not_uao x3, x4 |
3014 |
+ mov x0, #0 |
3015 |
+ ret |
3016 |
+ ENDPROC(__arch_copy_to_user) |
3017 |
+diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S |
3018 |
+index 7f1dbe962cf5..91464e7f77cc 100644 |
3019 |
+--- a/arch/arm64/mm/cache.S |
3020 |
++++ b/arch/arm64/mm/cache.S |
3021 |
+@@ -49,7 +49,7 @@ ENTRY(flush_icache_range) |
3022 |
+ * - end - virtual end address of region |
3023 |
+ */ |
3024 |
+ ENTRY(__flush_cache_user_range) |
3025 |
+- uaccess_ttbr0_enable x2, x3 |
3026 |
++ uaccess_ttbr0_enable x2, x3, x4 |
3027 |
+ dcache_line_size x2, x3 |
3028 |
+ sub x3, x2, #1 |
3029 |
+ bic x4, x0, x3 |
3030 |
+@@ -72,7 +72,7 @@ USER(9f, ic ivau, x4 ) // invalidate I line PoU |
3031 |
+ isb |
3032 |
+ mov x0, #0 |
3033 |
+ 1: |
3034 |
+- uaccess_ttbr0_disable x1 |
3035 |
++ uaccess_ttbr0_disable x1, x2 |
3036 |
+ ret |
3037 |
+ 9: |
3038 |
+ mov x0, #-EFAULT |
3039 |
+diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c |
3040 |
+index ab9f5f0fb2c7..9284788733d6 100644 |
3041 |
+--- a/arch/arm64/mm/context.c |
3042 |
++++ b/arch/arm64/mm/context.c |
3043 |
+@@ -39,7 +39,16 @@ static cpumask_t tlb_flush_pending; |
3044 |
+ |
3045 |
+ #define ASID_MASK (~GENMASK(asid_bits - 1, 0)) |
3046 |
+ #define ASID_FIRST_VERSION (1UL << asid_bits) |
3047 |
+-#define NUM_USER_ASIDS ASID_FIRST_VERSION |
3048 |
++ |
3049 |
++#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 |
3050 |
++#define NUM_USER_ASIDS (ASID_FIRST_VERSION >> 1) |
3051 |
++#define asid2idx(asid) (((asid) & ~ASID_MASK) >> 1) |
3052 |
++#define idx2asid(idx) (((idx) << 1) & ~ASID_MASK) |
3053 |
++#else |
3054 |
++#define NUM_USER_ASIDS (ASID_FIRST_VERSION) |
3055 |
++#define asid2idx(asid) ((asid) & ~ASID_MASK) |
3056 |
++#define idx2asid(idx) asid2idx(idx) |
3057 |
++#endif |
3058 |
+ |
3059 |
+ /* Get the ASIDBits supported by the current CPU */ |
3060 |
+ static u32 get_cpu_asid_bits(void) |
3061 |
+@@ -79,13 +88,6 @@ void verify_cpu_asid_bits(void) |
3062 |
+ } |
3063 |
+ } |
3064 |
+ |
3065 |
+-static void set_reserved_asid_bits(void) |
3066 |
+-{ |
3067 |
+- if (IS_ENABLED(CONFIG_QCOM_FALKOR_ERRATUM_1003) && |
3068 |
+- cpus_have_const_cap(ARM64_WORKAROUND_QCOM_FALKOR_E1003)) |
3069 |
+- __set_bit(FALKOR_RESERVED_ASID, asid_map); |
3070 |
+-} |
3071 |
+- |
3072 |
+ static void flush_context(unsigned int cpu) |
3073 |
+ { |
3074 |
+ int i; |
3075 |
+@@ -94,8 +96,6 @@ static void flush_context(unsigned int cpu) |
3076 |
+ /* Update the list of reserved ASIDs and the ASID bitmap. */ |
3077 |
+ bitmap_clear(asid_map, 0, NUM_USER_ASIDS); |
3078 |
+ |
3079 |
+- set_reserved_asid_bits(); |
3080 |
+- |
3081 |
+ /* |
3082 |
+ * Ensure the generation bump is observed before we xchg the |
3083 |
+ * active_asids. |
3084 |
+@@ -113,7 +113,7 @@ static void flush_context(unsigned int cpu) |
3085 |
+ */ |
3086 |
+ if (asid == 0) |
3087 |
+ asid = per_cpu(reserved_asids, i); |
3088 |
+- __set_bit(asid & ~ASID_MASK, asid_map); |
3089 |
++ __set_bit(asid2idx(asid), asid_map); |
3090 |
+ per_cpu(reserved_asids, i) = asid; |
3091 |
+ } |
3092 |
+ |
3093 |
+@@ -165,16 +165,16 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) |
3094 |
+ * We had a valid ASID in a previous life, so try to re-use |
3095 |
+ * it if possible. |
3096 |
+ */ |
3097 |
+- asid &= ~ASID_MASK; |
3098 |
+- if (!__test_and_set_bit(asid, asid_map)) |
3099 |
++ if (!__test_and_set_bit(asid2idx(asid), asid_map)) |
3100 |
+ return newasid; |
3101 |
+ } |
3102 |
+ |
3103 |
+ /* |
3104 |
+ * Allocate a free ASID. If we can't find one, take a note of the |
3105 |
+- * currently active ASIDs and mark the TLBs as requiring flushes. |
3106 |
+- * We always count from ASID #1, as we use ASID #0 when setting a |
3107 |
+- * reserved TTBR0 for the init_mm. |
3108 |
++ * currently active ASIDs and mark the TLBs as requiring flushes. We |
3109 |
++ * always count from ASID #2 (index 1), as we use ASID #0 when setting |
3110 |
++ * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd |
3111 |
++ * pairs. |
3112 |
+ */ |
3113 |
+ asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx); |
3114 |
+ if (asid != NUM_USER_ASIDS) |
3115 |
+@@ -191,7 +191,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) |
3116 |
+ set_asid: |
3117 |
+ __set_bit(asid, asid_map); |
3118 |
+ cur_idx = asid; |
3119 |
+- return asid | generation; |
3120 |
++ return idx2asid(asid) | generation; |
3121 |
+ } |
3122 |
+ |
3123 |
+ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) |
3124 |
+@@ -227,6 +227,9 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) |
3125 |
+ raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); |
3126 |
+ |
3127 |
+ switch_mm_fastpath: |
3128 |
++ |
3129 |
++ arm64_apply_bp_hardening(); |
3130 |
++ |
3131 |
+ /* |
3132 |
+ * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when |
3133 |
+ * emulating PAN. |
3134 |
+@@ -235,6 +238,15 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) |
3135 |
+ cpu_switch_mm(mm->pgd, mm); |
3136 |
+ } |
3137 |
+ |
3138 |
++/* Errata workaround post TTBRx_EL1 update. */ |
3139 |
++asmlinkage void post_ttbr_update_workaround(void) |
3140 |
++{ |
3141 |
++ asm(ALTERNATIVE("nop; nop; nop", |
3142 |
++ "ic iallu; dsb nsh; isb", |
3143 |
++ ARM64_WORKAROUND_CAVIUM_27456, |
3144 |
++ CONFIG_CAVIUM_ERRATUM_27456)); |
3145 |
++} |
3146 |
++ |
3147 |
+ static int asids_init(void) |
3148 |
+ { |
3149 |
+ asid_bits = get_cpu_asid_bits(); |
3150 |
+@@ -250,8 +262,6 @@ static int asids_init(void) |
3151 |
+ panic("Failed to allocate bitmap for %lu ASIDs\n", |
3152 |
+ NUM_USER_ASIDS); |
3153 |
+ |
3154 |
+- set_reserved_asid_bits(); |
3155 |
+- |
3156 |
+ pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS); |
3157 |
+ return 0; |
3158 |
+ } |
3159 |
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c |
3160 |
+index b64958b23a7f..5edb706aacb0 100644 |
3161 |
+--- a/arch/arm64/mm/fault.c |
3162 |
++++ b/arch/arm64/mm/fault.c |
3163 |
+@@ -242,7 +242,7 @@ static inline bool is_permission_fault(unsigned int esr, struct pt_regs *regs, |
3164 |
+ if (fsc_type == ESR_ELx_FSC_PERM) |
3165 |
+ return true; |
3166 |
+ |
3167 |
+- if (addr < USER_DS && system_uses_ttbr0_pan()) |
3168 |
++ if (addr < TASK_SIZE && system_uses_ttbr0_pan()) |
3169 |
+ return fsc_type == ESR_ELx_FSC_FAULT && |
3170 |
+ (regs->pstate & PSR_PAN_BIT); |
3171 |
+ |
3172 |
+@@ -426,7 +426,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, |
3173 |
+ mm_flags |= FAULT_FLAG_WRITE; |
3174 |
+ } |
3175 |
+ |
3176 |
+- if (addr < USER_DS && is_permission_fault(esr, regs, addr)) { |
3177 |
++ if (addr < TASK_SIZE && is_permission_fault(esr, regs, addr)) { |
3178 |
+ /* regs->orig_addr_limit may be 0 if we entered from EL0 */ |
3179 |
+ if (regs->orig_addr_limit == KERNEL_DS) |
3180 |
+ die("Accessing user space memory with fs=KERNEL_DS", regs, esr); |
3181 |
+@@ -751,6 +751,29 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr, |
3182 |
+ arm64_notify_die("", regs, &info, esr); |
3183 |
+ } |
3184 |
+ |
3185 |
++asmlinkage void __exception do_el0_irq_bp_hardening(void) |
3186 |
++{ |
3187 |
++ /* PC has already been checked in entry.S */ |
3188 |
++ arm64_apply_bp_hardening(); |
3189 |
++} |
3190 |
++ |
3191 |
++asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr, |
3192 |
++ unsigned int esr, |
3193 |
++ struct pt_regs *regs) |
3194 |
++{ |
3195 |
++ /* |
3196 |
++ * We've taken an instruction abort from userspace and not yet |
3197 |
++ * re-enabled IRQs. If the address is a kernel address, apply |
3198 |
++ * BP hardening prior to enabling IRQs and pre-emption. |
3199 |
++ */ |
3200 |
++ if (addr > TASK_SIZE) |
3201 |
++ arm64_apply_bp_hardening(); |
3202 |
++ |
3203 |
++ local_irq_enable(); |
3204 |
++ do_mem_abort(addr, esr, regs); |
3205 |
++} |
3206 |
++ |
3207 |
++ |
3208 |
+ /* |
3209 |
+ * Handle stack alignment exceptions. |
3210 |
+ */ |
3211 |
+@@ -761,6 +784,12 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr, |
3212 |
+ struct siginfo info; |
3213 |
+ struct task_struct *tsk = current; |
3214 |
+ |
3215 |
++ if (user_mode(regs)) { |
3216 |
++ if (instruction_pointer(regs) > TASK_SIZE) |
3217 |
++ arm64_apply_bp_hardening(); |
3218 |
++ local_irq_enable(); |
3219 |
++ } |
3220 |
++ |
3221 |
+ if (show_unhandled_signals && unhandled_signal(tsk, SIGBUS)) |
3222 |
+ pr_info_ratelimited("%s[%d]: %s exception: pc=%p sp=%p\n", |
3223 |
+ tsk->comm, task_pid_nr(tsk), |
3224 |
+@@ -820,6 +849,9 @@ asmlinkage int __exception do_debug_exception(unsigned long addr, |
3225 |
+ if (interrupts_enabled(regs)) |
3226 |
+ trace_hardirqs_off(); |
3227 |
+ |
3228 |
++ if (user_mode(regs) && instruction_pointer(regs) > TASK_SIZE) |
3229 |
++ arm64_apply_bp_hardening(); |
3230 |
++ |
3231 |
+ if (!inf->fn(addr, esr, regs)) { |
3232 |
+ rv = 1; |
3233 |
+ } else { |
3234 |
+diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c |
3235 |
+index f1eb15e0e864..fa20124c19d5 100644 |
3236 |
+--- a/arch/arm64/mm/mmu.c |
3237 |
++++ b/arch/arm64/mm/mmu.c |
3238 |
+@@ -117,6 +117,10 @@ static bool pgattr_change_is_safe(u64 old, u64 new) |
3239 |
+ if ((old | new) & PTE_CONT) |
3240 |
+ return false; |
3241 |
+ |
3242 |
++ /* Transitioning from Global to Non-Global is safe */ |
3243 |
++ if (((old ^ new) == PTE_NG) && (new & PTE_NG)) |
3244 |
++ return true; |
3245 |
++ |
3246 |
+ return ((old ^ new) & ~mask) == 0; |
3247 |
+ } |
3248 |
+ |
3249 |
+@@ -525,6 +529,37 @@ static int __init parse_rodata(char *arg) |
3250 |
+ } |
3251 |
+ early_param("rodata", parse_rodata); |
3252 |
+ |
3253 |
++#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 |
3254 |
++static int __init map_entry_trampoline(void) |
3255 |
++{ |
3256 |
++ extern char __entry_tramp_text_start[]; |
3257 |
++ |
3258 |
++ pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC; |
3259 |
++ phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start); |
3260 |
++ |
3261 |
++ /* The trampoline is always mapped and can therefore be global */ |
3262 |
++ pgprot_val(prot) &= ~PTE_NG; |
3263 |
++ |
3264 |
++ /* Map only the text into the trampoline page table */ |
3265 |
++ memset(tramp_pg_dir, 0, PGD_SIZE); |
3266 |
++ __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE, |
3267 |
++ prot, pgd_pgtable_alloc, 0); |
3268 |
++ |
3269 |
++ /* Map both the text and data into the kernel page table */ |
3270 |
++ __set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot); |
3271 |
++ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { |
3272 |
++ extern char __entry_tramp_data_start[]; |
3273 |
++ |
3274 |
++ __set_fixmap(FIX_ENTRY_TRAMP_DATA, |
3275 |
++ __pa_symbol(__entry_tramp_data_start), |
3276 |
++ PAGE_KERNEL_RO); |
3277 |
++ } |
3278 |
++ |
3279 |
++ return 0; |
3280 |
++} |
3281 |
++core_initcall(map_entry_trampoline); |
3282 |
++#endif |
3283 |
++ |
3284 |
+ /* |
3285 |
+ * Create fine-grained mappings for the kernel. |
3286 |
+ */ |
3287 |
+diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S |
3288 |
+index 877d42fb0df6..27058f3fd132 100644 |
3289 |
+--- a/arch/arm64/mm/proc.S |
3290 |
++++ b/arch/arm64/mm/proc.S |
3291 |
+@@ -86,7 +86,7 @@ ENDPROC(cpu_do_suspend) |
3292 |
+ * |
3293 |
+ * x0: Address of context pointer |
3294 |
+ */ |
3295 |
+- .pushsection ".idmap.text", "ax" |
3296 |
++ .pushsection ".idmap.text", "awx" |
3297 |
+ ENTRY(cpu_do_resume) |
3298 |
+ ldp x2, x3, [x0] |
3299 |
+ ldp x4, x5, [x0, #16] |
3300 |
+@@ -138,16 +138,30 @@ ENDPROC(cpu_do_resume) |
3301 |
+ * - pgd_phys - physical address of new TTB |
3302 |
+ */ |
3303 |
+ ENTRY(cpu_do_switch_mm) |
3304 |
+- pre_ttbr0_update_workaround x0, x2, x3 |
3305 |
++ mrs x2, ttbr1_el1 |
3306 |
+ mmid x1, x1 // get mm->context.id |
3307 |
+- bfi x0, x1, #48, #16 // set the ASID |
3308 |
+- msr ttbr0_el1, x0 // set TTBR0 |
3309 |
++#ifdef CONFIG_ARM64_SW_TTBR0_PAN |
3310 |
++ bfi x0, x1, #48, #16 // set the ASID field in TTBR0 |
3311 |
++#endif |
3312 |
++ bfi x2, x1, #48, #16 // set the ASID |
3313 |
++ msr ttbr1_el1, x2 // in TTBR1 (since TCR.A1 is set) |
3314 |
+ isb |
3315 |
+- post_ttbr0_update_workaround |
3316 |
+- ret |
3317 |
++ msr ttbr0_el1, x0 // now update TTBR0 |
3318 |
++ isb |
3319 |
++ b post_ttbr_update_workaround // Back to C code... |
3320 |
+ ENDPROC(cpu_do_switch_mm) |
3321 |
+ |
3322 |
+- .pushsection ".idmap.text", "ax" |
3323 |
++ .pushsection ".idmap.text", "awx" |
3324 |
++ |
3325 |
++.macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2 |
3326 |
++ adrp \tmp1, empty_zero_page |
3327 |
++ msr ttbr1_el1, \tmp2 |
3328 |
++ isb |
3329 |
++ tlbi vmalle1 |
3330 |
++ dsb nsh |
3331 |
++ isb |
3332 |
++.endm |
3333 |
++ |
3334 |
+ /* |
3335 |
+ * void idmap_cpu_replace_ttbr1(phys_addr_t new_pgd) |
3336 |
+ * |
3337 |
+@@ -158,13 +172,7 @@ ENTRY(idmap_cpu_replace_ttbr1) |
3338 |
+ mrs x2, daif |
3339 |
+ msr daifset, #0xf |
3340 |
+ |
3341 |
+- adrp x1, empty_zero_page |
3342 |
+- msr ttbr1_el1, x1 |
3343 |
+- isb |
3344 |
+- |
3345 |
+- tlbi vmalle1 |
3346 |
+- dsb nsh |
3347 |
+- isb |
3348 |
++ __idmap_cpu_set_reserved_ttbr1 x1, x3 |
3349 |
+ |
3350 |
+ msr ttbr1_el1, x0 |
3351 |
+ isb |
3352 |
+@@ -175,13 +183,196 @@ ENTRY(idmap_cpu_replace_ttbr1) |
3353 |
+ ENDPROC(idmap_cpu_replace_ttbr1) |
3354 |
+ .popsection |
3355 |
+ |
3356 |
++#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 |
3357 |
++ .pushsection ".idmap.text", "awx" |
3358 |
++ |
3359 |
++ .macro __idmap_kpti_get_pgtable_ent, type |
3360 |
++ dc cvac, cur_\()\type\()p // Ensure any existing dirty |
3361 |
++ dmb sy // lines are written back before |
3362 |
++ ldr \type, [cur_\()\type\()p] // loading the entry |
3363 |
++ tbz \type, #0, next_\()\type // Skip invalid entries |
3364 |
++ .endm |
3365 |
++ |
3366 |
++ .macro __idmap_kpti_put_pgtable_ent_ng, type |
3367 |
++ orr \type, \type, #PTE_NG // Same bit for blocks and pages |
3368 |
++ str \type, [cur_\()\type\()p] // Update the entry and ensure it |
3369 |
++ dc civac, cur_\()\type\()p // is visible to all CPUs. |
3370 |
++ .endm |
3371 |
++ |
3372 |
++/* |
3373 |
++ * void __kpti_install_ng_mappings(int cpu, int num_cpus, phys_addr_t swapper) |
3374 |
++ * |
3375 |
++ * Called exactly once from stop_machine context by each CPU found during boot. |
3376 |
++ */ |
3377 |
++__idmap_kpti_flag: |
3378 |
++ .long 1 |
3379 |
++ENTRY(idmap_kpti_install_ng_mappings) |
3380 |
++ cpu .req w0 |
3381 |
++ num_cpus .req w1 |
3382 |
++ swapper_pa .req x2 |
3383 |
++ swapper_ttb .req x3 |
3384 |
++ flag_ptr .req x4 |
3385 |
++ cur_pgdp .req x5 |
3386 |
++ end_pgdp .req x6 |
3387 |
++ pgd .req x7 |
3388 |
++ cur_pudp .req x8 |
3389 |
++ end_pudp .req x9 |
3390 |
++ pud .req x10 |
3391 |
++ cur_pmdp .req x11 |
3392 |
++ end_pmdp .req x12 |
3393 |
++ pmd .req x13 |
3394 |
++ cur_ptep .req x14 |
3395 |
++ end_ptep .req x15 |
3396 |
++ pte .req x16 |
3397 |
++ |
3398 |
++ mrs swapper_ttb, ttbr1_el1 |
3399 |
++ adr flag_ptr, __idmap_kpti_flag |
3400 |
++ |
3401 |
++ cbnz cpu, __idmap_kpti_secondary |
3402 |
++ |
3403 |
++ /* We're the boot CPU. Wait for the others to catch up */ |
3404 |
++ sevl |
3405 |
++1: wfe |
3406 |
++ ldaxr w18, [flag_ptr] |
3407 |
++ eor w18, w18, num_cpus |
3408 |
++ cbnz w18, 1b |
3409 |
++ |
3410 |
++ /* We need to walk swapper, so turn off the MMU. */ |
3411 |
++ mrs x18, sctlr_el1 |
3412 |
++ bic x18, x18, #SCTLR_ELx_M |
3413 |
++ msr sctlr_el1, x18 |
3414 |
++ isb |
3415 |
++ |
3416 |
++ /* Everybody is enjoying the idmap, so we can rewrite swapper. */ |
3417 |
++ /* PGD */ |
3418 |
++ mov cur_pgdp, swapper_pa |
3419 |
++ add end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8) |
3420 |
++do_pgd: __idmap_kpti_get_pgtable_ent pgd |
3421 |
++ tbnz pgd, #1, walk_puds |
3422 |
++ __idmap_kpti_put_pgtable_ent_ng pgd |
3423 |
++next_pgd: |
3424 |
++ add cur_pgdp, cur_pgdp, #8 |
3425 |
++ cmp cur_pgdp, end_pgdp |
3426 |
++ b.ne do_pgd |
3427 |
++ |
3428 |
++ /* Publish the updated tables and nuke all the TLBs */ |
3429 |
++ dsb sy |
3430 |
++ tlbi vmalle1is |
3431 |
++ dsb ish |
3432 |
++ isb |
3433 |
++ |
3434 |
++ /* We're done: fire up the MMU again */ |
3435 |
++ mrs x18, sctlr_el1 |
3436 |
++ orr x18, x18, #SCTLR_ELx_M |
3437 |
++ msr sctlr_el1, x18 |
3438 |
++ isb |
3439 |
++ |
3440 |
++ /* Set the flag to zero to indicate that we're all done */ |
3441 |
++ str wzr, [flag_ptr] |
3442 |
++ ret |
3443 |
++ |
3444 |
++ /* PUD */ |
3445 |
++walk_puds: |
3446 |
++ .if CONFIG_PGTABLE_LEVELS > 3 |
3447 |
++ pte_to_phys cur_pudp, pgd |
3448 |
++ add end_pudp, cur_pudp, #(PTRS_PER_PUD * 8) |
3449 |
++do_pud: __idmap_kpti_get_pgtable_ent pud |
3450 |
++ tbnz pud, #1, walk_pmds |
3451 |
++ __idmap_kpti_put_pgtable_ent_ng pud |
3452 |
++next_pud: |
3453 |
++ add cur_pudp, cur_pudp, 8 |
3454 |
++ cmp cur_pudp, end_pudp |
3455 |
++ b.ne do_pud |
3456 |
++ b next_pgd |
3457 |
++ .else /* CONFIG_PGTABLE_LEVELS <= 3 */ |
3458 |
++ mov pud, pgd |
3459 |
++ b walk_pmds |
3460 |
++next_pud: |
3461 |
++ b next_pgd |
3462 |
++ .endif |
3463 |
++ |
3464 |
++ /* PMD */ |
3465 |
++walk_pmds: |
3466 |
++ .if CONFIG_PGTABLE_LEVELS > 2 |
3467 |
++ pte_to_phys cur_pmdp, pud |
3468 |
++ add end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8) |
3469 |
++do_pmd: __idmap_kpti_get_pgtable_ent pmd |
3470 |
++ tbnz pmd, #1, walk_ptes |
3471 |
++ __idmap_kpti_put_pgtable_ent_ng pmd |
3472 |
++next_pmd: |
3473 |
++ add cur_pmdp, cur_pmdp, #8 |
3474 |
++ cmp cur_pmdp, end_pmdp |
3475 |
++ b.ne do_pmd |
3476 |
++ b next_pud |
3477 |
++ .else /* CONFIG_PGTABLE_LEVELS <= 2 */ |
3478 |
++ mov pmd, pud |
3479 |
++ b walk_ptes |
3480 |
++next_pmd: |
3481 |
++ b next_pud |
3482 |
++ .endif |
3483 |
++ |
3484 |
++ /* PTE */ |
3485 |
++walk_ptes: |
3486 |
++ pte_to_phys cur_ptep, pmd |
3487 |
++ add end_ptep, cur_ptep, #(PTRS_PER_PTE * 8) |
3488 |
++do_pte: __idmap_kpti_get_pgtable_ent pte |
3489 |
++ __idmap_kpti_put_pgtable_ent_ng pte |
3490 |
++next_pte: |
3491 |
++ add cur_ptep, cur_ptep, #8 |
3492 |
++ cmp cur_ptep, end_ptep |
3493 |
++ b.ne do_pte |
3494 |
++ b next_pmd |
3495 |
++ |
3496 |
++ /* Secondary CPUs end up here */ |
3497 |
++__idmap_kpti_secondary: |
3498 |
++ /* Uninstall swapper before surgery begins */ |
3499 |
++ __idmap_cpu_set_reserved_ttbr1 x18, x17 |
3500 |
++ |
3501 |
++ /* Increment the flag to let the boot CPU we're ready */ |
3502 |
++1: ldxr w18, [flag_ptr] |
3503 |
++ add w18, w18, #1 |
3504 |
++ stxr w17, w18, [flag_ptr] |
3505 |
++ cbnz w17, 1b |
3506 |
++ |
3507 |
++ /* Wait for the boot CPU to finish messing around with swapper */ |
3508 |
++ sevl |
3509 |
++1: wfe |
3510 |
++ ldxr w18, [flag_ptr] |
3511 |
++ cbnz w18, 1b |
3512 |
++ |
3513 |
++ /* All done, act like nothing happened */ |
3514 |
++ msr ttbr1_el1, swapper_ttb |
3515 |
++ isb |
3516 |
++ ret |
3517 |
++ |
3518 |
++ .unreq cpu |
3519 |
++ .unreq num_cpus |
3520 |
++ .unreq swapper_pa |
3521 |
++ .unreq swapper_ttb |
3522 |
++ .unreq flag_ptr |
3523 |
++ .unreq cur_pgdp |
3524 |
++ .unreq end_pgdp |
3525 |
++ .unreq pgd |
3526 |
++ .unreq cur_pudp |
3527 |
++ .unreq end_pudp |
3528 |
++ .unreq pud |
3529 |
++ .unreq cur_pmdp |
3530 |
++ .unreq end_pmdp |
3531 |
++ .unreq pmd |
3532 |
++ .unreq cur_ptep |
3533 |
++ .unreq end_ptep |
3534 |
++ .unreq pte |
3535 |
++ENDPROC(idmap_kpti_install_ng_mappings) |
3536 |
++ .popsection |
3537 |
++#endif |
3538 |
++ |
3539 |
+ /* |
3540 |
+ * __cpu_setup |
3541 |
+ * |
3542 |
+ * Initialise the processor for turning the MMU on. Return in x0 the |
3543 |
+ * value of the SCTLR_EL1 register. |
3544 |
+ */ |
3545 |
+- .pushsection ".idmap.text", "ax" |
3546 |
++ .pushsection ".idmap.text", "awx" |
3547 |
+ ENTRY(__cpu_setup) |
3548 |
+ tlbi vmalle1 // Invalidate local TLB |
3549 |
+ dsb nsh |
3550 |
+@@ -225,7 +416,7 @@ ENTRY(__cpu_setup) |
3551 |
+ * both user and kernel. |
3552 |
+ */ |
3553 |
+ ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \ |
3554 |
+- TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 |
3555 |
++ TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 | TCR_A1 |
3556 |
+ tcr_set_idmap_t0sz x10, x9 |
3557 |
+ |
3558 |
+ /* |
3559 |
+diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S |
3560 |
+index 401ceb71540c..c5f05c4a4d00 100644 |
3561 |
+--- a/arch/arm64/xen/hypercall.S |
3562 |
++++ b/arch/arm64/xen/hypercall.S |
3563 |
+@@ -101,12 +101,12 @@ ENTRY(privcmd_call) |
3564 |
+ * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation |
3565 |
+ * is enabled (it implies that hardware UAO and PAN disabled). |
3566 |
+ */ |
3567 |
+- uaccess_ttbr0_enable x6, x7 |
3568 |
++ uaccess_ttbr0_enable x6, x7, x8 |
3569 |
+ hvc XEN_IMM |
3570 |
+ |
3571 |
+ /* |
3572 |
+ * Disable userspace access from kernel once the hyp call completed. |
3573 |
+ */ |
3574 |
+- uaccess_ttbr0_disable x6 |
3575 |
++ uaccess_ttbr0_disable x6, x7 |
3576 |
+ ret |
3577 |
+ ENDPROC(privcmd_call); |
3578 |
+diff --git a/arch/mn10300/mm/misalignment.c b/arch/mn10300/mm/misalignment.c |
3579 |
+index b39a388825ae..8ace89617c1c 100644 |
3580 |
+--- a/arch/mn10300/mm/misalignment.c |
3581 |
++++ b/arch/mn10300/mm/misalignment.c |
3582 |
+@@ -437,7 +437,7 @@ asmlinkage void misalignment(struct pt_regs *regs, enum exception_code code) |
3583 |
+ |
3584 |
+ info.si_signo = SIGSEGV; |
3585 |
+ info.si_errno = 0; |
3586 |
+- info.si_code = 0; |
3587 |
++ info.si_code = SEGV_MAPERR; |
3588 |
+ info.si_addr = (void *) regs->pc; |
3589 |
+ force_sig_info(SIGSEGV, &info, current); |
3590 |
+ return; |
3591 |
+diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c |
3592 |
+index 803e9e756f77..8d8437169b5e 100644 |
3593 |
+--- a/arch/openrisc/kernel/traps.c |
3594 |
++++ b/arch/openrisc/kernel/traps.c |
3595 |
+@@ -306,12 +306,12 @@ asmlinkage void do_unaligned_access(struct pt_regs *regs, unsigned long address) |
3596 |
+ siginfo_t info; |
3597 |
+ |
3598 |
+ if (user_mode(regs)) { |
3599 |
+- /* Send a SIGSEGV */ |
3600 |
+- info.si_signo = SIGSEGV; |
3601 |
++ /* Send a SIGBUS */ |
3602 |
++ info.si_signo = SIGBUS; |
3603 |
+ info.si_errno = 0; |
3604 |
+- /* info.si_code has been set above */ |
3605 |
+- info.si_addr = (void *)address; |
3606 |
+- force_sig_info(SIGSEGV, &info, current); |
3607 |
++ info.si_code = BUS_ADRALN; |
3608 |
++ info.si_addr = (void __user *)address; |
3609 |
++ force_sig_info(SIGBUS, &info, current); |
3610 |
+ } else { |
3611 |
+ printk("KERNEL: Unaligned Access 0x%.8lx\n", address); |
3612 |
+ show_registers(regs); |
3613 |
+diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c |
3614 |
+index f058e0c3e4d4..fd1d6c83f0c0 100644 |
3615 |
+--- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c |
3616 |
++++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c |
3617 |
+@@ -141,6 +141,7 @@ static struct shash_alg alg = { |
3618 |
+ .cra_name = "crc32c", |
3619 |
+ .cra_driver_name = "crc32c-vpmsum", |
3620 |
+ .cra_priority = 200, |
3621 |
++ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, |
3622 |
+ .cra_blocksize = CHKSUM_BLOCK_SIZE, |
3623 |
+ .cra_ctxsize = sizeof(u32), |
3624 |
+ .cra_module = THIS_MODULE, |
3625 |
+diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h |
3626 |
+index f0461618bf7b..eca3f9c68907 100644 |
3627 |
+--- a/arch/powerpc/include/asm/hvcall.h |
3628 |
++++ b/arch/powerpc/include/asm/hvcall.h |
3629 |
+@@ -353,6 +353,7 @@ |
3630 |
+ #define PROC_TABLE_GTSE 0x01 |
3631 |
+ |
3632 |
+ #ifndef __ASSEMBLY__ |
3633 |
++#include <linux/types.h> |
3634 |
+ |
3635 |
+ /** |
3636 |
+ * plpar_hcall_norets: - Make a pseries hypervisor call with no return arguments |
3637 |
+diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig |
3638 |
+index b12b8eb39c29..648160334abf 100644 |
3639 |
+--- a/arch/powerpc/kvm/Kconfig |
3640 |
++++ b/arch/powerpc/kvm/Kconfig |
3641 |
+@@ -68,7 +68,7 @@ config KVM_BOOK3S_64 |
3642 |
+ select KVM_BOOK3S_64_HANDLER |
3643 |
+ select KVM |
3644 |
+ select KVM_BOOK3S_PR_POSSIBLE if !KVM_BOOK3S_HV_POSSIBLE |
3645 |
+- select SPAPR_TCE_IOMMU if IOMMU_SUPPORT && (PPC_SERIES || PPC_POWERNV) |
3646 |
++ select SPAPR_TCE_IOMMU if IOMMU_SUPPORT && (PPC_PSERIES || PPC_POWERNV) |
3647 |
+ ---help--- |
3648 |
+ Support running unmodified book3s_64 and book3s_32 guest kernels |
3649 |
+ in virtual machines on book3s_64 host processors. |
3650 |
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c |
3651 |
+index 8d43cf205d34..f48e3379a18a 100644 |
3652 |
+--- a/arch/powerpc/kvm/book3s_hv.c |
3653 |
++++ b/arch/powerpc/kvm/book3s_hv.c |
3654 |
+@@ -999,8 +999,6 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu) |
3655 |
+ struct kvm *kvm = vcpu->kvm; |
3656 |
+ struct kvm_vcpu *tvcpu; |
3657 |
+ |
3658 |
+- if (!cpu_has_feature(CPU_FTR_ARCH_300)) |
3659 |
+- return EMULATE_FAIL; |
3660 |
+ if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE) |
3661 |
+ return RESUME_GUEST; |
3662 |
+ if (get_op(inst) != 31) |
3663 |
+@@ -1050,6 +1048,7 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu) |
3664 |
+ return RESUME_GUEST; |
3665 |
+ } |
3666 |
+ |
3667 |
++/* Called with vcpu->arch.vcore->lock held */ |
3668 |
+ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, |
3669 |
+ struct task_struct *tsk) |
3670 |
+ { |
3671 |
+@@ -1169,7 +1168,10 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, |
3672 |
+ swab32(vcpu->arch.emul_inst) : |
3673 |
+ vcpu->arch.emul_inst; |
3674 |
+ if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) { |
3675 |
++ /* Need vcore unlocked to call kvmppc_get_last_inst */ |
3676 |
++ spin_unlock(&vcpu->arch.vcore->lock); |
3677 |
+ r = kvmppc_emulate_debug_inst(run, vcpu); |
3678 |
++ spin_lock(&vcpu->arch.vcore->lock); |
3679 |
+ } else { |
3680 |
+ kvmppc_core_queue_program(vcpu, SRR1_PROGILL); |
3681 |
+ r = RESUME_GUEST; |
3682 |
+@@ -1184,8 +1186,13 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, |
3683 |
+ */ |
3684 |
+ case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: |
3685 |
+ r = EMULATE_FAIL; |
3686 |
+- if ((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) |
3687 |
++ if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) && |
3688 |
++ cpu_has_feature(CPU_FTR_ARCH_300)) { |
3689 |
++ /* Need vcore unlocked to call kvmppc_get_last_inst */ |
3690 |
++ spin_unlock(&vcpu->arch.vcore->lock); |
3691 |
+ r = kvmppc_emulate_doorbell_instr(vcpu); |
3692 |
++ spin_lock(&vcpu->arch.vcore->lock); |
3693 |
++ } |
3694 |
+ if (r == EMULATE_FAIL) { |
3695 |
+ kvmppc_core_queue_program(vcpu, SRR1_PROGILL); |
3696 |
+ r = RESUME_GUEST; |
3697 |
+@@ -2889,13 +2896,14 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) |
3698 |
+ /* make sure updates to secondary vcpu structs are visible now */ |
3699 |
+ smp_mb(); |
3700 |
+ |
3701 |
++ preempt_enable(); |
3702 |
++ |
3703 |
+ for (sub = 0; sub < core_info.n_subcores; ++sub) { |
3704 |
+ pvc = core_info.vc[sub]; |
3705 |
+ post_guest_process(pvc, pvc == vc); |
3706 |
+ } |
3707 |
+ |
3708 |
+ spin_lock(&vc->lock); |
3709 |
+- preempt_enable(); |
3710 |
+ |
3711 |
+ out: |
3712 |
+ vc->vcore_state = VCORE_INACTIVE; |
3713 |
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S |
3714 |
+index c85ac5c83bd4..2b3194b9608f 100644 |
3715 |
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S |
3716 |
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S |
3717 |
+@@ -1387,6 +1387,26 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) |
3718 |
+ blt deliver_guest_interrupt |
3719 |
+ |
3720 |
+ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ |
3721 |
++ /* Save more register state */ |
3722 |
++ mfdar r6 |
3723 |
++ mfdsisr r7 |
3724 |
++ std r6, VCPU_DAR(r9) |
3725 |
++ stw r7, VCPU_DSISR(r9) |
3726 |
++ /* don't overwrite fault_dar/fault_dsisr if HDSI */ |
3727 |
++ cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE |
3728 |
++ beq mc_cont |
3729 |
++ std r6, VCPU_FAULT_DAR(r9) |
3730 |
++ stw r7, VCPU_FAULT_DSISR(r9) |
3731 |
++ |
3732 |
++ /* See if it is a machine check */ |
3733 |
++ cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK |
3734 |
++ beq machine_check_realmode |
3735 |
++mc_cont: |
3736 |
++#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING |
3737 |
++ addi r3, r9, VCPU_TB_RMEXIT |
3738 |
++ mr r4, r9 |
3739 |
++ bl kvmhv_accumulate_time |
3740 |
++#endif |
3741 |
+ #ifdef CONFIG_KVM_XICS |
3742 |
+ /* We are exiting, pull the VP from the XIVE */ |
3743 |
+ lwz r0, VCPU_XIVE_PUSHED(r9) |
3744 |
+@@ -1424,26 +1444,6 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ |
3745 |
+ eieio |
3746 |
+ 1: |
3747 |
+ #endif /* CONFIG_KVM_XICS */ |
3748 |
+- /* Save more register state */ |
3749 |
+- mfdar r6 |
3750 |
+- mfdsisr r7 |
3751 |
+- std r6, VCPU_DAR(r9) |
3752 |
+- stw r7, VCPU_DSISR(r9) |
3753 |
+- /* don't overwrite fault_dar/fault_dsisr if HDSI */ |
3754 |
+- cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE |
3755 |
+- beq mc_cont |
3756 |
+- std r6, VCPU_FAULT_DAR(r9) |
3757 |
+- stw r7, VCPU_FAULT_DSISR(r9) |
3758 |
+- |
3759 |
+- /* See if it is a machine check */ |
3760 |
+- cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK |
3761 |
+- beq machine_check_realmode |
3762 |
+-mc_cont: |
3763 |
+-#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING |
3764 |
+- addi r3, r9, VCPU_TB_RMEXIT |
3765 |
+- mr r4, r9 |
3766 |
+- bl kvmhv_accumulate_time |
3767 |
+-#endif |
3768 |
+ |
3769 |
+ mr r3, r12 |
3770 |
+ /* Increment exit count, poke other threads to exit */ |
3771 |
+diff --git a/arch/s390/crypto/crc32-vx.c b/arch/s390/crypto/crc32-vx.c |
3772 |
+index 992e630c227b..6f4985f357c6 100644 |
3773 |
+--- a/arch/s390/crypto/crc32-vx.c |
3774 |
++++ b/arch/s390/crypto/crc32-vx.c |
3775 |
+@@ -238,6 +238,7 @@ static struct shash_alg crc32_vx_algs[] = { |
3776 |
+ .cra_name = "crc32", |
3777 |
+ .cra_driver_name = "crc32-vx", |
3778 |
+ .cra_priority = 200, |
3779 |
++ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, |
3780 |
+ .cra_blocksize = CRC32_BLOCK_SIZE, |
3781 |
+ .cra_ctxsize = sizeof(struct crc_ctx), |
3782 |
+ .cra_module = THIS_MODULE, |
3783 |
+@@ -258,6 +259,7 @@ static struct shash_alg crc32_vx_algs[] = { |
3784 |
+ .cra_name = "crc32be", |
3785 |
+ .cra_driver_name = "crc32be-vx", |
3786 |
+ .cra_priority = 200, |
3787 |
++ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, |
3788 |
+ .cra_blocksize = CRC32_BLOCK_SIZE, |
3789 |
+ .cra_ctxsize = sizeof(struct crc_ctx), |
3790 |
+ .cra_module = THIS_MODULE, |
3791 |
+@@ -278,6 +280,7 @@ static struct shash_alg crc32_vx_algs[] = { |
3792 |
+ .cra_name = "crc32c", |
3793 |
+ .cra_driver_name = "crc32c-vx", |
3794 |
+ .cra_priority = 200, |
3795 |
++ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, |
3796 |
+ .cra_blocksize = CRC32_BLOCK_SIZE, |
3797 |
+ .cra_ctxsize = sizeof(struct crc_ctx), |
3798 |
+ .cra_module = THIS_MODULE, |
3799 |
+diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c |
3800 |
+index 57cff00cad17..b3770bb26211 100644 |
3801 |
+--- a/arch/sh/kernel/traps_32.c |
3802 |
++++ b/arch/sh/kernel/traps_32.c |
3803 |
+@@ -609,7 +609,8 @@ asmlinkage void do_divide_error(unsigned long r4) |
3804 |
+ break; |
3805 |
+ } |
3806 |
+ |
3807 |
+- force_sig_info(SIGFPE, &info, current); |
3808 |
++ info.si_signo = SIGFPE; |
3809 |
++ force_sig_info(info.si_signo, &info, current); |
3810 |
+ } |
3811 |
+ #endif |
3812 |
+ |
3813 |
+diff --git a/arch/sparc/crypto/crc32c_glue.c b/arch/sparc/crypto/crc32c_glue.c |
3814 |
+index d1064e46efe8..8aa664638c3c 100644 |
3815 |
+--- a/arch/sparc/crypto/crc32c_glue.c |
3816 |
++++ b/arch/sparc/crypto/crc32c_glue.c |
3817 |
+@@ -133,6 +133,7 @@ static struct shash_alg alg = { |
3818 |
+ .cra_name = "crc32c", |
3819 |
+ .cra_driver_name = "crc32c-sparc64", |
3820 |
+ .cra_priority = SPARC_CR_OPCODE_PRIORITY, |
3821 |
++ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, |
3822 |
+ .cra_blocksize = CHKSUM_BLOCK_SIZE, |
3823 |
+ .cra_ctxsize = sizeof(u32), |
3824 |
+ .cra_alignmask = 7, |
3825 |
+diff --git a/arch/x86/crypto/crc32-pclmul_glue.c b/arch/x86/crypto/crc32-pclmul_glue.c |
3826 |
+index 27226df3f7d8..c8d9cdacbf10 100644 |
3827 |
+--- a/arch/x86/crypto/crc32-pclmul_glue.c |
3828 |
++++ b/arch/x86/crypto/crc32-pclmul_glue.c |
3829 |
+@@ -162,6 +162,7 @@ static struct shash_alg alg = { |
3830 |
+ .cra_name = "crc32", |
3831 |
+ .cra_driver_name = "crc32-pclmul", |
3832 |
+ .cra_priority = 200, |
3833 |
++ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, |
3834 |
+ .cra_blocksize = CHKSUM_BLOCK_SIZE, |
3835 |
+ .cra_ctxsize = sizeof(u32), |
3836 |
+ .cra_module = THIS_MODULE, |
3837 |
+diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c |
3838 |
+index c194d5717ae5..5773e1161072 100644 |
3839 |
+--- a/arch/x86/crypto/crc32c-intel_glue.c |
3840 |
++++ b/arch/x86/crypto/crc32c-intel_glue.c |
3841 |
+@@ -226,6 +226,7 @@ static struct shash_alg alg = { |
3842 |
+ .cra_name = "crc32c", |
3843 |
+ .cra_driver_name = "crc32c-intel", |
3844 |
+ .cra_priority = 200, |
3845 |
++ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, |
3846 |
+ .cra_blocksize = CHKSUM_BLOCK_SIZE, |
3847 |
+ .cra_ctxsize = sizeof(u32), |
3848 |
+ .cra_module = THIS_MODULE, |
3849 |
+diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c |
3850 |
+index e32142bc071d..28c372003e44 100644 |
3851 |
+--- a/arch/x86/crypto/poly1305_glue.c |
3852 |
++++ b/arch/x86/crypto/poly1305_glue.c |
3853 |
+@@ -164,7 +164,6 @@ static struct shash_alg alg = { |
3854 |
+ .init = poly1305_simd_init, |
3855 |
+ .update = poly1305_simd_update, |
3856 |
+ .final = crypto_poly1305_final, |
3857 |
+- .setkey = crypto_poly1305_setkey, |
3858 |
+ .descsize = sizeof(struct poly1305_simd_desc_ctx), |
3859 |
+ .base = { |
3860 |
+ .cra_name = "poly1305", |
3861 |
+diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c |
3862 |
+index 36870b26067a..d08805032f01 100644 |
3863 |
+--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c |
3864 |
++++ b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c |
3865 |
+@@ -57,10 +57,12 @@ void sha512_mb_mgr_init_avx2(struct sha512_mb_mgr *state) |
3866 |
+ { |
3867 |
+ unsigned int j; |
3868 |
+ |
3869 |
+- state->lens[0] = 0; |
3870 |
+- state->lens[1] = 1; |
3871 |
+- state->lens[2] = 2; |
3872 |
+- state->lens[3] = 3; |
3873 |
++ /* initially all lanes are unused */ |
3874 |
++ state->lens[0] = 0xFFFFFFFF00000000; |
3875 |
++ state->lens[1] = 0xFFFFFFFF00000001; |
3876 |
++ state->lens[2] = 0xFFFFFFFF00000002; |
3877 |
++ state->lens[3] = 0xFFFFFFFF00000003; |
3878 |
++ |
3879 |
+ state->unused_lanes = 0xFF03020100; |
3880 |
+ for (j = 0; j < 4; j++) |
3881 |
+ state->ldata[j].job_in_lane = NULL; |
3882 |
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c |
3883 |
+index 0fce8d73403c..beb7f8795bc1 100644 |
3884 |
+--- a/arch/x86/kvm/mmu.c |
3885 |
++++ b/arch/x86/kvm/mmu.c |
3886 |
+@@ -3784,7 +3784,8 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn) |
3887 |
+ bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu) |
3888 |
+ { |
3889 |
+ if (unlikely(!lapic_in_kernel(vcpu) || |
3890 |
+- kvm_event_needs_reinjection(vcpu))) |
3891 |
++ kvm_event_needs_reinjection(vcpu) || |
3892 |
++ vcpu->arch.exception.pending)) |
3893 |
+ return false; |
3894 |
+ |
3895 |
+ if (!vcpu->arch.apf.delivery_as_pf_vmexit && is_guest_mode(vcpu)) |
3896 |
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c |
3897 |
+index 0ae4b1a86168..0ea909ca45c2 100644 |
3898 |
+--- a/arch/x86/kvm/vmx.c |
3899 |
++++ b/arch/x86/kvm/vmx.c |
3900 |
+@@ -5322,14 +5322,15 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu, |
3901 |
+ |
3902 |
+ if (is_guest_mode(vcpu) && |
3903 |
+ vector == vmx->nested.posted_intr_nv) { |
3904 |
+- /* the PIR and ON have been set by L1. */ |
3905 |
+- kvm_vcpu_trigger_posted_interrupt(vcpu, true); |
3906 |
+ /* |
3907 |
+ * If a posted intr is not recognized by hardware, |
3908 |
+ * we will accomplish it in the next vmentry. |
3909 |
+ */ |
3910 |
+ vmx->nested.pi_pending = true; |
3911 |
+ kvm_make_request(KVM_REQ_EVENT, vcpu); |
3912 |
++ /* the PIR and ON have been set by L1. */ |
3913 |
++ if (!kvm_vcpu_trigger_posted_interrupt(vcpu, true)) |
3914 |
++ kvm_vcpu_kick(vcpu); |
3915 |
+ return 0; |
3916 |
+ } |
3917 |
+ return -1; |
3918 |
+@@ -11245,7 +11246,6 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) |
3919 |
+ if (block_nested_events) |
3920 |
+ return -EBUSY; |
3921 |
+ nested_vmx_inject_exception_vmexit(vcpu, exit_qual); |
3922 |
+- vcpu->arch.exception.pending = false; |
3923 |
+ return 0; |
3924 |
+ } |
3925 |
+ |
3926 |
+diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h |
3927 |
+index d0b95b7a90b4..6d112d8f799c 100644 |
3928 |
+--- a/arch/x86/kvm/x86.h |
3929 |
++++ b/arch/x86/kvm/x86.h |
3930 |
+@@ -12,6 +12,7 @@ |
3931 |
+ |
3932 |
+ static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu) |
3933 |
+ { |
3934 |
++ vcpu->arch.exception.pending = false; |
3935 |
+ vcpu->arch.exception.injected = false; |
3936 |
+ } |
3937 |
+ |
3938 |
+diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h |
3939 |
+index eaaf1ebcc7a4..5bfbc1c401d4 100644 |
3940 |
+--- a/arch/xtensa/include/asm/futex.h |
3941 |
++++ b/arch/xtensa/include/asm/futex.h |
3942 |
+@@ -92,7 +92,6 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, |
3943 |
+ u32 oldval, u32 newval) |
3944 |
+ { |
3945 |
+ int ret = 0; |
3946 |
+- u32 prev; |
3947 |
+ |
3948 |
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) |
3949 |
+ return -EFAULT; |
3950 |
+@@ -103,26 +102,24 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, |
3951 |
+ |
3952 |
+ __asm__ __volatile__ ( |
3953 |
+ " # futex_atomic_cmpxchg_inatomic\n" |
3954 |
+- "1: l32i %1, %3, 0\n" |
3955 |
+- " mov %0, %5\n" |
3956 |
+- " wsr %1, scompare1\n" |
3957 |
+- "2: s32c1i %0, %3, 0\n" |
3958 |
+- "3:\n" |
3959 |
++ " wsr %5, scompare1\n" |
3960 |
++ "1: s32c1i %1, %4, 0\n" |
3961 |
++ " s32i %1, %6, 0\n" |
3962 |
++ "2:\n" |
3963 |
+ " .section .fixup,\"ax\"\n" |
3964 |
+ " .align 4\n" |
3965 |
+- "4: .long 3b\n" |
3966 |
+- "5: l32r %1, 4b\n" |
3967 |
+- " movi %0, %6\n" |
3968 |
++ "3: .long 2b\n" |
3969 |
++ "4: l32r %1, 3b\n" |
3970 |
++ " movi %0, %7\n" |
3971 |
+ " jx %1\n" |
3972 |
+ " .previous\n" |
3973 |
+ " .section __ex_table,\"a\"\n" |
3974 |
+- " .long 1b,5b,2b,5b\n" |
3975 |
++ " .long 1b,4b\n" |
3976 |
+ " .previous\n" |
3977 |
+- : "+r" (ret), "=&r" (prev), "+m" (*uaddr) |
3978 |
+- : "r" (uaddr), "r" (oldval), "r" (newval), "I" (-EFAULT) |
3979 |
++ : "+r" (ret), "+r" (newval), "+m" (*uaddr), "+m" (*uval) |
3980 |
++ : "r" (uaddr), "r" (oldval), "r" (uval), "I" (-EFAULT) |
3981 |
+ : "memory"); |
3982 |
+ |
3983 |
+- *uval = prev; |
3984 |
+ return ret; |
3985 |
+ } |
3986 |
+ |
3987 |
+diff --git a/block/blk-core.c b/block/blk-core.c |
3988 |
+index 7b30bf10b1d4..f3750389e351 100644 |
3989 |
+--- a/block/blk-core.c |
3990 |
++++ b/block/blk-core.c |
3991 |
+@@ -660,6 +660,15 @@ void blk_cleanup_queue(struct request_queue *q) |
3992 |
+ queue_flag_set(QUEUE_FLAG_DEAD, q); |
3993 |
+ spin_unlock_irq(lock); |
3994 |
+ |
3995 |
++ /* |
3996 |
++ * make sure all in-progress dispatch are completed because |
3997 |
++ * blk_freeze_queue() can only complete all requests, and |
3998 |
++ * dispatch may still be in-progress since we dispatch requests |
3999 |
++ * from more than one contexts |
4000 |
++ */ |
4001 |
++ if (q->mq_ops) |
4002 |
++ blk_mq_quiesce_queue(q); |
4003 |
++ |
4004 |
+ /* for synchronous bio-based driver finish in-flight integrity i/o */ |
4005 |
+ blk_flush_integrity(); |
4006 |
+ |
4007 |
+diff --git a/crypto/ahash.c b/crypto/ahash.c |
4008 |
+index 5e8666e6ccae..f75b5c1f7152 100644 |
4009 |
+--- a/crypto/ahash.c |
4010 |
++++ b/crypto/ahash.c |
4011 |
+@@ -193,11 +193,18 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, |
4012 |
+ unsigned int keylen) |
4013 |
+ { |
4014 |
+ unsigned long alignmask = crypto_ahash_alignmask(tfm); |
4015 |
++ int err; |
4016 |
+ |
4017 |
+ if ((unsigned long)key & alignmask) |
4018 |
+- return ahash_setkey_unaligned(tfm, key, keylen); |
4019 |
++ err = ahash_setkey_unaligned(tfm, key, keylen); |
4020 |
++ else |
4021 |
++ err = tfm->setkey(tfm, key, keylen); |
4022 |
++ |
4023 |
++ if (err) |
4024 |
++ return err; |
4025 |
+ |
4026 |
+- return tfm->setkey(tfm, key, keylen); |
4027 |
++ crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); |
4028 |
++ return 0; |
4029 |
+ } |
4030 |
+ EXPORT_SYMBOL_GPL(crypto_ahash_setkey); |
4031 |
+ |
4032 |
+@@ -370,7 +377,12 @@ EXPORT_SYMBOL_GPL(crypto_ahash_finup); |
4033 |
+ |
4034 |
+ int crypto_ahash_digest(struct ahash_request *req) |
4035 |
+ { |
4036 |
+- return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest); |
4037 |
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
4038 |
++ |
4039 |
++ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) |
4040 |
++ return -ENOKEY; |
4041 |
++ |
4042 |
++ return crypto_ahash_op(req, tfm->digest); |
4043 |
+ } |
4044 |
+ EXPORT_SYMBOL_GPL(crypto_ahash_digest); |
4045 |
+ |
4046 |
+@@ -456,7 +468,6 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) |
4047 |
+ struct ahash_alg *alg = crypto_ahash_alg(hash); |
4048 |
+ |
4049 |
+ hash->setkey = ahash_nosetkey; |
4050 |
+- hash->has_setkey = false; |
4051 |
+ hash->export = ahash_no_export; |
4052 |
+ hash->import = ahash_no_import; |
4053 |
+ |
4054 |
+@@ -471,7 +482,8 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) |
4055 |
+ |
4056 |
+ if (alg->setkey) { |
4057 |
+ hash->setkey = alg->setkey; |
4058 |
+- hash->has_setkey = true; |
4059 |
++ if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY)) |
4060 |
++ crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY); |
4061 |
+ } |
4062 |
+ if (alg->export) |
4063 |
+ hash->export = alg->export; |
4064 |
+@@ -655,5 +667,16 @@ struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask) |
4065 |
+ } |
4066 |
+ EXPORT_SYMBOL_GPL(ahash_attr_alg); |
4067 |
+ |
4068 |
++bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg) |
4069 |
++{ |
4070 |
++ struct crypto_alg *alg = &halg->base; |
4071 |
++ |
4072 |
++ if (alg->cra_type != &crypto_ahash_type) |
4073 |
++ return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg)); |
4074 |
++ |
4075 |
++ return __crypto_ahash_alg(alg)->setkey != NULL; |
4076 |
++} |
4077 |
++EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey); |
4078 |
++ |
4079 |
+ MODULE_LICENSE("GPL"); |
4080 |
+ MODULE_DESCRIPTION("Asynchronous cryptographic hash type"); |
4081 |
+diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c |
4082 |
+index 5e92bd275ef3..39cebd3256bf 100644 |
4083 |
+--- a/crypto/algif_hash.c |
4084 |
++++ b/crypto/algif_hash.c |
4085 |
+@@ -34,11 +34,6 @@ struct hash_ctx { |
4086 |
+ struct ahash_request req; |
4087 |
+ }; |
4088 |
+ |
4089 |
+-struct algif_hash_tfm { |
4090 |
+- struct crypto_ahash *hash; |
4091 |
+- bool has_key; |
4092 |
+-}; |
4093 |
+- |
4094 |
+ static int hash_alloc_result(struct sock *sk, struct hash_ctx *ctx) |
4095 |
+ { |
4096 |
+ unsigned ds; |
4097 |
+@@ -309,7 +304,7 @@ static int hash_check_key(struct socket *sock) |
4098 |
+ int err = 0; |
4099 |
+ struct sock *psk; |
4100 |
+ struct alg_sock *pask; |
4101 |
+- struct algif_hash_tfm *tfm; |
4102 |
++ struct crypto_ahash *tfm; |
4103 |
+ struct sock *sk = sock->sk; |
4104 |
+ struct alg_sock *ask = alg_sk(sk); |
4105 |
+ |
4106 |
+@@ -323,7 +318,7 @@ static int hash_check_key(struct socket *sock) |
4107 |
+ |
4108 |
+ err = -ENOKEY; |
4109 |
+ lock_sock_nested(psk, SINGLE_DEPTH_NESTING); |
4110 |
+- if (!tfm->has_key) |
4111 |
++ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) |
4112 |
+ goto unlock; |
4113 |
+ |
4114 |
+ if (!pask->refcnt++) |
4115 |
+@@ -414,41 +409,17 @@ static struct proto_ops algif_hash_ops_nokey = { |
4116 |
+ |
4117 |
+ static void *hash_bind(const char *name, u32 type, u32 mask) |
4118 |
+ { |
4119 |
+- struct algif_hash_tfm *tfm; |
4120 |
+- struct crypto_ahash *hash; |
4121 |
+- |
4122 |
+- tfm = kzalloc(sizeof(*tfm), GFP_KERNEL); |
4123 |
+- if (!tfm) |
4124 |
+- return ERR_PTR(-ENOMEM); |
4125 |
+- |
4126 |
+- hash = crypto_alloc_ahash(name, type, mask); |
4127 |
+- if (IS_ERR(hash)) { |
4128 |
+- kfree(tfm); |
4129 |
+- return ERR_CAST(hash); |
4130 |
+- } |
4131 |
+- |
4132 |
+- tfm->hash = hash; |
4133 |
+- |
4134 |
+- return tfm; |
4135 |
++ return crypto_alloc_ahash(name, type, mask); |
4136 |
+ } |
4137 |
+ |
4138 |
+ static void hash_release(void *private) |
4139 |
+ { |
4140 |
+- struct algif_hash_tfm *tfm = private; |
4141 |
+- |
4142 |
+- crypto_free_ahash(tfm->hash); |
4143 |
+- kfree(tfm); |
4144 |
++ crypto_free_ahash(private); |
4145 |
+ } |
4146 |
+ |
4147 |
+ static int hash_setkey(void *private, const u8 *key, unsigned int keylen) |
4148 |
+ { |
4149 |
+- struct algif_hash_tfm *tfm = private; |
4150 |
+- int err; |
4151 |
+- |
4152 |
+- err = crypto_ahash_setkey(tfm->hash, key, keylen); |
4153 |
+- tfm->has_key = !err; |
4154 |
+- |
4155 |
+- return err; |
4156 |
++ return crypto_ahash_setkey(private, key, keylen); |
4157 |
+ } |
4158 |
+ |
4159 |
+ static void hash_sock_destruct(struct sock *sk) |
4160 |
+@@ -463,11 +434,10 @@ static void hash_sock_destruct(struct sock *sk) |
4161 |
+ |
4162 |
+ static int hash_accept_parent_nokey(void *private, struct sock *sk) |
4163 |
+ { |
4164 |
+- struct hash_ctx *ctx; |
4165 |
++ struct crypto_ahash *tfm = private; |
4166 |
+ struct alg_sock *ask = alg_sk(sk); |
4167 |
+- struct algif_hash_tfm *tfm = private; |
4168 |
+- struct crypto_ahash *hash = tfm->hash; |
4169 |
+- unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash); |
4170 |
++ struct hash_ctx *ctx; |
4171 |
++ unsigned int len = sizeof(*ctx) + crypto_ahash_reqsize(tfm); |
4172 |
+ |
4173 |
+ ctx = sock_kmalloc(sk, len, GFP_KERNEL); |
4174 |
+ if (!ctx) |
4175 |
+@@ -480,7 +450,7 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk) |
4176 |
+ |
4177 |
+ ask->private = ctx; |
4178 |
+ |
4179 |
+- ahash_request_set_tfm(&ctx->req, hash); |
4180 |
++ ahash_request_set_tfm(&ctx->req, tfm); |
4181 |
+ ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG, |
4182 |
+ af_alg_complete, &ctx->completion); |
4183 |
+ |
4184 |
+@@ -491,9 +461,9 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk) |
4185 |
+ |
4186 |
+ static int hash_accept_parent(void *private, struct sock *sk) |
4187 |
+ { |
4188 |
+- struct algif_hash_tfm *tfm = private; |
4189 |
++ struct crypto_ahash *tfm = private; |
4190 |
+ |
4191 |
+- if (!tfm->has_key && crypto_ahash_has_setkey(tfm->hash)) |
4192 |
++ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) |
4193 |
+ return -ENOKEY; |
4194 |
+ |
4195 |
+ return hash_accept_parent_nokey(private, sk); |
4196 |
+diff --git a/crypto/crc32_generic.c b/crypto/crc32_generic.c |
4197 |
+index aa2a25fc7482..718cbce8d169 100644 |
4198 |
+--- a/crypto/crc32_generic.c |
4199 |
++++ b/crypto/crc32_generic.c |
4200 |
+@@ -133,6 +133,7 @@ static struct shash_alg alg = { |
4201 |
+ .cra_name = "crc32", |
4202 |
+ .cra_driver_name = "crc32-generic", |
4203 |
+ .cra_priority = 100, |
4204 |
++ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, |
4205 |
+ .cra_blocksize = CHKSUM_BLOCK_SIZE, |
4206 |
+ .cra_ctxsize = sizeof(u32), |
4207 |
+ .cra_module = THIS_MODULE, |
4208 |
+diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c |
4209 |
+index 4c0a0e271876..372320399622 100644 |
4210 |
+--- a/crypto/crc32c_generic.c |
4211 |
++++ b/crypto/crc32c_generic.c |
4212 |
+@@ -146,6 +146,7 @@ static struct shash_alg alg = { |
4213 |
+ .cra_name = "crc32c", |
4214 |
+ .cra_driver_name = "crc32c-generic", |
4215 |
+ .cra_priority = 100, |
4216 |
++ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, |
4217 |
+ .cra_blocksize = CHKSUM_BLOCK_SIZE, |
4218 |
+ .cra_alignmask = 3, |
4219 |
+ .cra_ctxsize = sizeof(struct chksum_ctx), |
4220 |
+diff --git a/crypto/cryptd.c b/crypto/cryptd.c |
4221 |
+index 0508c48a45c4..248f6ba41688 100644 |
4222 |
+--- a/crypto/cryptd.c |
4223 |
++++ b/crypto/cryptd.c |
4224 |
+@@ -895,10 +895,9 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, |
4225 |
+ if (err) |
4226 |
+ goto out_free_inst; |
4227 |
+ |
4228 |
+- type = CRYPTO_ALG_ASYNC; |
4229 |
+- if (alg->cra_flags & CRYPTO_ALG_INTERNAL) |
4230 |
+- type |= CRYPTO_ALG_INTERNAL; |
4231 |
+- inst->alg.halg.base.cra_flags = type; |
4232 |
++ inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC | |
4233 |
++ (alg->cra_flags & (CRYPTO_ALG_INTERNAL | |
4234 |
++ CRYPTO_ALG_OPTIONAL_KEY)); |
4235 |
+ |
4236 |
+ inst->alg.halg.digestsize = salg->digestsize; |
4237 |
+ inst->alg.halg.statesize = salg->statesize; |
4238 |
+@@ -913,7 +912,8 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, |
4239 |
+ inst->alg.finup = cryptd_hash_finup_enqueue; |
4240 |
+ inst->alg.export = cryptd_hash_export; |
4241 |
+ inst->alg.import = cryptd_hash_import; |
4242 |
+- inst->alg.setkey = cryptd_hash_setkey; |
4243 |
++ if (crypto_shash_alg_has_setkey(salg)) |
4244 |
++ inst->alg.setkey = cryptd_hash_setkey; |
4245 |
+ inst->alg.digest = cryptd_hash_digest_enqueue; |
4246 |
+ |
4247 |
+ err = ahash_register_instance(tmpl, inst); |
4248 |
+diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c |
4249 |
+index eca04d3729b3..e0732d979e3b 100644 |
4250 |
+--- a/crypto/mcryptd.c |
4251 |
++++ b/crypto/mcryptd.c |
4252 |
+@@ -517,10 +517,9 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, |
4253 |
+ if (err) |
4254 |
+ goto out_free_inst; |
4255 |
+ |
4256 |
+- type = CRYPTO_ALG_ASYNC; |
4257 |
+- if (alg->cra_flags & CRYPTO_ALG_INTERNAL) |
4258 |
+- type |= CRYPTO_ALG_INTERNAL; |
4259 |
+- inst->alg.halg.base.cra_flags = type; |
4260 |
++ inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC | |
4261 |
++ (alg->cra_flags & (CRYPTO_ALG_INTERNAL | |
4262 |
++ CRYPTO_ALG_OPTIONAL_KEY)); |
4263 |
+ |
4264 |
+ inst->alg.halg.digestsize = halg->digestsize; |
4265 |
+ inst->alg.halg.statesize = halg->statesize; |
4266 |
+@@ -535,7 +534,8 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, |
4267 |
+ inst->alg.finup = mcryptd_hash_finup_enqueue; |
4268 |
+ inst->alg.export = mcryptd_hash_export; |
4269 |
+ inst->alg.import = mcryptd_hash_import; |
4270 |
+- inst->alg.setkey = mcryptd_hash_setkey; |
4271 |
++ if (crypto_hash_alg_has_setkey(halg)) |
4272 |
++ inst->alg.setkey = mcryptd_hash_setkey; |
4273 |
+ inst->alg.digest = mcryptd_hash_digest_enqueue; |
4274 |
+ |
4275 |
+ err = ahash_register_instance(tmpl, inst); |
4276 |
+diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c |
4277 |
+index b1c2d57dc734..ba39eb308c79 100644 |
4278 |
+--- a/crypto/poly1305_generic.c |
4279 |
++++ b/crypto/poly1305_generic.c |
4280 |
+@@ -47,17 +47,6 @@ int crypto_poly1305_init(struct shash_desc *desc) |
4281 |
+ } |
4282 |
+ EXPORT_SYMBOL_GPL(crypto_poly1305_init); |
4283 |
+ |
4284 |
+-int crypto_poly1305_setkey(struct crypto_shash *tfm, |
4285 |
+- const u8 *key, unsigned int keylen) |
4286 |
+-{ |
4287 |
+- /* Poly1305 requires a unique key for each tag, which implies that |
4288 |
+- * we can't set it on the tfm that gets accessed by multiple users |
4289 |
+- * simultaneously. Instead we expect the key as the first 32 bytes in |
4290 |
+- * the update() call. */ |
4291 |
+- return -ENOTSUPP; |
4292 |
+-} |
4293 |
+-EXPORT_SYMBOL_GPL(crypto_poly1305_setkey); |
4294 |
+- |
4295 |
+ static void poly1305_setrkey(struct poly1305_desc_ctx *dctx, const u8 *key) |
4296 |
+ { |
4297 |
+ /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ |
4298 |
+@@ -76,6 +65,11 @@ static void poly1305_setskey(struct poly1305_desc_ctx *dctx, const u8 *key) |
4299 |
+ dctx->s[3] = get_unaligned_le32(key + 12); |
4300 |
+ } |
4301 |
+ |
4302 |
++/* |
4303 |
++ * Poly1305 requires a unique key for each tag, which implies that we can't set |
4304 |
++ * it on the tfm that gets accessed by multiple users simultaneously. Instead we |
4305 |
++ * expect the key as the first 32 bytes in the update() call. |
4306 |
++ */ |
4307 |
+ unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, |
4308 |
+ const u8 *src, unsigned int srclen) |
4309 |
+ { |
4310 |
+@@ -281,7 +275,6 @@ static struct shash_alg poly1305_alg = { |
4311 |
+ .init = crypto_poly1305_init, |
4312 |
+ .update = crypto_poly1305_update, |
4313 |
+ .final = crypto_poly1305_final, |
4314 |
+- .setkey = crypto_poly1305_setkey, |
4315 |
+ .descsize = sizeof(struct poly1305_desc_ctx), |
4316 |
+ .base = { |
4317 |
+ .cra_name = "poly1305", |
4318 |
+diff --git a/crypto/shash.c b/crypto/shash.c |
4319 |
+index e849d3ee2e27..5d732c6bb4b2 100644 |
4320 |
+--- a/crypto/shash.c |
4321 |
++++ b/crypto/shash.c |
4322 |
+@@ -58,11 +58,18 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, |
4323 |
+ { |
4324 |
+ struct shash_alg *shash = crypto_shash_alg(tfm); |
4325 |
+ unsigned long alignmask = crypto_shash_alignmask(tfm); |
4326 |
++ int err; |
4327 |
+ |
4328 |
+ if ((unsigned long)key & alignmask) |
4329 |
+- return shash_setkey_unaligned(tfm, key, keylen); |
4330 |
++ err = shash_setkey_unaligned(tfm, key, keylen); |
4331 |
++ else |
4332 |
++ err = shash->setkey(tfm, key, keylen); |
4333 |
++ |
4334 |
++ if (err) |
4335 |
++ return err; |
4336 |
+ |
4337 |
+- return shash->setkey(tfm, key, keylen); |
4338 |
++ crypto_shash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); |
4339 |
++ return 0; |
4340 |
+ } |
4341 |
+ EXPORT_SYMBOL_GPL(crypto_shash_setkey); |
4342 |
+ |
4343 |
+@@ -181,6 +188,9 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data, |
4344 |
+ struct shash_alg *shash = crypto_shash_alg(tfm); |
4345 |
+ unsigned long alignmask = crypto_shash_alignmask(tfm); |
4346 |
+ |
4347 |
++ if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) |
4348 |
++ return -ENOKEY; |
4349 |
++ |
4350 |
+ if (((unsigned long)data | (unsigned long)out) & alignmask) |
4351 |
+ return shash_digest_unaligned(desc, data, len, out); |
4352 |
+ |
4353 |
+@@ -360,7 +370,8 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm) |
4354 |
+ crt->digest = shash_async_digest; |
4355 |
+ crt->setkey = shash_async_setkey; |
4356 |
+ |
4357 |
+- crt->has_setkey = alg->setkey != shash_no_setkey; |
4358 |
++ crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) & |
4359 |
++ CRYPTO_TFM_NEED_KEY); |
4360 |
+ |
4361 |
+ if (alg->export) |
4362 |
+ crt->export = shash_async_export; |
4363 |
+@@ -375,8 +386,14 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm) |
4364 |
+ static int crypto_shash_init_tfm(struct crypto_tfm *tfm) |
4365 |
+ { |
4366 |
+ struct crypto_shash *hash = __crypto_shash_cast(tfm); |
4367 |
++ struct shash_alg *alg = crypto_shash_alg(hash); |
4368 |
++ |
4369 |
++ hash->descsize = alg->descsize; |
4370 |
++ |
4371 |
++ if (crypto_shash_alg_has_setkey(alg) && |
4372 |
++ !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY)) |
4373 |
++ crypto_shash_set_flags(hash, CRYPTO_TFM_NEED_KEY); |
4374 |
+ |
4375 |
+- hash->descsize = crypto_shash_alg(hash)->descsize; |
4376 |
+ return 0; |
4377 |
+ } |
4378 |
+ |
4379 |
+diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c |
4380 |
+index dea0fb3d6f64..f14b4326e855 100644 |
4381 |
+--- a/drivers/acpi/nfit/core.c |
4382 |
++++ b/drivers/acpi/nfit/core.c |
4383 |
+@@ -1618,6 +1618,9 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) |
4384 |
+ struct kernfs_node *nfit_kernfs; |
4385 |
+ |
4386 |
+ nvdimm = nfit_mem->nvdimm; |
4387 |
++ if (!nvdimm) |
4388 |
++ continue; |
4389 |
++ |
4390 |
+ nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit"); |
4391 |
+ if (nfit_kernfs) |
4392 |
+ nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs, |
4393 |
+diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c |
4394 |
+index 2fa8304171e0..7a3431018e0a 100644 |
4395 |
+--- a/drivers/acpi/sbshc.c |
4396 |
++++ b/drivers/acpi/sbshc.c |
4397 |
+@@ -275,8 +275,8 @@ static int acpi_smbus_hc_add(struct acpi_device *device) |
4398 |
+ device->driver_data = hc; |
4399 |
+ |
4400 |
+ acpi_ec_add_query_handler(hc->ec, hc->query_bit, NULL, smbus_alarm, hc); |
4401 |
+- printk(KERN_INFO PREFIX "SBS HC: EC = 0x%p, offset = 0x%0x, query_bit = 0x%0x\n", |
4402 |
+- hc->ec, hc->offset, hc->query_bit); |
4403 |
++ dev_info(&device->dev, "SBS HC: offset = 0x%0x, query_bit = 0x%0x\n", |
4404 |
++ hc->offset, hc->query_bit); |
4405 |
+ |
4406 |
+ return 0; |
4407 |
+ } |
4408 |
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c |
4409 |
+index 9f78bb03bb76..bc013f757d5d 100644 |
4410 |
+--- a/drivers/ata/ahci.c |
4411 |
++++ b/drivers/ata/ahci.c |
4412 |
+@@ -267,9 +267,9 @@ static const struct pci_device_id ahci_pci_tbl[] = { |
4413 |
+ { PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */ |
4414 |
+ { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */ |
4415 |
+ { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */ |
4416 |
+- { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */ |
4417 |
++ { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH M AHCI */ |
4418 |
+ { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */ |
4419 |
+- { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */ |
4420 |
++ { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH M RAID */ |
4421 |
+ { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */ |
4422 |
+ { PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */ |
4423 |
+ { PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */ |
4424 |
+@@ -292,9 +292,9 @@ static const struct pci_device_id ahci_pci_tbl[] = { |
4425 |
+ { PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */ |
4426 |
+ { PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */ |
4427 |
+ { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */ |
4428 |
+- { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */ |
4429 |
++ { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT M AHCI */ |
4430 |
+ { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */ |
4431 |
+- { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */ |
4432 |
++ { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT M RAID */ |
4433 |
+ { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */ |
4434 |
+ { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */ |
4435 |
+ { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */ |
4436 |
+@@ -303,20 +303,20 @@ static const struct pci_device_id ahci_pci_tbl[] = { |
4437 |
+ { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */ |
4438 |
+ { PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */ |
4439 |
+ { PCI_VDEVICE(INTEL, 0x1e02), board_ahci }, /* Panther Point AHCI */ |
4440 |
+- { PCI_VDEVICE(INTEL, 0x1e03), board_ahci }, /* Panther Point AHCI */ |
4441 |
++ { PCI_VDEVICE(INTEL, 0x1e03), board_ahci }, /* Panther Point M AHCI */ |
4442 |
+ { PCI_VDEVICE(INTEL, 0x1e04), board_ahci }, /* Panther Point RAID */ |
4443 |
+ { PCI_VDEVICE(INTEL, 0x1e05), board_ahci }, /* Panther Point RAID */ |
4444 |
+ { PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */ |
4445 |
+- { PCI_VDEVICE(INTEL, 0x1e07), board_ahci }, /* Panther Point RAID */ |
4446 |
++ { PCI_VDEVICE(INTEL, 0x1e07), board_ahci }, /* Panther Point M RAID */ |
4447 |
+ { PCI_VDEVICE(INTEL, 0x1e0e), board_ahci }, /* Panther Point RAID */ |
4448 |
+ { PCI_VDEVICE(INTEL, 0x8c02), board_ahci }, /* Lynx Point AHCI */ |
4449 |
+- { PCI_VDEVICE(INTEL, 0x8c03), board_ahci }, /* Lynx Point AHCI */ |
4450 |
++ { PCI_VDEVICE(INTEL, 0x8c03), board_ahci }, /* Lynx Point M AHCI */ |
4451 |
+ { PCI_VDEVICE(INTEL, 0x8c04), board_ahci }, /* Lynx Point RAID */ |
4452 |
+- { PCI_VDEVICE(INTEL, 0x8c05), board_ahci }, /* Lynx Point RAID */ |
4453 |
++ { PCI_VDEVICE(INTEL, 0x8c05), board_ahci }, /* Lynx Point M RAID */ |
4454 |
+ { PCI_VDEVICE(INTEL, 0x8c06), board_ahci }, /* Lynx Point RAID */ |
4455 |
+- { PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point RAID */ |
4456 |
++ { PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point M RAID */ |
4457 |
+ { PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */ |
4458 |
+- { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point RAID */ |
4459 |
++ { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point M RAID */ |
4460 |
+ { PCI_VDEVICE(INTEL, 0x9c02), board_ahci }, /* Lynx Point-LP AHCI */ |
4461 |
+ { PCI_VDEVICE(INTEL, 0x9c03), board_ahci }, /* Lynx Point-LP AHCI */ |
4462 |
+ { PCI_VDEVICE(INTEL, 0x9c04), board_ahci }, /* Lynx Point-LP RAID */ |
4463 |
+@@ -357,21 +357,21 @@ static const struct pci_device_id ahci_pci_tbl[] = { |
4464 |
+ { PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */ |
4465 |
+ { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */ |
4466 |
+ { PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */ |
4467 |
+- { PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series AHCI */ |
4468 |
++ { PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series M AHCI */ |
4469 |
+ { PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */ |
4470 |
+- { PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series RAID */ |
4471 |
++ { PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series M RAID */ |
4472 |
+ { PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */ |
4473 |
+- { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */ |
4474 |
++ { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series M RAID */ |
4475 |
+ { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */ |
4476 |
+- { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */ |
4477 |
++ { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series M RAID */ |
4478 |
+ { PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */ |
4479 |
+ { PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */ |
4480 |
+ { PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */ |
4481 |
+ { PCI_VDEVICE(INTEL, 0xa102), board_ahci }, /* Sunrise Point-H AHCI */ |
4482 |
+- { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */ |
4483 |
++ { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H M AHCI */ |
4484 |
+ { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */ |
4485 |
+ { PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */ |
4486 |
+- { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */ |
4487 |
++ { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H M RAID */ |
4488 |
+ { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */ |
4489 |
+ { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/ |
4490 |
+ { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/ |
4491 |
+@@ -385,6 +385,11 @@ static const struct pci_device_id ahci_pci_tbl[] = { |
4492 |
+ { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/ |
4493 |
+ { PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/ |
4494 |
+ { PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/ |
4495 |
++ { PCI_VDEVICE(INTEL, 0xa356), board_ahci }, /* Cannon Lake PCH-H RAID */ |
4496 |
++ { PCI_VDEVICE(INTEL, 0x0f22), board_ahci }, /* Bay Trail AHCI */ |
4497 |
++ { PCI_VDEVICE(INTEL, 0x0f23), board_ahci }, /* Bay Trail AHCI */ |
4498 |
++ { PCI_VDEVICE(INTEL, 0x22a3), board_ahci }, /* Cherry Trail AHCI */ |
4499 |
++ { PCI_VDEVICE(INTEL, 0x5ae3), board_ahci }, /* Apollo Lake AHCI */ |
4500 |
+ |
4501 |
+ /* JMicron 360/1/3/5/6, match class to avoid IDE function */ |
4502 |
+ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, |
4503 |
+diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c |
4504 |
+index 67974796c350..531a0915066b 100644 |
4505 |
+--- a/drivers/block/pktcdvd.c |
4506 |
++++ b/drivers/block/pktcdvd.c |
4507 |
+@@ -2579,14 +2579,14 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) |
4508 |
+ bdev = bdget(dev); |
4509 |
+ if (!bdev) |
4510 |
+ return -ENOMEM; |
4511 |
++ ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL); |
4512 |
++ if (ret) |
4513 |
++ return ret; |
4514 |
+ if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) { |
4515 |
+ WARN_ONCE(true, "Attempt to register a non-SCSI queue\n"); |
4516 |
+- bdput(bdev); |
4517 |
++ blkdev_put(bdev, FMODE_READ | FMODE_NDELAY); |
4518 |
+ return -EINVAL; |
4519 |
+ } |
4520 |
+- ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL); |
4521 |
+- if (ret) |
4522 |
+- return ret; |
4523 |
+ |
4524 |
+ /* This is safe, since we have a reference from open(). */ |
4525 |
+ __module_get(THIS_MODULE); |
4526 |
+@@ -2745,7 +2745,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev) |
4527 |
+ pd->pkt_dev = MKDEV(pktdev_major, idx); |
4528 |
+ ret = pkt_new_dev(pd, dev); |
4529 |
+ if (ret) |
4530 |
+- goto out_new_dev; |
4531 |
++ goto out_mem2; |
4532 |
+ |
4533 |
+ /* inherit events of the host device */ |
4534 |
+ disk->events = pd->bdev->bd_disk->events; |
4535 |
+@@ -2763,8 +2763,6 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev) |
4536 |
+ mutex_unlock(&ctl_mutex); |
4537 |
+ return 0; |
4538 |
+ |
4539 |
+-out_new_dev: |
4540 |
+- blk_cleanup_queue(disk->queue); |
4541 |
+ out_mem2: |
4542 |
+ put_disk(disk); |
4543 |
+ out_mem: |
4544 |
+diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c |
4545 |
+index c8e945d19ffe..20142bc77554 100644 |
4546 |
+--- a/drivers/bluetooth/btsdio.c |
4547 |
++++ b/drivers/bluetooth/btsdio.c |
4548 |
+@@ -31,6 +31,7 @@ |
4549 |
+ #include <linux/errno.h> |
4550 |
+ #include <linux/skbuff.h> |
4551 |
+ |
4552 |
++#include <linux/mmc/host.h> |
4553 |
+ #include <linux/mmc/sdio_ids.h> |
4554 |
+ #include <linux/mmc/sdio_func.h> |
4555 |
+ |
4556 |
+@@ -292,6 +293,14 @@ static int btsdio_probe(struct sdio_func *func, |
4557 |
+ tuple = tuple->next; |
4558 |
+ } |
4559 |
+ |
4560 |
++ /* BCM43341 devices soldered onto the PCB (non-removable) use an |
4561 |
++ * uart connection for bluetooth, ignore the BT SDIO interface. |
4562 |
++ */ |
4563 |
++ if (func->vendor == SDIO_VENDOR_ID_BROADCOM && |
4564 |
++ func->device == SDIO_DEVICE_ID_BROADCOM_43341 && |
4565 |
++ !mmc_card_is_removable(func->card->host)) |
4566 |
++ return -ENODEV; |
4567 |
++ |
4568 |
+ data = devm_kzalloc(&func->dev, sizeof(*data), GFP_KERNEL); |
4569 |
+ if (!data) |
4570 |
+ return -ENOMEM; |
4571 |
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c |
4572 |
+index 513a7a59d421..d54c3f6f728c 100644 |
4573 |
+--- a/drivers/bluetooth/btusb.c |
4574 |
++++ b/drivers/bluetooth/btusb.c |
4575 |
+@@ -23,6 +23,7 @@ |
4576 |
+ |
4577 |
+ #include <linux/module.h> |
4578 |
+ #include <linux/usb.h> |
4579 |
++#include <linux/usb/quirks.h> |
4580 |
+ #include <linux/firmware.h> |
4581 |
+ #include <linux/of_device.h> |
4582 |
+ #include <linux/of_irq.h> |
4583 |
+@@ -392,9 +393,8 @@ static const struct usb_device_id blacklist_table[] = { |
4584 |
+ #define BTUSB_FIRMWARE_LOADED 7 |
4585 |
+ #define BTUSB_FIRMWARE_FAILED 8 |
4586 |
+ #define BTUSB_BOOTING 9 |
4587 |
+-#define BTUSB_RESET_RESUME 10 |
4588 |
+-#define BTUSB_DIAG_RUNNING 11 |
4589 |
+-#define BTUSB_OOB_WAKE_ENABLED 12 |
4590 |
++#define BTUSB_DIAG_RUNNING 10 |
4591 |
++#define BTUSB_OOB_WAKE_ENABLED 11 |
4592 |
+ |
4593 |
+ struct btusb_data { |
4594 |
+ struct hci_dev *hdev; |
4595 |
+@@ -3102,9 +3102,9 @@ static int btusb_probe(struct usb_interface *intf, |
4596 |
+ |
4597 |
+ /* QCA Rome devices lose their updated firmware over suspend, |
4598 |
+ * but the USB hub doesn't notice any status change. |
4599 |
+- * Explicitly request a device reset on resume. |
4600 |
++ * explicitly request a device reset on resume. |
4601 |
+ */ |
4602 |
+- set_bit(BTUSB_RESET_RESUME, &data->flags); |
4603 |
++ interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME; |
4604 |
+ } |
4605 |
+ |
4606 |
+ #ifdef CONFIG_BT_HCIBTUSB_RTL |
4607 |
+@@ -3115,7 +3115,7 @@ static int btusb_probe(struct usb_interface *intf, |
4608 |
+ * but the USB hub doesn't notice any status change. |
4609 |
+ * Explicitly request a device reset on resume. |
4610 |
+ */ |
4611 |
+- set_bit(BTUSB_RESET_RESUME, &data->flags); |
4612 |
++ interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME; |
4613 |
+ } |
4614 |
+ #endif |
4615 |
+ |
4616 |
+@@ -3280,14 +3280,6 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message) |
4617 |
+ enable_irq(data->oob_wake_irq); |
4618 |
+ } |
4619 |
+ |
4620 |
+- /* Optionally request a device reset on resume, but only when |
4621 |
+- * wakeups are disabled. If wakeups are enabled we assume the |
4622 |
+- * device will stay powered up throughout suspend. |
4623 |
+- */ |
4624 |
+- if (test_bit(BTUSB_RESET_RESUME, &data->flags) && |
4625 |
+- !device_may_wakeup(&data->udev->dev)) |
4626 |
+- data->udev->reset_resume = 1; |
4627 |
+- |
4628 |
+ return 0; |
4629 |
+ } |
4630 |
+ |
4631 |
+diff --git a/drivers/char/ipmi/ipmi_dmi.c b/drivers/char/ipmi/ipmi_dmi.c |
4632 |
+index 2059f79d669a..c3a23ec3e76f 100644 |
4633 |
+--- a/drivers/char/ipmi/ipmi_dmi.c |
4634 |
++++ b/drivers/char/ipmi/ipmi_dmi.c |
4635 |
+@@ -81,7 +81,10 @@ static void __init dmi_add_platform_ipmi(unsigned long base_addr, |
4636 |
+ pr_err("ipmi:dmi: Error allocation IPMI platform device"); |
4637 |
+ return; |
4638 |
+ } |
4639 |
+- pdev->driver_override = override; |
4640 |
++ pdev->driver_override = kasprintf(GFP_KERNEL, "%s", |
4641 |
++ override); |
4642 |
++ if (!pdev->driver_override) |
4643 |
++ goto err; |
4644 |
+ |
4645 |
+ if (type == IPMI_DMI_TYPE_SSIF) |
4646 |
+ goto add_properties; |
4647 |
+diff --git a/drivers/clocksource/timer-stm32.c b/drivers/clocksource/timer-stm32.c |
4648 |
+index 8f2423789ba9..4bfeb9929ab2 100644 |
4649 |
+--- a/drivers/clocksource/timer-stm32.c |
4650 |
++++ b/drivers/clocksource/timer-stm32.c |
4651 |
+@@ -106,6 +106,10 @@ static int __init stm32_clockevent_init(struct device_node *np) |
4652 |
+ unsigned long rate, max_delta; |
4653 |
+ int irq, ret, bits, prescaler = 1; |
4654 |
+ |
4655 |
++ data = kmemdup(&clock_event_ddata, sizeof(*data), GFP_KERNEL); |
4656 |
++ if (!data) |
4657 |
++ return -ENOMEM; |
4658 |
++ |
4659 |
+ clk = of_clk_get(np, 0); |
4660 |
+ if (IS_ERR(clk)) { |
4661 |
+ ret = PTR_ERR(clk); |
4662 |
+@@ -156,8 +160,8 @@ static int __init stm32_clockevent_init(struct device_node *np) |
4663 |
+ |
4664 |
+ writel_relaxed(prescaler - 1, data->base + TIM_PSC); |
4665 |
+ writel_relaxed(TIM_EGR_UG, data->base + TIM_EGR); |
4666 |
+- writel_relaxed(TIM_DIER_UIE, data->base + TIM_DIER); |
4667 |
+ writel_relaxed(0, data->base + TIM_SR); |
4668 |
++ writel_relaxed(TIM_DIER_UIE, data->base + TIM_DIER); |
4669 |
+ |
4670 |
+ data->periodic_top = DIV_ROUND_CLOSEST(rate, prescaler * HZ); |
4671 |
+ |
4672 |
+@@ -184,6 +188,7 @@ static int __init stm32_clockevent_init(struct device_node *np) |
4673 |
+ err_clk_enable: |
4674 |
+ clk_put(clk); |
4675 |
+ err_clk_get: |
4676 |
++ kfree(data); |
4677 |
+ return ret; |
4678 |
+ } |
4679 |
+ |
4680 |
+diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c |
4681 |
+index a753c50e9e41..9e0aa767bbbe 100644 |
4682 |
+--- a/drivers/cpufreq/cpufreq-dt-platdev.c |
4683 |
++++ b/drivers/cpufreq/cpufreq-dt-platdev.c |
4684 |
+@@ -111,6 +111,14 @@ static const struct of_device_id blacklist[] __initconst = { |
4685 |
+ |
4686 |
+ { .compatible = "marvell,armadaxp", }, |
4687 |
+ |
4688 |
++ { .compatible = "mediatek,mt2701", }, |
4689 |
++ { .compatible = "mediatek,mt2712", }, |
4690 |
++ { .compatible = "mediatek,mt7622", }, |
4691 |
++ { .compatible = "mediatek,mt7623", }, |
4692 |
++ { .compatible = "mediatek,mt817x", }, |
4693 |
++ { .compatible = "mediatek,mt8173", }, |
4694 |
++ { .compatible = "mediatek,mt8176", }, |
4695 |
++ |
4696 |
+ { .compatible = "nvidia,tegra124", }, |
4697 |
+ |
4698 |
+ { .compatible = "st,stih407", }, |
4699 |
+diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c |
4700 |
+index a118b9bed669..bfbf8bf77f03 100644 |
4701 |
+--- a/drivers/crypto/bfin_crc.c |
4702 |
++++ b/drivers/crypto/bfin_crc.c |
4703 |
+@@ -494,7 +494,8 @@ static struct ahash_alg algs = { |
4704 |
+ .cra_driver_name = DRIVER_NAME, |
4705 |
+ .cra_priority = 100, |
4706 |
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | |
4707 |
+- CRYPTO_ALG_ASYNC, |
4708 |
++ CRYPTO_ALG_ASYNC | |
4709 |
++ CRYPTO_ALG_OPTIONAL_KEY, |
4710 |
+ .cra_blocksize = CHKSUM_BLOCK_SIZE, |
4711 |
+ .cra_ctxsize = sizeof(struct bfin_crypto_crc_ctx), |
4712 |
+ .cra_alignmask = 3, |
4713 |
+diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c |
4714 |
+index 027e121c6f70..e1d4ae1153c4 100644 |
4715 |
+--- a/drivers/crypto/caam/ctrl.c |
4716 |
++++ b/drivers/crypto/caam/ctrl.c |
4717 |
+@@ -228,12 +228,16 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask, |
4718 |
+ * without any error (HW optimizations for later |
4719 |
+ * CAAM eras), then try again. |
4720 |
+ */ |
4721 |
++ if (ret) |
4722 |
++ break; |
4723 |
++ |
4724 |
+ rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK; |
4725 |
+ if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) || |
4726 |
+- !(rdsta_val & (1 << sh_idx))) |
4727 |
++ !(rdsta_val & (1 << sh_idx))) { |
4728 |
+ ret = -EAGAIN; |
4729 |
+- if (ret) |
4730 |
+ break; |
4731 |
++ } |
4732 |
++ |
4733 |
+ dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx); |
4734 |
+ /* Clear the contents before recreating the descriptor */ |
4735 |
+ memset(desc, 0x00, CAAM_CMD_SZ * 7); |
4736 |
+diff --git a/drivers/crypto/stm32/stm32_crc32.c b/drivers/crypto/stm32/stm32_crc32.c |
4737 |
+index 090582baecfe..8f09b8430893 100644 |
4738 |
+--- a/drivers/crypto/stm32/stm32_crc32.c |
4739 |
++++ b/drivers/crypto/stm32/stm32_crc32.c |
4740 |
+@@ -208,6 +208,7 @@ static struct shash_alg algs[] = { |
4741 |
+ .cra_name = "crc32", |
4742 |
+ .cra_driver_name = DRIVER_NAME, |
4743 |
+ .cra_priority = 200, |
4744 |
++ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, |
4745 |
+ .cra_blocksize = CHKSUM_BLOCK_SIZE, |
4746 |
+ .cra_alignmask = 3, |
4747 |
+ .cra_ctxsize = sizeof(struct stm32_crc_ctx), |
4748 |
+@@ -229,6 +230,7 @@ static struct shash_alg algs[] = { |
4749 |
+ .cra_name = "crc32c", |
4750 |
+ .cra_driver_name = DRIVER_NAME, |
4751 |
+ .cra_priority = 200, |
4752 |
++ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, |
4753 |
+ .cra_blocksize = CHKSUM_BLOCK_SIZE, |
4754 |
+ .cra_alignmask = 3, |
4755 |
+ .cra_ctxsize = sizeof(struct stm32_crc_ctx), |
4756 |
+diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c |
4757 |
+index ec5f9d2bc820..80cc2be6483c 100644 |
4758 |
+--- a/drivers/dma/dmatest.c |
4759 |
++++ b/drivers/dma/dmatest.c |
4760 |
+@@ -355,7 +355,7 @@ static void dmatest_callback(void *arg) |
4761 |
+ { |
4762 |
+ struct dmatest_done *done = arg; |
4763 |
+ struct dmatest_thread *thread = |
4764 |
+- container_of(arg, struct dmatest_thread, done_wait); |
4765 |
++ container_of(done, struct dmatest_thread, test_done); |
4766 |
+ if (!thread->done) { |
4767 |
+ done->done = true; |
4768 |
+ wake_up_all(done->wait); |
4769 |
+diff --git a/drivers/edac/octeon_edac-lmc.c b/drivers/edac/octeon_edac-lmc.c |
4770 |
+index 9c1ffe3e912b..aeb222ca3ed1 100644 |
4771 |
+--- a/drivers/edac/octeon_edac-lmc.c |
4772 |
++++ b/drivers/edac/octeon_edac-lmc.c |
4773 |
+@@ -78,6 +78,7 @@ static void octeon_lmc_edac_poll_o2(struct mem_ctl_info *mci) |
4774 |
+ if (!pvt->inject) |
4775 |
+ int_reg.u64 = cvmx_read_csr(CVMX_LMCX_INT(mci->mc_idx)); |
4776 |
+ else { |
4777 |
++ int_reg.u64 = 0; |
4778 |
+ if (pvt->error_type == 1) |
4779 |
+ int_reg.s.sec_err = 1; |
4780 |
+ if (pvt->error_type == 2) |
4781 |
+diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c |
4782 |
+index d687ca3d5049..c80ec1d03274 100644 |
4783 |
+--- a/drivers/firmware/psci.c |
4784 |
++++ b/drivers/firmware/psci.c |
4785 |
+@@ -59,7 +59,10 @@ bool psci_tos_resident_on(int cpu) |
4786 |
+ return cpu == resident_cpu; |
4787 |
+ } |
4788 |
+ |
4789 |
+-struct psci_operations psci_ops; |
4790 |
++struct psci_operations psci_ops = { |
4791 |
++ .conduit = PSCI_CONDUIT_NONE, |
4792 |
++ .smccc_version = SMCCC_VERSION_1_0, |
4793 |
++}; |
4794 |
+ |
4795 |
+ typedef unsigned long (psci_fn)(unsigned long, unsigned long, |
4796 |
+ unsigned long, unsigned long); |
4797 |
+@@ -210,6 +213,22 @@ static unsigned long psci_migrate_info_up_cpu(void) |
4798 |
+ 0, 0, 0); |
4799 |
+ } |
4800 |
+ |
4801 |
++static void set_conduit(enum psci_conduit conduit) |
4802 |
++{ |
4803 |
++ switch (conduit) { |
4804 |
++ case PSCI_CONDUIT_HVC: |
4805 |
++ invoke_psci_fn = __invoke_psci_fn_hvc; |
4806 |
++ break; |
4807 |
++ case PSCI_CONDUIT_SMC: |
4808 |
++ invoke_psci_fn = __invoke_psci_fn_smc; |
4809 |
++ break; |
4810 |
++ default: |
4811 |
++ WARN(1, "Unexpected PSCI conduit %d\n", conduit); |
4812 |
++ } |
4813 |
++ |
4814 |
++ psci_ops.conduit = conduit; |
4815 |
++} |
4816 |
++ |
4817 |
+ static int get_set_conduit_method(struct device_node *np) |
4818 |
+ { |
4819 |
+ const char *method; |
4820 |
+@@ -222,9 +241,9 @@ static int get_set_conduit_method(struct device_node *np) |
4821 |
+ } |
4822 |
+ |
4823 |
+ if (!strcmp("hvc", method)) { |
4824 |
+- invoke_psci_fn = __invoke_psci_fn_hvc; |
4825 |
++ set_conduit(PSCI_CONDUIT_HVC); |
4826 |
+ } else if (!strcmp("smc", method)) { |
4827 |
+- invoke_psci_fn = __invoke_psci_fn_smc; |
4828 |
++ set_conduit(PSCI_CONDUIT_SMC); |
4829 |
+ } else { |
4830 |
+ pr_warn("invalid \"method\" property: %s\n", method); |
4831 |
+ return -EINVAL; |
4832 |
+@@ -493,9 +512,36 @@ static void __init psci_init_migrate(void) |
4833 |
+ pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid); |
4834 |
+ } |
4835 |
+ |
4836 |
++static void __init psci_init_smccc(void) |
4837 |
++{ |
4838 |
++ u32 ver = ARM_SMCCC_VERSION_1_0; |
4839 |
++ int feature; |
4840 |
++ |
4841 |
++ feature = psci_features(ARM_SMCCC_VERSION_FUNC_ID); |
4842 |
++ |
4843 |
++ if (feature != PSCI_RET_NOT_SUPPORTED) { |
4844 |
++ u32 ret; |
4845 |
++ ret = invoke_psci_fn(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0); |
4846 |
++ if (ret == ARM_SMCCC_VERSION_1_1) { |
4847 |
++ psci_ops.smccc_version = SMCCC_VERSION_1_1; |
4848 |
++ ver = ret; |
4849 |
++ } |
4850 |
++ } |
4851 |
++ |
4852 |
++ /* |
4853 |
++ * Conveniently, the SMCCC and PSCI versions are encoded the |
4854 |
++ * same way. No, this isn't accidental. |
4855 |
++ */ |
4856 |
++ pr_info("SMC Calling Convention v%d.%d\n", |
4857 |
++ PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver)); |
4858 |
++ |
4859 |
++} |
4860 |
++ |
4861 |
+ static void __init psci_0_2_set_functions(void) |
4862 |
+ { |
4863 |
+ pr_info("Using standard PSCI v0.2 function IDs\n"); |
4864 |
++ psci_ops.get_version = psci_get_version; |
4865 |
++ |
4866 |
+ psci_function_id[PSCI_FN_CPU_SUSPEND] = |
4867 |
+ PSCI_FN_NATIVE(0_2, CPU_SUSPEND); |
4868 |
+ psci_ops.cpu_suspend = psci_cpu_suspend; |
4869 |
+@@ -539,6 +585,7 @@ static int __init psci_probe(void) |
4870 |
+ psci_init_migrate(); |
4871 |
+ |
4872 |
+ if (PSCI_VERSION_MAJOR(ver) >= 1) { |
4873 |
++ psci_init_smccc(); |
4874 |
+ psci_init_cpu_suspend(); |
4875 |
+ psci_init_system_suspend(); |
4876 |
+ } |
4877 |
+@@ -652,9 +699,9 @@ int __init psci_acpi_init(void) |
4878 |
+ pr_info("probing for conduit method from ACPI.\n"); |
4879 |
+ |
4880 |
+ if (acpi_psci_use_hvc()) |
4881 |
+- invoke_psci_fn = __invoke_psci_fn_hvc; |
4882 |
++ set_conduit(PSCI_CONDUIT_HVC); |
4883 |
+ else |
4884 |
+- invoke_psci_fn = __invoke_psci_fn_smc; |
4885 |
++ set_conduit(PSCI_CONDUIT_SMC); |
4886 |
+ |
4887 |
+ return psci_probe(); |
4888 |
+ } |
4889 |
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c |
4890 |
+index 095a2240af4f..46485692db48 100644 |
4891 |
+--- a/drivers/gpu/drm/i915/intel_display.c |
4892 |
++++ b/drivers/gpu/drm/i915/intel_display.c |
4893 |
+@@ -1211,23 +1211,6 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe) |
4894 |
+ pipe_name(pipe)); |
4895 |
+ } |
4896 |
+ |
4897 |
+-static void assert_cursor(struct drm_i915_private *dev_priv, |
4898 |
+- enum pipe pipe, bool state) |
4899 |
+-{ |
4900 |
+- bool cur_state; |
4901 |
+- |
4902 |
+- if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) |
4903 |
+- cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE; |
4904 |
+- else |
4905 |
+- cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; |
4906 |
+- |
4907 |
+- I915_STATE_WARN(cur_state != state, |
4908 |
+- "cursor on pipe %c assertion failure (expected %s, current %s)\n", |
4909 |
+- pipe_name(pipe), onoff(state), onoff(cur_state)); |
4910 |
+-} |
4911 |
+-#define assert_cursor_enabled(d, p) assert_cursor(d, p, true) |
4912 |
+-#define assert_cursor_disabled(d, p) assert_cursor(d, p, false) |
4913 |
+- |
4914 |
+ void assert_pipe(struct drm_i915_private *dev_priv, |
4915 |
+ enum pipe pipe, bool state) |
4916 |
+ { |
4917 |
+@@ -1255,77 +1238,25 @@ void assert_pipe(struct drm_i915_private *dev_priv, |
4918 |
+ pipe_name(pipe), onoff(state), onoff(cur_state)); |
4919 |
+ } |
4920 |
+ |
4921 |
+-static void assert_plane(struct drm_i915_private *dev_priv, |
4922 |
+- enum plane plane, bool state) |
4923 |
++static void assert_plane(struct intel_plane *plane, bool state) |
4924 |
+ { |
4925 |
+- u32 val; |
4926 |
+- bool cur_state; |
4927 |
++ bool cur_state = plane->get_hw_state(plane); |
4928 |
+ |
4929 |
+- val = I915_READ(DSPCNTR(plane)); |
4930 |
+- cur_state = !!(val & DISPLAY_PLANE_ENABLE); |
4931 |
+ I915_STATE_WARN(cur_state != state, |
4932 |
+- "plane %c assertion failure (expected %s, current %s)\n", |
4933 |
+- plane_name(plane), onoff(state), onoff(cur_state)); |
4934 |
++ "%s assertion failure (expected %s, current %s)\n", |
4935 |
++ plane->base.name, onoff(state), onoff(cur_state)); |
4936 |
+ } |
4937 |
+ |
4938 |
+-#define assert_plane_enabled(d, p) assert_plane(d, p, true) |
4939 |
+-#define assert_plane_disabled(d, p) assert_plane(d, p, false) |
4940 |
++#define assert_plane_enabled(p) assert_plane(p, true) |
4941 |
++#define assert_plane_disabled(p) assert_plane(p, false) |
4942 |
+ |
4943 |
+-static void assert_planes_disabled(struct drm_i915_private *dev_priv, |
4944 |
+- enum pipe pipe) |
4945 |
++static void assert_planes_disabled(struct intel_crtc *crtc) |
4946 |
+ { |
4947 |
+- int i; |
4948 |
+- |
4949 |
+- /* Primary planes are fixed to pipes on gen4+ */ |
4950 |
+- if (INTEL_GEN(dev_priv) >= 4) { |
4951 |
+- u32 val = I915_READ(DSPCNTR(pipe)); |
4952 |
+- I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE, |
4953 |
+- "plane %c assertion failure, should be disabled but not\n", |
4954 |
+- plane_name(pipe)); |
4955 |
+- return; |
4956 |
+- } |
4957 |
+- |
4958 |
+- /* Need to check both planes against the pipe */ |
4959 |
+- for_each_pipe(dev_priv, i) { |
4960 |
+- u32 val = I915_READ(DSPCNTR(i)); |
4961 |
+- enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> |
4962 |
+- DISPPLANE_SEL_PIPE_SHIFT; |
4963 |
+- I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe, |
4964 |
+- "plane %c assertion failure, should be off on pipe %c but is still active\n", |
4965 |
+- plane_name(i), pipe_name(pipe)); |
4966 |
+- } |
4967 |
+-} |
4968 |
+- |
4969 |
+-static void assert_sprites_disabled(struct drm_i915_private *dev_priv, |
4970 |
+- enum pipe pipe) |
4971 |
+-{ |
4972 |
+- int sprite; |
4973 |
++ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
4974 |
++ struct intel_plane *plane; |
4975 |
+ |
4976 |
+- if (INTEL_GEN(dev_priv) >= 9) { |
4977 |
+- for_each_sprite(dev_priv, pipe, sprite) { |
4978 |
+- u32 val = I915_READ(PLANE_CTL(pipe, sprite)); |
4979 |
+- I915_STATE_WARN(val & PLANE_CTL_ENABLE, |
4980 |
+- "plane %d assertion failure, should be off on pipe %c but is still active\n", |
4981 |
+- sprite, pipe_name(pipe)); |
4982 |
+- } |
4983 |
+- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { |
4984 |
+- for_each_sprite(dev_priv, pipe, sprite) { |
4985 |
+- u32 val = I915_READ(SPCNTR(pipe, PLANE_SPRITE0 + sprite)); |
4986 |
+- I915_STATE_WARN(val & SP_ENABLE, |
4987 |
+- "sprite %c assertion failure, should be off on pipe %c but is still active\n", |
4988 |
+- sprite_name(pipe, sprite), pipe_name(pipe)); |
4989 |
+- } |
4990 |
+- } else if (INTEL_GEN(dev_priv) >= 7) { |
4991 |
+- u32 val = I915_READ(SPRCTL(pipe)); |
4992 |
+- I915_STATE_WARN(val & SPRITE_ENABLE, |
4993 |
+- "sprite %c assertion failure, should be off on pipe %c but is still active\n", |
4994 |
+- plane_name(pipe), pipe_name(pipe)); |
4995 |
+- } else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) { |
4996 |
+- u32 val = I915_READ(DVSCNTR(pipe)); |
4997 |
+- I915_STATE_WARN(val & DVS_ENABLE, |
4998 |
+- "sprite %c assertion failure, should be off on pipe %c but is still active\n", |
4999 |
+- plane_name(pipe), pipe_name(pipe)); |
5000 |
+- } |
5001 |
++ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) |
5002 |
++ assert_plane_disabled(plane); |
5003 |
+ } |
5004 |
+ |
5005 |
+ static void assert_vblank_disabled(struct drm_crtc *crtc) |
5006 |
+@@ -1926,9 +1857,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc) |
5007 |
+ |
5008 |
+ DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe)); |
5009 |
+ |
5010 |
+- assert_planes_disabled(dev_priv, pipe); |
5011 |
+- assert_cursor_disabled(dev_priv, pipe); |
5012 |
+- assert_sprites_disabled(dev_priv, pipe); |
5013 |
++ assert_planes_disabled(crtc); |
5014 |
+ |
5015 |
+ /* |
5016 |
+ * A pipe without a PLL won't actually be able to drive bits from |
5017 |
+@@ -1997,9 +1926,7 @@ static void intel_disable_pipe(struct intel_crtc *crtc) |
5018 |
+ * Make sure planes won't keep trying to pump pixels to us, |
5019 |
+ * or we might hang the display. |
5020 |
+ */ |
5021 |
+- assert_planes_disabled(dev_priv, pipe); |
5022 |
+- assert_cursor_disabled(dev_priv, pipe); |
5023 |
+- assert_sprites_disabled(dev_priv, pipe); |
5024 |
++ assert_planes_disabled(crtc); |
5025 |
+ |
5026 |
+ reg = PIPECONF(cpu_transcoder); |
5027 |
+ val = I915_READ(reg); |
5028 |
+@@ -2829,6 +2756,23 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state, |
5029 |
+ crtc_state->active_planes); |
5030 |
+ } |
5031 |
+ |
5032 |
++static void intel_plane_disable_noatomic(struct intel_crtc *crtc, |
5033 |
++ struct intel_plane *plane) |
5034 |
++{ |
5035 |
++ struct intel_crtc_state *crtc_state = |
5036 |
++ to_intel_crtc_state(crtc->base.state); |
5037 |
++ struct intel_plane_state *plane_state = |
5038 |
++ to_intel_plane_state(plane->base.state); |
5039 |
++ |
5040 |
++ intel_set_plane_visible(crtc_state, plane_state, false); |
5041 |
++ |
5042 |
++ if (plane->id == PLANE_PRIMARY) |
5043 |
++ intel_pre_disable_primary_noatomic(&crtc->base); |
5044 |
++ |
5045 |
++ trace_intel_disable_plane(&plane->base, crtc); |
5046 |
++ plane->disable_plane(plane, crtc); |
5047 |
++} |
5048 |
++ |
5049 |
+ static void |
5050 |
+ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, |
5051 |
+ struct intel_initial_plane_config *plane_config) |
5052 |
+@@ -2886,12 +2830,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, |
5053 |
+ * simplest solution is to just disable the primary plane now and |
5054 |
+ * pretend the BIOS never had it enabled. |
5055 |
+ */ |
5056 |
+- intel_set_plane_visible(to_intel_crtc_state(crtc_state), |
5057 |
+- to_intel_plane_state(plane_state), |
5058 |
+- false); |
5059 |
+- intel_pre_disable_primary_noatomic(&intel_crtc->base); |
5060 |
+- trace_intel_disable_plane(primary, intel_crtc); |
5061 |
+- intel_plane->disable_plane(intel_plane, intel_crtc); |
5062 |
++ intel_plane_disable_noatomic(intel_crtc, intel_plane); |
5063 |
+ |
5064 |
+ return; |
5065 |
+ |
5066 |
+@@ -3397,6 +3336,31 @@ static void i9xx_disable_primary_plane(struct intel_plane *primary, |
5067 |
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); |
5068 |
+ } |
5069 |
+ |
5070 |
++static bool i9xx_plane_get_hw_state(struct intel_plane *primary) |
5071 |
++{ |
5072 |
++ |
5073 |
++ struct drm_i915_private *dev_priv = to_i915(primary->base.dev); |
5074 |
++ enum intel_display_power_domain power_domain; |
5075 |
++ enum plane plane = primary->plane; |
5076 |
++ enum pipe pipe = primary->pipe; |
5077 |
++ bool ret; |
5078 |
++ |
5079 |
++ /* |
5080 |
++ * Not 100% correct for planes that can move between pipes, |
5081 |
++ * but that's only the case for gen2-4 which don't have any |
5082 |
++ * display power wells. |
5083 |
++ */ |
5084 |
++ power_domain = POWER_DOMAIN_PIPE(pipe); |
5085 |
++ if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) |
5086 |
++ return false; |
5087 |
++ |
5088 |
++ ret = I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE; |
5089 |
++ |
5090 |
++ intel_display_power_put(dev_priv, power_domain); |
5091 |
++ |
5092 |
++ return ret; |
5093 |
++} |
5094 |
++ |
5095 |
+ static u32 |
5096 |
+ intel_fb_stride_alignment(const struct drm_framebuffer *fb, int plane) |
5097 |
+ { |
5098 |
+@@ -4973,7 +4937,8 @@ void hsw_enable_ips(struct intel_crtc *crtc) |
5099 |
+ * a vblank wait. |
5100 |
+ */ |
5101 |
+ |
5102 |
+- assert_plane_enabled(dev_priv, crtc->plane); |
5103 |
++ assert_plane_enabled(to_intel_plane(crtc->base.primary)); |
5104 |
++ |
5105 |
+ if (IS_BROADWELL(dev_priv)) { |
5106 |
+ mutex_lock(&dev_priv->rps.hw_lock); |
5107 |
+ WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000)); |
5108 |
+@@ -5005,7 +4970,8 @@ void hsw_disable_ips(struct intel_crtc *crtc) |
5109 |
+ if (!crtc->config->ips_enabled) |
5110 |
+ return; |
5111 |
+ |
5112 |
+- assert_plane_enabled(dev_priv, crtc->plane); |
5113 |
++ assert_plane_enabled(to_intel_plane(crtc->base.primary)); |
5114 |
++ |
5115 |
+ if (IS_BROADWELL(dev_priv)) { |
5116 |
+ mutex_lock(&dev_priv->rps.hw_lock); |
5117 |
+ WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); |
5118 |
+@@ -6000,6 +5966,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc, |
5119 |
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
5120 |
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev); |
5121 |
+ enum intel_display_power_domain domain; |
5122 |
++ struct intel_plane *plane; |
5123 |
+ u64 domains; |
5124 |
+ struct drm_atomic_state *state; |
5125 |
+ struct intel_crtc_state *crtc_state; |
5126 |
+@@ -6008,11 +5975,12 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc, |
5127 |
+ if (!intel_crtc->active) |
5128 |
+ return; |
5129 |
+ |
5130 |
+- if (crtc->primary->state->visible) { |
5131 |
+- intel_pre_disable_primary_noatomic(crtc); |
5132 |
++ for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) { |
5133 |
++ const struct intel_plane_state *plane_state = |
5134 |
++ to_intel_plane_state(plane->base.state); |
5135 |
+ |
5136 |
+- intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary)); |
5137 |
+- crtc->primary->state->visible = false; |
5138 |
++ if (plane_state->base.visible) |
5139 |
++ intel_plane_disable_noatomic(intel_crtc, plane); |
5140 |
+ } |
5141 |
+ |
5142 |
+ state = drm_atomic_state_alloc(crtc->dev); |
5143 |
+@@ -9577,6 +9545,23 @@ static void i845_disable_cursor(struct intel_plane *plane, |
5144 |
+ i845_update_cursor(plane, NULL, NULL); |
5145 |
+ } |
5146 |
+ |
5147 |
++static bool i845_cursor_get_hw_state(struct intel_plane *plane) |
5148 |
++{ |
5149 |
++ struct drm_i915_private *dev_priv = to_i915(plane->base.dev); |
5150 |
++ enum intel_display_power_domain power_domain; |
5151 |
++ bool ret; |
5152 |
++ |
5153 |
++ power_domain = POWER_DOMAIN_PIPE(PIPE_A); |
5154 |
++ if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) |
5155 |
++ return false; |
5156 |
++ |
5157 |
++ ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE; |
5158 |
++ |
5159 |
++ intel_display_power_put(dev_priv, power_domain); |
5160 |
++ |
5161 |
++ return ret; |
5162 |
++} |
5163 |
++ |
5164 |
+ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state, |
5165 |
+ const struct intel_plane_state *plane_state) |
5166 |
+ { |
5167 |
+@@ -9770,6 +9755,28 @@ static void i9xx_disable_cursor(struct intel_plane *plane, |
5168 |
+ i9xx_update_cursor(plane, NULL, NULL); |
5169 |
+ } |
5170 |
+ |
5171 |
++static bool i9xx_cursor_get_hw_state(struct intel_plane *plane) |
5172 |
++{ |
5173 |
++ struct drm_i915_private *dev_priv = to_i915(plane->base.dev); |
5174 |
++ enum intel_display_power_domain power_domain; |
5175 |
++ enum pipe pipe = plane->pipe; |
5176 |
++ bool ret; |
5177 |
++ |
5178 |
++ /* |
5179 |
++ * Not 100% correct for planes that can move between pipes, |
5180 |
++ * but that's only the case for gen2-3 which don't have any |
5181 |
++ * display power wells. |
5182 |
++ */ |
5183 |
++ power_domain = POWER_DOMAIN_PIPE(pipe); |
5184 |
++ if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) |
5185 |
++ return false; |
5186 |
++ |
5187 |
++ ret = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; |
5188 |
++ |
5189 |
++ intel_display_power_put(dev_priv, power_domain); |
5190 |
++ |
5191 |
++ return ret; |
5192 |
++} |
5193 |
+ |
5194 |
+ /* VESA 640x480x72Hz mode to set on the pipe */ |
5195 |
+ static struct drm_display_mode load_detect_mode = { |
5196 |
+@@ -13240,6 +13247,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) |
5197 |
+ |
5198 |
+ primary->update_plane = skylake_update_primary_plane; |
5199 |
+ primary->disable_plane = skylake_disable_primary_plane; |
5200 |
++ primary->get_hw_state = skl_plane_get_hw_state; |
5201 |
+ } else if (INTEL_GEN(dev_priv) >= 9) { |
5202 |
+ intel_primary_formats = skl_primary_formats; |
5203 |
+ num_formats = ARRAY_SIZE(skl_primary_formats); |
5204 |
+@@ -13250,6 +13258,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) |
5205 |
+ |
5206 |
+ primary->update_plane = skylake_update_primary_plane; |
5207 |
+ primary->disable_plane = skylake_disable_primary_plane; |
5208 |
++ primary->get_hw_state = skl_plane_get_hw_state; |
5209 |
+ } else if (INTEL_GEN(dev_priv) >= 4) { |
5210 |
+ intel_primary_formats = i965_primary_formats; |
5211 |
+ num_formats = ARRAY_SIZE(i965_primary_formats); |
5212 |
+@@ -13257,6 +13266,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) |
5213 |
+ |
5214 |
+ primary->update_plane = i9xx_update_primary_plane; |
5215 |
+ primary->disable_plane = i9xx_disable_primary_plane; |
5216 |
++ primary->get_hw_state = i9xx_plane_get_hw_state; |
5217 |
+ } else { |
5218 |
+ intel_primary_formats = i8xx_primary_formats; |
5219 |
+ num_formats = ARRAY_SIZE(i8xx_primary_formats); |
5220 |
+@@ -13264,6 +13274,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) |
5221 |
+ |
5222 |
+ primary->update_plane = i9xx_update_primary_plane; |
5223 |
+ primary->disable_plane = i9xx_disable_primary_plane; |
5224 |
++ primary->get_hw_state = i9xx_plane_get_hw_state; |
5225 |
+ } |
5226 |
+ |
5227 |
+ if (INTEL_GEN(dev_priv) >= 9) |
5228 |
+@@ -13353,10 +13364,12 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv, |
5229 |
+ if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { |
5230 |
+ cursor->update_plane = i845_update_cursor; |
5231 |
+ cursor->disable_plane = i845_disable_cursor; |
5232 |
++ cursor->get_hw_state = i845_cursor_get_hw_state; |
5233 |
+ cursor->check_plane = i845_check_cursor; |
5234 |
+ } else { |
5235 |
+ cursor->update_plane = i9xx_update_cursor; |
5236 |
+ cursor->disable_plane = i9xx_disable_cursor; |
5237 |
++ cursor->get_hw_state = i9xx_cursor_get_hw_state; |
5238 |
+ cursor->check_plane = i9xx_check_cursor; |
5239 |
+ } |
5240 |
+ |
5241 |
+@@ -14704,8 +14717,11 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) |
5242 |
+ DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n", |
5243 |
+ pipe_name(pipe)); |
5244 |
+ |
5245 |
+- assert_plane_disabled(dev_priv, PLANE_A); |
5246 |
+- assert_plane_disabled(dev_priv, PLANE_B); |
5247 |
++ WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE); |
5248 |
++ WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE); |
5249 |
++ WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE); |
5250 |
++ WARN_ON(I915_READ(CURCNTR(PIPE_A)) & CURSOR_MODE); |
5251 |
++ WARN_ON(I915_READ(CURCNTR(PIPE_B)) & CURSOR_MODE); |
5252 |
+ |
5253 |
+ I915_WRITE(PIPECONF(pipe), 0); |
5254 |
+ POSTING_READ(PIPECONF(pipe)); |
5255 |
+@@ -14716,22 +14732,36 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) |
5256 |
+ POSTING_READ(DPLL(pipe)); |
5257 |
+ } |
5258 |
+ |
5259 |
+-static bool |
5260 |
+-intel_check_plane_mapping(struct intel_crtc *crtc) |
5261 |
++static bool intel_plane_mapping_ok(struct intel_crtc *crtc, |
5262 |
++ struct intel_plane *primary) |
5263 |
+ { |
5264 |
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
5265 |
+- u32 val; |
5266 |
++ enum plane plane = primary->plane; |
5267 |
++ u32 val = I915_READ(DSPCNTR(plane)); |
5268 |
+ |
5269 |
+- if (INTEL_INFO(dev_priv)->num_pipes == 1) |
5270 |
+- return true; |
5271 |
++ return (val & DISPLAY_PLANE_ENABLE) == 0 || |
5272 |
++ (val & DISPPLANE_SEL_PIPE_MASK) == DISPPLANE_SEL_PIPE(crtc->pipe); |
5273 |
++} |
5274 |
+ |
5275 |
+- val = I915_READ(DSPCNTR(!crtc->plane)); |
5276 |
++static void |
5277 |
++intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv) |
5278 |
++{ |
5279 |
++ struct intel_crtc *crtc; |
5280 |
+ |
5281 |
+- if ((val & DISPLAY_PLANE_ENABLE) && |
5282 |
+- (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe)) |
5283 |
+- return false; |
5284 |
++ if (INTEL_GEN(dev_priv) >= 4) |
5285 |
++ return; |
5286 |
+ |
5287 |
+- return true; |
5288 |
++ for_each_intel_crtc(&dev_priv->drm, crtc) { |
5289 |
++ struct intel_plane *plane = |
5290 |
++ to_intel_plane(crtc->base.primary); |
5291 |
++ |
5292 |
++ if (intel_plane_mapping_ok(crtc, plane)) |
5293 |
++ continue; |
5294 |
++ |
5295 |
++ DRM_DEBUG_KMS("%s attached to the wrong pipe, disabling plane\n", |
5296 |
++ plane->base.name); |
5297 |
++ intel_plane_disable_noatomic(crtc, plane); |
5298 |
++ } |
5299 |
+ } |
5300 |
+ |
5301 |
+ static bool intel_crtc_has_encoders(struct intel_crtc *crtc) |
5302 |
+@@ -14787,33 +14817,15 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc, |
5303 |
+ |
5304 |
+ /* Disable everything but the primary plane */ |
5305 |
+ for_each_intel_plane_on_crtc(dev, crtc, plane) { |
5306 |
+- if (plane->base.type == DRM_PLANE_TYPE_PRIMARY) |
5307 |
+- continue; |
5308 |
++ const struct intel_plane_state *plane_state = |
5309 |
++ to_intel_plane_state(plane->base.state); |
5310 |
+ |
5311 |
+- trace_intel_disable_plane(&plane->base, crtc); |
5312 |
+- plane->disable_plane(plane, crtc); |
5313 |
++ if (plane_state->base.visible && |
5314 |
++ plane->base.type != DRM_PLANE_TYPE_PRIMARY) |
5315 |
++ intel_plane_disable_noatomic(crtc, plane); |
5316 |
+ } |
5317 |
+ } |
5318 |
+ |
5319 |
+- /* We need to sanitize the plane -> pipe mapping first because this will |
5320 |
+- * disable the crtc (and hence change the state) if it is wrong. Note |
5321 |
+- * that gen4+ has a fixed plane -> pipe mapping. */ |
5322 |
+- if (INTEL_GEN(dev_priv) < 4 && !intel_check_plane_mapping(crtc)) { |
5323 |
+- bool plane; |
5324 |
+- |
5325 |
+- DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n", |
5326 |
+- crtc->base.base.id, crtc->base.name); |
5327 |
+- |
5328 |
+- /* Pipe has the wrong plane attached and the plane is active. |
5329 |
+- * Temporarily change the plane mapping and disable everything |
5330 |
+- * ... */ |
5331 |
+- plane = crtc->plane; |
5332 |
+- crtc->base.primary->state->visible = true; |
5333 |
+- crtc->plane = !plane; |
5334 |
+- intel_crtc_disable_noatomic(&crtc->base, ctx); |
5335 |
+- crtc->plane = plane; |
5336 |
+- } |
5337 |
+- |
5338 |
+ /* Adjust the state of the output pipe according to whether we |
5339 |
+ * have active connectors/encoders. */ |
5340 |
+ if (crtc->active && !intel_crtc_has_encoders(crtc)) |
5341 |
+@@ -14918,24 +14930,21 @@ void i915_redisable_vga(struct drm_i915_private *dev_priv) |
5342 |
+ intel_display_power_put(dev_priv, POWER_DOMAIN_VGA); |
5343 |
+ } |
5344 |
+ |
5345 |
+-static bool primary_get_hw_state(struct intel_plane *plane) |
5346 |
+-{ |
5347 |
+- struct drm_i915_private *dev_priv = to_i915(plane->base.dev); |
5348 |
+- |
5349 |
+- return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE; |
5350 |
+-} |
5351 |
+- |
5352 |
+ /* FIXME read out full plane state for all planes */ |
5353 |
+ static void readout_plane_state(struct intel_crtc *crtc) |
5354 |
+ { |
5355 |
+- struct intel_plane *primary = to_intel_plane(crtc->base.primary); |
5356 |
+- bool visible; |
5357 |
++ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
5358 |
++ struct intel_crtc_state *crtc_state = |
5359 |
++ to_intel_crtc_state(crtc->base.state); |
5360 |
++ struct intel_plane *plane; |
5361 |
+ |
5362 |
+- visible = crtc->active && primary_get_hw_state(primary); |
5363 |
++ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { |
5364 |
++ struct intel_plane_state *plane_state = |
5365 |
++ to_intel_plane_state(plane->base.state); |
5366 |
++ bool visible = plane->get_hw_state(plane); |
5367 |
+ |
5368 |
+- intel_set_plane_visible(to_intel_crtc_state(crtc->base.state), |
5369 |
+- to_intel_plane_state(primary->base.state), |
5370 |
+- visible); |
5371 |
++ intel_set_plane_visible(crtc_state, plane_state, visible); |
5372 |
++ } |
5373 |
+ } |
5374 |
+ |
5375 |
+ static void intel_modeset_readout_hw_state(struct drm_device *dev) |
5376 |
+@@ -15137,6 +15146,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev, |
5377 |
+ /* HW state is read out, now we need to sanitize this mess. */ |
5378 |
+ get_encoder_power_domains(dev_priv); |
5379 |
+ |
5380 |
++ intel_sanitize_plane_mapping(dev_priv); |
5381 |
++ |
5382 |
+ for_each_intel_encoder(dev, encoder) { |
5383 |
+ intel_sanitize_encoder(encoder); |
5384 |
+ } |
5385 |
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c |
5386 |
+index 09f274419eea..76cf68745870 100644 |
5387 |
+--- a/drivers/gpu/drm/i915/intel_dp.c |
5388 |
++++ b/drivers/gpu/drm/i915/intel_dp.c |
5389 |
+@@ -5340,6 +5340,12 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev, |
5390 |
+ */ |
5391 |
+ final->t8 = 1; |
5392 |
+ final->t9 = 1; |
5393 |
++ |
5394 |
++ /* |
5395 |
++ * HW has only a 100msec granularity for t11_t12 so round it up |
5396 |
++ * accordingly. |
5397 |
++ */ |
5398 |
++ final->t11_t12 = roundup(final->t11_t12, 100 * 10); |
5399 |
+ } |
5400 |
+ |
5401 |
+ static void |
5402 |
+diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h |
5403 |
+index 79fbaf78f604..10ae9681f02d 100644 |
5404 |
+--- a/drivers/gpu/drm/i915/intel_drv.h |
5405 |
++++ b/drivers/gpu/drm/i915/intel_drv.h |
5406 |
+@@ -863,6 +863,7 @@ struct intel_plane { |
5407 |
+ const struct intel_plane_state *plane_state); |
5408 |
+ void (*disable_plane)(struct intel_plane *plane, |
5409 |
+ struct intel_crtc *crtc); |
5410 |
++ bool (*get_hw_state)(struct intel_plane *plane); |
5411 |
+ int (*check_plane)(struct intel_plane *plane, |
5412 |
+ struct intel_crtc_state *crtc_state, |
5413 |
+ struct intel_plane_state *state); |
5414 |
+@@ -1885,6 +1886,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data, |
5415 |
+ struct drm_file *file_priv); |
5416 |
+ void intel_pipe_update_start(struct intel_crtc *crtc); |
5417 |
+ void intel_pipe_update_end(struct intel_crtc *crtc); |
5418 |
++bool skl_plane_get_hw_state(struct intel_plane *plane); |
5419 |
+ |
5420 |
+ /* intel_tv.c */ |
5421 |
+ void intel_tv_init(struct drm_i915_private *dev_priv); |
5422 |
+diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c |
5423 |
+index 524933b01483..f8ebeb5ffb96 100644 |
5424 |
+--- a/drivers/gpu/drm/i915/intel_sprite.c |
5425 |
++++ b/drivers/gpu/drm/i915/intel_sprite.c |
5426 |
+@@ -324,6 +324,26 @@ skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) |
5427 |
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); |
5428 |
+ } |
5429 |
+ |
5430 |
++bool |
5431 |
++skl_plane_get_hw_state(struct intel_plane *plane) |
5432 |
++{ |
5433 |
++ struct drm_i915_private *dev_priv = to_i915(plane->base.dev); |
5434 |
++ enum intel_display_power_domain power_domain; |
5435 |
++ enum plane_id plane_id = plane->id; |
5436 |
++ enum pipe pipe = plane->pipe; |
5437 |
++ bool ret; |
5438 |
++ |
5439 |
++ power_domain = POWER_DOMAIN_PIPE(pipe); |
5440 |
++ if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) |
5441 |
++ return false; |
5442 |
++ |
5443 |
++ ret = I915_READ(PLANE_CTL(pipe, plane_id)) & PLANE_CTL_ENABLE; |
5444 |
++ |
5445 |
++ intel_display_power_put(dev_priv, power_domain); |
5446 |
++ |
5447 |
++ return ret; |
5448 |
++} |
5449 |
++ |
5450 |
+ static void |
5451 |
+ chv_update_csc(struct intel_plane *plane, uint32_t format) |
5452 |
+ { |
5453 |
+@@ -501,6 +521,26 @@ vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) |
5454 |
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); |
5455 |
+ } |
5456 |
+ |
5457 |
++static bool |
5458 |
++vlv_plane_get_hw_state(struct intel_plane *plane) |
5459 |
++{ |
5460 |
++ struct drm_i915_private *dev_priv = to_i915(plane->base.dev); |
5461 |
++ enum intel_display_power_domain power_domain; |
5462 |
++ enum plane_id plane_id = plane->id; |
5463 |
++ enum pipe pipe = plane->pipe; |
5464 |
++ bool ret; |
5465 |
++ |
5466 |
++ power_domain = POWER_DOMAIN_PIPE(pipe); |
5467 |
++ if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) |
5468 |
++ return false; |
5469 |
++ |
5470 |
++ ret = I915_READ(SPCNTR(pipe, plane_id)) & SP_ENABLE; |
5471 |
++ |
5472 |
++ intel_display_power_put(dev_priv, power_domain); |
5473 |
++ |
5474 |
++ return ret; |
5475 |
++} |
5476 |
++ |
5477 |
+ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state, |
5478 |
+ const struct intel_plane_state *plane_state) |
5479 |
+ { |
5480 |
+@@ -641,6 +681,25 @@ ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) |
5481 |
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); |
5482 |
+ } |
5483 |
+ |
5484 |
++static bool |
5485 |
++ivb_plane_get_hw_state(struct intel_plane *plane) |
5486 |
++{ |
5487 |
++ struct drm_i915_private *dev_priv = to_i915(plane->base.dev); |
5488 |
++ enum intel_display_power_domain power_domain; |
5489 |
++ enum pipe pipe = plane->pipe; |
5490 |
++ bool ret; |
5491 |
++ |
5492 |
++ power_domain = POWER_DOMAIN_PIPE(pipe); |
5493 |
++ if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) |
5494 |
++ return false; |
5495 |
++ |
5496 |
++ ret = I915_READ(SPRCTL(pipe)) & SPRITE_ENABLE; |
5497 |
++ |
5498 |
++ intel_display_power_put(dev_priv, power_domain); |
5499 |
++ |
5500 |
++ return ret; |
5501 |
++} |
5502 |
++ |
5503 |
+ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state, |
5504 |
+ const struct intel_plane_state *plane_state) |
5505 |
+ { |
5506 |
+@@ -772,6 +831,25 @@ g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) |
5507 |
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); |
5508 |
+ } |
5509 |
+ |
5510 |
++static bool |
5511 |
++g4x_plane_get_hw_state(struct intel_plane *plane) |
5512 |
++{ |
5513 |
++ struct drm_i915_private *dev_priv = to_i915(plane->base.dev); |
5514 |
++ enum intel_display_power_domain power_domain; |
5515 |
++ enum pipe pipe = plane->pipe; |
5516 |
++ bool ret; |
5517 |
++ |
5518 |
++ power_domain = POWER_DOMAIN_PIPE(pipe); |
5519 |
++ if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) |
5520 |
++ return false; |
5521 |
++ |
5522 |
++ ret = I915_READ(DVSCNTR(pipe)) & DVS_ENABLE; |
5523 |
++ |
5524 |
++ intel_display_power_put(dev_priv, power_domain); |
5525 |
++ |
5526 |
++ return ret; |
5527 |
++} |
5528 |
++ |
5529 |
+ static int |
5530 |
+ intel_check_sprite_plane(struct intel_plane *plane, |
5531 |
+ struct intel_crtc_state *crtc_state, |
5532 |
+@@ -1227,6 +1305,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, |
5533 |
+ |
5534 |
+ intel_plane->update_plane = skl_update_plane; |
5535 |
+ intel_plane->disable_plane = skl_disable_plane; |
5536 |
++ intel_plane->get_hw_state = skl_plane_get_hw_state; |
5537 |
+ |
5538 |
+ plane_formats = skl_plane_formats; |
5539 |
+ num_plane_formats = ARRAY_SIZE(skl_plane_formats); |
5540 |
+@@ -1237,6 +1316,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, |
5541 |
+ |
5542 |
+ intel_plane->update_plane = skl_update_plane; |
5543 |
+ intel_plane->disable_plane = skl_disable_plane; |
5544 |
++ intel_plane->get_hw_state = skl_plane_get_hw_state; |
5545 |
+ |
5546 |
+ plane_formats = skl_plane_formats; |
5547 |
+ num_plane_formats = ARRAY_SIZE(skl_plane_formats); |
5548 |
+@@ -1247,6 +1327,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, |
5549 |
+ |
5550 |
+ intel_plane->update_plane = vlv_update_plane; |
5551 |
+ intel_plane->disable_plane = vlv_disable_plane; |
5552 |
++ intel_plane->get_hw_state = vlv_plane_get_hw_state; |
5553 |
+ |
5554 |
+ plane_formats = vlv_plane_formats; |
5555 |
+ num_plane_formats = ARRAY_SIZE(vlv_plane_formats); |
5556 |
+@@ -1262,6 +1343,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, |
5557 |
+ |
5558 |
+ intel_plane->update_plane = ivb_update_plane; |
5559 |
+ intel_plane->disable_plane = ivb_disable_plane; |
5560 |
++ intel_plane->get_hw_state = ivb_plane_get_hw_state; |
5561 |
+ |
5562 |
+ plane_formats = snb_plane_formats; |
5563 |
+ num_plane_formats = ARRAY_SIZE(snb_plane_formats); |
5564 |
+@@ -1272,6 +1354,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, |
5565 |
+ |
5566 |
+ intel_plane->update_plane = g4x_update_plane; |
5567 |
+ intel_plane->disable_plane = g4x_disable_plane; |
5568 |
++ intel_plane->get_hw_state = g4x_plane_get_hw_state; |
5569 |
+ |
5570 |
+ modifiers = i9xx_plane_format_modifiers; |
5571 |
+ if (IS_GEN6(dev_priv)) { |
5572 |
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c |
5573 |
+index 330ca983828b..5744eb729d24 100644 |
5574 |
+--- a/drivers/hid/hid-core.c |
5575 |
++++ b/drivers/hid/hid-core.c |
5576 |
+@@ -2638,7 +2638,6 @@ static const struct hid_device_id hid_ignore_list[] = { |
5577 |
+ { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) }, |
5578 |
+ { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) }, |
5579 |
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) }, |
5580 |
+- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0401) }, |
5581 |
+ { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) }, |
5582 |
+ { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) }, |
5583 |
+ { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) }, |
5584 |
+@@ -2908,6 +2907,17 @@ bool hid_ignore(struct hid_device *hdev) |
5585 |
+ strncmp(hdev->name, "www.masterkit.ru MA901", 22) == 0) |
5586 |
+ return true; |
5587 |
+ break; |
5588 |
++ case USB_VENDOR_ID_ELAN: |
5589 |
++ /* |
5590 |
++ * Many Elan devices have a product id of 0x0401 and are handled |
5591 |
++ * by the elan_i2c input driver. But the ACPI HID ELAN0800 dev |
5592 |
++ * is not (and cannot be) handled by that driver -> |
5593 |
++ * Ignore all 0x0401 devs except for the ELAN0800 dev. |
5594 |
++ */ |
5595 |
++ if (hdev->product == 0x0401 && |
5596 |
++ strncmp(hdev->name, "ELAN0800", 8) != 0) |
5597 |
++ return true; |
5598 |
++ break; |
5599 |
+ } |
5600 |
+ |
5601 |
+ if (hdev->type == HID_TYPE_USBMOUSE && |
5602 |
+diff --git a/drivers/media/dvb-frontends/ascot2e.c b/drivers/media/dvb-frontends/ascot2e.c |
5603 |
+index 0ee0df53b91b..79d5d89bc95e 100644 |
5604 |
+--- a/drivers/media/dvb-frontends/ascot2e.c |
5605 |
++++ b/drivers/media/dvb-frontends/ascot2e.c |
5606 |
+@@ -155,7 +155,9 @@ static int ascot2e_write_regs(struct ascot2e_priv *priv, |
5607 |
+ |
5608 |
+ static int ascot2e_write_reg(struct ascot2e_priv *priv, u8 reg, u8 val) |
5609 |
+ { |
5610 |
+- return ascot2e_write_regs(priv, reg, &val, 1); |
5611 |
++ u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ |
5612 |
++ |
5613 |
++ return ascot2e_write_regs(priv, reg, &tmp, 1); |
5614 |
+ } |
5615 |
+ |
5616 |
+ static int ascot2e_read_regs(struct ascot2e_priv *priv, |
5617 |
+diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c |
5618 |
+index 48ee9bc00c06..ccbd84fd6428 100644 |
5619 |
+--- a/drivers/media/dvb-frontends/cxd2841er.c |
5620 |
++++ b/drivers/media/dvb-frontends/cxd2841er.c |
5621 |
+@@ -257,7 +257,9 @@ static int cxd2841er_write_regs(struct cxd2841er_priv *priv, |
5622 |
+ static int cxd2841er_write_reg(struct cxd2841er_priv *priv, |
5623 |
+ u8 addr, u8 reg, u8 val) |
5624 |
+ { |
5625 |
+- return cxd2841er_write_regs(priv, addr, reg, &val, 1); |
5626 |
++ u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ |
5627 |
++ |
5628 |
++ return cxd2841er_write_regs(priv, addr, reg, &tmp, 1); |
5629 |
+ } |
5630 |
+ |
5631 |
+ static int cxd2841er_read_regs(struct cxd2841er_priv *priv, |
5632 |
+diff --git a/drivers/media/dvb-frontends/helene.c b/drivers/media/dvb-frontends/helene.c |
5633 |
+index 4bf5a551ba40..2ab8d83e5576 100644 |
5634 |
+--- a/drivers/media/dvb-frontends/helene.c |
5635 |
++++ b/drivers/media/dvb-frontends/helene.c |
5636 |
+@@ -331,7 +331,9 @@ static int helene_write_regs(struct helene_priv *priv, |
5637 |
+ |
5638 |
+ static int helene_write_reg(struct helene_priv *priv, u8 reg, u8 val) |
5639 |
+ { |
5640 |
+- return helene_write_regs(priv, reg, &val, 1); |
5641 |
++ u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ |
5642 |
++ |
5643 |
++ return helene_write_regs(priv, reg, &tmp, 1); |
5644 |
+ } |
5645 |
+ |
5646 |
+ static int helene_read_regs(struct helene_priv *priv, |
5647 |
+diff --git a/drivers/media/dvb-frontends/horus3a.c b/drivers/media/dvb-frontends/horus3a.c |
5648 |
+index 68d759c4c52e..5c8b405f2ddc 100644 |
5649 |
+--- a/drivers/media/dvb-frontends/horus3a.c |
5650 |
++++ b/drivers/media/dvb-frontends/horus3a.c |
5651 |
+@@ -89,7 +89,9 @@ static int horus3a_write_regs(struct horus3a_priv *priv, |
5652 |
+ |
5653 |
+ static int horus3a_write_reg(struct horus3a_priv *priv, u8 reg, u8 val) |
5654 |
+ { |
5655 |
+- return horus3a_write_regs(priv, reg, &val, 1); |
5656 |
++ u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ |
5657 |
++ |
5658 |
++ return horus3a_write_regs(priv, reg, &tmp, 1); |
5659 |
+ } |
5660 |
+ |
5661 |
+ static int horus3a_enter_power_save(struct horus3a_priv *priv) |
5662 |
+diff --git a/drivers/media/dvb-frontends/itd1000.c b/drivers/media/dvb-frontends/itd1000.c |
5663 |
+index 5bb1e73a10b4..ce7c443d3eac 100644 |
5664 |
+--- a/drivers/media/dvb-frontends/itd1000.c |
5665 |
++++ b/drivers/media/dvb-frontends/itd1000.c |
5666 |
+@@ -95,8 +95,9 @@ static int itd1000_read_reg(struct itd1000_state *state, u8 reg) |
5667 |
+ |
5668 |
+ static inline int itd1000_write_reg(struct itd1000_state *state, u8 r, u8 v) |
5669 |
+ { |
5670 |
+- int ret = itd1000_write_regs(state, r, &v, 1); |
5671 |
+- state->shadow[r] = v; |
5672 |
++ u8 tmp = v; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ |
5673 |
++ int ret = itd1000_write_regs(state, r, &tmp, 1); |
5674 |
++ state->shadow[r] = tmp; |
5675 |
+ return ret; |
5676 |
+ } |
5677 |
+ |
5678 |
+diff --git a/drivers/media/dvb-frontends/mt312.c b/drivers/media/dvb-frontends/mt312.c |
5679 |
+index 961b9a2508e0..0b23cbc021b8 100644 |
5680 |
+--- a/drivers/media/dvb-frontends/mt312.c |
5681 |
++++ b/drivers/media/dvb-frontends/mt312.c |
5682 |
+@@ -142,7 +142,10 @@ static inline int mt312_readreg(struct mt312_state *state, |
5683 |
+ static inline int mt312_writereg(struct mt312_state *state, |
5684 |
+ const enum mt312_reg_addr reg, const u8 val) |
5685 |
+ { |
5686 |
+- return mt312_write(state, reg, &val, 1); |
5687 |
++ u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ |
5688 |
++ |
5689 |
++ |
5690 |
++ return mt312_write(state, reg, &tmp, 1); |
5691 |
+ } |
5692 |
+ |
5693 |
+ static inline u32 mt312_div(u32 a, u32 b) |
5694 |
+diff --git a/drivers/media/dvb-frontends/stb0899_drv.c b/drivers/media/dvb-frontends/stb0899_drv.c |
5695 |
+index 02347598277a..db5dde3215f0 100644 |
5696 |
+--- a/drivers/media/dvb-frontends/stb0899_drv.c |
5697 |
++++ b/drivers/media/dvb-frontends/stb0899_drv.c |
5698 |
+@@ -539,7 +539,8 @@ int stb0899_write_regs(struct stb0899_state *state, unsigned int reg, u8 *data, |
5699 |
+ |
5700 |
+ int stb0899_write_reg(struct stb0899_state *state, unsigned int reg, u8 data) |
5701 |
+ { |
5702 |
+- return stb0899_write_regs(state, reg, &data, 1); |
5703 |
++ u8 tmp = data; |
5704 |
++ return stb0899_write_regs(state, reg, &tmp, 1); |
5705 |
+ } |
5706 |
+ |
5707 |
+ /* |
5708 |
+diff --git a/drivers/media/dvb-frontends/stb6100.c b/drivers/media/dvb-frontends/stb6100.c |
5709 |
+index 17a955d0031b..75509bec66e4 100644 |
5710 |
+--- a/drivers/media/dvb-frontends/stb6100.c |
5711 |
++++ b/drivers/media/dvb-frontends/stb6100.c |
5712 |
+@@ -226,12 +226,14 @@ static int stb6100_write_reg_range(struct stb6100_state *state, u8 buf[], int st |
5713 |
+ |
5714 |
+ static int stb6100_write_reg(struct stb6100_state *state, u8 reg, u8 data) |
5715 |
+ { |
5716 |
++ u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ |
5717 |
++ |
5718 |
+ if (unlikely(reg >= STB6100_NUMREGS)) { |
5719 |
+ dprintk(verbose, FE_ERROR, 1, "Invalid register offset 0x%x", reg); |
5720 |
+ return -EREMOTEIO; |
5721 |
+ } |
5722 |
+- data = (data & stb6100_template[reg].mask) | stb6100_template[reg].set; |
5723 |
+- return stb6100_write_reg_range(state, &data, reg, 1); |
5724 |
++ tmp = (tmp & stb6100_template[reg].mask) | stb6100_template[reg].set; |
5725 |
++ return stb6100_write_reg_range(state, &tmp, reg, 1); |
5726 |
+ } |
5727 |
+ |
5728 |
+ |
5729 |
+diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c |
5730 |
+index f3529df8211d..1a726196c126 100644 |
5731 |
+--- a/drivers/media/dvb-frontends/stv0367.c |
5732 |
++++ b/drivers/media/dvb-frontends/stv0367.c |
5733 |
+@@ -166,7 +166,9 @@ int stv0367_writeregs(struct stv0367_state *state, u16 reg, u8 *data, int len) |
5734 |
+ |
5735 |
+ static int stv0367_writereg(struct stv0367_state *state, u16 reg, u8 data) |
5736 |
+ { |
5737 |
+- return stv0367_writeregs(state, reg, &data, 1); |
5738 |
++ u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ |
5739 |
++ |
5740 |
++ return stv0367_writeregs(state, reg, &tmp, 1); |
5741 |
+ } |
5742 |
+ |
5743 |
+ static u8 stv0367_readreg(struct stv0367_state *state, u16 reg) |
5744 |
+diff --git a/drivers/media/dvb-frontends/stv090x.c b/drivers/media/dvb-frontends/stv090x.c |
5745 |
+index 7ef469c0c866..2695e1eb6d9c 100644 |
5746 |
+--- a/drivers/media/dvb-frontends/stv090x.c |
5747 |
++++ b/drivers/media/dvb-frontends/stv090x.c |
5748 |
+@@ -755,7 +755,9 @@ static int stv090x_write_regs(struct stv090x_state *state, unsigned int reg, u8 |
5749 |
+ |
5750 |
+ static int stv090x_write_reg(struct stv090x_state *state, unsigned int reg, u8 data) |
5751 |
+ { |
5752 |
+- return stv090x_write_regs(state, reg, &data, 1); |
5753 |
++ u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ |
5754 |
++ |
5755 |
++ return stv090x_write_regs(state, reg, &tmp, 1); |
5756 |
+ } |
5757 |
+ |
5758 |
+ static int stv090x_i2c_gate_ctrl(struct stv090x_state *state, int enable) |
5759 |
+diff --git a/drivers/media/dvb-frontends/stv6110x.c b/drivers/media/dvb-frontends/stv6110x.c |
5760 |
+index 66eba38f1014..7e8e01389c55 100644 |
5761 |
+--- a/drivers/media/dvb-frontends/stv6110x.c |
5762 |
++++ b/drivers/media/dvb-frontends/stv6110x.c |
5763 |
+@@ -97,7 +97,9 @@ static int stv6110x_write_regs(struct stv6110x_state *stv6110x, int start, u8 da |
5764 |
+ |
5765 |
+ static int stv6110x_write_reg(struct stv6110x_state *stv6110x, u8 reg, u8 data) |
5766 |
+ { |
5767 |
+- return stv6110x_write_regs(stv6110x, reg, &data, 1); |
5768 |
++ u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ |
5769 |
++ |
5770 |
++ return stv6110x_write_regs(stv6110x, reg, &tmp, 1); |
5771 |
+ } |
5772 |
+ |
5773 |
+ static int stv6110x_init(struct dvb_frontend *fe) |
5774 |
+diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c |
5775 |
+index 931e5c98da8a..b879e1571469 100644 |
5776 |
+--- a/drivers/media/dvb-frontends/ts2020.c |
5777 |
++++ b/drivers/media/dvb-frontends/ts2020.c |
5778 |
+@@ -368,7 +368,7 @@ static int ts2020_read_tuner_gain(struct dvb_frontend *fe, unsigned v_agc, |
5779 |
+ gain2 = clamp_t(long, gain2, 0, 13); |
5780 |
+ v_agc = clamp_t(long, v_agc, 400, 1100); |
5781 |
+ |
5782 |
+- *_gain = -(gain1 * 2330 + |
5783 |
++ *_gain = -((__s64)gain1 * 2330 + |
5784 |
+ gain2 * 3500 + |
5785 |
+ v_agc * 24 / 10 * 10 + |
5786 |
+ 10000); |
5787 |
+@@ -386,7 +386,7 @@ static int ts2020_read_tuner_gain(struct dvb_frontend *fe, unsigned v_agc, |
5788 |
+ gain3 = clamp_t(long, gain3, 0, 6); |
5789 |
+ v_agc = clamp_t(long, v_agc, 600, 1600); |
5790 |
+ |
5791 |
+- *_gain = -(gain1 * 2650 + |
5792 |
++ *_gain = -((__s64)gain1 * 2650 + |
5793 |
+ gain2 * 3380 + |
5794 |
+ gain3 * 2850 + |
5795 |
+ v_agc * 176 / 100 * 10 - |
5796 |
+diff --git a/drivers/media/dvb-frontends/zl10039.c b/drivers/media/dvb-frontends/zl10039.c |
5797 |
+index 623355fc2666..3208b866d1cb 100644 |
5798 |
+--- a/drivers/media/dvb-frontends/zl10039.c |
5799 |
++++ b/drivers/media/dvb-frontends/zl10039.c |
5800 |
+@@ -134,7 +134,9 @@ static inline int zl10039_writereg(struct zl10039_state *state, |
5801 |
+ const enum zl10039_reg_addr reg, |
5802 |
+ const u8 val) |
5803 |
+ { |
5804 |
+- return zl10039_write(state, reg, &val, 1); |
5805 |
++ const u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ |
5806 |
++ |
5807 |
++ return zl10039_write(state, reg, &tmp, 1); |
5808 |
+ } |
5809 |
+ |
5810 |
+ static int zl10039_init(struct dvb_frontend *fe) |
5811 |
+diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c |
5812 |
+index 5e320fa4a795..be26c029546b 100644 |
5813 |
+--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c |
5814 |
++++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c |
5815 |
+@@ -494,18 +494,23 @@ static int lme2510_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid, |
5816 |
+ |
5817 |
+ static int lme2510_return_status(struct dvb_usb_device *d) |
5818 |
+ { |
5819 |
+- int ret = 0; |
5820 |
++ int ret; |
5821 |
+ u8 *data; |
5822 |
+ |
5823 |
+- data = kzalloc(10, GFP_KERNEL); |
5824 |
++ data = kzalloc(6, GFP_KERNEL); |
5825 |
+ if (!data) |
5826 |
+ return -ENOMEM; |
5827 |
+ |
5828 |
+- ret |= usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), |
5829 |
+- 0x06, 0x80, 0x0302, 0x00, data, 0x0006, 200); |
5830 |
+- info("Firmware Status: %x (%x)", ret , data[2]); |
5831 |
++ ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), |
5832 |
++ 0x06, 0x80, 0x0302, 0x00, |
5833 |
++ data, 0x6, 200); |
5834 |
++ if (ret != 6) |
5835 |
++ ret = -EINVAL; |
5836 |
++ else |
5837 |
++ ret = data[2]; |
5838 |
++ |
5839 |
++ info("Firmware Status: %6ph", data); |
5840 |
+ |
5841 |
+- ret = (ret < 0) ? -ENODEV : data[2]; |
5842 |
+ kfree(data); |
5843 |
+ return ret; |
5844 |
+ } |
5845 |
+@@ -1071,8 +1076,6 @@ static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap) |
5846 |
+ |
5847 |
+ if (adap->fe[0]) { |
5848 |
+ info("FE Found M88RS2000"); |
5849 |
+- dvb_attach(ts2020_attach, adap->fe[0], &ts2020_config, |
5850 |
+- &d->i2c_adap); |
5851 |
+ st->i2c_tuner_gate_w = 5; |
5852 |
+ st->i2c_tuner_gate_r = 5; |
5853 |
+ st->i2c_tuner_addr = 0x60; |
5854 |
+@@ -1138,17 +1141,18 @@ static int dm04_lme2510_tuner(struct dvb_usb_adapter *adap) |
5855 |
+ ret = st->tuner_config; |
5856 |
+ break; |
5857 |
+ case TUNER_RS2000: |
5858 |
+- ret = st->tuner_config; |
5859 |
++ if (dvb_attach(ts2020_attach, adap->fe[0], |
5860 |
++ &ts2020_config, &d->i2c_adap)) |
5861 |
++ ret = st->tuner_config; |
5862 |
+ break; |
5863 |
+ default: |
5864 |
+ break; |
5865 |
+ } |
5866 |
+ |
5867 |
+- if (ret) |
5868 |
++ if (ret) { |
5869 |
+ info("TUN Found %s tuner", tun_msg[ret]); |
5870 |
+- else { |
5871 |
+- info("TUN No tuner found --- resetting device"); |
5872 |
+- lme_coldreset(d); |
5873 |
++ } else { |
5874 |
++ info("TUN No tuner found"); |
5875 |
+ return -ENODEV; |
5876 |
+ } |
5877 |
+ |
5878 |
+@@ -1189,6 +1193,7 @@ static int lme2510_get_adapter_count(struct dvb_usb_device *d) |
5879 |
+ static int lme2510_identify_state(struct dvb_usb_device *d, const char **name) |
5880 |
+ { |
5881 |
+ struct lme2510_state *st = d->priv; |
5882 |
++ int status; |
5883 |
+ |
5884 |
+ usb_reset_configuration(d->udev); |
5885 |
+ |
5886 |
+@@ -1197,12 +1202,16 @@ static int lme2510_identify_state(struct dvb_usb_device *d, const char **name) |
5887 |
+ |
5888 |
+ st->dvb_usb_lme2510_firmware = dvb_usb_lme2510_firmware; |
5889 |
+ |
5890 |
+- if (lme2510_return_status(d) == 0x44) { |
5891 |
++ status = lme2510_return_status(d); |
5892 |
++ if (status == 0x44) { |
5893 |
+ *name = lme_firmware_switch(d, 0); |
5894 |
+ return COLD; |
5895 |
+ } |
5896 |
+ |
5897 |
+- return 0; |
5898 |
++ if (status != 0x47) |
5899 |
++ return -EINVAL; |
5900 |
++ |
5901 |
++ return WARM; |
5902 |
+ } |
5903 |
+ |
5904 |
+ static int lme2510_get_stream_config(struct dvb_frontend *fe, u8 *ts_type, |
5905 |
+diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c |
5906 |
+index 37dea0adc695..cfe86b4864b3 100644 |
5907 |
+--- a/drivers/media/usb/dvb-usb/cxusb.c |
5908 |
++++ b/drivers/media/usb/dvb-usb/cxusb.c |
5909 |
+@@ -677,6 +677,8 @@ static int dvico_bluebird_xc2028_callback(void *ptr, int component, |
5910 |
+ case XC2028_RESET_CLK: |
5911 |
+ deb_info("%s: XC2028_RESET_CLK %d\n", __func__, arg); |
5912 |
+ break; |
5913 |
++ case XC2028_I2C_FLUSH: |
5914 |
++ break; |
5915 |
+ default: |
5916 |
+ deb_info("%s: unknown command %d, arg %d\n", __func__, |
5917 |
+ command, arg); |
5918 |
+diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c |
5919 |
+index 92098c1b78e5..9be1e658ef47 100644 |
5920 |
+--- a/drivers/media/usb/dvb-usb/dib0700_devices.c |
5921 |
++++ b/drivers/media/usb/dvb-usb/dib0700_devices.c |
5922 |
+@@ -430,6 +430,7 @@ static int stk7700ph_xc3028_callback(void *ptr, int component, |
5923 |
+ state->dib7000p_ops.set_gpio(adap->fe_adap[0].fe, 8, 0, 1); |
5924 |
+ break; |
5925 |
+ case XC2028_RESET_CLK: |
5926 |
++ case XC2028_I2C_FLUSH: |
5927 |
+ break; |
5928 |
+ default: |
5929 |
+ err("%s: unknown command %d, arg %d\n", __func__, |
5930 |
+diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c |
5931 |
+index dbe29c6c4d8b..1e8cbaf36896 100644 |
5932 |
+--- a/drivers/media/usb/hdpvr/hdpvr-core.c |
5933 |
++++ b/drivers/media/usb/hdpvr/hdpvr-core.c |
5934 |
+@@ -292,7 +292,7 @@ static int hdpvr_probe(struct usb_interface *interface, |
5935 |
+ /* register v4l2_device early so it can be used for printks */ |
5936 |
+ if (v4l2_device_register(&interface->dev, &dev->v4l2_dev)) { |
5937 |
+ dev_err(&interface->dev, "v4l2_device_register failed\n"); |
5938 |
+- goto error; |
5939 |
++ goto error_free_dev; |
5940 |
+ } |
5941 |
+ |
5942 |
+ mutex_init(&dev->io_mutex); |
5943 |
+@@ -301,7 +301,7 @@ static int hdpvr_probe(struct usb_interface *interface, |
5944 |
+ dev->usbc_buf = kmalloc(64, GFP_KERNEL); |
5945 |
+ if (!dev->usbc_buf) { |
5946 |
+ v4l2_err(&dev->v4l2_dev, "Out of memory\n"); |
5947 |
+- goto error; |
5948 |
++ goto error_v4l2_unregister; |
5949 |
+ } |
5950 |
+ |
5951 |
+ init_waitqueue_head(&dev->wait_buffer); |
5952 |
+@@ -339,13 +339,13 @@ static int hdpvr_probe(struct usb_interface *interface, |
5953 |
+ } |
5954 |
+ if (!dev->bulk_in_endpointAddr) { |
5955 |
+ v4l2_err(&dev->v4l2_dev, "Could not find bulk-in endpoint\n"); |
5956 |
+- goto error; |
5957 |
++ goto error_put_usb; |
5958 |
+ } |
5959 |
+ |
5960 |
+ /* init the device */ |
5961 |
+ if (hdpvr_device_init(dev)) { |
5962 |
+ v4l2_err(&dev->v4l2_dev, "device init failed\n"); |
5963 |
+- goto error; |
5964 |
++ goto error_put_usb; |
5965 |
+ } |
5966 |
+ |
5967 |
+ mutex_lock(&dev->io_mutex); |
5968 |
+@@ -353,7 +353,7 @@ static int hdpvr_probe(struct usb_interface *interface, |
5969 |
+ mutex_unlock(&dev->io_mutex); |
5970 |
+ v4l2_err(&dev->v4l2_dev, |
5971 |
+ "allocating transfer buffers failed\n"); |
5972 |
+- goto error; |
5973 |
++ goto error_put_usb; |
5974 |
+ } |
5975 |
+ mutex_unlock(&dev->io_mutex); |
5976 |
+ |
5977 |
+@@ -361,7 +361,7 @@ static int hdpvr_probe(struct usb_interface *interface, |
5978 |
+ retval = hdpvr_register_i2c_adapter(dev); |
5979 |
+ if (retval < 0) { |
5980 |
+ v4l2_err(&dev->v4l2_dev, "i2c adapter register failed\n"); |
5981 |
+- goto error; |
5982 |
++ goto error_free_buffers; |
5983 |
+ } |
5984 |
+ |
5985 |
+ client = hdpvr_register_ir_rx_i2c(dev); |
5986 |
+@@ -394,13 +394,17 @@ static int hdpvr_probe(struct usb_interface *interface, |
5987 |
+ reg_fail: |
5988 |
+ #if IS_ENABLED(CONFIG_I2C) |
5989 |
+ i2c_del_adapter(&dev->i2c_adapter); |
5990 |
++error_free_buffers: |
5991 |
+ #endif |
5992 |
++ hdpvr_free_buffers(dev); |
5993 |
++error_put_usb: |
5994 |
++ usb_put_dev(dev->udev); |
5995 |
++ kfree(dev->usbc_buf); |
5996 |
++error_v4l2_unregister: |
5997 |
++ v4l2_device_unregister(&dev->v4l2_dev); |
5998 |
++error_free_dev: |
5999 |
++ kfree(dev); |
6000 |
+ error: |
6001 |
+- if (dev) { |
6002 |
+- flush_work(&dev->worker); |
6003 |
+- /* this frees allocated memory */ |
6004 |
+- hdpvr_delete(dev); |
6005 |
+- } |
6006 |
+ return retval; |
6007 |
+ } |
6008 |
+ |
6009 |
+diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c |
6010 |
+index 821f2aa299ae..cbeea8343a5c 100644 |
6011 |
+--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c |
6012 |
++++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c |
6013 |
+@@ -18,8 +18,18 @@ |
6014 |
+ #include <linux/videodev2.h> |
6015 |
+ #include <linux/v4l2-subdev.h> |
6016 |
+ #include <media/v4l2-dev.h> |
6017 |
++#include <media/v4l2-fh.h> |
6018 |
++#include <media/v4l2-ctrls.h> |
6019 |
+ #include <media/v4l2-ioctl.h> |
6020 |
+ |
6021 |
++/* Use the same argument order as copy_in_user */ |
6022 |
++#define assign_in_user(to, from) \ |
6023 |
++({ \ |
6024 |
++ typeof(*from) __assign_tmp; \ |
6025 |
++ \ |
6026 |
++ get_user(__assign_tmp, from) || put_user(__assign_tmp, to); \ |
6027 |
++}) |
6028 |
++ |
6029 |
+ static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
6030 |
+ { |
6031 |
+ long ret = -ENOIOCTLCMD; |
6032 |
+@@ -46,135 +56,75 @@ struct v4l2_window32 { |
6033 |
+ __u8 global_alpha; |
6034 |
+ }; |
6035 |
+ |
6036 |
+-static int get_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up) |
6037 |
+-{ |
6038 |
+- if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_window32)) || |
6039 |
+- copy_from_user(&kp->w, &up->w, sizeof(up->w)) || |
6040 |
+- get_user(kp->field, &up->field) || |
6041 |
+- get_user(kp->chromakey, &up->chromakey) || |
6042 |
+- get_user(kp->clipcount, &up->clipcount) || |
6043 |
+- get_user(kp->global_alpha, &up->global_alpha)) |
6044 |
+- return -EFAULT; |
6045 |
+- if (kp->clipcount > 2048) |
6046 |
+- return -EINVAL; |
6047 |
+- if (kp->clipcount) { |
6048 |
+- struct v4l2_clip32 __user *uclips; |
6049 |
+- struct v4l2_clip __user *kclips; |
6050 |
+- int n = kp->clipcount; |
6051 |
+- compat_caddr_t p; |
6052 |
+- |
6053 |
+- if (get_user(p, &up->clips)) |
6054 |
+- return -EFAULT; |
6055 |
+- uclips = compat_ptr(p); |
6056 |
+- kclips = compat_alloc_user_space(n * sizeof(struct v4l2_clip)); |
6057 |
+- kp->clips = kclips; |
6058 |
+- while (--n >= 0) { |
6059 |
+- if (copy_in_user(&kclips->c, &uclips->c, sizeof(uclips->c))) |
6060 |
+- return -EFAULT; |
6061 |
+- if (put_user(n ? kclips + 1 : NULL, &kclips->next)) |
6062 |
+- return -EFAULT; |
6063 |
+- uclips += 1; |
6064 |
+- kclips += 1; |
6065 |
+- } |
6066 |
+- } else |
6067 |
+- kp->clips = NULL; |
6068 |
+- return 0; |
6069 |
+-} |
6070 |
+- |
6071 |
+-static int put_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up) |
6072 |
+-{ |
6073 |
+- if (copy_to_user(&up->w, &kp->w, sizeof(kp->w)) || |
6074 |
+- put_user(kp->field, &up->field) || |
6075 |
+- put_user(kp->chromakey, &up->chromakey) || |
6076 |
+- put_user(kp->clipcount, &up->clipcount) || |
6077 |
+- put_user(kp->global_alpha, &up->global_alpha)) |
6078 |
+- return -EFAULT; |
6079 |
+- return 0; |
6080 |
+-} |
6081 |
+- |
6082 |
+-static inline int get_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pix_format __user *up) |
6083 |
+-{ |
6084 |
+- if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format))) |
6085 |
+- return -EFAULT; |
6086 |
+- return 0; |
6087 |
+-} |
6088 |
+- |
6089 |
+-static inline int get_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane *kp, |
6090 |
+- struct v4l2_pix_format_mplane __user *up) |
6091 |
+-{ |
6092 |
+- if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format_mplane))) |
6093 |
+- return -EFAULT; |
6094 |
+- return 0; |
6095 |
+-} |
6096 |
+- |
6097 |
+-static inline int put_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pix_format __user *up) |
6098 |
++static int get_v4l2_window32(struct v4l2_window __user *kp, |
6099 |
++ struct v4l2_window32 __user *up, |
6100 |
++ void __user *aux_buf, u32 aux_space) |
6101 |
+ { |
6102 |
+- if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format))) |
6103 |
+- return -EFAULT; |
6104 |
+- return 0; |
6105 |
+-} |
6106 |
+- |
6107 |
+-static inline int put_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane *kp, |
6108 |
+- struct v4l2_pix_format_mplane __user *up) |
6109 |
+-{ |
6110 |
+- if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format_mplane))) |
6111 |
+- return -EFAULT; |
6112 |
+- return 0; |
6113 |
+-} |
6114 |
+- |
6115 |
+-static inline int get_v4l2_vbi_format(struct v4l2_vbi_format *kp, struct v4l2_vbi_format __user *up) |
6116 |
+-{ |
6117 |
+- if (copy_from_user(kp, up, sizeof(struct v4l2_vbi_format))) |
6118 |
+- return -EFAULT; |
6119 |
+- return 0; |
6120 |
+-} |
6121 |
+- |
6122 |
+-static inline int put_v4l2_vbi_format(struct v4l2_vbi_format *kp, struct v4l2_vbi_format __user *up) |
6123 |
+-{ |
6124 |
+- if (copy_to_user(up, kp, sizeof(struct v4l2_vbi_format))) |
6125 |
++ struct v4l2_clip32 __user *uclips; |
6126 |
++ struct v4l2_clip __user *kclips; |
6127 |
++ compat_caddr_t p; |
6128 |
++ u32 clipcount; |
6129 |
++ |
6130 |
++ if (!access_ok(VERIFY_READ, up, sizeof(*up)) || |
6131 |
++ copy_in_user(&kp->w, &up->w, sizeof(up->w)) || |
6132 |
++ assign_in_user(&kp->field, &up->field) || |
6133 |
++ assign_in_user(&kp->chromakey, &up->chromakey) || |
6134 |
++ assign_in_user(&kp->global_alpha, &up->global_alpha) || |
6135 |
++ get_user(clipcount, &up->clipcount) || |
6136 |
++ put_user(clipcount, &kp->clipcount)) |
6137 |
+ return -EFAULT; |
6138 |
+- return 0; |
6139 |
+-} |
6140 |
++ if (clipcount > 2048) |
6141 |
++ return -EINVAL; |
6142 |
++ if (!clipcount) |
6143 |
++ return put_user(NULL, &kp->clips); |
6144 |
+ |
6145 |
+-static inline int get_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp, struct v4l2_sliced_vbi_format __user *up) |
6146 |
+-{ |
6147 |
+- if (copy_from_user(kp, up, sizeof(struct v4l2_sliced_vbi_format))) |
6148 |
++ if (get_user(p, &up->clips)) |
6149 |
+ return -EFAULT; |
6150 |
+- return 0; |
6151 |
+-} |
6152 |
+- |
6153 |
+-static inline int put_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp, struct v4l2_sliced_vbi_format __user *up) |
6154 |
+-{ |
6155 |
+- if (copy_to_user(up, kp, sizeof(struct v4l2_sliced_vbi_format))) |
6156 |
++ uclips = compat_ptr(p); |
6157 |
++ if (aux_space < clipcount * sizeof(*kclips)) |
6158 |
+ return -EFAULT; |
6159 |
+- return 0; |
6160 |
+-} |
6161 |
+- |
6162 |
+-static inline int get_v4l2_sdr_format(struct v4l2_sdr_format *kp, struct v4l2_sdr_format __user *up) |
6163 |
+-{ |
6164 |
+- if (copy_from_user(kp, up, sizeof(struct v4l2_sdr_format))) |
6165 |
++ kclips = aux_buf; |
6166 |
++ if (put_user(kclips, &kp->clips)) |
6167 |
+ return -EFAULT; |
6168 |
+- return 0; |
6169 |
+-} |
6170 |
+ |
6171 |
+-static inline int put_v4l2_sdr_format(struct v4l2_sdr_format *kp, struct v4l2_sdr_format __user *up) |
6172 |
+-{ |
6173 |
+- if (copy_to_user(up, kp, sizeof(struct v4l2_sdr_format))) |
6174 |
+- return -EFAULT; |
6175 |
++ while (clipcount--) { |
6176 |
++ if (copy_in_user(&kclips->c, &uclips->c, sizeof(uclips->c))) |
6177 |
++ return -EFAULT; |
6178 |
++ if (put_user(clipcount ? kclips + 1 : NULL, &kclips->next)) |
6179 |
++ return -EFAULT; |
6180 |
++ uclips++; |
6181 |
++ kclips++; |
6182 |
++ } |
6183 |
+ return 0; |
6184 |
+ } |
6185 |
+ |
6186 |
+-static inline int get_v4l2_meta_format(struct v4l2_meta_format *kp, struct v4l2_meta_format __user *up) |
6187 |
++static int put_v4l2_window32(struct v4l2_window __user *kp, |
6188 |
++ struct v4l2_window32 __user *up) |
6189 |
+ { |
6190 |
+- if (copy_from_user(kp, up, sizeof(struct v4l2_meta_format))) |
6191 |
++ struct v4l2_clip __user *kclips = kp->clips; |
6192 |
++ struct v4l2_clip32 __user *uclips; |
6193 |
++ compat_caddr_t p; |
6194 |
++ u32 clipcount; |
6195 |
++ |
6196 |
++ if (copy_in_user(&up->w, &kp->w, sizeof(kp->w)) || |
6197 |
++ assign_in_user(&up->field, &kp->field) || |
6198 |
++ assign_in_user(&up->chromakey, &kp->chromakey) || |
6199 |
++ assign_in_user(&up->global_alpha, &kp->global_alpha) || |
6200 |
++ get_user(clipcount, &kp->clipcount) || |
6201 |
++ put_user(clipcount, &up->clipcount)) |
6202 |
+ return -EFAULT; |
6203 |
+- return 0; |
6204 |
+-} |
6205 |
++ if (!clipcount) |
6206 |
++ return 0; |
6207 |
+ |
6208 |
+-static inline int put_v4l2_meta_format(struct v4l2_meta_format *kp, struct v4l2_meta_format __user *up) |
6209 |
+-{ |
6210 |
+- if (copy_to_user(up, kp, sizeof(struct v4l2_meta_format))) |
6211 |
++ if (get_user(p, &up->clips)) |
6212 |
+ return -EFAULT; |
6213 |
++ uclips = compat_ptr(p); |
6214 |
++ while (clipcount--) { |
6215 |
++ if (copy_in_user(&uclips->c, &kclips->c, sizeof(uclips->c))) |
6216 |
++ return -EFAULT; |
6217 |
++ uclips++; |
6218 |
++ kclips++; |
6219 |
++ } |
6220 |
+ return 0; |
6221 |
+ } |
6222 |
+ |
6223 |
+@@ -209,101 +159,164 @@ struct v4l2_create_buffers32 { |
6224 |
+ __u32 reserved[8]; |
6225 |
+ }; |
6226 |
+ |
6227 |
+-static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) |
6228 |
++static int __bufsize_v4l2_format(struct v4l2_format32 __user *up, u32 *size) |
6229 |
+ { |
6230 |
+- if (get_user(kp->type, &up->type)) |
6231 |
++ u32 type; |
6232 |
++ |
6233 |
++ if (get_user(type, &up->type)) |
6234 |
+ return -EFAULT; |
6235 |
+ |
6236 |
+- switch (kp->type) { |
6237 |
++ switch (type) { |
6238 |
++ case V4L2_BUF_TYPE_VIDEO_OVERLAY: |
6239 |
++ case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: { |
6240 |
++ u32 clipcount; |
6241 |
++ |
6242 |
++ if (get_user(clipcount, &up->fmt.win.clipcount)) |
6243 |
++ return -EFAULT; |
6244 |
++ if (clipcount > 2048) |
6245 |
++ return -EINVAL; |
6246 |
++ *size = clipcount * sizeof(struct v4l2_clip); |
6247 |
++ return 0; |
6248 |
++ } |
6249 |
++ default: |
6250 |
++ *size = 0; |
6251 |
++ return 0; |
6252 |
++ } |
6253 |
++} |
6254 |
++ |
6255 |
++static int bufsize_v4l2_format(struct v4l2_format32 __user *up, u32 *size) |
6256 |
++{ |
6257 |
++ if (!access_ok(VERIFY_READ, up, sizeof(*up))) |
6258 |
++ return -EFAULT; |
6259 |
++ return __bufsize_v4l2_format(up, size); |
6260 |
++} |
6261 |
++ |
6262 |
++static int __get_v4l2_format32(struct v4l2_format __user *kp, |
6263 |
++ struct v4l2_format32 __user *up, |
6264 |
++ void __user *aux_buf, u32 aux_space) |
6265 |
++{ |
6266 |
++ u32 type; |
6267 |
++ |
6268 |
++ if (get_user(type, &up->type) || put_user(type, &kp->type)) |
6269 |
++ return -EFAULT; |
6270 |
++ |
6271 |
++ switch (type) { |
6272 |
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE: |
6273 |
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT: |
6274 |
+- return get_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix); |
6275 |
++ return copy_in_user(&kp->fmt.pix, &up->fmt.pix, |
6276 |
++ sizeof(kp->fmt.pix)) ? -EFAULT : 0; |
6277 |
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: |
6278 |
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: |
6279 |
+- return get_v4l2_pix_format_mplane(&kp->fmt.pix_mp, |
6280 |
+- &up->fmt.pix_mp); |
6281 |
++ return copy_in_user(&kp->fmt.pix_mp, &up->fmt.pix_mp, |
6282 |
++ sizeof(kp->fmt.pix_mp)) ? -EFAULT : 0; |
6283 |
+ case V4L2_BUF_TYPE_VIDEO_OVERLAY: |
6284 |
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: |
6285 |
+- return get_v4l2_window32(&kp->fmt.win, &up->fmt.win); |
6286 |
++ return get_v4l2_window32(&kp->fmt.win, &up->fmt.win, |
6287 |
++ aux_buf, aux_space); |
6288 |
+ case V4L2_BUF_TYPE_VBI_CAPTURE: |
6289 |
+ case V4L2_BUF_TYPE_VBI_OUTPUT: |
6290 |
+- return get_v4l2_vbi_format(&kp->fmt.vbi, &up->fmt.vbi); |
6291 |
++ return copy_in_user(&kp->fmt.vbi, &up->fmt.vbi, |
6292 |
++ sizeof(kp->fmt.vbi)) ? -EFAULT : 0; |
6293 |
+ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: |
6294 |
+ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: |
6295 |
+- return get_v4l2_sliced_vbi_format(&kp->fmt.sliced, &up->fmt.sliced); |
6296 |
++ return copy_in_user(&kp->fmt.sliced, &up->fmt.sliced, |
6297 |
++ sizeof(kp->fmt.sliced)) ? -EFAULT : 0; |
6298 |
+ case V4L2_BUF_TYPE_SDR_CAPTURE: |
6299 |
+ case V4L2_BUF_TYPE_SDR_OUTPUT: |
6300 |
+- return get_v4l2_sdr_format(&kp->fmt.sdr, &up->fmt.sdr); |
6301 |
++ return copy_in_user(&kp->fmt.sdr, &up->fmt.sdr, |
6302 |
++ sizeof(kp->fmt.sdr)) ? -EFAULT : 0; |
6303 |
+ case V4L2_BUF_TYPE_META_CAPTURE: |
6304 |
+- return get_v4l2_meta_format(&kp->fmt.meta, &up->fmt.meta); |
6305 |
++ return copy_in_user(&kp->fmt.meta, &up->fmt.meta, |
6306 |
++ sizeof(kp->fmt.meta)) ? -EFAULT : 0; |
6307 |
+ default: |
6308 |
+- pr_info("compat_ioctl32: unexpected VIDIOC_FMT type %d\n", |
6309 |
+- kp->type); |
6310 |
+ return -EINVAL; |
6311 |
+ } |
6312 |
+ } |
6313 |
+ |
6314 |
+-static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) |
6315 |
++static int get_v4l2_format32(struct v4l2_format __user *kp, |
6316 |
++ struct v4l2_format32 __user *up, |
6317 |
++ void __user *aux_buf, u32 aux_space) |
6318 |
++{ |
6319 |
++ if (!access_ok(VERIFY_READ, up, sizeof(*up))) |
6320 |
++ return -EFAULT; |
6321 |
++ return __get_v4l2_format32(kp, up, aux_buf, aux_space); |
6322 |
++} |
6323 |
++ |
6324 |
++static int bufsize_v4l2_create(struct v4l2_create_buffers32 __user *up, |
6325 |
++ u32 *size) |
6326 |
+ { |
6327 |
+- if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32))) |
6328 |
++ if (!access_ok(VERIFY_READ, up, sizeof(*up))) |
6329 |
+ return -EFAULT; |
6330 |
+- return __get_v4l2_format32(kp, up); |
6331 |
++ return __bufsize_v4l2_format(&up->format, size); |
6332 |
+ } |
6333 |
+ |
6334 |
+-static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up) |
6335 |
++static int get_v4l2_create32(struct v4l2_create_buffers __user *kp, |
6336 |
++ struct v4l2_create_buffers32 __user *up, |
6337 |
++ void __user *aux_buf, u32 aux_space) |
6338 |
+ { |
6339 |
+- if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) || |
6340 |
+- copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format))) |
6341 |
++ if (!access_ok(VERIFY_READ, up, sizeof(*up)) || |
6342 |
++ copy_in_user(kp, up, |
6343 |
++ offsetof(struct v4l2_create_buffers32, format))) |
6344 |
+ return -EFAULT; |
6345 |
+- return __get_v4l2_format32(&kp->format, &up->format); |
6346 |
++ return __get_v4l2_format32(&kp->format, &up->format, |
6347 |
++ aux_buf, aux_space); |
6348 |
+ } |
6349 |
+ |
6350 |
+-static int __put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) |
6351 |
++static int __put_v4l2_format32(struct v4l2_format __user *kp, |
6352 |
++ struct v4l2_format32 __user *up) |
6353 |
+ { |
6354 |
+- if (put_user(kp->type, &up->type)) |
6355 |
++ u32 type; |
6356 |
++ |
6357 |
++ if (get_user(type, &kp->type)) |
6358 |
+ return -EFAULT; |
6359 |
+ |
6360 |
+- switch (kp->type) { |
6361 |
++ switch (type) { |
6362 |
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE: |
6363 |
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT: |
6364 |
+- return put_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix); |
6365 |
++ return copy_in_user(&up->fmt.pix, &kp->fmt.pix, |
6366 |
++ sizeof(kp->fmt.pix)) ? -EFAULT : 0; |
6367 |
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: |
6368 |
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: |
6369 |
+- return put_v4l2_pix_format_mplane(&kp->fmt.pix_mp, |
6370 |
+- &up->fmt.pix_mp); |
6371 |
++ return copy_in_user(&up->fmt.pix_mp, &kp->fmt.pix_mp, |
6372 |
++ sizeof(kp->fmt.pix_mp)) ? -EFAULT : 0; |
6373 |
+ case V4L2_BUF_TYPE_VIDEO_OVERLAY: |
6374 |
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: |
6375 |
+ return put_v4l2_window32(&kp->fmt.win, &up->fmt.win); |
6376 |
+ case V4L2_BUF_TYPE_VBI_CAPTURE: |
6377 |
+ case V4L2_BUF_TYPE_VBI_OUTPUT: |
6378 |
+- return put_v4l2_vbi_format(&kp->fmt.vbi, &up->fmt.vbi); |
6379 |
++ return copy_in_user(&up->fmt.vbi, &kp->fmt.vbi, |
6380 |
++ sizeof(kp->fmt.vbi)) ? -EFAULT : 0; |
6381 |
+ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: |
6382 |
+ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: |
6383 |
+- return put_v4l2_sliced_vbi_format(&kp->fmt.sliced, &up->fmt.sliced); |
6384 |
++ return copy_in_user(&up->fmt.sliced, &kp->fmt.sliced, |
6385 |
++ sizeof(kp->fmt.sliced)) ? -EFAULT : 0; |
6386 |
+ case V4L2_BUF_TYPE_SDR_CAPTURE: |
6387 |
+ case V4L2_BUF_TYPE_SDR_OUTPUT: |
6388 |
+- return put_v4l2_sdr_format(&kp->fmt.sdr, &up->fmt.sdr); |
6389 |
++ return copy_in_user(&up->fmt.sdr, &kp->fmt.sdr, |
6390 |
++ sizeof(kp->fmt.sdr)) ? -EFAULT : 0; |
6391 |
+ case V4L2_BUF_TYPE_META_CAPTURE: |
6392 |
+- return put_v4l2_meta_format(&kp->fmt.meta, &up->fmt.meta); |
6393 |
++ return copy_in_user(&up->fmt.meta, &kp->fmt.meta, |
6394 |
++ sizeof(kp->fmt.meta)) ? -EFAULT : 0; |
6395 |
+ default: |
6396 |
+- pr_info("compat_ioctl32: unexpected VIDIOC_FMT type %d\n", |
6397 |
+- kp->type); |
6398 |
+ return -EINVAL; |
6399 |
+ } |
6400 |
+ } |
6401 |
+ |
6402 |
+-static int put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) |
6403 |
++static int put_v4l2_format32(struct v4l2_format __user *kp, |
6404 |
++ struct v4l2_format32 __user *up) |
6405 |
+ { |
6406 |
+- if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_format32))) |
6407 |
++ if (!access_ok(VERIFY_WRITE, up, sizeof(*up))) |
6408 |
+ return -EFAULT; |
6409 |
+ return __put_v4l2_format32(kp, up); |
6410 |
+ } |
6411 |
+ |
6412 |
+-static int put_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up) |
6413 |
++static int put_v4l2_create32(struct v4l2_create_buffers __user *kp, |
6414 |
++ struct v4l2_create_buffers32 __user *up) |
6415 |
+ { |
6416 |
+- if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_create_buffers32)) || |
6417 |
+- copy_to_user(up, kp, offsetof(struct v4l2_create_buffers32, format)) || |
6418 |
+- copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved))) |
6419 |
++ if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || |
6420 |
++ copy_in_user(up, kp, |
6421 |
++ offsetof(struct v4l2_create_buffers32, format)) || |
6422 |
++ copy_in_user(up->reserved, kp->reserved, sizeof(kp->reserved))) |
6423 |
+ return -EFAULT; |
6424 |
+ return __put_v4l2_format32(&kp->format, &up->format); |
6425 |
+ } |
6426 |
+@@ -317,25 +330,28 @@ struct v4l2_standard32 { |
6427 |
+ __u32 reserved[4]; |
6428 |
+ }; |
6429 |
+ |
6430 |
+-static int get_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up) |
6431 |
++static int get_v4l2_standard32(struct v4l2_standard __user *kp, |
6432 |
++ struct v4l2_standard32 __user *up) |
6433 |
+ { |
6434 |
+ /* other fields are not set by the user, nor used by the driver */ |
6435 |
+- if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_standard32)) || |
6436 |
+- get_user(kp->index, &up->index)) |
6437 |
++ if (!access_ok(VERIFY_READ, up, sizeof(*up)) || |
6438 |
++ assign_in_user(&kp->index, &up->index)) |
6439 |
+ return -EFAULT; |
6440 |
+ return 0; |
6441 |
+ } |
6442 |
+ |
6443 |
+-static int put_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up) |
6444 |
++static int put_v4l2_standard32(struct v4l2_standard __user *kp, |
6445 |
++ struct v4l2_standard32 __user *up) |
6446 |
+ { |
6447 |
+- if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_standard32)) || |
6448 |
+- put_user(kp->index, &up->index) || |
6449 |
+- put_user(kp->id, &up->id) || |
6450 |
+- copy_to_user(up->name, kp->name, 24) || |
6451 |
+- copy_to_user(&up->frameperiod, &kp->frameperiod, sizeof(kp->frameperiod)) || |
6452 |
+- put_user(kp->framelines, &up->framelines) || |
6453 |
+- copy_to_user(up->reserved, kp->reserved, 4 * sizeof(__u32))) |
6454 |
+- return -EFAULT; |
6455 |
++ if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || |
6456 |
++ assign_in_user(&up->index, &kp->index) || |
6457 |
++ assign_in_user(&up->id, &kp->id) || |
6458 |
++ copy_in_user(up->name, kp->name, sizeof(up->name)) || |
6459 |
++ copy_in_user(&up->frameperiod, &kp->frameperiod, |
6460 |
++ sizeof(up->frameperiod)) || |
6461 |
++ assign_in_user(&up->framelines, &kp->framelines) || |
6462 |
++ copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved))) |
6463 |
++ return -EFAULT; |
6464 |
+ return 0; |
6465 |
+ } |
6466 |
+ |
6467 |
+@@ -374,136 +390,186 @@ struct v4l2_buffer32 { |
6468 |
+ __u32 reserved; |
6469 |
+ }; |
6470 |
+ |
6471 |
+-static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32, |
6472 |
+- enum v4l2_memory memory) |
6473 |
++static int get_v4l2_plane32(struct v4l2_plane __user *up, |
6474 |
++ struct v4l2_plane32 __user *up32, |
6475 |
++ enum v4l2_memory memory) |
6476 |
+ { |
6477 |
+- void __user *up_pln; |
6478 |
+- compat_long_t p; |
6479 |
++ compat_ulong_t p; |
6480 |
+ |
6481 |
+ if (copy_in_user(up, up32, 2 * sizeof(__u32)) || |
6482 |
+- copy_in_user(&up->data_offset, &up32->data_offset, |
6483 |
+- sizeof(__u32))) |
6484 |
++ copy_in_user(&up->data_offset, &up32->data_offset, |
6485 |
++ sizeof(up->data_offset))) |
6486 |
+ return -EFAULT; |
6487 |
+ |
6488 |
+- if (memory == V4L2_MEMORY_USERPTR) { |
6489 |
+- if (get_user(p, &up32->m.userptr)) |
6490 |
+- return -EFAULT; |
6491 |
+- up_pln = compat_ptr(p); |
6492 |
+- if (put_user((unsigned long)up_pln, &up->m.userptr)) |
6493 |
++ switch (memory) { |
6494 |
++ case V4L2_MEMORY_MMAP: |
6495 |
++ case V4L2_MEMORY_OVERLAY: |
6496 |
++ if (copy_in_user(&up->m.mem_offset, &up32->m.mem_offset, |
6497 |
++ sizeof(up32->m.mem_offset))) |
6498 |
+ return -EFAULT; |
6499 |
+- } else if (memory == V4L2_MEMORY_DMABUF) { |
6500 |
+- if (copy_in_user(&up->m.fd, &up32->m.fd, sizeof(int))) |
6501 |
++ break; |
6502 |
++ case V4L2_MEMORY_USERPTR: |
6503 |
++ if (get_user(p, &up32->m.userptr) || |
6504 |
++ put_user((unsigned long)compat_ptr(p), &up->m.userptr)) |
6505 |
+ return -EFAULT; |
6506 |
+- } else { |
6507 |
+- if (copy_in_user(&up->m.mem_offset, &up32->m.mem_offset, |
6508 |
+- sizeof(__u32))) |
6509 |
++ break; |
6510 |
++ case V4L2_MEMORY_DMABUF: |
6511 |
++ if (copy_in_user(&up->m.fd, &up32->m.fd, sizeof(up32->m.fd))) |
6512 |
+ return -EFAULT; |
6513 |
++ break; |
6514 |
+ } |
6515 |
+ |
6516 |
+ return 0; |
6517 |
+ } |
6518 |
+ |
6519 |
+-static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32, |
6520 |
+- enum v4l2_memory memory) |
6521 |
++static int put_v4l2_plane32(struct v4l2_plane __user *up, |
6522 |
++ struct v4l2_plane32 __user *up32, |
6523 |
++ enum v4l2_memory memory) |
6524 |
+ { |
6525 |
++ unsigned long p; |
6526 |
++ |
6527 |
+ if (copy_in_user(up32, up, 2 * sizeof(__u32)) || |
6528 |
+- copy_in_user(&up32->data_offset, &up->data_offset, |
6529 |
+- sizeof(__u32))) |
6530 |
++ copy_in_user(&up32->data_offset, &up->data_offset, |
6531 |
++ sizeof(up->data_offset))) |
6532 |
+ return -EFAULT; |
6533 |
+ |
6534 |
+- /* For MMAP, driver might've set up the offset, so copy it back. |
6535 |
+- * USERPTR stays the same (was userspace-provided), so no copying. */ |
6536 |
+- if (memory == V4L2_MEMORY_MMAP) |
6537 |
++ switch (memory) { |
6538 |
++ case V4L2_MEMORY_MMAP: |
6539 |
++ case V4L2_MEMORY_OVERLAY: |
6540 |
+ if (copy_in_user(&up32->m.mem_offset, &up->m.mem_offset, |
6541 |
+- sizeof(__u32))) |
6542 |
++ sizeof(up->m.mem_offset))) |
6543 |
++ return -EFAULT; |
6544 |
++ break; |
6545 |
++ case V4L2_MEMORY_USERPTR: |
6546 |
++ if (get_user(p, &up->m.userptr) || |
6547 |
++ put_user((compat_ulong_t)ptr_to_compat((__force void *)p), |
6548 |
++ &up32->m.userptr)) |
6549 |
+ return -EFAULT; |
6550 |
+- /* For DMABUF, driver might've set up the fd, so copy it back. */ |
6551 |
+- if (memory == V4L2_MEMORY_DMABUF) |
6552 |
+- if (copy_in_user(&up32->m.fd, &up->m.fd, |
6553 |
+- sizeof(int))) |
6554 |
++ break; |
6555 |
++ case V4L2_MEMORY_DMABUF: |
6556 |
++ if (copy_in_user(&up32->m.fd, &up->m.fd, sizeof(up->m.fd))) |
6557 |
+ return -EFAULT; |
6558 |
++ break; |
6559 |
++ } |
6560 |
++ |
6561 |
++ return 0; |
6562 |
++} |
6563 |
++ |
6564 |
++static int bufsize_v4l2_buffer(struct v4l2_buffer32 __user *up, u32 *size) |
6565 |
++{ |
6566 |
++ u32 type; |
6567 |
++ u32 length; |
6568 |
++ |
6569 |
++ if (!access_ok(VERIFY_READ, up, sizeof(*up)) || |
6570 |
++ get_user(type, &up->type) || |
6571 |
++ get_user(length, &up->length)) |
6572 |
++ return -EFAULT; |
6573 |
+ |
6574 |
++ if (V4L2_TYPE_IS_MULTIPLANAR(type)) { |
6575 |
++ if (length > VIDEO_MAX_PLANES) |
6576 |
++ return -EINVAL; |
6577 |
++ |
6578 |
++ /* |
6579 |
++ * We don't really care if userspace decides to kill itself |
6580 |
++ * by passing a very big length value |
6581 |
++ */ |
6582 |
++ *size = length * sizeof(struct v4l2_plane); |
6583 |
++ } else { |
6584 |
++ *size = 0; |
6585 |
++ } |
6586 |
+ return 0; |
6587 |
+ } |
6588 |
+ |
6589 |
+-static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up) |
6590 |
++static int get_v4l2_buffer32(struct v4l2_buffer __user *kp, |
6591 |
++ struct v4l2_buffer32 __user *up, |
6592 |
++ void __user *aux_buf, u32 aux_space) |
6593 |
+ { |
6594 |
++ u32 type; |
6595 |
++ u32 length; |
6596 |
++ enum v4l2_memory memory; |
6597 |
+ struct v4l2_plane32 __user *uplane32; |
6598 |
+ struct v4l2_plane __user *uplane; |
6599 |
+ compat_caddr_t p; |
6600 |
+ int ret; |
6601 |
+ |
6602 |
+- if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_buffer32)) || |
6603 |
+- get_user(kp->index, &up->index) || |
6604 |
+- get_user(kp->type, &up->type) || |
6605 |
+- get_user(kp->flags, &up->flags) || |
6606 |
+- get_user(kp->memory, &up->memory) || |
6607 |
+- get_user(kp->length, &up->length)) |
6608 |
+- return -EFAULT; |
6609 |
++ if (!access_ok(VERIFY_READ, up, sizeof(*up)) || |
6610 |
++ assign_in_user(&kp->index, &up->index) || |
6611 |
++ get_user(type, &up->type) || |
6612 |
++ put_user(type, &kp->type) || |
6613 |
++ assign_in_user(&kp->flags, &up->flags) || |
6614 |
++ get_user(memory, &up->memory) || |
6615 |
++ put_user(memory, &kp->memory) || |
6616 |
++ get_user(length, &up->length) || |
6617 |
++ put_user(length, &kp->length)) |
6618 |
++ return -EFAULT; |
6619 |
+ |
6620 |
+- if (V4L2_TYPE_IS_OUTPUT(kp->type)) |
6621 |
+- if (get_user(kp->bytesused, &up->bytesused) || |
6622 |
+- get_user(kp->field, &up->field) || |
6623 |
+- get_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) || |
6624 |
+- get_user(kp->timestamp.tv_usec, |
6625 |
+- &up->timestamp.tv_usec)) |
6626 |
++ if (V4L2_TYPE_IS_OUTPUT(type)) |
6627 |
++ if (assign_in_user(&kp->bytesused, &up->bytesused) || |
6628 |
++ assign_in_user(&kp->field, &up->field) || |
6629 |
++ assign_in_user(&kp->timestamp.tv_sec, |
6630 |
++ &up->timestamp.tv_sec) || |
6631 |
++ assign_in_user(&kp->timestamp.tv_usec, |
6632 |
++ &up->timestamp.tv_usec)) |
6633 |
+ return -EFAULT; |
6634 |
+ |
6635 |
+- if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) { |
6636 |
+- unsigned int num_planes; |
6637 |
++ if (V4L2_TYPE_IS_MULTIPLANAR(type)) { |
6638 |
++ u32 num_planes = length; |
6639 |
+ |
6640 |
+- if (kp->length == 0) { |
6641 |
+- kp->m.planes = NULL; |
6642 |
+- /* num_planes == 0 is legal, e.g. when userspace doesn't |
6643 |
+- * need planes array on DQBUF*/ |
6644 |
+- return 0; |
6645 |
+- } else if (kp->length > VIDEO_MAX_PLANES) { |
6646 |
+- return -EINVAL; |
6647 |
++ if (num_planes == 0) { |
6648 |
++ /* |
6649 |
++ * num_planes == 0 is legal, e.g. when userspace doesn't |
6650 |
++ * need planes array on DQBUF |
6651 |
++ */ |
6652 |
++ return put_user(NULL, &kp->m.planes); |
6653 |
+ } |
6654 |
++ if (num_planes > VIDEO_MAX_PLANES) |
6655 |
++ return -EINVAL; |
6656 |
+ |
6657 |
+ if (get_user(p, &up->m.planes)) |
6658 |
+ return -EFAULT; |
6659 |
+ |
6660 |
+ uplane32 = compat_ptr(p); |
6661 |
+ if (!access_ok(VERIFY_READ, uplane32, |
6662 |
+- kp->length * sizeof(struct v4l2_plane32))) |
6663 |
++ num_planes * sizeof(*uplane32))) |
6664 |
+ return -EFAULT; |
6665 |
+ |
6666 |
+- /* We don't really care if userspace decides to kill itself |
6667 |
+- * by passing a very big num_planes value */ |
6668 |
+- uplane = compat_alloc_user_space(kp->length * |
6669 |
+- sizeof(struct v4l2_plane)); |
6670 |
+- kp->m.planes = (__force struct v4l2_plane *)uplane; |
6671 |
++ /* |
6672 |
++ * We don't really care if userspace decides to kill itself |
6673 |
++ * by passing a very big num_planes value |
6674 |
++ */ |
6675 |
++ if (aux_space < num_planes * sizeof(*uplane)) |
6676 |
++ return -EFAULT; |
6677 |
+ |
6678 |
+- for (num_planes = 0; num_planes < kp->length; num_planes++) { |
6679 |
+- ret = get_v4l2_plane32(uplane, uplane32, kp->memory); |
6680 |
++ uplane = aux_buf; |
6681 |
++ if (put_user((__force struct v4l2_plane *)uplane, |
6682 |
++ &kp->m.planes)) |
6683 |
++ return -EFAULT; |
6684 |
++ |
6685 |
++ while (num_planes--) { |
6686 |
++ ret = get_v4l2_plane32(uplane, uplane32, memory); |
6687 |
+ if (ret) |
6688 |
+ return ret; |
6689 |
+- ++uplane; |
6690 |
+- ++uplane32; |
6691 |
++ uplane++; |
6692 |
++ uplane32++; |
6693 |
+ } |
6694 |
+ } else { |
6695 |
+- switch (kp->memory) { |
6696 |
++ switch (memory) { |
6697 |
+ case V4L2_MEMORY_MMAP: |
6698 |
+- if (get_user(kp->m.offset, &up->m.offset)) |
6699 |
++ case V4L2_MEMORY_OVERLAY: |
6700 |
++ if (assign_in_user(&kp->m.offset, &up->m.offset)) |
6701 |
+ return -EFAULT; |
6702 |
+ break; |
6703 |
+- case V4L2_MEMORY_USERPTR: |
6704 |
+- { |
6705 |
+- compat_long_t tmp; |
6706 |
++ case V4L2_MEMORY_USERPTR: { |
6707 |
++ compat_ulong_t userptr; |
6708 |
+ |
6709 |
+- if (get_user(tmp, &up->m.userptr)) |
6710 |
+- return -EFAULT; |
6711 |
+- |
6712 |
+- kp->m.userptr = (unsigned long)compat_ptr(tmp); |
6713 |
+- } |
6714 |
+- break; |
6715 |
+- case V4L2_MEMORY_OVERLAY: |
6716 |
+- if (get_user(kp->m.offset, &up->m.offset)) |
6717 |
++ if (get_user(userptr, &up->m.userptr) || |
6718 |
++ put_user((unsigned long)compat_ptr(userptr), |
6719 |
++ &kp->m.userptr)) |
6720 |
+ return -EFAULT; |
6721 |
+ break; |
6722 |
++ } |
6723 |
+ case V4L2_MEMORY_DMABUF: |
6724 |
+- if (get_user(kp->m.fd, &up->m.fd)) |
6725 |
++ if (assign_in_user(&kp->m.fd, &up->m.fd)) |
6726 |
+ return -EFAULT; |
6727 |
+ break; |
6728 |
+ } |
6729 |
+@@ -512,65 +578,70 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user |
6730 |
+ return 0; |
6731 |
+ } |
6732 |
+ |
6733 |
+-static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up) |
6734 |
++static int put_v4l2_buffer32(struct v4l2_buffer __user *kp, |
6735 |
++ struct v4l2_buffer32 __user *up) |
6736 |
+ { |
6737 |
++ u32 type; |
6738 |
++ u32 length; |
6739 |
++ enum v4l2_memory memory; |
6740 |
+ struct v4l2_plane32 __user *uplane32; |
6741 |
+ struct v4l2_plane __user *uplane; |
6742 |
+ compat_caddr_t p; |
6743 |
+- int num_planes; |
6744 |
+ int ret; |
6745 |
+ |
6746 |
+- if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_buffer32)) || |
6747 |
+- put_user(kp->index, &up->index) || |
6748 |
+- put_user(kp->type, &up->type) || |
6749 |
+- put_user(kp->flags, &up->flags) || |
6750 |
+- put_user(kp->memory, &up->memory)) |
6751 |
+- return -EFAULT; |
6752 |
++ if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || |
6753 |
++ assign_in_user(&up->index, &kp->index) || |
6754 |
++ get_user(type, &kp->type) || |
6755 |
++ put_user(type, &up->type) || |
6756 |
++ assign_in_user(&up->flags, &kp->flags) || |
6757 |
++ get_user(memory, &kp->memory) || |
6758 |
++ put_user(memory, &up->memory)) |
6759 |
++ return -EFAULT; |
6760 |
+ |
6761 |
+- if (put_user(kp->bytesused, &up->bytesused) || |
6762 |
+- put_user(kp->field, &up->field) || |
6763 |
+- put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) || |
6764 |
+- put_user(kp->timestamp.tv_usec, &up->timestamp.tv_usec) || |
6765 |
+- copy_to_user(&up->timecode, &kp->timecode, sizeof(struct v4l2_timecode)) || |
6766 |
+- put_user(kp->sequence, &up->sequence) || |
6767 |
+- put_user(kp->reserved2, &up->reserved2) || |
6768 |
+- put_user(kp->reserved, &up->reserved) || |
6769 |
+- put_user(kp->length, &up->length)) |
6770 |
+- return -EFAULT; |
6771 |
++ if (assign_in_user(&up->bytesused, &kp->bytesused) || |
6772 |
++ assign_in_user(&up->field, &kp->field) || |
6773 |
++ assign_in_user(&up->timestamp.tv_sec, &kp->timestamp.tv_sec) || |
6774 |
++ assign_in_user(&up->timestamp.tv_usec, &kp->timestamp.tv_usec) || |
6775 |
++ copy_in_user(&up->timecode, &kp->timecode, sizeof(kp->timecode)) || |
6776 |
++ assign_in_user(&up->sequence, &kp->sequence) || |
6777 |
++ assign_in_user(&up->reserved2, &kp->reserved2) || |
6778 |
++ assign_in_user(&up->reserved, &kp->reserved) || |
6779 |
++ get_user(length, &kp->length) || |
6780 |
++ put_user(length, &up->length)) |
6781 |
++ return -EFAULT; |
6782 |
++ |
6783 |
++ if (V4L2_TYPE_IS_MULTIPLANAR(type)) { |
6784 |
++ u32 num_planes = length; |
6785 |
+ |
6786 |
+- if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) { |
6787 |
+- num_planes = kp->length; |
6788 |
+ if (num_planes == 0) |
6789 |
+ return 0; |
6790 |
+ |
6791 |
+- uplane = (__force struct v4l2_plane __user *)kp->m.planes; |
6792 |
++ if (get_user(uplane, ((__force struct v4l2_plane __user **)&kp->m.planes))) |
6793 |
++ return -EFAULT; |
6794 |
+ if (get_user(p, &up->m.planes)) |
6795 |
+ return -EFAULT; |
6796 |
+ uplane32 = compat_ptr(p); |
6797 |
+ |
6798 |
+- while (--num_planes >= 0) { |
6799 |
+- ret = put_v4l2_plane32(uplane, uplane32, kp->memory); |
6800 |
++ while (num_planes--) { |
6801 |
++ ret = put_v4l2_plane32(uplane, uplane32, memory); |
6802 |
+ if (ret) |
6803 |
+ return ret; |
6804 |
+ ++uplane; |
6805 |
+ ++uplane32; |
6806 |
+ } |
6807 |
+ } else { |
6808 |
+- switch (kp->memory) { |
6809 |
++ switch (memory) { |
6810 |
+ case V4L2_MEMORY_MMAP: |
6811 |
+- if (put_user(kp->m.offset, &up->m.offset)) |
6812 |
++ case V4L2_MEMORY_OVERLAY: |
6813 |
++ if (assign_in_user(&up->m.offset, &kp->m.offset)) |
6814 |
+ return -EFAULT; |
6815 |
+ break; |
6816 |
+ case V4L2_MEMORY_USERPTR: |
6817 |
+- if (put_user(kp->m.userptr, &up->m.userptr)) |
6818 |
+- return -EFAULT; |
6819 |
+- break; |
6820 |
+- case V4L2_MEMORY_OVERLAY: |
6821 |
+- if (put_user(kp->m.offset, &up->m.offset)) |
6822 |
++ if (assign_in_user(&up->m.userptr, &kp->m.userptr)) |
6823 |
+ return -EFAULT; |
6824 |
+ break; |
6825 |
+ case V4L2_MEMORY_DMABUF: |
6826 |
+- if (put_user(kp->m.fd, &up->m.fd)) |
6827 |
++ if (assign_in_user(&up->m.fd, &kp->m.fd)) |
6828 |
+ return -EFAULT; |
6829 |
+ break; |
6830 |
+ } |
6831 |
+@@ -595,30 +666,33 @@ struct v4l2_framebuffer32 { |
6832 |
+ } fmt; |
6833 |
+ }; |
6834 |
+ |
6835 |
+-static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_framebuffer32 __user *up) |
6836 |
++static int get_v4l2_framebuffer32(struct v4l2_framebuffer __user *kp, |
6837 |
++ struct v4l2_framebuffer32 __user *up) |
6838 |
+ { |
6839 |
+- u32 tmp; |
6840 |
+- |
6841 |
+- if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_framebuffer32)) || |
6842 |
+- get_user(tmp, &up->base) || |
6843 |
+- get_user(kp->capability, &up->capability) || |
6844 |
+- get_user(kp->flags, &up->flags) || |
6845 |
+- copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt))) |
6846 |
+- return -EFAULT; |
6847 |
+- kp->base = (__force void *)compat_ptr(tmp); |
6848 |
++ compat_caddr_t tmp; |
6849 |
++ |
6850 |
++ if (!access_ok(VERIFY_READ, up, sizeof(*up)) || |
6851 |
++ get_user(tmp, &up->base) || |
6852 |
++ put_user((__force void *)compat_ptr(tmp), &kp->base) || |
6853 |
++ assign_in_user(&kp->capability, &up->capability) || |
6854 |
++ assign_in_user(&kp->flags, &up->flags) || |
6855 |
++ copy_in_user(&kp->fmt, &up->fmt, sizeof(kp->fmt))) |
6856 |
++ return -EFAULT; |
6857 |
+ return 0; |
6858 |
+ } |
6859 |
+ |
6860 |
+-static int put_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_framebuffer32 __user *up) |
6861 |
++static int put_v4l2_framebuffer32(struct v4l2_framebuffer __user *kp, |
6862 |
++ struct v4l2_framebuffer32 __user *up) |
6863 |
+ { |
6864 |
+- u32 tmp = (u32)((unsigned long)kp->base); |
6865 |
+- |
6866 |
+- if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_framebuffer32)) || |
6867 |
+- put_user(tmp, &up->base) || |
6868 |
+- put_user(kp->capability, &up->capability) || |
6869 |
+- put_user(kp->flags, &up->flags) || |
6870 |
+- copy_to_user(&up->fmt, &kp->fmt, sizeof(up->fmt))) |
6871 |
+- return -EFAULT; |
6872 |
++ void *base; |
6873 |
++ |
6874 |
++ if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || |
6875 |
++ get_user(base, &kp->base) || |
6876 |
++ put_user(ptr_to_compat(base), &up->base) || |
6877 |
++ assign_in_user(&up->capability, &kp->capability) || |
6878 |
++ assign_in_user(&up->flags, &kp->flags) || |
6879 |
++ copy_in_user(&up->fmt, &kp->fmt, sizeof(kp->fmt))) |
6880 |
++ return -EFAULT; |
6881 |
+ return 0; |
6882 |
+ } |
6883 |
+ |
6884 |
+@@ -634,18 +708,22 @@ struct v4l2_input32 { |
6885 |
+ __u32 reserved[3]; |
6886 |
+ }; |
6887 |
+ |
6888 |
+-/* The 64-bit v4l2_input struct has extra padding at the end of the struct. |
6889 |
+- Otherwise it is identical to the 32-bit version. */ |
6890 |
+-static inline int get_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __user *up) |
6891 |
++/* |
6892 |
++ * The 64-bit v4l2_input struct has extra padding at the end of the struct. |
6893 |
++ * Otherwise it is identical to the 32-bit version. |
6894 |
++ */ |
6895 |
++static inline int get_v4l2_input32(struct v4l2_input __user *kp, |
6896 |
++ struct v4l2_input32 __user *up) |
6897 |
+ { |
6898 |
+- if (copy_from_user(kp, up, sizeof(struct v4l2_input32))) |
6899 |
++ if (copy_in_user(kp, up, sizeof(*up))) |
6900 |
+ return -EFAULT; |
6901 |
+ return 0; |
6902 |
+ } |
6903 |
+ |
6904 |
+-static inline int put_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __user *up) |
6905 |
++static inline int put_v4l2_input32(struct v4l2_input __user *kp, |
6906 |
++ struct v4l2_input32 __user *up) |
6907 |
+ { |
6908 |
+- if (copy_to_user(up, kp, sizeof(struct v4l2_input32))) |
6909 |
++ if (copy_in_user(up, kp, sizeof(*up))) |
6910 |
+ return -EFAULT; |
6911 |
+ return 0; |
6912 |
+ } |
6913 |
+@@ -669,60 +747,95 @@ struct v4l2_ext_control32 { |
6914 |
+ }; |
6915 |
+ } __attribute__ ((packed)); |
6916 |
+ |
6917 |
+-/* The following function really belong in v4l2-common, but that causes |
6918 |
+- a circular dependency between modules. We need to think about this, but |
6919 |
+- for now this will do. */ |
6920 |
+- |
6921 |
+-/* Return non-zero if this control is a pointer type. Currently only |
6922 |
+- type STRING is a pointer type. */ |
6923 |
+-static inline int ctrl_is_pointer(u32 id) |
6924 |
++/* Return true if this control is a pointer type. */ |
6925 |
++static inline bool ctrl_is_pointer(struct file *file, u32 id) |
6926 |
+ { |
6927 |
+- switch (id) { |
6928 |
+- case V4L2_CID_RDS_TX_PS_NAME: |
6929 |
+- case V4L2_CID_RDS_TX_RADIO_TEXT: |
6930 |
+- return 1; |
6931 |
+- default: |
6932 |
+- return 0; |
6933 |
++ struct video_device *vdev = video_devdata(file); |
6934 |
++ struct v4l2_fh *fh = NULL; |
6935 |
++ struct v4l2_ctrl_handler *hdl = NULL; |
6936 |
++ struct v4l2_query_ext_ctrl qec = { id }; |
6937 |
++ const struct v4l2_ioctl_ops *ops = vdev->ioctl_ops; |
6938 |
++ |
6939 |
++ if (test_bit(V4L2_FL_USES_V4L2_FH, &vdev->flags)) |
6940 |
++ fh = file->private_data; |
6941 |
++ |
6942 |
++ if (fh && fh->ctrl_handler) |
6943 |
++ hdl = fh->ctrl_handler; |
6944 |
++ else if (vdev->ctrl_handler) |
6945 |
++ hdl = vdev->ctrl_handler; |
6946 |
++ |
6947 |
++ if (hdl) { |
6948 |
++ struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, id); |
6949 |
++ |
6950 |
++ return ctrl && ctrl->is_ptr; |
6951 |
+ } |
6952 |
++ |
6953 |
++ if (!ops || !ops->vidioc_query_ext_ctrl) |
6954 |
++ return false; |
6955 |
++ |
6956 |
++ return !ops->vidioc_query_ext_ctrl(file, fh, &qec) && |
6957 |
++ (qec.flags & V4L2_CTRL_FLAG_HAS_PAYLOAD); |
6958 |
++} |
6959 |
++ |
6960 |
++static int bufsize_v4l2_ext_controls(struct v4l2_ext_controls32 __user *up, |
6961 |
++ u32 *size) |
6962 |
++{ |
6963 |
++ u32 count; |
6964 |
++ |
6965 |
++ if (!access_ok(VERIFY_READ, up, sizeof(*up)) || |
6966 |
++ get_user(count, &up->count)) |
6967 |
++ return -EFAULT; |
6968 |
++ if (count > V4L2_CID_MAX_CTRLS) |
6969 |
++ return -EINVAL; |
6970 |
++ *size = count * sizeof(struct v4l2_ext_control); |
6971 |
++ return 0; |
6972 |
+ } |
6973 |
+ |
6974 |
+-static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up) |
6975 |
++static int get_v4l2_ext_controls32(struct file *file, |
6976 |
++ struct v4l2_ext_controls __user *kp, |
6977 |
++ struct v4l2_ext_controls32 __user *up, |
6978 |
++ void __user *aux_buf, u32 aux_space) |
6979 |
+ { |
6980 |
+ struct v4l2_ext_control32 __user *ucontrols; |
6981 |
+ struct v4l2_ext_control __user *kcontrols; |
6982 |
+- unsigned int n; |
6983 |
++ u32 count; |
6984 |
++ u32 n; |
6985 |
+ compat_caddr_t p; |
6986 |
+ |
6987 |
+- if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_ext_controls32)) || |
6988 |
+- get_user(kp->which, &up->which) || |
6989 |
+- get_user(kp->count, &up->count) || |
6990 |
+- get_user(kp->error_idx, &up->error_idx) || |
6991 |
+- copy_from_user(kp->reserved, up->reserved, |
6992 |
+- sizeof(kp->reserved))) |
6993 |
+- return -EFAULT; |
6994 |
+- if (kp->count == 0) { |
6995 |
+- kp->controls = NULL; |
6996 |
+- return 0; |
6997 |
+- } else if (kp->count > V4L2_CID_MAX_CTRLS) { |
6998 |
++ if (!access_ok(VERIFY_READ, up, sizeof(*up)) || |
6999 |
++ assign_in_user(&kp->which, &up->which) || |
7000 |
++ get_user(count, &up->count) || |
7001 |
++ put_user(count, &kp->count) || |
7002 |
++ assign_in_user(&kp->error_idx, &up->error_idx) || |
7003 |
++ copy_in_user(kp->reserved, up->reserved, sizeof(kp->reserved))) |
7004 |
++ return -EFAULT; |
7005 |
++ |
7006 |
++ if (count == 0) |
7007 |
++ return put_user(NULL, &kp->controls); |
7008 |
++ if (count > V4L2_CID_MAX_CTRLS) |
7009 |
+ return -EINVAL; |
7010 |
+- } |
7011 |
+ if (get_user(p, &up->controls)) |
7012 |
+ return -EFAULT; |
7013 |
+ ucontrols = compat_ptr(p); |
7014 |
+- if (!access_ok(VERIFY_READ, ucontrols, |
7015 |
+- kp->count * sizeof(struct v4l2_ext_control32))) |
7016 |
++ if (!access_ok(VERIFY_READ, ucontrols, count * sizeof(*ucontrols))) |
7017 |
+ return -EFAULT; |
7018 |
+- kcontrols = compat_alloc_user_space(kp->count * |
7019 |
+- sizeof(struct v4l2_ext_control)); |
7020 |
+- kp->controls = (__force struct v4l2_ext_control *)kcontrols; |
7021 |
+- for (n = 0; n < kp->count; n++) { |
7022 |
++ if (aux_space < count * sizeof(*kcontrols)) |
7023 |
++ return -EFAULT; |
7024 |
++ kcontrols = aux_buf; |
7025 |
++ if (put_user((__force struct v4l2_ext_control *)kcontrols, |
7026 |
++ &kp->controls)) |
7027 |
++ return -EFAULT; |
7028 |
++ |
7029 |
++ for (n = 0; n < count; n++) { |
7030 |
+ u32 id; |
7031 |
+ |
7032 |
+ if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols))) |
7033 |
+ return -EFAULT; |
7034 |
++ |
7035 |
+ if (get_user(id, &kcontrols->id)) |
7036 |
+ return -EFAULT; |
7037 |
+- if (ctrl_is_pointer(id)) { |
7038 |
++ |
7039 |
++ if (ctrl_is_pointer(file, id)) { |
7040 |
+ void __user *s; |
7041 |
+ |
7042 |
+ if (get_user(p, &ucontrols->string)) |
7043 |
+@@ -737,43 +850,55 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext |
7044 |
+ return 0; |
7045 |
+ } |
7046 |
+ |
7047 |
+-static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up) |
7048 |
++static int put_v4l2_ext_controls32(struct file *file, |
7049 |
++ struct v4l2_ext_controls __user *kp, |
7050 |
++ struct v4l2_ext_controls32 __user *up) |
7051 |
+ { |
7052 |
+ struct v4l2_ext_control32 __user *ucontrols; |
7053 |
+- struct v4l2_ext_control __user *kcontrols = |
7054 |
+- (__force struct v4l2_ext_control __user *)kp->controls; |
7055 |
+- int n = kp->count; |
7056 |
++ struct v4l2_ext_control __user *kcontrols; |
7057 |
++ u32 count; |
7058 |
++ u32 n; |
7059 |
+ compat_caddr_t p; |
7060 |
+ |
7061 |
+- if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_ext_controls32)) || |
7062 |
+- put_user(kp->which, &up->which) || |
7063 |
+- put_user(kp->count, &up->count) || |
7064 |
+- put_user(kp->error_idx, &up->error_idx) || |
7065 |
+- copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved))) |
7066 |
+- return -EFAULT; |
7067 |
+- if (!kp->count) |
7068 |
+- return 0; |
7069 |
++ if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || |
7070 |
++ assign_in_user(&up->which, &kp->which) || |
7071 |
++ get_user(count, &kp->count) || |
7072 |
++ put_user(count, &up->count) || |
7073 |
++ assign_in_user(&up->error_idx, &kp->error_idx) || |
7074 |
++ copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved)) || |
7075 |
++ get_user(kcontrols, &kp->controls)) |
7076 |
++ return -EFAULT; |
7077 |
+ |
7078 |
++ if (!count) |
7079 |
++ return 0; |
7080 |
+ if (get_user(p, &up->controls)) |
7081 |
+ return -EFAULT; |
7082 |
+ ucontrols = compat_ptr(p); |
7083 |
+- if (!access_ok(VERIFY_WRITE, ucontrols, |
7084 |
+- n * sizeof(struct v4l2_ext_control32))) |
7085 |
++ if (!access_ok(VERIFY_WRITE, ucontrols, count * sizeof(*ucontrols))) |
7086 |
+ return -EFAULT; |
7087 |
+ |
7088 |
+- while (--n >= 0) { |
7089 |
+- unsigned size = sizeof(*ucontrols); |
7090 |
++ for (n = 0; n < count; n++) { |
7091 |
++ unsigned int size = sizeof(*ucontrols); |
7092 |
+ u32 id; |
7093 |
+ |
7094 |
+- if (get_user(id, &kcontrols->id)) |
7095 |
++ if (get_user(id, &kcontrols->id) || |
7096 |
++ put_user(id, &ucontrols->id) || |
7097 |
++ assign_in_user(&ucontrols->size, &kcontrols->size) || |
7098 |
++ copy_in_user(&ucontrols->reserved2, &kcontrols->reserved2, |
7099 |
++ sizeof(ucontrols->reserved2))) |
7100 |
+ return -EFAULT; |
7101 |
+- /* Do not modify the pointer when copying a pointer control. |
7102 |
+- The contents of the pointer was changed, not the pointer |
7103 |
+- itself. */ |
7104 |
+- if (ctrl_is_pointer(id)) |
7105 |
++ |
7106 |
++ /* |
7107 |
++ * Do not modify the pointer when copying a pointer control. |
7108 |
++ * The contents of the pointer was changed, not the pointer |
7109 |
++ * itself. |
7110 |
++ */ |
7111 |
++ if (ctrl_is_pointer(file, id)) |
7112 |
+ size -= sizeof(ucontrols->value64); |
7113 |
++ |
7114 |
+ if (copy_in_user(ucontrols, kcontrols, size)) |
7115 |
+ return -EFAULT; |
7116 |
++ |
7117 |
+ ucontrols++; |
7118 |
+ kcontrols++; |
7119 |
+ } |
7120 |
+@@ -793,18 +918,19 @@ struct v4l2_event32 { |
7121 |
+ __u32 reserved[8]; |
7122 |
+ }; |
7123 |
+ |
7124 |
+-static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *up) |
7125 |
++static int put_v4l2_event32(struct v4l2_event __user *kp, |
7126 |
++ struct v4l2_event32 __user *up) |
7127 |
+ { |
7128 |
+- if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_event32)) || |
7129 |
+- put_user(kp->type, &up->type) || |
7130 |
+- copy_to_user(&up->u, &kp->u, sizeof(kp->u)) || |
7131 |
+- put_user(kp->pending, &up->pending) || |
7132 |
+- put_user(kp->sequence, &up->sequence) || |
7133 |
+- put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) || |
7134 |
+- put_user(kp->timestamp.tv_nsec, &up->timestamp.tv_nsec) || |
7135 |
+- put_user(kp->id, &up->id) || |
7136 |
+- copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32))) |
7137 |
+- return -EFAULT; |
7138 |
++ if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || |
7139 |
++ assign_in_user(&up->type, &kp->type) || |
7140 |
++ copy_in_user(&up->u, &kp->u, sizeof(kp->u)) || |
7141 |
++ assign_in_user(&up->pending, &kp->pending) || |
7142 |
++ assign_in_user(&up->sequence, &kp->sequence) || |
7143 |
++ assign_in_user(&up->timestamp.tv_sec, &kp->timestamp.tv_sec) || |
7144 |
++ assign_in_user(&up->timestamp.tv_nsec, &kp->timestamp.tv_nsec) || |
7145 |
++ assign_in_user(&up->id, &kp->id) || |
7146 |
++ copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved))) |
7147 |
++ return -EFAULT; |
7148 |
+ return 0; |
7149 |
+ } |
7150 |
+ |
7151 |
+@@ -816,32 +942,35 @@ struct v4l2_edid32 { |
7152 |
+ compat_caddr_t edid; |
7153 |
+ }; |
7154 |
+ |
7155 |
+-static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up) |
7156 |
++static int get_v4l2_edid32(struct v4l2_edid __user *kp, |
7157 |
++ struct v4l2_edid32 __user *up) |
7158 |
+ { |
7159 |
+- u32 tmp; |
7160 |
+- |
7161 |
+- if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_edid32)) || |
7162 |
+- get_user(kp->pad, &up->pad) || |
7163 |
+- get_user(kp->start_block, &up->start_block) || |
7164 |
+- get_user(kp->blocks, &up->blocks) || |
7165 |
+- get_user(tmp, &up->edid) || |
7166 |
+- copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved))) |
7167 |
+- return -EFAULT; |
7168 |
+- kp->edid = (__force u8 *)compat_ptr(tmp); |
7169 |
++ compat_uptr_t tmp; |
7170 |
++ |
7171 |
++ if (!access_ok(VERIFY_READ, up, sizeof(*up)) || |
7172 |
++ assign_in_user(&kp->pad, &up->pad) || |
7173 |
++ assign_in_user(&kp->start_block, &up->start_block) || |
7174 |
++ assign_in_user(&kp->blocks, &up->blocks) || |
7175 |
++ get_user(tmp, &up->edid) || |
7176 |
++ put_user(compat_ptr(tmp), &kp->edid) || |
7177 |
++ copy_in_user(kp->reserved, up->reserved, sizeof(kp->reserved))) |
7178 |
++ return -EFAULT; |
7179 |
+ return 0; |
7180 |
+ } |
7181 |
+ |
7182 |
+-static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up) |
7183 |
++static int put_v4l2_edid32(struct v4l2_edid __user *kp, |
7184 |
++ struct v4l2_edid32 __user *up) |
7185 |
+ { |
7186 |
+- u32 tmp = (u32)((unsigned long)kp->edid); |
7187 |
+- |
7188 |
+- if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_edid32)) || |
7189 |
+- put_user(kp->pad, &up->pad) || |
7190 |
+- put_user(kp->start_block, &up->start_block) || |
7191 |
+- put_user(kp->blocks, &up->blocks) || |
7192 |
+- put_user(tmp, &up->edid) || |
7193 |
+- copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved))) |
7194 |
+- return -EFAULT; |
7195 |
++ void *edid; |
7196 |
++ |
7197 |
++ if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || |
7198 |
++ assign_in_user(&up->pad, &kp->pad) || |
7199 |
++ assign_in_user(&up->start_block, &kp->start_block) || |
7200 |
++ assign_in_user(&up->blocks, &kp->blocks) || |
7201 |
++ get_user(edid, &kp->edid) || |
7202 |
++ put_user(ptr_to_compat(edid), &up->edid) || |
7203 |
++ copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved))) |
7204 |
++ return -EFAULT; |
7205 |
+ return 0; |
7206 |
+ } |
7207 |
+ |
7208 |
+@@ -873,22 +1002,23 @@ static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up) |
7209 |
+ #define VIDIOC_G_OUTPUT32 _IOR ('V', 46, s32) |
7210 |
+ #define VIDIOC_S_OUTPUT32 _IOWR('V', 47, s32) |
7211 |
+ |
7212 |
++static int alloc_userspace(unsigned int size, u32 aux_space, |
7213 |
++ void __user **up_native) |
7214 |
++{ |
7215 |
++ *up_native = compat_alloc_user_space(size + aux_space); |
7216 |
++ if (!*up_native) |
7217 |
++ return -ENOMEM; |
7218 |
++ if (clear_user(*up_native, size)) |
7219 |
++ return -EFAULT; |
7220 |
++ return 0; |
7221 |
++} |
7222 |
++ |
7223 |
+ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
7224 |
+ { |
7225 |
+- union { |
7226 |
+- struct v4l2_format v2f; |
7227 |
+- struct v4l2_buffer v2b; |
7228 |
+- struct v4l2_framebuffer v2fb; |
7229 |
+- struct v4l2_input v2i; |
7230 |
+- struct v4l2_standard v2s; |
7231 |
+- struct v4l2_ext_controls v2ecs; |
7232 |
+- struct v4l2_event v2ev; |
7233 |
+- struct v4l2_create_buffers v2crt; |
7234 |
+- struct v4l2_edid v2edid; |
7235 |
+- unsigned long vx; |
7236 |
+- int vi; |
7237 |
+- } karg; |
7238 |
+ void __user *up = compat_ptr(arg); |
7239 |
++ void __user *up_native = NULL; |
7240 |
++ void __user *aux_buf; |
7241 |
++ u32 aux_space; |
7242 |
+ int compatible_arg = 1; |
7243 |
+ long err = 0; |
7244 |
+ |
7245 |
+@@ -927,30 +1057,52 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar |
7246 |
+ case VIDIOC_STREAMOFF: |
7247 |
+ case VIDIOC_S_INPUT: |
7248 |
+ case VIDIOC_S_OUTPUT: |
7249 |
+- err = get_user(karg.vi, (s32 __user *)up); |
7250 |
++ err = alloc_userspace(sizeof(unsigned int), 0, &up_native); |
7251 |
++ if (!err && assign_in_user((unsigned int __user *)up_native, |
7252 |
++ (compat_uint_t __user *)up)) |
7253 |
++ err = -EFAULT; |
7254 |
+ compatible_arg = 0; |
7255 |
+ break; |
7256 |
+ |
7257 |
+ case VIDIOC_G_INPUT: |
7258 |
+ case VIDIOC_G_OUTPUT: |
7259 |
++ err = alloc_userspace(sizeof(unsigned int), 0, &up_native); |
7260 |
+ compatible_arg = 0; |
7261 |
+ break; |
7262 |
+ |
7263 |
+ case VIDIOC_G_EDID: |
7264 |
+ case VIDIOC_S_EDID: |
7265 |
+- err = get_v4l2_edid32(&karg.v2edid, up); |
7266 |
++ err = alloc_userspace(sizeof(struct v4l2_edid), 0, &up_native); |
7267 |
++ if (!err) |
7268 |
++ err = get_v4l2_edid32(up_native, up); |
7269 |
+ compatible_arg = 0; |
7270 |
+ break; |
7271 |
+ |
7272 |
+ case VIDIOC_G_FMT: |
7273 |
+ case VIDIOC_S_FMT: |
7274 |
+ case VIDIOC_TRY_FMT: |
7275 |
+- err = get_v4l2_format32(&karg.v2f, up); |
7276 |
++ err = bufsize_v4l2_format(up, &aux_space); |
7277 |
++ if (!err) |
7278 |
++ err = alloc_userspace(sizeof(struct v4l2_format), |
7279 |
++ aux_space, &up_native); |
7280 |
++ if (!err) { |
7281 |
++ aux_buf = up_native + sizeof(struct v4l2_format); |
7282 |
++ err = get_v4l2_format32(up_native, up, |
7283 |
++ aux_buf, aux_space); |
7284 |
++ } |
7285 |
+ compatible_arg = 0; |
7286 |
+ break; |
7287 |
+ |
7288 |
+ case VIDIOC_CREATE_BUFS: |
7289 |
+- err = get_v4l2_create32(&karg.v2crt, up); |
7290 |
++ err = bufsize_v4l2_create(up, &aux_space); |
7291 |
++ if (!err) |
7292 |
++ err = alloc_userspace(sizeof(struct v4l2_create_buffers), |
7293 |
++ aux_space, &up_native); |
7294 |
++ if (!err) { |
7295 |
++ aux_buf = up_native + sizeof(struct v4l2_create_buffers); |
7296 |
++ err = get_v4l2_create32(up_native, up, |
7297 |
++ aux_buf, aux_space); |
7298 |
++ } |
7299 |
+ compatible_arg = 0; |
7300 |
+ break; |
7301 |
+ |
7302 |
+@@ -958,36 +1110,63 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar |
7303 |
+ case VIDIOC_QUERYBUF: |
7304 |
+ case VIDIOC_QBUF: |
7305 |
+ case VIDIOC_DQBUF: |
7306 |
+- err = get_v4l2_buffer32(&karg.v2b, up); |
7307 |
++ err = bufsize_v4l2_buffer(up, &aux_space); |
7308 |
++ if (!err) |
7309 |
++ err = alloc_userspace(sizeof(struct v4l2_buffer), |
7310 |
++ aux_space, &up_native); |
7311 |
++ if (!err) { |
7312 |
++ aux_buf = up_native + sizeof(struct v4l2_buffer); |
7313 |
++ err = get_v4l2_buffer32(up_native, up, |
7314 |
++ aux_buf, aux_space); |
7315 |
++ } |
7316 |
+ compatible_arg = 0; |
7317 |
+ break; |
7318 |
+ |
7319 |
+ case VIDIOC_S_FBUF: |
7320 |
+- err = get_v4l2_framebuffer32(&karg.v2fb, up); |
7321 |
++ err = alloc_userspace(sizeof(struct v4l2_framebuffer), 0, |
7322 |
++ &up_native); |
7323 |
++ if (!err) |
7324 |
++ err = get_v4l2_framebuffer32(up_native, up); |
7325 |
+ compatible_arg = 0; |
7326 |
+ break; |
7327 |
+ |
7328 |
+ case VIDIOC_G_FBUF: |
7329 |
++ err = alloc_userspace(sizeof(struct v4l2_framebuffer), 0, |
7330 |
++ &up_native); |
7331 |
+ compatible_arg = 0; |
7332 |
+ break; |
7333 |
+ |
7334 |
+ case VIDIOC_ENUMSTD: |
7335 |
+- err = get_v4l2_standard32(&karg.v2s, up); |
7336 |
++ err = alloc_userspace(sizeof(struct v4l2_standard), 0, |
7337 |
++ &up_native); |
7338 |
++ if (!err) |
7339 |
++ err = get_v4l2_standard32(up_native, up); |
7340 |
+ compatible_arg = 0; |
7341 |
+ break; |
7342 |
+ |
7343 |
+ case VIDIOC_ENUMINPUT: |
7344 |
+- err = get_v4l2_input32(&karg.v2i, up); |
7345 |
++ err = alloc_userspace(sizeof(struct v4l2_input), 0, &up_native); |
7346 |
++ if (!err) |
7347 |
++ err = get_v4l2_input32(up_native, up); |
7348 |
+ compatible_arg = 0; |
7349 |
+ break; |
7350 |
+ |
7351 |
+ case VIDIOC_G_EXT_CTRLS: |
7352 |
+ case VIDIOC_S_EXT_CTRLS: |
7353 |
+ case VIDIOC_TRY_EXT_CTRLS: |
7354 |
+- err = get_v4l2_ext_controls32(&karg.v2ecs, up); |
7355 |
++ err = bufsize_v4l2_ext_controls(up, &aux_space); |
7356 |
++ if (!err) |
7357 |
++ err = alloc_userspace(sizeof(struct v4l2_ext_controls), |
7358 |
++ aux_space, &up_native); |
7359 |
++ if (!err) { |
7360 |
++ aux_buf = up_native + sizeof(struct v4l2_ext_controls); |
7361 |
++ err = get_v4l2_ext_controls32(file, up_native, up, |
7362 |
++ aux_buf, aux_space); |
7363 |
++ } |
7364 |
+ compatible_arg = 0; |
7365 |
+ break; |
7366 |
+ case VIDIOC_DQEVENT: |
7367 |
++ err = alloc_userspace(sizeof(struct v4l2_event), 0, &up_native); |
7368 |
+ compatible_arg = 0; |
7369 |
+ break; |
7370 |
+ } |
7371 |
+@@ -996,26 +1175,26 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar |
7372 |
+ |
7373 |
+ if (compatible_arg) |
7374 |
+ err = native_ioctl(file, cmd, (unsigned long)up); |
7375 |
+- else { |
7376 |
+- mm_segment_t old_fs = get_fs(); |
7377 |
++ else |
7378 |
++ err = native_ioctl(file, cmd, (unsigned long)up_native); |
7379 |
+ |
7380 |
+- set_fs(KERNEL_DS); |
7381 |
+- err = native_ioctl(file, cmd, (unsigned long)&karg); |
7382 |
+- set_fs(old_fs); |
7383 |
+- } |
7384 |
++ if (err == -ENOTTY) |
7385 |
++ return err; |
7386 |
+ |
7387 |
+- /* Special case: even after an error we need to put the |
7388 |
+- results back for these ioctls since the error_idx will |
7389 |
+- contain information on which control failed. */ |
7390 |
++ /* |
7391 |
++ * Special case: even after an error we need to put the |
7392 |
++ * results back for these ioctls since the error_idx will |
7393 |
++ * contain information on which control failed. |
7394 |
++ */ |
7395 |
+ switch (cmd) { |
7396 |
+ case VIDIOC_G_EXT_CTRLS: |
7397 |
+ case VIDIOC_S_EXT_CTRLS: |
7398 |
+ case VIDIOC_TRY_EXT_CTRLS: |
7399 |
+- if (put_v4l2_ext_controls32(&karg.v2ecs, up)) |
7400 |
++ if (put_v4l2_ext_controls32(file, up_native, up)) |
7401 |
+ err = -EFAULT; |
7402 |
+ break; |
7403 |
+ case VIDIOC_S_EDID: |
7404 |
+- if (put_v4l2_edid32(&karg.v2edid, up)) |
7405 |
++ if (put_v4l2_edid32(up_native, up)) |
7406 |
+ err = -EFAULT; |
7407 |
+ break; |
7408 |
+ } |
7409 |
+@@ -1027,43 +1206,46 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar |
7410 |
+ case VIDIOC_S_OUTPUT: |
7411 |
+ case VIDIOC_G_INPUT: |
7412 |
+ case VIDIOC_G_OUTPUT: |
7413 |
+- err = put_user(((s32)karg.vi), (s32 __user *)up); |
7414 |
++ if (assign_in_user((compat_uint_t __user *)up, |
7415 |
++ ((unsigned int __user *)up_native))) |
7416 |
++ err = -EFAULT; |
7417 |
+ break; |
7418 |
+ |
7419 |
+ case VIDIOC_G_FBUF: |
7420 |
+- err = put_v4l2_framebuffer32(&karg.v2fb, up); |
7421 |
++ err = put_v4l2_framebuffer32(up_native, up); |
7422 |
+ break; |
7423 |
+ |
7424 |
+ case VIDIOC_DQEVENT: |
7425 |
+- err = put_v4l2_event32(&karg.v2ev, up); |
7426 |
++ err = put_v4l2_event32(up_native, up); |
7427 |
+ break; |
7428 |
+ |
7429 |
+ case VIDIOC_G_EDID: |
7430 |
+- err = put_v4l2_edid32(&karg.v2edid, up); |
7431 |
++ err = put_v4l2_edid32(up_native, up); |
7432 |
+ break; |
7433 |
+ |
7434 |
+ case VIDIOC_G_FMT: |
7435 |
+ case VIDIOC_S_FMT: |
7436 |
+ case VIDIOC_TRY_FMT: |
7437 |
+- err = put_v4l2_format32(&karg.v2f, up); |
7438 |
++ err = put_v4l2_format32(up_native, up); |
7439 |
+ break; |
7440 |
+ |
7441 |
+ case VIDIOC_CREATE_BUFS: |
7442 |
+- err = put_v4l2_create32(&karg.v2crt, up); |
7443 |
++ err = put_v4l2_create32(up_native, up); |
7444 |
+ break; |
7445 |
+ |
7446 |
++ case VIDIOC_PREPARE_BUF: |
7447 |
+ case VIDIOC_QUERYBUF: |
7448 |
+ case VIDIOC_QBUF: |
7449 |
+ case VIDIOC_DQBUF: |
7450 |
+- err = put_v4l2_buffer32(&karg.v2b, up); |
7451 |
++ err = put_v4l2_buffer32(up_native, up); |
7452 |
+ break; |
7453 |
+ |
7454 |
+ case VIDIOC_ENUMSTD: |
7455 |
+- err = put_v4l2_standard32(&karg.v2s, up); |
7456 |
++ err = put_v4l2_standard32(up_native, up); |
7457 |
+ break; |
7458 |
+ |
7459 |
+ case VIDIOC_ENUMINPUT: |
7460 |
+- err = put_v4l2_input32(&karg.v2i, up); |
7461 |
++ err = put_v4l2_input32(up_native, up); |
7462 |
+ break; |
7463 |
+ } |
7464 |
+ return err; |
7465 |
+diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c |
7466 |
+index b60a6b0841d1..d06941cc6a55 100644 |
7467 |
+--- a/drivers/media/v4l2-core/v4l2-ioctl.c |
7468 |
++++ b/drivers/media/v4l2-core/v4l2-ioctl.c |
7469 |
+@@ -1308,52 +1308,50 @@ static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops, |
7470 |
+ struct file *file, void *fh, void *arg) |
7471 |
+ { |
7472 |
+ struct v4l2_fmtdesc *p = arg; |
7473 |
+- struct video_device *vfd = video_devdata(file); |
7474 |
+- bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER; |
7475 |
+- bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR; |
7476 |
+- bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH; |
7477 |
+- bool is_rx = vfd->vfl_dir != VFL_DIR_TX; |
7478 |
+- bool is_tx = vfd->vfl_dir != VFL_DIR_RX; |
7479 |
+- int ret = -EINVAL; |
7480 |
++ int ret = check_fmt(file, p->type); |
7481 |
++ |
7482 |
++ if (ret) |
7483 |
++ return ret; |
7484 |
++ ret = -EINVAL; |
7485 |
+ |
7486 |
+ switch (p->type) { |
7487 |
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE: |
7488 |
+- if (unlikely(!is_rx || (!is_vid && !is_tch) || !ops->vidioc_enum_fmt_vid_cap)) |
7489 |
++ if (unlikely(!ops->vidioc_enum_fmt_vid_cap)) |
7490 |
+ break; |
7491 |
+ ret = ops->vidioc_enum_fmt_vid_cap(file, fh, arg); |
7492 |
+ break; |
7493 |
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: |
7494 |
+- if (unlikely(!is_rx || !is_vid || !ops->vidioc_enum_fmt_vid_cap_mplane)) |
7495 |
++ if (unlikely(!ops->vidioc_enum_fmt_vid_cap_mplane)) |
7496 |
+ break; |
7497 |
+ ret = ops->vidioc_enum_fmt_vid_cap_mplane(file, fh, arg); |
7498 |
+ break; |
7499 |
+ case V4L2_BUF_TYPE_VIDEO_OVERLAY: |
7500 |
+- if (unlikely(!is_rx || !is_vid || !ops->vidioc_enum_fmt_vid_overlay)) |
7501 |
++ if (unlikely(!ops->vidioc_enum_fmt_vid_overlay)) |
7502 |
+ break; |
7503 |
+ ret = ops->vidioc_enum_fmt_vid_overlay(file, fh, arg); |
7504 |
+ break; |
7505 |
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT: |
7506 |
+- if (unlikely(!is_tx || !is_vid || !ops->vidioc_enum_fmt_vid_out)) |
7507 |
++ if (unlikely(!ops->vidioc_enum_fmt_vid_out)) |
7508 |
+ break; |
7509 |
+ ret = ops->vidioc_enum_fmt_vid_out(file, fh, arg); |
7510 |
+ break; |
7511 |
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: |
7512 |
+- if (unlikely(!is_tx || !is_vid || !ops->vidioc_enum_fmt_vid_out_mplane)) |
7513 |
++ if (unlikely(!ops->vidioc_enum_fmt_vid_out_mplane)) |
7514 |
+ break; |
7515 |
+ ret = ops->vidioc_enum_fmt_vid_out_mplane(file, fh, arg); |
7516 |
+ break; |
7517 |
+ case V4L2_BUF_TYPE_SDR_CAPTURE: |
7518 |
+- if (unlikely(!is_rx || !is_sdr || !ops->vidioc_enum_fmt_sdr_cap)) |
7519 |
++ if (unlikely(!ops->vidioc_enum_fmt_sdr_cap)) |
7520 |
+ break; |
7521 |
+ ret = ops->vidioc_enum_fmt_sdr_cap(file, fh, arg); |
7522 |
+ break; |
7523 |
+ case V4L2_BUF_TYPE_SDR_OUTPUT: |
7524 |
+- if (unlikely(!is_tx || !is_sdr || !ops->vidioc_enum_fmt_sdr_out)) |
7525 |
++ if (unlikely(!ops->vidioc_enum_fmt_sdr_out)) |
7526 |
+ break; |
7527 |
+ ret = ops->vidioc_enum_fmt_sdr_out(file, fh, arg); |
7528 |
+ break; |
7529 |
+ case V4L2_BUF_TYPE_META_CAPTURE: |
7530 |
+- if (unlikely(!is_rx || !is_vid || !ops->vidioc_enum_fmt_meta_cap)) |
7531 |
++ if (unlikely(!ops->vidioc_enum_fmt_meta_cap)) |
7532 |
+ break; |
7533 |
+ ret = ops->vidioc_enum_fmt_meta_cap(file, fh, arg); |
7534 |
+ break; |
7535 |
+@@ -1367,13 +1365,10 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops, |
7536 |
+ struct file *file, void *fh, void *arg) |
7537 |
+ { |
7538 |
+ struct v4l2_format *p = arg; |
7539 |
+- struct video_device *vfd = video_devdata(file); |
7540 |
+- bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER; |
7541 |
+- bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR; |
7542 |
+- bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH; |
7543 |
+- bool is_rx = vfd->vfl_dir != VFL_DIR_TX; |
7544 |
+- bool is_tx = vfd->vfl_dir != VFL_DIR_RX; |
7545 |
+- int ret; |
7546 |
++ int ret = check_fmt(file, p->type); |
7547 |
++ |
7548 |
++ if (ret) |
7549 |
++ return ret; |
7550 |
+ |
7551 |
+ /* |
7552 |
+ * fmt can't be cleared for these overlay types due to the 'clips' |
7553 |
+@@ -1401,7 +1396,7 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops, |
7554 |
+ |
7555 |
+ switch (p->type) { |
7556 |
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE: |
7557 |
+- if (unlikely(!is_rx || (!is_vid && !is_tch) || !ops->vidioc_g_fmt_vid_cap)) |
7558 |
++ if (unlikely(!ops->vidioc_g_fmt_vid_cap)) |
7559 |
+ break; |
7560 |
+ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; |
7561 |
+ ret = ops->vidioc_g_fmt_vid_cap(file, fh, arg); |
7562 |
+@@ -1409,23 +1404,15 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops, |
7563 |
+ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; |
7564 |
+ return ret; |
7565 |
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: |
7566 |
+- if (unlikely(!is_rx || !is_vid || !ops->vidioc_g_fmt_vid_cap_mplane)) |
7567 |
+- break; |
7568 |
+ return ops->vidioc_g_fmt_vid_cap_mplane(file, fh, arg); |
7569 |
+ case V4L2_BUF_TYPE_VIDEO_OVERLAY: |
7570 |
+- if (unlikely(!is_rx || !is_vid || !ops->vidioc_g_fmt_vid_overlay)) |
7571 |
+- break; |
7572 |
+ return ops->vidioc_g_fmt_vid_overlay(file, fh, arg); |
7573 |
+ case V4L2_BUF_TYPE_VBI_CAPTURE: |
7574 |
+- if (unlikely(!is_rx || is_vid || !ops->vidioc_g_fmt_vbi_cap)) |
7575 |
+- break; |
7576 |
+ return ops->vidioc_g_fmt_vbi_cap(file, fh, arg); |
7577 |
+ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: |
7578 |
+- if (unlikely(!is_rx || is_vid || !ops->vidioc_g_fmt_sliced_vbi_cap)) |
7579 |
+- break; |
7580 |
+ return ops->vidioc_g_fmt_sliced_vbi_cap(file, fh, arg); |
7581 |
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT: |
7582 |
+- if (unlikely(!is_tx || !is_vid || !ops->vidioc_g_fmt_vid_out)) |
7583 |
++ if (unlikely(!ops->vidioc_g_fmt_vid_out)) |
7584 |
+ break; |
7585 |
+ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; |
7586 |
+ ret = ops->vidioc_g_fmt_vid_out(file, fh, arg); |
7587 |
+@@ -1433,32 +1420,18 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops, |
7588 |
+ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; |
7589 |
+ return ret; |
7590 |
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: |
7591 |
+- if (unlikely(!is_tx || !is_vid || !ops->vidioc_g_fmt_vid_out_mplane)) |
7592 |
+- break; |
7593 |
+ return ops->vidioc_g_fmt_vid_out_mplane(file, fh, arg); |
7594 |
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: |
7595 |
+- if (unlikely(!is_tx || !is_vid || !ops->vidioc_g_fmt_vid_out_overlay)) |
7596 |
+- break; |
7597 |
+ return ops->vidioc_g_fmt_vid_out_overlay(file, fh, arg); |
7598 |
+ case V4L2_BUF_TYPE_VBI_OUTPUT: |
7599 |
+- if (unlikely(!is_tx || is_vid || !ops->vidioc_g_fmt_vbi_out)) |
7600 |
+- break; |
7601 |
+ return ops->vidioc_g_fmt_vbi_out(file, fh, arg); |
7602 |
+ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: |
7603 |
+- if (unlikely(!is_tx || is_vid || !ops->vidioc_g_fmt_sliced_vbi_out)) |
7604 |
+- break; |
7605 |
+ return ops->vidioc_g_fmt_sliced_vbi_out(file, fh, arg); |
7606 |
+ case V4L2_BUF_TYPE_SDR_CAPTURE: |
7607 |
+- if (unlikely(!is_rx || !is_sdr || !ops->vidioc_g_fmt_sdr_cap)) |
7608 |
+- break; |
7609 |
+ return ops->vidioc_g_fmt_sdr_cap(file, fh, arg); |
7610 |
+ case V4L2_BUF_TYPE_SDR_OUTPUT: |
7611 |
+- if (unlikely(!is_tx || !is_sdr || !ops->vidioc_g_fmt_sdr_out)) |
7612 |
+- break; |
7613 |
+ return ops->vidioc_g_fmt_sdr_out(file, fh, arg); |
7614 |
+ case V4L2_BUF_TYPE_META_CAPTURE: |
7615 |
+- if (unlikely(!is_rx || !is_vid || !ops->vidioc_g_fmt_meta_cap)) |
7616 |
+- break; |
7617 |
+ return ops->vidioc_g_fmt_meta_cap(file, fh, arg); |
7618 |
+ } |
7619 |
+ return -EINVAL; |
7620 |
+@@ -1484,12 +1457,10 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops, |
7621 |
+ { |
7622 |
+ struct v4l2_format *p = arg; |
7623 |
+ struct video_device *vfd = video_devdata(file); |
7624 |
+- bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER; |
7625 |
+- bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR; |
7626 |
+- bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH; |
7627 |
+- bool is_rx = vfd->vfl_dir != VFL_DIR_TX; |
7628 |
+- bool is_tx = vfd->vfl_dir != VFL_DIR_RX; |
7629 |
+- int ret; |
7630 |
++ int ret = check_fmt(file, p->type); |
7631 |
++ |
7632 |
++ if (ret) |
7633 |
++ return ret; |
7634 |
+ |
7635 |
+ ret = v4l_enable_media_source(vfd); |
7636 |
+ if (ret) |
7637 |
+@@ -1498,37 +1469,37 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops, |
7638 |
+ |
7639 |
+ switch (p->type) { |
7640 |
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE: |
7641 |
+- if (unlikely(!is_rx || (!is_vid && !is_tch) || !ops->vidioc_s_fmt_vid_cap)) |
7642 |
++ if (unlikely(!ops->vidioc_s_fmt_vid_cap)) |
7643 |
+ break; |
7644 |
+ CLEAR_AFTER_FIELD(p, fmt.pix); |
7645 |
+ ret = ops->vidioc_s_fmt_vid_cap(file, fh, arg); |
7646 |
+ /* just in case the driver zeroed it again */ |
7647 |
+ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; |
7648 |
+- if (is_tch) |
7649 |
++ if (vfd->vfl_type == VFL_TYPE_TOUCH) |
7650 |
+ v4l_pix_format_touch(&p->fmt.pix); |
7651 |
+ return ret; |
7652 |
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: |
7653 |
+- if (unlikely(!is_rx || !is_vid || !ops->vidioc_s_fmt_vid_cap_mplane)) |
7654 |
++ if (unlikely(!ops->vidioc_s_fmt_vid_cap_mplane)) |
7655 |
+ break; |
7656 |
+ CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); |
7657 |
+ return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg); |
7658 |
+ case V4L2_BUF_TYPE_VIDEO_OVERLAY: |
7659 |
+- if (unlikely(!is_rx || !is_vid || !ops->vidioc_s_fmt_vid_overlay)) |
7660 |
++ if (unlikely(!ops->vidioc_s_fmt_vid_overlay)) |
7661 |
+ break; |
7662 |
+ CLEAR_AFTER_FIELD(p, fmt.win); |
7663 |
+ return ops->vidioc_s_fmt_vid_overlay(file, fh, arg); |
7664 |
+ case V4L2_BUF_TYPE_VBI_CAPTURE: |
7665 |
+- if (unlikely(!is_rx || is_vid || !ops->vidioc_s_fmt_vbi_cap)) |
7666 |
++ if (unlikely(!ops->vidioc_s_fmt_vbi_cap)) |
7667 |
+ break; |
7668 |
+ CLEAR_AFTER_FIELD(p, fmt.vbi); |
7669 |
+ return ops->vidioc_s_fmt_vbi_cap(file, fh, arg); |
7670 |
+ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: |
7671 |
+- if (unlikely(!is_rx || is_vid || !ops->vidioc_s_fmt_sliced_vbi_cap)) |
7672 |
++ if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_cap)) |
7673 |
+ break; |
7674 |
+ CLEAR_AFTER_FIELD(p, fmt.sliced); |
7675 |
+ return ops->vidioc_s_fmt_sliced_vbi_cap(file, fh, arg); |
7676 |
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT: |
7677 |
+- if (unlikely(!is_tx || !is_vid || !ops->vidioc_s_fmt_vid_out)) |
7678 |
++ if (unlikely(!ops->vidioc_s_fmt_vid_out)) |
7679 |
+ break; |
7680 |
+ CLEAR_AFTER_FIELD(p, fmt.pix); |
7681 |
+ ret = ops->vidioc_s_fmt_vid_out(file, fh, arg); |
7682 |
+@@ -1536,37 +1507,37 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops, |
7683 |
+ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; |
7684 |
+ return ret; |
7685 |
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: |
7686 |
+- if (unlikely(!is_tx || !is_vid || !ops->vidioc_s_fmt_vid_out_mplane)) |
7687 |
++ if (unlikely(!ops->vidioc_s_fmt_vid_out_mplane)) |
7688 |
+ break; |
7689 |
+ CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); |
7690 |
+ return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg); |
7691 |
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: |
7692 |
+- if (unlikely(!is_tx || !is_vid || !ops->vidioc_s_fmt_vid_out_overlay)) |
7693 |
++ if (unlikely(!ops->vidioc_s_fmt_vid_out_overlay)) |
7694 |
+ break; |
7695 |
+ CLEAR_AFTER_FIELD(p, fmt.win); |
7696 |
+ return ops->vidioc_s_fmt_vid_out_overlay(file, fh, arg); |
7697 |
+ case V4L2_BUF_TYPE_VBI_OUTPUT: |
7698 |
+- if (unlikely(!is_tx || is_vid || !ops->vidioc_s_fmt_vbi_out)) |
7699 |
++ if (unlikely(!ops->vidioc_s_fmt_vbi_out)) |
7700 |
+ break; |
7701 |
+ CLEAR_AFTER_FIELD(p, fmt.vbi); |
7702 |
+ return ops->vidioc_s_fmt_vbi_out(file, fh, arg); |
7703 |
+ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: |
7704 |
+- if (unlikely(!is_tx || is_vid || !ops->vidioc_s_fmt_sliced_vbi_out)) |
7705 |
++ if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_out)) |
7706 |
+ break; |
7707 |
+ CLEAR_AFTER_FIELD(p, fmt.sliced); |
7708 |
+ return ops->vidioc_s_fmt_sliced_vbi_out(file, fh, arg); |
7709 |
+ case V4L2_BUF_TYPE_SDR_CAPTURE: |
7710 |
+- if (unlikely(!is_rx || !is_sdr || !ops->vidioc_s_fmt_sdr_cap)) |
7711 |
++ if (unlikely(!ops->vidioc_s_fmt_sdr_cap)) |
7712 |
+ break; |
7713 |
+ CLEAR_AFTER_FIELD(p, fmt.sdr); |
7714 |
+ return ops->vidioc_s_fmt_sdr_cap(file, fh, arg); |
7715 |
+ case V4L2_BUF_TYPE_SDR_OUTPUT: |
7716 |
+- if (unlikely(!is_tx || !is_sdr || !ops->vidioc_s_fmt_sdr_out)) |
7717 |
++ if (unlikely(!ops->vidioc_s_fmt_sdr_out)) |
7718 |
+ break; |
7719 |
+ CLEAR_AFTER_FIELD(p, fmt.sdr); |
7720 |
+ return ops->vidioc_s_fmt_sdr_out(file, fh, arg); |
7721 |
+ case V4L2_BUF_TYPE_META_CAPTURE: |
7722 |
+- if (unlikely(!is_rx || !is_vid || !ops->vidioc_s_fmt_meta_cap)) |
7723 |
++ if (unlikely(!ops->vidioc_s_fmt_meta_cap)) |
7724 |
+ break; |
7725 |
+ CLEAR_AFTER_FIELD(p, fmt.meta); |
7726 |
+ return ops->vidioc_s_fmt_meta_cap(file, fh, arg); |
7727 |
+@@ -1578,19 +1549,16 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops, |
7728 |
+ struct file *file, void *fh, void *arg) |
7729 |
+ { |
7730 |
+ struct v4l2_format *p = arg; |
7731 |
+- struct video_device *vfd = video_devdata(file); |
7732 |
+- bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER; |
7733 |
+- bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR; |
7734 |
+- bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH; |
7735 |
+- bool is_rx = vfd->vfl_dir != VFL_DIR_TX; |
7736 |
+- bool is_tx = vfd->vfl_dir != VFL_DIR_RX; |
7737 |
+- int ret; |
7738 |
++ int ret = check_fmt(file, p->type); |
7739 |
++ |
7740 |
++ if (ret) |
7741 |
++ return ret; |
7742 |
+ |
7743 |
+ v4l_sanitize_format(p); |
7744 |
+ |
7745 |
+ switch (p->type) { |
7746 |
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE: |
7747 |
+- if (unlikely(!is_rx || (!is_vid && !is_tch) || !ops->vidioc_try_fmt_vid_cap)) |
7748 |
++ if (unlikely(!ops->vidioc_try_fmt_vid_cap)) |
7749 |
+ break; |
7750 |
+ CLEAR_AFTER_FIELD(p, fmt.pix); |
7751 |
+ ret = ops->vidioc_try_fmt_vid_cap(file, fh, arg); |
7752 |
+@@ -1598,27 +1566,27 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops, |
7753 |
+ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; |
7754 |
+ return ret; |
7755 |
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: |
7756 |
+- if (unlikely(!is_rx || !is_vid || !ops->vidioc_try_fmt_vid_cap_mplane)) |
7757 |
++ if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane)) |
7758 |
+ break; |
7759 |
+ CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); |
7760 |
+ return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg); |
7761 |
+ case V4L2_BUF_TYPE_VIDEO_OVERLAY: |
7762 |
+- if (unlikely(!is_rx || !is_vid || !ops->vidioc_try_fmt_vid_overlay)) |
7763 |
++ if (unlikely(!ops->vidioc_try_fmt_vid_overlay)) |
7764 |
+ break; |
7765 |
+ CLEAR_AFTER_FIELD(p, fmt.win); |
7766 |
+ return ops->vidioc_try_fmt_vid_overlay(file, fh, arg); |
7767 |
+ case V4L2_BUF_TYPE_VBI_CAPTURE: |
7768 |
+- if (unlikely(!is_rx || is_vid || !ops->vidioc_try_fmt_vbi_cap)) |
7769 |
++ if (unlikely(!ops->vidioc_try_fmt_vbi_cap)) |
7770 |
+ break; |
7771 |
+ CLEAR_AFTER_FIELD(p, fmt.vbi); |
7772 |
+ return ops->vidioc_try_fmt_vbi_cap(file, fh, arg); |
7773 |
+ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: |
7774 |
+- if (unlikely(!is_rx || is_vid || !ops->vidioc_try_fmt_sliced_vbi_cap)) |
7775 |
++ if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_cap)) |
7776 |
+ break; |
7777 |
+ CLEAR_AFTER_FIELD(p, fmt.sliced); |
7778 |
+ return ops->vidioc_try_fmt_sliced_vbi_cap(file, fh, arg); |
7779 |
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT: |
7780 |
+- if (unlikely(!is_tx || !is_vid || !ops->vidioc_try_fmt_vid_out)) |
7781 |
++ if (unlikely(!ops->vidioc_try_fmt_vid_out)) |
7782 |
+ break; |
7783 |
+ CLEAR_AFTER_FIELD(p, fmt.pix); |
7784 |
+ ret = ops->vidioc_try_fmt_vid_out(file, fh, arg); |
7785 |
+@@ -1626,37 +1594,37 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops, |
7786 |
+ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; |
7787 |
+ return ret; |
7788 |
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: |
7789 |
+- if (unlikely(!is_tx || !is_vid || !ops->vidioc_try_fmt_vid_out_mplane)) |
7790 |
++ if (unlikely(!ops->vidioc_try_fmt_vid_out_mplane)) |
7791 |
+ break; |
7792 |
+ CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); |
7793 |
+ return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg); |
7794 |
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: |
7795 |
+- if (unlikely(!is_tx || !is_vid || !ops->vidioc_try_fmt_vid_out_overlay)) |
7796 |
++ if (unlikely(!ops->vidioc_try_fmt_vid_out_overlay)) |
7797 |
+ break; |
7798 |
+ CLEAR_AFTER_FIELD(p, fmt.win); |
7799 |
+ return ops->vidioc_try_fmt_vid_out_overlay(file, fh, arg); |
7800 |
+ case V4L2_BUF_TYPE_VBI_OUTPUT: |
7801 |
+- if (unlikely(!is_tx || is_vid || !ops->vidioc_try_fmt_vbi_out)) |
7802 |
++ if (unlikely(!ops->vidioc_try_fmt_vbi_out)) |
7803 |
+ break; |
7804 |
+ CLEAR_AFTER_FIELD(p, fmt.vbi); |
7805 |
+ return ops->vidioc_try_fmt_vbi_out(file, fh, arg); |
7806 |
+ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: |
7807 |
+- if (unlikely(!is_tx || is_vid || !ops->vidioc_try_fmt_sliced_vbi_out)) |
7808 |
++ if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_out)) |
7809 |
+ break; |
7810 |
+ CLEAR_AFTER_FIELD(p, fmt.sliced); |
7811 |
+ return ops->vidioc_try_fmt_sliced_vbi_out(file, fh, arg); |
7812 |
+ case V4L2_BUF_TYPE_SDR_CAPTURE: |
7813 |
+- if (unlikely(!is_rx || !is_sdr || !ops->vidioc_try_fmt_sdr_cap)) |
7814 |
++ if (unlikely(!ops->vidioc_try_fmt_sdr_cap)) |
7815 |
+ break; |
7816 |
+ CLEAR_AFTER_FIELD(p, fmt.sdr); |
7817 |
+ return ops->vidioc_try_fmt_sdr_cap(file, fh, arg); |
7818 |
+ case V4L2_BUF_TYPE_SDR_OUTPUT: |
7819 |
+- if (unlikely(!is_tx || !is_sdr || !ops->vidioc_try_fmt_sdr_out)) |
7820 |
++ if (unlikely(!ops->vidioc_try_fmt_sdr_out)) |
7821 |
+ break; |
7822 |
+ CLEAR_AFTER_FIELD(p, fmt.sdr); |
7823 |
+ return ops->vidioc_try_fmt_sdr_out(file, fh, arg); |
7824 |
+ case V4L2_BUF_TYPE_META_CAPTURE: |
7825 |
+- if (unlikely(!is_rx || !is_vid || !ops->vidioc_try_fmt_meta_cap)) |
7826 |
++ if (unlikely(!ops->vidioc_try_fmt_meta_cap)) |
7827 |
+ break; |
7828 |
+ CLEAR_AFTER_FIELD(p, fmt.meta); |
7829 |
+ return ops->vidioc_try_fmt_meta_cap(file, fh, arg); |
7830 |
+@@ -2924,8 +2892,11 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg, |
7831 |
+ |
7832 |
+ /* Handles IOCTL */ |
7833 |
+ err = func(file, cmd, parg); |
7834 |
+- if (err == -ENOIOCTLCMD) |
7835 |
++ if (err == -ENOTTY || err == -ENOIOCTLCMD) { |
7836 |
+ err = -ENOTTY; |
7837 |
++ goto out; |
7838 |
++ } |
7839 |
++ |
7840 |
+ if (err == 0) { |
7841 |
+ if (cmd == VIDIOC_DQBUF) |
7842 |
+ trace_v4l2_dqbuf(video_devdata(file)->minor, parg); |
7843 |
+diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c |
7844 |
+index e0eb51d8c012..edf24c148fa6 100644 |
7845 |
+--- a/drivers/mtd/nand/brcmnand/brcmnand.c |
7846 |
++++ b/drivers/mtd/nand/brcmnand/brcmnand.c |
7847 |
+@@ -2193,16 +2193,9 @@ static int brcmnand_setup_dev(struct brcmnand_host *host) |
7848 |
+ if (ctrl->nand_version >= 0x0702) |
7849 |
+ tmp |= ACC_CONTROL_RD_ERASED; |
7850 |
+ tmp &= ~ACC_CONTROL_FAST_PGM_RDIN; |
7851 |
+- if (ctrl->features & BRCMNAND_HAS_PREFETCH) { |
7852 |
+- /* |
7853 |
+- * FIXME: Flash DMA + prefetch may see spurious erased-page ECC |
7854 |
+- * errors |
7855 |
+- */ |
7856 |
+- if (has_flash_dma(ctrl)) |
7857 |
+- tmp &= ~ACC_CONTROL_PREFETCH; |
7858 |
+- else |
7859 |
+- tmp |= ACC_CONTROL_PREFETCH; |
7860 |
+- } |
7861 |
++ if (ctrl->features & BRCMNAND_HAS_PREFETCH) |
7862 |
++ tmp &= ~ACC_CONTROL_PREFETCH; |
7863 |
++ |
7864 |
+ nand_writereg(ctrl, offs, tmp); |
7865 |
+ |
7866 |
+ return 0; |
7867 |
+diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c |
7868 |
+index 3f1d806e590a..a0b0302aea14 100644 |
7869 |
+--- a/drivers/mtd/nand/nand_base.c |
7870 |
++++ b/drivers/mtd/nand/nand_base.c |
7871 |
+@@ -2201,6 +2201,7 @@ EXPORT_SYMBOL(nand_write_oob_syndrome); |
7872 |
+ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, |
7873 |
+ struct mtd_oob_ops *ops) |
7874 |
+ { |
7875 |
++ unsigned int max_bitflips = 0; |
7876 |
+ int page, realpage, chipnr; |
7877 |
+ struct nand_chip *chip = mtd_to_nand(mtd); |
7878 |
+ struct mtd_ecc_stats stats; |
7879 |
+@@ -2258,6 +2259,8 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, |
7880 |
+ nand_wait_ready(mtd); |
7881 |
+ } |
7882 |
+ |
7883 |
++ max_bitflips = max_t(unsigned int, max_bitflips, ret); |
7884 |
++ |
7885 |
+ readlen -= len; |
7886 |
+ if (!readlen) |
7887 |
+ break; |
7888 |
+@@ -2283,7 +2286,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, |
7889 |
+ if (mtd->ecc_stats.failed - stats.failed) |
7890 |
+ return -EBADMSG; |
7891 |
+ |
7892 |
+- return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0; |
7893 |
++ return max_bitflips; |
7894 |
+ } |
7895 |
+ |
7896 |
+ /** |
7897 |
+diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c |
7898 |
+index 82244be3e766..958974821582 100644 |
7899 |
+--- a/drivers/mtd/nand/sunxi_nand.c |
7900 |
++++ b/drivers/mtd/nand/sunxi_nand.c |
7901 |
+@@ -1853,8 +1853,14 @@ static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd, |
7902 |
+ |
7903 |
+ /* Add ECC info retrieval from DT */ |
7904 |
+ for (i = 0; i < ARRAY_SIZE(strengths); i++) { |
7905 |
+- if (ecc->strength <= strengths[i]) |
7906 |
++ if (ecc->strength <= strengths[i]) { |
7907 |
++ /* |
7908 |
++ * Update ecc->strength value with the actual strength |
7909 |
++ * that will be used by the ECC engine. |
7910 |
++ */ |
7911 |
++ ecc->strength = strengths[i]; |
7912 |
+ break; |
7913 |
++ } |
7914 |
+ } |
7915 |
+ |
7916 |
+ if (i >= ARRAY_SIZE(strengths)) { |
7917 |
+diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c |
7918 |
+index b210fdb31c98..b1fc28f63882 100644 |
7919 |
+--- a/drivers/mtd/ubi/block.c |
7920 |
++++ b/drivers/mtd/ubi/block.c |
7921 |
+@@ -99,6 +99,8 @@ struct ubiblock { |
7922 |
+ |
7923 |
+ /* Linked list of all ubiblock instances */ |
7924 |
+ static LIST_HEAD(ubiblock_devices); |
7925 |
++static DEFINE_IDR(ubiblock_minor_idr); |
7926 |
++/* Protects ubiblock_devices and ubiblock_minor_idr */ |
7927 |
+ static DEFINE_MUTEX(devices_mutex); |
7928 |
+ static int ubiblock_major; |
7929 |
+ |
7930 |
+@@ -351,8 +353,6 @@ static const struct blk_mq_ops ubiblock_mq_ops = { |
7931 |
+ .init_request = ubiblock_init_request, |
7932 |
+ }; |
7933 |
+ |
7934 |
+-static DEFINE_IDR(ubiblock_minor_idr); |
7935 |
+- |
7936 |
+ int ubiblock_create(struct ubi_volume_info *vi) |
7937 |
+ { |
7938 |
+ struct ubiblock *dev; |
7939 |
+@@ -365,14 +365,15 @@ int ubiblock_create(struct ubi_volume_info *vi) |
7940 |
+ /* Check that the volume isn't already handled */ |
7941 |
+ mutex_lock(&devices_mutex); |
7942 |
+ if (find_dev_nolock(vi->ubi_num, vi->vol_id)) { |
7943 |
+- mutex_unlock(&devices_mutex); |
7944 |
+- return -EEXIST; |
7945 |
++ ret = -EEXIST; |
7946 |
++ goto out_unlock; |
7947 |
+ } |
7948 |
+- mutex_unlock(&devices_mutex); |
7949 |
+ |
7950 |
+ dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL); |
7951 |
+- if (!dev) |
7952 |
+- return -ENOMEM; |
7953 |
++ if (!dev) { |
7954 |
++ ret = -ENOMEM; |
7955 |
++ goto out_unlock; |
7956 |
++ } |
7957 |
+ |
7958 |
+ mutex_init(&dev->dev_mutex); |
7959 |
+ |
7960 |
+@@ -437,14 +438,13 @@ int ubiblock_create(struct ubi_volume_info *vi) |
7961 |
+ goto out_free_queue; |
7962 |
+ } |
7963 |
+ |
7964 |
+- mutex_lock(&devices_mutex); |
7965 |
+ list_add_tail(&dev->list, &ubiblock_devices); |
7966 |
+- mutex_unlock(&devices_mutex); |
7967 |
+ |
7968 |
+ /* Must be the last step: anyone can call file ops from now on */ |
7969 |
+ add_disk(dev->gd); |
7970 |
+ dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)", |
7971 |
+ dev->ubi_num, dev->vol_id, vi->name); |
7972 |
++ mutex_unlock(&devices_mutex); |
7973 |
+ return 0; |
7974 |
+ |
7975 |
+ out_free_queue: |
7976 |
+@@ -457,6 +457,8 @@ int ubiblock_create(struct ubi_volume_info *vi) |
7977 |
+ put_disk(dev->gd); |
7978 |
+ out_free_dev: |
7979 |
+ kfree(dev); |
7980 |
++out_unlock: |
7981 |
++ mutex_unlock(&devices_mutex); |
7982 |
+ |
7983 |
+ return ret; |
7984 |
+ } |
7985 |
+@@ -478,30 +480,36 @@ static void ubiblock_cleanup(struct ubiblock *dev) |
7986 |
+ int ubiblock_remove(struct ubi_volume_info *vi) |
7987 |
+ { |
7988 |
+ struct ubiblock *dev; |
7989 |
++ int ret; |
7990 |
+ |
7991 |
+ mutex_lock(&devices_mutex); |
7992 |
+ dev = find_dev_nolock(vi->ubi_num, vi->vol_id); |
7993 |
+ if (!dev) { |
7994 |
+- mutex_unlock(&devices_mutex); |
7995 |
+- return -ENODEV; |
7996 |
++ ret = -ENODEV; |
7997 |
++ goto out_unlock; |
7998 |
+ } |
7999 |
+ |
8000 |
+ /* Found a device, let's lock it so we can check if it's busy */ |
8001 |
+ mutex_lock(&dev->dev_mutex); |
8002 |
+ if (dev->refcnt > 0) { |
8003 |
+- mutex_unlock(&dev->dev_mutex); |
8004 |
+- mutex_unlock(&devices_mutex); |
8005 |
+- return -EBUSY; |
8006 |
++ ret = -EBUSY; |
8007 |
++ goto out_unlock_dev; |
8008 |
+ } |
8009 |
+ |
8010 |
+ /* Remove from device list */ |
8011 |
+ list_del(&dev->list); |
8012 |
+- mutex_unlock(&devices_mutex); |
8013 |
+- |
8014 |
+ ubiblock_cleanup(dev); |
8015 |
+ mutex_unlock(&dev->dev_mutex); |
8016 |
++ mutex_unlock(&devices_mutex); |
8017 |
++ |
8018 |
+ kfree(dev); |
8019 |
+ return 0; |
8020 |
++ |
8021 |
++out_unlock_dev: |
8022 |
++ mutex_unlock(&dev->dev_mutex); |
8023 |
++out_unlock: |
8024 |
++ mutex_unlock(&devices_mutex); |
8025 |
++ return ret; |
8026 |
+ } |
8027 |
+ |
8028 |
+ static int ubiblock_resize(struct ubi_volume_info *vi) |
8029 |
+@@ -630,6 +638,7 @@ static void ubiblock_remove_all(void) |
8030 |
+ struct ubiblock *next; |
8031 |
+ struct ubiblock *dev; |
8032 |
+ |
8033 |
++ mutex_lock(&devices_mutex); |
8034 |
+ list_for_each_entry_safe(dev, next, &ubiblock_devices, list) { |
8035 |
+ /* The module is being forcefully removed */ |
8036 |
+ WARN_ON(dev->desc); |
8037 |
+@@ -638,6 +647,7 @@ static void ubiblock_remove_all(void) |
8038 |
+ ubiblock_cleanup(dev); |
8039 |
+ kfree(dev); |
8040 |
+ } |
8041 |
++ mutex_unlock(&devices_mutex); |
8042 |
+ } |
8043 |
+ |
8044 |
+ int __init ubiblock_init(void) |
8045 |
+diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c |
8046 |
+index 85237cf661f9..3fd8d7ff7a02 100644 |
8047 |
+--- a/drivers/mtd/ubi/vmt.c |
8048 |
++++ b/drivers/mtd/ubi/vmt.c |
8049 |
+@@ -270,6 +270,12 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) |
8050 |
+ vol->last_eb_bytes = vol->usable_leb_size; |
8051 |
+ } |
8052 |
+ |
8053 |
++ /* Make volume "available" before it becomes accessible via sysfs */ |
8054 |
++ spin_lock(&ubi->volumes_lock); |
8055 |
++ ubi->volumes[vol_id] = vol; |
8056 |
++ ubi->vol_count += 1; |
8057 |
++ spin_unlock(&ubi->volumes_lock); |
8058 |
++ |
8059 |
+ /* Register character device for the volume */ |
8060 |
+ cdev_init(&vol->cdev, &ubi_vol_cdev_operations); |
8061 |
+ vol->cdev.owner = THIS_MODULE; |
8062 |
+@@ -298,11 +304,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) |
8063 |
+ if (err) |
8064 |
+ goto out_sysfs; |
8065 |
+ |
8066 |
+- spin_lock(&ubi->volumes_lock); |
8067 |
+- ubi->volumes[vol_id] = vol; |
8068 |
+- ubi->vol_count += 1; |
8069 |
+- spin_unlock(&ubi->volumes_lock); |
8070 |
+- |
8071 |
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED); |
8072 |
+ self_check_volumes(ubi); |
8073 |
+ return err; |
8074 |
+@@ -315,6 +316,10 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) |
8075 |
+ */ |
8076 |
+ cdev_device_del(&vol->cdev, &vol->dev); |
8077 |
+ out_mapping: |
8078 |
++ spin_lock(&ubi->volumes_lock); |
8079 |
++ ubi->volumes[vol_id] = NULL; |
8080 |
++ ubi->vol_count -= 1; |
8081 |
++ spin_unlock(&ubi->volumes_lock); |
8082 |
+ ubi_eba_destroy_table(eba_tbl); |
8083 |
+ out_acc: |
8084 |
+ spin_lock(&ubi->volumes_lock); |
8085 |
+diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c |
8086 |
+index b5b8cd6f481c..668b46202507 100644 |
8087 |
+--- a/drivers/mtd/ubi/wl.c |
8088 |
++++ b/drivers/mtd/ubi/wl.c |
8089 |
+@@ -1528,6 +1528,46 @@ static void shutdown_work(struct ubi_device *ubi) |
8090 |
+ } |
8091 |
+ } |
8092 |
+ |
8093 |
++/** |
8094 |
++ * erase_aeb - erase a PEB given in UBI attach info PEB |
8095 |
++ * @ubi: UBI device description object |
8096 |
++ * @aeb: UBI attach info PEB |
8097 |
++ * @sync: If true, erase synchronously. Otherwise schedule for erasure |
8098 |
++ */ |
8099 |
++static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync) |
8100 |
++{ |
8101 |
++ struct ubi_wl_entry *e; |
8102 |
++ int err; |
8103 |
++ |
8104 |
++ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); |
8105 |
++ if (!e) |
8106 |
++ return -ENOMEM; |
8107 |
++ |
8108 |
++ e->pnum = aeb->pnum; |
8109 |
++ e->ec = aeb->ec; |
8110 |
++ ubi->lookuptbl[e->pnum] = e; |
8111 |
++ |
8112 |
++ if (sync) { |
8113 |
++ err = sync_erase(ubi, e, false); |
8114 |
++ if (err) |
8115 |
++ goto out_free; |
8116 |
++ |
8117 |
++ wl_tree_add(e, &ubi->free); |
8118 |
++ ubi->free_count++; |
8119 |
++ } else { |
8120 |
++ err = schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false); |
8121 |
++ if (err) |
8122 |
++ goto out_free; |
8123 |
++ } |
8124 |
++ |
8125 |
++ return 0; |
8126 |
++ |
8127 |
++out_free: |
8128 |
++ wl_entry_destroy(ubi, e); |
8129 |
++ |
8130 |
++ return err; |
8131 |
++} |
8132 |
++ |
8133 |
+ /** |
8134 |
+ * ubi_wl_init - initialize the WL sub-system using attaching information. |
8135 |
+ * @ubi: UBI device description object |
8136 |
+@@ -1566,18 +1606,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) |
8137 |
+ list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) { |
8138 |
+ cond_resched(); |
8139 |
+ |
8140 |
+- e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); |
8141 |
+- if (!e) |
8142 |
++ err = erase_aeb(ubi, aeb, false); |
8143 |
++ if (err) |
8144 |
+ goto out_free; |
8145 |
+ |
8146 |
+- e->pnum = aeb->pnum; |
8147 |
+- e->ec = aeb->ec; |
8148 |
+- ubi->lookuptbl[e->pnum] = e; |
8149 |
+- if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) { |
8150 |
+- wl_entry_destroy(ubi, e); |
8151 |
+- goto out_free; |
8152 |
+- } |
8153 |
+- |
8154 |
+ found_pebs++; |
8155 |
+ } |
8156 |
+ |
8157 |
+@@ -1635,6 +1667,8 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) |
8158 |
+ ubi_assert(!ubi->lookuptbl[e->pnum]); |
8159 |
+ ubi->lookuptbl[e->pnum] = e; |
8160 |
+ } else { |
8161 |
++ bool sync = false; |
8162 |
++ |
8163 |
+ /* |
8164 |
+ * Usually old Fastmap PEBs are scheduled for erasure |
8165 |
+ * and we don't have to care about them but if we face |
8166 |
+@@ -1644,18 +1678,21 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) |
8167 |
+ if (ubi->lookuptbl[aeb->pnum]) |
8168 |
+ continue; |
8169 |
+ |
8170 |
+- e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); |
8171 |
+- if (!e) |
8172 |
+- goto out_free; |
8173 |
++ /* |
8174 |
++ * The fastmap update code might not find a free PEB for |
8175 |
++ * writing the fastmap anchor to and then reuses the |
8176 |
++ * current fastmap anchor PEB. When this PEB gets erased |
8177 |
++ * and a power cut happens before it is written again we |
8178 |
++ * must make sure that the fastmap attach code doesn't |
8179 |
++ * find any outdated fastmap anchors, hence we erase the |
8180 |
++ * outdated fastmap anchor PEBs synchronously here. |
8181 |
++ */ |
8182 |
++ if (aeb->vol_id == UBI_FM_SB_VOLUME_ID) |
8183 |
++ sync = true; |
8184 |
+ |
8185 |
+- e->pnum = aeb->pnum; |
8186 |
+- e->ec = aeb->ec; |
8187 |
+- ubi_assert(!ubi->lookuptbl[e->pnum]); |
8188 |
+- ubi->lookuptbl[e->pnum] = e; |
8189 |
+- if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) { |
8190 |
+- wl_entry_destroy(ubi, e); |
8191 |
++ err = erase_aeb(ubi, aeb, sync); |
8192 |
++ if (err) |
8193 |
+ goto out_free; |
8194 |
+- } |
8195 |
+ } |
8196 |
+ |
8197 |
+ found_pebs++; |
8198 |
+diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c |
8199 |
+index 71df0f70b61f..72b4527d690f 100644 |
8200 |
+--- a/drivers/pinctrl/intel/pinctrl-intel.c |
8201 |
++++ b/drivers/pinctrl/intel/pinctrl-intel.c |
8202 |
+@@ -427,6 +427,18 @@ static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input) |
8203 |
+ writel(value, padcfg0); |
8204 |
+ } |
8205 |
+ |
8206 |
++static void intel_gpio_set_gpio_mode(void __iomem *padcfg0) |
8207 |
++{ |
8208 |
++ u32 value; |
8209 |
++ |
8210 |
++ /* Put the pad into GPIO mode */ |
8211 |
++ value = readl(padcfg0) & ~PADCFG0_PMODE_MASK; |
8212 |
++ /* Disable SCI/SMI/NMI generation */ |
8213 |
++ value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI); |
8214 |
++ value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI); |
8215 |
++ writel(value, padcfg0); |
8216 |
++} |
8217 |
++ |
8218 |
+ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, |
8219 |
+ struct pinctrl_gpio_range *range, |
8220 |
+ unsigned pin) |
8221 |
+@@ -434,7 +446,6 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, |
8222 |
+ struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); |
8223 |
+ void __iomem *padcfg0; |
8224 |
+ unsigned long flags; |
8225 |
+- u32 value; |
8226 |
+ |
8227 |
+ raw_spin_lock_irqsave(&pctrl->lock, flags); |
8228 |
+ |
8229 |
+@@ -444,13 +455,7 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, |
8230 |
+ } |
8231 |
+ |
8232 |
+ padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0); |
8233 |
+- /* Put the pad into GPIO mode */ |
8234 |
+- value = readl(padcfg0) & ~PADCFG0_PMODE_MASK; |
8235 |
+- /* Disable SCI/SMI/NMI generation */ |
8236 |
+- value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI); |
8237 |
+- value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI); |
8238 |
+- writel(value, padcfg0); |
8239 |
+- |
8240 |
++ intel_gpio_set_gpio_mode(padcfg0); |
8241 |
+ /* Disable TX buffer and enable RX (this will be input) */ |
8242 |
+ __intel_gpio_set_direction(padcfg0, true); |
8243 |
+ |
8244 |
+@@ -935,6 +940,8 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned type) |
8245 |
+ |
8246 |
+ raw_spin_lock_irqsave(&pctrl->lock, flags); |
8247 |
+ |
8248 |
++ intel_gpio_set_gpio_mode(reg); |
8249 |
++ |
8250 |
+ value = readl(reg); |
8251 |
+ |
8252 |
+ value &= ~(PADCFG0_RXEVCFG_MASK | PADCFG0_RXINV); |
8253 |
+diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c |
8254 |
+index 9c950bbf07ba..447763aad815 100644 |
8255 |
+--- a/drivers/pinctrl/pinctrl-mcp23s08.c |
8256 |
++++ b/drivers/pinctrl/pinctrl-mcp23s08.c |
8257 |
+@@ -891,16 +891,16 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev, |
8258 |
+ goto fail; |
8259 |
+ } |
8260 |
+ |
8261 |
+- ret = devm_gpiochip_add_data(dev, &mcp->chip, mcp); |
8262 |
+- if (ret < 0) |
8263 |
+- goto fail; |
8264 |
+- |
8265 |
+ if (mcp->irq && mcp->irq_controller) { |
8266 |
+ ret = mcp23s08_irq_setup(mcp); |
8267 |
+ if (ret) |
8268 |
+ goto fail; |
8269 |
+ } |
8270 |
+ |
8271 |
++ ret = devm_gpiochip_add_data(dev, &mcp->chip, mcp); |
8272 |
++ if (ret < 0) |
8273 |
++ goto fail; |
8274 |
++ |
8275 |
+ mcp->pinctrl_desc.name = "mcp23xxx-pinctrl"; |
8276 |
+ mcp->pinctrl_desc.pctlops = &mcp_pinctrl_ops; |
8277 |
+ mcp->pinctrl_desc.confops = &mcp_pinconf_ops; |
8278 |
+diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c |
8279 |
+index 7450f5118445..70a0228f4e7f 100644 |
8280 |
+--- a/drivers/pinctrl/pinctrl-sx150x.c |
8281 |
++++ b/drivers/pinctrl/pinctrl-sx150x.c |
8282 |
+@@ -1144,6 +1144,27 @@ static int sx150x_probe(struct i2c_client *client, |
8283 |
+ if (ret) |
8284 |
+ return ret; |
8285 |
+ |
8286 |
++ /* Pinctrl_desc */ |
8287 |
++ pctl->pinctrl_desc.name = "sx150x-pinctrl"; |
8288 |
++ pctl->pinctrl_desc.pctlops = &sx150x_pinctrl_ops; |
8289 |
++ pctl->pinctrl_desc.confops = &sx150x_pinconf_ops; |
8290 |
++ pctl->pinctrl_desc.pins = pctl->data->pins; |
8291 |
++ pctl->pinctrl_desc.npins = pctl->data->npins; |
8292 |
++ pctl->pinctrl_desc.owner = THIS_MODULE; |
8293 |
++ |
8294 |
++ ret = devm_pinctrl_register_and_init(dev, &pctl->pinctrl_desc, |
8295 |
++ pctl, &pctl->pctldev); |
8296 |
++ if (ret) { |
8297 |
++ dev_err(dev, "Failed to register pinctrl device\n"); |
8298 |
++ return ret; |
8299 |
++ } |
8300 |
++ |
8301 |
++ ret = pinctrl_enable(pctl->pctldev); |
8302 |
++ if (ret) { |
8303 |
++ dev_err(dev, "Failed to enable pinctrl device\n"); |
8304 |
++ return ret; |
8305 |
++ } |
8306 |
++ |
8307 |
+ /* Register GPIO controller */ |
8308 |
+ pctl->gpio.label = devm_kstrdup(dev, client->name, GFP_KERNEL); |
8309 |
+ pctl->gpio.base = -1; |
8310 |
+@@ -1172,6 +1193,11 @@ static int sx150x_probe(struct i2c_client *client, |
8311 |
+ if (ret) |
8312 |
+ return ret; |
8313 |
+ |
8314 |
++ ret = gpiochip_add_pin_range(&pctl->gpio, dev_name(dev), |
8315 |
++ 0, 0, pctl->data->npins); |
8316 |
++ if (ret) |
8317 |
++ return ret; |
8318 |
++ |
8319 |
+ /* Add Interrupt support if an irq is specified */ |
8320 |
+ if (client->irq > 0) { |
8321 |
+ pctl->irq_chip.name = devm_kstrdup(dev, client->name, |
8322 |
+@@ -1217,20 +1243,6 @@ static int sx150x_probe(struct i2c_client *client, |
8323 |
+ client->irq); |
8324 |
+ } |
8325 |
+ |
8326 |
+- /* Pinctrl_desc */ |
8327 |
+- pctl->pinctrl_desc.name = "sx150x-pinctrl"; |
8328 |
+- pctl->pinctrl_desc.pctlops = &sx150x_pinctrl_ops; |
8329 |
+- pctl->pinctrl_desc.confops = &sx150x_pinconf_ops; |
8330 |
+- pctl->pinctrl_desc.pins = pctl->data->pins; |
8331 |
+- pctl->pinctrl_desc.npins = pctl->data->npins; |
8332 |
+- pctl->pinctrl_desc.owner = THIS_MODULE; |
8333 |
+- |
8334 |
+- pctl->pctldev = pinctrl_register(&pctl->pinctrl_desc, dev, pctl); |
8335 |
+- if (IS_ERR(pctl->pctldev)) { |
8336 |
+- dev_err(dev, "Failed to register pinctrl device\n"); |
8337 |
+- return PTR_ERR(pctl->pctldev); |
8338 |
+- } |
8339 |
+- |
8340 |
+ return 0; |
8341 |
+ } |
8342 |
+ |
8343 |
+diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c |
8344 |
+index 76b8b7eed0c0..0b6467206f8e 100644 |
8345 |
+--- a/drivers/scsi/cxlflash/main.c |
8346 |
++++ b/drivers/scsi/cxlflash/main.c |
8347 |
+@@ -620,6 +620,7 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp) |
8348 |
+ cmd->parent = afu; |
8349 |
+ cmd->hwq_index = hwq_index; |
8350 |
+ |
8351 |
++ cmd->sa.ioasc = 0; |
8352 |
+ cmd->rcb.ctx_id = hwq->ctx_hndl; |
8353 |
+ cmd->rcb.msi = SISL_MSI_RRQ_UPDATED; |
8354 |
+ cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel); |
8355 |
+diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c |
8356 |
+index fe3a0da3ec97..57bf43e34863 100644 |
8357 |
+--- a/drivers/scsi/hosts.c |
8358 |
++++ b/drivers/scsi/hosts.c |
8359 |
+@@ -318,6 +318,9 @@ static void scsi_host_dev_release(struct device *dev) |
8360 |
+ |
8361 |
+ scsi_proc_hostdir_rm(shost->hostt); |
8362 |
+ |
8363 |
++ /* Wait for functions invoked through call_rcu(&shost->rcu, ...) */ |
8364 |
++ rcu_barrier(); |
8365 |
++ |
8366 |
+ if (shost->tmf_work_q) |
8367 |
+ destroy_workqueue(shost->tmf_work_q); |
8368 |
+ if (shost->ehandler) |
8369 |
+@@ -325,6 +328,8 @@ static void scsi_host_dev_release(struct device *dev) |
8370 |
+ if (shost->work_q) |
8371 |
+ destroy_workqueue(shost->work_q); |
8372 |
+ |
8373 |
++ destroy_rcu_head(&shost->rcu); |
8374 |
++ |
8375 |
+ if (shost->shost_state == SHOST_CREATED) { |
8376 |
+ /* |
8377 |
+ * Free the shost_dev device name here if scsi_host_alloc() |
8378 |
+@@ -399,6 +404,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) |
8379 |
+ INIT_LIST_HEAD(&shost->starved_list); |
8380 |
+ init_waitqueue_head(&shost->host_wait); |
8381 |
+ mutex_init(&shost->scan_mutex); |
8382 |
++ init_rcu_head(&shost->rcu); |
8383 |
+ |
8384 |
+ index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL); |
8385 |
+ if (index < 0) |
8386 |
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c |
8387 |
+index 6acf1bb1d320..25612ccf6ff2 100644 |
8388 |
+--- a/drivers/scsi/lpfc/lpfc_init.c |
8389 |
++++ b/drivers/scsi/lpfc/lpfc_init.c |
8390 |
+@@ -9413,44 +9413,62 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) |
8391 |
+ lpfc_sli4_bar0_register_memmap(phba, if_type); |
8392 |
+ } |
8393 |
+ |
8394 |
+- if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && |
8395 |
+- (pci_resource_start(pdev, PCI_64BIT_BAR2))) { |
8396 |
+- /* |
8397 |
+- * Map SLI4 if type 0 HBA Control Register base to a kernel |
8398 |
+- * virtual address and setup the registers. |
8399 |
+- */ |
8400 |
+- phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); |
8401 |
+- bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); |
8402 |
+- phba->sli4_hba.ctrl_regs_memmap_p = |
8403 |
+- ioremap(phba->pci_bar1_map, bar1map_len); |
8404 |
+- if (!phba->sli4_hba.ctrl_regs_memmap_p) { |
8405 |
+- dev_printk(KERN_ERR, &pdev->dev, |
8406 |
+- "ioremap failed for SLI4 HBA control registers.\n"); |
8407 |
++ if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { |
8408 |
++ if (pci_resource_start(pdev, PCI_64BIT_BAR2)) { |
8409 |
++ /* |
8410 |
++ * Map SLI4 if type 0 HBA Control Register base to a |
8411 |
++ * kernel virtual address and setup the registers. |
8412 |
++ */ |
8413 |
++ phba->pci_bar1_map = pci_resource_start(pdev, |
8414 |
++ PCI_64BIT_BAR2); |
8415 |
++ bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); |
8416 |
++ phba->sli4_hba.ctrl_regs_memmap_p = |
8417 |
++ ioremap(phba->pci_bar1_map, |
8418 |
++ bar1map_len); |
8419 |
++ if (!phba->sli4_hba.ctrl_regs_memmap_p) { |
8420 |
++ dev_err(&pdev->dev, |
8421 |
++ "ioremap failed for SLI4 HBA " |
8422 |
++ "control registers.\n"); |
8423 |
++ error = -ENOMEM; |
8424 |
++ goto out_iounmap_conf; |
8425 |
++ } |
8426 |
++ phba->pci_bar2_memmap_p = |
8427 |
++ phba->sli4_hba.ctrl_regs_memmap_p; |
8428 |
++ lpfc_sli4_bar1_register_memmap(phba); |
8429 |
++ } else { |
8430 |
++ error = -ENOMEM; |
8431 |
+ goto out_iounmap_conf; |
8432 |
+ } |
8433 |
+- phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p; |
8434 |
+- lpfc_sli4_bar1_register_memmap(phba); |
8435 |
+ } |
8436 |
+ |
8437 |
+- if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && |
8438 |
+- (pci_resource_start(pdev, PCI_64BIT_BAR4))) { |
8439 |
+- /* |
8440 |
+- * Map SLI4 if type 0 HBA Doorbell Register base to a kernel |
8441 |
+- * virtual address and setup the registers. |
8442 |
+- */ |
8443 |
+- phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); |
8444 |
+- bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); |
8445 |
+- phba->sli4_hba.drbl_regs_memmap_p = |
8446 |
+- ioremap(phba->pci_bar2_map, bar2map_len); |
8447 |
+- if (!phba->sli4_hba.drbl_regs_memmap_p) { |
8448 |
+- dev_printk(KERN_ERR, &pdev->dev, |
8449 |
+- "ioremap failed for SLI4 HBA doorbell registers.\n"); |
8450 |
+- goto out_iounmap_ctrl; |
8451 |
+- } |
8452 |
+- phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; |
8453 |
+- error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); |
8454 |
+- if (error) |
8455 |
++ if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { |
8456 |
++ if (pci_resource_start(pdev, PCI_64BIT_BAR4)) { |
8457 |
++ /* |
8458 |
++ * Map SLI4 if type 0 HBA Doorbell Register base to |
8459 |
++ * a kernel virtual address and setup the registers. |
8460 |
++ */ |
8461 |
++ phba->pci_bar2_map = pci_resource_start(pdev, |
8462 |
++ PCI_64BIT_BAR4); |
8463 |
++ bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); |
8464 |
++ phba->sli4_hba.drbl_regs_memmap_p = |
8465 |
++ ioremap(phba->pci_bar2_map, |
8466 |
++ bar2map_len); |
8467 |
++ if (!phba->sli4_hba.drbl_regs_memmap_p) { |
8468 |
++ dev_err(&pdev->dev, |
8469 |
++ "ioremap failed for SLI4 HBA" |
8470 |
++ " doorbell registers.\n"); |
8471 |
++ error = -ENOMEM; |
8472 |
++ goto out_iounmap_ctrl; |
8473 |
++ } |
8474 |
++ phba->pci_bar4_memmap_p = |
8475 |
++ phba->sli4_hba.drbl_regs_memmap_p; |
8476 |
++ error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); |
8477 |
++ if (error) |
8478 |
++ goto out_iounmap_all; |
8479 |
++ } else { |
8480 |
++ error = -ENOMEM; |
8481 |
+ goto out_iounmap_all; |
8482 |
++ } |
8483 |
+ } |
8484 |
+ |
8485 |
+ return 0; |
8486 |
+diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c |
8487 |
+index dab876c65473..fa504ba83ade 100644 |
8488 |
+--- a/drivers/scsi/scsi_error.c |
8489 |
++++ b/drivers/scsi/scsi_error.c |
8490 |
+@@ -220,6 +220,17 @@ static void scsi_eh_reset(struct scsi_cmnd *scmd) |
8491 |
+ } |
8492 |
+ } |
8493 |
+ |
8494 |
++static void scsi_eh_inc_host_failed(struct rcu_head *head) |
8495 |
++{ |
8496 |
++ struct Scsi_Host *shost = container_of(head, typeof(*shost), rcu); |
8497 |
++ unsigned long flags; |
8498 |
++ |
8499 |
++ spin_lock_irqsave(shost->host_lock, flags); |
8500 |
++ shost->host_failed++; |
8501 |
++ scsi_eh_wakeup(shost); |
8502 |
++ spin_unlock_irqrestore(shost->host_lock, flags); |
8503 |
++} |
8504 |
++ |
8505 |
+ /** |
8506 |
+ * scsi_eh_scmd_add - add scsi cmd to error handling. |
8507 |
+ * @scmd: scmd to run eh on. |
8508 |
+@@ -242,9 +253,12 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd) |
8509 |
+ |
8510 |
+ scsi_eh_reset(scmd); |
8511 |
+ list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q); |
8512 |
+- shost->host_failed++; |
8513 |
+- scsi_eh_wakeup(shost); |
8514 |
+ spin_unlock_irqrestore(shost->host_lock, flags); |
8515 |
++ /* |
8516 |
++ * Ensure that all tasks observe the host state change before the |
8517 |
++ * host_failed change. |
8518 |
++ */ |
8519 |
++ call_rcu(&shost->rcu, scsi_eh_inc_host_failed); |
8520 |
+ } |
8521 |
+ |
8522 |
+ /** |
8523 |
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c |
8524 |
+index 635cfa1f2ace..0d3696e9dddd 100644 |
8525 |
+--- a/drivers/scsi/scsi_lib.c |
8526 |
++++ b/drivers/scsi/scsi_lib.c |
8527 |
+@@ -318,22 +318,39 @@ static void scsi_init_cmd_errh(struct scsi_cmnd *cmd) |
8528 |
+ cmd->cmd_len = scsi_command_size(cmd->cmnd); |
8529 |
+ } |
8530 |
+ |
8531 |
+-void scsi_device_unbusy(struct scsi_device *sdev) |
8532 |
++/* |
8533 |
++ * Decrement the host_busy counter and wake up the error handler if necessary. |
8534 |
++ * Avoid as follows that the error handler is not woken up if shost->host_busy |
8535 |
++ * == shost->host_failed: use call_rcu() in scsi_eh_scmd_add() in combination |
8536 |
++ * with an RCU read lock in this function to ensure that this function in its |
8537 |
++ * entirety either finishes before scsi_eh_scmd_add() increases the |
8538 |
++ * host_failed counter or that it notices the shost state change made by |
8539 |
++ * scsi_eh_scmd_add(). |
8540 |
++ */ |
8541 |
++static void scsi_dec_host_busy(struct Scsi_Host *shost) |
8542 |
+ { |
8543 |
+- struct Scsi_Host *shost = sdev->host; |
8544 |
+- struct scsi_target *starget = scsi_target(sdev); |
8545 |
+ unsigned long flags; |
8546 |
+ |
8547 |
++ rcu_read_lock(); |
8548 |
+ atomic_dec(&shost->host_busy); |
8549 |
+- if (starget->can_queue > 0) |
8550 |
+- atomic_dec(&starget->target_busy); |
8551 |
+- |
8552 |
+- if (unlikely(scsi_host_in_recovery(shost) && |
8553 |
+- (shost->host_failed || shost->host_eh_scheduled))) { |
8554 |
++ if (unlikely(scsi_host_in_recovery(shost))) { |
8555 |
+ spin_lock_irqsave(shost->host_lock, flags); |
8556 |
+- scsi_eh_wakeup(shost); |
8557 |
++ if (shost->host_failed || shost->host_eh_scheduled) |
8558 |
++ scsi_eh_wakeup(shost); |
8559 |
+ spin_unlock_irqrestore(shost->host_lock, flags); |
8560 |
+ } |
8561 |
++ rcu_read_unlock(); |
8562 |
++} |
8563 |
++ |
8564 |
++void scsi_device_unbusy(struct scsi_device *sdev) |
8565 |
++{ |
8566 |
++ struct Scsi_Host *shost = sdev->host; |
8567 |
++ struct scsi_target *starget = scsi_target(sdev); |
8568 |
++ |
8569 |
++ scsi_dec_host_busy(shost); |
8570 |
++ |
8571 |
++ if (starget->can_queue > 0) |
8572 |
++ atomic_dec(&starget->target_busy); |
8573 |
+ |
8574 |
+ atomic_dec(&sdev->device_busy); |
8575 |
+ } |
8576 |
+@@ -1532,7 +1549,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q, |
8577 |
+ list_add_tail(&sdev->starved_entry, &shost->starved_list); |
8578 |
+ spin_unlock_irq(shost->host_lock); |
8579 |
+ out_dec: |
8580 |
+- atomic_dec(&shost->host_busy); |
8581 |
++ scsi_dec_host_busy(shost); |
8582 |
+ return 0; |
8583 |
+ } |
8584 |
+ |
8585 |
+@@ -1993,7 +2010,7 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, |
8586 |
+ return BLK_STS_OK; |
8587 |
+ |
8588 |
+ out_dec_host_busy: |
8589 |
+- atomic_dec(&shost->host_busy); |
8590 |
++ scsi_dec_host_busy(shost); |
8591 |
+ out_dec_target_busy: |
8592 |
+ if (scsi_target(sdev)->can_queue > 0) |
8593 |
+ atomic_dec(&scsi_target(sdev)->target_busy); |
8594 |
+diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c |
8595 |
+index db0572733712..ab30a0f5129c 100644 |
8596 |
+--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c |
8597 |
++++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c |
8598 |
+@@ -119,6 +119,7 @@ static struct shash_alg alg = { |
8599 |
+ .cra_name = "adler32", |
8600 |
+ .cra_driver_name = "adler32-zlib", |
8601 |
+ .cra_priority = 100, |
8602 |
++ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, |
8603 |
+ .cra_blocksize = CHKSUM_BLOCK_SIZE, |
8604 |
+ .cra_ctxsize = sizeof(u32), |
8605 |
+ .cra_module = THIS_MODULE, |
8606 |
+diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig |
8607 |
+index c722cbfdc7e6..3ece1335ba84 100644 |
8608 |
+--- a/drivers/watchdog/Kconfig |
8609 |
++++ b/drivers/watchdog/Kconfig |
8610 |
+@@ -1451,7 +1451,7 @@ config RC32434_WDT |
8611 |
+ |
8612 |
+ config INDYDOG |
8613 |
+ tristate "Indy/I2 Hardware Watchdog" |
8614 |
+- depends on SGI_HAS_INDYDOG || (MIPS && COMPILE_TEST) |
8615 |
++ depends on SGI_HAS_INDYDOG |
8616 |
+ help |
8617 |
+ Hardware driver for the Indy's/I2's watchdog. This is a |
8618 |
+ watchdog timer that will reboot the machine after a 60 second |
8619 |
+diff --git a/drivers/watchdog/gpio_wdt.c b/drivers/watchdog/gpio_wdt.c |
8620 |
+index cb66c2f99ff1..7a6279daa8b9 100644 |
8621 |
+--- a/drivers/watchdog/gpio_wdt.c |
8622 |
++++ b/drivers/watchdog/gpio_wdt.c |
8623 |
+@@ -80,7 +80,8 @@ static int gpio_wdt_stop(struct watchdog_device *wdd) |
8624 |
+ |
8625 |
+ if (!priv->always_running) { |
8626 |
+ gpio_wdt_disable(priv); |
8627 |
+- clear_bit(WDOG_HW_RUNNING, &wdd->status); |
8628 |
++ } else { |
8629 |
++ set_bit(WDOG_HW_RUNNING, &wdd->status); |
8630 |
+ } |
8631 |
+ |
8632 |
+ return 0; |
8633 |
+diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c |
8634 |
+index 4874b0f18650..518dfa1047cb 100644 |
8635 |
+--- a/drivers/watchdog/imx2_wdt.c |
8636 |
++++ b/drivers/watchdog/imx2_wdt.c |
8637 |
+@@ -169,15 +169,21 @@ static int imx2_wdt_ping(struct watchdog_device *wdog) |
8638 |
+ return 0; |
8639 |
+ } |
8640 |
+ |
8641 |
+-static int imx2_wdt_set_timeout(struct watchdog_device *wdog, |
8642 |
+- unsigned int new_timeout) |
8643 |
++static void __imx2_wdt_set_timeout(struct watchdog_device *wdog, |
8644 |
++ unsigned int new_timeout) |
8645 |
+ { |
8646 |
+ struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog); |
8647 |
+ |
8648 |
+- wdog->timeout = new_timeout; |
8649 |
+- |
8650 |
+ regmap_update_bits(wdev->regmap, IMX2_WDT_WCR, IMX2_WDT_WCR_WT, |
8651 |
+ WDOG_SEC_TO_COUNT(new_timeout)); |
8652 |
++} |
8653 |
++ |
8654 |
++static int imx2_wdt_set_timeout(struct watchdog_device *wdog, |
8655 |
++ unsigned int new_timeout) |
8656 |
++{ |
8657 |
++ __imx2_wdt_set_timeout(wdog, new_timeout); |
8658 |
++ |
8659 |
++ wdog->timeout = new_timeout; |
8660 |
+ return 0; |
8661 |
+ } |
8662 |
+ |
8663 |
+@@ -371,7 +377,11 @@ static int imx2_wdt_suspend(struct device *dev) |
8664 |
+ |
8665 |
+ /* The watchdog IP block is running */ |
8666 |
+ if (imx2_wdt_is_running(wdev)) { |
8667 |
+- imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME); |
8668 |
++ /* |
8669 |
++ * Don't update wdog->timeout, we'll restore the current value |
8670 |
++ * during resume. |
8671 |
++ */ |
8672 |
++ __imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME); |
8673 |
+ imx2_wdt_ping(wdog); |
8674 |
+ } |
8675 |
+ |
8676 |
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c |
8677 |
+index c71afd424900..5eaedff28a32 100644 |
8678 |
+--- a/fs/btrfs/inode.c |
8679 |
++++ b/fs/btrfs/inode.c |
8680 |
+@@ -2101,8 +2101,15 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work) |
8681 |
+ goto out; |
8682 |
+ } |
8683 |
+ |
8684 |
+- btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state, |
8685 |
+- 0); |
8686 |
++ ret = btrfs_set_extent_delalloc(inode, page_start, page_end, |
8687 |
++ &cached_state, 0); |
8688 |
++ if (ret) { |
8689 |
++ mapping_set_error(page->mapping, ret); |
8690 |
++ end_extent_writepage(page, ret, page_start, page_end); |
8691 |
++ ClearPageChecked(page); |
8692 |
++ goto out; |
8693 |
++ } |
8694 |
++ |
8695 |
+ ClearPageChecked(page); |
8696 |
+ set_page_dirty(page); |
8697 |
+ out: |
8698 |
+diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c |
8699 |
+index 24a62224b24b..6154825c30e1 100644 |
8700 |
+--- a/fs/btrfs/raid56.c |
8701 |
++++ b/fs/btrfs/raid56.c |
8702 |
+@@ -1432,14 +1432,13 @@ static int fail_bio_stripe(struct btrfs_raid_bio *rbio, |
8703 |
+ */ |
8704 |
+ static void set_bio_pages_uptodate(struct bio *bio) |
8705 |
+ { |
8706 |
+- struct bio_vec bvec; |
8707 |
+- struct bvec_iter iter; |
8708 |
++ struct bio_vec *bvec; |
8709 |
++ int i; |
8710 |
+ |
8711 |
+- if (bio_flagged(bio, BIO_CLONED)) |
8712 |
+- bio->bi_iter = btrfs_io_bio(bio)->iter; |
8713 |
++ ASSERT(!bio_flagged(bio, BIO_CLONED)); |
8714 |
+ |
8715 |
+- bio_for_each_segment(bvec, bio, iter) |
8716 |
+- SetPageUptodate(bvec.bv_page); |
8717 |
++ bio_for_each_segment_all(bvec, bio, i) |
8718 |
++ SetPageUptodate(bvec->bv_page); |
8719 |
+ } |
8720 |
+ |
8721 |
+ /* |
8722 |
+diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c |
8723 |
+index 68abbb0db608..f2b0a7f124da 100644 |
8724 |
+--- a/fs/cifs/cifsencrypt.c |
8725 |
++++ b/fs/cifs/cifsencrypt.c |
8726 |
+@@ -325,9 +325,8 @@ int calc_lanman_hash(const char *password, const char *cryptkey, bool encrypt, |
8727 |
+ { |
8728 |
+ int i; |
8729 |
+ int rc; |
8730 |
+- char password_with_pad[CIFS_ENCPWD_SIZE]; |
8731 |
++ char password_with_pad[CIFS_ENCPWD_SIZE] = {0}; |
8732 |
+ |
8733 |
+- memset(password_with_pad, 0, CIFS_ENCPWD_SIZE); |
8734 |
+ if (password) |
8735 |
+ strncpy(password_with_pad, password, CIFS_ENCPWD_SIZE); |
8736 |
+ |
8737 |
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c |
8738 |
+index 0bfc2280436d..f7db2fedfa8c 100644 |
8739 |
+--- a/fs/cifs/connect.c |
8740 |
++++ b/fs/cifs/connect.c |
8741 |
+@@ -1707,7 +1707,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, |
8742 |
+ tmp_end++; |
8743 |
+ if (!(tmp_end < end && tmp_end[1] == delim)) { |
8744 |
+ /* No it is not. Set the password to NULL */ |
8745 |
+- kfree(vol->password); |
8746 |
++ kzfree(vol->password); |
8747 |
+ vol->password = NULL; |
8748 |
+ break; |
8749 |
+ } |
8750 |
+@@ -1745,7 +1745,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, |
8751 |
+ options = end; |
8752 |
+ } |
8753 |
+ |
8754 |
+- kfree(vol->password); |
8755 |
++ kzfree(vol->password); |
8756 |
+ /* Now build new password string */ |
8757 |
+ temp_len = strlen(value); |
8758 |
+ vol->password = kzalloc(temp_len+1, GFP_KERNEL); |
8759 |
+@@ -4235,7 +4235,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid) |
8760 |
+ reset_cifs_unix_caps(0, tcon, NULL, vol_info); |
8761 |
+ out: |
8762 |
+ kfree(vol_info->username); |
8763 |
+- kfree(vol_info->password); |
8764 |
++ kzfree(vol_info->password); |
8765 |
+ kfree(vol_info); |
8766 |
+ |
8767 |
+ return tcon; |
8768 |
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c |
8769 |
+index 92fdf9c35de2..7d6539a04fac 100644 |
8770 |
+--- a/fs/cifs/file.c |
8771 |
++++ b/fs/cifs/file.c |
8772 |
+@@ -3488,20 +3488,18 @@ static const struct vm_operations_struct cifs_file_vm_ops = { |
8773 |
+ |
8774 |
+ int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma) |
8775 |
+ { |
8776 |
+- int rc, xid; |
8777 |
++ int xid, rc = 0; |
8778 |
+ struct inode *inode = file_inode(file); |
8779 |
+ |
8780 |
+ xid = get_xid(); |
8781 |
+ |
8782 |
+- if (!CIFS_CACHE_READ(CIFS_I(inode))) { |
8783 |
++ if (!CIFS_CACHE_READ(CIFS_I(inode))) |
8784 |
+ rc = cifs_zap_mapping(inode); |
8785 |
+- if (rc) |
8786 |
+- return rc; |
8787 |
+- } |
8788 |
+- |
8789 |
+- rc = generic_file_mmap(file, vma); |
8790 |
+- if (rc == 0) |
8791 |
++ if (!rc) |
8792 |
++ rc = generic_file_mmap(file, vma); |
8793 |
++ if (!rc) |
8794 |
+ vma->vm_ops = &cifs_file_vm_ops; |
8795 |
++ |
8796 |
+ free_xid(xid); |
8797 |
+ return rc; |
8798 |
+ } |
8799 |
+@@ -3511,16 +3509,16 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma) |
8800 |
+ int rc, xid; |
8801 |
+ |
8802 |
+ xid = get_xid(); |
8803 |
++ |
8804 |
+ rc = cifs_revalidate_file(file); |
8805 |
+- if (rc) { |
8806 |
++ if (rc) |
8807 |
+ cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n", |
8808 |
+ rc); |
8809 |
+- free_xid(xid); |
8810 |
+- return rc; |
8811 |
+- } |
8812 |
+- rc = generic_file_mmap(file, vma); |
8813 |
+- if (rc == 0) |
8814 |
++ if (!rc) |
8815 |
++ rc = generic_file_mmap(file, vma); |
8816 |
++ if (!rc) |
8817 |
+ vma->vm_ops = &cifs_file_vm_ops; |
8818 |
++ |
8819 |
+ free_xid(xid); |
8820 |
+ return rc; |
8821 |
+ } |
8822 |
+diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c |
8823 |
+index eea93ac15ef0..a0dbced4a45c 100644 |
8824 |
+--- a/fs/cifs/misc.c |
8825 |
++++ b/fs/cifs/misc.c |
8826 |
+@@ -98,14 +98,11 @@ sesInfoFree(struct cifs_ses *buf_to_free) |
8827 |
+ kfree(buf_to_free->serverOS); |
8828 |
+ kfree(buf_to_free->serverDomain); |
8829 |
+ kfree(buf_to_free->serverNOS); |
8830 |
+- if (buf_to_free->password) { |
8831 |
+- memset(buf_to_free->password, 0, strlen(buf_to_free->password)); |
8832 |
+- kfree(buf_to_free->password); |
8833 |
+- } |
8834 |
++ kzfree(buf_to_free->password); |
8835 |
+ kfree(buf_to_free->user_name); |
8836 |
+ kfree(buf_to_free->domainName); |
8837 |
+- kfree(buf_to_free->auth_key.response); |
8838 |
+- kfree(buf_to_free); |
8839 |
++ kzfree(buf_to_free->auth_key.response); |
8840 |
++ kzfree(buf_to_free); |
8841 |
+ } |
8842 |
+ |
8843 |
+ struct cifs_tcon * |
8844 |
+@@ -136,10 +133,7 @@ tconInfoFree(struct cifs_tcon *buf_to_free) |
8845 |
+ } |
8846 |
+ atomic_dec(&tconInfoAllocCount); |
8847 |
+ kfree(buf_to_free->nativeFileSystem); |
8848 |
+- if (buf_to_free->password) { |
8849 |
+- memset(buf_to_free->password, 0, strlen(buf_to_free->password)); |
8850 |
+- kfree(buf_to_free->password); |
8851 |
+- } |
8852 |
++ kzfree(buf_to_free->password); |
8853 |
+ kfree(buf_to_free); |
8854 |
+ } |
8855 |
+ |
8856 |
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c |
8857 |
+index 01346b8b6edb..66af1f8a13cc 100644 |
8858 |
+--- a/fs/cifs/smb2pdu.c |
8859 |
++++ b/fs/cifs/smb2pdu.c |
8860 |
+@@ -733,8 +733,7 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon) |
8861 |
+ } |
8862 |
+ |
8863 |
+ /* check validate negotiate info response matches what we got earlier */ |
8864 |
+- if (pneg_rsp->Dialect != |
8865 |
+- cpu_to_le16(tcon->ses->server->vals->protocol_id)) |
8866 |
++ if (pneg_rsp->Dialect != cpu_to_le16(tcon->ses->server->dialect)) |
8867 |
+ goto vneg_out; |
8868 |
+ |
8869 |
+ if (pneg_rsp->SecurityMode != cpu_to_le16(tcon->ses->server->sec_mode)) |
8870 |
+diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c |
8871 |
+index 7eae33ffa3fc..e31d6ed3ec32 100644 |
8872 |
+--- a/fs/devpts/inode.c |
8873 |
++++ b/fs/devpts/inode.c |
8874 |
+@@ -168,11 +168,11 @@ struct vfsmount *devpts_mntget(struct file *filp, struct pts_fs_info *fsi) |
8875 |
+ dput(path.dentry); |
8876 |
+ if (err) { |
8877 |
+ mntput(path.mnt); |
8878 |
+- path.mnt = ERR_PTR(err); |
8879 |
++ return ERR_PTR(err); |
8880 |
+ } |
8881 |
+ if (DEVPTS_SB(path.mnt->mnt_sb) != fsi) { |
8882 |
+ mntput(path.mnt); |
8883 |
+- path.mnt = ERR_PTR(-ENODEV); |
8884 |
++ return ERR_PTR(-ENODEV); |
8885 |
+ } |
8886 |
+ return path.mnt; |
8887 |
+ } |
8888 |
+diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c |
8889 |
+index 9698e51656b1..d8f49c412f50 100644 |
8890 |
+--- a/fs/kernfs/file.c |
8891 |
++++ b/fs/kernfs/file.c |
8892 |
+@@ -275,7 +275,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf, |
8893 |
+ { |
8894 |
+ struct kernfs_open_file *of = kernfs_of(file); |
8895 |
+ const struct kernfs_ops *ops; |
8896 |
+- size_t len; |
8897 |
++ ssize_t len; |
8898 |
+ char *buf; |
8899 |
+ |
8900 |
+ if (of->atomic_write_len) { |
8901 |
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c |
8902 |
+index d2972d537469..8c10b0562e75 100644 |
8903 |
+--- a/fs/nfs/direct.c |
8904 |
++++ b/fs/nfs/direct.c |
8905 |
+@@ -775,10 +775,8 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr) |
8906 |
+ |
8907 |
+ spin_lock(&dreq->lock); |
8908 |
+ |
8909 |
+- if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) { |
8910 |
+- dreq->flags = 0; |
8911 |
++ if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) |
8912 |
+ dreq->error = hdr->error; |
8913 |
+- } |
8914 |
+ if (dreq->error == 0) { |
8915 |
+ nfs_direct_good_bytes(dreq, hdr); |
8916 |
+ if (nfs_write_need_commit(hdr)) { |
8917 |
+diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c |
8918 |
+index 508126eb49f9..3db2b7464748 100644 |
8919 |
+--- a/fs/nfs/filelayout/filelayout.c |
8920 |
++++ b/fs/nfs/filelayout/filelayout.c |
8921 |
+@@ -895,9 +895,7 @@ fl_pnfs_update_layout(struct inode *ino, |
8922 |
+ |
8923 |
+ lseg = pnfs_update_layout(ino, ctx, pos, count, iomode, strict_iomode, |
8924 |
+ gfp_flags); |
8925 |
+- if (!lseg) |
8926 |
+- lseg = ERR_PTR(-ENOMEM); |
8927 |
+- if (IS_ERR(lseg)) |
8928 |
++ if (IS_ERR_OR_NULL(lseg)) |
8929 |
+ goto out; |
8930 |
+ |
8931 |
+ lo = NFS_I(ino)->layout; |
8932 |
+diff --git a/fs/nfs/io.c b/fs/nfs/io.c |
8933 |
+index 20fef85d2bb1..9034b4926909 100644 |
8934 |
+--- a/fs/nfs/io.c |
8935 |
++++ b/fs/nfs/io.c |
8936 |
+@@ -99,7 +99,7 @@ static void nfs_block_buffered(struct nfs_inode *nfsi, struct inode *inode) |
8937 |
+ { |
8938 |
+ if (!test_bit(NFS_INO_ODIRECT, &nfsi->flags)) { |
8939 |
+ set_bit(NFS_INO_ODIRECT, &nfsi->flags); |
8940 |
+- nfs_wb_all(inode); |
8941 |
++ nfs_sync_mapping(inode->i_mapping); |
8942 |
+ } |
8943 |
+ } |
8944 |
+ |
8945 |
+diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c |
8946 |
+index 30426c1a1bbd..22dc30a679a0 100644 |
8947 |
+--- a/fs/nfs/nfs4idmap.c |
8948 |
++++ b/fs/nfs/nfs4idmap.c |
8949 |
+@@ -568,9 +568,13 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons, |
8950 |
+ struct idmap_msg *im; |
8951 |
+ struct idmap *idmap = (struct idmap *)aux; |
8952 |
+ struct key *key = cons->key; |
8953 |
+- int ret = -ENOMEM; |
8954 |
++ int ret = -ENOKEY; |
8955 |
++ |
8956 |
++ if (!aux) |
8957 |
++ goto out1; |
8958 |
+ |
8959 |
+ /* msg and im are freed in idmap_pipe_destroy_msg */ |
8960 |
++ ret = -ENOMEM; |
8961 |
+ data = kzalloc(sizeof(*data), GFP_KERNEL); |
8962 |
+ if (!data) |
8963 |
+ goto out1; |
8964 |
+diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c |
8965 |
+index 14ed9791ec9c..549c916d2859 100644 |
8966 |
+--- a/fs/nfs/nfs4xdr.c |
8967 |
++++ b/fs/nfs/nfs4xdr.c |
8968 |
+@@ -7668,6 +7668,22 @@ nfs4_stat_to_errno(int stat) |
8969 |
+ .p_name = #proc, \ |
8970 |
+ } |
8971 |
+ |
8972 |
++#if defined(CONFIG_NFS_V4_1) |
8973 |
++#define PROC41(proc, argtype, restype) \ |
8974 |
++ PROC(proc, argtype, restype) |
8975 |
++#else |
8976 |
++#define PROC41(proc, argtype, restype) \ |
8977 |
++ STUB(proc) |
8978 |
++#endif |
8979 |
++ |
8980 |
++#if defined(CONFIG_NFS_V4_2) |
8981 |
++#define PROC42(proc, argtype, restype) \ |
8982 |
++ PROC(proc, argtype, restype) |
8983 |
++#else |
8984 |
++#define PROC42(proc, argtype, restype) \ |
8985 |
++ STUB(proc) |
8986 |
++#endif |
8987 |
++ |
8988 |
+ const struct rpc_procinfo nfs4_procedures[] = { |
8989 |
+ PROC(READ, enc_read, dec_read), |
8990 |
+ PROC(WRITE, enc_write, dec_write), |
8991 |
+@@ -7688,7 +7704,6 @@ const struct rpc_procinfo nfs4_procedures[] = { |
8992 |
+ PROC(ACCESS, enc_access, dec_access), |
8993 |
+ PROC(GETATTR, enc_getattr, dec_getattr), |
8994 |
+ PROC(LOOKUP, enc_lookup, dec_lookup), |
8995 |
+- PROC(LOOKUPP, enc_lookupp, dec_lookupp), |
8996 |
+ PROC(LOOKUP_ROOT, enc_lookup_root, dec_lookup_root), |
8997 |
+ PROC(REMOVE, enc_remove, dec_remove), |
8998 |
+ PROC(RENAME, enc_rename, dec_rename), |
8999 |
+@@ -7707,33 +7722,30 @@ const struct rpc_procinfo nfs4_procedures[] = { |
9000 |
+ PROC(RELEASE_LOCKOWNER, enc_release_lockowner, dec_release_lockowner), |
9001 |
+ PROC(SECINFO, enc_secinfo, dec_secinfo), |
9002 |
+ PROC(FSID_PRESENT, enc_fsid_present, dec_fsid_present), |
9003 |
+-#if defined(CONFIG_NFS_V4_1) |
9004 |
+- PROC(EXCHANGE_ID, enc_exchange_id, dec_exchange_id), |
9005 |
+- PROC(CREATE_SESSION, enc_create_session, dec_create_session), |
9006 |
+- PROC(DESTROY_SESSION, enc_destroy_session, dec_destroy_session), |
9007 |
+- PROC(SEQUENCE, enc_sequence, dec_sequence), |
9008 |
+- PROC(GET_LEASE_TIME, enc_get_lease_time, dec_get_lease_time), |
9009 |
+- PROC(RECLAIM_COMPLETE, enc_reclaim_complete, dec_reclaim_complete), |
9010 |
+- PROC(GETDEVICEINFO, enc_getdeviceinfo, dec_getdeviceinfo), |
9011 |
+- PROC(LAYOUTGET, enc_layoutget, dec_layoutget), |
9012 |
+- PROC(LAYOUTCOMMIT, enc_layoutcommit, dec_layoutcommit), |
9013 |
+- PROC(LAYOUTRETURN, enc_layoutreturn, dec_layoutreturn), |
9014 |
+- PROC(SECINFO_NO_NAME, enc_secinfo_no_name, dec_secinfo_no_name), |
9015 |
+- PROC(TEST_STATEID, enc_test_stateid, dec_test_stateid), |
9016 |
+- PROC(FREE_STATEID, enc_free_stateid, dec_free_stateid), |
9017 |
++ PROC41(EXCHANGE_ID, enc_exchange_id, dec_exchange_id), |
9018 |
++ PROC41(CREATE_SESSION, enc_create_session, dec_create_session), |
9019 |
++ PROC41(DESTROY_SESSION, enc_destroy_session, dec_destroy_session), |
9020 |
++ PROC41(SEQUENCE, enc_sequence, dec_sequence), |
9021 |
++ PROC41(GET_LEASE_TIME, enc_get_lease_time, dec_get_lease_time), |
9022 |
++ PROC41(RECLAIM_COMPLETE,enc_reclaim_complete, dec_reclaim_complete), |
9023 |
++ PROC41(GETDEVICEINFO, enc_getdeviceinfo, dec_getdeviceinfo), |
9024 |
++ PROC41(LAYOUTGET, enc_layoutget, dec_layoutget), |
9025 |
++ PROC41(LAYOUTCOMMIT, enc_layoutcommit, dec_layoutcommit), |
9026 |
++ PROC41(LAYOUTRETURN, enc_layoutreturn, dec_layoutreturn), |
9027 |
++ PROC41(SECINFO_NO_NAME, enc_secinfo_no_name, dec_secinfo_no_name), |
9028 |
++ PROC41(TEST_STATEID, enc_test_stateid, dec_test_stateid), |
9029 |
++ PROC41(FREE_STATEID, enc_free_stateid, dec_free_stateid), |
9030 |
+ STUB(GETDEVICELIST), |
9031 |
+- PROC(BIND_CONN_TO_SESSION, |
9032 |
++ PROC41(BIND_CONN_TO_SESSION, |
9033 |
+ enc_bind_conn_to_session, dec_bind_conn_to_session), |
9034 |
+- PROC(DESTROY_CLIENTID, enc_destroy_clientid, dec_destroy_clientid), |
9035 |
+-#endif /* CONFIG_NFS_V4_1 */ |
9036 |
+-#ifdef CONFIG_NFS_V4_2 |
9037 |
+- PROC(SEEK, enc_seek, dec_seek), |
9038 |
+- PROC(ALLOCATE, enc_allocate, dec_allocate), |
9039 |
+- PROC(DEALLOCATE, enc_deallocate, dec_deallocate), |
9040 |
+- PROC(LAYOUTSTATS, enc_layoutstats, dec_layoutstats), |
9041 |
+- PROC(CLONE, enc_clone, dec_clone), |
9042 |
+- PROC(COPY, enc_copy, dec_copy), |
9043 |
+-#endif /* CONFIG_NFS_V4_2 */ |
9044 |
++ PROC41(DESTROY_CLIENTID,enc_destroy_clientid, dec_destroy_clientid), |
9045 |
++ PROC42(SEEK, enc_seek, dec_seek), |
9046 |
++ PROC42(ALLOCATE, enc_allocate, dec_allocate), |
9047 |
++ PROC42(DEALLOCATE, enc_deallocate, dec_deallocate), |
9048 |
++ PROC42(LAYOUTSTATS, enc_layoutstats, dec_layoutstats), |
9049 |
++ PROC42(CLONE, enc_clone, dec_clone), |
9050 |
++ PROC42(COPY, enc_copy, dec_copy), |
9051 |
++ PROC(LOOKUPP, enc_lookupp, dec_lookupp), |
9052 |
+ }; |
9053 |
+ |
9054 |
+ static unsigned int nfs_version4_counts[ARRAY_SIZE(nfs4_procedures)]; |
9055 |
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c |
9056 |
+index 3bcd669a3152..5f2f852ef506 100644 |
9057 |
+--- a/fs/nfs/pnfs.c |
9058 |
++++ b/fs/nfs/pnfs.c |
9059 |
+@@ -2237,7 +2237,7 @@ pnfs_write_through_mds(struct nfs_pageio_descriptor *desc, |
9060 |
+ nfs_pageio_reset_write_mds(desc); |
9061 |
+ mirror->pg_recoalesce = 1; |
9062 |
+ } |
9063 |
+- hdr->release(hdr); |
9064 |
++ hdr->completion_ops->completion(hdr); |
9065 |
+ } |
9066 |
+ |
9067 |
+ static enum pnfs_try_status |
9068 |
+@@ -2360,7 +2360,7 @@ pnfs_read_through_mds(struct nfs_pageio_descriptor *desc, |
9069 |
+ nfs_pageio_reset_read_mds(desc); |
9070 |
+ mirror->pg_recoalesce = 1; |
9071 |
+ } |
9072 |
+- hdr->release(hdr); |
9073 |
++ hdr->completion_ops->completion(hdr); |
9074 |
+ } |
9075 |
+ |
9076 |
+ /* |
9077 |
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c |
9078 |
+index de325804941d..76da415be39a 100644 |
9079 |
+--- a/fs/nfs/write.c |
9080 |
++++ b/fs/nfs/write.c |
9081 |
+@@ -1836,6 +1836,8 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data) |
9082 |
+ set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags); |
9083 |
+ next: |
9084 |
+ nfs_unlock_and_release_request(req); |
9085 |
++ /* Latency breaker */ |
9086 |
++ cond_resched(); |
9087 |
+ } |
9088 |
+ nfss = NFS_SERVER(data->inode); |
9089 |
+ if (atomic_long_read(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) |
9090 |
+diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c |
9091 |
+index d94a51dc4e32..7fa7d68baa6d 100644 |
9092 |
+--- a/fs/overlayfs/readdir.c |
9093 |
++++ b/fs/overlayfs/readdir.c |
9094 |
+@@ -575,8 +575,15 @@ static struct ovl_dir_cache *ovl_cache_get_impure(struct path *path) |
9095 |
+ return ERR_PTR(res); |
9096 |
+ } |
9097 |
+ if (list_empty(&cache->entries)) { |
9098 |
+- /* Good oportunity to get rid of an unnecessary "impure" flag */ |
9099 |
+- ovl_do_removexattr(ovl_dentry_upper(dentry), OVL_XATTR_IMPURE); |
9100 |
++ /* |
9101 |
++ * A good opportunity to get rid of an unneeded "impure" flag. |
9102 |
++ * Removing the "impure" xattr is best effort. |
9103 |
++ */ |
9104 |
++ if (!ovl_want_write(dentry)) { |
9105 |
++ ovl_do_removexattr(ovl_dentry_upper(dentry), |
9106 |
++ OVL_XATTR_IMPURE); |
9107 |
++ ovl_drop_write(dentry); |
9108 |
++ } |
9109 |
+ ovl_clear_flag(OVL_IMPURE, d_inode(dentry)); |
9110 |
+ kfree(cache); |
9111 |
+ return NULL; |
9112 |
+@@ -751,10 +758,14 @@ static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end, |
9113 |
+ struct dentry *dentry = file->f_path.dentry; |
9114 |
+ struct file *realfile = od->realfile; |
9115 |
+ |
9116 |
++ /* Nothing to sync for lower */ |
9117 |
++ if (!OVL_TYPE_UPPER(ovl_path_type(dentry))) |
9118 |
++ return 0; |
9119 |
++ |
9120 |
+ /* |
9121 |
+ * Need to check if we started out being a lower dir, but got copied up |
9122 |
+ */ |
9123 |
+- if (!od->is_upper && OVL_TYPE_UPPER(ovl_path_type(dentry))) { |
9124 |
++ if (!od->is_upper) { |
9125 |
+ struct inode *inode = file_inode(file); |
9126 |
+ |
9127 |
+ realfile = READ_ONCE(od->upperfile); |
9128 |
+diff --git a/fs/pipe.c b/fs/pipe.c |
9129 |
+index f0f4ab36c444..8ef7d7bef775 100644 |
9130 |
+--- a/fs/pipe.c |
9131 |
++++ b/fs/pipe.c |
9132 |
+@@ -610,12 +610,17 @@ static unsigned long account_pipe_buffers(struct user_struct *user, |
9133 |
+ |
9134 |
+ static bool too_many_pipe_buffers_soft(unsigned long user_bufs) |
9135 |
+ { |
9136 |
+- return pipe_user_pages_soft && user_bufs >= pipe_user_pages_soft; |
9137 |
++ return pipe_user_pages_soft && user_bufs > pipe_user_pages_soft; |
9138 |
+ } |
9139 |
+ |
9140 |
+ static bool too_many_pipe_buffers_hard(unsigned long user_bufs) |
9141 |
+ { |
9142 |
+- return pipe_user_pages_hard && user_bufs >= pipe_user_pages_hard; |
9143 |
++ return pipe_user_pages_hard && user_bufs > pipe_user_pages_hard; |
9144 |
++} |
9145 |
++ |
9146 |
++static bool is_unprivileged_user(void) |
9147 |
++{ |
9148 |
++ return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN); |
9149 |
+ } |
9150 |
+ |
9151 |
+ struct pipe_inode_info *alloc_pipe_info(void) |
9152 |
+@@ -634,12 +639,12 @@ struct pipe_inode_info *alloc_pipe_info(void) |
9153 |
+ |
9154 |
+ user_bufs = account_pipe_buffers(user, 0, pipe_bufs); |
9155 |
+ |
9156 |
+- if (too_many_pipe_buffers_soft(user_bufs)) { |
9157 |
++ if (too_many_pipe_buffers_soft(user_bufs) && is_unprivileged_user()) { |
9158 |
+ user_bufs = account_pipe_buffers(user, pipe_bufs, 1); |
9159 |
+ pipe_bufs = 1; |
9160 |
+ } |
9161 |
+ |
9162 |
+- if (too_many_pipe_buffers_hard(user_bufs)) |
9163 |
++ if (too_many_pipe_buffers_hard(user_bufs) && is_unprivileged_user()) |
9164 |
+ goto out_revert_acct; |
9165 |
+ |
9166 |
+ pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer), |
9167 |
+@@ -1069,7 +1074,7 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg) |
9168 |
+ if (nr_pages > pipe->buffers && |
9169 |
+ (too_many_pipe_buffers_hard(user_bufs) || |
9170 |
+ too_many_pipe_buffers_soft(user_bufs)) && |
9171 |
+- !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) { |
9172 |
++ is_unprivileged_user()) { |
9173 |
+ ret = -EPERM; |
9174 |
+ goto out_revert_acct; |
9175 |
+ } |
9176 |
+diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c |
9177 |
+index 4bc85cb8be6a..e8a93bc8285d 100644 |
9178 |
+--- a/fs/proc/kcore.c |
9179 |
++++ b/fs/proc/kcore.c |
9180 |
+@@ -512,23 +512,15 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos) |
9181 |
+ return -EFAULT; |
9182 |
+ } else { |
9183 |
+ if (kern_addr_valid(start)) { |
9184 |
+- unsigned long n; |
9185 |
+- |
9186 |
+ /* |
9187 |
+ * Using bounce buffer to bypass the |
9188 |
+ * hardened user copy kernel text checks. |
9189 |
+ */ |
9190 |
+- memcpy(buf, (char *) start, tsz); |
9191 |
+- n = copy_to_user(buffer, buf, tsz); |
9192 |
+- /* |
9193 |
+- * We cannot distinguish between fault on source |
9194 |
+- * and fault on destination. When this happens |
9195 |
+- * we clear too and hope it will trigger the |
9196 |
+- * EFAULT again. |
9197 |
+- */ |
9198 |
+- if (n) { |
9199 |
+- if (clear_user(buffer + tsz - n, |
9200 |
+- n)) |
9201 |
++ if (probe_kernel_read(buf, (void *) start, tsz)) { |
9202 |
++ if (clear_user(buffer, tsz)) |
9203 |
++ return -EFAULT; |
9204 |
++ } else { |
9205 |
++ if (copy_to_user(buffer, buf, tsz)) |
9206 |
+ return -EFAULT; |
9207 |
+ } |
9208 |
+ } else { |
9209 |
+diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c |
9210 |
+index 417fe0b29f23..ef820f803176 100644 |
9211 |
+--- a/fs/ubifs/dir.c |
9212 |
++++ b/fs/ubifs/dir.c |
9213 |
+@@ -1216,10 +1216,8 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry, |
9214 |
+ ostr.len = disk_link.len; |
9215 |
+ |
9216 |
+ err = fscrypt_fname_usr_to_disk(inode, &istr, &ostr); |
9217 |
+- if (err) { |
9218 |
+- kfree(sd); |
9219 |
++ if (err) |
9220 |
+ goto out_inode; |
9221 |
+- } |
9222 |
+ |
9223 |
+ sd->len = cpu_to_le16(ostr.len); |
9224 |
+ disk_link.name = (char *)sd; |
9225 |
+@@ -1251,11 +1249,10 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry, |
9226 |
+ goto out_cancel; |
9227 |
+ mutex_unlock(&dir_ui->ui_mutex); |
9228 |
+ |
9229 |
+- ubifs_release_budget(c, &req); |
9230 |
+ insert_inode_hash(inode); |
9231 |
+ d_instantiate(dentry, inode); |
9232 |
+- fscrypt_free_filename(&nm); |
9233 |
+- return 0; |
9234 |
++ err = 0; |
9235 |
++ goto out_fname; |
9236 |
+ |
9237 |
+ out_cancel: |
9238 |
+ dir->i_size -= sz_change; |
9239 |
+@@ -1268,6 +1265,7 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry, |
9240 |
+ fscrypt_free_filename(&nm); |
9241 |
+ out_budg: |
9242 |
+ ubifs_release_budget(c, &req); |
9243 |
++ kfree(sd); |
9244 |
+ return err; |
9245 |
+ } |
9246 |
+ |
9247 |
+diff --git a/include/crypto/hash.h b/include/crypto/hash.h |
9248 |
+index b5727bcd2336..74827781593c 100644 |
9249 |
+--- a/include/crypto/hash.h |
9250 |
++++ b/include/crypto/hash.h |
9251 |
+@@ -205,7 +205,6 @@ struct crypto_ahash { |
9252 |
+ unsigned int keylen); |
9253 |
+ |
9254 |
+ unsigned int reqsize; |
9255 |
+- bool has_setkey; |
9256 |
+ struct crypto_tfm base; |
9257 |
+ }; |
9258 |
+ |
9259 |
+@@ -405,11 +404,6 @@ static inline void *ahash_request_ctx(struct ahash_request *req) |
9260 |
+ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, |
9261 |
+ unsigned int keylen); |
9262 |
+ |
9263 |
+-static inline bool crypto_ahash_has_setkey(struct crypto_ahash *tfm) |
9264 |
+-{ |
9265 |
+- return tfm->has_setkey; |
9266 |
+-} |
9267 |
+- |
9268 |
+ /** |
9269 |
+ * crypto_ahash_finup() - update and finalize message digest |
9270 |
+ * @req: reference to the ahash_request handle that holds all information |
9271 |
+@@ -481,7 +475,12 @@ static inline int crypto_ahash_export(struct ahash_request *req, void *out) |
9272 |
+ */ |
9273 |
+ static inline int crypto_ahash_import(struct ahash_request *req, const void *in) |
9274 |
+ { |
9275 |
+- return crypto_ahash_reqtfm(req)->import(req, in); |
9276 |
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
9277 |
++ |
9278 |
++ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) |
9279 |
++ return -ENOKEY; |
9280 |
++ |
9281 |
++ return tfm->import(req, in); |
9282 |
+ } |
9283 |
+ |
9284 |
+ /** |
9285 |
+@@ -498,7 +497,12 @@ static inline int crypto_ahash_import(struct ahash_request *req, const void *in) |
9286 |
+ */ |
9287 |
+ static inline int crypto_ahash_init(struct ahash_request *req) |
9288 |
+ { |
9289 |
+- return crypto_ahash_reqtfm(req)->init(req); |
9290 |
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
9291 |
++ |
9292 |
++ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) |
9293 |
++ return -ENOKEY; |
9294 |
++ |
9295 |
++ return tfm->init(req); |
9296 |
+ } |
9297 |
+ |
9298 |
+ /** |
9299 |
+@@ -851,7 +855,12 @@ static inline int crypto_shash_export(struct shash_desc *desc, void *out) |
9300 |
+ */ |
9301 |
+ static inline int crypto_shash_import(struct shash_desc *desc, const void *in) |
9302 |
+ { |
9303 |
+- return crypto_shash_alg(desc->tfm)->import(desc, in); |
9304 |
++ struct crypto_shash *tfm = desc->tfm; |
9305 |
++ |
9306 |
++ if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) |
9307 |
++ return -ENOKEY; |
9308 |
++ |
9309 |
++ return crypto_shash_alg(tfm)->import(desc, in); |
9310 |
+ } |
9311 |
+ |
9312 |
+ /** |
9313 |
+@@ -867,7 +876,12 @@ static inline int crypto_shash_import(struct shash_desc *desc, const void *in) |
9314 |
+ */ |
9315 |
+ static inline int crypto_shash_init(struct shash_desc *desc) |
9316 |
+ { |
9317 |
+- return crypto_shash_alg(desc->tfm)->init(desc); |
9318 |
++ struct crypto_shash *tfm = desc->tfm; |
9319 |
++ |
9320 |
++ if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) |
9321 |
++ return -ENOKEY; |
9322 |
++ |
9323 |
++ return crypto_shash_alg(tfm)->init(desc); |
9324 |
+ } |
9325 |
+ |
9326 |
+ /** |
9327 |
+diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h |
9328 |
+index c2bae8da642c..27040a46d50a 100644 |
9329 |
+--- a/include/crypto/internal/hash.h |
9330 |
++++ b/include/crypto/internal/hash.h |
9331 |
+@@ -90,6 +90,8 @@ static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg) |
9332 |
+ return alg->setkey != shash_no_setkey; |
9333 |
+ } |
9334 |
+ |
9335 |
++bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg); |
9336 |
++ |
9337 |
+ int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn, |
9338 |
+ struct hash_alg_common *alg, |
9339 |
+ struct crypto_instance *inst); |
9340 |
+diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h |
9341 |
+index c65567d01e8e..f718a19da82f 100644 |
9342 |
+--- a/include/crypto/poly1305.h |
9343 |
++++ b/include/crypto/poly1305.h |
9344 |
+@@ -31,8 +31,6 @@ struct poly1305_desc_ctx { |
9345 |
+ }; |
9346 |
+ |
9347 |
+ int crypto_poly1305_init(struct shash_desc *desc); |
9348 |
+-int crypto_poly1305_setkey(struct crypto_shash *tfm, |
9349 |
+- const u8 *key, unsigned int keylen); |
9350 |
+ unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, |
9351 |
+ const u8 *src, unsigned int srclen); |
9352 |
+ int crypto_poly1305_update(struct shash_desc *desc, |
9353 |
+diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h |
9354 |
+new file mode 100644 |
9355 |
+index 000000000000..e518e4e3dfb5 |
9356 |
+--- /dev/null |
9357 |
++++ b/include/kvm/arm_psci.h |
9358 |
+@@ -0,0 +1,51 @@ |
9359 |
++/* |
9360 |
++ * Copyright (C) 2012,2013 - ARM Ltd |
9361 |
++ * Author: Marc Zyngier <marc.zyngier@×××.com> |
9362 |
++ * |
9363 |
++ * This program is free software; you can redistribute it and/or modify |
9364 |
++ * it under the terms of the GNU General Public License version 2 as |
9365 |
++ * published by the Free Software Foundation. |
9366 |
++ * |
9367 |
++ * This program is distributed in the hope that it will be useful, |
9368 |
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of |
9369 |
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
9370 |
++ * GNU General Public License for more details. |
9371 |
++ * |
9372 |
++ * You should have received a copy of the GNU General Public License |
9373 |
++ * along with this program. If not, see <http://www.gnu.org/licenses/>. |
9374 |
++ */ |
9375 |
++ |
9376 |
++#ifndef __KVM_ARM_PSCI_H__ |
9377 |
++#define __KVM_ARM_PSCI_H__ |
9378 |
++ |
9379 |
++#include <linux/kvm_host.h> |
9380 |
++#include <uapi/linux/psci.h> |
9381 |
++ |
9382 |
++#define KVM_ARM_PSCI_0_1 PSCI_VERSION(0, 1) |
9383 |
++#define KVM_ARM_PSCI_0_2 PSCI_VERSION(0, 2) |
9384 |
++#define KVM_ARM_PSCI_1_0 PSCI_VERSION(1, 0) |
9385 |
++ |
9386 |
++#define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_0 |
9387 |
++ |
9388 |
++/* |
9389 |
++ * We need the KVM pointer independently from the vcpu as we can call |
9390 |
++ * this from HYP, and need to apply kern_hyp_va on it... |
9391 |
++ */ |
9392 |
++static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm) |
9393 |
++{ |
9394 |
++ /* |
9395 |
++ * Our PSCI implementation stays the same across versions from |
9396 |
++ * v0.2 onward, only adding the few mandatory functions (such |
9397 |
++ * as FEATURES with 1.0) that are required by newer |
9398 |
++ * revisions. It is thus safe to return the latest. |
9399 |
++ */ |
9400 |
++ if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) |
9401 |
++ return KVM_ARM_PSCI_LATEST; |
9402 |
++ |
9403 |
++ return KVM_ARM_PSCI_0_1; |
9404 |
++} |
9405 |
++ |
9406 |
++ |
9407 |
++int kvm_hvc_call_handler(struct kvm_vcpu *vcpu); |
9408 |
++ |
9409 |
++#endif /* __KVM_ARM_PSCI_H__ */ |
9410 |
+diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h |
9411 |
+index 4c5bca38c653..a031897fca76 100644 |
9412 |
+--- a/include/linux/arm-smccc.h |
9413 |
++++ b/include/linux/arm-smccc.h |
9414 |
+@@ -14,14 +14,16 @@ |
9415 |
+ #ifndef __LINUX_ARM_SMCCC_H |
9416 |
+ #define __LINUX_ARM_SMCCC_H |
9417 |
+ |
9418 |
++#include <uapi/linux/const.h> |
9419 |
++ |
9420 |
+ /* |
9421 |
+ * This file provides common defines for ARM SMC Calling Convention as |
9422 |
+ * specified in |
9423 |
+ * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html |
9424 |
+ */ |
9425 |
+ |
9426 |
+-#define ARM_SMCCC_STD_CALL 0 |
9427 |
+-#define ARM_SMCCC_FAST_CALL 1 |
9428 |
++#define ARM_SMCCC_STD_CALL _AC(0,U) |
9429 |
++#define ARM_SMCCC_FAST_CALL _AC(1,U) |
9430 |
+ #define ARM_SMCCC_TYPE_SHIFT 31 |
9431 |
+ |
9432 |
+ #define ARM_SMCCC_SMC_32 0 |
9433 |
+@@ -60,6 +62,24 @@ |
9434 |
+ #define ARM_SMCCC_QUIRK_NONE 0 |
9435 |
+ #define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */ |
9436 |
+ |
9437 |
++#define ARM_SMCCC_VERSION_1_0 0x10000 |
9438 |
++#define ARM_SMCCC_VERSION_1_1 0x10001 |
9439 |
++ |
9440 |
++#define ARM_SMCCC_VERSION_FUNC_ID \ |
9441 |
++ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ |
9442 |
++ ARM_SMCCC_SMC_32, \ |
9443 |
++ 0, 0) |
9444 |
++ |
9445 |
++#define ARM_SMCCC_ARCH_FEATURES_FUNC_ID \ |
9446 |
++ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ |
9447 |
++ ARM_SMCCC_SMC_32, \ |
9448 |
++ 0, 1) |
9449 |
++ |
9450 |
++#define ARM_SMCCC_ARCH_WORKAROUND_1 \ |
9451 |
++ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ |
9452 |
++ ARM_SMCCC_SMC_32, \ |
9453 |
++ 0, 0x8000) |
9454 |
++ |
9455 |
+ #ifndef __ASSEMBLY__ |
9456 |
+ |
9457 |
+ #include <linux/linkage.h> |
9458 |
+@@ -130,5 +150,146 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, |
9459 |
+ |
9460 |
+ #define arm_smccc_hvc_quirk(...) __arm_smccc_hvc(__VA_ARGS__) |
9461 |
+ |
9462 |
++/* SMCCC v1.1 implementation madness follows */ |
9463 |
++#ifdef CONFIG_ARM64 |
9464 |
++ |
9465 |
++#define SMCCC_SMC_INST "smc #0" |
9466 |
++#define SMCCC_HVC_INST "hvc #0" |
9467 |
++ |
9468 |
++#elif defined(CONFIG_ARM) |
9469 |
++#include <asm/opcodes-sec.h> |
9470 |
++#include <asm/opcodes-virt.h> |
9471 |
++ |
9472 |
++#define SMCCC_SMC_INST __SMC(0) |
9473 |
++#define SMCCC_HVC_INST __HVC(0) |
9474 |
++ |
9475 |
++#endif |
9476 |
++ |
9477 |
++#define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x |
9478 |
++ |
9479 |
++#define __count_args(...) \ |
9480 |
++ ___count_args(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0) |
9481 |
++ |
9482 |
++#define __constraint_write_0 \ |
9483 |
++ "+r" (r0), "=&r" (r1), "=&r" (r2), "=&r" (r3) |
9484 |
++#define __constraint_write_1 \ |
9485 |
++ "+r" (r0), "+r" (r1), "=&r" (r2), "=&r" (r3) |
9486 |
++#define __constraint_write_2 \ |
9487 |
++ "+r" (r0), "+r" (r1), "+r" (r2), "=&r" (r3) |
9488 |
++#define __constraint_write_3 \ |
9489 |
++ "+r" (r0), "+r" (r1), "+r" (r2), "+r" (r3) |
9490 |
++#define __constraint_write_4 __constraint_write_3 |
9491 |
++#define __constraint_write_5 __constraint_write_4 |
9492 |
++#define __constraint_write_6 __constraint_write_5 |
9493 |
++#define __constraint_write_7 __constraint_write_6 |
9494 |
++ |
9495 |
++#define __constraint_read_0 |
9496 |
++#define __constraint_read_1 |
9497 |
++#define __constraint_read_2 |
9498 |
++#define __constraint_read_3 |
9499 |
++#define __constraint_read_4 "r" (r4) |
9500 |
++#define __constraint_read_5 __constraint_read_4, "r" (r5) |
9501 |
++#define __constraint_read_6 __constraint_read_5, "r" (r6) |
9502 |
++#define __constraint_read_7 __constraint_read_6, "r" (r7) |
9503 |
++ |
9504 |
++#define __declare_arg_0(a0, res) \ |
9505 |
++ struct arm_smccc_res *___res = res; \ |
9506 |
++ register u32 r0 asm("r0") = a0; \ |
9507 |
++ register unsigned long r1 asm("r1"); \ |
9508 |
++ register unsigned long r2 asm("r2"); \ |
9509 |
++ register unsigned long r3 asm("r3") |
9510 |
++ |
9511 |
++#define __declare_arg_1(a0, a1, res) \ |
9512 |
++ struct arm_smccc_res *___res = res; \ |
9513 |
++ register u32 r0 asm("r0") = a0; \ |
9514 |
++ register typeof(a1) r1 asm("r1") = a1; \ |
9515 |
++ register unsigned long r2 asm("r2"); \ |
9516 |
++ register unsigned long r3 asm("r3") |
9517 |
++ |
9518 |
++#define __declare_arg_2(a0, a1, a2, res) \ |
9519 |
++ struct arm_smccc_res *___res = res; \ |
9520 |
++ register u32 r0 asm("r0") = a0; \ |
9521 |
++ register typeof(a1) r1 asm("r1") = a1; \ |
9522 |
++ register typeof(a2) r2 asm("r2") = a2; \ |
9523 |
++ register unsigned long r3 asm("r3") |
9524 |
++ |
9525 |
++#define __declare_arg_3(a0, a1, a2, a3, res) \ |
9526 |
++ struct arm_smccc_res *___res = res; \ |
9527 |
++ register u32 r0 asm("r0") = a0; \ |
9528 |
++ register typeof(a1) r1 asm("r1") = a1; \ |
9529 |
++ register typeof(a2) r2 asm("r2") = a2; \ |
9530 |
++ register typeof(a3) r3 asm("r3") = a3 |
9531 |
++ |
9532 |
++#define __declare_arg_4(a0, a1, a2, a3, a4, res) \ |
9533 |
++ __declare_arg_3(a0, a1, a2, a3, res); \ |
9534 |
++ register typeof(a4) r4 asm("r4") = a4 |
9535 |
++ |
9536 |
++#define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \ |
9537 |
++ __declare_arg_4(a0, a1, a2, a3, a4, res); \ |
9538 |
++ register typeof(a5) r5 asm("r5") = a5 |
9539 |
++ |
9540 |
++#define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \ |
9541 |
++ __declare_arg_5(a0, a1, a2, a3, a4, a5, res); \ |
9542 |
++ register typeof(a6) r6 asm("r6") = a6 |
9543 |
++ |
9544 |
++#define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \ |
9545 |
++ __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \ |
9546 |
++ register typeof(a7) r7 asm("r7") = a7 |
9547 |
++ |
9548 |
++#define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__) |
9549 |
++#define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__) |
9550 |
++ |
9551 |
++#define ___constraints(count) \ |
9552 |
++ : __constraint_write_ ## count \ |
9553 |
++ : __constraint_read_ ## count \ |
9554 |
++ : "memory" |
9555 |
++#define __constraints(count) ___constraints(count) |
9556 |
++ |
9557 |
++/* |
9558 |
++ * We have an output list that is not necessarily used, and GCC feels |
9559 |
++ * entitled to optimise the whole sequence away. "volatile" is what |
9560 |
++ * makes it stick. |
9561 |
++ */ |
9562 |
++#define __arm_smccc_1_1(inst, ...) \ |
9563 |
++ do { \ |
9564 |
++ __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \ |
9565 |
++ asm volatile(inst "\n" \ |
9566 |
++ __constraints(__count_args(__VA_ARGS__))); \ |
9567 |
++ if (___res) \ |
9568 |
++ *___res = (typeof(*___res)){r0, r1, r2, r3}; \ |
9569 |
++ } while (0) |
9570 |
++ |
9571 |
++/* |
9572 |
++ * arm_smccc_1_1_smc() - make an SMCCC v1.1 compliant SMC call |
9573 |
++ * |
9574 |
++ * This is a variadic macro taking one to eight source arguments, and |
9575 |
++ * an optional return structure. |
9576 |
++ * |
9577 |
++ * @a0-a7: arguments passed in registers 0 to 7 |
9578 |
++ * @res: result values from registers 0 to 3 |
9579 |
++ * |
9580 |
++ * This macro is used to make SMC calls following SMC Calling Convention v1.1. |
9581 |
++ * The content of the supplied param are copied to registers 0 to 7 prior |
9582 |
++ * to the SMC instruction. The return values are updated with the content |
9583 |
++ * from register 0 to 3 on return from the SMC instruction if not NULL. |
9584 |
++ */ |
9585 |
++#define arm_smccc_1_1_smc(...) __arm_smccc_1_1(SMCCC_SMC_INST, __VA_ARGS__) |
9586 |
++ |
9587 |
++/* |
9588 |
++ * arm_smccc_1_1_hvc() - make an SMCCC v1.1 compliant HVC call |
9589 |
++ * |
9590 |
++ * This is a variadic macro taking one to eight source arguments, and |
9591 |
++ * an optional return structure. |
9592 |
++ * |
9593 |
++ * @a0-a7: arguments passed in registers 0 to 7 |
9594 |
++ * @res: result values from registers 0 to 3 |
9595 |
++ * |
9596 |
++ * This macro is used to make HVC calls following SMC Calling Convention v1.1. |
9597 |
++ * The content of the supplied param are copied to registers 0 to 7 prior |
9598 |
++ * to the HVC instruction. The return values are updated with the content |
9599 |
++ * from register 0 to 3 on return from the HVC instruction if not NULL. |
9600 |
++ */ |
9601 |
++#define arm_smccc_1_1_hvc(...) __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__) |
9602 |
++ |
9603 |
+ #endif /*__ASSEMBLY__*/ |
9604 |
+ #endif /*__LINUX_ARM_SMCCC_H*/ |
9605 |
+diff --git a/include/linux/crypto.h b/include/linux/crypto.h |
9606 |
+index 84da9978e951..cc36484d29e1 100644 |
9607 |
+--- a/include/linux/crypto.h |
9608 |
++++ b/include/linux/crypto.h |
9609 |
+@@ -105,9 +105,17 @@ |
9610 |
+ */ |
9611 |
+ #define CRYPTO_ALG_INTERNAL 0x00002000 |
9612 |
+ |
9613 |
++/* |
9614 |
++ * Set if the algorithm has a ->setkey() method but can be used without |
9615 |
++ * calling it first, i.e. there is a default key. |
9616 |
++ */ |
9617 |
++#define CRYPTO_ALG_OPTIONAL_KEY 0x00004000 |
9618 |
++ |
9619 |
+ /* |
9620 |
+ * Transform masks and values (for crt_flags). |
9621 |
+ */ |
9622 |
++#define CRYPTO_TFM_NEED_KEY 0x00000001 |
9623 |
++ |
9624 |
+ #define CRYPTO_TFM_REQ_MASK 0x000fff00 |
9625 |
+ #define CRYPTO_TFM_RES_MASK 0xfff00000 |
9626 |
+ |
9627 |
+diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h |
9628 |
+index 3aa56e3104bb..b5b43f94f311 100644 |
9629 |
+--- a/include/linux/mtd/map.h |
9630 |
++++ b/include/linux/mtd/map.h |
9631 |
+@@ -270,75 +270,67 @@ void map_destroy(struct mtd_info *mtd); |
9632 |
+ #define INVALIDATE_CACHED_RANGE(map, from, size) \ |
9633 |
+ do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0) |
9634 |
+ |
9635 |
+- |
9636 |
+-static inline int map_word_equal(struct map_info *map, map_word val1, map_word val2) |
9637 |
+-{ |
9638 |
+- int i; |
9639 |
+- |
9640 |
+- for (i = 0; i < map_words(map); i++) { |
9641 |
+- if (val1.x[i] != val2.x[i]) |
9642 |
+- return 0; |
9643 |
+- } |
9644 |
+- |
9645 |
+- return 1; |
9646 |
+-} |
9647 |
+- |
9648 |
+-static inline map_word map_word_and(struct map_info *map, map_word val1, map_word val2) |
9649 |
+-{ |
9650 |
+- map_word r; |
9651 |
+- int i; |
9652 |
+- |
9653 |
+- for (i = 0; i < map_words(map); i++) |
9654 |
+- r.x[i] = val1.x[i] & val2.x[i]; |
9655 |
+- |
9656 |
+- return r; |
9657 |
+-} |
9658 |
+- |
9659 |
+-static inline map_word map_word_clr(struct map_info *map, map_word val1, map_word val2) |
9660 |
+-{ |
9661 |
+- map_word r; |
9662 |
+- int i; |
9663 |
+- |
9664 |
+- for (i = 0; i < map_words(map); i++) |
9665 |
+- r.x[i] = val1.x[i] & ~val2.x[i]; |
9666 |
+- |
9667 |
+- return r; |
9668 |
+-} |
9669 |
+- |
9670 |
+-static inline map_word map_word_or(struct map_info *map, map_word val1, map_word val2) |
9671 |
+-{ |
9672 |
+- map_word r; |
9673 |
+- int i; |
9674 |
+- |
9675 |
+- for (i = 0; i < map_words(map); i++) |
9676 |
+- r.x[i] = val1.x[i] | val2.x[i]; |
9677 |
+- |
9678 |
+- return r; |
9679 |
+-} |
9680 |
+- |
9681 |
+-static inline int map_word_andequal(struct map_info *map, map_word val1, map_word val2, map_word val3) |
9682 |
+-{ |
9683 |
+- int i; |
9684 |
+- |
9685 |
+- for (i = 0; i < map_words(map); i++) { |
9686 |
+- if ((val1.x[i] & val2.x[i]) != val3.x[i]) |
9687 |
+- return 0; |
9688 |
+- } |
9689 |
+- |
9690 |
+- return 1; |
9691 |
+-} |
9692 |
+- |
9693 |
+-static inline int map_word_bitsset(struct map_info *map, map_word val1, map_word val2) |
9694 |
+-{ |
9695 |
+- int i; |
9696 |
+- |
9697 |
+- for (i = 0; i < map_words(map); i++) { |
9698 |
+- if (val1.x[i] & val2.x[i]) |
9699 |
+- return 1; |
9700 |
+- } |
9701 |
+- |
9702 |
+- return 0; |
9703 |
+-} |
9704 |
++#define map_word_equal(map, val1, val2) \ |
9705 |
++({ \ |
9706 |
++ int i, ret = 1; \ |
9707 |
++ for (i = 0; i < map_words(map); i++) \ |
9708 |
++ if ((val1).x[i] != (val2).x[i]) { \ |
9709 |
++ ret = 0; \ |
9710 |
++ break; \ |
9711 |
++ } \ |
9712 |
++ ret; \ |
9713 |
++}) |
9714 |
++ |
9715 |
++#define map_word_and(map, val1, val2) \ |
9716 |
++({ \ |
9717 |
++ map_word r; \ |
9718 |
++ int i; \ |
9719 |
++ for (i = 0; i < map_words(map); i++) \ |
9720 |
++ r.x[i] = (val1).x[i] & (val2).x[i]; \ |
9721 |
++ r; \ |
9722 |
++}) |
9723 |
++ |
9724 |
++#define map_word_clr(map, val1, val2) \ |
9725 |
++({ \ |
9726 |
++ map_word r; \ |
9727 |
++ int i; \ |
9728 |
++ for (i = 0; i < map_words(map); i++) \ |
9729 |
++ r.x[i] = (val1).x[i] & ~(val2).x[i]; \ |
9730 |
++ r; \ |
9731 |
++}) |
9732 |
++ |
9733 |
++#define map_word_or(map, val1, val2) \ |
9734 |
++({ \ |
9735 |
++ map_word r; \ |
9736 |
++ int i; \ |
9737 |
++ for (i = 0; i < map_words(map); i++) \ |
9738 |
++ r.x[i] = (val1).x[i] | (val2).x[i]; \ |
9739 |
++ r; \ |
9740 |
++}) |
9741 |
++ |
9742 |
++#define map_word_andequal(map, val1, val2, val3) \ |
9743 |
++({ \ |
9744 |
++ int i, ret = 1; \ |
9745 |
++ for (i = 0; i < map_words(map); i++) { \ |
9746 |
++ if (((val1).x[i] & (val2).x[i]) != (val2).x[i]) { \ |
9747 |
++ ret = 0; \ |
9748 |
++ break; \ |
9749 |
++ } \ |
9750 |
++ } \ |
9751 |
++ ret; \ |
9752 |
++}) |
9753 |
++ |
9754 |
++#define map_word_bitsset(map, val1, val2) \ |
9755 |
++({ \ |
9756 |
++ int i, ret = 0; \ |
9757 |
++ for (i = 0; i < map_words(map); i++) { \ |
9758 |
++ if ((val1).x[i] & (val2).x[i]) { \ |
9759 |
++ ret = 1; \ |
9760 |
++ break; \ |
9761 |
++ } \ |
9762 |
++ } \ |
9763 |
++ ret; \ |
9764 |
++}) |
9765 |
+ |
9766 |
+ static inline map_word map_word_load(struct map_info *map, const void *ptr) |
9767 |
+ { |
9768 |
+diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h |
9769 |
+index 47adac640191..57ffaa20d564 100644 |
9770 |
+--- a/include/linux/nfs4.h |
9771 |
++++ b/include/linux/nfs4.h |
9772 |
+@@ -457,7 +457,12 @@ enum lock_type4 { |
9773 |
+ |
9774 |
+ #define NFS4_DEBUG 1 |
9775 |
+ |
9776 |
+-/* Index of predefined Linux client operations */ |
9777 |
++/* |
9778 |
++ * Index of predefined Linux client operations |
9779 |
++ * |
9780 |
++ * To ensure that /proc/net/rpc/nfs remains correctly ordered, please |
9781 |
++ * append only to this enum when adding new client operations. |
9782 |
++ */ |
9783 |
+ |
9784 |
+ enum { |
9785 |
+ NFSPROC4_CLNT_NULL = 0, /* Unused */ |
9786 |
+@@ -480,7 +485,6 @@ enum { |
9787 |
+ NFSPROC4_CLNT_ACCESS, |
9788 |
+ NFSPROC4_CLNT_GETATTR, |
9789 |
+ NFSPROC4_CLNT_LOOKUP, |
9790 |
+- NFSPROC4_CLNT_LOOKUPP, |
9791 |
+ NFSPROC4_CLNT_LOOKUP_ROOT, |
9792 |
+ NFSPROC4_CLNT_REMOVE, |
9793 |
+ NFSPROC4_CLNT_RENAME, |
9794 |
+@@ -500,7 +504,6 @@ enum { |
9795 |
+ NFSPROC4_CLNT_SECINFO, |
9796 |
+ NFSPROC4_CLNT_FSID_PRESENT, |
9797 |
+ |
9798 |
+- /* nfs41 */ |
9799 |
+ NFSPROC4_CLNT_EXCHANGE_ID, |
9800 |
+ NFSPROC4_CLNT_CREATE_SESSION, |
9801 |
+ NFSPROC4_CLNT_DESTROY_SESSION, |
9802 |
+@@ -518,13 +521,14 @@ enum { |
9803 |
+ NFSPROC4_CLNT_BIND_CONN_TO_SESSION, |
9804 |
+ NFSPROC4_CLNT_DESTROY_CLIENTID, |
9805 |
+ |
9806 |
+- /* nfs42 */ |
9807 |
+ NFSPROC4_CLNT_SEEK, |
9808 |
+ NFSPROC4_CLNT_ALLOCATE, |
9809 |
+ NFSPROC4_CLNT_DEALLOCATE, |
9810 |
+ NFSPROC4_CLNT_LAYOUTSTATS, |
9811 |
+ NFSPROC4_CLNT_CLONE, |
9812 |
+ NFSPROC4_CLNT_COPY, |
9813 |
++ |
9814 |
++ NFSPROC4_CLNT_LOOKUPP, |
9815 |
+ }; |
9816 |
+ |
9817 |
+ /* nfs41 types */ |
9818 |
+diff --git a/include/linux/psci.h b/include/linux/psci.h |
9819 |
+index bdea1cb5e1db..347077cf19c6 100644 |
9820 |
+--- a/include/linux/psci.h |
9821 |
++++ b/include/linux/psci.h |
9822 |
+@@ -25,7 +25,19 @@ bool psci_tos_resident_on(int cpu); |
9823 |
+ int psci_cpu_init_idle(unsigned int cpu); |
9824 |
+ int psci_cpu_suspend_enter(unsigned long index); |
9825 |
+ |
9826 |
++enum psci_conduit { |
9827 |
++ PSCI_CONDUIT_NONE, |
9828 |
++ PSCI_CONDUIT_SMC, |
9829 |
++ PSCI_CONDUIT_HVC, |
9830 |
++}; |
9831 |
++ |
9832 |
++enum smccc_version { |
9833 |
++ SMCCC_VERSION_1_0, |
9834 |
++ SMCCC_VERSION_1_1, |
9835 |
++}; |
9836 |
++ |
9837 |
+ struct psci_operations { |
9838 |
++ u32 (*get_version)(void); |
9839 |
+ int (*cpu_suspend)(u32 state, unsigned long entry_point); |
9840 |
+ int (*cpu_off)(u32 state); |
9841 |
+ int (*cpu_on)(unsigned long cpuid, unsigned long entry_point); |
9842 |
+@@ -33,6 +45,8 @@ struct psci_operations { |
9843 |
+ int (*affinity_info)(unsigned long target_affinity, |
9844 |
+ unsigned long lowest_affinity_level); |
9845 |
+ int (*migrate_info_type)(void); |
9846 |
++ enum psci_conduit conduit; |
9847 |
++ enum smccc_version smccc_version; |
9848 |
+ }; |
9849 |
+ |
9850 |
+ extern struct psci_operations psci_ops; |
9851 |
+diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h |
9852 |
+index a8b7bf879ced..1a1df0d21ee3 100644 |
9853 |
+--- a/include/scsi/scsi_host.h |
9854 |
++++ b/include/scsi/scsi_host.h |
9855 |
+@@ -571,6 +571,8 @@ struct Scsi_Host { |
9856 |
+ struct blk_mq_tag_set tag_set; |
9857 |
+ }; |
9858 |
+ |
9859 |
++ struct rcu_head rcu; |
9860 |
++ |
9861 |
+ atomic_t host_busy; /* commands actually active on low-level */ |
9862 |
+ atomic_t host_blocked; |
9863 |
+ |
9864 |
+diff --git a/include/uapi/linux/psci.h b/include/uapi/linux/psci.h |
9865 |
+index 760e52a9640f..b3bcabe380da 100644 |
9866 |
+--- a/include/uapi/linux/psci.h |
9867 |
++++ b/include/uapi/linux/psci.h |
9868 |
+@@ -88,6 +88,9 @@ |
9869 |
+ (((ver) & PSCI_VERSION_MAJOR_MASK) >> PSCI_VERSION_MAJOR_SHIFT) |
9870 |
+ #define PSCI_VERSION_MINOR(ver) \ |
9871 |
+ ((ver) & PSCI_VERSION_MINOR_MASK) |
9872 |
++#define PSCI_VERSION(maj, min) \ |
9873 |
++ ((((maj) << PSCI_VERSION_MAJOR_SHIFT) & PSCI_VERSION_MAJOR_MASK) | \ |
9874 |
++ ((min) & PSCI_VERSION_MINOR_MASK)) |
9875 |
+ |
9876 |
+ /* PSCI features decoding (>=1.0) */ |
9877 |
+ #define PSCI_1_0_FEATURES_CPU_SUSPEND_PF_SHIFT 1 |
9878 |
+diff --git a/kernel/async.c b/kernel/async.c |
9879 |
+index 2cbd3dd5940d..a893d6170944 100644 |
9880 |
+--- a/kernel/async.c |
9881 |
++++ b/kernel/async.c |
9882 |
+@@ -84,20 +84,24 @@ static atomic_t entry_count; |
9883 |
+ |
9884 |
+ static async_cookie_t lowest_in_progress(struct async_domain *domain) |
9885 |
+ { |
9886 |
+- struct list_head *pending; |
9887 |
++ struct async_entry *first = NULL; |
9888 |
+ async_cookie_t ret = ASYNC_COOKIE_MAX; |
9889 |
+ unsigned long flags; |
9890 |
+ |
9891 |
+ spin_lock_irqsave(&async_lock, flags); |
9892 |
+ |
9893 |
+- if (domain) |
9894 |
+- pending = &domain->pending; |
9895 |
+- else |
9896 |
+- pending = &async_global_pending; |
9897 |
++ if (domain) { |
9898 |
++ if (!list_empty(&domain->pending)) |
9899 |
++ first = list_first_entry(&domain->pending, |
9900 |
++ struct async_entry, domain_list); |
9901 |
++ } else { |
9902 |
++ if (!list_empty(&async_global_pending)) |
9903 |
++ first = list_first_entry(&async_global_pending, |
9904 |
++ struct async_entry, global_list); |
9905 |
++ } |
9906 |
+ |
9907 |
+- if (!list_empty(pending)) |
9908 |
+- ret = list_first_entry(pending, struct async_entry, |
9909 |
+- domain_list)->cookie; |
9910 |
++ if (first) |
9911 |
++ ret = first->cookie; |
9912 |
+ |
9913 |
+ spin_unlock_irqrestore(&async_lock, flags); |
9914 |
+ return ret; |
9915 |
+diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c |
9916 |
+index 5033b66d2753..7a577bd989a4 100644 |
9917 |
+--- a/kernel/rcu/update.c |
9918 |
++++ b/kernel/rcu/update.c |
9919 |
+@@ -421,11 +421,13 @@ void init_rcu_head(struct rcu_head *head) |
9920 |
+ { |
9921 |
+ debug_object_init(head, &rcuhead_debug_descr); |
9922 |
+ } |
9923 |
++EXPORT_SYMBOL_GPL(init_rcu_head); |
9924 |
+ |
9925 |
+ void destroy_rcu_head(struct rcu_head *head) |
9926 |
+ { |
9927 |
+ debug_object_free(head, &rcuhead_debug_descr); |
9928 |
+ } |
9929 |
++EXPORT_SYMBOL_GPL(destroy_rcu_head); |
9930 |
+ |
9931 |
+ static bool rcuhead_is_static_object(void *addr) |
9932 |
+ { |
9933 |
+diff --git a/kernel/relay.c b/kernel/relay.c |
9934 |
+index 39a9dfc69486..55da824f4adc 100644 |
9935 |
+--- a/kernel/relay.c |
9936 |
++++ b/kernel/relay.c |
9937 |
+@@ -611,7 +611,6 @@ struct rchan *relay_open(const char *base_filename, |
9938 |
+ |
9939 |
+ kref_put(&chan->kref, relay_destroy_channel); |
9940 |
+ mutex_unlock(&relay_channels_mutex); |
9941 |
+- kfree(chan); |
9942 |
+ return NULL; |
9943 |
+ } |
9944 |
+ EXPORT_SYMBOL_GPL(relay_open); |
9945 |
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c |
9946 |
+index 7464c5c4de46..298f62b8662d 100644 |
9947 |
+--- a/kernel/sched/rt.c |
9948 |
++++ b/kernel/sched/rt.c |
9949 |
+@@ -1907,9 +1907,8 @@ static void push_rt_tasks(struct rq *rq) |
9950 |
+ * the rt_loop_next will cause the iterator to perform another scan. |
9951 |
+ * |
9952 |
+ */ |
9953 |
+-static int rto_next_cpu(struct rq *rq) |
9954 |
++static int rto_next_cpu(struct root_domain *rd) |
9955 |
+ { |
9956 |
+- struct root_domain *rd = rq->rd; |
9957 |
+ int next; |
9958 |
+ int cpu; |
9959 |
+ |
9960 |
+@@ -1985,19 +1984,24 @@ static void tell_cpu_to_push(struct rq *rq) |
9961 |
+ * Otherwise it is finishing up and an ipi needs to be sent. |
9962 |
+ */ |
9963 |
+ if (rq->rd->rto_cpu < 0) |
9964 |
+- cpu = rto_next_cpu(rq); |
9965 |
++ cpu = rto_next_cpu(rq->rd); |
9966 |
+ |
9967 |
+ raw_spin_unlock(&rq->rd->rto_lock); |
9968 |
+ |
9969 |
+ rto_start_unlock(&rq->rd->rto_loop_start); |
9970 |
+ |
9971 |
+- if (cpu >= 0) |
9972 |
++ if (cpu >= 0) { |
9973 |
++ /* Make sure the rd does not get freed while pushing */ |
9974 |
++ sched_get_rd(rq->rd); |
9975 |
+ irq_work_queue_on(&rq->rd->rto_push_work, cpu); |
9976 |
++ } |
9977 |
+ } |
9978 |
+ |
9979 |
+ /* Called from hardirq context */ |
9980 |
+ void rto_push_irq_work_func(struct irq_work *work) |
9981 |
+ { |
9982 |
++ struct root_domain *rd = |
9983 |
++ container_of(work, struct root_domain, rto_push_work); |
9984 |
+ struct rq *rq; |
9985 |
+ int cpu; |
9986 |
+ |
9987 |
+@@ -2013,18 +2017,20 @@ void rto_push_irq_work_func(struct irq_work *work) |
9988 |
+ raw_spin_unlock(&rq->lock); |
9989 |
+ } |
9990 |
+ |
9991 |
+- raw_spin_lock(&rq->rd->rto_lock); |
9992 |
++ raw_spin_lock(&rd->rto_lock); |
9993 |
+ |
9994 |
+ /* Pass the IPI to the next rt overloaded queue */ |
9995 |
+- cpu = rto_next_cpu(rq); |
9996 |
++ cpu = rto_next_cpu(rd); |
9997 |
+ |
9998 |
+- raw_spin_unlock(&rq->rd->rto_lock); |
9999 |
++ raw_spin_unlock(&rd->rto_lock); |
10000 |
+ |
10001 |
+- if (cpu < 0) |
10002 |
++ if (cpu < 0) { |
10003 |
++ sched_put_rd(rd); |
10004 |
+ return; |
10005 |
++ } |
10006 |
+ |
10007 |
+ /* Try the next RT overloaded CPU */ |
10008 |
+- irq_work_queue_on(&rq->rd->rto_push_work, cpu); |
10009 |
++ irq_work_queue_on(&rd->rto_push_work, cpu); |
10010 |
+ } |
10011 |
+ #endif /* HAVE_RT_PUSH_IPI */ |
10012 |
+ |
10013 |
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h |
10014 |
+index b732e779fe7d..307c35d33660 100644 |
10015 |
+--- a/kernel/sched/sched.h |
10016 |
++++ b/kernel/sched/sched.h |
10017 |
+@@ -661,6 +661,8 @@ extern struct mutex sched_domains_mutex; |
10018 |
+ extern void init_defrootdomain(void); |
10019 |
+ extern int sched_init_domains(const struct cpumask *cpu_map); |
10020 |
+ extern void rq_attach_root(struct rq *rq, struct root_domain *rd); |
10021 |
++extern void sched_get_rd(struct root_domain *rd); |
10022 |
++extern void sched_put_rd(struct root_domain *rd); |
10023 |
+ |
10024 |
+ #ifdef HAVE_RT_PUSH_IPI |
10025 |
+ extern void rto_push_irq_work_func(struct irq_work *work); |
10026 |
+diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c |
10027 |
+index 093f2ceba2e2..659e075ef70b 100644 |
10028 |
+--- a/kernel/sched/topology.c |
10029 |
++++ b/kernel/sched/topology.c |
10030 |
+@@ -258,6 +258,19 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd) |
10031 |
+ call_rcu_sched(&old_rd->rcu, free_rootdomain); |
10032 |
+ } |
10033 |
+ |
10034 |
++void sched_get_rd(struct root_domain *rd) |
10035 |
++{ |
10036 |
++ atomic_inc(&rd->refcount); |
10037 |
++} |
10038 |
++ |
10039 |
++void sched_put_rd(struct root_domain *rd) |
10040 |
++{ |
10041 |
++ if (!atomic_dec_and_test(&rd->refcount)) |
10042 |
++ return; |
10043 |
++ |
10044 |
++ call_rcu_sched(&rd->rcu, free_rootdomain); |
10045 |
++} |
10046 |
++ |
10047 |
+ static int init_rootdomain(struct root_domain *rd) |
10048 |
+ { |
10049 |
+ if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL)) |
10050 |
+diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c |
10051 |
+index 98feab7933c7..929ecb7d6b78 100644 |
10052 |
+--- a/kernel/sched/wait.c |
10053 |
++++ b/kernel/sched/wait.c |
10054 |
+@@ -27,7 +27,7 @@ void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq |
10055 |
+ |
10056 |
+ wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE; |
10057 |
+ spin_lock_irqsave(&wq_head->lock, flags); |
10058 |
+- __add_wait_queue_entry_tail(wq_head, wq_entry); |
10059 |
++ __add_wait_queue(wq_head, wq_entry); |
10060 |
+ spin_unlock_irqrestore(&wq_head->lock, flags); |
10061 |
+ } |
10062 |
+ EXPORT_SYMBOL(add_wait_queue); |
10063 |
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c |
10064 |
+index 8319e09e15b9..7379bcf3baa0 100644 |
10065 |
+--- a/kernel/trace/ftrace.c |
10066 |
++++ b/kernel/trace/ftrace.c |
10067 |
+@@ -4488,7 +4488,6 @@ unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr, |
10068 |
+ func_g.type = filter_parse_regex(glob, strlen(glob), |
10069 |
+ &func_g.search, ¬); |
10070 |
+ func_g.len = strlen(func_g.search); |
10071 |
+- func_g.search = glob; |
10072 |
+ |
10073 |
+ /* we do not support '!' for function probes */ |
10074 |
+ if (WARN_ON(not)) |
10075 |
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug |
10076 |
+index ff21b4dbb392..00cb02daeddd 100644 |
10077 |
+--- a/lib/Kconfig.debug |
10078 |
++++ b/lib/Kconfig.debug |
10079 |
+@@ -217,7 +217,7 @@ config ENABLE_MUST_CHECK |
10080 |
+ config FRAME_WARN |
10081 |
+ int "Warn for stack frames larger than (needs gcc 4.4)" |
10082 |
+ range 0 8192 |
10083 |
+- default 0 if KASAN |
10084 |
++ default 3072 if KASAN_EXTRA |
10085 |
+ default 2048 if GCC_PLUGIN_LATENT_ENTROPY |
10086 |
+ default 1280 if (!64BIT && PARISC) |
10087 |
+ default 1024 if (!64BIT && !PARISC) |
10088 |
+diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan |
10089 |
+index bd38aab05929..3d35d062970d 100644 |
10090 |
+--- a/lib/Kconfig.kasan |
10091 |
++++ b/lib/Kconfig.kasan |
10092 |
+@@ -20,6 +20,17 @@ config KASAN |
10093 |
+ Currently CONFIG_KASAN doesn't work with CONFIG_DEBUG_SLAB |
10094 |
+ (the resulting kernel does not boot). |
10095 |
+ |
10096 |
++config KASAN_EXTRA |
10097 |
++ bool "KAsan: extra checks" |
10098 |
++ depends on KASAN && DEBUG_KERNEL && !COMPILE_TEST |
10099 |
++ help |
10100 |
++ This enables further checks in the kernel address sanitizer, for now |
10101 |
++ it only includes the address-use-after-scope check that can lead |
10102 |
++ to excessive kernel stack usage, frame size warnings and longer |
10103 |
++ compile time. |
10104 |
++ https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 has more |
10105 |
++ |
10106 |
++ |
10107 |
+ choice |
10108 |
+ prompt "Instrumentation type" |
10109 |
+ depends on KASAN |
10110 |
+diff --git a/lib/ubsan.c b/lib/ubsan.c |
10111 |
+index fb0409df1bcf..50d1d5c25deb 100644 |
10112 |
+--- a/lib/ubsan.c |
10113 |
++++ b/lib/ubsan.c |
10114 |
+@@ -265,14 +265,14 @@ void __ubsan_handle_divrem_overflow(struct overflow_data *data, |
10115 |
+ } |
10116 |
+ EXPORT_SYMBOL(__ubsan_handle_divrem_overflow); |
10117 |
+ |
10118 |
+-static void handle_null_ptr_deref(struct type_mismatch_data *data) |
10119 |
++static void handle_null_ptr_deref(struct type_mismatch_data_common *data) |
10120 |
+ { |
10121 |
+ unsigned long flags; |
10122 |
+ |
10123 |
+- if (suppress_report(&data->location)) |
10124 |
++ if (suppress_report(data->location)) |
10125 |
+ return; |
10126 |
+ |
10127 |
+- ubsan_prologue(&data->location, &flags); |
10128 |
++ ubsan_prologue(data->location, &flags); |
10129 |
+ |
10130 |
+ pr_err("%s null pointer of type %s\n", |
10131 |
+ type_check_kinds[data->type_check_kind], |
10132 |
+@@ -281,15 +281,15 @@ static void handle_null_ptr_deref(struct type_mismatch_data *data) |
10133 |
+ ubsan_epilogue(&flags); |
10134 |
+ } |
10135 |
+ |
10136 |
+-static void handle_missaligned_access(struct type_mismatch_data *data, |
10137 |
++static void handle_misaligned_access(struct type_mismatch_data_common *data, |
10138 |
+ unsigned long ptr) |
10139 |
+ { |
10140 |
+ unsigned long flags; |
10141 |
+ |
10142 |
+- if (suppress_report(&data->location)) |
10143 |
++ if (suppress_report(data->location)) |
10144 |
+ return; |
10145 |
+ |
10146 |
+- ubsan_prologue(&data->location, &flags); |
10147 |
++ ubsan_prologue(data->location, &flags); |
10148 |
+ |
10149 |
+ pr_err("%s misaligned address %p for type %s\n", |
10150 |
+ type_check_kinds[data->type_check_kind], |
10151 |
+@@ -299,15 +299,15 @@ static void handle_missaligned_access(struct type_mismatch_data *data, |
10152 |
+ ubsan_epilogue(&flags); |
10153 |
+ } |
10154 |
+ |
10155 |
+-static void handle_object_size_mismatch(struct type_mismatch_data *data, |
10156 |
++static void handle_object_size_mismatch(struct type_mismatch_data_common *data, |
10157 |
+ unsigned long ptr) |
10158 |
+ { |
10159 |
+ unsigned long flags; |
10160 |
+ |
10161 |
+- if (suppress_report(&data->location)) |
10162 |
++ if (suppress_report(data->location)) |
10163 |
+ return; |
10164 |
+ |
10165 |
+- ubsan_prologue(&data->location, &flags); |
10166 |
++ ubsan_prologue(data->location, &flags); |
10167 |
+ pr_err("%s address %p with insufficient space\n", |
10168 |
+ type_check_kinds[data->type_check_kind], |
10169 |
+ (void *) ptr); |
10170 |
+@@ -315,19 +315,47 @@ static void handle_object_size_mismatch(struct type_mismatch_data *data, |
10171 |
+ ubsan_epilogue(&flags); |
10172 |
+ } |
10173 |
+ |
10174 |
+-void __ubsan_handle_type_mismatch(struct type_mismatch_data *data, |
10175 |
++static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data, |
10176 |
+ unsigned long ptr) |
10177 |
+ { |
10178 |
+ |
10179 |
+ if (!ptr) |
10180 |
+ handle_null_ptr_deref(data); |
10181 |
+ else if (data->alignment && !IS_ALIGNED(ptr, data->alignment)) |
10182 |
+- handle_missaligned_access(data, ptr); |
10183 |
++ handle_misaligned_access(data, ptr); |
10184 |
+ else |
10185 |
+ handle_object_size_mismatch(data, ptr); |
10186 |
+ } |
10187 |
++ |
10188 |
++void __ubsan_handle_type_mismatch(struct type_mismatch_data *data, |
10189 |
++ unsigned long ptr) |
10190 |
++{ |
10191 |
++ struct type_mismatch_data_common common_data = { |
10192 |
++ .location = &data->location, |
10193 |
++ .type = data->type, |
10194 |
++ .alignment = data->alignment, |
10195 |
++ .type_check_kind = data->type_check_kind |
10196 |
++ }; |
10197 |
++ |
10198 |
++ ubsan_type_mismatch_common(&common_data, ptr); |
10199 |
++} |
10200 |
+ EXPORT_SYMBOL(__ubsan_handle_type_mismatch); |
10201 |
+ |
10202 |
++void __ubsan_handle_type_mismatch_v1(struct type_mismatch_data_v1 *data, |
10203 |
++ unsigned long ptr) |
10204 |
++{ |
10205 |
++ |
10206 |
++ struct type_mismatch_data_common common_data = { |
10207 |
++ .location = &data->location, |
10208 |
++ .type = data->type, |
10209 |
++ .alignment = 1UL << data->log_alignment, |
10210 |
++ .type_check_kind = data->type_check_kind |
10211 |
++ }; |
10212 |
++ |
10213 |
++ ubsan_type_mismatch_common(&common_data, ptr); |
10214 |
++} |
10215 |
++EXPORT_SYMBOL(__ubsan_handle_type_mismatch_v1); |
10216 |
++ |
10217 |
+ void __ubsan_handle_nonnull_return(struct nonnull_return_data *data) |
10218 |
+ { |
10219 |
+ unsigned long flags; |
10220 |
+diff --git a/lib/ubsan.h b/lib/ubsan.h |
10221 |
+index 88f23557edbe..7e30b26497e0 100644 |
10222 |
+--- a/lib/ubsan.h |
10223 |
++++ b/lib/ubsan.h |
10224 |
+@@ -37,6 +37,20 @@ struct type_mismatch_data { |
10225 |
+ unsigned char type_check_kind; |
10226 |
+ }; |
10227 |
+ |
10228 |
++struct type_mismatch_data_v1 { |
10229 |
++ struct source_location location; |
10230 |
++ struct type_descriptor *type; |
10231 |
++ unsigned char log_alignment; |
10232 |
++ unsigned char type_check_kind; |
10233 |
++}; |
10234 |
++ |
10235 |
++struct type_mismatch_data_common { |
10236 |
++ struct source_location *location; |
10237 |
++ struct type_descriptor *type; |
10238 |
++ unsigned long alignment; |
10239 |
++ unsigned char type_check_kind; |
10240 |
++}; |
10241 |
++ |
10242 |
+ struct nonnull_arg_data { |
10243 |
+ struct source_location location; |
10244 |
+ struct source_location attr_location; |
10245 |
+diff --git a/net/dccp/proto.c b/net/dccp/proto.c |
10246 |
+index b68168fcc06a..9d43c1f40274 100644 |
10247 |
+--- a/net/dccp/proto.c |
10248 |
++++ b/net/dccp/proto.c |
10249 |
+@@ -259,6 +259,7 @@ int dccp_disconnect(struct sock *sk, int flags) |
10250 |
+ { |
10251 |
+ struct inet_connection_sock *icsk = inet_csk(sk); |
10252 |
+ struct inet_sock *inet = inet_sk(sk); |
10253 |
++ struct dccp_sock *dp = dccp_sk(sk); |
10254 |
+ int err = 0; |
10255 |
+ const int old_state = sk->sk_state; |
10256 |
+ |
10257 |
+@@ -278,6 +279,10 @@ int dccp_disconnect(struct sock *sk, int flags) |
10258 |
+ sk->sk_err = ECONNRESET; |
10259 |
+ |
10260 |
+ dccp_clear_xmit_timers(sk); |
10261 |
++ ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk); |
10262 |
++ ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk); |
10263 |
++ dp->dccps_hc_rx_ccid = NULL; |
10264 |
++ dp->dccps_hc_tx_ccid = NULL; |
10265 |
+ |
10266 |
+ __skb_queue_purge(&sk->sk_receive_queue); |
10267 |
+ __skb_queue_purge(&sk->sk_write_queue); |
10268 |
+diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan |
10269 |
+index 1ce7115aa499..97a56c0b565a 100644 |
10270 |
+--- a/scripts/Makefile.kasan |
10271 |
++++ b/scripts/Makefile.kasan |
10272 |
+@@ -30,5 +30,10 @@ else |
10273 |
+ endif |
10274 |
+ endif |
10275 |
+ |
10276 |
++ifdef CONFIG_KASAN_EXTRA |
10277 |
+ CFLAGS_KASAN += $(call cc-option, -fsanitize-address-use-after-scope) |
10278 |
+ endif |
10279 |
++ |
10280 |
++CFLAGS_KASAN_NOSANITIZE := -fno-builtin |
10281 |
++ |
10282 |
++endif |
10283 |
+diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib |
10284 |
+index 04b5633df1cf..0b46136a91a8 100644 |
10285 |
+--- a/scripts/Makefile.lib |
10286 |
++++ b/scripts/Makefile.lib |
10287 |
+@@ -128,7 +128,7 @@ endif |
10288 |
+ ifeq ($(CONFIG_KASAN),y) |
10289 |
+ _c_flags += $(if $(patsubst n%,, \ |
10290 |
+ $(KASAN_SANITIZE_$(basetarget).o)$(KASAN_SANITIZE)y), \ |
10291 |
+- $(CFLAGS_KASAN)) |
10292 |
++ $(CFLAGS_KASAN), $(CFLAGS_KASAN_NOSANITIZE)) |
10293 |
+ endif |
10294 |
+ |
10295 |
+ ifeq ($(CONFIG_UBSAN),y) |
10296 |
+diff --git a/sound/soc/intel/skylake/skl-nhlt.c b/sound/soc/intel/skylake/skl-nhlt.c |
10297 |
+index e7d766d56c8e..55859c5b456f 100644 |
10298 |
+--- a/sound/soc/intel/skylake/skl-nhlt.c |
10299 |
++++ b/sound/soc/intel/skylake/skl-nhlt.c |
10300 |
+@@ -41,7 +41,8 @@ struct nhlt_acpi_table *skl_nhlt_init(struct device *dev) |
10301 |
+ obj = acpi_evaluate_dsm(handle, &osc_guid, 1, 1, NULL); |
10302 |
+ if (obj && obj->type == ACPI_TYPE_BUFFER) { |
10303 |
+ nhlt_ptr = (struct nhlt_resource_desc *)obj->buffer.pointer; |
10304 |
+- nhlt_table = (struct nhlt_acpi_table *) |
10305 |
++ if (nhlt_ptr->length) |
10306 |
++ nhlt_table = (struct nhlt_acpi_table *) |
10307 |
+ memremap(nhlt_ptr->min_addr, nhlt_ptr->length, |
10308 |
+ MEMREMAP_WB); |
10309 |
+ ACPI_FREE(obj); |
10310 |
+diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c |
10311 |
+index b6590467fd14..66fc13a2396a 100644 |
10312 |
+--- a/sound/soc/rockchip/rockchip_i2s.c |
10313 |
++++ b/sound/soc/rockchip/rockchip_i2s.c |
10314 |
+@@ -504,6 +504,7 @@ static bool rockchip_i2s_rd_reg(struct device *dev, unsigned int reg) |
10315 |
+ case I2S_INTCR: |
10316 |
+ case I2S_XFER: |
10317 |
+ case I2S_CLR: |
10318 |
++ case I2S_TXDR: |
10319 |
+ case I2S_RXDR: |
10320 |
+ case I2S_FIFOLR: |
10321 |
+ case I2S_INTSR: |
10322 |
+@@ -518,6 +519,9 @@ static bool rockchip_i2s_volatile_reg(struct device *dev, unsigned int reg) |
10323 |
+ switch (reg) { |
10324 |
+ case I2S_INTSR: |
10325 |
+ case I2S_CLR: |
10326 |
++ case I2S_FIFOLR: |
10327 |
++ case I2S_TXDR: |
10328 |
++ case I2S_RXDR: |
10329 |
+ return true; |
10330 |
+ default: |
10331 |
+ return false; |
10332 |
+@@ -527,6 +531,8 @@ static bool rockchip_i2s_volatile_reg(struct device *dev, unsigned int reg) |
10333 |
+ static bool rockchip_i2s_precious_reg(struct device *dev, unsigned int reg) |
10334 |
+ { |
10335 |
+ switch (reg) { |
10336 |
++ case I2S_RXDR: |
10337 |
++ return true; |
10338 |
+ default: |
10339 |
+ return false; |
10340 |
+ } |
10341 |
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c |
10342 |
+index 9cd028aa1509..2e458eb45586 100644 |
10343 |
+--- a/tools/objtool/check.c |
10344 |
++++ b/tools/objtool/check.c |
10345 |
+@@ -851,8 +851,14 @@ static int add_switch_table(struct objtool_file *file, struct symbol *func, |
10346 |
+ * This is a fairly uncommon pattern which is new for GCC 6. As of this |
10347 |
+ * writing, there are 11 occurrences of it in the allmodconfig kernel. |
10348 |
+ * |
10349 |
++ * As of GCC 7 there are quite a few more of these and the 'in between' code |
10350 |
++ * is significant. Esp. with KASAN enabled some of the code between the mov |
10351 |
++ * and jmpq uses .rodata itself, which can confuse things. |
10352 |
++ * |
10353 |
+ * TODO: Once we have DWARF CFI and smarter instruction decoding logic, |
10354 |
+ * ensure the same register is used in the mov and jump instructions. |
10355 |
++ * |
10356 |
++ * NOTE: RETPOLINE made it harder still to decode dynamic jumps. |
10357 |
+ */ |
10358 |
+ static struct rela *find_switch_table(struct objtool_file *file, |
10359 |
+ struct symbol *func, |
10360 |
+@@ -874,12 +880,25 @@ static struct rela *find_switch_table(struct objtool_file *file, |
10361 |
+ text_rela->addend + 4); |
10362 |
+ if (!rodata_rela) |
10363 |
+ return NULL; |
10364 |
++ |
10365 |
+ file->ignore_unreachables = true; |
10366 |
+ return rodata_rela; |
10367 |
+ } |
10368 |
+ |
10369 |
+ /* case 3 */ |
10370 |
+- func_for_each_insn_continue_reverse(file, func, insn) { |
10371 |
++ /* |
10372 |
++ * Backward search using the @first_jump_src links, these help avoid |
10373 |
++ * much of the 'in between' code. Which avoids us getting confused by |
10374 |
++ * it. |
10375 |
++ */ |
10376 |
++ for (insn = list_prev_entry(insn, list); |
10377 |
++ |
10378 |
++ &insn->list != &file->insn_list && |
10379 |
++ insn->sec == func->sec && |
10380 |
++ insn->offset >= func->offset; |
10381 |
++ |
10382 |
++ insn = insn->first_jump_src ?: list_prev_entry(insn, list)) { |
10383 |
++ |
10384 |
+ if (insn->type == INSN_JUMP_DYNAMIC) |
10385 |
+ break; |
10386 |
+ |
10387 |
+@@ -909,14 +928,32 @@ static struct rela *find_switch_table(struct objtool_file *file, |
10388 |
+ return NULL; |
10389 |
+ } |
10390 |
+ |
10391 |
++ |
10392 |
+ static int add_func_switch_tables(struct objtool_file *file, |
10393 |
+ struct symbol *func) |
10394 |
+ { |
10395 |
+- struct instruction *insn, *prev_jump = NULL; |
10396 |
++ struct instruction *insn, *last = NULL, *prev_jump = NULL; |
10397 |
+ struct rela *rela, *prev_rela = NULL; |
10398 |
+ int ret; |
10399 |
+ |
10400 |
+ func_for_each_insn(file, func, insn) { |
10401 |
++ if (!last) |
10402 |
++ last = insn; |
10403 |
++ |
10404 |
++ /* |
10405 |
++ * Store back-pointers for unconditional forward jumps such |
10406 |
++ * that find_switch_table() can back-track using those and |
10407 |
++ * avoid some potentially confusing code. |
10408 |
++ */ |
10409 |
++ if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest && |
10410 |
++ insn->offset > last->offset && |
10411 |
++ insn->jump_dest->offset > insn->offset && |
10412 |
++ !insn->jump_dest->first_jump_src) { |
10413 |
++ |
10414 |
++ insn->jump_dest->first_jump_src = insn; |
10415 |
++ last = insn->jump_dest; |
10416 |
++ } |
10417 |
++ |
10418 |
+ if (insn->type != INSN_JUMP_DYNAMIC) |
10419 |
+ continue; |
10420 |
+ |
10421 |
+diff --git a/tools/objtool/check.h b/tools/objtool/check.h |
10422 |
+index dbadb304a410..23a1d065cae1 100644 |
10423 |
+--- a/tools/objtool/check.h |
10424 |
++++ b/tools/objtool/check.h |
10425 |
+@@ -47,6 +47,7 @@ struct instruction { |
10426 |
+ bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts; |
10427 |
+ struct symbol *call_dest; |
10428 |
+ struct instruction *jump_dest; |
10429 |
++ struct instruction *first_jump_src; |
10430 |
+ struct list_head alts; |
10431 |
+ struct symbol *func; |
10432 |
+ struct stack_op stack_op; |
10433 |
+diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c |
10434 |
+index 9a07ee94a230..8b6c42dc1aa9 100644 |
10435 |
+--- a/virt/kvm/arm/arm.c |
10436 |
++++ b/virt/kvm/arm/arm.c |
10437 |
+@@ -29,6 +29,7 @@ |
10438 |
+ #include <linux/kvm.h> |
10439 |
+ #include <trace/events/kvm.h> |
10440 |
+ #include <kvm/arm_pmu.h> |
10441 |
++#include <kvm/arm_psci.h> |
10442 |
+ |
10443 |
+ #define CREATE_TRACE_POINTS |
10444 |
+ #include "trace.h" |
10445 |
+@@ -44,7 +45,6 @@ |
10446 |
+ #include <asm/kvm_mmu.h> |
10447 |
+ #include <asm/kvm_emulate.h> |
10448 |
+ #include <asm/kvm_coproc.h> |
10449 |
+-#include <asm/kvm_psci.h> |
10450 |
+ #include <asm/sections.h> |
10451 |
+ |
10452 |
+ #ifdef REQUIRES_VIRT |
10453 |
+@@ -1139,7 +1139,7 @@ static void cpu_init_hyp_mode(void *dummy) |
10454 |
+ pgd_ptr = kvm_mmu_get_httbr(); |
10455 |
+ stack_page = __this_cpu_read(kvm_arm_hyp_stack_page); |
10456 |
+ hyp_stack_ptr = stack_page + PAGE_SIZE; |
10457 |
+- vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector); |
10458 |
++ vector_ptr = (unsigned long)kvm_get_hyp_vector(); |
10459 |
+ |
10460 |
+ __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr); |
10461 |
+ __cpu_init_stage2(); |
10462 |
+@@ -1220,6 +1220,7 @@ static int hyp_init_cpu_pm_notifier(struct notifier_block *self, |
10463 |
+ cpu_hyp_reset(); |
10464 |
+ |
10465 |
+ return NOTIFY_OK; |
10466 |
++ case CPU_PM_ENTER_FAILED: |
10467 |
+ case CPU_PM_EXIT: |
10468 |
+ if (__this_cpu_read(kvm_arm_hardware_enabled)) |
10469 |
+ /* The hardware was enabled before suspend. */ |
10470 |
+@@ -1384,6 +1385,12 @@ static int init_hyp_mode(void) |
10471 |
+ goto out_err; |
10472 |
+ } |
10473 |
+ |
10474 |
++ err = kvm_map_vectors(); |
10475 |
++ if (err) { |
10476 |
++ kvm_err("Cannot map vectors\n"); |
10477 |
++ goto out_err; |
10478 |
++ } |
10479 |
++ |
10480 |
+ /* |
10481 |
+ * Map the Hyp stack pages |
10482 |
+ */ |
10483 |
+diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c |
10484 |
+index f1e363bab5e8..6919352cbf15 100644 |
10485 |
+--- a/virt/kvm/arm/psci.c |
10486 |
++++ b/virt/kvm/arm/psci.c |
10487 |
+@@ -15,16 +15,16 @@ |
10488 |
+ * along with this program. If not, see <http://www.gnu.org/licenses/>. |
10489 |
+ */ |
10490 |
+ |
10491 |
++#include <linux/arm-smccc.h> |
10492 |
+ #include <linux/preempt.h> |
10493 |
+ #include <linux/kvm_host.h> |
10494 |
+ #include <linux/wait.h> |
10495 |
+ |
10496 |
+ #include <asm/cputype.h> |
10497 |
+ #include <asm/kvm_emulate.h> |
10498 |
+-#include <asm/kvm_psci.h> |
10499 |
+ #include <asm/kvm_host.h> |
10500 |
+ |
10501 |
+-#include <uapi/linux/psci.h> |
10502 |
++#include <kvm/arm_psci.h> |
10503 |
+ |
10504 |
+ /* |
10505 |
+ * This is an implementation of the Power State Coordination Interface |
10506 |
+@@ -33,6 +33,38 @@ |
10507 |
+ |
10508 |
+ #define AFFINITY_MASK(level) ~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1) |
10509 |
+ |
10510 |
++static u32 smccc_get_function(struct kvm_vcpu *vcpu) |
10511 |
++{ |
10512 |
++ return vcpu_get_reg(vcpu, 0); |
10513 |
++} |
10514 |
++ |
10515 |
++static unsigned long smccc_get_arg1(struct kvm_vcpu *vcpu) |
10516 |
++{ |
10517 |
++ return vcpu_get_reg(vcpu, 1); |
10518 |
++} |
10519 |
++ |
10520 |
++static unsigned long smccc_get_arg2(struct kvm_vcpu *vcpu) |
10521 |
++{ |
10522 |
++ return vcpu_get_reg(vcpu, 2); |
10523 |
++} |
10524 |
++ |
10525 |
++static unsigned long smccc_get_arg3(struct kvm_vcpu *vcpu) |
10526 |
++{ |
10527 |
++ return vcpu_get_reg(vcpu, 3); |
10528 |
++} |
10529 |
++ |
10530 |
++static void smccc_set_retval(struct kvm_vcpu *vcpu, |
10531 |
++ unsigned long a0, |
10532 |
++ unsigned long a1, |
10533 |
++ unsigned long a2, |
10534 |
++ unsigned long a3) |
10535 |
++{ |
10536 |
++ vcpu_set_reg(vcpu, 0, a0); |
10537 |
++ vcpu_set_reg(vcpu, 1, a1); |
10538 |
++ vcpu_set_reg(vcpu, 2, a2); |
10539 |
++ vcpu_set_reg(vcpu, 3, a3); |
10540 |
++} |
10541 |
++ |
10542 |
+ static unsigned long psci_affinity_mask(unsigned long affinity_level) |
10543 |
+ { |
10544 |
+ if (affinity_level <= 3) |
10545 |
+@@ -78,7 +110,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) |
10546 |
+ unsigned long context_id; |
10547 |
+ phys_addr_t target_pc; |
10548 |
+ |
10549 |
+- cpu_id = vcpu_get_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK; |
10550 |
++ cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK; |
10551 |
+ if (vcpu_mode_is_32bit(source_vcpu)) |
10552 |
+ cpu_id &= ~((u32) 0); |
10553 |
+ |
10554 |
+@@ -91,14 +123,14 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) |
10555 |
+ if (!vcpu) |
10556 |
+ return PSCI_RET_INVALID_PARAMS; |
10557 |
+ if (!vcpu->arch.power_off) { |
10558 |
+- if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1) |
10559 |
++ if (kvm_psci_version(source_vcpu, kvm) != KVM_ARM_PSCI_0_1) |
10560 |
+ return PSCI_RET_ALREADY_ON; |
10561 |
+ else |
10562 |
+ return PSCI_RET_INVALID_PARAMS; |
10563 |
+ } |
10564 |
+ |
10565 |
+- target_pc = vcpu_get_reg(source_vcpu, 2); |
10566 |
+- context_id = vcpu_get_reg(source_vcpu, 3); |
10567 |
++ target_pc = smccc_get_arg2(source_vcpu); |
10568 |
++ context_id = smccc_get_arg3(source_vcpu); |
10569 |
+ |
10570 |
+ kvm_reset_vcpu(vcpu); |
10571 |
+ |
10572 |
+@@ -117,7 +149,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) |
10573 |
+ * NOTE: We always update r0 (or x0) because for PSCI v0.1 |
10574 |
+ * the general puspose registers are undefined upon CPU_ON. |
10575 |
+ */ |
10576 |
+- vcpu_set_reg(vcpu, 0, context_id); |
10577 |
++ smccc_set_retval(vcpu, context_id, 0, 0, 0); |
10578 |
+ vcpu->arch.power_off = false; |
10579 |
+ smp_mb(); /* Make sure the above is visible */ |
10580 |
+ |
10581 |
+@@ -137,8 +169,8 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu) |
10582 |
+ struct kvm *kvm = vcpu->kvm; |
10583 |
+ struct kvm_vcpu *tmp; |
10584 |
+ |
10585 |
+- target_affinity = vcpu_get_reg(vcpu, 1); |
10586 |
+- lowest_affinity_level = vcpu_get_reg(vcpu, 2); |
10587 |
++ target_affinity = smccc_get_arg1(vcpu); |
10588 |
++ lowest_affinity_level = smccc_get_arg2(vcpu); |
10589 |
+ |
10590 |
+ /* Determine target affinity mask */ |
10591 |
+ target_affinity_mask = psci_affinity_mask(lowest_affinity_level); |
10592 |
+@@ -200,18 +232,10 @@ static void kvm_psci_system_reset(struct kvm_vcpu *vcpu) |
10593 |
+ kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET); |
10594 |
+ } |
10595 |
+ |
10596 |
+-int kvm_psci_version(struct kvm_vcpu *vcpu) |
10597 |
+-{ |
10598 |
+- if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) |
10599 |
+- return KVM_ARM_PSCI_0_2; |
10600 |
+- |
10601 |
+- return KVM_ARM_PSCI_0_1; |
10602 |
+-} |
10603 |
+- |
10604 |
+ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu) |
10605 |
+ { |
10606 |
+ struct kvm *kvm = vcpu->kvm; |
10607 |
+- unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0); |
10608 |
++ u32 psci_fn = smccc_get_function(vcpu); |
10609 |
+ unsigned long val; |
10610 |
+ int ret = 1; |
10611 |
+ |
10612 |
+@@ -221,7 +245,7 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu) |
10613 |
+ * Bits[31:16] = Major Version = 0 |
10614 |
+ * Bits[15:0] = Minor Version = 2 |
10615 |
+ */ |
10616 |
+- val = 2; |
10617 |
++ val = KVM_ARM_PSCI_0_2; |
10618 |
+ break; |
10619 |
+ case PSCI_0_2_FN_CPU_SUSPEND: |
10620 |
+ case PSCI_0_2_FN64_CPU_SUSPEND: |
10621 |
+@@ -278,14 +302,56 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu) |
10622 |
+ break; |
10623 |
+ } |
10624 |
+ |
10625 |
+- vcpu_set_reg(vcpu, 0, val); |
10626 |
++ smccc_set_retval(vcpu, val, 0, 0, 0); |
10627 |
++ return ret; |
10628 |
++} |
10629 |
++ |
10630 |
++static int kvm_psci_1_0_call(struct kvm_vcpu *vcpu) |
10631 |
++{ |
10632 |
++ u32 psci_fn = smccc_get_function(vcpu); |
10633 |
++ u32 feature; |
10634 |
++ unsigned long val; |
10635 |
++ int ret = 1; |
10636 |
++ |
10637 |
++ switch(psci_fn) { |
10638 |
++ case PSCI_0_2_FN_PSCI_VERSION: |
10639 |
++ val = KVM_ARM_PSCI_1_0; |
10640 |
++ break; |
10641 |
++ case PSCI_1_0_FN_PSCI_FEATURES: |
10642 |
++ feature = smccc_get_arg1(vcpu); |
10643 |
++ switch(feature) { |
10644 |
++ case PSCI_0_2_FN_PSCI_VERSION: |
10645 |
++ case PSCI_0_2_FN_CPU_SUSPEND: |
10646 |
++ case PSCI_0_2_FN64_CPU_SUSPEND: |
10647 |
++ case PSCI_0_2_FN_CPU_OFF: |
10648 |
++ case PSCI_0_2_FN_CPU_ON: |
10649 |
++ case PSCI_0_2_FN64_CPU_ON: |
10650 |
++ case PSCI_0_2_FN_AFFINITY_INFO: |
10651 |
++ case PSCI_0_2_FN64_AFFINITY_INFO: |
10652 |
++ case PSCI_0_2_FN_MIGRATE_INFO_TYPE: |
10653 |
++ case PSCI_0_2_FN_SYSTEM_OFF: |
10654 |
++ case PSCI_0_2_FN_SYSTEM_RESET: |
10655 |
++ case PSCI_1_0_FN_PSCI_FEATURES: |
10656 |
++ case ARM_SMCCC_VERSION_FUNC_ID: |
10657 |
++ val = 0; |
10658 |
++ break; |
10659 |
++ default: |
10660 |
++ val = PSCI_RET_NOT_SUPPORTED; |
10661 |
++ break; |
10662 |
++ } |
10663 |
++ break; |
10664 |
++ default: |
10665 |
++ return kvm_psci_0_2_call(vcpu); |
10666 |
++ } |
10667 |
++ |
10668 |
++ smccc_set_retval(vcpu, val, 0, 0, 0); |
10669 |
+ return ret; |
10670 |
+ } |
10671 |
+ |
10672 |
+ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu) |
10673 |
+ { |
10674 |
+ struct kvm *kvm = vcpu->kvm; |
10675 |
+- unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0); |
10676 |
++ u32 psci_fn = smccc_get_function(vcpu); |
10677 |
+ unsigned long val; |
10678 |
+ |
10679 |
+ switch (psci_fn) { |
10680 |
+@@ -303,7 +369,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu) |
10681 |
+ break; |
10682 |
+ } |
10683 |
+ |
10684 |
+- vcpu_set_reg(vcpu, 0, val); |
10685 |
++ smccc_set_retval(vcpu, val, 0, 0, 0); |
10686 |
+ return 1; |
10687 |
+ } |
10688 |
+ |
10689 |
+@@ -321,9 +387,11 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu) |
10690 |
+ * Errors: |
10691 |
+ * -EINVAL: Unrecognized PSCI function |
10692 |
+ */ |
10693 |
+-int kvm_psci_call(struct kvm_vcpu *vcpu) |
10694 |
++static int kvm_psci_call(struct kvm_vcpu *vcpu) |
10695 |
+ { |
10696 |
+- switch (kvm_psci_version(vcpu)) { |
10697 |
++ switch (kvm_psci_version(vcpu, vcpu->kvm)) { |
10698 |
++ case KVM_ARM_PSCI_1_0: |
10699 |
++ return kvm_psci_1_0_call(vcpu); |
10700 |
+ case KVM_ARM_PSCI_0_2: |
10701 |
+ return kvm_psci_0_2_call(vcpu); |
10702 |
+ case KVM_ARM_PSCI_0_1: |
10703 |
+@@ -332,3 +400,30 @@ int kvm_psci_call(struct kvm_vcpu *vcpu) |
10704 |
+ return -EINVAL; |
10705 |
+ }; |
10706 |
+ } |
10707 |
++ |
10708 |
++int kvm_hvc_call_handler(struct kvm_vcpu *vcpu) |
10709 |
++{ |
10710 |
++ u32 func_id = smccc_get_function(vcpu); |
10711 |
++ u32 val = PSCI_RET_NOT_SUPPORTED; |
10712 |
++ u32 feature; |
10713 |
++ |
10714 |
++ switch (func_id) { |
10715 |
++ case ARM_SMCCC_VERSION_FUNC_ID: |
10716 |
++ val = ARM_SMCCC_VERSION_1_1; |
10717 |
++ break; |
10718 |
++ case ARM_SMCCC_ARCH_FEATURES_FUNC_ID: |
10719 |
++ feature = smccc_get_arg1(vcpu); |
10720 |
++ switch(feature) { |
10721 |
++ case ARM_SMCCC_ARCH_WORKAROUND_1: |
10722 |
++ if (kvm_arm_harden_branch_predictor()) |
10723 |
++ val = 0; |
10724 |
++ break; |
10725 |
++ } |
10726 |
++ break; |
10727 |
++ default: |
10728 |
++ return kvm_psci_call(vcpu); |
10729 |
++ } |
10730 |
++ |
10731 |
++ smccc_set_retval(vcpu, val, 0, 0, 0); |
10732 |
++ return 1; |
10733 |
++} |