commit: 333a8fe029b7693b974251030f8b07f2a07a8776
Author: Alice Ferrazzi <alicef@gentoo.org>
AuthorDate: Thu Nov 30 12:08:35 2017 +0000
Commit: Alice Ferrazzi <alicef@gentoo.org>
CommitDate: Thu Nov 30 12:08:35 2017 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=333a8fe0

linux kernel 4.14.3

0000_README | 4 +
1002_linux-4.14.3.patch | 8034 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 8038 insertions(+)

diff --git a/0000_README b/0000_README
index 1324e53..9aaf65a 100644
--- a/0000_README
+++ b/0000_README
@@ -51,6 +51,10 @@ Patch: 1001_linux-4.14.2.patch
From: http://www.kernel.org
Desc: Linux 4.14.2

+Patch: 1002_linux-4.14.3.patch
+From: http://www.kernel.org
+Desc: Linux 4.14.3
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1002_linux-4.14.3.patch b/1002_linux-4.14.3.patch
new file mode 100644
index 0000000..269ad50
--- /dev/null
+++ b/1002_linux-4.14.3.patch
@@ -0,0 +1,8034 @@
+diff --git a/Makefile b/Makefile
+index 75d89dc2b94a..ede4de0d8634 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
+index 35ff45470dbf..fc3b44028cfb 100644
+--- a/arch/arm/mm/dump.c
++++ b/arch/arm/mm/dump.c
+@@ -129,8 +129,8 @@ static const struct prot_bits section_bits[] = {
+ .val = PMD_SECT_USER,
+ .set = "USR",
+ }, {
+- .mask = L_PMD_SECT_RDONLY,
+- .val = L_PMD_SECT_RDONLY,
++ .mask = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
++ .val = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
+ .set = "ro",
+ .clear = "RW",
+ #elif __LINUX_ARM_ARCH__ >= 6
+diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
+index ad80548325fe..0f6d1537f330 100644
+--- a/arch/arm/mm/init.c
++++ b/arch/arm/mm/init.c
+@@ -639,8 +639,8 @@ static struct section_perm ro_perms[] = {
+ .start = (unsigned long)_stext,
+ .end = (unsigned long)__init_begin,
+ #ifdef CONFIG_ARM_LPAE
+- .mask = ~L_PMD_SECT_RDONLY,
+- .prot = L_PMD_SECT_RDONLY,
++ .mask = ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
++ .prot = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
+ #else
+ .mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
+ .prot = PMD_SECT_APX | PMD_SECT_AP_WRITE,
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+index d8dd3298b15c..fb8d76a17bc5 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+@@ -49,6 +49,14 @@
+
+ / {
+ compatible = "amlogic,meson-gxl";
++
++ reserved-memory {
++ /* Alternate 3 MiB reserved for ARM Trusted Firmware (BL31) */
++ secmon_reserved_alt: secmon@05000000 {
++ reg = <0x0 0x05000000 0x0 0x300000>;
++ no-map;
++ };
++ };
+ };
+
+ &ethmac {
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index b46e54c2399b..c9530b5b5ca8 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -98,6 +98,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
+ ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
+ #define pte_valid_young(pte) \
+ ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
++#define pte_valid_user(pte) \
++ ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
+
+ /*
+ * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
+@@ -107,6 +109,18 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
+ #define pte_accessible(mm, pte) \
+ (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))
+
++/*
++ * p??_access_permitted() is true for valid user mappings (subject to the
++ * write permission check) other than user execute-only which do not have the
++ * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set.
++ */
++#define pte_access_permitted(pte, write) \
++ (pte_valid_user(pte) && (!(write) || pte_write(pte)))
++#define pmd_access_permitted(pmd, write) \
++ (pte_access_permitted(pmd_pte(pmd), (write)))
++#define pud_access_permitted(pud, write) \
++ (pte_access_permitted(pud_pte(pud), (write)))
++
+ static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
+ {
+ pte_val(pte) &= ~pgprot_val(prot);
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index 5d3284d20678..c3d798b44030 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -65,7 +65,7 @@ config MIPS
+ select HAVE_PERF_EVENTS
+ select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_SYSCALL_TRACEPOINTS
+- select HAVE_VIRT_CPU_ACCOUNTING_GEN
++ select HAVE_VIRT_CPU_ACCOUNTING_GEN if 64BIT || !SMP
+ select IRQ_FORCED_THREADING
+ select MODULES_USE_ELF_RELA if MODULES && 64BIT
+ select MODULES_USE_ELF_REL if MODULES
+diff --git a/arch/mips/bcm47xx/leds.c b/arch/mips/bcm47xx/leds.c
+index d4f2407a42c6..8307a8a02667 100644
+--- a/arch/mips/bcm47xx/leds.c
++++ b/arch/mips/bcm47xx/leds.c
+@@ -331,7 +331,7 @@ bcm47xx_leds_linksys_wrt54g3gv2[] __initconst = {
+ /* Verified on: WRT54GS V1.0 */
+ static const struct gpio_led
+ bcm47xx_leds_linksys_wrt54g_type_0101[] __initconst = {
+- BCM47XX_GPIO_LED(0, "green", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF),
++ BCM47XX_GPIO_LED(0, "green", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(1, "green", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(7, "green", "dmz", 1, LEDS_GPIO_DEFSTATE_OFF),
+ };
+diff --git a/arch/mips/boot/dts/brcm/Makefile b/arch/mips/boot/dts/brcm/Makefile
+index 9e09cc4556b3..398994312361 100644
+--- a/arch/mips/boot/dts/brcm/Makefile
++++ b/arch/mips/boot/dts/brcm/Makefile
+@@ -23,7 +23,6 @@ dtb-$(CONFIG_DT_NONE) += \
+ bcm63268-comtrend-vr-3032u.dtb \
+ bcm93384wvg.dtb \
+ bcm93384wvg_viper.dtb \
+- bcm96358nb4ser.dtb \
+ bcm96368mvwg.dtb \
+ bcm9ejtagprb.dtb \
+ bcm97125cbmb.dtb \
+diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
+index 83054f79f72a..feb069cbf44e 100644
+--- a/arch/mips/include/asm/asmmacro.h
++++ b/arch/mips/include/asm/asmmacro.h
+@@ -19,6 +19,9 @@
+ #include <asm/asmmacro-64.h>
+ #endif
+
++/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
++#undef fp
++
+ /*
+ * Helper macros for generating raw instruction encodings.
+ */
+@@ -105,6 +108,7 @@
+ .macro fpu_save_16odd thread
+ .set push
+ .set mips64r2
++ .set fp=64
+ SET_HARDFLOAT
+ sdc1 $f1, THREAD_FPR1(\thread)
+ sdc1 $f3, THREAD_FPR3(\thread)
+@@ -126,8 +130,8 @@
+ .endm
+
+ .macro fpu_save_double thread status tmp
+-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+- defined(CONFIG_CPU_MIPS32_R6)
++#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
++ defined(CONFIG_CPU_MIPSR6)
+ sll \tmp, \status, 5
+ bgez \tmp, 10f
+ fpu_save_16odd \thread
+@@ -163,6 +167,7 @@
+ .macro fpu_restore_16odd thread
+ .set push
+ .set mips64r2
++ .set fp=64
+ SET_HARDFLOAT
+ ldc1 $f1, THREAD_FPR1(\thread)
+ ldc1 $f3, THREAD_FPR3(\thread)
+@@ -184,8 +189,8 @@
+ .endm
+
+ .macro fpu_restore_double thread status tmp
+-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+- defined(CONFIG_CPU_MIPS32_R6)
++#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
++ defined(CONFIG_CPU_MIPSR6)
+ sll \tmp, \status, 5
+ bgez \tmp, 10f # 16 register mode?
+
+@@ -234,9 +239,6 @@
+ .endm
+
+ #ifdef TOOLCHAIN_SUPPORTS_MSA
+-/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
+-#undef fp
+-
+ .macro _cfcmsa rd, cs
+ .set push
+ .set mips32r2
+diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
+index 7e25c5cc353a..89e9fb7976fe 100644
+--- a/arch/mips/include/asm/cmpxchg.h
++++ b/arch/mips/include/asm/cmpxchg.h
+@@ -204,8 +204,10 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+ #else
+ #include <asm-generic/cmpxchg-local.h>
+ #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
++#ifndef CONFIG_SMP
+ #define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+ #endif
++#endif
+
+ #undef __scbeqz
+
+diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
+index 1395654cfc8d..5a09c2901a76 100644
+--- a/arch/mips/kernel/ptrace.c
++++ b/arch/mips/kernel/ptrace.c
+@@ -618,6 +618,19 @@ static const struct user_regset_view user_mips64_view = {
+ .n = ARRAY_SIZE(mips64_regsets),
+ };
+
++#ifdef CONFIG_MIPS32_N32
++
++static const struct user_regset_view user_mipsn32_view = {
++ .name = "mipsn32",
++ .e_flags = EF_MIPS_ABI2,
++ .e_machine = ELF_ARCH,
++ .ei_osabi = ELF_OSABI,
++ .regsets = mips64_regsets,
++ .n = ARRAY_SIZE(mips64_regsets),
++};
++
++#endif /* CONFIG_MIPS32_N32 */
++
+ #endif /* CONFIG_64BIT */
+
+ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+@@ -628,6 +641,10 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+ #ifdef CONFIG_MIPS32_O32
+ if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
+ return &user_mips_view;
++#endif
++#ifdef CONFIG_MIPS32_N32
++ if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
++ return &user_mipsn32_view;
+ #endif
+ return &user_mips64_view;
+ #endif
+diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
+index 0a83b1708b3c..8e3a6020c613 100644
+--- a/arch/mips/kernel/r4k_fpu.S
++++ b/arch/mips/kernel/r4k_fpu.S
+@@ -40,8 +40,8 @@
+ */
+ LEAF(_save_fp)
+ EXPORT_SYMBOL(_save_fp)
+-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+- defined(CONFIG_CPU_MIPS32_R6)
++#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
++ defined(CONFIG_CPU_MIPSR6)
+ mfc0 t0, CP0_STATUS
+ #endif
+ fpu_save_double a0 t0 t1 # clobbers t1
+@@ -52,8 +52,8 @@ EXPORT_SYMBOL(_save_fp)
+ * Restore a thread's fp context.
+ */
+ LEAF(_restore_fp)
+-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+- defined(CONFIG_CPU_MIPS32_R6)
++#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
++ defined(CONFIG_CPU_MIPSR6)
+ mfc0 t0, CP0_STATUS
+ #endif
+ fpu_restore_double a0 t0 t1 # clobbers t1
+@@ -246,11 +246,11 @@ LEAF(_save_fp_context)
+ cfc1 t1, fcr31
+ .set pop
+
+-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+- defined(CONFIG_CPU_MIPS32_R6)
++#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
++ defined(CONFIG_CPU_MIPSR6)
+ .set push
+ SET_HARDFLOAT
+-#ifdef CONFIG_CPU_MIPS32_R2
++#ifdef CONFIG_CPU_MIPSR2
+ .set mips32r2
+ .set fp=64
+ mfc0 t0, CP0_STATUS
+@@ -314,11 +314,11 @@ LEAF(_save_fp_context)
+ LEAF(_restore_fp_context)
+ EX lw t1, 0(a1)
+
+-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+- defined(CONFIG_CPU_MIPS32_R6)
++#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
++ defined(CONFIG_CPU_MIPSR6)
+ .set push
+ SET_HARDFLOAT
+-#ifdef CONFIG_CPU_MIPS32_R2
++#ifdef CONFIG_CPU_MIPSR2
+ .set mips32r2
+ .set fp=64
+ mfc0 t0, CP0_STATUS
+diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
+index 16d9ef5a78c5..6f57212f5659 100644
+--- a/arch/mips/math-emu/cp1emu.c
++++ b/arch/mips/math-emu/cp1emu.c
+@@ -1795,7 +1795,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ SPFROMREG(fd, MIPSInst_FD(ir));
+ rv.s = ieee754sp_maddf(fd, fs, ft);
+- break;
++ goto copcsr;
+ }
+
+ case fmsubf_op: {
+@@ -1809,7 +1809,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ SPFROMREG(fd, MIPSInst_FD(ir));
+ rv.s = ieee754sp_msubf(fd, fs, ft);
+- break;
++ goto copcsr;
+ }
+
+ case frint_op: {
+@@ -1834,7 +1834,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ rv.w = ieee754sp_2008class(fs);
+ rfmt = w_fmt;
+- break;
++ goto copcsr;
+ }
+
+ case fmin_op: {
+@@ -1847,7 +1847,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ SPFROMREG(ft, MIPSInst_FT(ir));
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ rv.s = ieee754sp_fmin(fs, ft);
+- break;
++ goto copcsr;
+ }
+
+ case fmina_op: {
+@@ -1860,7 +1860,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ SPFROMREG(ft, MIPSInst_FT(ir));
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ rv.s = ieee754sp_fmina(fs, ft);
+- break;
++ goto copcsr;
+ }
+
+ case fmax_op: {
+@@ -1873,7 +1873,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ SPFROMREG(ft, MIPSInst_FT(ir));
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ rv.s = ieee754sp_fmax(fs, ft);
+- break;
++ goto copcsr;
+ }
+
+ case fmaxa_op: {
+@@ -1886,7 +1886,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ SPFROMREG(ft, MIPSInst_FT(ir));
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ rv.s = ieee754sp_fmaxa(fs, ft);
+- break;
++ goto copcsr;
+ }
+
+ case fabs_op:
+@@ -2165,7 +2165,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ DPFROMREG(fd, MIPSInst_FD(ir));
+ rv.d = ieee754dp_maddf(fd, fs, ft);
+- break;
++ goto copcsr;
+ }
+
+ case fmsubf_op: {
+@@ -2179,7 +2179,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ DPFROMREG(fd, MIPSInst_FD(ir));
+ rv.d = ieee754dp_msubf(fd, fs, ft);
+- break;
++ goto copcsr;
+ }
+
+ case frint_op: {
+@@ -2204,7 +2204,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ rv.l = ieee754dp_2008class(fs);
+ rfmt = l_fmt;
+- break;
++ goto copcsr;
+ }
+
+ case fmin_op: {
+@@ -2217,7 +2217,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ DPFROMREG(ft, MIPSInst_FT(ir));
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ rv.d = ieee754dp_fmin(fs, ft);
+- break;
++ goto copcsr;
+ }
+
+ case fmina_op: {
+@@ -2230,7 +2230,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ DPFROMREG(ft, MIPSInst_FT(ir));
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ rv.d = ieee754dp_fmina(fs, ft);
+- break;
++ goto copcsr;
+ }
+
+ case fmax_op: {
+@@ -2243,7 +2243,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ DPFROMREG(ft, MIPSInst_FT(ir));
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ rv.d = ieee754dp_fmax(fs, ft);
+- break;
++ goto copcsr;
+ }
+
+ case fmaxa_op: {
+@@ -2256,7 +2256,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ DPFROMREG(ft, MIPSInst_FT(ir));
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ rv.d = ieee754dp_fmaxa(fs, ft);
+- break;
++ goto copcsr;
+ }
+
+ case fabs_op:
+diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c
+index 90fba9bf98da..27ac00c36bc0 100644
+--- a/arch/mips/pci/pci-mt7620.c
++++ b/arch/mips/pci/pci-mt7620.c
+@@ -121,7 +121,7 @@ static int wait_pciephy_busy(void)
+ else
+ break;
+ if (retry++ > WAITRETRY_MAX) {
+- printk(KERN_WARN "PCIE-PHY retry failed.\n");
++ pr_warn("PCIE-PHY retry failed.\n");
+ return -1;
+ }
+ }
+diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
+index 9be8b08ae46b..41b71c4352c2 100644
+--- a/arch/mips/ralink/mt7620.c
++++ b/arch/mips/ralink/mt7620.c
+@@ -145,8 +145,8 @@ static struct rt2880_pmx_func i2c_grp_mt7628[] = {
+ FUNC("i2c", 0, 4, 2),
+ };
+
+-static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("reclk", 0, 36, 1) };
+-static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 37, 1) };
++static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("refclk", 0, 37, 1) };
++static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 36, 1) };
+ static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 38, 1) };
+ static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) };
+
+diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
+index 41e60a9c7db2..e775f80ae28c 100644
+--- a/arch/parisc/kernel/syscall.S
++++ b/arch/parisc/kernel/syscall.S
+@@ -690,15 +690,15 @@ cas_action:
+ /* ELF32 Process entry path */
+ lws_compare_and_swap_2:
+ #ifdef CONFIG_64BIT
+- /* Clip the input registers */
++ /* Clip the input registers. We don't need to clip %r23 as we
++ only use it for word operations */
+ depdi 0, 31, 32, %r26
+ depdi 0, 31, 32, %r25
+ depdi 0, 31, 32, %r24
+- depdi 0, 31, 32, %r23
+ #endif
+
+ /* Check the validity of the size pointer */
+- subi,>>= 4, %r23, %r0
++ subi,>>= 3, %r23, %r0
+ b,n lws_exit_nosys
+
+ /* Jump to the functions which will load the old and new values into
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index 1c80bd292e48..06598142d755 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -542,7 +542,7 @@ EXC_COMMON_BEGIN(instruction_access_common)
+ RECONCILE_IRQ_STATE(r10, r11)
+ ld r12,_MSR(r1)
+ ld r3,_NIP(r1)
+- andis. r4,r12,DSISR_BAD_FAULT_64S@h
++ andis. r4,r12,DSISR_SRR1_MATCH_64S@h
+ li r5,0x400
+ std r3,_DAR(r1)
+ std r4,_DSISR(r1)
+diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
+index e9436c5e1e09..3d7539b90010 100644
+--- a/arch/powerpc/kernel/signal.c
++++ b/arch/powerpc/kernel/signal.c
+@@ -103,7 +103,7 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
+ static void do_signal(struct task_struct *tsk)
+ {
+ sigset_t *oldset = sigmask_to_save();
+- struct ksignal ksig;
++ struct ksignal ksig = { .sig = 0 };
+ int ret;
+ int is32 = is_32bit_task();
+
+diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
+index 90644db9d38e..8e0cf8f186df 100644
+--- a/arch/powerpc/kvm/book3s_hv_builtin.c
++++ b/arch/powerpc/kvm/book3s_hv_builtin.c
+@@ -529,6 +529,8 @@ static inline bool is_rm(void)
+
+ unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
+ {
++ if (!kvmppc_xics_enabled(vcpu))
++ return H_TOO_HARD;
+ if (xive_enabled()) {
+ if (is_rm())
+ return xive_rm_h_xirr(vcpu);
+@@ -541,6 +543,8 @@ unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
+
+ unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
+ {
++ if (!kvmppc_xics_enabled(vcpu))
++ return H_TOO_HARD;
+ vcpu->arch.gpr[5] = get_tb();
+ if (xive_enabled()) {
+ if (is_rm())
+@@ -554,6 +558,8 @@ unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
+
+ unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
+ {
++ if (!kvmppc_xics_enabled(vcpu))
++ return H_TOO_HARD;
+ if (xive_enabled()) {
+ if (is_rm())
+ return xive_rm_h_ipoll(vcpu, server);
+@@ -567,6 +573,8 @@ unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
+ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
+ unsigned long mfrr)
+ {
++ if (!kvmppc_xics_enabled(vcpu))
++ return H_TOO_HARD;
+ if (xive_enabled()) {
+ if (is_rm())
+ return xive_rm_h_ipi(vcpu, server, mfrr);
+@@ -579,6 +587,8 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
+
+ int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
+ {
++ if (!kvmppc_xics_enabled(vcpu))
++ return H_TOO_HARD;
+ if (xive_enabled()) {
+ if (is_rm())
+ return xive_rm_h_cppr(vcpu, cppr);
+@@ -591,6 +601,8 @@ int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
+
+ int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
+ {
++ if (!kvmppc_xics_enabled(vcpu))
++ return H_TOO_HARD;
+ if (xive_enabled()) {
+ if (is_rm())
+ return xive_rm_h_eoi(vcpu, xirr);
+diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
+index c9de03e0c1f1..d469224c4ada 100644
+--- a/arch/powerpc/lib/code-patching.c
++++ b/arch/powerpc/lib/code-patching.c
+@@ -21,6 +21,7 @@
+ #include <asm/tlbflush.h>
+ #include <asm/page.h>
+ #include <asm/code-patching.h>
++#include <asm/setup.h>
+
+ static int __patch_instruction(unsigned int *addr, unsigned int instr)
+ {
+@@ -146,11 +147,8 @@ int patch_instruction(unsigned int *addr, unsigned int instr)
+ * During early early boot patch_instruction is called
+ * when text_poke_area is not ready, but we still need
+ * to allow patching. We just do the plain old patching
+- * We use slab_is_available and per cpu read * via this_cpu_read
+- * of text_poke_area. Per-CPU areas might not be up early
+- * this can create problems with just using this_cpu_read()
+ */
+- if (!slab_is_available() || !this_cpu_read(text_poke_area))
++ if (!this_cpu_read(*PTRRELOC(&text_poke_area)))
+ return __patch_instruction(addr, instr);
+
+ local_irq_save(flags);
+diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c
+index 558e9d3891bf..bd022d16745c 100644
+--- a/arch/powerpc/mm/hugetlbpage-radix.c
++++ b/arch/powerpc/mm/hugetlbpage-radix.c
+@@ -49,17 +49,28 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ struct hstate *h = hstate_file(file);
++ int fixed = (flags & MAP_FIXED);
++ unsigned long high_limit;
+ struct vm_unmapped_area_info info;
+
+- if (unlikely(addr > mm->context.addr_limit && addr < TASK_SIZE))
+- mm->context.addr_limit = TASK_SIZE;
++ high_limit = DEFAULT_MAP_WINDOW;
++ if (addr >= high_limit || (fixed && (addr + len > high_limit)))
++ high_limit = TASK_SIZE;
+
+ if (len & ~huge_page_mask(h))
+ return -EINVAL;
+- if (len > mm->task_size)
++ if (len > high_limit)
+ return -ENOMEM;
++ if (fixed) {
++ if (addr > high_limit - len)
++ return -ENOMEM;
++ }
+
+- if (flags & MAP_FIXED) {
++ if (unlikely(addr > mm->context.addr_limit &&
++ mm->context.addr_limit != TASK_SIZE))
++ mm->context.addr_limit = TASK_SIZE;
++
++ if (fixed) {
+ if (prepare_hugepage_range(file, addr, len))
+ return -EINVAL;
+ return addr;
+@@ -68,7 +79,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ if (addr) {
+ addr = ALIGN(addr, huge_page_size(h));
+ vma = find_vma(mm, addr);
+- if (mm->task_size - len >= addr &&
++ if (high_limit - len >= addr &&
+ (!vma || addr + len <= vm_start_gap(vma)))
+ return addr;
+ }
+@@ -79,12 +90,9 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+ info.length = len;
+ info.low_limit = PAGE_SIZE;
+- info.high_limit = current->mm->mmap_base;
++ info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
+ info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+ info.align_offset = 0;
+
+- if (addr > DEFAULT_MAP_WINDOW)
+- info.high_limit += mm->context.addr_limit - DEFAULT_MAP_WINDOW;
+-
+ return vm_unmapped_area(&info);
+ }
+diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
+index 5d78b193fec4..6d476a7b5611 100644
+--- a/arch/powerpc/mm/mmap.c
++++ b/arch/powerpc/mm/mmap.c
+@@ -106,22 +106,32 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ {
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
++ int fixed = (flags & MAP_FIXED);
++ unsigned long high_limit;
+ struct vm_unmapped_area_info info;
+
++ high_limit = DEFAULT_MAP_WINDOW;
++ if (addr >= high_limit || (fixed && (addr + len > high_limit)))
++ high_limit = TASK_SIZE;
++
++ if (len > high_limit)
++ return -ENOMEM;
++ if (fixed) {
++ if (addr > high_limit - len)
++ return -ENOMEM;
++ }
++
+ if (unlikely(addr > mm->context.addr_limit &&
+ mm->context.addr_limit != TASK_SIZE))
+ mm->context.addr_limit = TASK_SIZE;
+
+- if (len > mm->task_size - mmap_min_addr)
+- return -ENOMEM;
+-
+- if (flags & MAP_FIXED)
++ if (fixed)
+ return addr;
+
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+- if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
++ if (high_limit - len >= addr && addr >= mmap_min_addr &&
+ (!vma || addr + len <= vm_start_gap(vma)))
+ return addr;
+ }
+@@ -129,13 +139,9 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ info.flags = 0;
+ info.length = len;
+ info.low_limit = mm->mmap_base;
++ info.high_limit = high_limit;
+ info.align_mask = 0;
+
+- if (unlikely(addr > DEFAULT_MAP_WINDOW))
+- info.high_limit = mm->context.addr_limit;
+- else
+- info.high_limit = DEFAULT_MAP_WINDOW;
+-
+ return vm_unmapped_area(&info);
+ }
+
+@@ -149,37 +155,42 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+ unsigned long addr = addr0;
++ int fixed = (flags & MAP_FIXED);
++ unsigned long high_limit;
+ struct vm_unmapped_area_info info;
+
++ high_limit = DEFAULT_MAP_WINDOW;
++ if (addr >= high_limit || (fixed && (addr + len > high_limit)))
++ high_limit = TASK_SIZE;
++
++ if (len > high_limit)
++ return -ENOMEM;
++ if (fixed) {
++ if (addr > high_limit - len)
++ return -ENOMEM;
++ }
++
+ if (unlikely(addr > mm->context.addr_limit &&
+ mm->context.addr_limit != TASK_SIZE))
+ mm->context.addr_limit = TASK_SIZE;
+
+- /* requested length too big for entire address space */
+- if (len > mm->task_size - mmap_min_addr)
+- return -ENOMEM;
+-
+- if (flags & MAP_FIXED)
++ if (fixed)
+ return addr;
+
+- /* requesting a specific address */
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+- if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
+- (!vma || addr + len <= vm_start_gap(vma)))
++ if (high_limit - len >= addr && addr >= mmap_min_addr &&
++ (!vma || addr + len <= vm_start_gap(vma)))
+ return addr;
+ }
+
+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+ info.length = len;
+ info.low_limit = max(PAGE_SIZE, mmap_min_addr);
+- info.high_limit = mm->mmap_base;
++ info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
+ info.align_mask = 0;
+
+- if (addr > DEFAULT_MAP_WINDOW)
+- info.high_limit += mm->context.addr_limit - DEFAULT_MAP_WINDOW;
+-
+ addr = vm_unmapped_area(&info);
+ if (!(addr & ~PAGE_MASK))
+ return addr;
+diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
+index 05e15386d4cb..b94fb62e60fd 100644
+--- a/arch/powerpc/mm/mmu_context_book3s64.c
++++ b/arch/powerpc/mm/mmu_context_book3s64.c
+@@ -93,11 +93,11 @@ static int hash__init_new_context(struct mm_struct *mm)
+ return index;
+
+ /*
+- * We do switch_slb() early in fork, even before we setup the
+- * mm->context.addr_limit. Default to max task size so that we copy the
+- * default values to paca which will help us to handle slb miss early.
++ * In the case of exec, use the default limit,
++ * otherwise inherit it from the mm we are duplicating.
+ */
+- mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
++ if (!mm->context.addr_limit)
++ mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
+
+ /*
+ * The old code would re-promote on fork, we don't do that when using
+diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
+index 39c252b54d16..cfbbee941a76 100644
+--- a/arch/powerpc/mm/pgtable-radix.c
++++ b/arch/powerpc/mm/pgtable-radix.c
+@@ -169,6 +169,16 @@ void radix__mark_rodata_ro(void)
+ {
+ unsigned long start, end;
+
++ /*
++ * mark_rodata_ro() will mark itself as !writable at some point.
++ * Due to DD1 workaround in radix__pte_update(), we'll end up with
++ * an invalid pte and the system will crash quite severly.
++ */
++ if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
++ pr_warn("Warning: Unable to mark rodata read only on P9 DD1\n");
++ return;
++ }
++
+ start = (unsigned long)_stext;
+ end = (unsigned long)__init_begin;
+
+diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
+index 45f6740dd407..a4f93699194b 100644
+--- a/arch/powerpc/mm/slice.c
++++ b/arch/powerpc/mm/slice.c
+@@ -96,7 +96,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
+ {
+ struct vm_area_struct *vma;
+
+- if ((mm->task_size - len) < addr)
++ if ((mm->context.addr_limit - len) < addr)
+ return 0;
+ vma = find_vma(mm, addr);
+ return (!vma || (addr + len) <= vm_start_gap(vma));
+@@ -133,7 +133,7 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
+ if (!slice_low_has_vma(mm, i))
+ ret->low_slices |= 1u << i;
+
+- if (mm->task_size <= SLICE_LOW_TOP)
++ if (mm->context.addr_limit <= SLICE_LOW_TOP)
+ return;
+
+ for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++)
+@@ -412,25 +412,31 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
+ struct slice_mask compat_mask;
+ int fixed = (flags & MAP_FIXED);
+ int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
++ unsigned long page_size = 1UL << pshift;
+ struct mm_struct *mm = current->mm;
+ unsigned long newaddr;
+ unsigned long high_limit;
+
+- /*
+- * Check if we need to expland slice area.
+- */
+- if (unlikely(addr > mm->context.addr_limit &&
+- mm->context.addr_limit != TASK_SIZE)) {
+- mm->context.addr_limit = TASK_SIZE;
++ high_limit = DEFAULT_MAP_WINDOW;
++ if (addr >= high_limit || (fixed && (addr + len > high_limit)))
++ high_limit = TASK_SIZE;
++
++ if (len > high_limit)
++ return -ENOMEM;
++ if (len & (page_size - 1))
++ return -EINVAL;
++ if (fixed) {
++ if (addr & (page_size - 1))
++ return -EINVAL;
++ if (addr > high_limit - len)
++ return -ENOMEM;
++ }
++
++ if (high_limit > mm->context.addr_limit) {
++ mm->context.addr_limit = high_limit;
+ on_each_cpu(slice_flush_segments, mm, 1);
+ }
+- /*
+- * This mmap request can allocate upt to 512TB
+- */
+- if (addr > DEFAULT_MAP_WINDOW)
+- high_limit = mm->context.addr_limit;
+- else
+- high_limit = DEFAULT_MAP_WINDOW;
++
+ /*
+ * init different masks
+ */
+@@ -446,27 +452,19 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
+
+ /* Sanity checks */
+ BUG_ON(mm->task_size == 0);
++ BUG_ON(mm->context.addr_limit == 0);
+ VM_BUG_ON(radix_enabled());
+
+ slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
+ slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
+ addr, len, flags, topdown);
+
+- if (len > mm->task_size)
+- return -ENOMEM;
+- if (len & ((1ul << pshift) - 1))
+- return -EINVAL;
+- if (fixed && (addr & ((1ul << pshift) - 1)))
+- return -EINVAL;
+- if (fixed && addr > (mm->task_size - len))
+- return -ENOMEM;
+-
+ /* If hint, make sure it matches our alignment restrictions */
+ if (!fixed && addr) {
+- addr = _ALIGN_UP(addr, 1ul << pshift);
++ addr = _ALIGN_UP(addr, page_size);
+ slice_dbg(" aligned addr=%lx\n", addr);
+ /* Ignore hint if it's too large or overlaps a VMA */
+- if (addr > mm->task_size - len ||
++ if (addr > high_limit - len ||
+ !slice_area_is_free(mm, addr, len))
+ addr = 0;
+ }
+diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
+index 36344117c680..cf64e16f92c2 100644
+--- a/arch/powerpc/perf/imc-pmu.c
++++ b/arch/powerpc/perf/imc-pmu.c
+@@ -467,7 +467,7 @@ static int nest_imc_event_init(struct perf_event *event)
+ * Nest HW counter memory resides in a per-chip reserve-memory (HOMER).
+ * Get the base memory addresss for this cpu.
+ */
+- chip_id = topology_physical_package_id(event->cpu);
++ chip_id = cpu_to_chip_id(event->cpu);
+ pcni = pmu->mem_info;
+ do {
+ if (pcni->id == chip_id) {
+@@ -524,19 +524,19 @@ static int nest_imc_event_init(struct perf_event *event)
+ */
+ static int core_imc_mem_init(int cpu, int size)
+ {
+- int phys_id, rc = 0, core_id = (cpu / threads_per_core);
++ int nid, rc = 0, core_id = (cpu / threads_per_core);
+ struct imc_mem_info *mem_info;
+
+ /*
+ * alloc_pages_node() will allocate memory for core in the
+ * local node only.
+ */
+- phys_id = topology_physical_package_id(cpu);
++ nid = cpu_to_node(cpu);
+ mem_info = &core_imc_pmu->mem_info[core_id];
+ mem_info->id = core_id;
+
+ /* We need only vbase for core counters */
+- mem_info->vbase = page_address(alloc_pages_node(phys_id,
++ mem_info->vbase = page_address(alloc_pages_node(nid,
+ GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+ __GFP_NOWARN, get_order(size)));
+ if (!mem_info->vbase)
+@@ -797,14 +797,14 @@ static int core_imc_event_init(struct perf_event *event)
+ static int thread_imc_mem_alloc(int cpu_id, int size)
+ {
+ u64 ldbar_value, *local_mem = per_cpu(thread_imc_mem, cpu_id);
+- int phys_id = topology_physical_package_id(cpu_id);
++ int nid = cpu_to_node(cpu_id);
+
+ if (!local_mem) {
+ /*
+ * This case could happen only once at start, since we dont
+ * free the memory in cpu offline path.
+ */
+- local_mem = page_address(alloc_pages_node(phys_id,
++ local_mem = page_address(alloc_pages_node(nid,
+ GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+ __GFP_NOWARN, get_order(size)));
+ if (!local_mem)
+diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
+index c21fe1d57c00..ec7b476c1ac5 100644
+--- a/arch/s390/include/asm/switch_to.h
++++ b/arch/s390/include/asm/switch_to.h
+@@ -37,8 +37,8 @@ static inline void restore_access_regs(unsigned int *acrs)
+ save_ri_cb(prev->thread.ri_cb); \
+ save_gs_cb(prev->thread.gs_cb); \
+ } \
++ update_cr_regs(next); \
+ if (next->mm) { \
+- update_cr_regs(next); \
+ set_cpu_flag(CIF_FPU); \
+ restore_access_regs(&next->thread.acrs[0]); \
+ restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
+diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
+index f7e82302a71e..2394557653d5 100644
+--- a/arch/s390/kernel/dis.c
++++ b/arch/s390/kernel/dis.c
+@@ -1548,6 +1548,7 @@ static struct s390_insn opcode_e7[] = {
+ { "vfsq", 0xce, INSTR_VRR_VV000MM },
+ { "vfs", 0xe2, INSTR_VRR_VVV00MM },
+ { "vftci", 0x4a, INSTR_VRI_VVIMM },
++ { "", 0, INSTR_INVALID }
+ };
+
+ static struct s390_insn opcode_eb[] = {
+@@ -1953,7 +1954,7 @@ void show_code(struct pt_regs *regs)
+ {
+ char *mode = user_mode(regs) ? "User" : "Krnl";
+ unsigned char code[64];
+- char buffer[64], *ptr;
++ char buffer[128], *ptr;
+ mm_segment_t old_fs;
+ unsigned long addr;
+ int start, end, opsize, hops, i;
+@@ -2016,7 +2017,7 @@ void show_code(struct pt_regs *regs)
+ start += opsize;
+ pr_cont("%s", buffer);
+ ptr = buffer;
+- ptr += sprintf(ptr, "\n ");
++ ptr += sprintf(ptr, "\n\t ");
+ hops++;
+ }
+ pr_cont("\n");
+diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
+index b945448b9eae..f7b280f0ab16 100644
+--- a/arch/s390/kernel/early.c
++++ b/arch/s390/kernel/early.c
+@@ -375,8 +375,10 @@ static __init void detect_machine_facilities(void)
+ S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
+ if (test_facility(40))
+ S390_lowcore.machine_flags |= MACHINE_FLAG_LPP;
+- if (test_facility(50) && test_facility(73))
++ if (test_facility(50) && test_facility(73)) {
+ S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
++ __ctl_set_bit(0, 55);
++ }
+ if (test_facility(51))
+ S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
+ if (test_facility(129)) {
+diff --git a/arch/s390/kernel/guarded_storage.c b/arch/s390/kernel/guarded_storage.c
+index bff39b66c9ff..9ee794e14f33 100644
+--- a/arch/s390/kernel/guarded_storage.c
++++ b/arch/s390/kernel/guarded_storage.c
+@@ -14,9 +14,11 @@
+
+ void exit_thread_gs(void)
+ {
++ preempt_disable();
+ kfree(current->thread.gs_cb);
+ kfree(current->thread.gs_bc_cb);
+ current->thread.gs_cb = current->thread.gs_bc_cb = NULL;
++ preempt_enable();
+ }
+
+ static int gs_enable(void)
+diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
+index b0ba2c26b45e..d6f7782e75c9 100644
+--- a/arch/s390/kernel/machine_kexec.c
++++ b/arch/s390/kernel/machine_kexec.c
+@@ -269,6 +269,7 @@ static void __do_machine_kexec(void *data)
+ s390_reset_system();
+ data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page);
+
++ __arch_local_irq_stnsm(0xfb); /* disable DAT - avoid no-execute */
+ /* Call the moving routine */
+ (*data_mover)(&image->head, image->start);
+
+diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
+index a4a84fb08046..203b7cd7c348 100644
+--- a/arch/s390/kernel/process.c
++++ b/arch/s390/kernel/process.c
+@@ -100,6 +100,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp,
+ memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
+ memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
+ clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
++ p->thread.per_flags = 0;
+ /* Initialize per thread user and system timer values */
+ p->thread.user_timer = 0;
+ p->thread.guest_timer = 0;
+diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S
+index ca37e5d5b40c..9c2c96da23d0 100644
+--- a/arch/s390/kernel/relocate_kernel.S
++++ b/arch/s390/kernel/relocate_kernel.S
+@@ -29,7 +29,6 @@
+ ENTRY(relocate_kernel)
+ basr %r13,0 # base address
+ .base:
+- stnsm sys_msk-.base(%r13),0xfb # disable DAT
+ stctg %c0,%c15,ctlregs-.base(%r13)
+ stmg %r0,%r15,gprregs-.base(%r13)
+ lghi %r0,3
+@@ -103,8 +102,6 @@ ENTRY(relocate_kernel)
+ .align 8
+ load_psw:
+ .long 0x00080000,0x80000000
+- sys_msk:
+- .quad 0
+ ctlregs:
+ .rept 16
+ .quad 0
+diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
+index 32aefb215e59..d85c64821a6b 100644
+--- a/arch/s390/kernel/runtime_instr.c
++++ b/arch/s390/kernel/runtime_instr.c
+@@ -50,11 +50,13 @@ void exit_thread_runtime_instr(void)
+ {
+ struct task_struct *task = current;
+
++ preempt_disable();
+ if (!task->thread.ri_cb)
+ return;
+ disable_runtime_instr();
+ kfree(task->thread.ri_cb);
+ task->thread.ri_cb = NULL;
++ preempt_enable();
+ }
+
+ SYSCALL_DEFINE1(s390_runtime_instr, int, command)
+@@ -65,9 +67,7 @@ SYSCALL_DEFINE1(s390_runtime_instr, int, command)
+ return -EOPNOTSUPP;
+
+ if (command == S390_RUNTIME_INSTR_STOP) {
+- preempt_disable();
+ exit_thread_runtime_instr();
+- preempt_enable();
+ return 0;
+ }
+
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index bcfc5668dcb2..518d9286b3d1 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -51,15 +51,19 @@ ENTRY(native_usergs_sysret64)
+ END(native_usergs_sysret64)
+ #endif /* CONFIG_PARAVIRT */
+
+-.macro TRACE_IRQS_IRETQ
++.macro TRACE_IRQS_FLAGS flags:req
+ #ifdef CONFIG_TRACE_IRQFLAGS
+- bt $9, EFLAGS(%rsp) /* interrupts off? */
++ bt $9, \flags /* interrupts off? */
+ jnc 1f
+ TRACE_IRQS_ON
+ 1:
+ #endif
+ .endm
+
++.macro TRACE_IRQS_IRETQ
++ TRACE_IRQS_FLAGS EFLAGS(%rsp)
++.endm
++
+ /*
+ * When dynamic function tracer is enabled it will add a breakpoint
+ * to all locations that it is about to modify, sync CPUs, update
+@@ -148,8 +152,6 @@ ENTRY(entry_SYSCALL_64)
+ movq %rsp, PER_CPU_VAR(rsp_scratch)
+ movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+
+- TRACE_IRQS_OFF
+-
+ /* Construct struct pt_regs on stack */
+ pushq $__USER_DS /* pt_regs->ss */
+ pushq PER_CPU_VAR(rsp_scratch) /* pt_regs->sp */
+@@ -170,6 +172,8 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
+ sub $(6*8), %rsp /* pt_regs->bp, bx, r12-15 not saved */
+ UNWIND_HINT_REGS extra=0
+
++ TRACE_IRQS_OFF
++
+ /*
+ * If we need to do entry work or if we guess we'll need to do
+ * exit work, go straight to the slow path.
+@@ -923,11 +927,13 @@ ENTRY(native_load_gs_index)
+ FRAME_BEGIN
+ pushfq
+ DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
++ TRACE_IRQS_OFF
+ SWAPGS
+ .Lgs_change:
+ movl %edi, %gs
+ 2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
+ SWAPGS
++ TRACE_IRQS_FLAGS (%rsp)
+ popfq
+ FRAME_END
+ ret
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 9fb9a1f1e47b..f94855000d4e 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -3730,6 +3730,19 @@ EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1");
+ EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1");
+
+ static struct attribute *hsw_events_attrs[] = {
++ EVENT_PTR(mem_ld_hsw),
++ EVENT_PTR(mem_st_hsw),
++ EVENT_PTR(td_slots_issued),
++ EVENT_PTR(td_slots_retired),
++ EVENT_PTR(td_fetch_bubbles),
++ EVENT_PTR(td_total_slots),
++ EVENT_PTR(td_total_slots_scale),
++ EVENT_PTR(td_recovery_bubbles),
++ EVENT_PTR(td_recovery_bubbles_scale),
++ NULL
++};
++
++static struct attribute *hsw_tsx_events_attrs[] = {
+ EVENT_PTR(tx_start),
+ EVENT_PTR(tx_commit),
+ EVENT_PTR(tx_abort),
+@@ -3742,18 +3755,16 @@ static struct attribute *hsw_events_attrs[] = {
+ EVENT_PTR(el_conflict),
+ EVENT_PTR(cycles_t),
+ EVENT_PTR(cycles_ct),
+- EVENT_PTR(mem_ld_hsw),
+- EVENT_PTR(mem_st_hsw),
+- EVENT_PTR(td_slots_issued),
+- EVENT_PTR(td_slots_retired),
+- EVENT_PTR(td_fetch_bubbles),
+- EVENT_PTR(td_total_slots),
+- EVENT_PTR(td_total_slots_scale),
+- EVENT_PTR(td_recovery_bubbles),
+- EVENT_PTR(td_recovery_bubbles_scale),
+ NULL
+ };
+
++static __init struct attribute **get_hsw_events_attrs(void)
++{
++ return boot_cpu_has(X86_FEATURE_RTM) ?
++ merge_attr(hsw_events_attrs, hsw_tsx_events_attrs) :
++ hsw_events_attrs;
++}
++
+ static ssize_t freeze_on_smi_show(struct device *cdev,
+ struct device_attribute *attr,
+ char *buf)
+@@ -4182,7 +4193,7 @@ __init int intel_pmu_init(void)
+
+ x86_pmu.hw_config = hsw_hw_config;
+ x86_pmu.get_event_constraints = hsw_get_event_constraints;
+- x86_pmu.cpu_events = hsw_events_attrs;
++ x86_pmu.cpu_events = get_hsw_events_attrs();
+ x86_pmu.lbr_double_abort = true;
+ extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
+ hsw_format_attr : nhm_format_attr;
+@@ -4221,7 +4232,7 @@ __init int intel_pmu_init(void)
+
+ x86_pmu.hw_config = hsw_hw_config;
+ x86_pmu.get_event_constraints = hsw_get_event_constraints;
+- x86_pmu.cpu_events = hsw_events_attrs;
++ x86_pmu.cpu_events = get_hsw_events_attrs();
+ x86_pmu.limit_period = bdw_limit_period;
+ extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
+ hsw_format_attr : nhm_format_attr;
+@@ -4279,7 +4290,7 @@ __init int intel_pmu_init(void)
+ extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
+ hsw_format_attr : nhm_format_attr;
+ extra_attr = merge_attr(extra_attr, skl_format_attr);
+- x86_pmu.cpu_events = hsw_events_attrs;
++ x86_pmu.cpu_events = get_hsw_events_attrs();
+ intel_pmu_pebs_data_source_skl(
+ boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
+ pr_cont("Skylake events, ");
1274 |
+diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c |
1275 |
+index 410c5dadcee3..3a4b12809ab5 100644 |
1276 |
+--- a/arch/x86/kernel/mpparse.c |
1277 |
++++ b/arch/x86/kernel/mpparse.c |
1278 |
+@@ -431,6 +431,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type) |
1279 |
+ } |
1280 |
+ |
1281 |
+ static unsigned long mpf_base; |
1282 |
++static bool mpf_found; |
1283 |
+ |
1284 |
+ static unsigned long __init get_mpc_size(unsigned long physptr) |
1285 |
+ { |
1286 |
+@@ -504,7 +505,7 @@ void __init default_get_smp_config(unsigned int early) |
1287 |
+ if (!smp_found_config) |
1288 |
+ return; |
1289 |
+ |
1290 |
+- if (!mpf_base) |
1291 |
++ if (!mpf_found) |
1292 |
+ return; |
1293 |
+ |
1294 |
+ if (acpi_lapic && early) |
1295 |
+@@ -593,6 +594,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length) |
1296 |
+ smp_found_config = 1; |
1297 |
+ #endif |
1298 |
+ mpf_base = base; |
1299 |
++ mpf_found = true; |
1300 |
+ |
1301 |
+ pr_info("found SMP MP-table at [mem %#010lx-%#010lx] mapped at [%p]\n", |
1302 |
+ base, base + sizeof(*mpf) - 1, mpf); |
1303 |
+@@ -858,7 +860,7 @@ static int __init update_mp_table(void) |
1304 |
+ if (!enable_update_mptable) |
1305 |
+ return 0; |
1306 |
+ |
1307 |
+- if (!mpf_base) |
1308 |
++ if (!mpf_found) |
1309 |
+ return 0; |
1310 |
+ |
1311 |
+ mpf = early_memremap(mpf_base, sizeof(*mpf)); |
1312 |
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c |
1313 |
+index 0e68f0b3cbf7..ca209a4a7834 100644 |
1314 |
+--- a/arch/x86/kvm/svm.c |
1315 |
++++ b/arch/x86/kvm/svm.c |
1316 |
+@@ -3657,6 +3657,13 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) |
1317 |
+ u32 ecx = msr->index; |
1318 |
+ u64 data = msr->data; |
1319 |
+ switch (ecx) { |
1320 |
++ case MSR_IA32_CR_PAT: |
1321 |
++ if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) |
1322 |
++ return 1; |
1323 |
++ vcpu->arch.pat = data; |
1324 |
++ svm->vmcb->save.g_pat = data; |
1325 |
++ mark_dirty(svm->vmcb, VMCB_NPT); |
1326 |
++ break; |
1327 |
+ case MSR_IA32_TSC: |
1328 |
+ kvm_write_tsc(vcpu, msr); |
1329 |
+ break; |
1330 |
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index a6f4f095f8f4..21cad7068cbf 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -202,6 +202,10 @@ struct loaded_vmcs {
+ bool nmi_known_unmasked;
+ unsigned long vmcs_host_cr3; /* May not match real cr3 */
+ unsigned long vmcs_host_cr4; /* May not match real cr4 */
++ /* Support for vnmi-less CPUs */
++ int soft_vnmi_blocked;
++ ktime_t entry_time;
++ s64 vnmi_blocked_time;
+ struct list_head loaded_vmcss_on_cpu_link;
+ };
+
+@@ -1286,6 +1290,11 @@ static inline bool cpu_has_vmx_invpcid(void)
+ SECONDARY_EXEC_ENABLE_INVPCID;
+ }
+
++static inline bool cpu_has_virtual_nmis(void)
++{
++ return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
++}
++
+ static inline bool cpu_has_vmx_wbinvd_exit(void)
+ {
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+@@ -1343,11 +1352,6 @@ static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
+ (vmcs12->secondary_vm_exec_control & bit);
+ }
+
+-static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
+-{
+- return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
+-}
+-
+ static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
+ {
+ return vmcs12->pin_based_vm_exec_control &
+@@ -3699,9 +3703,9 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
+ &_vmexit_control) < 0)
+ return -EIO;
+
+- min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING |
+- PIN_BASED_VIRTUAL_NMIS;
+- opt = PIN_BASED_POSTED_INTR | PIN_BASED_VMX_PREEMPTION_TIMER;
++ min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
++ opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR |
++ PIN_BASED_VMX_PREEMPTION_TIMER;
+ if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
+ &_pin_based_exec_control) < 0)
+ return -EIO;
+@@ -5667,7 +5671,8 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
+
+ static void enable_nmi_window(struct kvm_vcpu *vcpu)
+ {
+- if (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
++ if (!cpu_has_virtual_nmis() ||
++ vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
+ enable_irq_window(vcpu);
+ return;
+ }
+@@ -5707,6 +5712,19 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
+ {
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
++ if (!cpu_has_virtual_nmis()) {
++ /*
++ * Tracking the NMI-blocked state in software is built upon
++ * finding the next open IRQ window. This, in turn, depends on
++ * well-behaving guests: They have to keep IRQs disabled at
++ * least as long as the NMI handler runs. Otherwise we may
++ * cause NMI nesting, maybe breaking the guest. But as this is
++ * highly unlikely, we can live with the residual risk.
++ */
++ vmx->loaded_vmcs->soft_vnmi_blocked = 1;
++ vmx->loaded_vmcs->vnmi_blocked_time = 0;
++ }
++
+ ++vcpu->stat.nmi_injections;
+ vmx->loaded_vmcs->nmi_known_unmasked = false;
+
+@@ -5725,6 +5743,8 @@ static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ bool masked;
+
++ if (!cpu_has_virtual_nmis())
++ return vmx->loaded_vmcs->soft_vnmi_blocked;
+ if (vmx->loaded_vmcs->nmi_known_unmasked)
+ return false;
+ masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
+@@ -5736,13 +5756,20 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
+ {
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+- vmx->loaded_vmcs->nmi_known_unmasked = !masked;
+- if (masked)
+- vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
+- GUEST_INTR_STATE_NMI);
+- else
+- vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
+- GUEST_INTR_STATE_NMI);
++ if (!cpu_has_virtual_nmis()) {
++ if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) {
++ vmx->loaded_vmcs->soft_vnmi_blocked = masked;
++ vmx->loaded_vmcs->vnmi_blocked_time = 0;
++ }
++ } else {
++ vmx->loaded_vmcs->nmi_known_unmasked = !masked;
++ if (masked)
++ vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
++ GUEST_INTR_STATE_NMI);
++ else
++ vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
++ GUEST_INTR_STATE_NMI);
++ }
+ }
+
+ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
+@@ -5750,6 +5777,10 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
+ if (to_vmx(vcpu)->nested.nested_run_pending)
+ return 0;
+
++ if (!cpu_has_virtual_nmis() &&
++ to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
++ return 0;
++
+ return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
+ (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
+ | GUEST_INTR_STATE_NMI));
+@@ -6478,6 +6509,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
+ * AAK134, BY25.
+ */
+ if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
++ cpu_has_virtual_nmis() &&
+ (exit_qualification & INTR_INFO_UNBLOCK_NMI))
+ vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
+
+@@ -6961,7 +6993,7 @@ static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx)
+ }
+
+ /* Create a new VMCS */
+- item = kmalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
++ item = kzalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
+ if (!item)
+ return NULL;
+ item->vmcs02.vmcs = alloc_vmcs();
+@@ -7978,6 +8010,7 @@ static int handle_pml_full(struct kvm_vcpu *vcpu)
+ * "blocked by NMI" bit has to be set before next VM entry.
+ */
+ if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
++ cpu_has_virtual_nmis() &&
+ (exit_qualification & INTR_INFO_UNBLOCK_NMI))
+ vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
+ GUEST_INTR_STATE_NMI);
+@@ -8822,6 +8855,25 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
+ return 0;
+ }
+
++ if (unlikely(!cpu_has_virtual_nmis() &&
++ vmx->loaded_vmcs->soft_vnmi_blocked)) {
++ if (vmx_interrupt_allowed(vcpu)) {
++ vmx->loaded_vmcs->soft_vnmi_blocked = 0;
++ } else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL &&
++ vcpu->arch.nmi_pending) {
++ /*
++ * This CPU don't support us in finding the end of an
++ * NMI-blocked window if the guest runs with IRQs
++ * disabled. So we pull the trigger after 1 s of
++ * futile waiting, but inform the user about this.
++ */
++ printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
++ "state on VCPU %d after 1 s timeout\n",
++ __func__, vcpu->vcpu_id);
++ vmx->loaded_vmcs->soft_vnmi_blocked = 0;
++ }
++ }
++
+ if (exit_reason < kvm_vmx_max_exit_handlers
+ && kvm_vmx_exit_handlers[exit_reason])
+ return kvm_vmx_exit_handlers[exit_reason](vcpu);
+@@ -9104,33 +9156,38 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
+
+ idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
+
+- if (vmx->loaded_vmcs->nmi_known_unmasked)
+- return;
+- /*
+- * Can't use vmx->exit_intr_info since we're not sure what
+- * the exit reason is.
+- */
+- exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+- unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
+- vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
+- /*
+- * SDM 3: 27.7.1.2 (September 2008)
+- * Re-set bit "block by NMI" before VM entry if vmexit caused by
+- * a guest IRET fault.
+- * SDM 3: 23.2.2 (September 2008)
+- * Bit 12 is undefined in any of the following cases:
+- * If the VM exit sets the valid bit in the IDT-vectoring
+- * information field.
+- * If the VM exit is due to a double fault.
+- */
+- if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
+- vector != DF_VECTOR && !idtv_info_valid)
+- vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
+- GUEST_INTR_STATE_NMI);
+- else
+- vmx->loaded_vmcs->nmi_known_unmasked =
+- !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
+- & GUEST_INTR_STATE_NMI);
++ if (cpu_has_virtual_nmis()) {
++ if (vmx->loaded_vmcs->nmi_known_unmasked)
++ return;
++ /*
++ * Can't use vmx->exit_intr_info since we're not sure what
++ * the exit reason is.
++ */
++ exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
++ unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
++ vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
++ /*
++ * SDM 3: 27.7.1.2 (September 2008)
++ * Re-set bit "block by NMI" before VM entry if vmexit caused by
++ * a guest IRET fault.
++ * SDM 3: 23.2.2 (September 2008)
++ * Bit 12 is undefined in any of the following cases:
++ * If the VM exit sets the valid bit in the IDT-vectoring
++ * information field.
++ * If the VM exit is due to a double fault.
++ */
++ if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
++ vector != DF_VECTOR && !idtv_info_valid)
++ vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
++ GUEST_INTR_STATE_NMI);
++ else
++ vmx->loaded_vmcs->nmi_known_unmasked =
++ !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
++ & GUEST_INTR_STATE_NMI);
++ } else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked))
++ vmx->loaded_vmcs->vnmi_blocked_time +=
++ ktime_to_ns(ktime_sub(ktime_get(),
++ vmx->loaded_vmcs->entry_time));
+ }
+
+ static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
+@@ -9247,6 +9304,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ unsigned long debugctlmsr, cr3, cr4;
+
++ /* Record the guest's net vcpu time for enforced NMI injections. */
++ if (unlikely(!cpu_has_virtual_nmis() &&
++ vmx->loaded_vmcs->soft_vnmi_blocked))
++ vmx->loaded_vmcs->entry_time = ktime_get();
++
+ /* Don't enter VMX if guest state is invalid, let the exit handler
+ start emulation until we arrive back to a valid state */
+ if (vmx->emulation_required)
+@@ -11325,6 +11387,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
+ vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
+ vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
+ vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
++ vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
++ vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);
+
+ /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */
+ if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
+diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
+index 12e377184ee4..c4d55919fac1 100644
+--- a/arch/x86/lib/x86-opcode-map.txt
++++ b/arch/x86/lib/x86-opcode-map.txt
+@@ -896,7 +896,7 @@ EndTable
+
+ GrpTable: Grp3_1
+ 0: TEST Eb,Ib
+-1:
++1: TEST Eb,Ib
+ 2: NOT Eb
+ 3: NEG Eb
+ 4: MUL AL,Eb
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 048be4aa6024..33ee583cfe45 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -333,6 +333,7 @@ EXPORT_SYMBOL(blk_stop_queue);
+ void blk_sync_queue(struct request_queue *q)
+ {
+ del_timer_sync(&q->timeout);
++ cancel_work_sync(&q->timeout_work);
+
+ if (q->mq_ops) {
+ struct blk_mq_hw_ctx *hctx;
+@@ -844,6 +845,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
+ setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
+ laptop_mode_timer_fn, (unsigned long) q);
+ setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
++ INIT_WORK(&q->timeout_work, NULL);
+ INIT_LIST_HEAD(&q->queue_head);
+ INIT_LIST_HEAD(&q->timeout_list);
+ INIT_LIST_HEAD(&q->icq_list);
+diff --git a/block/blk-timeout.c b/block/blk-timeout.c
+index 17ec83bb0900..6427be7ac363 100644
+--- a/block/blk-timeout.c
++++ b/block/blk-timeout.c
+@@ -134,8 +134,6 @@ void blk_timeout_work(struct work_struct *work)
+ struct request *rq, *tmp;
+ int next_set = 0;
+
+- if (blk_queue_enter(q, true))
+- return;
+ spin_lock_irqsave(q->queue_lock, flags);
+
+ list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
+@@ -145,7 +143,6 @@ void blk_timeout_work(struct work_struct *work)
+ mod_timer(&q->timeout, round_jiffies_up(next));
+
+ spin_unlock_irqrestore(q->queue_lock, flags);
+- blk_queue_exit(q);
+ }
+
+ /**
+diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
+index fbcc73f7a099..18af71057b44 100644
+--- a/drivers/acpi/device_pm.c
++++ b/drivers/acpi/device_pm.c
+@@ -387,6 +387,7 @@ EXPORT_SYMBOL(acpi_bus_power_manageable);
+
+ #ifdef CONFIG_PM
+ static DEFINE_MUTEX(acpi_pm_notifier_lock);
++static DEFINE_MUTEX(acpi_pm_notifier_install_lock);
+
+ void acpi_pm_wakeup_event(struct device *dev)
+ {
+@@ -443,24 +444,25 @@ acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev,
+ if (!dev && !func)
+ return AE_BAD_PARAMETER;
+
+- mutex_lock(&acpi_pm_notifier_lock);
++ mutex_lock(&acpi_pm_notifier_install_lock);
+
+ if (adev->wakeup.flags.notifier_present)
+ goto out;
+
+- adev->wakeup.ws = wakeup_source_register(dev_name(&adev->dev));
+- adev->wakeup.context.dev = dev;
+- adev->wakeup.context.func = func;
+-
+ status = acpi_install_notify_handler(adev->handle, ACPI_SYSTEM_NOTIFY,
+ acpi_pm_notify_handler, NULL);
+ if (ACPI_FAILURE(status))
+ goto out;
+
++ mutex_lock(&acpi_pm_notifier_lock);
++ adev->wakeup.ws = wakeup_source_register(dev_name(&adev->dev));
++ adev->wakeup.context.dev = dev;
++ adev->wakeup.context.func = func;
+ adev->wakeup.flags.notifier_present = true;
++ mutex_unlock(&acpi_pm_notifier_lock);
+
+ out:
+- mutex_unlock(&acpi_pm_notifier_lock);
++ mutex_unlock(&acpi_pm_notifier_install_lock);
+ return status;
+ }
+
+@@ -472,7 +474,7 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev)
+ {
+ acpi_status status = AE_BAD_PARAMETER;
+
+- mutex_lock(&acpi_pm_notifier_lock);
++ mutex_lock(&acpi_pm_notifier_install_lock);
+
+ if (!adev->wakeup.flags.notifier_present)
+ goto out;
+@@ -483,14 +485,15 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev)
+ if (ACPI_FAILURE(status))
+ goto out;
+
++ mutex_lock(&acpi_pm_notifier_lock);
+ adev->wakeup.context.func = NULL;
+ adev->wakeup.context.dev = NULL;
+ wakeup_source_unregister(adev->wakeup.ws);
+-
+ adev->wakeup.flags.notifier_present = false;
++ mutex_unlock(&acpi_pm_notifier_lock);
+
+ out:
+- mutex_unlock(&acpi_pm_notifier_lock);
++ mutex_unlock(&acpi_pm_notifier_install_lock);
+ return status;
+ }
+
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 236b14324780..82b3ce5e937e 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -486,8 +486,11 @@ static inline void __acpi_ec_enable_event(struct acpi_ec *ec)
+ {
+ if (!test_and_set_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
+ ec_log_drv("event unblocked");
+- if (!test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
+- advance_transaction(ec);
++ /*
++ * Unconditionally invoke this once after enabling the event
++ * handling mechanism to detect the pending events.
++ */
++ advance_transaction(ec);
+ }
+
+ static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
+@@ -1456,11 +1459,10 @@ static int ec_install_handlers(struct acpi_ec *ec, bool handle_events)
+ if (test_bit(EC_FLAGS_STARTED, &ec->flags) &&
+ ec->reference_count >= 1)
+ acpi_ec_enable_gpe(ec, true);
+-
+- /* EC is fully operational, allow queries */
+- acpi_ec_enable_event(ec);
+ }
+ }
++ /* EC is fully operational, allow queries */
++ acpi_ec_enable_event(ec);
+
+ return 0;
+ }
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index e4effef0c83f..ea20e0eb4d5a 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -2264,8 +2264,8 @@ static void ata_eh_link_autopsy(struct ata_link *link)
+ if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
+ eflags |= ATA_EFLAG_DUBIOUS_XFER;
+ ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
++ trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
+ }
+- trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
+ DPRINTK("EXIT\n");
+ }
+
+diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c
+index 0b718886479b..87509cb69f79 100644
+--- a/drivers/base/power/opp/of.c
++++ b/drivers/base/power/opp/of.c
+@@ -397,6 +397,7 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
+ dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
+ ret);
+ _dev_pm_opp_remove_table(opp_table, dev, false);
++ of_node_put(np);
+ goto put_opp_table;
+ }
+ }
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 9adfb5445f8d..5f2a4240a204 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -288,15 +288,6 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
+ cmd->status = BLK_STS_TIMEOUT;
+ return BLK_EH_HANDLED;
+ }
+-
+- /* If we are waiting on our dead timer then we could get timeout
+- * callbacks for our request. For this we just want to reset the timer
+- * and let the queue side take care of everything.
+- */
+- if (!completion_done(&cmd->send_complete)) {
+- nbd_config_put(nbd);
+- return BLK_EH_RESET_TIMER;
+- }
+ config = nbd->config;
+
+ if (config->num_connections > 1) {
+@@ -723,9 +714,9 @@ static int wait_for_reconnect(struct nbd_device *nbd)
+ return 0;
+ if (test_bit(NBD_DISCONNECTED, &config->runtime_flags))
+ return 0;
+- wait_event_interruptible_timeout(config->conn_wait,
+- atomic_read(&config->live_connections),
+- config->dead_conn_timeout);
++ wait_event_timeout(config->conn_wait,
++ atomic_read(&config->live_connections),
++ config->dead_conn_timeout);
+ return atomic_read(&config->live_connections);
+ }
+
+@@ -740,6 +731,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
+ if (!refcount_inc_not_zero(&nbd->config_refs)) {
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
+ "Socks array is empty\n");
++ blk_mq_start_request(req);
+ return -EINVAL;
+ }
+ config = nbd->config;
+@@ -748,6 +740,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
+ "Attempted send on invalid socket\n");
+ nbd_config_put(nbd);
++ blk_mq_start_request(req);
+ return -EINVAL;
+ }
+ cmd->status = BLK_STS_OK;
+@@ -771,6 +764,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
+ */
+ sock_shutdown(nbd);
+ nbd_config_put(nbd);
++ blk_mq_start_request(req);
+ return -EIO;
+ }
+ goto again;
+@@ -781,6 +775,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
+ * here so that it gets put _after_ the request that is already on the
+ * dispatch list.
+ */
++ blk_mq_start_request(req);
+ if (unlikely(nsock->pending && nsock->pending != req)) {
+ blk_mq_requeue_request(req, true);
+ ret = 0;
+@@ -793,10 +788,10 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
+ ret = nbd_send_cmd(nbd, cmd, index);
+ if (ret == -EAGAIN) {
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
+- "Request send failed trying another connection\n");
++ "Request send failed, requeueing\n");
+ nbd_mark_nsock_dead(nbd, nsock, 1);
+- mutex_unlock(&nsock->tx_lock);
+- goto again;
++ blk_mq_requeue_request(req, true);
++ ret = 0;
+ }
+ out:
+ mutex_unlock(&nsock->tx_lock);
+@@ -820,7 +815,6 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
+ * done sending everything over the wire.
+ */
+ init_completion(&cmd->send_complete);
+- blk_mq_start_request(bd->rq);
+
+ /* We can be called directly from the user space process, which means we
+ * could possibly have signals pending so our sendmsg will fail. In
+diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
+index d00c4fdae924..bd810d01538a 100644
+--- a/drivers/bluetooth/btqcomsmd.c
++++ b/drivers/bluetooth/btqcomsmd.c
+@@ -26,6 +26,7 @@
+ struct btqcomsmd {
+ struct hci_dev *hdev;
+
++ bdaddr_t bdaddr;
+ struct rpmsg_endpoint *acl_channel;
+ struct rpmsg_endpoint *cmd_channel;
+ };
+@@ -100,6 +101,38 @@ static int btqcomsmd_close(struct hci_dev *hdev)
+ return 0;
+ }
+
++static int btqcomsmd_setup(struct hci_dev *hdev)
++{
++ struct btqcomsmd *btq = hci_get_drvdata(hdev);
++ struct sk_buff *skb;
++ int err;
++
++ skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
++ if (IS_ERR(skb))
++ return PTR_ERR(skb);
++ kfree_skb(skb);
++
++ /* Devices do not have persistent storage for BD address. If no
++ * BD address has been retrieved during probe, mark the device
++ * as having an invalid BD address.
++ */
++ if (!bacmp(&btq->bdaddr, BDADDR_ANY)) {
++ set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
++ return 0;
++ }
++
++ /* When setting a configured BD address fails, mark the device
++ * as having an invalid BD address.
++ */
++ err = qca_set_bdaddr_rome(hdev, &btq->bdaddr);
++ if (err) {
++ set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
++ return 0;
++ }
++
++ return 0;
++}
++
+ static int btqcomsmd_probe(struct platform_device *pdev)
+ {
+ struct btqcomsmd *btq;
+@@ -135,6 +168,7 @@ static int btqcomsmd_probe(struct platform_device *pdev)
+ hdev->open = btqcomsmd_open;
+ hdev->close = btqcomsmd_close;
+ hdev->send = btqcomsmd_send;
++ hdev->setup = btqcomsmd_setup;
+ hdev->set_bdaddr = qca_set_bdaddr_rome;
+
+ ret = hci_register_dev(hdev);
+diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
+index 13eb04f72389..148815470431 100644
+--- a/drivers/clk/ti/clk-dra7-atl.c
++++ b/drivers/clk/ti/clk-dra7-atl.c
+@@ -274,8 +274,7 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
+
+ /* Get configuration for the ATL instances */
+ snprintf(prop, sizeof(prop), "atl%u", i);
+- of_node_get(node);
+- cfg_node = of_find_node_by_name(node, prop);
++ cfg_node = of_get_child_by_name(node, prop);
+ if (cfg_node) {
+ ret = of_property_read_u32(cfg_node, "bws",
+ &cdesc->bws);
+diff --git a/drivers/dax/super.c b/drivers/dax/super.c
+index 557b93703532..c4cd034a3820 100644
+--- a/drivers/dax/super.c
++++ b/drivers/dax/super.c
+@@ -344,6 +344,9 @@ static struct inode *dax_alloc_inode(struct super_block *sb)
+ struct inode *inode;
+
+ dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
++ if (!dax_dev)
++ return NULL;
++
+ inode = &dax_dev->inode;
+ inode->i_rdev = 0;
+ return inode;
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index 4c4b46586af2..2af79e4f3235 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -1575,7 +1575,7 @@ static void cm_format_req_event(struct cm_work *work,
+ param->bth_pkey = cm_get_bth_pkey(work);
+ param->port = cm_id_priv->av.port->port_num;
+ param->primary_path = &work->path[0];
+- if (req_msg->alt_local_lid)
++ if (cm_req_has_alt_path(req_msg))
+ param->alternate_path = &work->path[1];
+ else
+ param->alternate_path = NULL;
+@@ -1856,7 +1856,8 @@ static int cm_req_handler(struct cm_work *work)
+ cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
+
+ memset(&work->path[0], 0, sizeof(work->path[0]));
+- memset(&work->path[1], 0, sizeof(work->path[1]));
++ if (cm_req_has_alt_path(req_msg))
++ memset(&work->path[1], 0, sizeof(work->path[1]));
+ grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
+ ret = ib_get_cached_gid(work->port->cm_dev->ib_device,
+ work->port->port_num,
+@@ -3817,14 +3818,16 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
+ struct cm_port *port = mad_agent->context;
+ struct cm_work *work;
+ enum ib_cm_event_type event;
++ bool alt_path = false;
+ u16 attr_id;
+ int paths = 0;
+ int going_down = 0;
+
+ switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
+ case CM_REQ_ATTR_ID:
+- paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
+- alt_local_lid != 0);
++ alt_path = cm_req_has_alt_path((struct cm_req_msg *)
++ mad_recv_wc->recv_buf.mad);
++ paths = 1 + (alt_path != 0);
+ event = IB_CM_REQ_RECEIVED;
+ break;
+ case CM_MRA_ATTR_ID:
+diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
+index f8f53bb90837..cb91245e9163 100644
+--- a/drivers/infiniband/core/mad.c
++++ b/drivers/infiniband/core/mad.c
+@@ -1974,14 +1974,15 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
+ unsigned long flags;
+ int ret;
+
++ INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
+ ret = ib_mad_enforce_security(mad_agent_priv,
+ mad_recv_wc->wc->pkey_index);
+ if (ret) {
+ ib_free_recv_mad(mad_recv_wc);
+ deref_mad_agent(mad_agent_priv);
++ return;
+ }
+
+- INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
+ list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
+ if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
+ mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
+diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
+index 88bdafb297f5..28607bb42d87 100644
+--- a/drivers/infiniband/core/security.c
++++ b/drivers/infiniband/core/security.c
+@@ -87,16 +87,14 @@ static int enforce_qp_pkey_security(u16 pkey,
+ if (ret)
+ return ret;
+
+- if (qp_sec->qp == qp_sec->qp->real_qp) {
+- list_for_each_entry(shared_qp_sec,
+- &qp_sec->shared_qp_list,
+- shared_qp_list) {
+- ret = security_ib_pkey_access(shared_qp_sec->security,
+- subnet_prefix,
+- pkey);
+- if (ret)
+- return ret;
+- }
++ list_for_each_entry(shared_qp_sec,
++ &qp_sec->shared_qp_list,
++ shared_qp_list) {
++ ret = security_ib_pkey_access(shared_qp_sec->security,
++ subnet_prefix,
++ pkey);
++ if (ret)
++ return ret;
+ }
+ return 0;
+ }
+@@ -560,15 +558,22 @@ int ib_security_modify_qp(struct ib_qp *qp,
+ int ret = 0;
+ struct ib_ports_pkeys *tmp_pps;
+ struct ib_ports_pkeys *new_pps;
+- bool special_qp = (qp->qp_type == IB_QPT_SMI ||
+- qp->qp_type == IB_QPT_GSI ||
+- qp->qp_type >= IB_QPT_RESERVED1);
++ struct ib_qp *real_qp = qp->real_qp;
++ bool special_qp = (real_qp->qp_type == IB_QPT_SMI ||
++ real_qp->qp_type == IB_QPT_GSI ||
++ real_qp->qp_type >= IB_QPT_RESERVED1);
+ bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
+ (qp_attr_mask & IB_QP_ALT_PATH));
+
++ /* The port/pkey settings are maintained only for the real QP. Open
++ * handles on the real QP will be in the shared_qp_list. When
++ * enforcing security on the real QP all the shared QPs will be
++ * checked as well.
++ */
++
+ if (pps_change && !special_qp) {
+- mutex_lock(&qp->qp_sec->mutex);
+- new_pps = get_new_pps(qp,
++ mutex_lock(&real_qp->qp_sec->mutex);
++ new_pps = get_new_pps(real_qp,
+ qp_attr,
+ qp_attr_mask);
+
+@@ -586,14 +591,14 @@ int ib_security_modify_qp(struct ib_qp *qp,
+
+ if (!ret)
+ ret = check_qp_port_pkey_settings(new_pps,
+- qp->qp_sec);
++ real_qp->qp_sec);
+ }
+
+ if (!ret)
+- ret = qp->device->modify_qp(qp->real_qp,
+- qp_attr,
+- qp_attr_mask,
+- udata);
++ ret = real_qp->device->modify_qp(real_qp,
++ qp_attr,
++ qp_attr_mask,
++ udata);
+
+ if (pps_change && !special_qp) {
+ /* Clean up the lists and free the appropriate
+@@ -602,8 +607,8 @@ int ib_security_modify_qp(struct ib_qp *qp,
+ if (ret) {
+ tmp_pps = new_pps;
+ } else {
+- tmp_pps = qp->qp_sec->ports_pkeys;
+- qp->qp_sec->ports_pkeys = new_pps;
++ tmp_pps = real_qp->qp_sec->ports_pkeys;
++ real_qp->qp_sec->ports_pkeys = new_pps;
+ }
+
+ if (tmp_pps) {
+@@ -611,7 +616,7 @@ int ib_security_modify_qp(struct ib_qp *qp,
+ port_pkey_list_remove(&tmp_pps->alt);
+ }
+ kfree(tmp_pps);
+- mutex_unlock(&qp->qp_sec->mutex);
++ mutex_unlock(&real_qp->qp_sec->mutex);
+ }
+ return ret;
+ }
+diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
+index 0be42787759f..312444386f54 100644
+--- a/drivers/infiniband/hw/hfi1/chip.c
++++ b/drivers/infiniband/hw/hfi1/chip.c
+@@ -13074,7 +13074,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
+ first_sdma = last_general;
+ last_sdma = first_sdma + dd->num_sdma;
+ first_rx = last_sdma;
+- last_rx = first_rx + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;
++ last_rx = first_rx + dd->n_krcv_queues + dd->num_vnic_contexts;
+
+ /* VNIC MSIx interrupts get mapped when VNIC contexts are created */
+ dd->first_dyn_msix_idx = first_rx + dd->n_krcv_queues;
+@@ -13294,8 +13294,9 @@ static int set_up_interrupts(struct hfi1_devdata *dd)
+ * slow source, SDMACleanupDone)
+ * N interrupts - one per used SDMA engine
+ * M interrupt - one per kernel receive context
++ * V interrupt - one for each VNIC context
+ */
+- total = 1 + dd->num_sdma + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;
++ total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_vnic_contexts;
+
+ /* ask for MSI-X interrupts */
+ request = request_msix(dd, total);
+@@ -13356,10 +13357,12 @@ static int set_up_interrupts(struct hfi1_devdata *dd)
+ * in array of contexts
+ * freectxts - number of free user contexts
+ * num_send_contexts - number of PIO send contexts being used
++ * num_vnic_contexts - number of contexts reserved for VNIC
+ */
+ static int set_up_context_variables(struct hfi1_devdata *dd)
+ {
+ unsigned long num_kernel_contexts;
++ u16 num_vnic_contexts = HFI1_NUM_VNIC_CTXT;
+ int total_contexts;
+ int ret;
+ unsigned ngroups;
+@@ -13393,6 +13396,14 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
+ num_kernel_contexts);
+ num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
+ }
++
++ /* Accommodate VNIC contexts if possible */
++ if ((num_kernel_contexts + num_vnic_contexts) > dd->chip_rcv_contexts) {
++ dd_dev_err(dd, "No receive contexts available for VNIC\n");
++ num_vnic_contexts = 0;
++ }
++ total_contexts = num_kernel_contexts + num_vnic_contexts;
++
+ /*
+ * User contexts:
+ * - default to 1 user context per real (non-HT) CPU core if
+@@ -13402,19 +13413,16 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
+ num_user_contexts =
+ cpumask_weight(&node_affinity.real_cpu_mask);
+
+- total_contexts = num_kernel_contexts + num_user_contexts;
+-
+ /*
+ * Adjust the counts given a global max.
+ */
+- if (total_contexts > dd->chip_rcv_contexts) {
++ if (total_contexts + num_user_contexts > dd->chip_rcv_contexts) {
+ dd_dev_err(dd,
+ "Reducing # user receive contexts to: %d, from %d\n",
+- (int)(dd->chip_rcv_contexts - num_kernel_contexts),
++ (int)(dd->chip_rcv_contexts - total_contexts),
+ (int)num_user_contexts);
+- num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
+ /* recalculate */
+- total_contexts = num_kernel_contexts + num_user_contexts;
++ num_user_contexts = dd->chip_rcv_contexts - total_contexts;
+ }
+
+ /* each user context requires an entry in the RMT */
+@@ -13427,25 +13435,24 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
+ user_rmt_reduced);
+ /* recalculate */
+ num_user_contexts = user_rmt_reduced;
+- total_contexts = num_kernel_contexts + num_user_contexts;
+ }
+
+- /* Accommodate VNIC contexts */
+- if ((total_contexts + HFI1_NUM_VNIC_CTXT) <= dd->chip_rcv_contexts)
+- total_contexts += HFI1_NUM_VNIC_CTXT;
++ total_contexts += num_user_contexts;
+
+ /* the first N are kernel contexts, the rest are user/vnic contexts */
+ dd->num_rcv_contexts = total_contexts;
+ dd->n_krcv_queues = num_kernel_contexts;
+ dd->first_dyn_alloc_ctxt = num_kernel_contexts;
++ dd->num_vnic_contexts = num_vnic_contexts;
+ dd->num_user_contexts = num_user_contexts;
+ dd->freectxts = num_user_contexts;
+ dd_dev_info(dd,
+- "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
++ "rcv contexts: chip %d, used %d (kernel %d, vnic %u, user %u)\n",
+ (int)dd->chip_rcv_contexts,
+ (int)dd->num_rcv_contexts,
+ (int)dd->n_krcv_queues,
+- (int)dd->num_rcv_contexts - dd->n_krcv_queues);
++ dd->num_vnic_contexts,
++ dd->num_user_contexts);
+
+ /*
+ * Receive array allocation:
+diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
+index 3ac9c307a285..6ff44dc606eb 100644
+--- a/drivers/infiniband/hw/hfi1/hfi.h
++++ b/drivers/infiniband/hw/hfi1/hfi.h
+@@ -1047,6 +1047,8 @@ struct hfi1_devdata {
+ u64 z_send_schedule;
+
+ u64 __percpu *send_schedule;
++ /* number of reserved contexts for VNIC usage */
++ u16 num_vnic_contexts;
+ /* number of receive contexts in use by the driver */
+ u32 num_rcv_contexts;
+ /* number of pio send contexts in use by the driver */
+diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
+index 6d2702ef34ac..25e867393463 100644
+--- a/drivers/infiniband/hw/hfi1/sysfs.c
++++ b/drivers/infiniband/hw/hfi1/sysfs.c
+@@ -543,7 +543,7 @@ static ssize_t show_nctxts(struct device *device,
+ * give a more accurate picture of total contexts available.
+ */
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+- min(dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt,
++ min(dd->num_user_contexts,
+ (u32)dd->sc_sizes[SC_USER].count));
+ }
+
+diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c
+index f419cbb05928..1a17708be46a 100644
+--- a/drivers/infiniband/hw/hfi1/vnic_main.c
++++ b/drivers/infiniband/hw/hfi1/vnic_main.c
+@@ -840,6 +840,9 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
+ struct rdma_netdev *rn;
+ int i, size, rc;
+
++ if (!dd->num_vnic_contexts)
++ return ERR_PTR(-ENOMEM);
++
+ if (!port_num || (port_num > dd->num_pports))
+ return ERR_PTR(-EINVAL);
+
+@@ -848,7 +851,7 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
+
+ size = sizeof(struct opa_vnic_rdma_netdev) + sizeof(*vinfo);
+ netdev = alloc_netdev_mqs(size, name, name_assign_type, setup,
+- dd->chip_sdma_engines, HFI1_NUM_VNIC_CTXT);
++ dd->chip_sdma_engines, dd->num_vnic_contexts);
+ if (!netdev)
+ return ERR_PTR(-ENOMEM);
+
+@@ -856,7 +859,7 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
+ vinfo = opa_vnic_dev_priv(netdev);
+ vinfo->dd = dd;
+ vinfo->num_tx_q = dd->chip_sdma_engines;
+- vinfo->num_rx_q = HFI1_NUM_VNIC_CTXT;
++ vinfo->num_rx_q = dd->num_vnic_contexts;
+ vinfo->netdev = netdev;
+ rn->free_rdma_netdev = hfi1_vnic_free_rn;
+ rn->set_id = hfi1_vnic_set_vesw_id;
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index fa5ccdb3bb2a..60d7b493ed2d 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -665,12 +665,19 @@ static void srp_path_rec_completion(int status,
+ static int srp_lookup_path(struct srp_rdma_ch *ch)
+ {
+ struct srp_target_port *target = ch->target;
+- int ret;
++ int ret = -ENODEV;
+
+ ch->path.numb_path = 1;
+
+ init_completion(&ch->done);
+
++ /*
++ * Avoid that the SCSI host can be removed by srp_remove_target()
++ * before srp_path_rec_completion() is called.
++ */
++ if (!scsi_host_get(target->scsi_host))
++ goto out;
++
+ ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
+ target->srp_host->srp_dev->dev,
+ target->srp_host->port,
+@@ -684,18 +691,24 @@ static int srp_lookup_path(struct srp_rdma_ch *ch)
+ GFP_KERNEL,
+ srp_path_rec_completion,
+ ch, &ch->path_query);
+- if (ch->path_query_id < 0)
+- return ch->path_query_id;
++ ret = ch->path_query_id;
++ if (ret < 0)
++ goto put;
+
+ ret = wait_for_completion_interruptible(&ch->done);
+ if (ret < 0)
+- return ret;
++ goto put;
+
+- if (ch->status < 0)
++ ret = ch->status;
++ if (ret < 0)
+ shost_printk(KERN_WARNING, target->scsi_host,
+ PFX "Path record query failed\n");
+
+- return ch->status;
++put:
++ scsi_host_put(target->scsi_host);
++
++out:
++ return ret;
+ }
+
+ static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
+diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
+index 9e8e9220f816..95178b4e3565 100644
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -2777,7 +2777,7 @@ static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
+ {
+ const char *p;
+ unsigned len, count, leading_zero_bytes;
+- int ret, rc;
++ int ret;
+
+ p = name;
+ if (strncasecmp(p, "0x", 2) == 0)
+@@ -2789,10 +2789,9 @@ static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
+ count = min(len / 2, 16U);
+ leading_zero_bytes = 16 - count;
+ memset(i_port_id, 0, leading_zero_bytes);
+- rc = hex2bin(i_port_id + leading_zero_bytes, p, count);
+- if (rc < 0)
+- pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", rc);
+- ret = 0;
++ ret = hex2bin(i_port_id + leading_zero_bytes, p, count);
++ if (ret < 0)
++ pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", ret);
+ out:
+ return ret;
+ }
+diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
+index b5df99c6f680..3b35271114ee 100644
+--- a/drivers/irqchip/irq-gic-v3.c
++++ b/drivers/irqchip/irq-gic-v3.c
+@@ -1071,18 +1071,18 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
+ int nr_parts;
+ struct partition_affinity *parts;
+
+- parts_node = of_find_node_by_name(gic_node, "ppi-partitions");
++ parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
+ if (!parts_node)
+ return;
+
+ nr_parts = of_get_child_count(parts_node);
+
+ if (!nr_parts)
+- return;
++ goto out_put_node;
+
+ parts = kzalloc(sizeof(*parts) * nr_parts, GFP_KERNEL);
+ if (WARN_ON(!parts))
+- return;
++ goto out_put_node;
+
+ for_each_child_of_node(parts_node, child_part) {
+ struct partition_affinity *part;
+@@ -1149,6 +1149,9 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
+
+ gic_data.ppi_descs[i] = desc;
+ }
++
++out_put_node:
++ of_node_put(parts_node);
+ }
+
+ static void __init gic_of_setup_kvm_info(struct device_node *node)
+diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
+index ae6146311934..f052a3eb2098 100644
+--- a/drivers/mailbox/bcm-flexrm-mailbox.c
++++ b/drivers/mailbox/bcm-flexrm-mailbox.c
+@@ -1365,8 +1365,8 @@ static void flexrm_shutdown(struct mbox_chan *chan)
+ /* Disable/inactivate ring */
+ writel_relaxed(0x0, ring->regs + RING_CONTROL);
+
+- /* Flush ring with timeout of 1s */
+- timeout = 1000;
++ /* Set ring flush state */
++ timeout = 1000; /* timeout of 1s */
+ writel_relaxed(BIT(CONTROL_FLUSH_SHIFT),
+ ring->regs + RING_CONTROL);
+ do {
+@@ -1374,7 +1374,23 @@ static void flexrm_shutdown(struct mbox_chan *chan)
+ FLUSH_DONE_MASK)
+ break;
+ mdelay(1);
+- } while (timeout--);
++ } while (--timeout);
++ if (!timeout)
++ dev_err(ring->mbox->dev,
++ "setting ring%d flush state timedout\n", ring->num);
++
++ /* Clear ring flush state */
++ timeout = 1000; /* timeout of 1s */
++ writel_relaxed(0x0, ring + RING_CONTROL);
++ do {
++ if (!(readl_relaxed(ring + RING_FLUSH_DONE) &
++ FLUSH_DONE_MASK))
++ break;
++ mdelay(1);
++ } while (--timeout);
++ if (!timeout)
++ dev_err(ring->mbox->dev,
++ "clearing ring%d flush state timedout\n", ring->num);
+
+ /* Abort all in-flight requests */
+ for (reqid = 0; reqid < RING_MAX_REQ_COUNT; reqid++) {
+diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
+index 08035634795c..c9934139d609 100644
+--- a/drivers/md/bcache/alloc.c
++++ b/drivers/md/bcache/alloc.c
+@@ -407,7 +407,8 @@ long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
+
+ finish_wait(&ca->set->bucket_wait, &w);
+ out:
+- wake_up_process(ca->alloc_thread);
++ if (ca->alloc_thread)
++ wake_up_process(ca->alloc_thread);
+
+ trace_bcache_alloc(ca, reserve);
+
+diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
+index d2121637b4ab..cae57b5be817 100644
+--- a/drivers/md/bitmap.c
++++ b/drivers/md/bitmap.c
+@@ -625,7 +625,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
+ err = read_sb_page(bitmap->mddev,
+ offset,
+ sb_page,
+- 0, PAGE_SIZE);
++ 0, sizeof(bitmap_super_t));
+ }
+ if (err)
+ return err;
+@@ -2123,7 +2123,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
+ if (store.sb_page && bitmap->storage.sb_page)
+ memcpy(page_address(store.sb_page),
+ page_address(bitmap->storage.sb_page),
+- PAGE_SIZE);
++ sizeof(bitmap_super_t));
+ bitmap_file_unmap(&bitmap->storage);
+ bitmap->storage = store;
+
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index d216a8f7bc22..8e3adcb46851 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -974,7 +974,8 @@ static void __get_memory_limit(struct dm_bufio_client *c,
+ buffers = c->minimum_buffers;
+
+ *limit_buffers = buffers;
+- *threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
++ *threshold_buffers = mult_frac(buffers,
++ DM_BUFIO_WRITEBACK_PERCENT, 100);
+ }
+
+ /*
+@@ -1910,19 +1911,15 @@ static int __init dm_bufio_init(void)
+ memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
+ memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
+
+- mem = (__u64)((totalram_pages - totalhigh_pages) *
+- DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;
++ mem = (__u64)mult_frac(totalram_pages - totalhigh_pages,
++ DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
+
+ if (mem > ULONG_MAX)
+ mem = ULONG_MAX;
+
+ #ifdef CONFIG_MMU
+- /*
+- * Get the size of vmalloc space the same way as VMALLOC_TOTAL
+- * in fs/proc/internal.h
+- */
+- if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
+- mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
++ if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
++ mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
+ #endif
+
+ dm_bufio_default_cache_size = mem;
+diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
+index 8785134c9f1f..0b7edfd0b454 100644
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -1201,6 +1201,18 @@ static void background_work_end(struct cache *cache)
+
+ /*----------------------------------------------------------------*/
+
++static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
++{
++ return (bio_data_dir(bio) == WRITE) &&
++ (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
++}
++
++static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block)
++{
++ return writeback_mode(&cache->features) &&
++ (is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio));
++}
++
+ static void quiesce(struct dm_cache_migration *mg,
+ void (*continuation)(struct work_struct *))
+ {
+@@ -1474,12 +1486,50 @@ static void mg_upgrade_lock(struct work_struct *ws)
+ }
+ }
+
++static void mg_full_copy(struct work_struct *ws)
++{
++ struct dm_cache_migration *mg = ws_to_mg(ws);
++ struct cache *cache = mg->cache;
++ struct policy_work *op = mg->op;
++ bool is_policy_promote = (op->op == POLICY_PROMOTE);
++
++ if ((!is_policy_promote && !is_dirty(cache, op->cblock)) ||
++ is_discarded_oblock(cache, op->oblock)) {
++ mg_upgrade_lock(ws);
++ return;
++ }
++
++ init_continuation(&mg->k, mg_upgrade_lock);
++
++ if (copy(mg, is_policy_promote)) {